ngram
listlengths
0
82k
[ "not None: ttl = self.force_ttl self.last_ttl = ttl self.last_ttl_known =", "try: rd = dns.rdata.from_text(rdclass, rdtype, self.tok, self.current_origin, self.relativize, self.zone_origin) except", "strictly # correct, but it is correct almost all of", "returned rrsets. *rdtype*, a ``dns.rdatatype.RdataType``, string, or ``None``. If not", "self.tok.get(want_leading=True) if not token.is_whitespace(): self.last_name = self.tok.as_name(token, self.current_origin) else: token", "that the above copyright notice and this permission notice #", "self.force_ttl self.last_ttl = ttl self.last_ttl_known = True else: token =", "rdclass self.txn = txn self.saved_state = [] self.current_file = None", "idna_codec=idna_codec) reader = Reader(tok, default_rdclass, txn, allow_directives=False, force_name=name, force_ttl=ttl, force_rdclass=rdclass,", "to the owner name; dnspython does not do IDNA for", "minttl if no $TTL statement is present before the #", "loffset, lwidth, _ = self._parse_modify(lhs) rmod, rsign, roffset, rwidth, _", "i - int(loffset) if rsign == '-': rindex = i", "type is not compatible with a ' 'CNAME node') elif", "one or more rrsets from the specified text, possibly subject", "*rdclass*, a ``dns.rdataclass.RdataClass``, string, or ``None``. If not ``None``, then", "ttl is None: raise dns.exception.SyntaxError(\"Missing default TTL value\") self.txn.add(name, ttl,", "and \\ rdataset_kind == dns.node.NodeKind.CNAME: raise CNAMEAndOtherData('CNAME rdataset is not", "notice # appear in all copies. # # THE SOFTWARE", "didn't have a TTL on the SOA, set it! 
ttl", "lhs.replace('$%s' % (lmod), lzfindex) rdata = rhs.replace('$%s' % (rmod), rzfindex)", "self.zone_rdclass: raise dns.exception.SyntaxError(\"RR class is not zone's class\") # Type", "occur in the processing of rdata # are treated as", "if no $TTL statement is present before the # SOA", "and self._changed(): rrsets = [] for (name, _, _), rdataset", "# provided that the above copyright notice and this permission", "self.last_ttl = ttl self.last_ttl_known = True token = None except", "isinstance(origin, str): origin = dns.name.from_text(origin, dns.name.root, idna_codec) if isinstance(name, str):", "python is exclusive if lsign == '+': lindex = i", "self.tok.unget(token) self._rr_line() except dns.exception.SyntaxError as detail: (filename, line_number) = self.tok.where()", "\"\"\"Read a DNS zone file into a transaction.\"\"\" def __init__(self,", "not strictly # correct, but it is correct almost all", "name; dnspython does not do IDNA for names in rdata,", "except Exception: raise dns.exception.SyntaxError # lhs (required) try: lhs =", "token.value token = self.tok.get() if token.is_identifier(): new_origin =\\ dns.name.from_text(token.value, self.current_origin,", "the input. If it is not specified, then the *default_ttl*", "if self.default_ttl_known: ttl = self.default_ttl elif self.last_ttl_known: ttl = self.last_ttl", "self.current_origin is None: raise UnknownOrigin token = self.tok.get(want_leading=True) if not", "use, copy, modify, and distribute this software and its #", "WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
\"\"\"DNS Zones.\"\"\"", "the TTL is forced to be the specified value and", "First remove any changes involving the name remove = []", "rzfindex) self.last_name = dns.name.from_text(name, self.current_origin, self.tok.idna_codec) name = self.last_name if", "token.is_whitespace(): self.last_name = self.tok.as_name(token, self.current_origin) else: token = self.tok.get() if", "self.saved_state = [] self.current_file = None self.allow_include = allow_include self.allow_directives", "= self.default_ttl elif self.last_ttl_known: ttl = self.last_ttl # Class try:", "remove = [] for key in self.rdatasets: if key[0] ==", "rdatasets = [] for (rdataset_name, _, _), rdataset in self.rdatasets.items():", "if self.current_origin is None: raise UnknownOrigin token = self.tok.get() #", "= [] for key in self.rdatasets: if key[0] == name:", "rdataset def _delete_name(self, name): # First remove any changes involving", "== '': sign = '+' g2 = is_generate2.match(side) if g2:", "i + int(loffset) elif lsign == '-': lindex = i", "!= self.zone_rdclass: raise dns.exception.SyntaxError(\"RR class is not zone's class\") #", "lzfindex) rdata = rhs.replace('$%s' % (rmod), rzfindex) self.last_name = dns.name.from_text(name,", "_get_rdataset(self, name, rdtype, covers): return self.rdatasets.get((name, rdtype, covers)) def _get_node(self,", "if ``None``, then if the TTL is not forced an", "self.last_ttl = 0 self.last_ttl_known = False if force_ttl is not", "``dns.rdatatype.RdataType``, string, or ``None``. 
If not ``None``, then the type", "+ int(loffset) elif lsign == '-': lindex = i -", "= self.last_name if not name.is_subdomain(self.zone_origin): self._eat_line() return if self.relativize: name", "'$INCLUDE' and self.allow_include: token = self.tok.get() filename = token.value token", "at the zone origin @raises dns.zone.NoNS: No NS RRset was", "as detail: (filename, line_number) = self.tok.where() if detail is None:", "= allow_directives self.force_name = force_name self.force_ttl = force_ttl self.force_rdclass =", "is not compatible with a ' 'regular data node') #", "self.tok.get_eol() continue elif token.value[0] == '$' and self.allow_directives: c =", "self.rdclass def origin_information(self): if self.relativize: effective = dns.name.empty else: effective", "\"\"\" if isinstance(origin, str): origin = dns.name.from_text(origin, dns.name.root, idna_codec) if", "NotImplementedError() return mod, sign, offset, width, base def _generate_line(self): #", "self.tok.get() # Range (required) try: start, stop, step = dns.grange.from_text(token.value)", "self._get_identifier() ttl = None try: ttl = dns.ttl.from_text(token.value) self.last_ttl =", "rsign == '+': rindex = i + int(roffset) lzfindex =", "to wait until now to do this as the SOA", "not zone's class\") # Type try: rdtype = dns.rdatatype.from_text(token.value) token", "exception {}: {}\".format(str(ty), str(va))) if not self.default_ttl_known and rdtype ==", "filename:line info. (ty, va) = sys.exc_info()[:2] raise dns.exception.SyntaxError( \"caught exception", "THIS SOFTWARE. \"\"\"DNS Zones.\"\"\" import re import sys import dns.exception", "force_ttl is not None: default_ttl = force_ttl if default_ttl is", "OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM", "return None node = dns.node.Node() node.rdatasets = rdatasets return node", "they were EOL/EOF. 
return self.tok.unget(token) name = self.last_name if not", "if not token.is_identifier(): raise dns.exception.SyntaxError except Exception: raise dns.exception.SyntaxError #", "exclusive if lsign == '+': lindex = i + int(loffset)", "raise dns.exception.SyntaxError(\"Missing default TTL value\") self.txn.add(name, ttl, rd) def _parse_modify(self,", "dns.exception.SyntaxError except dns.exception.SyntaxError: raise dns.exception.SyntaxError except Exception: rdclass = self.zone_rdclass", "if not token.is_whitespace(): self.last_name = self.tok.as_name(token, self.current_origin) else: token =", "'+' g2 = is_generate2.match(side) if g2: mod, sign, offset =", "len(remove) > 0: for key in remove: del self.rdatasets[key] def", "statement from a DNS zone file.\"\"\" if self.current_origin is None:", "self.tok.get() if token.is_eol_or_eof(): break def _get_identifier(self): token = self.tok.get() if", "== rdataset_name: rdatasets.append(rdataset) if len(rdatasets) == 0: return None node", "not None: rdtype = dns.rdatatype.RdataType.make(rdtype) manager = RRSetsReaderManager(origin, relativize, default_rdclass)", "_ = self._parse_modify(rhs) for i in range(start, stop + 1,", "or self.default_ttl_known): raise dns.exception.SyntaxError(\"Missing default TTL value\") if self.default_ttl_known: ttl", "is not None: rdtype = dns.rdatatype.RdataType.make(rdtype) manager = RRSetsReaderManager(origin, relativize,", "= new_origin elif c == '$GENERATE': self._generate_line() else: raise dns.exception.SyntaxError(", "ignored lmod, lsign, loffset, lwidth, _ = self._parse_modify(lhs) rmod, rsign,", "Make names g1 = is_generate1.match(side) if g1: mod, sign, offset,", "self.manager.set_rrsets(rrsets) def _set_origin(self, origin): pass class RRSetsReaderManager(dns.transaction.TransactionManager): def __init__(self, origin=dns.name.root,", "continue break elif token.is_eol(): continue elif token.is_comment(): self.tok.get_eol() continue elif", "if force_ttl is not None: default_ttl = 
force_ttl if default_ttl", "# Type if self.force_rdtype is not None: rdtype = self.force_rdtype", "tok, rdclass, txn, allow_include=False, allow_directives=True, force_name=None, force_ttl=None, force_rdclass=None, force_rdtype=None, default_ttl=None):", "DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR", "covers)] except KeyError: pass def _name_exists(self, name): for (n, _,", "self.current_origin, self.tok.idna_codec) self.tok.get_eol() elif not token.is_eol_or_eof(): raise dns.exception.SyntaxError( \"bad origin", "from the SOA minttl if no $TTL statement is present", "rindex = i - int(roffset) elif rsign == '+': rindex", "self.default_ttl_known = True if ttl is None: # if we", "%s\" % (str(ty), str(va))) self.txn.add(name, ttl, rd) def read(self): \"\"\"Read", "raise dns.exception.SyntaxError( \"Unknown zone file directive '\" + c +", "= self.tok.get(True, True) if token.is_eof(): if self.current_file is not None:", "try: lhs = token.value token = self.tok.get() if not token.is_identifier():", "# +1 because bind is inclusive and python is exclusive", "not None: rdclass = self.force_rdclass else: token = self._get_identifier() try:", "dns.exception.SyntaxError: raise dns.exception.SyntaxError except Exception: rdclass = self.zone_rdclass if rdclass", "or a string, is the input to process. *name*, a", "rhs = token.value # The code currently only supports base", "input to process. *name*, a string, ``dns.name.Name``, or ``None``, is", "may be convenient when cutting and pasting. *default_rdclass*, a ``dns.rdataclass.RdataClass``", "self.last_ttl_known, self.default_ttl, self.default_ttl_known)) self.current_file = open(filename, 'r') self.tok = dns.tokenizer.Tokenizer(self.current_file,", "rdatatype '%s'\" % token.value) # rhs (required) rhs = token.value", "detail is None: detail = \"syntax error\" ex = dns.exception.SyntaxError(", "c + \"'\") continue self.tok.unget(token) self._rr_line() except dns.exception.SyntaxError as detail:", "name of the rrset. 
If not ``None``, then the owner", "the hostname. These come after # the dollar sign. They", "# Sometimes there are modifiers in the hostname. These come", "rdata, self.current_origin, self.relativize, self.zone_origin) except dns.exception.SyntaxError: # Catch and reraise.", "and the input must not specify a type. If ``None``,", "width, base def _generate_line(self): # range lhs [ttl] [class] type", "self.last_ttl_known = True token = None except dns.ttl.BadTTL: if self.default_ttl_known:", "specified. *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA encoder/decoder. If ``None``,", "(name, _, _), rdataset in self.rdatasets.items(): rrset = dns.rrset.RRset(name, rdataset.rdclass,", "in self.rdatasets: if n == name: return True return False", "sign, offset, width = g3.groups() if sign == '': sign", "dns.exception.SyntaxError( \"Unknown zone file directive '\" + c + \"'\")", "self.current_origin = self.zone_origin self.last_ttl = 0 self.last_ttl_known = False if", "g3: mod, sign, offset, width = g3.groups() if sign ==", "(self.zone_origin, self.relativize, _) = \\ txn.manager.origin_information() self.current_origin = self.zone_origin self.last_ttl", "parsed. self.default_ttl = rd.minimum self.default_ttl_known = True if ttl is", "self.default_ttl_known) = self.saved_state.pop(-1) continue break elif token.is_eol(): continue elif token.is_comment():", "license # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. # #", "= 0 width = 0 base = 'd' if base", "None. 
If not ``None``, the the TTL is forced to", "elif self.last_ttl_known: ttl = self.last_ttl self.tok.unget(token) # Class if self.force_rdclass", "dns.ttl.from_text(default_ttl) if rdclass is not None: rdclass = dns.rdataclass.RdataClass.make(rdclass) default_rdclass", "dns.rdtypes.ANY.SOA import dns.rrset import dns.tokenizer import dns.transaction import dns.ttl import", "dns.rdata import dns.rdtypes.ANY.SOA import dns.rrset import dns.tokenizer import dns.transaction import", "return self.rdclass def origin_information(self): if self.relativize: effective = dns.name.empty else:", "is_generate3.match(side) if g3: mod, sign, offset, width = g3.groups() if", "import sys import dns.exception import dns.name import dns.node import dns.rdataclass", "and the input must not specify an owner name. If", "to relativize to if *relativize* is ``True``. *relativize*, a bool.", "(C) 2003-2007, 2009-2011 Nominum, Inc. # # Permission to use,", "= is_generate2.match(side) if g2: mod, sign, offset = g2.groups() if", "g3 = is_generate3.match(side) if g3: mod, sign, offset, width =", "name.is_subdomain(self.zone_origin): self._eat_line() return if self.relativize: name = name.relativize(self.zone_origin) # TTL", "dns.tokenizer.Tokenizer(text, '<input>', idna_codec=idna_codec) reader = Reader(tok, default_rdclass, txn, allow_directives=False, force_name=name,", "side): # Here we catch everything in '{' '}' in", "not specify a TTL. If ``None``, then a TTL may", "is not strictly # correct, but it is correct almost", "or ``None``. 
If not ``None``, then if the TTL is", "default_rdclass, txn, allow_directives=False, force_name=name, force_ttl=ttl, force_rdclass=rdclass, force_rdtype=rdtype, default_ttl=default_ttl) reader.read() return", "try: del self.rdatasets[(name, rdtype, covers)] except KeyError: pass def _name_exists(self,", "allow_directives=True, force_name=None, force_ttl=None, force_rdclass=None, force_rdtype=None, default_ttl=None): self.tok = tok (self.zone_origin,", "rrsets = [] for (name, _, _), rdataset in self.rdatasets.items():", "text, possibly subject to restrictions. *text*, a file object or", "present for each RR. *default_ttl*, an ``int``, string, or ``None``.", "with manager.writer(True) as txn: tok = dns.tokenizer.Tokenizer(text, '<input>', idna_codec=idna_codec) reader", "error\" ex = dns.exception.SyntaxError( \"%s:%d: %s\" % (filename, line_number, detail))", "because bind is inclusive and python is exclusive if lsign", "'d': raise NotImplementedError() return mod, sign, offset, width, base def", "lindex = i + int(loffset) elif lsign == '-': lindex", "try: rdtype = dns.rdatatype.from_text(token.value) except Exception: raise dns.exception.SyntaxError( \"unknown rdatatype", "ttl self.last_ttl_known = True else: token = self._get_identifier() ttl =", "raise dns.exception.SyntaxError( \"bad origin in $INCLUDE\") else: new_origin = self.current_origin", "zone object. @raises dns.zone.NoSOA: No SOA RR was found at", "SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES #", "'d', so the last value # in the tuple _parse_modify", "= dns.ttl.from_text(token.value) self.default_ttl_known = True self.tok.get_eol() elif c == '$ORIGIN':", "names in rdata, as there is no IDNA zonefile format.", "except dns.exception.SyntaxError: raise except Exception: rdclass = self.zone_rdclass self.tok.unget(token) if", "specified in the input. 
If it is not specified, then", "self.last_name = dns.name.from_text(name, self.current_origin, self.tok.idna_codec) name = self.last_name if not", "self.current_file, self.last_ttl, self.last_ttl_known, self.default_ttl, self.default_ttl_known) = self.saved_state.pop(-1) continue break elif", "at the zone origin \"\"\" try: while 1: token =", "a TTL may be specified in the input. If it", "origin): pass class RRSetsReaderManager(dns.transaction.TransactionManager): def __init__(self, origin=dns.name.root, relativize=False, rdclass=dns.rdataclass.IN): self.origin", "*rdtype*, a ``dns.rdatatype.RdataType``, string, or ``None``. If not ``None``, then", "self.tok.idna_codec) self.tok.get_eol() elif not token.is_eol_or_eof(): raise dns.exception.SyntaxError( \"bad origin in", "rdtype == dns.rdatatype.SOA: # The pre-RFC2308 and pre-BIND9 behavior inherits", "value\") self.txn.add(name, ttl, rd) def _parse_modify(self, side): # Here we", "== dns.node.NodeKind.REGULAR: raise CNAMEAndOtherData('rdataset type is not compatible with a", "or g3): mod = '' sign = '+' offset =", "sign == '': sign = '+' width = 0 base", "TTL is not forced and is not specified, then this", "must be present for each RR. *default_ttl*, an ``int``, string,", "None: raise dns.exception.SyntaxError(\"Missing default TTL value\") self.txn.add(name, ttl, rd) def", "ARISING OUT # OF OR IN CONNECTION WITH THE USE", "name == rdataset_name: rdatasets.append(rdataset) if len(rdatasets) == 0: return None", "except Exception: # All exceptions that occur in the processing", "@raises dns.zone.NoSOA: No SOA RR was found at the zone", "2003-2007, 2009-2011 Nominum, Inc. 
# # Permission to use, copy,", "rdtype is not None: rdtype = dns.rdatatype.RdataType.make(rdtype) manager = RRSetsReaderManager(origin,", "'r') self.tok = dns.tokenizer.Tokenizer(self.current_file, filename) self.current_origin = new_origin elif c", "# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE", "FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN", "self.current_origin = new_origin elif c == '$GENERATE': self._generate_line() else: raise", "if len(self.saved_state) > 0: (self.tok, self.current_origin, self.last_name, self.current_file, self.last_ttl, self.last_ttl_known,", "is correct almost all of the time. # We convert", "No SOA RR was found at the zone origin @raises", "= self.tok.where() if detail is None: detail = \"syntax error\"", "1: token = self.tok.get() if token.is_eol_or_eof(): break def _get_identifier(self): token", "REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF #", "dns.name.from_text(token.value, self.current_origin, self.tok.idna_codec) self.tok.get_eol() elif not token.is_eol_or_eof(): raise dns.exception.SyntaxError( \"bad", "used. if ``None``, then if the TTL is not forced", "None: rdclass = self.force_rdclass else: token = self._get_identifier() try: rdclass", "rhs.replace('$%s' % (rmod), rzfindex) self.last_name = dns.name.from_text(name, self.current_origin, self.tok.idna_codec) name", "and its # documentation for any purpose with or without", "NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR", "in the form: ${offset[,width[,base]]}. # Make names g1 = is_generate1.match(side)", "build a zone object. 
@raises dns.zone.NoSOA: No SOA RR was", "and pre-BIND9 behavior inherits the zone default # TTL from", "= self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError return token def", "True else: token = self._get_identifier() ttl = None try: ttl", "Reader(tok, default_rdclass, txn, allow_directives=False, force_name=name, force_ttl=ttl, force_rdclass=rdclass, force_rdtype=rdtype, default_ttl=default_ttl) reader.read()", "assert replacement is True return RRsetsReaderTransaction(self, True, False) def get_class(self):", "self.default_ttl_known = True self.tok.get_eol() elif c == '$ORIGIN': self.current_origin =", "if ttl is None: # if we didn't have a", "is_generate3 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+),(\\d+)}).*$\") # Sometimes there are modifiers in the", "sys import dns.exception import dns.name import dns.node import dns.rdataclass import", "WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF", "self._eat_line() return if self.relativize: name = name.relativize(self.zone_origin) try: rd =", "= self.last_ttl self.tok.unget(token) # Class if self.force_rdclass is not None:", "to syntax errors so that we can emit # helpful", "its minimum. if ttl is None: raise dns.exception.SyntaxError(\"Missing default TTL", "# TTL from the SOA minttl if no $TTL statement", "owner name; dnspython does not do IDNA for names in", "except dns.ttl.BadTTL: if self.default_ttl_known: ttl = self.default_ttl elif self.last_ttl_known: ttl", "'<input>', idna_codec=idna_codec) reader = Reader(tok, default_rdclass, txn, allow_directives=False, force_name=name, force_ttl=ttl,", "AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR", "leading WS followed by EOL/EOF as if they were EOL/EOF.", "rhs [ comment ] \"\"\"Process one line containing the GENERATE", "currently only supports base 'd', so the last value #", "dns.exception.SyntaxError return token def _rr_line(self): \"\"\"Process one line from a", "and must be present in the input. 
*ttl*, an ``int``,", "# range lhs [ttl] [class] type rhs [ comment ]", "c == '$ORIGIN': self.current_origin = self.tok.get_name() self.tok.get_eol() if self.zone_origin is", "self.force_rdclass = force_rdclass self.force_rdtype = force_rdtype self.txn.check_put_rdataset(_check_cname_and_other_data) def _eat_line(self): while", "[] self.current_file = None self.allow_include = allow_include self.allow_directives = allow_directives", "not name.is_subdomain(self.zone_origin): self._eat_line() return if self.relativize: name = name.relativize(self.zone_origin) #", "Class if self.force_rdclass is not None: rdclass = self.force_rdclass else:", "= sys.exc_info()[:2] raise dns.exception.SyntaxError( \"caught exception {}: {}\".format(str(ty), str(va))) if", "distribute this software and its # documentation for any purpose", "forced to be the specified value and the input must", "Inc. # # Permission to use, copy, modify, and distribute", "= dns.name.from_text(origin, dns.name.root, idna_codec) if isinstance(name, str): name = dns.name.from_text(name,", "self.tok.get_eol() elif c == '$ORIGIN': self.current_origin = self.tok.get_name() self.tok.get_eol() if", "idna_codec) if isinstance(name, str): name = dns.name.from_text(name, origin, idna_codec) if", "and distribute this software and its # documentation for any", "\\ rdataset_kind == dns.node.NodeKind.CNAME: raise CNAMEAndOtherData('CNAME rdataset is not compatible", "self.default_ttl, self.default_ttl_known) = self.saved_state.pop(-1) continue break elif token.is_eol(): continue elif", "i + int(roffset) lzfindex = str(lindex).zfill(int(lwidth)) rzfindex = str(rindex).zfill(int(rwidth)) name", "re.compile(r\"^.*\\$({(\\+|-?)(\\d+),(\\d+),(.)}).*$\") is_generate2 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+)}).*$\") is_generate3 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+),(\\d+)}).*$\") # Sometimes there", "rdclass != self.zone_rdclass: raise dns.exception.SyntaxError(\"RR class is not zone's class\")", "hostname. 
These come after # the dollar sign. They are", "was found at the zone origin @raises dns.zone.NoNS: No NS", "- int(roffset) elif rsign == '+': rindex = i +", "ex.with_traceback(tb) from None class RRsetsReaderTransaction(dns.transaction.Transaction): def __init__(self, manager, replacement, read_only):", "# The pre-RFC2308 and pre-BIND9 behavior inherits the zone default", "dns.exception.SyntaxError # TTL try: ttl = dns.ttl.from_text(token.value) self.last_ttl = ttl", "*ttl*, an ``int``, string, or None. If not ``None``, the", "simply allows the user to optionally type a class as", "= self.saved_state.pop(-1) continue break elif token.is_eol(): continue elif token.is_comment(): self.tok.get_eol()", "self.rdatasets: if n == name: return True return False def", "# in the tuple _parse_modify returns is ignored lmod, lsign,", "key in remove: del self.rdatasets[key] def _delete_rdataset(self, name, rdtype, covers):", "not specify a class. If ``None``, then the input may", "# Permission to use, copy, modify, and distribute this software", "elif token.value[0] == '$' and self.allow_directives: c = token.value.upper() if", "node and the rdataset is neutral, so # adding the", "a class. If ``None``, then the input may specify a", "neutral. return node_kind = node.classify() if node_kind == dns.node.NodeKind.CNAME and", "if self.relativize: effective = dns.name.empty else: effective = self.origin return", "# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE", "# Here we catch everything in '{' '}' in a", "default IDNA 2003 encoder/decoder is used. 
Note that codecs only", "# Class if self.force_rdclass is not None: rdclass = self.force_rdclass", "raise dns.exception.SyntaxError( \"caught exception {}: {}\".format(str(ty), str(va))) if not self.default_ttl_known", "- int(loffset) if rsign == '-': rindex = i -", "None: detail = \"syntax error\" ex = dns.exception.SyntaxError( \"%s:%d: %s\"", "manager, replacement, read_only): assert not read_only super().__init__(manager, replacement, read_only) self.rdatasets", "if not (self.last_ttl_known or self.default_ttl_known): raise dns.exception.SyntaxError(\"Missing default TTL value\")", "name is \"forced\", and the input must not specify an", "def read(self): \"\"\"Read a DNS zone file and build a", "file object or a string, is the input to process.", "if len(rdatasets) == 0: return None node = dns.node.Node() node.rdatasets", "Range (required) try: start, stop, step = dns.grange.from_text(token.value) token =", "self.default_ttl = rd.minimum self.default_ttl_known = True if ttl is None:", "If ``None``, then any owner names are allowed and must", "copyright notice and this permission notice # appear in all", "_put_rdataset(self, name, rdataset): self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] = rdataset def _delete_name(self,", "not token.is_identifier(): raise dns.exception.SyntaxError except dns.exception.SyntaxError: raise dns.exception.SyntaxError except Exception:", "rdtype, rdata, self.current_origin, self.relativize, self.zone_origin) except dns.exception.SyntaxError: # Catch and", "base != 'd': raise NotImplementedError() return mod, sign, offset, width,", "return rrsets with differing classes; specifying ``None`` for the class", "rdataset_kind == dns.node.NodeKind.REGULAR: raise CNAMEAndOtherData('rdataset type is not compatible with", "then the input may specify a class that matches *default_rdclass*.", "any owner names are allowed and must be present in", "token = self.tok.get() if token.is_identifier(): new_origin =\\ 
dns.name.from_text(token.value, self.current_origin, self.tok.idna_codec)", "string, is the input to process. *name*, a string, ``dns.name.Name``,", "except dns.ttl.BadTTL: if not (self.last_ttl_known or self.default_ttl_known): raise dns.exception.SyntaxError(\"Missing default", "\"unknown rdatatype '%s'\" % token.value) try: rd = dns.rdata.from_text(rdclass, rdtype,", "try: rd = dns.rdata.from_text(rdclass, rdtype, rdata, self.current_origin, self.relativize, self.zone_origin) except", "[] for key in self.rdatasets: if key[0] == name: remove.append(key)", "the origin to relativize to if *relativize* is ``True``. *relativize*,", "dns.grange.from_text(token.value) token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except", "self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except dns.exception.SyntaxError: raise dns.exception.SyntaxError", "dns.node.NodeKind.CNAME: raise CNAMEAndOtherData('CNAME rdataset is not compatible with a '", "= self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except Exception: raise", "len(self.saved_state) > 0: (self.tok, self.current_origin, self.last_name, self.current_file, self.last_ttl, self.last_ttl_known, self.default_ttl,", "forced and is not specified, then this value will be", "sys.exc_info()[:2] raise dns.exception.SyntaxError(\"caught exception %s: %s\" % (str(ty), str(va))) self.txn.add(name,", "class RRSetsReaderManager(dns.transaction.TransactionManager): def __init__(self, origin=dns.name.root, relativize=False, rdclass=dns.rdataclass.IN): self.origin = origin", "origin, idna_codec) if isinstance(ttl, str): ttl = dns.ttl.from_text(ttl) if isinstance(default_ttl,", "= self._get_identifier() try: rdtype = dns.rdatatype.from_text(token.value) except Exception: raise dns.exception.SyntaxError(", "# helpful filename:line info. 
(ty, va) = sys.exc_info()[:2] raise dns.exception.SyntaxError(", "zone file.\"\"\" token = None # Name if self.force_name is", "USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF", "it! ttl = rd.minimum # TTL check. We had to", "IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT", "node.classify() if node_kind == dns.node.NodeKind.CNAME and \\ rdataset_kind == dns.node.NodeKind.REGULAR:", "continue elif token.value[0] == '$' and self.allow_directives: c = token.value.upper()", "if sign == '': sign = '+' width = 0", "== '-': lindex = i - int(loffset) if rsign ==", "width = g3.groups() if sign == '': sign = '+'", "= i - int(loffset) if rsign == '-': rindex =", "except dns.exception.SyntaxError as detail: (filename, line_number) = self.tok.where() if detail", "if detail is None: detail = \"syntax error\" ex =", "str): name = dns.name.from_text(name, origin, idna_codec) if isinstance(ttl, str): ttl", "# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING", "= ttl self.last_ttl_known = True token = None except dns.ttl.BadTTL:", "comment ] \"\"\"Process one line containing the GENERATE statement from", "and rdtype == dns.rdatatype.SOA: # The pre-RFC2308 and pre-BIND9 behavior", "if self.force_rdtype is not None: rdtype = self.force_rdtype else: token", "will be used. *rdclass*, a ``dns.rdataclass.RdataClass``, string, or ``None``. If", "name, rdataset): rdataset_kind = dns.node.NodeKind.classify_rdataset(rdataset) node = txn.get_node(name) if node", "int(roffset) lzfindex = str(lindex).zfill(int(lwidth)) rzfindex = str(rindex).zfill(int(rwidth)) name = lhs.replace('$%s'", "this value will be used. 
if ``None``, then if the", "in range(start, stop + 1, step): # +1 because bind", "bind is inclusive and python is exclusive if lsign ==", "= dns.grange.from_text(token.value) token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError", "return node_kind = node.classify() if node_kind == dns.node.NodeKind.CNAME and \\", "elif token.is_eol(): continue elif token.is_comment(): self.tok.get_eol() continue elif token.value[0] ==", "_get_identifier(self): token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError return", "a ``dns.name.IDNACodec``, specifies the IDNA encoder/decoder. If ``None``, the default", "UnknownOrigin(dns.exception.DNSException): \"\"\"Unknown origin\"\"\" class CNAMEAndOtherData(dns.exception.DNSException): \"\"\"A node has a CNAME", "rdataset): self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] = rdataset def _delete_name(self, name): #", "g2 = is_generate2.match(side) if g2: mod, sign, offset = g2.groups()", "rrsets from the specified text, possibly subject to restrictions. *text*,", "# We convert them to syntax errors so that we", "raise dns.exception.SyntaxError(\"RR class is not zone's class\") # Type if", "not self.default_ttl_known and rdtype == dns.rdatatype.SOA: # The pre-RFC2308 and", "The pre-RFC2308 and pre-BIND9 behavior inherits the zone default #", "a ``dns.rdataclass.RdataClass`` or string. 
The class of the returned rrsets.", "RRSetsReaderManager(dns.transaction.TransactionManager): def __init__(self, origin=dns.name.root, relativize=False, rdclass=dns.rdataclass.IN): self.origin = origin self.relativize", "= self.zone_rdclass if rdclass != self.zone_rdclass: raise dns.exception.SyntaxError(\"RR class is", "dns.node.NodeKind.REGULAR and \\ rdataset_kind == dns.node.NodeKind.CNAME: raise CNAMEAndOtherData('CNAME rdataset is", "if rdtype is not None: rdtype = dns.rdatatype.RdataType.make(rdtype) manager =", "AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,", "other data\"\"\" def _check_cname_and_other_data(txn, name, rdataset): rdataset_kind = dns.node.NodeKind.classify_rdataset(rdataset) node", "False if force_ttl is not None: default_ttl = force_ttl if", "RRset was found at the zone origin \"\"\" try: while", "OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN", "involving the name remove = [] for key in self.rdatasets:", "default_ttl=None, idna_codec=None, origin=dns.name.root, relativize=False): \"\"\"Read one or more rrsets from", "dns.rrset import dns.tokenizer import dns.transaction import dns.ttl import dns.grange class", "not compatible with a ' 'CNAME node') elif node_kind ==", "appending the *origin*. \"\"\" if isinstance(origin, str): origin = dns.name.from_text(origin,", "rdclass = dns.rdataclass.from_text(token.value) token = self.tok.get() if not token.is_identifier(): raise", "forced to the specified value, and the input must not", "UnknownOrigin token = self.tok.get() # Range (required) try: start, stop,", "a bool. If ``True``, names are relativized to the *origin*;", "relativize=False): \"\"\"Read one or more rrsets from the specified text,", "if not token.is_identifier(): raise dns.exception.SyntaxError(\"bad $TTL\") self.default_ttl = dns.ttl.from_text(token.value) self.default_ttl_known", "value and the input must not specify a TTL. If", "an ``int``, string, or None. 
If not ``None``, the the", "force_ttl self.force_rdclass = force_rdclass self.force_rdtype = force_rdtype self.txn.check_put_rdataset(_check_cname_and_other_data) def _eat_line(self):", "must be present in the input. *ttl*, an ``int``, string,", "must not specify a type. If ``None``, then a type", "def _name_exists(self, name): for (n, _, _) in self.rdatasets: if", "= self.origin return (self.origin, self.relativize, effective) def set_rrsets(self, rrsets): self.rrsets", "Contributors, see LICENSE for text of ISC license # Copyright", "``None``, then if the TTL is not forced and is", "token.is_identifier(): raise dns.exception.SyntaxError except Exception: raise dns.exception.SyntaxError # lhs (required)", "str(lindex).zfill(int(lwidth)) rzfindex = str(rindex).zfill(int(rwidth)) name = lhs.replace('$%s' % (lmod), lzfindex)", "is present before the # SOA is parsed. self.default_ttl =", "followed by EOL/EOF as if they were EOL/EOF. return self.tok.unget(token)", "software and its # documentation for any purpose with or", "treated as syntax errors. This is not strictly # correct,", "dns.tokenizer.Tokenizer(self.current_file, filename) self.current_origin = new_origin elif c == '$GENERATE': self._generate_line()", "= force_ttl self.force_rdclass = force_rdclass self.force_rdtype = force_rdtype self.txn.check_put_rdataset(_check_cname_and_other_data) def", "return if self.relativize: name = name.relativize(self.zone_origin) # TTL if self.force_ttl", "None except dns.ttl.BadTTL: if self.default_ttl_known: ttl = self.default_ttl elif self.last_ttl_known:", "EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT,", "catch everything in '{' '}' in a group so we", "string. The class of the returned rrsets. 
*rdtype*, a ``dns.rdatatype.RdataType``,", "IS\" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO", "= [] for (name, _, _), rdataset in self.rdatasets.items(): rrset", "self.tok.get() if token.is_eol_or_eof(): # treat leading WS followed by EOL/EOF", "base def _generate_line(self): # range lhs [ttl] [class] type rhs", "for key in self.rdatasets: if key[0] == name: remove.append(key) if", "line from a DNS zone file.\"\"\" token = None #", "be used. if ``None``, then if the TTL is not", "is not None: rdclass = self.force_rdclass else: token = self._get_identifier()", "True self.last_name = self.current_origin self.zone_rdclass = rdclass self.txn = txn", "an error will occur if the TTL is not specified.", "None class RRsetsReaderTransaction(dns.transaction.Transaction): def __init__(self, manager, replacement, read_only): assert not", "if not token.is_identifier(): raise dns.exception.SyntaxError except dns.exception.SyntaxError: raise dns.exception.SyntaxError except", "if default_ttl is None: self.default_ttl = 0 self.default_ttl_known = False", "the TTL is not forced an error will occur if", "class is forced to the specified value, and the input", "the input. *ttl*, an ``int``, string, or None. If not", "= rd.minimum self.default_ttl_known = True if ttl is None: #", "NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR", "raise UnknownOrigin token = self.tok.get() # Range (required) try: start,", "self.relativize = relativize self.rdclass = rdclass self.rrsets = [] def", "is inclusive and python is exclusive if lsign == '+':", "string, ``dns.name.Name``, or ``None``, is the origin for any relative", "ttl = self.default_ttl elif self.last_ttl_known: ttl = self.last_ttl self.tok.unget(token) #", "string, or ``None``. If not ``None``, then the class is", "def _end_transaction(self, commit): if commit and self._changed(): rrsets = []", "``None``. 
If not ``None``, then if the TTL is not", "va) = sys.exc_info()[:2] raise dns.exception.SyntaxError(\"caught exception %s: %s\" % (str(ty),", "and self.allow_include: token = self.tok.get() filename = token.value token =", "rdataset.rdtype, rdataset.covers) rrset.update(rdataset) rrsets.append(rrset) self.manager.set_rrsets(rrsets) def _set_origin(self, origin): pass class", "rrset.update(rdataset) rrsets.append(rrset) self.manager.set_rrsets(rrsets) def _set_origin(self, origin): pass class RRSetsReaderManager(dns.transaction.TransactionManager): def", "RRsetsReaderTransaction(self, True, False) def get_class(self): return self.rdclass def origin_information(self): if", "a CNAME and other data\"\"\" def _check_cname_and_other_data(txn, name, rdataset): rdataset_kind", "be inferred from its minimum. if ttl is None: raise", "specify a TTL. If ``None``, then a TTL may be", "raise dns.exception.SyntaxError except Exception: rdclass = self.zone_rdclass if rdclass !=", "SOA is parsed. self.default_ttl = rd.minimum self.default_ttl_known = True if", "import dns.rdtypes.ANY.SOA import dns.rrset import dns.tokenizer import dns.transaction import dns.ttl", "commit): if commit and self._changed(): rrsets = [] for (name,", "def _generate_line(self): # range lhs [ttl] [class] type rhs [", "zonefile format. *origin*, a string, ``dns.name.Name``, or ``None``, is the", "import dns.rdataclass import dns.rdatatype import dns.rdata import dns.rdtypes.ANY.SOA import dns.rrset", "line_number) = self.tok.where() if detail is None: detail = \"syntax", "neutral, so # adding the rdataset is ok class Reader:", "EOL/EOF. return self.tok.unget(token) name = self.last_name if not name.is_subdomain(self.zone_origin): self._eat_line()", "relativize to if *relativize* is ``True``. *relativize*, a bool. If", "correct almost all of the time. # We convert them", "*origin*. 
\"\"\" if isinstance(origin, str): origin = dns.name.from_text(origin, dns.name.root, idna_codec)", "replacement is True return RRsetsReaderTransaction(self, True, False) def get_class(self): return", "last value # in the tuple _parse_modify returns is ignored", "it # with ''. is_generate1 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+),(\\d+),(.)}).*$\") is_generate2 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+)}).*$\")", "AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS", "self.current_origin) else: token = self.tok.get() if token.is_eol_or_eof(): # treat leading", "do IDNA for names in rdata, as there is no", "= token.value # The code currently only supports base 'd',", "specify a class that matches *default_rdclass*. Note that it is", "self.current_origin self.txn._set_origin(self.current_origin) elif c == '$INCLUDE' and self.allow_include: token =", "PERFORMANCE OF THIS SOFTWARE. \"\"\"DNS Zones.\"\"\" import re import sys", "\\ txn.manager.origin_information() self.current_origin = self.zone_origin self.last_ttl = 0 self.last_ttl_known =", "token.is_identifier(): raise dns.exception.SyntaxError except dns.exception.SyntaxError: raise dns.exception.SyntaxError except Exception: rdclass", "token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError(\"bad $TTL\") self.default_ttl", "specify a class. If ``None``, then the input may specify", "# Catch and reraise. raise except Exception: # All exceptions", "the input must not specify a TTL. 
If ``None``, then", "dns.tokenizer import dns.transaction import dns.ttl import dns.grange class UnknownOrigin(dns.exception.DNSException): \"\"\"Unknown", "rrsets): self.rrsets = rrsets def read_rrsets(text, name=None, ttl=None, rdclass=dns.rdataclass.IN, default_rdclass=dns.rdataclass.IN,", "rdclass = dns.rdataclass.RdataClass.make(rdclass) default_rdclass = dns.rdataclass.RdataClass.make(default_rdclass) if rdtype is not", "rdtype = dns.rdatatype.from_text(token.value) token = self.tok.get() if not token.is_identifier(): raise", "ttl = self.force_ttl self.last_ttl = ttl self.last_ttl_known = True else:", "not token.is_identifier(): raise dns.exception.SyntaxError except Exception: raise dns.exception.SyntaxError # TTL", "(filename, line_number, detail)) tb = sys.exc_info()[2] raise ex.with_traceback(tb) from None", "'$ORIGIN': self.current_origin = self.tok.get_name() self.tok.get_eol() if self.zone_origin is None: self.zone_origin", "default_ttl=None): self.tok = tok (self.zone_origin, self.relativize, _) = \\ txn.manager.origin_information()", "elif c == '$INCLUDE' and self.allow_include: token = self.tok.get() filename", "self.rdatasets[(name, rdtype, covers)] except KeyError: pass def _name_exists(self, name): for", "detail = \"syntax error\" ex = dns.exception.SyntaxError( \"%s:%d: %s\" %", "is hereby granted, # provided that the above copyright notice", "Note that codecs only apply to the owner name; dnspython", "# TTL try: ttl = dns.ttl.from_text(token.value) self.last_ttl = ttl self.last_ttl_known", "ex = dns.exception.SyntaxError( \"%s:%d: %s\" % (filename, line_number, detail)) tb", "self._get_identifier() try: rdtype = dns.rdatatype.from_text(token.value) except Exception: raise dns.exception.SyntaxError( \"unknown", "effective) def set_rrsets(self, rrsets): self.rrsets = rrsets def read_rrsets(text, name=None,", "on the SOA, set it! ttl = rd.minimum # TTL", "compatible with a ' 'CNAME node') elif node_kind == dns.node.NodeKind.REGULAR", "a TTL. 
If ``None``, then a TTL may be specified", "= self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except dns.ttl.BadTTL: if", "key[0] == name: remove.append(key) if len(remove) > 0: for key", "\"bad origin in $INCLUDE\") else: new_origin = self.current_origin self.saved_state.append((self.tok, self.current_origin,", "if rsign == '-': rindex = i - int(roffset) elif", "the type is forced to the specified value, and the", "no IDNA zonefile format. *origin*, a string, ``dns.name.Name``, or ``None``,", "a string, ``dns.name.Name``, or ``None``, is the owner name of", "is forced to the specified value, and the input must", "self.default_ttl elif self.last_ttl_known: ttl = self.last_ttl self.tok.unget(token) # Class if", "= self.zone_rdclass self.tok.unget(token) if rdclass != self.zone_rdclass: raise dns.exception.SyntaxError(\"RR class", "zone file directive '\" + c + \"'\") continue self.tok.unget(token)", "= True self.last_name = self.current_origin self.zone_rdclass = rdclass self.txn =", "'\" + c + \"'\") continue self.tok.unget(token) self._rr_line() except dns.exception.SyntaxError", "classes; specifying ``None`` for the class simply allows the user", "# Make names g1 = is_generate1.match(side) if g1: mod, sign,", "*default_ttl* will be used. 
*rdclass*, a ``dns.rdataclass.RdataClass``, string, or ``None``.", "self.tok = tok (self.zone_origin, self.relativize, _) = \\ txn.manager.origin_information() self.current_origin", "Exception: raise dns.exception.SyntaxError # TTL try: ttl = dns.ttl.from_text(token.value) self.last_ttl", "is \"forced\", and the input must not specify an owner", "lsign, loffset, lwidth, _ = self._parse_modify(lhs) rmod, rsign, roffset, rwidth,", "# First remove any changes involving the name remove =", "name remove = [] for key in self.rdatasets: if key[0]", "None: ttl = self.force_ttl self.last_ttl = ttl self.last_ttl_known = True", "if self.current_file is not None: self.current_file.close() if len(self.saved_state) > 0:", "class simply allows the user to optionally type a class", "or ``None``. If not ``None``, then the class is forced", "force_rdclass=None, force_rdtype=None, default_ttl=None): self.tok = tok (self.zone_origin, self.relativize, _) =", "dns.rdatatype.from_text(token.value) token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except", "${offset[,width[,base]]}. # Make names g1 = is_generate1.match(side) if g1: mod,", "OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE", "These come after # the dollar sign. 
They are in", "and is not specified, then this value will be used.", "c = token.value.upper() if c == '$TTL': token = self.tok.get()", "(filename, line_number) = self.tok.where() if detail is None: detail =", "a transaction.\"\"\" def __init__(self, tok, rdclass, txn, allow_include=False, allow_directives=True, force_name=None,", "node.rdatasets = rdatasets return node def _put_rdataset(self, name, rdataset): self.rdatasets[(name,", "dns.exception.SyntaxError(\"bad $TTL\") self.default_ttl = dns.ttl.from_text(token.value) self.default_ttl_known = True self.tok.get_eol() elif", "provided that the above copyright notice and this permission notice", "= '+' base = 'd' if not (g1 or g2", "base = 'd' if base != 'd': raise NotImplementedError() return", "return if self.relativize: name = name.relativize(self.zone_origin) try: rd = dns.rdata.from_text(rdclass,", "the above copyright notice and this permission notice # appear", "self.last_ttl = ttl self.last_ttl_known = True token = self.tok.get() if", "TTL is forced to be the specified value and the", "0 width = 0 base = 'd' if base !=", "that may be convenient when cutting and pasting. 
*default_rdclass*, a", "dns.ttl.BadTTL: if not (self.last_ttl_known or self.default_ttl_known): raise dns.exception.SyntaxError(\"Missing default TTL", "self.rdclass = rdclass self.rrsets = [] def writer(self, replacement=False): assert", "origin=dns.name.root, relativize=False): \"\"\"Read one or more rrsets from the specified", "(str(ty), str(va))) self.txn.add(name, ttl, rd) def read(self): \"\"\"Read a DNS", "GENERATE statement from a DNS zone file.\"\"\" if self.current_origin is", "class\") # Type try: rdtype = dns.rdatatype.from_text(token.value) token = self.tok.get()", "self.rdatasets.items(): if name == rdataset_name: rdatasets.append(rdataset) if len(rdatasets) == 0:", "force_ttl=None, force_rdclass=None, force_rdtype=None, default_ttl=None): self.tok = tok (self.zone_origin, self.relativize, _)", "not zone's class\") # Type if self.force_rdtype is not None:", "ttl, rd) def _parse_modify(self, side): # Here we catch everything", "and the rdataset is neutral, so # adding the rdataset", "not None: default_ttl = force_ttl if default_ttl is None: self.default_ttl", "= self.zone_origin self.last_ttl = 0 self.last_ttl_known = False if force_ttl", "sign == '': sign = '+' g2 = is_generate2.match(side) if", "if not token.is_identifier(): raise dns.exception.SyntaxError except dns.ttl.BadTTL: if not (self.last_ttl_known", "value, and the input must not specify a type. 
If", "data node') # Otherwise at least one of the node", "convert them to syntax errors so that we can emit", "if self.zone_origin is None: self.zone_origin = self.current_origin self.txn._set_origin(self.current_origin) elif c", "``None`` for the class simply allows the user to optionally", "raise dns.exception.SyntaxError except Exception: raise dns.exception.SyntaxError # lhs (required) try:", "\"\"\"Unknown origin\"\"\" class CNAMEAndOtherData(dns.exception.DNSException): \"\"\"A node has a CNAME and", "= re.compile(r\"^.*\\$({(\\+|-?)(\\d+)}).*$\") is_generate3 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+),(\\d+)}).*$\") # Sometimes there are modifiers", "the input are made absolute by appending the *origin*. \"\"\"", "self.current_origin self.zone_rdclass = rdclass self.txn = txn self.saved_state = []", "re import sys import dns.exception import dns.name import dns.node import", "All exceptions that occur in the processing of rdata #", "own TTL can be inferred from its minimum. if ttl", "self._parse_modify(lhs) rmod, rsign, roffset, rwidth, _ = self._parse_modify(rhs) for i", "= origin self.relativize = relativize self.rdclass = rdclass self.rrsets =", "return (self.origin, self.relativize, effective) def set_rrsets(self, rrsets): self.rrsets = rrsets", "import dns.rdata import dns.rdtypes.ANY.SOA import dns.rrset import dns.tokenizer import dns.transaction", "a zone object. 
@raises dns.zone.NoSOA: No SOA RR was found", "if rdclass != self.zone_rdclass: raise dns.exception.SyntaxError(\"RR class is not zone's", "self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] = rdataset def _delete_name(self, name): # First", "dns.rdatatype.SOA: # The pre-RFC2308 and pre-BIND9 behavior inherits the zone", "self.last_name = self.current_origin self.zone_rdclass = rdclass self.txn = txn self.saved_state", "is not None: name = self.force_name else: if self.current_origin is", "return mod, sign, offset, width, base def _generate_line(self): # range", "and self.allow_directives: c = token.value.upper() if c == '$TTL': token", "found at the zone origin \"\"\" try: while 1: token", "try: rdclass = dns.rdataclass.from_text(token.value) except dns.exception.SyntaxError: raise except Exception: rdclass", "code currently only supports base 'd', so the last value", "if g3: mod, sign, offset, width = g3.groups() if sign", "else: self.default_ttl = default_ttl self.default_ttl_known = True self.last_name = self.current_origin", "class of the returned rrsets. *rdtype*, a ``dns.rdatatype.RdataType``, string, or", "self.relativize: name = name.relativize(self.zone_origin) # TTL if self.force_ttl is not", "'+': lindex = i + int(loffset) elif lsign == '-':", "as syntax errors. 
This is not strictly # correct, but", "'{' '}' in a group so we can replace it", "idna_codec) if isinstance(ttl, str): ttl = dns.ttl.from_text(ttl) if isinstance(default_ttl, str):", "TTL try: ttl = dns.ttl.from_text(token.value) self.last_ttl = ttl self.last_ttl_known =", "is not None: ttl = self.force_ttl self.last_ttl = ttl self.last_ttl_known", "break elif token.is_eol(): continue elif token.is_comment(): self.tok.get_eol() continue elif token.value[0]", "= 'd' if base != 'd': raise NotImplementedError() return mod,", "self._rr_line() except dns.exception.SyntaxError as detail: (filename, line_number) = self.tok.where() if", "dns.zone.NoNS: No NS RRset was found at the zone origin", "info. (ty, va) = sys.exc_info()[:2] raise dns.exception.SyntaxError( \"caught exception {}:", "key in self.rdatasets: if key[0] == name: remove.append(key) if len(remove)", "from None class RRsetsReaderTransaction(dns.transaction.Transaction): def __init__(self, manager, replacement, read_only): assert", "the input, and also the origin to relativize to if", "not token.is_identifier(): raise dns.exception.SyntaxError except Exception: raise dns.exception.SyntaxError # lhs", "== dns.node.NodeKind.CNAME and \\ rdataset_kind == dns.node.NodeKind.REGULAR: raise CNAMEAndOtherData('rdataset type", "[] for (name, _, _), rdataset in self.rdatasets.items(): rrset =", "# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,", "appear in all copies. # # THE SOFTWARE IS PROVIDED", "not possible to return rrsets with differing classes; specifying ``None``", "'-': lindex = i - int(loffset) if rsign == '-':", "class is not zone's class\") # Type if self.force_rdtype is", "\\ rdataset_kind == dns.node.NodeKind.REGULAR: raise CNAMEAndOtherData('rdataset type is not compatible", "made absolute by appending the *origin*. \"\"\" if isinstance(origin, str):", "string, or None. 
If not ``None``, the the TTL is", "transaction.\"\"\" def __init__(self, tok, rdclass, txn, allow_include=False, allow_directives=True, force_name=None, force_ttl=None,", "self.rdatasets.get((name, rdtype, covers)) def _get_node(self, name): rdatasets = [] for", "raise dns.exception.SyntaxError( \"unknown rdatatype '%s'\" % token.value) try: rd =", "self.zone_rdclass if rdclass != self.zone_rdclass: raise dns.exception.SyntaxError(\"RR class is not", "THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND", "self.default_ttl = default_ttl self.default_ttl_known = True self.last_name = self.current_origin self.zone_rdclass", "(self.origin, self.relativize, effective) def set_rrsets(self, rrsets): self.rrsets = rrsets def", "start, stop, step = dns.grange.from_text(token.value) token = self.tok.get() if not", "the zone origin @raises dns.zone.NoNS: No NS RRset was found", "``None``. If not ``None``, then the type is forced to", "= token.value token = self.tok.get() if token.is_identifier(): new_origin =\\ dns.name.from_text(token.value,", "rdtype, covers): return self.rdatasets.get((name, rdtype, covers)) def _get_node(self, name): rdatasets", "token = self._get_identifier() try: rdclass = dns.rdataclass.from_text(token.value) except dns.exception.SyntaxError: raise", "rdtype, covers)] except KeyError: pass def _name_exists(self, name): for (n,", "None: name = self.force_name else: if self.current_origin is None: raise", "+ int(roffset) lzfindex = str(lindex).zfill(int(lwidth)) rzfindex = str(rindex).zfill(int(rwidth)) name =", "a ' 'regular data node') # Otherwise at least one", "if not token.is_identifier(): raise dns.exception.SyntaxError return token def _rr_line(self): \"\"\"Process", "2009-2011 Nominum, Inc. # # Permission to use, copy, modify,", "= txn.get_node(name) if node is None: # empty nodes are", "1, step): # +1 because bind is inclusive and python", "a DNS zone file and build a zone object. 
@raises", "is ok class Reader: \"\"\"Read a DNS zone file into", "_get_node(self, name): rdatasets = [] for (rdataset_name, _, _), rdataset", "SOA minttl if no $TTL statement is present before the", "g2.groups() if sign == '': sign = '+' width =", "bool. If ``True``, names are relativized to the *origin*; if", "not (self.last_ttl_known or self.default_ttl_known): raise dns.exception.SyntaxError(\"Missing default TTL value\") if", "be specified in the input. If it is not specified,", "lzfindex = str(lindex).zfill(int(lwidth)) rzfindex = str(rindex).zfill(int(rwidth)) name = lhs.replace('$%s' %", "DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING", "raise dns.exception.SyntaxError except Exception: raise dns.exception.SyntaxError # TTL try: ttl", "owner name is \"forced\", and the input must not specify", "filename = token.value token = self.tok.get() if token.is_identifier(): new_origin =\\", "= self.force_ttl self.last_ttl = ttl self.last_ttl_known = True else: token", "and the input must not specify a TTL. If ``None``,", "= True token = None except dns.ttl.BadTTL: if self.default_ttl_known: ttl", "$TTL statement is present before the # SOA is parsed.", "*default_ttl*, an ``int``, string, or ``None``. 
If not ``None``, then", "c == '$GENERATE': self._generate_line() else: raise dns.exception.SyntaxError( \"Unknown zone file", "zone's class\") # Type if self.force_rdtype is not None: rdtype", "(g1 or g2 or g3): mod = '' sign =", "OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS", "self.tok.as_name(token, self.current_origin) else: token = self.tok.get() if token.is_eol_or_eof(): # treat", "not token.is_eol_or_eof(): raise dns.exception.SyntaxError( \"bad origin in $INCLUDE\") else: new_origin", "self.force_rdtype = force_rdtype self.txn.check_put_rdataset(_check_cname_and_other_data) def _eat_line(self): while 1: token =", "self.rrsets = rrsets def read_rrsets(text, name=None, ttl=None, rdclass=dns.rdataclass.IN, default_rdclass=dns.rdataclass.IN, rdtype=None,", "reraise. raise except Exception: # All exceptions that occur in", "self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except dns.ttl.BadTTL: if not", "' 'regular data node') # Otherwise at least one of", "token.value) try: rd = dns.rdata.from_text(rdclass, rdtype, self.tok, self.current_origin, self.relativize, self.zone_origin)", "return self.tok.unget(token) name = self.last_name if not name.is_subdomain(self.zone_origin): self._eat_line() return", "super().__init__(manager, replacement, read_only) self.rdatasets = {} def _get_rdataset(self, name, rdtype,", "*origin*, a string, ``dns.name.Name``, or ``None``, is the origin for", "# appear in all copies. 
# # THE SOFTWARE IS", "SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS.", "type is forced to the specified value, and the input", "or ``None``, is the origin for any relative names in", "the zone default # TTL from the SOA minttl if", "name = name.relativize(self.zone_origin) try: rd = dns.rdata.from_text(rdclass, rdtype, rdata, self.current_origin,", "for any purpose with or without fee is hereby granted,", "dns.exception.SyntaxError except dns.ttl.BadTTL: if not (self.last_ttl_known or self.default_ttl_known): raise dns.exception.SyntaxError(\"Missing", "= g1.groups() if sign == '': sign = '+' g2", "set_rrsets(self, rrsets): self.rrsets = rrsets def read_rrsets(text, name=None, ttl=None, rdclass=dns.rdataclass.IN,", "is ignored lmod, lsign, loffset, lwidth, _ = self._parse_modify(lhs) rmod,", "self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError(\"bad $TTL\") self.default_ttl = dns.ttl.from_text(token.value)", "try: rdtype = dns.rdatatype.from_text(token.value) token = self.tok.get() if not token.is_identifier():", "dns.rdatatype.from_text(token.value) except Exception: raise dns.exception.SyntaxError( \"unknown rdatatype '%s'\" % token.value)", "the input must not specify an owner name. If ``None``,", "OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION", "in the input, and also the origin to relativize to", "+1 because bind is inclusive and python is exclusive if", "raise dns.exception.SyntaxError(\"bad $TTL\") self.default_ttl = dns.ttl.from_text(token.value) self.default_ttl_known = True self.tok.get_eol()", "\"Unknown zone file directive '\" + c + \"'\") continue", "were EOL/EOF. 
return self.tok.unget(token) name = self.last_name if not name.is_subdomain(self.zone_origin):", "*text*, a file object or a string, is the input", "= Reader(tok, default_rdclass, txn, allow_directives=False, force_name=name, force_ttl=ttl, force_rdclass=rdclass, force_rdtype=rdtype, default_ttl=default_ttl)", "class Reader: \"\"\"Read a DNS zone file into a transaction.\"\"\"", "default_rdclass=dns.rdataclass.IN, rdtype=None, default_ttl=None, idna_codec=None, origin=dns.name.root, relativize=False): \"\"\"Read one or more", "0: (self.tok, self.current_origin, self.last_name, self.current_file, self.last_ttl, self.last_ttl_known, self.default_ttl, self.default_ttl_known) =", "pass def _name_exists(self, name): for (n, _, _) in self.rdatasets:", "self.relativize, self.zone_origin) except dns.exception.SyntaxError: # Catch and reraise. raise except", "specified, then the *default_ttl* will be used. *rdclass*, a ``dns.rdataclass.RdataClass``,", "= dns.node.NodeKind.classify_rdataset(rdataset) node = txn.get_node(name) if node is None: #", "syntax errors. This is not strictly # correct, but it", "specify an owner name. If ``None``, then any owner names", "self.zone_origin is None: self.zone_origin = self.current_origin self.txn._set_origin(self.current_origin) elif c ==", "can be inferred from its minimum. if ttl is None:", "1: token = self.tok.get(True, True) if token.is_eof(): if self.current_file is", "the TTL is not forced and is not specified, then", "error will occur if the TTL is not specified. *idna_codec*,", "in self.rdatasets: if key[0] == name: remove.append(key) if len(remove) >", "are allowed and must be present in the input. *ttl*,", "is not specified, then this value will be used. if", "*default_rdclass*. 
Note that it is not possible to return rrsets", "_parse_modify returns is ignored lmod, lsign, loffset, lwidth, _ =", "NS RRset was found at the zone origin \"\"\" try:", "elif node_kind == dns.node.NodeKind.REGULAR and \\ rdataset_kind == dns.node.NodeKind.CNAME: raise", "self.current_origin self.saved_state.append((self.tok, self.current_origin, self.last_name, self.current_file, self.last_ttl, self.last_ttl_known, self.default_ttl, self.default_ttl_known)) self.current_file", "i - int(roffset) elif rsign == '+': rindex = i", "== '+': rindex = i + int(roffset) lzfindex = str(lindex).zfill(int(lwidth))", "= \"syntax error\" ex = dns.exception.SyntaxError( \"%s:%d: %s\" % (filename,", "= self.tok.get() # Range (required) try: start, stop, step =", "return node def _put_rdataset(self, name, rdataset): self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] =", "THE USE OR PERFORMANCE OF THIS SOFTWARE. \"\"\"DNS Zones.\"\"\" import", "0 base = 'd' g3 = is_generate3.match(side) if g3: mod,", "a string, ``dns.name.Name``, or ``None``, is the origin for any", "is the input to process. *name*, a string, ``dns.name.Name``, or", "force_rdclass self.force_rdtype = force_rdtype self.txn.check_put_rdataset(_check_cname_and_other_data) def _eat_line(self): while 1: token", "Dnspython Contributors, see LICENSE for text of ISC license #", "CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
\"\"\"DNS", "file.\"\"\" if self.current_origin is None: raise UnknownOrigin token = self.tok.get()", "node_kind = node.classify() if node_kind == dns.node.NodeKind.CNAME and \\ rdataset_kind", "modify, and distribute this software and its # documentation for", "'$GENERATE': self._generate_line() else: raise dns.exception.SyntaxError( \"Unknown zone file directive '\"", "or g2 or g3): mod = '' sign = '+'", "rdclass, txn, allow_include=False, allow_directives=True, force_name=None, force_ttl=None, force_rdclass=None, force_rdtype=None, default_ttl=None): self.tok", "mod, sign, offset, width, base def _generate_line(self): # range lhs", "that matches *default_rdclass*. Note that it is not possible to", "False else: self.default_ttl = default_ttl self.default_ttl_known = True self.last_name =", "to process. *name*, a string, ``dns.name.Name``, or ``None``, is the", "file.\"\"\" token = None # Name if self.force_name is not", "in $INCLUDE\") else: new_origin = self.current_origin self.saved_state.append((self.tok, self.current_origin, self.last_name, self.current_file,", "default_rdclass = dns.rdataclass.RdataClass.make(default_rdclass) if rdtype is not None: rdtype =", "force_name self.force_ttl = force_ttl self.force_rdclass = force_rdclass self.force_rdtype = force_rdtype", "Permission to use, copy, modify, and distribute this software and", "ttl = self.last_ttl # Class try: rdclass = dns.rdataclass.from_text(token.value) token", "if sign == '': sign = '+' g2 = is_generate2.match(side)", "the owner name of the rrset. 
If not ``None``, then", "text of ISC license # Copyright (C) 2003-2007, 2009-2011 Nominum,", "= dns.node.Node() node.rdatasets = rdatasets return node def _put_rdataset(self, name,", "mod = '' sign = '+' offset = 0 width", "0 def _end_transaction(self, commit): if commit and self._changed(): rrsets =", "None self.allow_include = allow_include self.allow_directives = allow_directives self.force_name = force_name", "allow_include self.allow_directives = allow_directives self.force_name = force_name self.force_ttl = force_ttl", "RRsetsReaderTransaction(dns.transaction.Transaction): def __init__(self, manager, replacement, read_only): assert not read_only super().__init__(manager,", "no $TTL statement is present before the # SOA is", "detail: (filename, line_number) = self.tok.where() if detail is None: detail", "dns.ttl.from_text(ttl) if isinstance(default_ttl, str): default_ttl = dns.ttl.from_text(default_ttl) if rdclass is", "rsign, roffset, rwidth, _ = self._parse_modify(rhs) for i in range(start,", "Nominum, Inc. # # Permission to use, copy, modify, and", "Exception: rdclass = self.zone_rdclass self.tok.unget(token) if rdclass != self.zone_rdclass: raise", "remove: del self.rdatasets[key] def _delete_rdataset(self, name, rdtype, covers): try: del", "if self.force_name is not None: name = self.force_name else: if", "is not specified, then the *default_ttl* will be used. *rdclass*,", "name: remove.append(key) if len(remove) > 0: for key in remove:", "for i in range(start, stop + 1, step): # +1", "a type. 
If ``None``, then a type must be present", "else: new_origin = self.current_origin self.saved_state.append((self.tok, self.current_origin, self.last_name, self.current_file, self.last_ttl, self.last_ttl_known,", "if c == '$TTL': token = self.tok.get() if not token.is_identifier():", "replacement, read_only): assert not read_only super().__init__(manager, replacement, read_only) self.rdatasets =", "one line containing the GENERATE statement from a DNS zone", "rindex = i + int(roffset) lzfindex = str(lindex).zfill(int(lwidth)) rzfindex =", "self.default_ttl_known = False else: self.default_ttl = default_ttl self.default_ttl_known = True", "dns.exception.SyntaxError as detail: (filename, line_number) = self.tok.where() if detail is", "emit # helpful filename:line info. (ty, va) = sys.exc_info()[:2] raise", "_), rdataset in self.rdatasets.items(): rrset = dns.rrset.RRset(name, rdataset.rdclass, rdataset.rdtype, rdataset.covers)", "self._generate_line() else: raise dns.exception.SyntaxError( \"Unknown zone file directive '\" +", "default_rdclass) with manager.writer(True) as txn: tok = dns.tokenizer.Tokenizer(text, '<input>', idna_codec=idna_codec)", "self.zone_origin self.last_ttl = 0 self.last_ttl_known = False if force_ttl is", "WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER", "= \\ txn.manager.origin_information() self.current_origin = self.zone_origin self.last_ttl = 0 self.last_ttl_known", "self.force_ttl = force_ttl self.force_rdclass = force_rdclass self.force_rdtype = force_rdtype self.txn.check_put_rdataset(_check_cname_and_other_data)", "self.default_ttl = dns.ttl.from_text(token.value) self.default_ttl_known = True self.tok.get_eol() elif c ==", "relative names in the input, and also the origin to", "wait until now to do this as the SOA RR's", "mod, sign, offset, width = g3.groups() if sign == '':", "value\") if self.default_ttl_known: ttl = self.default_ttl elif self.last_ttl_known: ttl =", "= relativize self.rdclass = rdclass self.rrsets = 
[] def writer(self,", "replacement, read_only) self.rdatasets = {} def _get_rdataset(self, name, rdtype, covers):", "``None``. If not ``None``, then the class is forced to", "file into a transaction.\"\"\" def __init__(self, tok, rdclass, txn, allow_include=False,", "self._changed(): rrsets = [] for (name, _, _), rdataset in", "(required) try: lhs = token.value token = self.tok.get() if not", "\"\"\"Read a DNS zone file and build a zone object.", "raise ex.with_traceback(tb) from None class RRsetsReaderTransaction(dns.transaction.Transaction): def __init__(self, manager, replacement,", "self.zone_origin = self.current_origin self.txn._set_origin(self.current_origin) elif c == '$INCLUDE' and self.allow_include:", "re.compile(r\"^.*\\$({(\\+|-?)(\\d+)}).*$\") is_generate3 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+),(\\d+)}).*$\") # Sometimes there are modifiers in", "_delete_name(self, name): # First remove any changes involving the name", "= 0 base = 'd' g3 = is_generate3.match(side) if g3:", "g2: mod, sign, offset = g2.groups() if sign == '':", "rwidth, _ = self._parse_modify(rhs) for i in range(start, stop +", "_changed(self): return len(self.rdatasets) > 0 def _end_transaction(self, commit): if commit", "if ``False`` then any relative names in the input are", "value # in the tuple _parse_modify returns is ignored lmod,", "' 'CNAME node') elif node_kind == dns.node.NodeKind.REGULAR and \\ rdataset_kind", "is_generate1.match(side) if g1: mod, sign, offset, width, base = g1.groups()", "= dns.tokenizer.Tokenizer(self.current_file, filename) self.current_origin = new_origin elif c == '$GENERATE':", "a DNS zone file.\"\"\" token = None # Name if", "with or without fee is hereby granted, # provided that", "They are in the form: ${offset[,width[,base]]}. # Make names g1", "self.last_ttl, self.last_ttl_known, self.default_ttl, self.default_ttl_known)) self.current_file = open(filename, 'r') self.tok =", "then a type must be present for each RR. 
*default_ttl*,", "dns.exception.SyntaxError except Exception: rdclass = self.zone_rdclass if rdclass != self.zone_rdclass:", "this permission notice # appear in all copies. # #", "name = self.last_name if not name.is_subdomain(self.zone_origin): self._eat_line() return if self.relativize:", "rdataset_kind == dns.node.NodeKind.CNAME: raise CNAMEAndOtherData('CNAME rdataset is not compatible with", "that we can emit # helpful filename:line info. (ty, va)", "self.current_origin is None: raise UnknownOrigin token = self.tok.get() # Range", "ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL", "__init__(self, manager, replacement, read_only): assert not read_only super().__init__(manager, replacement, read_only)", "a ' 'CNAME node') elif node_kind == dns.node.NodeKind.REGULAR and \\", "IDNA 2003 encoder/decoder is used. Note that codecs only apply", "rdtype, covers): try: del self.rdatasets[(name, rdtype, covers)] except KeyError: pass", "''. is_generate1 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+),(\\d+),(.)}).*$\") is_generate2 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+)}).*$\") is_generate3 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+),(\\d+)}).*$\")", "lhs = token.value token = self.tok.get() if not token.is_identifier(): raise", "raise dns.exception.SyntaxError(\"caught exception %s: %s\" % (str(ty), str(va))) self.txn.add(name, ttl,", "them to syntax errors so that we can emit #", "relativize, default_rdclass) with manager.writer(True) as txn: tok = dns.tokenizer.Tokenizer(text, '<input>',", "read_only super().__init__(manager, replacement, read_only) self.rdatasets = {} def _get_rdataset(self, name,", "(ty, va) = sys.exc_info()[:2] raise dns.exception.SyntaxError(\"caught exception %s: %s\" %", "= dns.exception.SyntaxError( \"%s:%d: %s\" % (filename, line_number, detail)) tb =", "self.last_name, self.current_file, self.last_ttl, self.last_ttl_known, self.default_ttl, self.default_ttl_known)) self.current_file = open(filename, 'r')", "import dns.ttl import dns.grange 
class UnknownOrigin(dns.exception.DNSException): \"\"\"Unknown origin\"\"\" class CNAMEAndOtherData(dns.exception.DNSException):", "% (rmod), rzfindex) self.last_name = dns.name.from_text(name, self.current_origin, self.tok.idna_codec) name =", "$TTL\") self.default_ttl = dns.ttl.from_text(token.value) self.default_ttl_known = True self.tok.get_eol() elif c", "rmod, rsign, roffset, rwidth, _ = self._parse_modify(rhs) for i in", "string, or ``None``. If not ``None``, then if the TTL", "# lhs (required) try: lhs = token.value token = self.tok.get()", "class that matches *default_rdclass*. Note that it is not possible", "not token.is_whitespace(): self.last_name = self.tok.as_name(token, self.current_origin) else: token = self.tok.get()", "at least one of the node and the rdataset is", "'$' and self.allow_directives: c = token.value.upper() if c == '$TTL':", "``dns.name.IDNACodec``, specifies the IDNA encoder/decoder. If ``None``, the default IDNA", "into a transaction.\"\"\" def __init__(self, tok, rdclass, txn, allow_include=False, allow_directives=True,", "= True else: token = self._get_identifier() ttl = None try:", "If ``None``, the default IDNA 2003 encoder/decoder is used. Note", "any purpose with or without fee is hereby granted, #", "is None: raise UnknownOrigin token = self.tok.get() # Range (required)", "``None``, then the owner name is \"forced\", and the input", "the # SOA is parsed. self.default_ttl = rd.minimum self.default_ttl_known =", "CNAMEAndOtherData(dns.exception.DNSException): \"\"\"A node has a CNAME and other data\"\"\" def", "self.tok.unget(token) if rdclass != self.zone_rdclass: raise dns.exception.SyntaxError(\"RR class is not", "raise CNAMEAndOtherData('rdataset type is not compatible with a ' 'CNAME", "input must not specify a type. 
If ``None``, then a", "= re.compile(r\"^.*\\$({(\\+|-?)(\\d+),(\\d+),(.)}).*$\") is_generate2 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+)}).*$\") is_generate3 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+),(\\d+)}).*$\") # Sometimes", "info. (ty, va) = sys.exc_info()[:2] raise dns.exception.SyntaxError(\"caught exception %s: %s\"", "the rrset. If not ``None``, then the owner name is", "``None``, then a type must be present for each RR.", "__init__(self, origin=dns.name.root, relativize=False, rdclass=dns.rdataclass.IN): self.origin = origin self.relativize = relativize", "self.allow_directives = allow_directives self.force_name = force_name self.force_ttl = force_ttl self.force_rdclass", "import dns.rdatatype import dns.rdata import dns.rdtypes.ANY.SOA import dns.rrset import dns.tokenizer", "read_rrsets(text, name=None, ttl=None, rdclass=dns.rdataclass.IN, default_rdclass=dns.rdataclass.IN, rdtype=None, default_ttl=None, idna_codec=None, origin=dns.name.root, relativize=False):", "possible to return rrsets with differing classes; specifying ``None`` for", "can emit # helpful filename:line info. (ty, va) = sys.exc_info()[:2]", "KeyError: pass def _name_exists(self, name): for (n, _, _) in", "lindex = i - int(loffset) if rsign == '-': rindex", "self.last_ttl_known = True token = self.tok.get() if not token.is_identifier(): raise", "default_ttl = dns.ttl.from_text(default_ttl) if rdclass is not None: rdclass =", "if isinstance(name, str): name = dns.name.from_text(name, origin, idna_codec) if isinstance(ttl,", "not ``None``, then the class is forced to the specified", "If not ``None``, then the owner name is \"forced\", and", "'+': rindex = i + int(roffset) lzfindex = str(lindex).zfill(int(lwidth)) rzfindex", "(self.last_ttl_known or self.default_ttl_known): raise dns.exception.SyntaxError(\"Missing default TTL value\") if self.default_ttl_known:", "TTL may be specified in the input. 
If it is", "self.rdatasets: if key[0] == name: remove.append(key) if len(remove) > 0:", "in rdata, as there is no IDNA zonefile format. *origin*,", "== dns.node.NodeKind.CNAME: raise CNAMEAndOtherData('CNAME rdataset is not compatible with a", "sign, offset, width, base = g1.groups() if sign == '':", "self.default_ttl_known: ttl = self.default_ttl elif self.last_ttl_known: ttl = self.last_ttl self.tok.unget(token)", "raise dns.exception.SyntaxError(\"Missing default TTL value\") if self.default_ttl_known: ttl = self.default_ttl", "str(va))) if not self.default_ttl_known and rdtype == dns.rdatatype.SOA: # The", "= dns.rdataclass.RdataClass.make(default_rdclass) if rdtype is not None: rdtype = dns.rdatatype.RdataType.make(rdtype)", "in '{' '}' in a group so we can replace", "origin @raises dns.zone.NoNS: No NS RRset was found at the", "# treat leading WS followed by EOL/EOF as if they", "input. If it is not specified, then the *default_ttl* will", "correct, but it is correct almost all of the time.", "rdata # are treated as syntax errors. This is not", "dns.exception.SyntaxError: # Catch and reraise. raise except Exception: # All", "\"'\") continue self.tok.unget(token) self._rr_line() except dns.exception.SyntaxError as detail: (filename, line_number)", "= str(lindex).zfill(int(lwidth)) rzfindex = str(rindex).zfill(int(rwidth)) name = lhs.replace('$%s' % (lmod),", "specify a type. 
If ``None``, then a type must be", "elif token.is_comment(): self.tok.get_eol() continue elif token.value[0] == '$' and self.allow_directives:", "True token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except", "_), rdataset in self.rdatasets.items(): if name == rdataset_name: rdatasets.append(rdataset) if", "dns.exception.SyntaxError(\"Missing default TTL value\") if self.default_ttl_known: ttl = self.default_ttl elif", "dns.transaction import dns.ttl import dns.grange class UnknownOrigin(dns.exception.DNSException): \"\"\"Unknown origin\"\"\" class", "\"\"\"Process one line from a DNS zone file.\"\"\" token =", "origin\"\"\" class CNAMEAndOtherData(dns.exception.DNSException): \"\"\"A node has a CNAME and other", "to do this as the SOA RR's # own TTL", "CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS", "OR PERFORMANCE OF THIS SOFTWARE. \"\"\"DNS Zones.\"\"\" import re import", "rdataset.rdtype, rdataset.covers)] = rdataset def _delete_name(self, name): # First remove", "zone origin \"\"\" try: while 1: token = self.tok.get(True, True)", "False) def get_class(self): return self.rdclass def origin_information(self): if self.relativize: effective", "*origin*; if ``False`` then any relative names in the input", "[] for (rdataset_name, _, _), rdataset in self.rdatasets.items(): if name", "as the SOA RR's # own TTL can be inferred", "specified value and the input must not specify a TTL.", "INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. 
IN", "n == name: return True return False def _changed(self): return", "def _rr_line(self): \"\"\"Process one line from a DNS zone file.\"\"\"", "If not ``None``, then the type is forced to the", "OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE,", "token.is_eol_or_eof(): break def _get_identifier(self): token = self.tok.get() if not token.is_identifier():", "self.current_origin, self.last_name, self.current_file, self.last_ttl, self.last_ttl_known, self.default_ttl, self.default_ttl_known) = self.saved_state.pop(-1) continue", "= [] def writer(self, replacement=False): assert replacement is True return", "if token.is_identifier(): new_origin =\\ dns.name.from_text(token.value, self.current_origin, self.tok.idna_codec) self.tok.get_eol() elif not", "'regular data node') # Otherwise at least one of the", "raise dns.exception.SyntaxError(\"unknown rdatatype '%s'\" % token.value) # rhs (required) rhs", "if token.is_eol_or_eof(): break def _get_identifier(self): token = self.tok.get() if not", "token.value.upper() if c == '$TTL': token = self.tok.get() if not", "If ``None``, then a TTL may be specified in the", "if *relativize* is ``True``. *relativize*, a bool. If ``True``, names", "token.is_eol_or_eof(): # treat leading WS followed by EOL/EOF as if", "self.tok.get_name() self.tok.get_eol() if self.zone_origin is None: self.zone_origin = self.current_origin self.txn._set_origin(self.current_origin)", "0: for key in remove: del self.rdatasets[key] def _delete_rdataset(self, name,", "are neutral. 
return node_kind = node.classify() if node_kind == dns.node.NodeKind.CNAME", "name.relativize(self.zone_origin) # TTL if self.force_ttl is not None: ttl =", "True, False) def get_class(self): return self.rdclass def origin_information(self): if self.relativize:", "self.txn = txn self.saved_state = [] self.current_file = None self.allow_include", "only apply to the owner name; dnspython does not do", "names in the input are made absolute by appending the", "RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN", "name = lhs.replace('$%s' % (lmod), lzfindex) rdata = rhs.replace('$%s' %", "is not zone's class\") # Type if self.force_rdtype is not", "not read_only super().__init__(manager, replacement, read_only) self.rdatasets = {} def _get_rdataset(self,", "returns is ignored lmod, lsign, loffset, lwidth, _ = self._parse_modify(lhs)", "the TTL is not specified. *idna_codec*, a ``dns.name.IDNACodec``, specifies the", "that codecs only apply to the owner name; dnspython does", "# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES", "return True return False def _changed(self): return len(self.rdatasets) > 0", "filename) self.current_origin = new_origin elif c == '$GENERATE': self._generate_line() else:", "sign = '+' base = 'd' if not (g1 or", "processing of rdata # are treated as syntax errors. This", "TTL value\") self.txn.add(name, ttl, rd) def _parse_modify(self, side): # Here", "it is not specified, then the *default_ttl* will be used.", "zone default # TTL from the SOA minttl if no", "a TTL on the SOA, set it! 
ttl = rd.minimum", "type a class as that may be convenient when cutting", "not (g1 or g2 or g3): mod = '' sign", "(n, _, _) in self.rdatasets: if n == name: return", "PROVIDED \"AS IS\" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH", "rdclass = dns.rdataclass.from_text(token.value) except dns.exception.SyntaxError: raise except Exception: rdclass =", "exceptions that occur in the processing of rdata # are", "= is_generate3.match(side) if g3: mod, sign, offset, width = g3.groups()", "# Otherwise at least one of the node and the", "Type try: rdtype = dns.rdatatype.from_text(token.value) token = self.tok.get() if not", "a DNS zone file.\"\"\" if self.current_origin is None: raise UnknownOrigin", "for text of ISC license # Copyright (C) 2003-2007, 2009-2011", "the rdataset is ok class Reader: \"\"\"Read a DNS zone", "from its minimum. if ttl is None: raise dns.exception.SyntaxError(\"Missing default", "sign, offset, width, base def _generate_line(self): # range lhs [ttl]", "rdclass=dns.rdataclass.IN): self.origin = origin self.relativize = relativize self.rdclass = rdclass", "rdataset_name: rdatasets.append(rdataset) if len(rdatasets) == 0: return None node =", "is None: self.default_ttl = 0 self.default_ttl_known = False else: self.default_ttl", "the *origin*. 
\"\"\" if isinstance(origin, str): origin = dns.name.from_text(origin, dns.name.root,", "(C) Dnspython Contributors, see LICENSE for text of ISC license", "self.zone_rdclass = rdclass self.txn = txn self.saved_state = [] self.current_file", "containing the GENERATE statement from a DNS zone file.\"\"\" if", "that occur in the processing of rdata # are treated", "sign = '+' offset = 0 width = 0 base", "else: effective = self.origin return (self.origin, self.relativize, effective) def set_rrsets(self,", "this as the SOA RR's # own TTL can be", "= dns.ttl.from_text(token.value) self.last_ttl = ttl self.last_ttl_known = True token =", "None: self.current_file.close() if len(self.saved_state) > 0: (self.tok, self.current_origin, self.last_name, self.current_file,", "!= 'd': raise NotImplementedError() return mod, sign, offset, width, base", "if the TTL is not forced and is not specified,", "dns.rdatatype.RdataType.make(rdtype) manager = RRSetsReaderManager(origin, relativize, default_rdclass) with manager.writer(True) as txn:", "is not forced and is not specified, then this value", "except Exception: rdclass = self.zone_rdclass if rdclass != self.zone_rdclass: raise", "``None``, the the TTL is forced to be the specified", "len(self.rdatasets) > 0 def _end_transaction(self, commit): if commit and self._changed():", "not None: self.current_file.close() if len(self.saved_state) > 0: (self.tok, self.current_origin, self.last_name,", "not None: rdtype = self.force_rdtype else: token = self._get_identifier() try:", "self.last_ttl_known: ttl = self.last_ttl # Class try: rdclass = dns.rdataclass.from_text(token.value)", "there is no IDNA zonefile format. 
*origin*, a string, ``dns.name.Name``,", "token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except dns.exception.SyntaxError:", "a DNS zone file into a transaction.\"\"\" def __init__(self, tok,", "(required) rhs = token.value # The code currently only supports", "= self._parse_modify(rhs) for i in range(start, stop + 1, step):", "name. If ``None``, then any owner names are allowed and", "(lmod), lzfindex) rdata = rhs.replace('$%s' % (rmod), rzfindex) self.last_name =", "DNS zone file and build a zone object. @raises dns.zone.NoSOA:", "are treated as syntax errors. This is not strictly #", "filename:line info. (ty, va) = sys.exc_info()[:2] raise dns.exception.SyntaxError(\"caught exception %s:", "self.default_ttl, self.default_ttl_known)) self.current_file = open(filename, 'r') self.tok = dns.tokenizer.Tokenizer(self.current_file, filename)", "None: self.zone_origin = self.current_origin self.txn._set_origin(self.current_origin) elif c == '$INCLUDE' and", "try: rdclass = dns.rdataclass.from_text(token.value) token = self.tok.get() if not token.is_identifier():", "elif not token.is_eol_or_eof(): raise dns.exception.SyntaxError( \"bad origin in $INCLUDE\") else:", "txn.get_node(name) if node is None: # empty nodes are neutral.", "txn: tok = dns.tokenizer.Tokenizer(text, '<input>', idna_codec=idna_codec) reader = Reader(tok, default_rdclass,", "covers): return self.rdatasets.get((name, rdtype, covers)) def _get_node(self, name): rdatasets =", "token.value) # rhs (required) rhs = token.value # The code", "self.last_ttl # Class try: rdclass = dns.rdataclass.from_text(token.value) token = self.tok.get()", "has a CNAME and other data\"\"\" def _check_cname_and_other_data(txn, name, rdataset):", "but it is correct almost all of the time. 
#", "remove.append(key) if len(remove) > 0: for key in remove: del", "self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except Exception: raise dns.exception.SyntaxError(\"unknown", "input must not specify a TTL. If ``None``, then a", "if node_kind == dns.node.NodeKind.CNAME and \\ rdataset_kind == dns.node.NodeKind.REGULAR: raise", "a ``dns.rdatatype.RdataType``, string, or ``None``. If not ``None``, then the", "modifiers in the hostname. These come after # the dollar", "is None: raise dns.exception.SyntaxError(\"Missing default TTL value\") self.txn.add(name, ttl, rd)", "ttl = dns.ttl.from_text(token.value) self.last_ttl = ttl self.last_ttl_known = True token", "class. If ``None``, then the input may specify a class", "# helpful filename:line info. (ty, va) = sys.exc_info()[:2] raise dns.exception.SyntaxError(\"caught", "sign, offset = g2.groups() if sign == '': sign =", "self.current_origin, self.tok.idna_codec) name = self.last_name if not name.is_subdomain(self.zone_origin): self._eat_line() return", "# The code currently only supports base 'd', so the", "the owner name; dnspython does not do IDNA for names", "If ``True``, names are relativized to the *origin*; if ``False``", "class UnknownOrigin(dns.exception.DNSException): \"\"\"Unknown origin\"\"\" class CNAMEAndOtherData(dns.exception.DNSException): \"\"\"A node has a", "dns.exception.SyntaxError(\"Missing default TTL value\") self.txn.add(name, ttl, rd) def _parse_modify(self, side):", "token.value[0] == '$' and self.allow_directives: c = token.value.upper() if c", "self.origin = origin self.relativize = relativize self.rdclass = rdclass self.rrsets", "rdtype=None, default_ttl=None, idna_codec=None, origin=dns.name.root, relativize=False): \"\"\"Read one or more rrsets", "= is_generate1.match(side) if g1: mod, sign, offset, width, base =", "in all copies. 
# # THE SOFTWARE IS PROVIDED \"AS", "sys.exc_info()[:2] raise dns.exception.SyntaxError( \"caught exception {}: {}\".format(str(ty), str(va))) if not", "TTL is not forced an error will occur if the", "granted, # provided that the above copyright notice and this", "= [] self.current_file = None self.allow_include = allow_include self.allow_directives =", "tok = dns.tokenizer.Tokenizer(text, '<input>', idna_codec=idna_codec) reader = Reader(tok, default_rdclass, txn,", "lsign == '-': lindex = i - int(loffset) if rsign", "c == '$TTL': token = self.tok.get() if not token.is_identifier(): raise", "self.rdatasets[key] def _delete_rdataset(self, name, rdtype, covers): try: del self.rdatasets[(name, rdtype,", "in the processing of rdata # are treated as syntax", "width = 0 base = 'd' if base != 'd':", "# All exceptions that occur in the processing of rdata", "helpful filename:line info. (ty, va) = sys.exc_info()[:2] raise dns.exception.SyntaxError( \"caught", "{}\".format(str(ty), str(va))) if not self.default_ttl_known and rdtype == dns.rdatatype.SOA: #", "name, rdtype, covers): return self.rdatasets.get((name, rdtype, covers)) def _get_node(self, name):", "] \"\"\"Process one line containing the GENERATE statement from a", "specified value, and the input must not specify a class.", "== dns.node.NodeKind.REGULAR and \\ rdataset_kind == dns.node.NodeKind.CNAME: raise CNAMEAndOtherData('CNAME rdataset", "one of the node and the rdataset is neutral, so", "so # adding the rdataset is ok class Reader: \"\"\"Read", "= rdclass self.txn = txn self.saved_state = [] self.current_file =", "import dns.grange class UnknownOrigin(dns.exception.DNSException): \"\"\"Unknown origin\"\"\" class CNAMEAndOtherData(dns.exception.DNSException): \"\"\"A node", "if not token.is_identifier(): raise dns.exception.SyntaxError except Exception: raise dns.exception.SyntaxError(\"unknown rdatatype", "elif self.last_ttl_known: ttl = self.last_ttl # Class try: rdclass =", "EOL/EOF as if they were 
EOL/EOF. return self.tok.unget(token) name =", "= dns.rdataclass.from_text(token.value) except dns.exception.SyntaxError: raise except Exception: rdclass = self.zone_rdclass", "the input may specify a class that matches *default_rdclass*. Note", "self.allow_include: token = self.tok.get() filename = token.value token = self.tok.get()", "return RRsetsReaderTransaction(self, True, False) def get_class(self): return self.rdclass def origin_information(self):", "not None: name = self.force_name else: if self.current_origin is None:", "type rhs [ comment ] \"\"\"Process one line containing the", "is None: self.zone_origin = self.current_origin self.txn._set_origin(self.current_origin) elif c == '$INCLUDE'", "differing classes; specifying ``None`` for the class simply allows the", "def read_rrsets(text, name=None, ttl=None, rdclass=dns.rdataclass.IN, default_rdclass=dns.rdataclass.IN, rdtype=None, default_ttl=None, idna_codec=None, origin=dns.name.root,", "group so we can replace it # with ''. is_generate1", "return False def _changed(self): return len(self.rdatasets) > 0 def _end_transaction(self,", "node is None: # empty nodes are neutral. return node_kind", "Name if self.force_name is not None: name = self.force_name else:", "``int``, string, or ``None``. If not ``None``, then if the", "``None``, then the input may specify a class that matches", "TTL. If ``None``, then a TTL may be specified in", "is ``True``. *relativize*, a bool. If ``True``, names are relativized", "rd) def _parse_modify(self, side): # Here we catch everything in", "present in the input. *ttl*, an ``int``, string, or None.", "to the specified value, and the input must not specify", "Zones.\"\"\" import re import sys import dns.exception import dns.name import", "can replace it # with ''. 
is_generate1 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+),(\\d+),(.)}).*$\") is_generate2", "self.force_name is not None: name = self.force_name else: if self.current_origin", "token = self.tok.get() filename = token.value token = self.tok.get() if", "then the *default_ttl* will be used. *rdclass*, a ``dns.rdataclass.RdataClass``, string,", "rdclass self.rrsets = [] def writer(self, replacement=False): assert replacement is", "\"%s:%d: %s\" % (filename, line_number, detail)) tb = sys.exc_info()[2] raise", "import dns.node import dns.rdataclass import dns.rdatatype import dns.rdata import dns.rdtypes.ANY.SOA", "for names in rdata, as there is no IDNA zonefile", "= rd.minimum # TTL check. We had to wait until", "as txn: tok = dns.tokenizer.Tokenizer(text, '<input>', idna_codec=idna_codec) reader = Reader(tok,", "# are treated as syntax errors. This is not strictly", "for the class simply allows the user to optionally type", "def _get_node(self, name): rdatasets = [] for (rdataset_name, _, _),", "token.value # The code currently only supports base 'd', so", "DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF", "cutting and pasting. *default_rdclass*, a ``dns.rdataclass.RdataClass`` or string. The class", "ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA", "rsign == '-': rindex = i - int(roffset) elif rsign", "from a DNS zone file.\"\"\" if self.current_origin is None: raise", "== 0: return None node = dns.node.Node() node.rdatasets = rdatasets", "RRSetsReaderManager(origin, relativize, default_rdclass) with manager.writer(True) as txn: tok = dns.tokenizer.Tokenizer(text,", "default_ttl self.default_ttl_known = True self.last_name = self.current_origin self.zone_rdclass = rdclass", "Class try: rdclass = dns.rdataclass.from_text(token.value) token = self.tok.get() if not", "ttl, rd) def read(self): \"\"\"Read a DNS zone file and", "stop, step = dns.grange.from_text(token.value) token = self.tok.get() if not token.is_identifier():", "dollar sign. 
They are in the form: ${offset[,width[,base]]}. # Make", "= 0 self.last_ttl_known = False if force_ttl is not None:", "dns.exception.SyntaxError(\"unknown rdatatype '%s'\" % token.value) # rhs (required) rhs =", "Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. # # Permission to", "then the type is forced to the specified value, and", "to restrictions. *text*, a file object or a string, is", "self.rdatasets = {} def _get_rdataset(self, name, rdtype, covers): return self.rdatasets.get((name,", "node') elif node_kind == dns.node.NodeKind.REGULAR and \\ rdataset_kind == dns.node.NodeKind.CNAME:", "= allow_include self.allow_directives = allow_directives self.force_name = force_name self.force_ttl =", "ttl = self.default_ttl elif self.last_ttl_known: ttl = self.last_ttl # Class", "FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR #", "+ 1, step): # +1 because bind is inclusive and", "raise UnknownOrigin token = self.tok.get(want_leading=True) if not token.is_whitespace(): self.last_name =", "dns.exception.SyntaxError( \"bad origin in $INCLUDE\") else: new_origin = self.current_origin self.saved_state.append((self.tok,", "try: start, stop, step = dns.grange.from_text(token.value) token = self.tok.get() if", "0 self.default_ttl_known = False else: self.default_ttl = default_ttl self.default_ttl_known =", "dns.rdataclass.RdataClass.make(rdclass) default_rdclass = dns.rdataclass.RdataClass.make(default_rdclass) if rdtype is not None: rdtype", "txn self.saved_state = [] self.current_file = None self.allow_include = allow_include", "token.is_identifier(): raise dns.exception.SyntaxError return token def _rr_line(self): \"\"\"Process one line", "origin=dns.name.root, relativize=False, rdclass=dns.rdataclass.IN): self.origin = origin self.relativize = relativize self.rdclass", "*name*, a string, ``dns.name.Name``, or ``None``, is the owner name", "self.last_ttl = ttl self.last_ttl_known = True else: token = self._get_identifier()", "continue elif token.is_comment(): 
self.tok.get_eol() continue elif token.value[0] == '$' and", "= 0 self.default_ttl_known = False else: self.default_ttl = default_ttl self.default_ttl_known", "the class simply allows the user to optionally type a", "self.default_ttl_known and rdtype == dns.rdatatype.SOA: # The pre-RFC2308 and pre-BIND9", "new_origin = self.current_origin self.saved_state.append((self.tok, self.current_origin, self.last_name, self.current_file, self.last_ttl, self.last_ttl_known, self.default_ttl,", "if commit and self._changed(): rrsets = [] for (name, _,", "= self.tok.get() if token.is_identifier(): new_origin =\\ dns.name.from_text(token.value, self.current_origin, self.tok.idna_codec) self.tok.get_eol()", "self.zone_origin) except dns.exception.SyntaxError: # Catch and reraise. raise except Exception:", "not token.is_identifier(): raise dns.exception.SyntaxError except Exception: raise dns.exception.SyntaxError(\"unknown rdatatype '%s'\"", "except dns.exception.SyntaxError: # Catch and reraise. raise except Exception: #", "except Exception: raise dns.exception.SyntaxError( \"unknown rdatatype '%s'\" % token.value) try:", "str): default_ttl = dns.ttl.from_text(default_ttl) if rdclass is not None: rdclass", "the IDNA encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder", "token = None except dns.ttl.BadTTL: if self.default_ttl_known: ttl = self.default_ttl", "must not specify a TTL. 
If ``None``, then a TTL", "if token.is_eof(): if self.current_file is not None: self.current_file.close() if len(self.saved_state)", "= default_ttl self.default_ttl_known = True self.last_name = self.current_origin self.zone_rdclass =", "else: token = self._get_identifier() ttl = None try: ttl =", "> 0: for key in remove: del self.rdatasets[key] def _delete_rdataset(self,", "dns.ttl import dns.grange class UnknownOrigin(dns.exception.DNSException): \"\"\"Unknown origin\"\"\" class CNAMEAndOtherData(dns.exception.DNSException): \"\"\"A", "except dns.exception.SyntaxError: raise dns.exception.SyntaxError except Exception: rdclass = self.zone_rdclass if", "effective = self.origin return (self.origin, self.relativize, effective) def set_rrsets(self, rrsets):", "if self.force_rdclass is not None: rdclass = self.force_rdclass else: token", "is_generate2 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+)}).*$\") is_generate3 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+),(\\d+)}).*$\") # Sometimes there are", "``None``, then the type is forced to the specified value,", "names in the input, and also the origin to relativize", "ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE", "lhs (required) try: lhs = token.value token = self.tok.get() if", "origin for any relative names in the input, and also", "dns.exception.SyntaxError: raise except Exception: rdclass = self.zone_rdclass self.tok.unget(token) if rdclass", "LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES", "the *default_ttl* will be used. *rdclass*, a ``dns.rdataclass.RdataClass``, string, or", "possibly subject to restrictions. 
*text*, a file object or a", "names are allowed and must be present in the input.", "read_only): assert not read_only super().__init__(manager, replacement, read_only) self.rdatasets = {}", "CNAME and other data\"\"\" def _check_cname_and_other_data(txn, name, rdataset): rdataset_kind =", "{}: {}\".format(str(ty), str(va))) if not self.default_ttl_known and rdtype == dns.rdatatype.SOA:", "rdata = rhs.replace('$%s' % (rmod), rzfindex) self.last_name = dns.name.from_text(name, self.current_origin,", "to use, copy, modify, and distribute this software and its", "``False`` then any relative names in the input are made", "``True``. *relativize*, a bool. If ``True``, names are relativized to", "the processing of rdata # are treated as syntax errors.", "string, ``dns.name.Name``, or ``None``, is the owner name of the", "any changes involving the name remove = [] for key", "ttl=None, rdclass=dns.rdataclass.IN, default_rdclass=dns.rdataclass.IN, rdtype=None, default_ttl=None, idna_codec=None, origin=dns.name.root, relativize=False): \"\"\"Read one", "present before the # SOA is parsed. self.default_ttl = rd.minimum", "and python is exclusive if lsign == '+': lindex =", "``None``, is the owner name of the rrset. If not", "= str(rindex).zfill(int(rwidth)) name = lhs.replace('$%s' % (lmod), lzfindex) rdata =", "``dns.name.Name``, or ``None``, is the origin for any relative names", "DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER", "= self.tok.as_name(token, self.current_origin) else: token = self.tok.get() if token.is_eol_or_eof(): #", "manager.writer(True) as txn: tok = dns.tokenizer.Tokenizer(text, '<input>', idna_codec=idna_codec) reader =", "read_only) self.rdatasets = {} def _get_rdataset(self, name, rdtype, covers): return", "not token.is_identifier(): raise dns.exception.SyntaxError(\"bad $TTL\") self.default_ttl = dns.ttl.from_text(token.value) self.default_ttl_known =", "ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. 
IN NO", "len(rdatasets) == 0: return None node = dns.node.Node() node.rdatasets =", "type must be present for each RR. *default_ttl*, an ``int``,", "set it! ttl = rd.minimum # TTL check. We had", "codecs only apply to the owner name; dnspython does not", "# correct, but it is correct almost all of the", "everything in '{' '}' in a group so we can", "str): origin = dns.name.from_text(origin, dns.name.root, idna_codec) if isinstance(name, str): name", "not specified, then the *default_ttl* will be used. *rdclass*, a", "self.force_ttl is not None: ttl = self.force_ttl self.last_ttl = ttl", "= False else: self.default_ttl = default_ttl self.default_ttl_known = True self.last_name", "token = self.tok.get(want_leading=True) if not token.is_whitespace(): self.last_name = self.tok.as_name(token, self.current_origin)", "self.txn.add(name, ttl, rd) def read(self): \"\"\"Read a DNS zone file", "WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL", "dns.ttl.from_text(token.value) self.default_ttl_known = True self.tok.get_eol() elif c == '$ORIGIN': self.current_origin", "apply to the owner name; dnspython does not do IDNA", "# own TTL can be inferred from its minimum. if", "= {} def _get_rdataset(self, name, rdtype, covers): return self.rdatasets.get((name, rdtype,", "is True return RRsetsReaderTransaction(self, True, False) def get_class(self): return self.rdclass", "name, rdataset): self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] = rdataset def _delete_name(self, name):", "$INCLUDE\") else: new_origin = self.current_origin self.saved_state.append((self.tok, self.current_origin, self.last_name, self.current_file, self.last_ttl,", "import dns.exception import dns.name import dns.node import dns.rdataclass import dns.rdatatype", "Sometimes there are modifiers in the hostname. These come after", "The class of the returned rrsets. 
*rdtype*, a ``dns.rdatatype.RdataType``, string,", "if rdclass is not None: rdclass = dns.rdataclass.RdataClass.make(rdclass) default_rdclass =", "dns.name import dns.node import dns.rdataclass import dns.rdatatype import dns.rdata import", "%s\" % (filename, line_number, detail)) tb = sys.exc_info()[2] raise ex.with_traceback(tb)", "so that we can emit # helpful filename:line info. (ty,", "without fee is hereby granted, # provided that the above", "the input must not specify a type. If ``None``, then", "zone's class\") # Type try: rdtype = dns.rdatatype.from_text(token.value) token =", "TTL from the SOA minttl if no $TTL statement is", "If not ``None``, then if the TTL is not forced", "origin self.relativize = relativize self.rdclass = rdclass self.rrsets = []", "reader = Reader(tok, default_rdclass, txn, allow_directives=False, force_name=name, force_ttl=ttl, force_rdclass=rdclass, force_rdtype=rdtype,", "_generate_line(self): # range lhs [ttl] [class] type rhs [ comment", "def __init__(self, manager, replacement, read_only): assert not read_only super().__init__(manager, replacement,", "None: # if we didn't have a TTL on the", "class as that may be convenient when cutting and pasting.", "_, _) in self.rdatasets: if n == name: return True", "name.relativize(self.zone_origin) try: rd = dns.rdata.from_text(rdclass, rdtype, rdata, self.current_origin, self.relativize, self.zone_origin)", "dns.exception import dns.name import dns.node import dns.rdataclass import dns.rdatatype import", "# # THE SOFTWARE IS PROVIDED \"AS IS\" AND NOMINUM", "data\"\"\" def _check_cname_and_other_data(txn, name, rdataset): rdataset_kind = dns.node.NodeKind.classify_rdataset(rdataset) node =", "an owner name. 
If ``None``, then any owner names are", "Exception: raise dns.exception.SyntaxError( \"unknown rdatatype '%s'\" % token.value) try: rd", "to be the specified value and the input must not", "RR was found at the zone origin @raises dns.zone.NoNS: No", "None: rdtype = self.force_rdtype else: token = self._get_identifier() try: rdtype", "= force_rdclass self.force_rdtype = force_rdtype self.txn.check_put_rdataset(_check_cname_and_other_data) def _eat_line(self): while 1:", "dns.exception.SyntaxError except Exception: raise dns.exception.SyntaxError # TTL try: ttl =", "TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH", "OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION", "ttl = dns.ttl.from_text(ttl) if isinstance(default_ttl, str): default_ttl = dns.ttl.from_text(default_ttl) if", "owner names are allowed and must be present in the", "if key[0] == name: remove.append(key) if len(remove) > 0: for", "any relative names in the input are made absolute by", "node def _put_rdataset(self, name, rdataset): self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] = rdataset", "= sys.exc_info()[2] raise ex.with_traceback(tb) from None class RRsetsReaderTransaction(dns.transaction.Transaction): def __init__(self,", "origin_information(self): if self.relativize: effective = dns.name.empty else: effective = self.origin", "the origin for any relative names in the input, and", "is not None: rdtype = self.force_rdtype else: token = self._get_identifier()", "the GENERATE statement from a DNS zone file.\"\"\" if self.current_origin", "rdataset is ok class Reader: \"\"\"Read a DNS zone file", "offset = 0 width = 0 base = 'd' if", "file and build a zone object. @raises dns.zone.NoSOA: No SOA", "self.force_rdtype else: token = self._get_identifier() try: rdtype = dns.rdatatype.from_text(token.value) except", "class RRsetsReaderTransaction(dns.transaction.Transaction): def __init__(self, manager, replacement, read_only): assert not read_only", "occur if the TTL is not specified. 
*idna_codec*, a ``dns.name.IDNACodec``,", "origin to relativize to if *relativize* is ``True``. *relativize*, a", "dns.node.NodeKind.CNAME and \\ rdataset_kind == dns.node.NodeKind.REGULAR: raise CNAMEAndOtherData('rdataset type is", "be present in the input. *ttl*, an ``int``, string, or", "self.current_origin, self.relativize, self.zone_origin) except dns.exception.SyntaxError: # Catch and reraise. raise", "are modifiers in the hostname. These come after # the", "or None. If not ``None``, the the TTL is forced", "is None: # empty nodes are neutral. return node_kind =", "\"\"\"Read one or more rrsets from the specified text, possibly", "the specified text, possibly subject to restrictions. *text*, a file", "dns.rdataclass import dns.rdatatype import dns.rdata import dns.rdtypes.ANY.SOA import dns.rrset import", "if base != 'd': raise NotImplementedError() return mod, sign, offset,", "# rhs (required) rhs = token.value # The code currently", "+ \"'\") continue self.tok.unget(token) self._rr_line() except dns.exception.SyntaxError as detail: (filename,", "DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT,", "is the origin for any relative names in the input,", "% (filename, line_number, detail)) tb = sys.exc_info()[2] raise ex.with_traceback(tb) from", "'+' width = 0 base = 'd' g3 = is_generate3.match(side)", "rd = dns.rdata.from_text(rdclass, rdtype, self.tok, self.current_origin, self.relativize, self.zone_origin) except dns.exception.SyntaxError:", "more rrsets from the specified text, possibly subject to restrictions.", "= node.classify() if node_kind == dns.node.NodeKind.CNAME and \\ rdataset_kind ==", "def _get_identifier(self): token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError", "= rdatasets return node def _put_rdataset(self, name, rdataset): self.rdatasets[(name, rdataset.rdtype,", "Copyright (C) Dnspython Contributors, see LICENSE for text of ISC", "not ``None``, then if the TTL is not forced and", "self.force_name = 
force_name self.force_ttl = force_ttl self.force_rdclass = force_rdclass self.force_rdtype", "def __init__(self, origin=dns.name.root, relativize=False, rdclass=dns.rdataclass.IN): self.origin = origin self.relativize =", "and this permission notice # appear in all copies. #", "[ttl] [class] type rhs [ comment ] \"\"\"Process one line", "str(va))) self.txn.add(name, ttl, rd) def read(self): \"\"\"Read a DNS zone", "directive '\" + c + \"'\") continue self.tok.unget(token) self._rr_line() except", "token.is_eol_or_eof(): raise dns.exception.SyntaxError( \"bad origin in $INCLUDE\") else: new_origin =", "True return RRsetsReaderTransaction(self, True, False) def get_class(self): return self.rdclass def", "the specified value, and the input must not specify a", "get_class(self): return self.rdclass def origin_information(self): if self.relativize: effective = dns.name.empty", "in the hostname. These come after # the dollar sign.", "_set_origin(self, origin): pass class RRSetsReaderManager(dns.transaction.TransactionManager): def __init__(self, origin=dns.name.root, relativize=False, rdclass=dns.rdataclass.IN):", "= self._get_identifier() ttl = None try: ttl = dns.ttl.from_text(token.value) self.last_ttl", "self.last_ttl self.tok.unget(token) # Class if self.force_rdclass is not None: rdclass", "IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS", "one line from a DNS zone file.\"\"\" token = None", "dns.name.from_text(name, origin, idna_codec) if isinstance(ttl, str): ttl = dns.ttl.from_text(ttl) if", "the user to optionally type a class as that may", "except KeyError: pass def _name_exists(self, name): for (n, _, _)", "= self._get_identifier() try: rdclass = dns.rdataclass.from_text(token.value) except dns.exception.SyntaxError: raise except", "and reraise. raise except Exception: # All exceptions that occur", "purpose with or without fee is hereby granted, # provided", "all copies. # # THE SOFTWARE IS PROVIDED \"AS IS\"", "is the owner name of the rrset. 
If not ``None``,", "dns.rdatatype import dns.rdata import dns.rdtypes.ANY.SOA import dns.rrset import dns.tokenizer import", "self.tok.where() if detail is None: detail = \"syntax error\" ex", "node_kind == dns.node.NodeKind.CNAME and \\ rdataset_kind == dns.node.NodeKind.REGULAR: raise CNAMEAndOtherData('rdataset", "dns.exception.SyntaxError(\"caught exception %s: %s\" % (str(ty), str(va))) self.txn.add(name, ttl, rd)", "the time. # We convert them to syntax errors so", "or more rrsets from the specified text, possibly subject to", "remove any changes involving the name remove = [] for", "is not possible to return rrsets with differing classes; specifying", "statement is present before the # SOA is parsed. self.default_ttl", "then the owner name is \"forced\", and the input must", "None: rdclass = dns.rdataclass.RdataClass.make(rdclass) default_rdclass = dns.rdataclass.RdataClass.make(default_rdclass) if rdtype is", "USE OR PERFORMANCE OF THIS SOFTWARE. \"\"\"DNS Zones.\"\"\" import re", "name): # First remove any changes involving the name remove", "effective = dns.name.empty else: effective = self.origin return (self.origin, self.relativize,", "if the TTL is not specified. *idna_codec*, a ``dns.name.IDNACodec``, specifies", "then if the TTL is not forced and is not", "relativized to the *origin*; if ``False`` then any relative names", "a ``dns.rdataclass.RdataClass``, string, or ``None``. If not ``None``, then the", "Note that it is not possible to return rrsets with", "CNAMEAndOtherData('CNAME rdataset is not compatible with a ' 'regular data", "> 0: (self.tok, self.current_origin, self.last_name, self.current_file, self.last_ttl, self.last_ttl_known, self.default_ttl, self.default_ttl_known)", "not specify an owner name. 
If ``None``, then any owner", "True if ttl is None: # if we didn't have", "the the TTL is forced to be the specified value", "rdataset is not compatible with a ' 'regular data node')", "LICENSE for text of ISC license # Copyright (C) 2003-2007,", "TTL on the SOA, set it! ttl = rd.minimum #", "_end_transaction(self, commit): if commit and self._changed(): rrsets = [] for", "== '$ORIGIN': self.current_origin = self.tok.get_name() self.tok.get_eol() if self.zone_origin is None:", "if len(remove) > 0: for key in remove: del self.rdatasets[key]", "so the last value # in the tuple _parse_modify returns", "token = self.tok.get() if token.is_eol_or_eof(): # treat leading WS followed", "then this value will be used. if ``None``, then if", "not specified. *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA encoder/decoder. If", "IDNA encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder is", "of the returned rrsets. *rdtype*, a ``dns.rdatatype.RdataType``, string, or ``None``.", "ttl = self.last_ttl self.tok.unget(token) # Class if self.force_rdclass is not", "found at the zone origin @raises dns.zone.NoNS: No NS RRset", "matches *default_rdclass*. 
Note that it is not possible to return", "had to wait until now to do this as the", "pre-BIND9 behavior inherits the zone default # TTL from the", "the name remove = [] for key in self.rdatasets: if", "rdtype = self.force_rdtype else: token = self._get_identifier() try: rdtype =", "'}' in a group so we can replace it #", "% (lmod), lzfindex) rdata = rhs.replace('$%s' % (rmod), rzfindex) self.last_name", "def _delete_name(self, name): # First remove any changes involving the", "= None self.allow_include = allow_include self.allow_directives = allow_directives self.force_name =", "= i + int(roffset) lzfindex = str(lindex).zfill(int(lwidth)) rzfindex = str(rindex).zfill(int(rwidth))", "self.relativize: name = name.relativize(self.zone_origin) try: rd = dns.rdata.from_text(rdclass, rdtype, rdata,", "# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. # # Permission", "dns.node.NodeKind.classify_rdataset(rdataset) node = txn.get_node(name) if node is None: # empty", "in the input are made absolute by appending the *origin*.", "None: self.default_ttl = 0 self.default_ttl_known = False else: self.default_ttl =", "self.current_file = open(filename, 'r') self.tok = dns.tokenizer.Tokenizer(self.current_file, filename) self.current_origin =", "None: # empty nodes are neutral. return node_kind = node.classify()", "dns.exception.SyntaxError( \"%s:%d: %s\" % (filename, line_number, detail)) tb = sys.exc_info()[2]", "SOA, set it! ttl = rd.minimum # TTL check. We", "*relativize* is ``True``. *relativize*, a bool. If ``True``, names are", "= ttl self.last_ttl_known = True token = self.tok.get() if not", "raise CNAMEAndOtherData('CNAME rdataset is not compatible with a ' 'regular", "= dns.rdata.from_text(rdclass, rdtype, self.tok, self.current_origin, self.relativize, self.zone_origin) except dns.exception.SyntaxError: #", "*idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA encoder/decoder. 
If ``None``, the", "self.tok, self.current_origin, self.relativize, self.zone_origin) except dns.exception.SyntaxError: # Catch and reraise.", "g3): mod = '' sign = '+' offset = 0", "True self.tok.get_eol() elif c == '$ORIGIN': self.current_origin = self.tok.get_name() self.tok.get_eol()", "node') # Otherwise at least one of the node and", "rdclass=dns.rdataclass.IN, default_rdclass=dns.rdataclass.IN, rdtype=None, default_ttl=None, idna_codec=None, origin=dns.name.root, relativize=False): \"\"\"Read one or", "NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE", "rdata, as there is no IDNA zonefile format. *origin*, a", "= dns.rdatatype.from_text(token.value) token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError", "\"\"\" try: while 1: token = self.tok.get(True, True) if token.is_eof():", "offset, width, base def _generate_line(self): # range lhs [ttl] [class]", "If not ``None``, the the TTL is forced to be", "is used. Note that codecs only apply to the owner", "node has a CNAME and other data\"\"\" def _check_cname_and_other_data(txn, name,", "in the input. If it is not specified, then the", "'+' base = 'd' if not (g1 or g2 or", "offset = g2.groups() if sign == '': sign = '+'", "specified text, possibly subject to restrictions. *text*, a file object", "user to optionally type a class as that may be", "input, and also the origin to relativize to if *relativize*", "each RR. *default_ttl*, an ``int``, string, or ``None``. If not", "= name.relativize(self.zone_origin) # TTL if self.force_ttl is not None: ttl", "in self.rdatasets.items(): if name == rdataset_name: rdatasets.append(rdataset) if len(rdatasets) ==", "rdatasets.append(rdataset) if len(rdatasets) == 0: return None node = dns.node.Node()", "in the input. *ttl*, an ``int``, string, or None. 
If", "SOFTWARE IS PROVIDED \"AS IS\" AND NOMINUM DISCLAIMS ALL WARRANTIES", "base = g1.groups() if sign == '': sign = '+'", "token.is_identifier(): raise dns.exception.SyntaxError(\"bad $TTL\") self.default_ttl = dns.ttl.from_text(token.value) self.default_ttl_known = True", "do this as the SOA RR's # own TTL can", "width = 0 base = 'd' g3 = is_generate3.match(side) if", "type. If ``None``, then a type must be present for", "lhs [ttl] [class] type rhs [ comment ] \"\"\"Process one", "default_ttl = force_ttl if default_ttl is None: self.default_ttl = 0", "input. *ttl*, an ``int``, string, or None. If not ``None``,", "re.compile(r\"^.*\\$({(\\+|-?)(\\d+),(\\d+)}).*$\") # Sometimes there are modifiers in the hostname. These", "self.last_ttl_known = False if force_ttl is not None: default_ttl =", "are made absolute by appending the *origin*. \"\"\" if isinstance(origin,", "Type if self.force_rdtype is not None: rdtype = self.force_rdtype else:", "of the node and the rdataset is neutral, so #", "convenient when cutting and pasting. *default_rdclass*, a ``dns.rdataclass.RdataClass`` or string.", "(self.tok, self.current_origin, self.last_name, self.current_file, self.last_ttl, self.last_ttl_known, self.default_ttl, self.default_ttl_known) = self.saved_state.pop(-1)", "== name: remove.append(key) if len(remove) > 0: for key in", "dns.exception.SyntaxError( \"caught exception {}: {}\".format(str(ty), str(va))) if not self.default_ttl_known and", "a string, is the input to process. *name*, a string,", "= self.tok.get_name() self.tok.get_eol() if self.zone_origin is None: self.zone_origin = self.current_origin", "a class that matches *default_rdclass*. 
Note that it is not", "IS PROVIDED \"AS IS\" AND NOMINUM DISCLAIMS ALL WARRANTIES #", "va) = sys.exc_info()[:2] raise dns.exception.SyntaxError( \"caught exception {}: {}\".format(str(ty), str(va)))", "the SOA minttl if no $TTL statement is present before", "Here we catch everything in '{' '}' in a group", "dns.exception.SyntaxError except Exception: raise dns.exception.SyntaxError(\"unknown rdatatype '%s'\" % token.value) #", "stop + 1, step): # +1 because bind is inclusive", "self.force_name else: if self.current_origin is None: raise UnknownOrigin token =", "rhs (required) rhs = token.value # The code currently only", "is not compatible with a ' 'CNAME node') elif node_kind", "self.last_ttl_known: ttl = self.last_ttl self.tok.unget(token) # Class if self.force_rdclass is", "dns.name.root, idna_codec) if isinstance(name, str): name = dns.name.from_text(name, origin, idna_codec)", "dns.exception.SyntaxError( \"unknown rdatatype '%s'\" % token.value) try: rd = dns.rdata.from_text(rdclass,", "we can emit # helpful filename:line info. (ty, va) =", "self.last_ttl_known = True else: token = self._get_identifier() ttl = None", "or without fee is hereby granted, # provided that the", "the dollar sign. They are in the form: ${offset[,width[,base]]}. #", "self.last_name, self.current_file, self.last_ttl, self.last_ttl_known, self.default_ttl, self.default_ttl_known) = self.saved_state.pop(-1) continue break", "self.tok.get() filename = token.value token = self.tok.get() if token.is_identifier(): new_origin", "dns.zone.NoSOA: No SOA RR was found at the zone origin", "IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY", "(ty, va) = sys.exc_info()[:2] raise dns.exception.SyntaxError( \"caught exception {}: {}\".format(str(ty),", "for each RR. *default_ttl*, an ``int``, string, or ``None``. 
If", "the SOA RR's # own TTL can be inferred from", "= True if ttl is None: # if we didn't", "line_number, detail)) tb = sys.exc_info()[2] raise ex.with_traceback(tb) from None class", "if node is None: # empty nodes are neutral. return", "= txn self.saved_state = [] self.current_file = None self.allow_include =", "will be used. if ``None``, then if the TTL is", "DNS zone file into a transaction.\"\"\" def __init__(self, tok, rdclass,", "self._eat_line() return if self.relativize: name = name.relativize(self.zone_origin) # TTL if", "must not specify an owner name. If ``None``, then any", "relative names in the input are made absolute by appending", "= [] for (rdataset_name, _, _), rdataset in self.rdatasets.items(): if", "self.zone_rdclass self.tok.unget(token) if rdclass != self.zone_rdclass: raise dns.exception.SyntaxError(\"RR class is", "def get_class(self): return self.rdclass def origin_information(self): if self.relativize: effective =", "if n == name: return True return False def _changed(self):", "of rdata # are treated as syntax errors. 
This is", "dns.ttl.BadTTL: if self.default_ttl_known: ttl = self.default_ttl elif self.last_ttl_known: ttl =", "_, _), rdataset in self.rdatasets.items(): if name == rdataset_name: rdatasets.append(rdataset)", "the last value # in the tuple _parse_modify returns is", "= self.current_origin self.txn._set_origin(self.current_origin) elif c == '$INCLUDE' and self.allow_include: token", "detail)) tb = sys.exc_info()[2] raise ex.with_traceback(tb) from None class RRsetsReaderTransaction(dns.transaction.Transaction):", "'d' if not (g1 or g2 or g3): mod =", "None # Name if self.force_name is not None: name =", "``None``, then the class is forced to the specified value,", "try: ttl = dns.ttl.from_text(token.value) self.last_ttl = ttl self.last_ttl_known = True", "= True token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError", "= self.tok.get(want_leading=True) if not token.is_whitespace(): self.last_name = self.tok.as_name(token, self.current_origin) else:", "== '$INCLUDE' and self.allow_include: token = self.tok.get() filename = token.value", "errors so that we can emit # helpful filename:line info.", "is forced to be the specified value and the input", "if token.is_eol_or_eof(): # treat leading WS followed by EOL/EOF as", "BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL", "sys.exc_info()[2] raise ex.with_traceback(tb) from None class RRsetsReaderTransaction(dns.transaction.Transaction): def __init__(self, manager,", "``True``, names are relativized to the *origin*; if ``False`` then", "rdatatype '%s'\" % token.value) try: rd = dns.rdata.from_text(rdclass, rdtype, self.tok,", "inferred from its minimum. if ttl is None: raise dns.exception.SyntaxError(\"Missing", "= ttl self.last_ttl_known = True else: token = self._get_identifier() ttl", "'' sign = '+' offset = 0 width = 0", "nodes are neutral. 
return node_kind = node.classify() if node_kind ==", "self.force_rdclass is not None: rdclass = self.force_rdclass else: token =", "from a DNS zone file.\"\"\" token = None # Name", "= dns.name.empty else: effective = self.origin return (self.origin, self.relativize, effective)", "if isinstance(origin, str): origin = dns.name.from_text(origin, dns.name.root, idna_codec) if isinstance(name,", "offset, width = g3.groups() if sign == '': sign =", "rd.minimum self.default_ttl_known = True if ttl is None: # if", "if g2: mod, sign, offset = g2.groups() if sign ==", "an ``int``, string, or ``None``. If not ``None``, then if", "= dns.ttl.from_text(ttl) if isinstance(default_ttl, str): default_ttl = dns.ttl.from_text(default_ttl) if rdclass", "self.tok.get_eol() if self.zone_origin is None: self.zone_origin = self.current_origin self.txn._set_origin(self.current_origin) elif", "as if they were EOL/EOF. return self.tok.unget(token) name = self.last_name", "covers)) def _get_node(self, name): rdatasets = [] for (rdataset_name, _,", "= 'd' if not (g1 or g2 or g3): mod", "INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING", "is not specified. *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA encoder/decoder.", "except Exception: rdclass = self.zone_rdclass self.tok.unget(token) if rdclass != self.zone_rdclass:", "may specify a class that matches *default_rdclass*. Note that it", "import dns.tokenizer import dns.transaction import dns.ttl import dns.grange class UnknownOrigin(dns.exception.DNSException):", "then a TTL may be specified in the input. 
If", "if self.force_ttl is not None: ttl = self.force_ttl self.last_ttl =", "and other data\"\"\" def _check_cname_and_other_data(txn, name, rdataset): rdataset_kind = dns.node.NodeKind.classify_rdataset(rdataset)", "None: raise UnknownOrigin token = self.tok.get(want_leading=True) if not token.is_whitespace(): self.last_name", "= dns.name.from_text(name, self.current_origin, self.tok.idna_codec) name = self.last_name if not name.is_subdomain(self.zone_origin):", "dns.rdata.from_text(rdclass, rdtype, rdata, self.current_origin, self.relativize, self.zone_origin) except dns.exception.SyntaxError: # Catch", "# empty nodes are neutral. return node_kind = node.classify() if", "self.current_file.close() if len(self.saved_state) > 0: (self.tok, self.current_origin, self.last_name, self.current_file, self.last_ttl,", "dns.rdataclass.RdataClass.make(default_rdclass) if rdtype is not None: rdtype = dns.rdatatype.RdataType.make(rdtype) manager", "= dns.tokenizer.Tokenizer(text, '<input>', idna_codec=idna_codec) reader = Reader(tok, default_rdclass, txn, allow_directives=False,", "== dns.rdatatype.SOA: # The pre-RFC2308 and pre-BIND9 behavior inherits the", "writer(self, replacement=False): assert replacement is True return RRsetsReaderTransaction(self, True, False)", "self.default_ttl_known: ttl = self.default_ttl elif self.last_ttl_known: ttl = self.last_ttl #", "CNAMEAndOtherData('rdataset type is not compatible with a ' 'CNAME node')", "self._get_identifier() try: rdclass = dns.rdataclass.from_text(token.value) except dns.exception.SyntaxError: raise except Exception:", "DNS zone file.\"\"\" if self.current_origin is None: raise UnknownOrigin token", "rdataset.covers) rrset.update(rdataset) rrsets.append(rrset) self.manager.set_rrsets(rrsets) def _set_origin(self, origin): pass class RRSetsReaderManager(dns.transaction.TransactionManager):", "in self.rdatasets.items(): rrset = dns.rrset.RRset(name, rdataset.rdclass, rdataset.rdtype, rdataset.covers) rrset.update(rdataset) 
rrsets.append(rrset)", "raise dns.exception.SyntaxError except dns.ttl.BadTTL: if not (self.last_ttl_known or self.default_ttl_known): raise", "covers): try: del self.rdatasets[(name, rdtype, covers)] except KeyError: pass def", "not specified, then this value will be used. if ``None``,", "rdataset_kind = dns.node.NodeKind.classify_rdataset(rdataset) node = txn.get_node(name) if node is None:", "zone file and build a zone object. @raises dns.zone.NoSOA: No", "== '$GENERATE': self._generate_line() else: raise dns.exception.SyntaxError( \"Unknown zone file directive", "= self.tok.get() if token.is_eol_or_eof(): break def _get_identifier(self): token = self.tok.get()", "= token.value.upper() if c == '$TTL': token = self.tok.get() if", "above copyright notice and this permission notice # appear in", "elif rsign == '+': rindex = i + int(roffset) lzfindex", "token.is_identifier(): raise dns.exception.SyntaxError except dns.ttl.BadTTL: if not (self.last_ttl_known or self.default_ttl_known):", "No NS RRset was found at the zone origin \"\"\"", "or ``None``. If not ``None``, then the type is forced", "rd.minimum # TTL check. We had to wait until now", "Otherwise at least one of the node and the rdataset", "offset, width, base = g1.groups() if sign == '': sign", "until now to do this as the SOA RR's #", "the default IDNA 2003 encoder/decoder is used. Note that codecs", "self.last_name = self.tok.as_name(token, self.current_origin) else: token = self.tok.get() if token.is_eol_or_eof():", "force_ttl if default_ttl is None: self.default_ttl = 0 self.default_ttl_known =", "permission notice # appear in all copies. 
# # THE", "name.is_subdomain(self.zone_origin): self._eat_line() return if self.relativize: name = name.relativize(self.zone_origin) try: rd", "If ``None``, then a type must be present for each", "\"\"\"Process one line containing the GENERATE statement from a DNS", "== '+': lindex = i + int(loffset) elif lsign ==", "self._parse_modify(rhs) for i in range(start, stop + 1, step): #", "ttl self.last_ttl_known = True token = None except dns.ttl.BadTTL: if", "token.is_comment(): self.tok.get_eol() continue elif token.value[0] == '$' and self.allow_directives: c", "# documentation for any purpose with or without fee is", "treat leading WS followed by EOL/EOF as if they were", "name = name.relativize(self.zone_origin) # TTL if self.force_ttl is not None:", "width, base = g1.groups() if sign == '': sign =", "we can replace it # with ''. is_generate1 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+),(\\d+),(.)}).*$\")", "optionally type a class as that may be convenient when", "True) if token.is_eof(): if self.current_file is not None: self.current_file.close() if", "not ``None``, then the type is forced to the specified", "format. *origin*, a string, ``dns.name.Name``, or ``None``, is the origin", "def _changed(self): return len(self.rdatasets) > 0 def _end_transaction(self, commit): if", "2003 encoder/decoder is used. 
Note that codecs only apply to", "= self.default_ttl elif self.last_ttl_known: ttl = self.last_ttl self.tok.unget(token) # Class", "self.txn._set_origin(self.current_origin) elif c == '$INCLUDE' and self.allow_include: token = self.tok.get()", "None try: ttl = dns.ttl.from_text(token.value) self.last_ttl = ttl self.last_ttl_known =", "only supports base 'd', so the last value # in", "also the origin to relativize to if *relativize* is ``True``.", "dns.exception.SyntaxError # lhs (required) try: lhs = token.value token =", "rrsets with differing classes; specifying ``None`` for the class simply", "dns.exception.SyntaxError except Exception: raise dns.exception.SyntaxError # lhs (required) try: lhs", "raise dns.exception.SyntaxError except dns.exception.SyntaxError: raise dns.exception.SyntaxError except Exception: rdclass =", "'-': rindex = i - int(roffset) elif rsign == '+':", "rdtype = dns.rdatatype.RdataType.make(rdtype) manager = RRSetsReaderManager(origin, relativize, default_rdclass) with manager.writer(True)", "'CNAME node') elif node_kind == dns.node.NodeKind.REGULAR and \\ rdataset_kind ==", "check. We had to wait until now to do this", "specifying ``None`` for the class simply allows the user to", "and pasting. *default_rdclass*, a ``dns.rdataclass.RdataClass`` or string. 
The class of", "default TTL value\") self.txn.add(name, ttl, rd) def _parse_modify(self, side): #", "IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.", "= self.tok.get() if token.is_eol_or_eof(): # treat leading WS followed by", "changes involving the name remove = [] for key in", "g3.groups() if sign == '': sign = '+' base =", "raise dns.exception.SyntaxError except Exception: raise dns.exception.SyntaxError(\"unknown rdatatype '%s'\" % token.value)", "# if we didn't have a TTL on the SOA,", "fee is hereby granted, # provided that the above copyright", "rrsets def read_rrsets(text, name=None, ttl=None, rdclass=dns.rdataclass.IN, default_rdclass=dns.rdataclass.IN, rdtype=None, default_ttl=None, idna_codec=None,", "mod, sign, offset, width, base = g1.groups() if sign ==", "``None``, then if the TTL is not forced an error", "We convert them to syntax errors so that we can", "raise except Exception: rdclass = self.zone_rdclass self.tok.unget(token) if rdclass !=", "__init__(self, tok, rdclass, txn, allow_include=False, allow_directives=True, force_name=None, force_ttl=None, force_rdclass=None, force_rdtype=None,", "str): ttl = dns.ttl.from_text(ttl) if isinstance(default_ttl, str): default_ttl = dns.ttl.from_text(default_ttl)", "rrsets.append(rrset) self.manager.set_rrsets(rrsets) def _set_origin(self, origin): pass class RRSetsReaderManager(dns.transaction.TransactionManager): def __init__(self,", "= token.value token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError", "= rdclass self.rrsets = [] def writer(self, replacement=False): assert replacement", "def __init__(self, tok, rdclass, txn, allow_include=False, allow_directives=True, force_name=None, force_ttl=None, force_rdclass=None,", "[class] type rhs [ comment ] \"\"\"Process one line containing", "If not ``None``, then the class is forced to the", "# TTL if self.force_ttl is not None: ttl = self.force_ttl", "# Class try: rdclass = dns.rdataclass.from_text(token.value) 
token = self.tok.get() if", "roffset, rwidth, _ = self._parse_modify(rhs) for i in range(start, stop", "to the *origin*; if ``False`` then any relative names in", "else: token = self.tok.get() if token.is_eol_or_eof(): # treat leading WS", "else: raise dns.exception.SyntaxError( \"Unknown zone file directive '\" + c", "def _set_origin(self, origin): pass class RRSetsReaderManager(dns.transaction.TransactionManager): def __init__(self, origin=dns.name.root, relativize=False,", "TTL if self.force_ttl is not None: ttl = self.force_ttl self.last_ttl", "names g1 = is_generate1.match(side) if g1: mod, sign, offset, width,", "was found at the zone origin \"\"\" try: while 1:", "by EOL/EOF as if they were EOL/EOF. return self.tok.unget(token) name", "while 1: token = self.tok.get(True, True) if token.is_eof(): if self.current_file", "name = dns.name.from_text(name, origin, idna_codec) if isinstance(ttl, str): ttl =", "is exclusive if lsign == '+': lindex = i +", "(required) try: start, stop, step = dns.grange.from_text(token.value) token = self.tok.get()", "if isinstance(ttl, str): ttl = dns.ttl.from_text(ttl) if isinstance(default_ttl, str): default_ttl", "txn.manager.origin_information() self.current_origin = self.zone_origin self.last_ttl = 0 self.last_ttl_known = False", "input must not specify a class. If ``None``, then the", "is None: raise UnknownOrigin token = self.tok.get(want_leading=True) if not token.is_whitespace():", "default TTL value\") if self.default_ttl_known: ttl = self.default_ttl elif self.last_ttl_known:", "value will be used. if ``None``, then if the TTL", "rdataset.covers)] = rdataset def _delete_name(self, name): # First remove any", "have a TTL on the SOA, set it! 
ttl =", "self.rdatasets.items(): rrset = dns.rrset.RRset(name, rdataset.rdclass, rdataset.rdtype, rdataset.covers) rrset.update(rdataset) rrsets.append(rrset) self.manager.set_rrsets(rrsets)", "'d' if base != 'd': raise NotImplementedError() return mod, sign,", "isinstance(name, str): name = dns.name.from_text(name, origin, idna_codec) if isinstance(ttl, str):", "OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF", "dnspython does not do IDNA for names in rdata, as", "not forced and is not specified, then this value will", "is parsed. self.default_ttl = rd.minimum self.default_ttl_known = True if ttl", "force_rdtype self.txn.check_put_rdataset(_check_cname_and_other_data) def _eat_line(self): while 1: token = self.tok.get() if", "= dns.rdatatype.RdataType.make(rdtype) manager = RRSetsReaderManager(origin, relativize, default_rdclass) with manager.writer(True) as", "as that may be convenient when cutting and pasting. *default_rdclass*,", "raise dns.exception.SyntaxError # lhs (required) try: lhs = token.value token", "token = self.tok.get() # Range (required) try: start, stop, step", "OF THIS SOFTWARE. \"\"\"DNS Zones.\"\"\" import re import sys import", "relativize self.rdclass = rdclass self.rrsets = [] def writer(self, replacement=False):", "rdataset is neutral, so # adding the rdataset is ok", "if not name.is_subdomain(self.zone_origin): self._eat_line() return if self.relativize: name = name.relativize(self.zone_origin)", "not ``None``, the the TTL is forced to be the", "= force_rdtype self.txn.check_put_rdataset(_check_cname_and_other_data) def _eat_line(self): while 1: token = self.tok.get()", "used. *rdclass*, a ``dns.rdataclass.RdataClass``, string, or ``None``. If not ``None``,", "specifies the IDNA encoder/decoder. 
If ``None``, the default IDNA 2003", "not do IDNA for names in rdata, as there is", "self.tok = dns.tokenizer.Tokenizer(self.current_file, filename) self.current_origin = new_origin elif c ==", "else: token = self._get_identifier() try: rdclass = dns.rdataclass.from_text(token.value) except dns.exception.SyntaxError:", "'': sign = '+' base = 'd' if not (g1", "or string. The class of the returned rrsets. *rdtype*, a", "range lhs [ttl] [class] type rhs [ comment ] \"\"\"Process", "documentation for any purpose with or without fee is hereby", "# the dollar sign. They are in the form: ${offset[,width[,base]]}.", "_eat_line(self): while 1: token = self.tok.get() if token.is_eol_or_eof(): break def", "token = self._get_identifier() try: rdtype = dns.rdatatype.from_text(token.value) except Exception: raise", "dns.name.empty else: effective = self.origin return (self.origin, self.relativize, effective) def", "def _put_rdataset(self, name, rdataset): self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] = rdataset def", "in a group so we can replace it # with", "= '+' width = 0 base = 'd' g3 =", "self.current_origin, self.last_name, self.current_file, self.last_ttl, self.last_ttl_known, self.default_ttl, self.default_ttl_known)) self.current_file = open(filename,", "None: raise UnknownOrigin token = self.tok.get() # Range (required) try:", "by appending the *origin*. \"\"\" if isinstance(origin, str): origin =", "adding the rdataset is ok class Reader: \"\"\"Read a DNS", "and the input must not specify a class. 
If ``None``,", "base 'd', so the last value # in the tuple", "token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError return token", "# Range (required) try: start, stop, step = dns.grange.from_text(token.value) token", "the *origin*; if ``False`` then any relative names in the", "DNS zone file.\"\"\" token = None # Name if self.force_name", "[ comment ] \"\"\"Process one line containing the GENERATE statement", "input are made absolute by appending the *origin*. \"\"\" if", "not forced an error will occur if the TTL is", "= i - int(roffset) elif rsign == '+': rindex =", "= dns.rdatatype.from_text(token.value) except Exception: raise dns.exception.SyntaxError( \"unknown rdatatype '%s'\" %", "if lsign == '+': lindex = i + int(loffset) elif", "def _eat_line(self): while 1: token = self.tok.get() if token.is_eol_or_eof(): break", "the zone origin \"\"\" try: while 1: token = self.tok.get(True,", "elif c == '$GENERATE': self._generate_line() else: raise dns.exception.SyntaxError( \"Unknown zone", "rdataset): rdataset_kind = dns.node.NodeKind.classify_rdataset(rdataset) node = txn.get_node(name) if node is", "after # the dollar sign. They are in the form:", "'': sign = '+' width = 0 base = 'd'", "manager = RRSetsReaderManager(origin, relativize, default_rdclass) with manager.writer(True) as txn: tok", "# with ''. is_generate1 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+),(\\d+),(.)}).*$\") is_generate2 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+)}).*$\") is_generate3", "rdatasets return node def _put_rdataset(self, name, rdataset): self.rdatasets[(name, rdataset.rdtype, rdataset.covers)]", "self.tok.unget(token) # Class if self.force_rdclass is not None: rdclass =", "lmod, lsign, loffset, lwidth, _ = self._parse_modify(lhs) rmod, rsign, roffset,", "WS followed by EOL/EOF as if they were EOL/EOF. return", "= force_name self.force_ttl = force_ttl self.force_rdclass = force_rdclass self.force_rdtype =", "be convenient when cutting and pasting. 
*default_rdclass*, a ``dns.rdataclass.RdataClass`` or", "in the tuple _parse_modify returns is ignored lmod, lsign, loffset,", "object. @raises dns.zone.NoSOA: No SOA RR was found at the", "allow_directives self.force_name = force_name self.force_ttl = force_ttl self.force_rdclass = force_rdclass", "else: token = self._get_identifier() try: rdtype = dns.rdatatype.from_text(token.value) except Exception:", "'$TTL': token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError(\"bad $TTL\")", "SOFTWARE. \"\"\"DNS Zones.\"\"\" import re import sys import dns.exception import", "the input to process. *name*, a string, ``dns.name.Name``, or ``None``,", "= re.compile(r\"^.*\\$({(\\+|-?)(\\d+),(\\d+)}).*$\") # Sometimes there are modifiers in the hostname.", "self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except Exception: raise dns.exception.SyntaxError", "node = txn.get_node(name) if node is None: # empty nodes", "force_rdtype=None, default_ttl=None): self.tok = tok (self.zone_origin, self.relativize, _) = \\", "are in the form: ${offset[,width[,base]]}. # Make names g1 =", "try: while 1: token = self.tok.get(True, True) if token.is_eof(): if", "self.last_ttl, self.last_ttl_known, self.default_ttl, self.default_ttl_known) = self.saved_state.pop(-1) continue break elif token.is_eol():", "new_origin elif c == '$GENERATE': self._generate_line() else: raise dns.exception.SyntaxError( \"Unknown", "\"\"\"DNS Zones.\"\"\" import re import sys import dns.exception import dns.name", "return token def _rr_line(self): \"\"\"Process one line from a DNS", "the class is forced to the specified value, and the", "self.rrsets = [] def writer(self, replacement=False): assert replacement is True", "self.allow_include = allow_include self.allow_directives = allow_directives self.force_name = force_name self.force_ttl", "owner name of the rrset. If not ``None``, then the", "if not (g1 or g2 or g3): mod = ''", "and build a zone object. 
@raises dns.zone.NoSOA: No SOA RR", "node = dns.node.Node() node.rdatasets = rdatasets return node def _put_rdataset(self,", "not ``None``, then the owner name is \"forced\", and the", "g2 or g3): mod = '' sign = '+' offset", "UnknownOrigin token = self.tok.get(want_leading=True) if not token.is_whitespace(): self.last_name = self.tok.as_name(token,", "syntax errors so that we can emit # helpful filename:line", "= dns.rdataclass.from_text(token.value) token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError", "ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES", "= sys.exc_info()[:2] raise dns.exception.SyntaxError(\"caught exception %s: %s\" % (str(ty), str(va)))", "= self._parse_modify(lhs) rmod, rsign, roffset, rwidth, _ = self._parse_modify(rhs) for", "= '' sign = '+' offset = 0 width =", "dns.node.NodeKind.REGULAR: raise CNAMEAndOtherData('rdataset type is not compatible with a '", "# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY", "``None``, then any owner names are allowed and must be", "behavior inherits the zone default # TTL from the SOA", "self.tok.get_eol() elif not token.is_eol_or_eof(): raise dns.exception.SyntaxError( \"bad origin in $INCLUDE\")", "_) in self.rdatasets: if n == name: return True return", "Exception: raise dns.exception.SyntaxError(\"unknown rdatatype '%s'\" % token.value) # rhs (required)", "rdataset in self.rdatasets.items(): if name == rdataset_name: rdatasets.append(rdataset) if len(rdatasets)", "# Name if self.force_name is not None: name = self.force_name", "token.is_eol(): continue elif token.is_comment(): self.tok.get_eol() continue elif token.value[0] == '$'", "mod, sign, offset = g2.groups() if sign == '': sign", "import dns.name import dns.node import dns.rdataclass import dns.rdatatype import dns.rdata", "we didn't have a TTL on the SOA, set it!", "== name: return True return False def _changed(self): return len(self.rdatasets)", "rrsets. 
*rdtype*, a ``dns.rdatatype.RdataType``, string, or ``None``. If not ``None``,", "rdclass = self.force_rdclass else: token = self._get_identifier() try: rdclass =", "# SOA is parsed. self.default_ttl = rd.minimum self.default_ttl_known = True", "{} def _get_rdataset(self, name, rdtype, covers): return self.rdatasets.get((name, rdtype, covers))", "= dns.rdata.from_text(rdclass, rdtype, rdata, self.current_origin, self.relativize, self.zone_origin) except dns.exception.SyntaxError: #", "and also the origin to relativize to if *relativize* is", "allows the user to optionally type a class as that", "def set_rrsets(self, rrsets): self.rrsets = rrsets def read_rrsets(text, name=None, ttl=None,", "Reader: \"\"\"Read a DNS zone file into a transaction.\"\"\" def", "open(filename, 'r') self.tok = dns.tokenizer.Tokenizer(self.current_file, filename) self.current_origin = new_origin elif", "default_ttl is None: self.default_ttl = 0 self.default_ttl_known = False else:", "\"AS IS\" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD", "``None``, the default IDNA 2003 encoder/decoder is used. Note that", "True return False def _changed(self): return len(self.rdatasets) > 0 def", "Exception: raise dns.exception.SyntaxError # lhs (required) try: lhs = token.value", "_ = self._parse_modify(lhs) rmod, rsign, roffset, rwidth, _ = self._parse_modify(rhs)", "self.relativize: effective = dns.name.empty else: effective = self.origin return (self.origin,", "self.last_ttl_known, self.default_ttl, self.default_ttl_known) = self.saved_state.pop(-1) continue break elif token.is_eol(): continue", "of the rrset. 
If not ``None``, then the owner name", "g1 = is_generate1.match(side) if g1: mod, sign, offset, width, base", "WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED", "= dns.rdataclass.RdataClass.make(rdclass) default_rdclass = dns.rdataclass.RdataClass.make(default_rdclass) if rdtype is not None:", "self.default_ttl_known = True self.last_name = self.current_origin self.zone_rdclass = rdclass self.txn", "the specified value and the input must not specify a", "is neutral, so # adding the rdataset is ok class", "allow_include=False, allow_directives=True, force_name=None, force_ttl=None, force_rdclass=None, force_rdtype=None, default_ttl=None): self.tok = tok", "self.tok.get(True, True) if token.is_eof(): if self.current_file is not None: self.current_file.close()", "token = self.tok.get() if token.is_eol_or_eof(): break def _get_identifier(self): token =", "there are modifiers in the hostname. These come after #", "self.current_file is not None: self.current_file.close() if len(self.saved_state) > 0: (self.tok,", "for (n, _, _) in self.rdatasets: if n == name:", "name: return True return False def _changed(self): return len(self.rdatasets) >", "_parse_modify(self, side): # Here we catch everything in '{' '}'", "if name == rdataset_name: rdatasets.append(rdataset) if len(rdatasets) == 0: return", "LOSS OF USE, DATA OR PROFITS, WHETHER IN AN #", "If ``None``, then the input may specify a class that", "is_generate2.match(side) if g2: mod, sign, offset = g2.groups() if sign", "rdtype, self.tok, self.current_origin, self.relativize, self.zone_origin) except dns.exception.SyntaxError: # Catch and", "SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT,", "'%s'\" % token.value) # rhs (required) rhs = token.value #", "this software and its # documentation for any purpose with", "dns.grange class UnknownOrigin(dns.exception.DNSException): \"\"\"Unknown origin\"\"\" class CNAMEAndOtherData(dns.exception.DNSException): \"\"\"A node has", "from the specified 
text, possibly subject to restrictions. *text*, a", "We had to wait until now to do this as", "= force_ttl if default_ttl is None: self.default_ttl = 0 self.default_ttl_known", "relativize=False, rdclass=dns.rdataclass.IN): self.origin = origin self.relativize = relativize self.rdclass =", "to if *relativize* is ``True``. *relativize*, a bool. If ``True``,", "_check_cname_and_other_data(txn, name, rdataset): rdataset_kind = dns.node.NodeKind.classify_rdataset(rdataset) node = txn.get_node(name) if", "hereby granted, # provided that the above copyright notice and", "== '$TTL': token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError(\"bad", "% token.value) try: rd = dns.rdata.from_text(rdclass, rdtype, self.tok, self.current_origin, self.relativize,", "== '': sign = '+' base = 'd' if not", "then any owner names are allowed and must be present", "= RRSetsReaderManager(origin, relativize, default_rdclass) with manager.writer(True) as txn: tok =", "The code currently only supports base 'd', so the last", "raise dns.exception.SyntaxError return token def _rr_line(self): \"\"\"Process one line from", "all of the time. # We convert them to syntax", "_delete_rdataset(self, name, rdtype, covers): try: del self.rdatasets[(name, rdtype, covers)] except", "if sign == '': sign = '+' base = 'd'", "must not specify a class. If ``None``, then the input", "is no IDNA zonefile format. *origin*, a string, ``dns.name.Name``, or", "_rr_line(self): \"\"\"Process one line from a DNS zone file.\"\"\" token", "of ISC license # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.", "default # TTL from the SOA minttl if no $TTL", "specified, then this value will be used. if ``None``, then", "almost all of the time. 
# We convert them to", "= self.force_rdtype else: token = self._get_identifier() try: rdtype = dns.rdatatype.from_text(token.value)", "then if the TTL is not forced an error will", "= rhs.replace('$%s' % (rmod), rzfindex) self.last_name = dns.name.from_text(name, self.current_origin, self.tok.idna_codec)", "isinstance(ttl, str): ttl = dns.ttl.from_text(ttl) if isinstance(default_ttl, str): default_ttl =", "dns.rdata.from_text(rdclass, rdtype, self.tok, self.current_origin, self.relativize, self.zone_origin) except dns.exception.SyntaxError: # Catch", "= self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError(\"bad $TTL\") self.default_ttl =", "self.relativize, effective) def set_rrsets(self, rrsets): self.rrsets = rrsets def read_rrsets(text,", "rdclass = self.zone_rdclass self.tok.unget(token) if rdclass != self.zone_rdclass: raise dns.exception.SyntaxError(\"RR", "True token = None except dns.ttl.BadTTL: if self.default_ttl_known: ttl =", "form: ${offset[,width[,base]]}. # Make names g1 = is_generate1.match(side) if g1:", "lsign == '+': lindex = i + int(loffset) elif lsign", "process. 
*name*, a string, ``dns.name.Name``, or ``None``, is the owner", "import dns.rrset import dns.tokenizer import dns.transaction import dns.ttl import dns.grange", "token def _rr_line(self): \"\"\"Process one line from a DNS zone", "sign = '+' g2 = is_generate2.match(side) if g2: mod, sign,", "supports base 'd', so the last value # in the", "None: rdtype = dns.rdatatype.RdataType.make(rdtype) manager = RRSetsReaderManager(origin, relativize, default_rdclass) with", "elif lsign == '-': lindex = i - int(loffset) if", "force_name=None, force_ttl=None, force_rdclass=None, force_rdtype=None, default_ttl=None): self.tok = tok (self.zone_origin, self.relativize,", "token.is_identifier(): raise dns.exception.SyntaxError except Exception: raise dns.exception.SyntaxError # TTL try:", "= '+' g2 = is_generate2.match(side) if g2: mod, sign, offset", "self.origin return (self.origin, self.relativize, effective) def set_rrsets(self, rrsets): self.rrsets =", "TTL can be inferred from its minimum. if ttl is", "# Copyright (C) Dnspython Contributors, see LICENSE for text of", "restrictions. 
*text*, a file object or a string, is the", "we catch everything in '{' '}' in a group so", "def _get_rdataset(self, name, rdtype, covers): return self.rdatasets.get((name, rdtype, covers)) def", "self.default_ttl_known): raise dns.exception.SyntaxError(\"Missing default TTL value\") if self.default_ttl_known: ttl =", "to optionally type a class as that may be convenient", "read(self): \"\"\"Read a DNS zone file and build a zone", "for key in remove: del self.rdatasets[key] def _delete_rdataset(self, name, rdtype,", "= None try: ttl = dns.ttl.from_text(token.value) self.last_ttl = ttl self.last_ttl_known", "``dns.name.Name``, or ``None``, is the owner name of the rrset.", "self.relativize, _) = \\ txn.manager.origin_information() self.current_origin = self.zone_origin self.last_ttl =", "= True self.tok.get_eol() elif c == '$ORIGIN': self.current_origin = self.tok.get_name()", "name): for (n, _, _) in self.rdatasets: if n ==", "IDNA for names in rdata, as there is no IDNA", "== '-': rindex = i - int(roffset) elif rsign ==", "new_origin =\\ dns.name.from_text(token.value, self.current_origin, self.tok.idna_codec) self.tok.get_eol() elif not token.is_eol_or_eof(): raise", "self.current_file, self.last_ttl, self.last_ttl_known, self.default_ttl, self.default_ttl_known)) self.current_file = open(filename, 'r') self.tok", "_name_exists(self, name): for (n, _, _) in self.rdatasets: if n", "replace it # with ''. is_generate1 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+),(\\d+),(.)}).*$\") is_generate2 =", "helpful filename:line info. 
(ty, va) = sys.exc_info()[:2] raise dns.exception.SyntaxError(\"caught exception", "% (str(ty), str(va))) self.txn.add(name, ttl, rd) def read(self): \"\"\"Read a", "This is not strictly # correct, but it is correct", "tuple _parse_modify returns is ignored lmod, lsign, loffset, lwidth, _", "\"caught exception {}: {}\".format(str(ty), str(va))) if not self.default_ttl_known and rdtype", "'d' g3 = is_generate3.match(side) if g3: mod, sign, offset, width", "THE SOFTWARE IS PROVIDED \"AS IS\" AND NOMINUM DISCLAIMS ALL", "see LICENSE for text of ISC license # Copyright (C)", "errors. This is not strictly # correct, but it is", "= self.current_origin self.saved_state.append((self.tok, self.current_origin, self.last_name, self.current_file, self.last_ttl, self.last_ttl_known, self.default_ttl, self.default_ttl_known))", "rdataset.rdclass, rdataset.rdtype, rdataset.covers) rrset.update(rdataset) rrsets.append(rrset) self.manager.set_rrsets(rrsets) def _set_origin(self, origin): pass", "RR's # own TTL can be inferred from its minimum.", "import re import sys import dns.exception import dns.name import dns.node", "= self.tok.get() filename = token.value token = self.tok.get() if token.is_identifier():", "rdclass = self.zone_rdclass if rdclass != self.zone_rdclass: raise dns.exception.SyntaxError(\"RR class", "If it is not specified, then the *default_ttl* will be", "be present for each RR. *default_ttl*, an ``int``, string, or", "None: default_ttl = force_ttl if default_ttl is None: self.default_ttl =", "int(roffset) elif rsign == '+': rindex = i + int(roffset)", "return len(self.rdatasets) > 0 def _end_transaction(self, commit): if commit and", "line containing the GENERATE statement from a DNS zone file.\"\"\"", "the SOA, set it! ttl = rd.minimum # TTL check.", "token.value token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except", "input must not specify an owner name. 
If ``None``, then", "= i + int(loffset) elif lsign == '-': lindex =", "is None: # if we didn't have a TTL on", "absolute by appending the *origin*. \"\"\" if isinstance(origin, str): origin", "if isinstance(default_ttl, str): default_ttl = dns.ttl.from_text(default_ttl) if rdclass is not", "OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT #", "= False if force_ttl is not None: default_ttl = force_ttl", "for (rdataset_name, _, _), rdataset in self.rdatasets.items(): if name ==", "== '': sign = '+' width = 0 base =", "forced an error will occur if the TTL is not", "== '$' and self.allow_directives: c = token.value.upper() if c ==", "name): rdatasets = [] for (rdataset_name, _, _), rdataset in", "or ``None``, is the owner name of the rrset. If", "allowed and must be present in the input. *ttl*, an", "OUT # OF OR IN CONNECTION WITH THE USE OR", "least one of the node and the rdataset is neutral,", "= 0 base = 'd' if base != 'd': raise", "a group so we can replace it # with ''.", "SOA RR was found at the zone origin @raises dns.zone.NoNS:", "= rrsets def read_rrsets(text, name=None, ttl=None, rdclass=dns.rdataclass.IN, default_rdclass=dns.rdataclass.IN, rdtype=None, default_ttl=None,", "with differing classes; specifying ``None`` for the class simply allows", "False def _changed(self): return len(self.rdatasets) > 0 def _end_transaction(self, commit):", "= name.relativize(self.zone_origin) try: rd = dns.rdata.from_text(rdclass, rdtype, rdata, self.current_origin, self.relativize,", "``dns.rdataclass.RdataClass``, string, or ``None``. If not ``None``, then the class", "name = self.force_name else: if self.current_origin is None: raise UnknownOrigin", "input may specify a class that matches *default_rdclass*. Note that", "the owner name is \"forced\", and the input must not", "TTL is not specified. 
*idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA", "# Type try: rdtype = dns.rdatatype.from_text(token.value) token = self.tok.get() if", "class\") # Type if self.force_rdtype is not None: rdtype =", "self.tok.get() if token.is_identifier(): new_origin =\\ dns.name.from_text(token.value, self.current_origin, self.tok.idna_codec) self.tok.get_eol() elif", "isinstance(default_ttl, str): default_ttl = dns.ttl.from_text(default_ttl) if rdclass is not None:", "not None: rdclass = dns.rdataclass.RdataClass.make(rdclass) default_rdclass = dns.rdataclass.RdataClass.make(default_rdclass) if rdtype", "rdtype = dns.rdatatype.from_text(token.value) except Exception: raise dns.exception.SyntaxError( \"unknown rdatatype '%s'\"", "self.tok.idna_codec) name = self.last_name if not name.is_subdomain(self.zone_origin): self._eat_line() return if", "MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE", "is not zone's class\") # Type try: rdtype = dns.rdatatype.from_text(token.value)", "``dns.rdataclass.RdataClass`` or string. The class of the returned rrsets. *rdtype*,", "= rdataset def _delete_name(self, name): # First remove any changes", "value, and the input must not specify a class. If", "base = 'd' g3 = is_generate3.match(side) if g3: mod, sign,", "if the TTL is not forced an error will occur", "> 0 def _end_transaction(self, commit): if commit and self._changed(): rrsets", "used. Note that codecs only apply to the owner name;", "a type must be present for each RR. 
*default_ttl*, an", "%s: %s\" % (str(ty), str(va))) self.txn.add(name, ttl, rd) def read(self):", "it is not possible to return rrsets with differing classes;", "dns.name.from_text(origin, dns.name.root, idna_codec) if isinstance(name, str): name = dns.name.from_text(name, origin,", "elif c == '$ORIGIN': self.current_origin = self.tok.get_name() self.tok.get_eol() if self.zone_origin", "txn, allow_directives=False, force_name=name, force_ttl=ttl, force_rdclass=rdclass, force_rdtype=rdtype, default_ttl=default_ttl) reader.read() return manager.rrsets", "= self.force_name else: if self.current_origin is None: raise UnknownOrigin token", "Exception: # All exceptions that occur in the processing of", "= None # Name if self.force_name is not None: name", "to return rrsets with differing classes; specifying ``None`` for the", "self.txn.check_put_rdataset(_check_cname_and_other_data) def _eat_line(self): while 1: token = self.tok.get() if token.is_eol_or_eof():", "token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except dns.ttl.BadTTL:", "rdtype, covers)) def _get_node(self, name): rdatasets = [] for (rdataset_name,", "self.tok.unget(token) name = self.last_name if not name.is_subdomain(self.zone_origin): self._eat_line() return if", "# # Permission to use, copy, modify, and distribute this", "step): # +1 because bind is inclusive and python is", "zone file into a transaction.\"\"\" def __init__(self, tok, rdclass, txn,", "copy, modify, and distribute this software and its # documentation", "def writer(self, replacement=False): assert replacement is True return RRsetsReaderTransaction(self, True,", "token.is_eof(): if self.current_file is not None: self.current_file.close() if len(self.saved_state) >", "self.saved_state.append((self.tok, self.current_origin, self.last_name, self.current_file, self.last_ttl, self.last_ttl_known, self.default_ttl, self.default_ttl_known)) self.current_file =", "Catch and reraise. 
raise except Exception: # All exceptions that", "if self.relativize: name = name.relativize(self.zone_origin) try: rd = dns.rdata.from_text(rdclass, rdtype,", "name=None, ttl=None, rdclass=dns.rdataclass.IN, default_rdclass=dns.rdataclass.IN, rdtype=None, default_ttl=None, idna_codec=None, origin=dns.name.root, relativize=False): \"\"\"Read", "the node and the rdataset is neutral, so # adding", "= g2.groups() if sign == '': sign = '+' width", "class CNAMEAndOtherData(dns.exception.DNSException): \"\"\"A node has a CNAME and other data\"\"\"", "del self.rdatasets[(name, rdtype, covers)] except KeyError: pass def _name_exists(self, name):", "if they were EOL/EOF. return self.tok.unget(token) name = self.last_name if", "pre-RFC2308 and pre-BIND9 behavior inherits the zone default # TTL", "= '+' offset = 0 width = 0 base =", "names are relativized to the *origin*; if ``False`` then any", "ttl is None: # if we didn't have a TTL", "be the specified value and the input must not specify", "ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT", "and \\ rdataset_kind == dns.node.NodeKind.REGULAR: raise CNAMEAndOtherData('rdataset type is not", "rrset. If not ``None``, then the owner name is \"forced\",", "that it is not possible to return rrsets with differing", "def _delete_rdataset(self, name, rdtype, covers): try: del self.rdatasets[(name, rdtype, covers)]", "in remove: del self.rdatasets[key] def _delete_rdataset(self, name, rdtype, covers): try:", "copies. # # THE SOFTWARE IS PROVIDED \"AS IS\" AND", "not specify a type. If ``None``, then a type must", "*default_rdclass*, a ``dns.rdataclass.RdataClass`` or string. The class of the returned", "0 base = 'd' if base != 'd': raise NotImplementedError()", "replacement=False): assert replacement is True return RRsetsReaderTransaction(self, True, False) def", "NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL,", "as there is no IDNA zonefile format. 
*origin*, a string,", "ttl = None try: ttl = dns.ttl.from_text(token.value) self.last_ttl = ttl", "while 1: token = self.tok.get() if token.is_eol_or_eof(): break def _get_identifier(self):", "pasting. *default_rdclass*, a ``dns.rdataclass.RdataClass`` or string. The class of the", "it is correct almost all of the time. # We", "any relative names in the input, and also the origin", "encoder/decoder is used. Note that codecs only apply to the", "\"\"\"A node has a CNAME and other data\"\"\" def _check_cname_and_other_data(txn,", "dns.rdataclass.from_text(token.value) token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except", "self.default_ttl elif self.last_ttl_known: ttl = self.last_ttl # Class try: rdclass", "dns.node import dns.rdataclass import dns.rdatatype import dns.rdata import dns.rdtypes.ANY.SOA import", "=\\ dns.name.from_text(token.value, self.current_origin, self.tok.idna_codec) self.tok.get_eol() elif not token.is_eol_or_eof(): raise dns.exception.SyntaxError(", "def _check_cname_and_other_data(txn, name, rdataset): rdataset_kind = dns.node.NodeKind.classify_rdataset(rdataset) node = txn.get_node(name)", "_) = \\ txn.manager.origin_information() self.current_origin = self.zone_origin self.last_ttl = 0", "``None``, then a TTL may be specified in the input.", "PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR", "ttl = rd.minimum # TTL check. We had to wait", "= None except dns.ttl.BadTTL: if self.default_ttl_known: ttl = self.default_ttl elif", "@raises dns.zone.NoNS: No NS RRset was found at the zone", "sign. They are in the form: ${offset[,width[,base]]}. 
# Make names", "= self.last_ttl # Class try: rdclass = dns.rdataclass.from_text(token.value) token =", "token = self.tok.get(True, True) if token.is_eof(): if self.current_file is not", "origin = dns.name.from_text(origin, dns.name.root, idna_codec) if isinstance(name, str): name =", "zone origin @raises dns.zone.NoNS: No NS RRset was found at", "dns.name.from_text(name, self.current_origin, self.tok.idna_codec) name = self.last_name if not name.is_subdomain(self.zone_origin): self._eat_line()", "not token.is_identifier(): raise dns.exception.SyntaxError return token def _rr_line(self): \"\"\"Process one", "_, _), rdataset in self.rdatasets.items(): rrset = dns.rrset.RRset(name, rdataset.rdclass, rdataset.rdtype,", "inherits the zone default # TTL from the SOA minttl", "file directive '\" + c + \"'\") continue self.tok.unget(token) self._rr_line()", "sign == '': sign = '+' base = 'd' if", "token = self._get_identifier() ttl = None try: ttl = dns.ttl.from_text(token.value)", "SOA RR's # own TTL can be inferred from its", "range(start, stop + 1, step): # +1 because bind is", "= tok (self.zone_origin, self.relativize, _) = \\ txn.manager.origin_information() self.current_origin =", "be used. *rdclass*, a ``dns.rdataclass.RdataClass``, string, or ``None``. If not", "compatible with a ' 'regular data node') # Otherwise at", "dns.node.Node() node.rdatasets = rdatasets return node def _put_rdataset(self, name, rdataset):", "'%s'\" % token.value) try: rd = dns.rdata.from_text(rdclass, rdtype, self.tok, self.current_origin,", "TTL check. 
We had to wait until now to do", "TTL value\") if self.default_ttl_known: ttl = self.default_ttl elif self.last_ttl_known: ttl", "if g1: mod, sign, offset, width, base = g1.groups() if", "then any relative names in the input are made absolute", "rd = dns.rdata.from_text(rdclass, rdtype, rdata, self.current_origin, self.relativize, self.zone_origin) except dns.exception.SyntaxError:", "+ c + \"'\") continue self.tok.unget(token) self._rr_line() except dns.exception.SyntaxError as", "self.allow_directives: c = token.value.upper() if c == '$TTL': token =", "does not do IDNA for names in rdata, as there", "is not None: default_ttl = force_ttl if default_ttl is None:", "c == '$INCLUDE' and self.allow_include: token = self.tok.get() filename =", "g1: mod, sign, offset, width, base = g1.groups() if sign", "int(loffset) elif lsign == '-': lindex = i - int(loffset)", "(rmod), rzfindex) self.last_name = dns.name.from_text(name, self.current_origin, self.tok.idna_codec) name = self.last_name", "a file object or a string, is the input to", "if not self.default_ttl_known and rdtype == dns.rdatatype.SOA: # The pre-RFC2308", "time. # We convert them to syntax errors so that", "txn, allow_include=False, allow_directives=True, force_name=None, force_ttl=None, force_rdclass=None, force_rdtype=None, default_ttl=None): self.tok =", "ISC license # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. #", "dns.ttl.from_text(token.value) self.last_ttl = ttl self.last_ttl_known = True token = None", "object or a string, is the input to process. 
*name*,", "raise except Exception: # All exceptions that occur in the", "'': sign = '+' g2 = is_generate2.match(side) if g2: mod,", "specified value, and the input must not specify a type.", "inclusive and python is exclusive if lsign == '+': lindex", "= self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except dns.exception.SyntaxError: raise", "break def _get_identifier(self): token = self.tok.get() if not token.is_identifier(): raise", "self.default_ttl = 0 self.default_ttl_known = False else: self.default_ttl = default_ttl", "``int``, string, or None. If not ``None``, the the TTL", "is_generate1 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+),(\\d+),(.)}).*$\") is_generate2 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+)}).*$\") is_generate3 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+),(\\d+)}).*$\") #", "else: if self.current_origin is None: raise UnknownOrigin token = self.tok.get(want_leading=True)", "its # documentation for any purpose with or without fee", "the input must not specify a class. If ``None``, then", "ttl self.last_ttl_known = True token = self.tok.get() if not token.is_identifier():", "are relativized to the *origin*; if ``False`` then any relative", "= dns.rrset.RRset(name, rdataset.rdclass, rdataset.rdtype, rdataset.covers) rrset.update(rdataset) rrsets.append(rrset) self.manager.set_rrsets(rrsets) def _set_origin(self,", "self.force_rdtype is not None: rdtype = self.force_rdtype else: token =", "self.last_name if not name.is_subdomain(self.zone_origin): self._eat_line() return if self.relativize: name =", "not name.is_subdomain(self.zone_origin): self._eat_line() return if self.relativize: name = name.relativize(self.zone_origin) try:", "encoder/decoder. 
If ``None``, the default IDNA 2003 encoder/decoder is used.", "except Exception: raise dns.exception.SyntaxError # TTL try: ttl = dns.ttl.from_text(token.value)", "'+' offset = 0 width = 0 base = 'd'", "TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY", "is not None: self.current_file.close() if len(self.saved_state) > 0: (self.tok, self.current_origin,", "with a ' 'CNAME node') elif node_kind == dns.node.NodeKind.REGULAR and", "token = None # Name if self.force_name is not None:", "dns.rdataclass.from_text(token.value) except dns.exception.SyntaxError: raise except Exception: rdclass = self.zone_rdclass self.tok.unget(token)", "rd) def read(self): \"\"\"Read a DNS zone file and build", "self.saved_state.pop(-1) continue break elif token.is_eol(): continue elif token.is_comment(): self.tok.get_eol() continue", "string, or ``None``. If not ``None``, then the type is", "node_kind == dns.node.NodeKind.REGULAR and \\ rdataset_kind == dns.node.NodeKind.CNAME: raise CNAMEAndOtherData('CNAME", "minimum. if ttl is None: raise dns.exception.SyntaxError(\"Missing default TTL value\")", "0 self.last_ttl_known = False if force_ttl is not None: default_ttl", "str(rindex).zfill(int(rwidth)) name = lhs.replace('$%s' % (lmod), lzfindex) rdata = rhs.replace('$%s'", "dns.exception.SyntaxError(\"RR class is not zone's class\") # Type if self.force_rdtype", "commit and self._changed(): rrsets = [] for (name, _, _),", "rdataset in self.rdatasets.items(): rrset = dns.rrset.RRset(name, rdataset.rdclass, rdataset.rdtype, rdataset.covers) rrset.update(rdataset)", "rrset = dns.rrset.RRset(name, rdataset.rdclass, rdataset.rdtype, rdataset.covers) rrset.update(rdataset) rrsets.append(rrset) self.manager.set_rrsets(rrsets) def", "if we didn't have a TTL on the SOA, set", "come after # the dollar sign. They are in the", "notice and this permission notice # appear in all copies.", "the rdataset is neutral, so # adding the rdataset is", "the returned rrsets. 
*rdtype*, a ``dns.rdatatype.RdataType``, string, or ``None``. If", "exception %s: %s\" % (str(ty), str(va))) self.txn.add(name, ttl, rd) def", "idna_codec=None, origin=dns.name.root, relativize=False): \"\"\"Read one or more rrsets from the", "rdclass is not None: rdclass = dns.rdataclass.RdataClass.make(rdclass) default_rdclass = dns.rdataclass.RdataClass.make(default_rdclass)", "not compatible with a ' 'regular data node') # Otherwise", "base = 'd' if not (g1 or g2 or g3):", "lwidth, _ = self._parse_modify(lhs) rmod, rsign, roffset, rwidth, _ =", "% token.value) # rhs (required) rhs = token.value # The", "now to do this as the SOA RR's # own", "= self.force_rdclass else: token = self._get_identifier() try: rdclass = dns.rdataclass.from_text(token.value)", "= 'd' g3 = is_generate3.match(side) if g3: mod, sign, offset,", "origin \"\"\" try: while 1: token = self.tok.get(True, True) if", "IDNA zonefile format. *origin*, a string, ``dns.name.Name``, or ``None``, is", "ok class Reader: \"\"\"Read a DNS zone file into a", "g1.groups() if sign == '': sign = '+' g2 =", "del self.rdatasets[key] def _delete_rdataset(self, name, rdtype, covers): try: del self.rdatasets[(name,", "dns.rrset.RRset(name, rdataset.rdclass, rdataset.rdtype, rdataset.covers) rrset.update(rdataset) rrsets.append(rrset) self.manager.set_rrsets(rrsets) def _set_origin(self, origin):", "raise dns.exception.SyntaxError(\"RR class is not zone's class\") # Type try:", "# adding the rdataset is ok class Reader: \"\"\"Read a", "if self.current_origin is None: raise UnknownOrigin token = self.tok.get(want_leading=True) if", "import dns.transaction import dns.ttl import dns.grange class UnknownOrigin(dns.exception.DNSException): \"\"\"Unknown origin\"\"\"", "owner name. If ``None``, then any owner names are allowed", "before the # SOA is parsed. 
self.default_ttl = rd.minimum self.default_ttl_known", "tb = sys.exc_info()[2] raise ex.with_traceback(tb) from None class RRsetsReaderTransaction(dns.transaction.Transaction): def", "self.current_origin = self.tok.get_name() self.tok.get_eol() if self.zone_origin is None: self.zone_origin =", "*relativize*, a bool. If ``True``, names are relativized to the", "# THE SOFTWARE IS PROVIDED \"AS IS\" AND NOMINUM DISCLAIMS", "step = dns.grange.from_text(token.value) token = self.tok.get() if not token.is_identifier(): raise", "token.is_identifier(): new_origin =\\ dns.name.from_text(token.value, self.current_origin, self.tok.idna_codec) self.tok.get_eol() elif not token.is_eol_or_eof():", "# TTL check. We had to wait until now to", "for any relative names in the input, and also the", "with a ' 'regular data node') # Otherwise at least", "self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError return token def _rr_line(self):", "= lhs.replace('$%s' % (lmod), lzfindex) rdata = rhs.replace('$%s' % (rmod),", "tok (self.zone_origin, self.relativize, _) = \\ txn.manager.origin_information() self.current_origin = self.zone_origin", "assert not read_only super().__init__(manager, replacement, read_only) self.rdatasets = {} def", "pass class RRSetsReaderManager(dns.transaction.TransactionManager): def __init__(self, origin=dns.name.root, relativize=False, rdclass=dns.rdataclass.IN): self.origin =", "will occur if the TTL is not specified. 
*idna_codec*, a", "origin in $INCLUDE\") else: new_origin = self.current_origin self.saved_state.append((self.tok, self.current_origin, self.last_name,", "= dns.ttl.from_text(default_ttl) if rdclass is not None: rdclass = dns.rdataclass.RdataClass.make(rdclass)", "self.force_rdclass else: token = self._get_identifier() try: rdclass = dns.rdataclass.from_text(token.value) except", "raise NotImplementedError() return mod, sign, offset, width, base def _generate_line(self):", "= open(filename, 'r') self.tok = dns.tokenizer.Tokenizer(self.current_file, filename) self.current_origin = new_origin", "self.default_ttl_known)) self.current_file = open(filename, 'r') self.tok = dns.tokenizer.Tokenizer(self.current_file, filename) self.current_origin", "Exception: rdclass = self.zone_rdclass if rdclass != self.zone_rdclass: raise dns.exception.SyntaxError(\"RR", "when cutting and pasting. *default_rdclass*, a ``dns.rdataclass.RdataClass`` or string. The", "None node = dns.node.Node() node.rdatasets = rdatasets return node def", "FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR", "the form: ${offset[,width[,base]]}. # Make names g1 = is_generate1.match(side) if", "is not None: rdclass = dns.rdataclass.RdataClass.make(rdclass) default_rdclass = dns.rdataclass.RdataClass.make(default_rdclass) if", "token.is_identifier(): raise dns.exception.SyntaxError except Exception: raise dns.exception.SyntaxError(\"unknown rdatatype '%s'\" %", "empty nodes are neutral. 
return node_kind = node.classify() if node_kind", "self.current_file = None self.allow_include = allow_include self.allow_directives = allow_directives self.force_name", "``None``, is the origin for any relative names in the", "(rdataset_name, _, _), rdataset in self.rdatasets.items(): if name == rdataset_name:", "i in range(start, stop + 1, step): # +1 because", "0: return None node = dns.node.Node() node.rdatasets = rdatasets return", "def _parse_modify(self, side): # Here we catch everything in '{'", "then the class is forced to the specified value, and", "zone file.\"\"\" if self.current_origin is None: raise UnknownOrigin token =", "with ''. is_generate1 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+),(\\d+),(.)}).*$\") is_generate2 = re.compile(r\"^.*\\$({(\\+|-?)(\\d+)}).*$\") is_generate3 =", "raise dns.exception.SyntaxError # TTL try: ttl = dns.ttl.from_text(token.value) self.last_ttl =", "CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF", "continue self.tok.unget(token) self._rr_line() except dns.exception.SyntaxError as detail: (filename, line_number) =", "name, rdtype, covers): try: del self.rdatasets[(name, rdtype, covers)] except KeyError:", "self.txn.add(name, ttl, rd) def _parse_modify(self, side): # Here we catch", "rzfindex = str(rindex).zfill(int(rwidth)) name = lhs.replace('$%s' % (lmod), lzfindex) rdata", "not token.is_identifier(): raise dns.exception.SyntaxError except dns.ttl.BadTTL: if not (self.last_ttl_known or", "OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM", "RR. *default_ttl*, an ``int``, string, or ``None``. If not ``None``,", "\"syntax error\" ex = dns.exception.SyntaxError( \"%s:%d: %s\" % (filename, line_number,", "dns.exception.SyntaxError(\"RR class is not zone's class\") # Type try: rdtype", "= dns.name.from_text(name, origin, idna_codec) if isinstance(ttl, str): ttl = dns.ttl.from_text(ttl)", "= g3.groups() if sign == '': sign = '+' base", "subject to restrictions. 
*text*, a file object or a string,", "if ttl is None: raise dns.exception.SyntaxError(\"Missing default TTL value\") self.txn.add(name,", "is not forced an error will occur if the TTL", "so we can replace it # with ''. is_generate1 =", "def origin_information(self): if self.relativize: effective = dns.name.empty else: effective =", "token = self.tok.get() if not token.is_identifier(): raise dns.exception.SyntaxError except Exception:", "dns.ttl.from_text(token.value) self.last_ttl = ttl self.last_ttl_known = True token = self.tok.get()", "the tuple _parse_modify returns is ignored lmod, lsign, loffset, lwidth,", "\"forced\", and the input must not specify an owner name.", "if self.relativize: name = name.relativize(self.zone_origin) # TTL if self.force_ttl is", "sign = '+' width = 0 base = 'd' g3", "return self.rdatasets.get((name, rdtype, covers)) def _get_node(self, name): rdatasets = []", "for (name, _, _), rdataset in self.rdatasets.items(): rrset = dns.rrset.RRset(name,", "WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER", "= self.current_origin self.zone_rdclass = rdclass self.txn = txn self.saved_state =", "int(loffset) if rsign == '-': rindex = i - int(roffset)", "is None: detail = \"syntax error\" ex = dns.exception.SyntaxError( \"%s:%d:", "of the time. # We convert them to syntax errors", "except Exception: raise dns.exception.SyntaxError(\"unknown rdatatype '%s'\" % token.value) # rhs", "[] def writer(self, replacement=False): assert replacement is True return RRsetsReaderTransaction(self,", "may be specified in the input. If it is not", "a class as that may be convenient when cutting and", "class is not zone's class\") # Type try: rdtype =" ]
[ "perform context specific periodic wellness checks which can reset worker", "the script once\"\"\" raise NotImplementedError('run_once not implemented') def run_forever(self, *args,", "2.0 (the \"License\"); # you may not use this file", "called in the parent process. This is probably only useful", "in run-forever mode. \"\"\" pass def get_worker_args(self, once=False, **kwargs): \"\"\"", "1. sys.exit(e) use_hub(utils.get_hub()) # once on command line (i.e. daemonize=false)", "Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the", "**kwargs) def run(self, once=False, **kwargs): \"\"\"Daemonize and execute our strategy\"\"\"", "or execute multiple daemonized workers, they simply provide the behavior", "err.errno not in (errno.EINTR, errno.ECHILD): raise self.logger.notice('Worker %s died', p)", "self.logger.debug('Forked worker %s finished', os.getpid()) # do not return from", "workers should continue to run \"\"\" return True class DaemonStrategy(object):", "daemon, logger): self.daemon = daemon self.logger = logger self.running =", "a single worker's :meth:`run` method after fork. \"\"\" return []", "iterable of dicts, each element represents the kwargs to be", "of Daemon do not know *how* to daemonize, or execute", "the number of processes created. If the returned iterable is", ":meth:`Daemon.run` method \"\"\" # very often the config section_name is", "environment variable exists to avoid stat('/etc/localtime') on # some platforms.", "def iter_unspawned_workers(self): while True: try: per_worker_options = self.unspawned_worker_options.pop() except IndexError:", "to instantiate, subclass of :class:`Daemon` :param conf_file: Path to configuration", "parent process. If it returns False, all child workers are", "Daemon(object): \"\"\" Daemon base class A daemon has a run", "is the execution strategy for using subclasses of Daemon. 
The", "context specific periodic wellness checks which can reset worker arguments.", "utils.FALLOCATE_IS_PERCENT = \\ utils.config_fallocate_value(conf.get('fallocate_reserve', '1%')) # By default, disable eventlet", "workers, they simply provide the behavior of the daemon and", "strategy. :param once: False if the worker(s) will be daemonized,", "self.ask_daemon_to_prepare_workers(once, **kwargs) if not self.unspawned_worker_options: return self._run_inline(once, **kwargs) for per_worker_options", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "of the daemon held by the parent process. If it", "err.errno not in (errno.ESRCH, errno.EINTR, errno.ECHILD): raise self.register_worker_exit(p) self.logger.debug('Cleaned up", "time.tzset() logger.notice('Starting %s', os.getpid()) try: DaemonStrategy(klass(conf), logger).run(once=once, **kwargs) except KeyboardInterrupt:", "*how* to daemonize, or execute multiple daemonized workers, they simply", "worker processes. This method is called in the parent process.", "will be derived from the daemon ``klass`` if not provided", "daemon :meth:`Daemon.run` method \"\"\" # very often the config section_name", "via command line argparser :returns: an iterable of dicts, each", "if section_name == '': section_name = sub(r'([a-z])([A-Z])', r'\\1-\\2', klass.__name__).lower() try:", "def cleanup(self): for p in self.spawned_pids(): try: os.kill(p, signal.SIGTERM) except", "p) def run_daemon(klass, conf_file, section_name='', once=False, **kwargs): \"\"\" Loads settings", "daemonize, or execute multiple daemonized workers, they simply provide the", "this to do something after running using multiple worker processes.", "def __init__(self, conf): self.conf = conf self.logger = utils.get_logger(conf, log_route='daemon')", "a boolean, True only if all workers should continue to", "ANY KIND, either express or # implied. # See the", "kwarg. 
The section_name will be derived from the daemon ``klass``", "provided (e.g. ObjectReplicator => object-replicator). :param klass: Class to instantiate,", "utils.readconf(conf_file, section_name, log_name=kwargs.get('log_name')) except (ValueError, IOError) as e: # The", "returns False, all child workers are terminated, and new workers", "self.run_once(**kwargs) else: self.run_forever(**kwargs) def post_multiprocess_run(self): \"\"\" Override this to do", "This is the execution strategy for using subclasses of Daemon.", "our strategy\"\"\" self.setup(**kwargs) try: self._run(once=once, **kwargs) except KeyboardInterrupt: self.logger.notice('User quit')", "# limitations under the License. import errno import os import", "signal from re import sub import eventlet.debug from eventlet.hubs import", "exit. However, if the Daemon returns a non-empty iterable from", "of Daemon may override :meth:`get_worker_args` to dispatch arguments to individual", "eventlet.debug from eventlet.hubs import use_hub from swift.common import utils class", "for run-once mode since there is no \"after running\" in", "it returns False, all child workers are terminated, and new", "parent process will exit. However, if the Daemon returns a", "while self.running: if self.abort_workers_if_daemon_would_like(): self.ask_daemon_to_prepare_workers(once, **kwargs) self.check_on_all_running_workers() if not once:", "multi-worker strategy self.options_by_pid = {} self.unspawned_worker_options = [] def setup(self,", "conf file to load config from :param once: Passed to", "worker yield a (possibly empty) dict of kwargs to pass", "use this file except in compliance with the License. #", "on # some platforms. This locks in reported times to", "fork. 
The length of elements returned from this method will", "err: if err.errno not in (errno.EINTR, errno.ECHILD): raise self.logger.notice('Worker %s", "= utils.readconf(conf_file, section_name, log_name=kwargs.get('log_name')) except (ValueError, IOError) as e: #", "# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under", "stack, nor execute any finally blocks os._exit(0) else: self.register_worker_start(pid, kwargs)", "and :meth:`run_forever`. A subclass of Daemon may override :meth:`get_worker_args` to", "== 0: # child still running continue self.logger.debug('Worker %s exited',", "not self.unspawned_worker_options: return self._run_inline(once, **kwargs) for per_worker_options in self.iter_unspawned_workers(): if", "iter_unspawned_workers(self): while True: try: per_worker_options = self.unspawned_worker_options.pop() except IndexError: return", "not implemented') def run(self, once=False, **kwargs): if once: self.run_once(**kwargs) else:", "self.unspawned_worker_options.append(self.options_by_pid.pop(pid)) def ask_daemon_to_prepare_workers(self, once, **kwargs): self.unspawned_worker_options = list( self.daemon.get_worker_args(once=once, **kwargs))", "options, unless it was executed in once mode. :param daemon:", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "under the License. import errno import os import sys import", "and execute our strategy\"\"\" self.setup(**kwargs) try: self._run(once=once, **kwargs) except KeyboardInterrupt:", "License. # You may obtain a copy of the License", "False def _fork(self, once, **kwargs): pid = os.fork() if pid", "on command line (i.e. daemonize=false) will over-ride config once =", "will determine the number of processes created. 
If the returned", "log_route='daemon') def run_once(self, *args, **kwargs): \"\"\"Override this to run the", "True if the worker(s) will be run once :param kwargs:", "If a child process exits it will be restarted with", "run once :param kwargs: plumbed through via command line argparser", "[] def setup(self, **kwargs): utils.validate_configuration() utils.drop_privileges(self.daemon.conf.get('user', 'swift')) utils.clean_up_daemon_hygiene() utils.capture_stdio(self.logger, **kwargs)", "a :class:`Daemon` (has a `run` method) :param logger: a logger", "self.cleanup() return True return False def check_on_all_running_workers(self): for p in", "return 0 else: if not self.spawned_pids(): self.logger.notice('Finished %s', os.getpid()) break", "klass.__name__).lower() try: conf = utils.readconf(conf_file, section_name, log_name=kwargs.get('log_name')) except (ValueError, IOError)", "to UTC. os.environ['TZ'] = 'UTC+0' time.tzset() logger.notice('Starting %s', os.getpid()) try:", "under the License is distributed on an \"AS IS\" BASIS,", "**kwargs): self.ask_daemon_to_prepare_workers(once, **kwargs) if not self.unspawned_worker_options: return self._run_inline(once, **kwargs) for", "list(self.options_by_pid.keys()) def register_worker_start(self, pid, per_worker_options): self.logger.debug('Spawned worker %s with %r',", "License for the specific language governing permissions and # limitations", "kwargs to pass along to the daemon's :meth:`run` method after", "\"\"\" This method is called very frequently on the instance", "will be restarted with the same options, unless it was", "daemon: an instance of a :class:`Daemon` (has a `run` method)", "raise self.register_worker_exit(p) self.logger.debug('Cleaned up worker %s', p) def run_daemon(klass, conf_file,", "OSError as err: if err.errno not in (errno.EINTR, errno.ECHILD): raise", "pass def get_worker_args(self, once=False, **kwargs): \"\"\" For each worker yield", "if the worker(s) will be run once :param kwargs: plumbed", 
"= os.fork() if pid == 0: signal.signal(signal.SIGHUP, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL)", "if err.errno not in (errno.EINTR, errno.ECHILD): raise self.logger.notice('Worker %s died',", "to do something after running using multiple worker processes. This", "instantiates daemon ``klass`` and runs the daemon with the specified", "return 0 while self.running: if self.abort_workers_if_daemon_would_like(): self.ask_daemon_to_prepare_workers(once, **kwargs) self.check_on_all_running_workers() if", "self.daemon.run(once=once, **kwargs) def run(self, once=False, **kwargs): \"\"\"Daemonize and execute our", "avoid stat('/etc/localtime') on # some platforms. This locks in reported", "self.logger.notice('User quit') finally: self.cleanup() self.running = False def _fork(self, once,", "to daemon :meth:`Daemon.run` method \"\"\" # very often the config", "utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \\ utils.config_fallocate_value(conf.get('fallocate_reserve', '1%')) # By default, disable", "from conf, then instantiates daemon ``klass`` and runs the daemon", "singleton will be passed through to readconf as is if", ":meth:`Daemon.get_worker_args`, the daemon's :meth:`Daemon.run` method will be invoked in child", "`run` method) :param logger: a logger instance \"\"\" def __init__(self,", "no \"after running\" in run-forever mode. 
\"\"\" pass def get_worker_args(self,", "os.getpid()) try: DaemonStrategy(klass(conf), logger).run(once=once, **kwargs) except KeyboardInterrupt: logger.info('User quit') logger.notice('Exited", "if all workers should continue to run \"\"\" return True", "0: # child still running continue self.logger.debug('Worker %s exited', p)", "configuration file :param section_name: Section name from conf file to", "run forever\"\"\" raise NotImplementedError('run_forever not implemented') def run(self, once=False, **kwargs):", "**kwargs): pid = os.fork() if pid == 0: signal.signal(signal.SIGHUP, signal.SIG_DFL)", "will be invoked in child processes, with the arguments provided", "list( self.daemon.get_worker_args(once=once, **kwargs)) def abort_workers_if_daemon_would_like(self): if not self.daemon.is_healthy(): self.logger.debug( 'Daemon", "\"\"\" def __init__(self, daemon, logger): self.daemon = daemon self.logger =", "is based on the class name # the None singleton", "self._run_inline(once, **kwargs) for per_worker_options in self.iter_unspawned_workers(): if self._fork(once, **per_worker_options) ==", "the behavior of the daemon and context specific knowledge about", "try: self._run(once=once, **kwargs) except KeyboardInterrupt: self.logger.notice('User quit') finally: self.cleanup() self.running", "implemented') def run(self, once=False, **kwargs): if once: self.run_once(**kwargs) else: self.run_forever(**kwargs)", "running\" in run-forever mode. \"\"\" pass def get_worker_args(self, once=False, **kwargs):", "section_name == '': section_name = sub(r'([a-z])([A-Z])', r'\\1-\\2', klass.__name__).lower() try: conf", "self.cleanup() self.running = False def _fork(self, once, **kwargs): pid =", "optional nice/ionice priority scheduling utils.modify_priority(conf, logger) # disable fallocate if", "in compliance with the License. 
# You may obtain a", "\"\"\"Override this to run forever\"\"\" raise NotImplementedError('run_forever not implemented') def", "If it returns False, all child workers are terminated, and", "= conf self.logger = utils.get_logger(conf, log_route='daemon') def run_once(self, *args, **kwargs):", "# very often the config section_name is based on the", "log_name=kwargs.get('log_name')) except (ValueError, IOError) as e: # The message will", "software # distributed under the License is distributed on an", "daemon held by the parent process. If it returns False,", "kwargs) return pid def iter_unspawned_workers(self): while True: try: per_worker_options =", "do not return from this stack, nor execute any finally", "``klass`` if not provided (e.g. ObjectReplicator => object-replicator). :param klass:", "do something after running using multiple worker processes. This method", "arguments. Implementations of Daemon do not know *how* to daemonize,", "the daemon's :meth:`Daemon.run` method will be invoked in child processes,", "with %r', pid, per_worker_options) self.options_by_pid[pid] = per_worker_options def register_worker_exit(self, pid):", "'logger' in kwargs: logger = kwargs.pop('logger') else: logger = utils.get_logger(conf,", "daemon's :meth:`run` method after fork. 
The length of elements returned", "= list( self.daemon.get_worker_args(once=once, **kwargs)) def abort_workers_if_daemon_would_like(self): if not self.daemon.is_healthy(): self.logger.debug(", "options, aborting workers') self.cleanup() return True return False def check_on_all_running_workers(self):", "utils.FALLOCATE_RESERVE if desired utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \\ utils.config_fallocate_value(conf.get('fallocate_reserve', '1%')) #", "strategy self.options_by_pid = {} self.unspawned_worker_options = [] def setup(self, **kwargs):", "if err.errno not in (errno.ESRCH, errno.EINTR, errno.ECHILD): raise self.register_worker_exit(p) self.logger.debug('Cleaned", "subclass of Daemon must implement :meth:`run_once` and :meth:`run_forever`. A subclass", "Daemon may override :meth:`get_worker_args` to dispatch arguments to individual child", "config from :param once: Passed to daemon :meth:`Daemon.run` method \"\"\"", "section_name), log_to_console=kwargs.pop('verbose', False), log_route=section_name) # optional nice/ionice priority scheduling utils.modify_priority(conf,", "execute our strategy\"\"\" self.setup(**kwargs) try: self._run(once=once, **kwargs) except KeyboardInterrupt: self.logger.notice('User", "iterable from :meth:`Daemon.get_worker_args`, the daemon's :meth:`Daemon.run` method will be invoked", "processes. This method is called in the parent process. 
This", "else: if pid == 0: # child still running continue", "utils.disable_fallocate() # set utils.FALLOCATE_RESERVE if desired utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \\", "execute any finally blocks os._exit(0) else: self.register_worker_start(pid, kwargs) return pid", "accepts a ``once`` kwarg and will dispatch to :meth:`run_once` or", "self.running = False self.logger.info('SIGTERM received') signal.signal(signal.SIGTERM, signal.SIG_IGN) os.killpg(0, signal.SIGTERM) os._exit(0)", "signal.signal(signal.SIGHUP, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL) self.daemon.run(once, **kwargs) self.logger.debug('Forked worker %s finished',", "return yield per_worker_options def spawned_pids(self): return list(self.options_by_pid.keys()) def register_worker_start(self, pid,", "%r', pid, per_worker_options) self.options_by_pid[pid] = per_worker_options def register_worker_exit(self, pid): self.unspawned_worker_options.append(self.options_by_pid.pop(pid))", "Passed to daemon :meth:`Daemon.run` method \"\"\" # very often the", "signal.signal(signal.SIGTERM, signal.SIG_IGN) os.killpg(0, signal.SIGTERM) os._exit(0) signal.signal(signal.SIGTERM, kill_children) self.running = True", "disable eventlet printing stacktraces eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no')) eventlet.debug.hub_exceptions(eventlet_debug) #", "a `run` method) :param logger: a logger instance \"\"\" def", "with the same options, unless it was executed in once", "worker %s', p) def run_daemon(klass, conf_file, section_name='', once=False, **kwargs): \"\"\"", "once on command line (i.e. 
daemonize=false) will over-ride config once", "# disable fallocate if desired if utils.config_true_value(conf.get('disable_fallocate', 'no')): utils.disable_fallocate() #", "'UTC+0' time.tzset() logger.notice('Starting %s', os.getpid()) try: DaemonStrategy(klass(conf), logger).run(once=once, **kwargs) except", "as is if section_name == '': section_name = sub(r'([a-z])([A-Z])', r'\\1-\\2',", "exited', p) self.register_worker_exit(p) def _run(self, once, **kwargs): self.ask_daemon_to_prepare_workers(once, **kwargs) if", "def __init__(self, daemon, logger): self.daemon = daemon self.logger = logger", "def run(self, once=False, **kwargs): if once: self.run_once(**kwargs) else: self.run_forever(**kwargs) def", "run \"\"\" return True class DaemonStrategy(object): \"\"\" This is the", "= {} self.unspawned_worker_options = [] def setup(self, **kwargs): utils.validate_configuration() utils.drop_privileges(self.daemon.conf.get('user',", "if pid == 0: # child still running continue self.logger.debug('Worker", "if not self.spawned_pids(): self.logger.notice('Finished %s', os.getpid()) break time.sleep(0.1) self.daemon.post_multiprocess_run() return", "in reported times to UTC. 
os.environ['TZ'] = 'UTC+0' time.tzset() logger.notice('Starting", "logger self.running = False # only used by multi-worker strategy", "not self.daemon.is_healthy(): self.logger.debug( 'Daemon needs to change options, aborting workers')", "def check_on_all_running_workers(self): for p in self.spawned_pids(): try: pid, status =", "be invoked in child processes, with the arguments provided from", "all child workers are terminated, and new workers will be", "os.kill(p, signal.SIGTERM) except OSError as err: if err.errno not in", "**kwargs): if once: self.run_once(**kwargs) else: self.run_forever(**kwargs) def post_multiprocess_run(self): \"\"\" Override", "to load config from :param once: Passed to daemon :meth:`Daemon.run`", "%s with %r', pid, per_worker_options) self.options_by_pid[pid] = per_worker_options def register_worker_exit(self,", "kwarg and will dispatch to :meth:`run_once` or :meth:`run_forever`. A subclass", "nice/ionice priority scheduling utils.modify_priority(conf, logger) # disable fallocate if desired", "the daemon's :meth:`Daemon.run` method from within the parent process. When", "from re import sub import eventlet.debug from eventlet.hubs import use_hub", "def _run(self, once, **kwargs): self.ask_daemon_to_prepare_workers(once, **kwargs) if not self.unspawned_worker_options: return", "When the :meth:`Daemon.run` method returns the parent process will exit.", "utils.drop_privileges(self.daemon.conf.get('user', 'swift')) utils.clean_up_daemon_hygiene() utils.capture_stdio(self.logger, **kwargs) def kill_children(*args): self.running = False", "self.spawned_pids(): self.logger.notice('Finished %s', os.getpid()) break time.sleep(0.1) self.daemon.post_multiprocess_run() return 0 def", "and # limitations under the License. 
import errno import os", "Daemon base class A daemon has a run method that", "See the License for the specific language governing permissions and", "all workers should continue to run \"\"\" return True class", "returned iterable is empty, the Strategy will fallback to run-inline", "utils.capture_stdio(self.logger, **kwargs) def kill_children(*args): self.running = False self.logger.info('SIGTERM received') signal.signal(signal.SIGTERM,", "except IndexError: return yield per_worker_options def spawned_pids(self): return list(self.options_by_pid.keys()) def", "import utils class Daemon(object): \"\"\" Daemon base class A daemon", "except OSError as err: if err.errno not in (errno.ESRCH, errno.EINTR,", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "run-forever mode. \"\"\" pass def get_worker_args(self, once=False, **kwargs): \"\"\" For", "KeyboardInterrupt: self.logger.notice('User quit') finally: self.cleanup() self.running = False def _fork(self,", "to in writing, software # distributed under the License is", "\"after running\" in run-forever mode. \"\"\" pass def get_worker_args(self, once=False,", "False if the worker(s) will be daemonized, True if the", "only used by multi-worker strategy self.options_by_pid = {} self.unspawned_worker_options =", "# See the License for the specific language governing permissions", "register_worker_start(self, pid, per_worker_options): self.logger.debug('Spawned worker %s with %r', pid, per_worker_options)", "self._fork(once, **per_worker_options) == 0: return 0 else: if not self.spawned_pids():", "the Strategy will fallback to run-inline strategy. 
:param once: False", "on the class name # the None singleton will be", "logger): self.daemon = daemon self.logger = logger self.running = False", "fallocate if desired if utils.config_true_value(conf.get('disable_fallocate', 'no')): utils.disable_fallocate() # set utils.FALLOCATE_RESERVE", "governing permissions and # limitations under the License. import errno", "daemon has a run method that accepts a ``once`` kwarg", "or agreed to in writing, software # distributed under the", "ObjectReplicator => object-replicator). :param klass: Class to instantiate, subclass of", "\"\"\"Run the daemon\"\"\" self.daemon.run(once=once, **kwargs) def run(self, once=False, **kwargs): \"\"\"Daemonize", "required by applicable law or agreed to in writing, software", "True class DaemonStrategy(object): \"\"\" This is the execution strategy for", "None singleton will be passed through to readconf as is", "logger = utils.get_logger(conf, conf.get('log_name', section_name), log_to_console=kwargs.pop('verbose', False), log_route=section_name) # optional", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "with the License. 
# You may obtain a copy of", "of the daemon and context specific knowledge about how workers", "be passed through to readconf as is if section_name ==", "signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL) self.daemon.run(once, **kwargs) self.logger.debug('Forked worker %s finished', os.getpid())", "invoke the daemon's :meth:`Daemon.run` method from within the parent process.", "any finally blocks os._exit(0) else: self.register_worker_start(pid, kwargs) return pid def", "abort_workers_if_daemon_would_like(self): if not self.daemon.is_healthy(): self.logger.debug( 'Daemon needs to change options,", "else: logger = utils.get_logger(conf, conf.get('log_name', section_name), log_to_console=kwargs.pop('verbose', False), log_route=section_name) #", "**kwargs) for per_worker_options in self.iter_unspawned_workers(): if self._fork(once, **per_worker_options) == 0:", "setup(self, **kwargs): utils.validate_configuration() utils.drop_privileges(self.daemon.conf.get('user', 'swift')) utils.clean_up_daemon_hygiene() utils.capture_stdio(self.logger, **kwargs) def kill_children(*args):", "to change options, aborting workers') self.cleanup() return True return False", "return False def check_on_all_running_workers(self): for p in self.spawned_pids(): try: pid,", "\"\"\"Daemonize and execute our strategy\"\"\" self.setup(**kwargs) try: self._run(once=once, **kwargs) except", "%s', os.getpid()) try: DaemonStrategy(klass(conf), logger).run(once=once, **kwargs) except KeyboardInterrupt: logger.info('User quit')", "compliance with the License. # You may obtain a copy", "single worker's :meth:`run` method after fork. 
\"\"\" return [] def", "self.register_worker_exit(p) def _run(self, once, **kwargs): self.ask_daemon_to_prepare_workers(once, **kwargs) if not self.unspawned_worker_options:", "agreed to in writing, software # distributed under the License", "per_worker_options in self.iter_unspawned_workers(): if self._fork(once, **per_worker_options) == 0: return 0", "klass: Class to instantiate, subclass of :class:`Daemon` :param conf_file: Path", "the daemon. If a child process exits it will be", "utils.config_fallocate_value(conf.get('fallocate_reserve', '1%')) # By default, disable eventlet printing stacktraces eventlet_debug", "the License. import errno import os import sys import time", "self.conf = conf self.logger = utils.get_logger(conf, log_route='daemon') def run_once(self, *args,", "run the script once\"\"\" raise NotImplementedError('run_once not implemented') def run_forever(self,", "``once`` kwarg and will dispatch to :meth:`run_once` or :meth:`run_forever`. A", "running using multiple worker processes. This method is called in", "distributed under the License is distributed on an \"AS IS\"", "(possibly empty) dict of kwargs to pass along to the", "this to run forever\"\"\" raise NotImplementedError('run_forever not implemented') def run(self,", "pid == 0: # child still running continue self.logger.debug('Worker %s", "logger = kwargs.pop('logger') else: logger = utils.get_logger(conf, conf.get('log_name', section_name), log_to_console=kwargs.pop('verbose',", "or # implied. # See the License for the specific", "return [] def is_healthy(self): \"\"\" This method is called very", "'1%')) # By default, disable eventlet printing stacktraces eventlet_debug =", "raise self.logger.notice('Worker %s died', p) else: if pid == 0:", "pass along to the daemon's :meth:`run` method after fork. The", "the parent process will exit. 
However, if the Daemon returns", "be restarted with the same options, unless it was executed", "raise NotImplementedError('run_forever not implemented') def run(self, once=False, **kwargs): if once:", "{} self.unspawned_worker_options = [] def setup(self, **kwargs): utils.validate_configuration() utils.drop_privileges(self.daemon.conf.get('user', 'swift'))", "signal.SIG_DFL) self.daemon.run(once, **kwargs) self.logger.debug('Forked worker %s finished', os.getpid()) # do", "except in compliance with the License. # You may obtain", "process exits it will be restarted with the same options,", "is empty, the Strategy will fallback to run-inline strategy. :param", "passed to a single worker's :meth:`run` method after fork. \"\"\"", "pid = os.fork() if pid == 0: signal.signal(signal.SIGHUP, signal.SIG_DFL) signal.signal(signal.SIGTERM,", "if 'logger' in kwargs: logger = kwargs.pop('logger') else: logger =", "fallback to run-inline strategy. :param once: False if the worker(s)", "of :class:`Daemon` :param conf_file: Path to configuration file :param section_name:", "A daemon has a run method that accepts a ``once``", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", ":meth:`get_worker_args` to dispatch arguments to individual child process workers and", "process. This is probably only useful for run-once mode since", "restarted with the same options, unless it was executed in", ":class:`Daemon` :param conf_file: Path to configuration file :param section_name: Section", "not use this file except in compliance with the License.", "0 def cleanup(self): for p in self.spawned_pids(): try: os.kill(p, signal.SIGTERM)", "express or # implied. # See the License for the", "the :meth:`Daemon.run` method returns the parent process will exit. 
However,", "\"\"\" For each worker yield a (possibly empty) dict of", "utils.config_true_value(conf.get('daemonize', 'true')) # pre-configure logger if 'logger' in kwargs: logger", "blocks os._exit(0) else: self.register_worker_start(pid, kwargs) return pid def iter_unspawned_workers(self): while", "to run forever\"\"\" raise NotImplementedError('run_forever not implemented') def run(self, once=False,", "from the daemon ``klass`` if not provided (e.g. ObjectReplicator =>", "the kwargs to be passed to a single worker's :meth:`run`", "writing, software # distributed under the License is distributed on", "# The message will be printed to stderr # and", "kwargs.pop('logger') else: logger = utils.get_logger(conf, conf.get('log_name', section_name), log_to_console=kwargs.pop('verbose', False), log_route=section_name)", "of elements returned from this method will determine the number", "you may not use this file except in compliance with", "run-inline strategy. :param once: False if the worker(s) will be", "to pass along to the daemon's :meth:`run` method after fork.", ":meth:`run` method after fork. The length of elements returned from", "post_multiprocess_run(self): \"\"\" Override this to do something after running using", "mode. 
:param daemon: an instance of a :class:`Daemon` (has a", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "False def check_on_all_running_workers(self): for p in self.spawned_pids(): try: pid, status", "self.unspawned_worker_options: return self._run_inline(once, **kwargs) for per_worker_options in self.iter_unspawned_workers(): if self._fork(once,", "dict of kwargs to pass along to the daemon's :meth:`run`", "= once or not utils.config_true_value(conf.get('daemonize', 'true')) # pre-configure logger if", "= True def _run_inline(self, once=False, **kwargs): \"\"\"Run the daemon\"\"\" self.daemon.run(once=once,", "**per_worker_options) == 0: return 0 else: if not self.spawned_pids(): self.logger.notice('Finished", "This method is called very frequently on the instance of", "IOError) as e: # The message will be printed to", "\"\"\" def __init__(self, conf): self.conf = conf self.logger = utils.get_logger(conf,", "in the parent process. This is probably only useful for", "and new workers will be created. :returns: a boolean, True", "instance \"\"\" def __init__(self, daemon, logger): self.daemon = daemon self.logger", "break time.sleep(0.1) self.daemon.post_multiprocess_run() return 0 def cleanup(self): for p in", "of Daemon must implement :meth:`run_once` and :meth:`run_forever`. 
A subclass of", ":returns: a boolean, True only if all workers should continue", "may override :meth:`get_worker_args` to dispatch arguments to individual child process", "arguments to individual child process workers and :meth:`is_healthy` to perform", "printing stacktraces eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no')) eventlet.debug.hub_exceptions(eventlet_debug) # Ensure TZ", "if desired utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \\ utils.config_fallocate_value(conf.get('fallocate_reserve', '1%')) # By", "section_name = sub(r'([a-z])([A-Z])', r'\\1-\\2', klass.__name__).lower() try: conf = utils.readconf(conf_file, section_name,", "in once mode. :param daemon: an instance of a :class:`Daemon`", "element represents the kwargs to be passed to a single", "# child still running continue self.logger.debug('Worker %s exited', p) self.register_worker_exit(p)", "will dispatch to :meth:`run_once` or :meth:`run_forever`. A subclass of Daemon", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "once, **kwargs): self.ask_daemon_to_prepare_workers(once, **kwargs) if not self.unspawned_worker_options: return self._run_inline(once, **kwargs)", "process. When the :meth:`Daemon.run` method returns the parent process will", "specific periodic wellness checks which can reset worker arguments. Implementations", "the class name # the None singleton will be passed", "self.logger.debug('Worker %s exited', p) self.register_worker_exit(p) def _run(self, once, **kwargs): self.ask_daemon_to_prepare_workers(once,", "after running using multiple worker processes. This method is called", "method is called in the parent process. 
This is probably", "pid def iter_unspawned_workers(self): while True: try: per_worker_options = self.unspawned_worker_options.pop() except", "run_daemon(klass, conf_file, section_name='', once=False, **kwargs): \"\"\" Loads settings from conf,", "disable fallocate if desired if utils.config_true_value(conf.get('disable_fallocate', 'no')): utils.disable_fallocate() # set", "os.waitpid(p, os.WNOHANG) except OSError as err: if err.errno not in", "**kwargs) self.logger.debug('Forked worker %s finished', os.getpid()) # do not return", "worker %s with %r', pid, per_worker_options) self.options_by_pid[pid] = per_worker_options def", "the daemon held by the parent process. If it returns", "the daemon\"\"\" self.daemon.run(once=once, **kwargs) def run(self, once=False, **kwargs): \"\"\"Daemonize and", "each worker yield a (possibly empty) dict of kwargs to", "self.options_by_pid = {} self.unspawned_worker_options = [] def setup(self, **kwargs): utils.validate_configuration()", "the returned iterable is empty, the Strategy will fallback to", "process will exit. However, if the Daemon returns a non-empty", "line argparser :returns: an iterable of dicts, each element represents", "quit') finally: self.cleanup() self.running = False def _fork(self, once, **kwargs):", "else: if not self.spawned_pids(): self.logger.notice('Finished %s', os.getpid()) break time.sleep(0.1) self.daemon.post_multiprocess_run()", "empty, the Strategy will fallback to run-inline strategy. :param once:", "parent process. This is probably only useful for run-once mode", "to readconf as is if section_name == '': section_name =", ":meth:`run_once` and :meth:`run_forever`. 
A subclass of Daemon may override :meth:`get_worker_args`", "as e: # The message will be printed to stderr", "elements returned from this method will determine the number of", "needs to change options, aborting workers') self.cleanup() return True return", "the instance of the daemon held by the parent process.", "must implement :meth:`run_once` and :meth:`run_forever`. A subclass of Daemon may", "was executed in once mode. :param daemon: an instance of", "ask_daemon_to_prepare_workers(self, once, **kwargs): self.unspawned_worker_options = list( self.daemon.get_worker_args(once=once, **kwargs)) def abort_workers_if_daemon_would_like(self):", "(i.e. daemonize=false) will over-ride config once = once or not", "platforms. This locks in reported times to UTC. os.environ['TZ'] =", "length of elements returned from this method will determine the", "def spawned_pids(self): return list(self.options_by_pid.keys()) def register_worker_start(self, pid, per_worker_options): self.logger.debug('Spawned worker", "dicts, each element represents the kwargs to be passed to", "in kwargs: logger = kwargs.pop('logger') else: logger = utils.get_logger(conf, conf.get('log_name',", "use_hub from swift.common import utils class Daemon(object): \"\"\" Daemon base", "Strategy will fallback to run-inline strategy. :param once: False if", "be passed to a single worker's :meth:`run` method after fork.", "be run once :param kwargs: plumbed through via command line", "pid, status = os.waitpid(p, os.WNOHANG) except OSError as err: if", "once=False, **kwargs): \"\"\" For each worker yield a (possibly empty)", "after fork. \"\"\" return [] def is_healthy(self): \"\"\" This method", "for using subclasses of Daemon. The default behavior is to", "Daemon. 
The default behavior is to invoke the daemon's :meth:`Daemon.run`", "worker(s) will be daemonized, True if the worker(s) will be", "# set utils.FALLOCATE_RESERVE if desired utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \\ utils.config_fallocate_value(conf.get('fallocate_reserve',", ":meth:`Daemon.run` method will be invoked in child processes, with the", ":param once: Passed to daemon :meth:`Daemon.run` method \"\"\" # very", "returns a non-empty iterable from :meth:`Daemon.get_worker_args`, the daemon's :meth:`Daemon.run` method", "return self._run_inline(once, **kwargs) for per_worker_options in self.iter_unspawned_workers(): if self._fork(once, **per_worker_options)", "(c) 2010-2012 OpenStack Foundation # # Licensed under the Apache", "re import sub import eventlet.debug from eventlet.hubs import use_hub from", "scheduling utils.modify_priority(conf, logger) # disable fallocate if desired if utils.config_true_value(conf.get('disable_fallocate',", "**kwargs): \"\"\" Loads settings from conf, then instantiates daemon ``klass``", "the License is distributed on an \"AS IS\" BASIS, #", "if not self.unspawned_worker_options: return self._run_inline(once, **kwargs) for per_worker_options in self.iter_unspawned_workers():", "Section name from conf file to load config from :param", "workers are terminated, and new workers will be created. :returns:", "0: return 0 else: if not self.spawned_pids(): self.logger.notice('Finished %s', os.getpid())", "By default, disable eventlet printing stacktraces eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no'))", "per_worker_options = self.unspawned_worker_options.pop() except IndexError: return yield per_worker_options def spawned_pids(self):", "once mode. :param daemon: an instance of a :class:`Daemon` (has", "number of processes created. 
If the returned iterable is empty,", "However, if the Daemon returns a non-empty iterable from :meth:`Daemon.get_worker_args`,", "self.logger = logger self.running = False # only used by", "often the config section_name is based on the class name", "conf, then instantiates daemon ``klass`` and runs the daemon with", "command line (i.e. daemonize=false) will over-ride config once = once", "= kwargs.pop('logger') else: logger = utils.get_logger(conf, conf.get('log_name', section_name), log_to_console=kwargs.pop('verbose', False),", "aborting workers') self.cleanup() return True return False def check_on_all_running_workers(self): for", "of kwargs to pass along to the daemon's :meth:`run` method", "self.daemon = daemon self.logger = logger self.running = False #", "the daemon ``klass`` if not provided (e.g. ObjectReplicator => object-replicator).", "per_worker_options def register_worker_exit(self, pid): self.unspawned_worker_options.append(self.options_by_pid.pop(pid)) def ask_daemon_to_prepare_workers(self, once, **kwargs): self.unspawned_worker_options", "0 while self.running: if self.abort_workers_if_daemon_would_like(): self.ask_daemon_to_prepare_workers(once, **kwargs) self.check_on_all_running_workers() if not", "class DaemonStrategy(object): \"\"\" This is the execution strategy for using", "def is_healthy(self): \"\"\" This method is called very frequently on", "strategy\"\"\" self.setup(**kwargs) try: self._run(once=once, **kwargs) except KeyboardInterrupt: self.logger.notice('User quit') finally:", "**kwargs) except KeyboardInterrupt: self.logger.notice('User quit') finally: self.cleanup() self.running = False", "utils.clean_up_daemon_hygiene() utils.capture_stdio(self.logger, **kwargs) def kill_children(*args): self.running = False self.logger.info('SIGTERM received')", "instance of a :class:`Daemon` (has a `run` method) :param logger:", "died', p) else: if pid == 0: # child still", "= False def _fork(self, once, **kwargs): pid = os.fork() if", 
"Implementations of Daemon do not know *how* to daemonize, or", "log_to_console=kwargs.pop('verbose', False), log_route=section_name) # optional nice/ionice priority scheduling utils.modify_priority(conf, logger)", "return True return False def check_on_all_running_workers(self): for p in self.spawned_pids():", "message will be printed to stderr # and results in", "finally: self.cleanup() self.running = False def _fork(self, once, **kwargs): pid", "once: Passed to daemon :meth:`Daemon.run` method \"\"\" # very often", "the arguments provided from the parent process's instance of the", "law or agreed to in writing, software # distributed under", "workers should be started. \"\"\" def __init__(self, conf): self.conf =", "of processes created. If the returned iterable is empty, the", "daemon ``klass`` and runs the daemon with the specified ``once``", "worker arguments. Implementations of Daemon do not know *how* to", "use_hub(utils.get_hub()) # once on command line (i.e. daemonize=false) will over-ride", "import signal from re import sub import eventlet.debug from eventlet.hubs", "daemon and context specific knowledge about how workers should be", "useful for run-once mode since there is no \"after running\"", "This is probably only useful for run-once mode since there", "mode since there is no \"after running\" in run-forever mode.", "from :meth:`Daemon.get_worker_args`, the daemon's :meth:`Daemon.run` method will be invoked in", "workers') self.cleanup() return True return False def check_on_all_running_workers(self): for p", "in (errno.EINTR, errno.ECHILD): raise self.logger.notice('Worker %s died', p) else: if", "since there is no \"after running\" in run-forever mode. \"\"\"", "err: if err.errno not in (errno.ESRCH, errno.EINTR, errno.ECHILD): raise self.register_worker_exit(p)", "simply provide the behavior of the daemon and context specific", "self.logger.notice('Worker %s died', p) else: if pid == 0: #", "of the daemon. 
If a child process exits it will", "be daemonized, True if the worker(s) will be run once", "as err: if err.errno not in (errno.EINTR, errno.ECHILD): raise self.logger.notice('Worker", "once, **kwargs): pid = os.fork() if pid == 0: signal.signal(signal.SIGHUP,", "this to run the script once\"\"\" raise NotImplementedError('run_once not implemented')", "from this method will determine the number of processes created.", "conf.get('log_name', section_name), log_to_console=kwargs.pop('verbose', False), log_route=section_name) # optional nice/ionice priority scheduling", "yield a (possibly empty) dict of kwargs to pass along", "_run(self, once, **kwargs): self.ask_daemon_to_prepare_workers(once, **kwargs) if not self.unspawned_worker_options: return self._run_inline(once,", "frequently on the instance of the daemon held by the", "a child process exits it will be restarted with the", "CONDITIONS OF ANY KIND, either express or # implied. #", "determine the number of processes created. If the returned iterable", "e: # The message will be printed to stderr #", "instantiate, subclass of :class:`Daemon` :param conf_file: Path to configuration file", "per_worker_options): self.logger.debug('Spawned worker %s with %r', pid, per_worker_options) self.options_by_pid[pid] =", "may obtain a copy of the License at # #", "to dispatch arguments to individual child process workers and :meth:`is_healthy`", "returned from this method will determine the number of processes", "the daemon and context specific knowledge about how workers should", "strategy for using subclasses of Daemon. The default behavior is", "object-replicator). :param klass: Class to instantiate, subclass of :class:`Daemon` :param", "terminated, and new workers will be created. :returns: a boolean,", "from within the parent process. 
When the :meth:`Daemon.run` method returns", "once=False, **kwargs): \"\"\" Loads settings from conf, then instantiates daemon", "called very frequently on the instance of the daemon held", "which can reset worker arguments. Implementations of Daemon do not", "True def _run_inline(self, once=False, **kwargs): \"\"\"Run the daemon\"\"\" self.daemon.run(once=once, **kwargs)", "DaemonStrategy(object): \"\"\" This is the execution strategy for using subclasses", "The length of elements returned from this method will determine", "IndexError: return yield per_worker_options def spawned_pids(self): return list(self.options_by_pid.keys()) def register_worker_start(self,", "def run(self, once=False, **kwargs): \"\"\"Daemonize and execute our strategy\"\"\" self.setup(**kwargs)", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "Daemon returns a non-empty iterable from :meth:`Daemon.get_worker_args`, the daemon's :meth:`Daemon.run`", "self._run(once=once, **kwargs) except KeyboardInterrupt: self.logger.notice('User quit') finally: self.cleanup() self.running =", "is_healthy(self): \"\"\" This method is called very frequently on the", "an instance of a :class:`Daemon` (has a `run` method) :param", "section_name is based on the class name # the None", "after fork. The length of elements returned from this method", "non-empty iterable from :meth:`Daemon.get_worker_args`, the daemon's :meth:`Daemon.run` method will be", "to :meth:`run_once` or :meth:`run_forever`. A subclass of Daemon must implement", "executed in once mode. :param daemon: an instance of a", "may not use this file except in compliance with the", "the specified ``once`` kwarg. The section_name will be derived from", "this method will determine the number of processes created. 
If", "signal.SIGTERM) os._exit(0) signal.signal(signal.SIGTERM, kill_children) self.running = True def _run_inline(self, once=False,", "once: False if the worker(s) will be daemonized, True if", "= daemon self.logger = logger self.running = False # only", "= os.waitpid(p, os.WNOHANG) except OSError as err: if err.errno not", "as err: if err.errno not in (errno.ESRCH, errno.EINTR, errno.ECHILD): raise", "finished', os.getpid()) # do not return from this stack, nor", "# Ensure TZ environment variable exists to avoid stat('/etc/localtime') on", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "the parent process. If it returns False, all child workers", "The section_name will be derived from the daemon ``klass`` if", "this file except in compliance with the License. # You", "not return from this stack, nor execute any finally blocks", "OSError as err: if err.errno not in (errno.ESRCH, errno.EINTR, errno.ECHILD):", "For each worker yield a (possibly empty) dict of kwargs", "wellness checks which can reset worker arguments. Implementations of Daemon", "cleanup(self): for p in self.spawned_pids(): try: os.kill(p, signal.SIGTERM) except OSError", "then instantiates daemon ``klass`` and runs the daemon with the", "errno import os import sys import time import signal from", "it was executed in once mode. :param daemon: an instance", "kwargs to be passed to a single worker's :meth:`run` method", "each element represents the kwargs to be passed to a", "def run_once(self, *args, **kwargs): \"\"\"Override this to run the script", "nor execute any finally blocks os._exit(0) else: self.register_worker_start(pid, kwargs) return", ":class:`Daemon` (has a `run` method) :param logger: a logger instance", "errno.ECHILD): raise self.register_worker_exit(p) self.logger.debug('Cleaned up worker %s', p) def run_daemon(klass,", "of 1. sys.exit(e) use_hub(utils.get_hub()) # once on command line (i.e.", "checks which can reset worker arguments. 
Implementations of Daemon do", "forever\"\"\" raise NotImplementedError('run_forever not implemented') def run(self, once=False, **kwargs): if", "to configuration file :param section_name: Section name from conf file", "stderr # and results in an exit code of 1.", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "\"\"\" return [] def is_healthy(self): \"\"\" This method is called", "the Daemon returns a non-empty iterable from :meth:`Daemon.get_worker_args`, the daemon's", ":param logger: a logger instance \"\"\" def __init__(self, daemon, logger):", "return from this stack, nor execute any finally blocks os._exit(0)", "from :param once: Passed to daemon :meth:`Daemon.run` method \"\"\" #", "if the Daemon returns a non-empty iterable from :meth:`Daemon.get_worker_args`, the", "# # Licensed under the Apache License, Version 2.0 (the", "pid, per_worker_options) self.options_by_pid[pid] = per_worker_options def register_worker_exit(self, pid): self.unspawned_worker_options.append(self.options_by_pid.pop(pid)) def", "= logger self.running = False # only used by multi-worker", "the daemon with the specified ``once`` kwarg. The section_name will", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "method after fork. The length of elements returned from this", "def get_worker_args(self, once=False, **kwargs): \"\"\" For each worker yield a", "through via command line argparser :returns: an iterable of dicts,", "should continue to run \"\"\" return True class DaemonStrategy(object): \"\"\"", "__init__(self, conf): self.conf = conf self.logger = utils.get_logger(conf, log_route='daemon') def", "default behavior is to invoke the daemon's :meth:`Daemon.run` method from", "invoked in child processes, with the arguments provided from the", "the execution strategy for using subclasses of Daemon. The default", "same options, unless it was executed in once mode. 
:param", "This method is called in the parent process. This is", "def register_worker_exit(self, pid): self.unspawned_worker_options.append(self.options_by_pid.pop(pid)) def ask_daemon_to_prepare_workers(self, once, **kwargs): self.unspawned_worker_options =", "if not once: for per_worker_options in self.iter_unspawned_workers(): if self._fork(once, **per_worker_options)", "specified ``once`` kwarg. The section_name will be derived from the", "conf self.logger = utils.get_logger(conf, log_route='daemon') def run_once(self, *args, **kwargs): \"\"\"Override", "fork. \"\"\" return [] def is_healthy(self): \"\"\" This method is", "once: for per_worker_options in self.iter_unspawned_workers(): if self._fork(once, **per_worker_options) == 0:", "not self.spawned_pids(): self.logger.notice('Finished %s', os.getpid()) break time.sleep(0.1) self.daemon.post_multiprocess_run() return 0", "sys.exit(e) use_hub(utils.get_hub()) # once on command line (i.e. daemonize=false) will", "probably only useful for run-once mode since there is no", "self.options_by_pid[pid] = per_worker_options def register_worker_exit(self, pid): self.unspawned_worker_options.append(self.options_by_pid.pop(pid)) def ask_daemon_to_prepare_workers(self, once,", "desired utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \\ utils.config_fallocate_value(conf.get('fallocate_reserve', '1%')) # By default,", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "Override this to do something after running using multiple worker", "utils.get_logger(conf, log_route='daemon') def run_once(self, *args, **kwargs): \"\"\"Override this to run", "**kwargs) def kill_children(*args): self.running = False self.logger.info('SIGTERM received') signal.signal(signal.SIGTERM, signal.SIG_IGN)", "conf = utils.readconf(conf_file, section_name, log_name=kwargs.get('log_name')) except (ValueError, IOError) as e:", "be derived from the daemon ``klass`` if not provided (e.g.", "**kwargs): \"\"\"Override this to run the 
script once\"\"\" raise NotImplementedError('run_once", "else: self.register_worker_start(pid, kwargs) return pid def iter_unspawned_workers(self): while True: try:", "passed through to readconf as is if section_name == '':", "a run method that accepts a ``once`` kwarg and will", "os.fork() if pid == 0: signal.signal(signal.SIGHUP, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL) self.daemon.run(once,", "errno.EINTR, errno.ECHILD): raise self.register_worker_exit(p) self.logger.debug('Cleaned up worker %s', p) def", "of dicts, each element represents the kwargs to be passed", "or not utils.config_true_value(conf.get('daemonize', 'true')) # pre-configure logger if 'logger' in", "process's instance of the daemon. If a child process exits", "once or not utils.config_true_value(conf.get('daemonize', 'true')) # pre-configure logger if 'logger'", "to run-inline strategy. :param once: False if the worker(s) will", "# only used by multi-worker strategy self.options_by_pid = {} self.unspawned_worker_options", "OpenStack Foundation # # Licensed under the Apache License, Version", "workers will be created. :returns: a boolean, True only if", "raise NotImplementedError('run_once not implemented') def run_forever(self, *args, **kwargs): \"\"\"Override this", "daemon's :meth:`Daemon.run` method from within the parent process. When the", "= utils.get_logger(conf, log_route='daemon') def run_once(self, *args, **kwargs): \"\"\"Override this to", "override :meth:`get_worker_args` to dispatch arguments to individual child process workers", "signal.signal(signal.SIGTERM, signal.SIG_DFL) self.daemon.run(once, **kwargs) self.logger.debug('Forked worker %s finished', os.getpid()) #", "language governing permissions and # limitations under the License. 
import", "daemonized workers, they simply provide the behavior of the daemon", "== 0: signal.signal(signal.SIGHUP, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL) self.daemon.run(once, **kwargs) self.logger.debug('Forked worker", "context specific knowledge about how workers should be started. \"\"\"", "utils.validate_configuration() utils.drop_privileges(self.daemon.conf.get('user', 'swift')) utils.clean_up_daemon_hygiene() utils.capture_stdio(self.logger, **kwargs) def kill_children(*args): self.running =", "not in (errno.ESRCH, errno.EINTR, errno.ECHILD): raise self.register_worker_exit(p) self.logger.debug('Cleaned up worker", "import sub import eventlet.debug from eventlet.hubs import use_hub from swift.common", "child workers are terminated, and new workers will be created.", "in self.iter_unspawned_workers(): if self._fork(once, **per_worker_options) == 0: return 0 while", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "finally blocks os._exit(0) else: self.register_worker_start(pid, kwargs) return pid def iter_unspawned_workers(self):", "the same options, unless it was executed in once mode.", "permissions and # limitations under the License. import errno import", "in an exit code of 1. 
sys.exit(e) use_hub(utils.get_hub()) # once", "DaemonStrategy(klass(conf), logger).run(once=once, **kwargs) except KeyboardInterrupt: logger.info('User quit') logger.notice('Exited %s', os.getpid())", "try: pid, status = os.waitpid(p, os.WNOHANG) except OSError as err:", "once=False, **kwargs): \"\"\"Daemonize and execute our strategy\"\"\" self.setup(**kwargs) try: self._run(once=once,", "self.register_worker_start(pid, kwargs) return pid def iter_unspawned_workers(self): while True: try: per_worker_options", "*args, **kwargs): \"\"\"Override this to run forever\"\"\" raise NotImplementedError('run_forever not", "once = once or not utils.config_true_value(conf.get('daemonize', 'true')) # pre-configure logger", "locks in reported times to UTC. os.environ['TZ'] = 'UTC+0' time.tzset()", "os._exit(0) else: self.register_worker_start(pid, kwargs) return pid def iter_unspawned_workers(self): while True:", "self.spawned_pids(): try: os.kill(p, signal.SIGTERM) except OSError as err: if err.errno", "Path to configuration file :param section_name: Section name from conf", "if self._fork(once, **per_worker_options) == 0: return 0 else: if not", "# and results in an exit code of 1. 
sys.exit(e)", "= utils.config_true_value(conf.get('eventlet_debug', 'no')) eventlet.debug.hub_exceptions(eventlet_debug) # Ensure TZ environment variable exists", "self.running = False # only used by multi-worker strategy self.options_by_pid", "True: try: per_worker_options = self.unspawned_worker_options.pop() except IndexError: return yield per_worker_options", "yield per_worker_options def spawned_pids(self): return list(self.options_by_pid.keys()) def register_worker_start(self, pid, per_worker_options):", "boolean, True only if all workers should continue to run", "used by multi-worker strategy self.options_by_pid = {} self.unspawned_worker_options = []", "specific language governing permissions and # limitations under the License.", "kwargs: logger = kwargs.pop('logger') else: logger = utils.get_logger(conf, conf.get('log_name', section_name),", "=> object-replicator). :param klass: Class to instantiate, subclass of :class:`Daemon`", "os import sys import time import signal from re import", "try: DaemonStrategy(klass(conf), logger).run(once=once, **kwargs) except KeyboardInterrupt: logger.info('User quit') logger.notice('Exited %s',", "self.unspawned_worker_options = [] def setup(self, **kwargs): utils.validate_configuration() utils.drop_privileges(self.daemon.conf.get('user', 'swift')) utils.clean_up_daemon_hygiene()", "to run the script once\"\"\" raise NotImplementedError('run_once not implemented') def", "held by the parent process. If it returns False, all", "runs the daemon with the specified ``once`` kwarg. 
The section_name", "signal.signal(signal.SIGTERM, kill_children) self.running = True def _run_inline(self, once=False, **kwargs): \"\"\"Run", "from this stack, nor execute any finally blocks os._exit(0) else:", "p) else: if pid == 0: # child still running", "``klass`` and runs the daemon with the specified ``once`` kwarg.", "os._exit(0) signal.signal(signal.SIGTERM, kill_children) self.running = True def _run_inline(self, once=False, **kwargs):", ":meth:`Daemon.run` method from within the parent process. When the :meth:`Daemon.run`", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "is probably only useful for run-once mode since there is", "spawned_pids(self): return list(self.options_by_pid.keys()) def register_worker_start(self, pid, per_worker_options): self.logger.debug('Spawned worker %s", "reset worker arguments. Implementations of Daemon do not know *how*", "settings from conf, then instantiates daemon ``klass`` and runs the", "eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no')) eventlet.debug.hub_exceptions(eventlet_debug) # Ensure TZ environment variable", "to be passed to a single worker's :meth:`run` method after", "that accepts a ``once`` kwarg and will dispatch to :meth:`run_once`", ":meth:`Daemon.run` method returns the parent process will exit. 
However, if", "pid == 0: signal.signal(signal.SIGHUP, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL) self.daemon.run(once, **kwargs) self.logger.debug('Forked", "continue to run \"\"\" return True class DaemonStrategy(object): \"\"\" This", "set utils.FALLOCATE_RESERVE if desired utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \\ utils.config_fallocate_value(conf.get('fallocate_reserve', '1%'))", "multiple daemonized workers, they simply provide the behavior of the", "= 'UTC+0' time.tzset() logger.notice('Starting %s', os.getpid()) try: DaemonStrategy(klass(conf), logger).run(once=once, **kwargs)", "a non-empty iterable from :meth:`Daemon.get_worker_args`, the daemon's :meth:`Daemon.run` method will", "\"\"\" Loads settings from conf, then instantiates daemon ``klass`` and", "else: self.run_forever(**kwargs) def post_multiprocess_run(self): \"\"\" Override this to do something", "will be created. :returns: a boolean, True only if all", "to perform context specific periodic wellness checks which can reset", "import errno import os import sys import time import signal", "(the \"License\"); # you may not use this file except", "time import signal from re import sub import eventlet.debug from", "method that accepts a ``once`` kwarg and will dispatch to", "exits it will be restarted with the same options, unless", "created. 
class DaemonStrategy(object):
    """
    This is the execution strategy for using subclasses of Daemon.  The
    default behavior is to invoke the daemon's :meth:`Daemon.run` method from
    within the parent process.  When the :meth:`Daemon.run` method returns the
    parent process will exit.

    However, if the Daemon returns a non-empty iterable from
    :meth:`Daemon.get_worker_args`, the daemon's :meth:`Daemon.run` method
    will be invoked in child processes, with the arguments provided from the
    parent process's instance of the daemon.  If a child process exits it will
    be restarted with the same options, unless it was executed in once mode.

    :param daemon: an instance of a :class:`Daemon` (has a `run` method)
    :param logger: a logger instance
    """

    def __init__(self, daemon, logger):
        self.daemon = daemon
        self.logger = logger
        self.running = False
        # only used by multi-worker strategy
        self.options_by_pid = {}
        self.unspawned_worker_options = []

    def setup(self, **kwargs):
        """Drop privileges, capture stdio and install the SIGTERM handler."""
        utils.validate_configuration()
        utils.drop_privileges(self.daemon.conf.get('user', 'swift'))
        utils.clean_up_daemon_hygiene()
        utils.capture_stdio(self.logger, **kwargs)

        def kill_children(*args):
            # Propagate SIGTERM to the whole process group, then exit hard.
            self.running = False
            self.logger.info('SIGTERM received')
            signal.signal(signal.SIGTERM, signal.SIG_IGN)
            os.killpg(0, signal.SIGTERM)
            os._exit(0)

        signal.signal(signal.SIGTERM, kill_children)
        self.running = True

    def _run_inline(self, once=False, **kwargs):
        """Run the daemon"""
        self.daemon.run(once=once, **kwargs)

    def run(self, once=False, **kwargs):
        """Daemonize and execute our strategy"""
        self.setup(**kwargs)
        try:
            self._run(once=once, **kwargs)
        except KeyboardInterrupt:
            self.logger.notice('User quit')
        finally:
            self.cleanup()
        self.running = False

    def _fork(self, once, **kwargs):
        # Returns the child's pid in the parent; the child never returns.
        pid = os.fork()
        if pid == 0:
            signal.signal(signal.SIGHUP, signal.SIG_DFL)
            signal.signal(signal.SIGTERM, signal.SIG_DFL)

            self.daemon.run(once, **kwargs)

            self.logger.debug('Forked worker %s finished', os.getpid())
            # do not return from this stack, nor execute any finally blocks
            os._exit(0)
        else:
            self.register_worker_start(pid, kwargs)
        return pid

    def iter_unspawned_workers(self):
        """Yield (and consume) pending per-worker option dicts."""
        while self.unspawned_worker_options:
            yield self.unspawned_worker_options.pop()

    def spawned_pids(self):
        """Return the pids of all workers currently tracked."""
        return [worker_pid for worker_pid in self.options_by_pid]

    def register_worker_start(self, pid, per_worker_options):
        self.logger.debug('Spawned worker %s with %r', pid, per_worker_options)
        self.options_by_pid[pid] = per_worker_options

    def register_worker_exit(self, pid):
        # Recycle the dead worker's options so a replacement can be spawned.
        self.unspawned_worker_options.append(self.options_by_pid.pop(pid))

    def ask_daemon_to_prepare_workers(self, once, **kwargs):
        self.unspawned_worker_options = list(
            self.daemon.get_worker_args(once=once, **kwargs))

    def abort_workers_if_daemon_would_like(self):
        if not self.daemon.is_healthy():
            self.logger.debug(
                'Daemon needs to change options, aborting workers')
            self.cleanup()
            return True
        return False

    def check_on_all_running_workers(self):
        """Reap exited workers and queue their options for re-spawn."""
        for child in self.spawned_pids():
            try:
                waited_pid, _status = os.waitpid(child, os.WNOHANG)
            except OSError as err:
                if err.errno not in (errno.EINTR, errno.ECHILD):
                    raise
                self.logger.notice('Worker %s died', child)
            else:
                if waited_pid == 0:
                    # child still running
                    continue
                self.logger.debug('Worker %s exited', child)
            self.register_worker_exit(child)

    def _run(self, once, **kwargs):
        self.ask_daemon_to_prepare_workers(once, **kwargs)
        if not self.unspawned_worker_options:
            # No worker args at all: just run in this process.
            return self._run_inline(once, **kwargs)
        for options in self.iter_unspawned_workers():
            if self._fork(once, **options) == 0:
                return 0
        while self.running:
            if self.abort_workers_if_daemon_would_like():
                self.ask_daemon_to_prepare_workers(once, **kwargs)
            self.check_on_all_running_workers()
            if not once:
                # daemon mode: keep the worker pool topped up
                for options in self.iter_unspawned_workers():
                    if self._fork(once, **options) == 0:
                        return 0
            elif not self.spawned_pids():
                self.logger.notice('Finished %s', os.getpid())
                break
            time.sleep(0.1)
        self.daemon.post_multiprocess_run()
        return 0

    def cleanup(self):
        """SIGTERM every tracked worker and recycle its options."""
        for child in self.spawned_pids():
            try:
                os.kill(child, signal.SIGTERM)
            except OSError as err:
                if err.errno not in (errno.ESRCH, errno.EINTR, errno.ECHILD):
                    raise
            self.register_worker_exit(child)
            self.logger.debug('Cleaned up worker %s', child)
def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
    """
    Loads settings from conf, then instantiates daemon ``klass`` and runs the
    daemon with the specified ``once`` kwarg.  The section_name will be
    derived from the daemon ``klass`` if not provided (e.g. ObjectReplicator
    => object-replicator).

    :param klass: Class to instantiate, subclass of :class:`Daemon`
    :param conf_file: Path to configuration file
    :param section_name: Section name from conf file to load config from
    :param once: Passed to daemon :meth:`Daemon.run` method
    """
    # very often the config section_name is based on the class name
    # the None singleton will be passed through to readconf as is
    if section_name == '':
        section_name = sub(r'([a-z])([A-Z])', r'\1-\2',
                           klass.__name__).lower()
    try:
        conf = utils.readconf(conf_file, section_name,
                              log_name=kwargs.get('log_name'))
    except (ValueError, IOError) as e:
        # The message will be printed to stderr
        # and results in an exit code of 1.
        sys.exit(e)

    use_hub(utils.get_hub())

    # once on command line (i.e. daemonize=false) will over-ride config
    once = once or not utils.config_true_value(conf.get('daemonize', 'true'))

    # pre-configure logger
    try:
        logger = kwargs.pop('logger')
    except KeyError:
        logger = utils.get_logger(conf, conf.get('log_name', section_name),
                                  log_to_console=kwargs.pop('verbose', False),
                                  log_route=section_name)

    # optional nice/ionice priority scheduling
    utils.modify_priority(conf, logger)

    # disable fallocate if desired
    if utils.config_true_value(conf.get('disable_fallocate', 'no')):
        utils.disable_fallocate()
    # set utils.FALLOCATE_RESERVE if desired
    utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
        utils.config_fallocate_value(conf.get('fallocate_reserve', '1%'))

    # By default, disable eventlet printing stacktraces
    debug_enabled = utils.config_true_value(conf.get('eventlet_debug', 'no'))
    eventlet.debug.hub_exceptions(debug_enabled)

    # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
    # some platforms. This locks in reported times to UTC.
    os.environ['TZ'] = 'UTC+0'
    time.tzset()

    logger.notice('Starting %s', os.getpid())
    try:
        DaemonStrategy(klass(conf), logger).run(once=once, **kwargs)
    except KeyboardInterrupt:
        logger.info('User quit')
    logger.notice('Exited %s', os.getpid())
\"\"\" def", "import os import sys import time import signal from re", "be created. :returns: a boolean, True only if all workers", "line (i.e. daemonize=false) will over-ride config once = once or", "utils.get_logger(conf, conf.get('log_name', section_name), log_to_console=kwargs.pop('verbose', False), log_route=section_name) # optional nice/ionice priority", "run_once(self, *args, **kwargs): \"\"\"Override this to run the script once\"\"\"", "class A daemon has a run method that accepts a", "\"\"\" Override this to do something after running using multiple", "new workers will be created. :returns: a boolean, True only", "arguments provided from the parent process's instance of the daemon.", "config once = once or not utils.config_true_value(conf.get('daemonize', 'true')) # pre-configure", "be printed to stderr # and results in an exit", "import use_hub from swift.common import utils class Daemon(object): \"\"\" Daemon", "pid, per_worker_options): self.logger.debug('Spawned worker %s with %r', pid, per_worker_options) self.options_by_pid[pid]", "the License for the specific language governing permissions and #", "kill_children) self.running = True def _run_inline(self, once=False, **kwargs): \"\"\"Run the", "exists to avoid stat('/etc/localtime') on # some platforms. 
This locks", "self.daemon.run(once, **kwargs) self.logger.debug('Forked worker %s finished', os.getpid()) # do not", "argparser :returns: an iterable of dicts, each element represents the", "Apache License, Version 2.0 (the \"License\"); # you may not", "Class to instantiate, subclass of :class:`Daemon` :param conf_file: Path to", "%s', os.getpid()) break time.sleep(0.1) self.daemon.post_multiprocess_run() return 0 def cleanup(self): for", "# pre-configure logger if 'logger' in kwargs: logger = kwargs.pop('logger')", "once :param kwargs: plumbed through via command line argparser :returns:", "the worker(s) will be run once :param kwargs: plumbed through", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "not know *how* to daemonize, or execute multiple daemonized workers,", "**kwargs)) def abort_workers_if_daemon_would_like(self): if not self.daemon.is_healthy(): self.logger.debug( 'Daemon needs to", "while True: try: per_worker_options = self.unspawned_worker_options.pop() except IndexError: return yield", "'Daemon needs to change options, aborting workers') self.cleanup() return True", "workers and :meth:`is_healthy` to perform context specific periodic wellness checks", "%s died', p) else: if pid == 0: # child", "running continue self.logger.debug('Worker %s exited', p) self.register_worker_exit(p) def _run(self, once,", "\"\"\" # very often the config section_name is based on", "something after running using multiple worker processes. This method is", "'no')): utils.disable_fallocate() # set utils.FALLOCATE_RESERVE if desired utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT =", "self._fork(once, **per_worker_options) == 0: return 0 while self.running: if self.abort_workers_if_daemon_would_like():", "eventlet.debug.hub_exceptions(eventlet_debug) # Ensure TZ environment variable exists to avoid stat('/etc/localtime')", "variable exists to avoid stat('/etc/localtime') on # some platforms. 
This", "self.register_worker_exit(p) self.logger.debug('Cleaned up worker %s', p) def run_daemon(klass, conf_file, section_name='',", "file to load config from :param once: Passed to daemon", "run(self, once=False, **kwargs): \"\"\"Daemonize and execute our strategy\"\"\" self.setup(**kwargs) try:", "to a single worker's :meth:`run` method after fork. \"\"\" return", "limitations under the License. import errno import os import sys", "a (possibly empty) dict of kwargs to pass along to", "%s exited', p) self.register_worker_exit(p) def _run(self, once, **kwargs): self.ask_daemon_to_prepare_workers(once, **kwargs)", "self.unspawned_worker_options = list( self.daemon.get_worker_args(once=once, **kwargs)) def abort_workers_if_daemon_would_like(self): if not self.daemon.is_healthy():", "def _fork(self, once, **kwargs): pid = os.fork() if pid ==", "priority scheduling utils.modify_priority(conf, logger) # disable fallocate if desired if", "if once: self.run_once(**kwargs) else: self.run_forever(**kwargs) def post_multiprocess_run(self): \"\"\" Override this", "Ensure TZ environment variable exists to avoid stat('/etc/localtime') on #", "the parent process. This is probably only useful for run-once", "by the parent process. 
If it returns False, all child", "try: os.kill(p, signal.SIGTERM) except OSError as err: if err.errno not", "iterable is empty, the Strategy will fallback to run-inline strategy.", "with the arguments provided from the parent process's instance of", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "a logger instance \"\"\" def __init__(self, daemon, logger): self.daemon =", "it will be restarted with the same options, unless it", "= sub(r'([a-z])([A-Z])', r'\\1-\\2', klass.__name__).lower() try: conf = utils.readconf(conf_file, section_name, log_name=kwargs.get('log_name'))", ":param once: False if the worker(s) will be daemonized, True", "self.daemon.post_multiprocess_run() return 0 def cleanup(self): for p in self.spawned_pids(): try:", "if pid == 0: signal.signal(signal.SIGHUP, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL) self.daemon.run(once, **kwargs)", "p in self.spawned_pids(): try: os.kill(p, signal.SIGTERM) except OSError as err:", "an iterable of dicts, each element represents the kwargs to", "along to the daemon's :meth:`run` method after fork. 
The length", "import eventlet.debug from eventlet.hubs import use_hub from swift.common import utils", "Daemon do not know *how* to daemonize, or execute multiple", "in self.spawned_pids(): try: pid, status = os.waitpid(p, os.WNOHANG) except OSError", "by multi-worker strategy self.options_by_pid = {} self.unspawned_worker_options = [] def", "r'\\1-\\2', klass.__name__).lower() try: conf = utils.readconf(conf_file, section_name, log_name=kwargs.get('log_name')) except (ValueError,", "daemonize=false) will over-ride config once = once or not utils.config_true_value(conf.get('daemonize',", "if utils.config_true_value(conf.get('disable_fallocate', 'no')): utils.disable_fallocate() # set utils.FALLOCATE_RESERVE if desired utils.FALLOCATE_RESERVE,", "will be passed through to readconf as is if section_name", "'true')) # pre-configure logger if 'logger' in kwargs: logger =", "sub import eventlet.debug from eventlet.hubs import use_hub from swift.common import", "def run_forever(self, *args, **kwargs): \"\"\"Override this to run forever\"\"\" raise", "can reset worker arguments. Implementations of Daemon do not know", "**kwargs): utils.validate_configuration() utils.drop_privileges(self.daemon.conf.get('user', 'swift')) utils.clean_up_daemon_hygiene() utils.capture_stdio(self.logger, **kwargs) def kill_children(*args): self.running", "utils.modify_priority(conf, logger) # disable fallocate if desired if utils.config_true_value(conf.get('disable_fallocate', 'no')):", "status = os.waitpid(p, os.WNOHANG) except OSError as err: if err.errno", "the parent process's instance of the daemon. If a child", "= [] def setup(self, **kwargs): utils.validate_configuration() utils.drop_privileges(self.daemon.conf.get('user', 'swift')) utils.clean_up_daemon_hygiene() utils.capture_stdio(self.logger,", "for p in self.spawned_pids(): try: os.kill(p, signal.SIGTERM) except OSError as", "# once on command line (i.e. 
daemonize=false) will over-ride config", "(errno.ESRCH, errno.EINTR, errno.ECHILD): raise self.register_worker_exit(p) self.logger.debug('Cleaned up worker %s', p)", "= per_worker_options def register_worker_exit(self, pid): self.unspawned_worker_options.append(self.options_by_pid.pop(pid)) def ask_daemon_to_prepare_workers(self, once, **kwargs):", "KIND, either express or # implied. # See the License", "def setup(self, **kwargs): utils.validate_configuration() utils.drop_privileges(self.daemon.conf.get('user', 'swift')) utils.clean_up_daemon_hygiene() utils.capture_stdio(self.logger, **kwargs) def", "started. \"\"\" def __init__(self, conf): self.conf = conf self.logger =", "False), log_route=section_name) # optional nice/ionice priority scheduling utils.modify_priority(conf, logger) #", "or :meth:`run_forever`. A subclass of Daemon must implement :meth:`run_once` and", "printed to stderr # and results in an exit code", "__init__(self, daemon, logger): self.daemon = daemon self.logger = logger self.running", "in (errno.ESRCH, errno.EINTR, errno.ECHILD): raise self.register_worker_exit(p) self.logger.debug('Cleaned up worker %s',", "'no')) eventlet.debug.hub_exceptions(eventlet_debug) # Ensure TZ environment variable exists to avoid", "\"License\"); # you may not use this file except in", "once=False, **kwargs): if once: self.run_once(**kwargs) else: self.run_forever(**kwargs) def post_multiprocess_run(self): \"\"\"", "created. :returns: a boolean, True only if all workers should", "individual child process workers and :meth:`is_healthy` to perform context specific", "instance of the daemon. 
If a child process exits it", "per_worker_options) self.options_by_pid[pid] = per_worker_options def register_worker_exit(self, pid): self.unspawned_worker_options.append(self.options_by_pid.pop(pid)) def ask_daemon_to_prepare_workers(self,", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "readconf as is if section_name == '': section_name = sub(r'([a-z])([A-Z])',", "this stack, nor execute any finally blocks os._exit(0) else: self.register_worker_start(pid,", "within the parent process. When the :meth:`Daemon.run` method returns the", "%s finished', os.getpid()) # do not return from this stack,", "very often the config section_name is based on the class", "and will dispatch to :meth:`run_once` or :meth:`run_forever`. A subclass of", "# distributed under the License is distributed on an \"AS", "_fork(self, once, **kwargs): pid = os.fork() if pid == 0:", "# Unless required by applicable law or agreed to in", "self.setup(**kwargs) try: self._run(once=once, **kwargs) except KeyboardInterrupt: self.logger.notice('User quit') finally: self.cleanup()", "know *how* to daemonize, or execute multiple daemonized workers, they", "change options, aborting workers') self.cleanup() return True return False def", "once, **kwargs): self.unspawned_worker_options = list( self.daemon.get_worker_args(once=once, **kwargs)) def abort_workers_if_daemon_would_like(self): if", "from swift.common import utils class Daemon(object): \"\"\" Daemon base class", "= False # only used by multi-worker strategy self.options_by_pid =", "continue self.logger.debug('Worker %s exited', p) self.register_worker_exit(p) def _run(self, once, **kwargs):", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "instance of the daemon held by the parent process. 
If", "NotImplementedError('run_once not implemented') def run_forever(self, *args, **kwargs): \"\"\"Override this to", "[] def is_healthy(self): \"\"\" This method is called very frequently", "= False self.logger.info('SIGTERM received') signal.signal(signal.SIGTERM, signal.SIG_IGN) os.killpg(0, signal.SIGTERM) os._exit(0) signal.signal(signal.SIGTERM,", ":meth:`run_once` or :meth:`run_forever`. A subclass of Daemon must implement :meth:`run_once`", "self.ask_daemon_to_prepare_workers(once, **kwargs) self.check_on_all_running_workers() if not once: for per_worker_options in self.iter_unspawned_workers():", "processes created. If the returned iterable is empty, the Strategy", ":meth:`run_forever`. A subclass of Daemon may override :meth:`get_worker_args` to dispatch", "behavior of the daemon and context specific knowledge about how", "script once\"\"\" raise NotImplementedError('run_once not implemented') def run_forever(self, *args, **kwargs):", "only if all workers should continue to run \"\"\" return", "You may obtain a copy of the License at #", "os.getpid()) # do not return from this stack, nor execute", "0: signal.signal(signal.SIGHUP, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL) self.daemon.run(once, **kwargs) self.logger.debug('Forked worker %s", "== 0: return 0 while self.running: if self.abort_workers_if_daemon_would_like(): self.ask_daemon_to_prepare_workers(once, **kwargs)", "**kwargs): self.unspawned_worker_options = list( self.daemon.get_worker_args(once=once, **kwargs)) def abort_workers_if_daemon_would_like(self): if not", "def abort_workers_if_daemon_would_like(self): if not self.daemon.is_healthy(): self.logger.debug( 'Daemon needs to change", "section_name will be derived from the daemon ``klass`` if not", "the daemon's :meth:`run` method after fork. 
The length of elements", "not implemented') def run_forever(self, *args, **kwargs): \"\"\"Override this to run", ":param kwargs: plumbed through via command line argparser :returns: an", "# do not return from this stack, nor execute any", "to run \"\"\" return True class DaemonStrategy(object): \"\"\" This is", "method will determine the number of processes created. If the", "signal.SIGTERM) except OSError as err: if err.errno not in (errno.ESRCH,", "kill_children(*args): self.running = False self.logger.info('SIGTERM received') signal.signal(signal.SIGTERM, signal.SIG_IGN) os.killpg(0, signal.SIGTERM)", "using subclasses of Daemon. The default behavior is to invoke", "derived from the daemon ``klass`` if not provided (e.g. ObjectReplicator", "over-ride config once = once or not utils.config_true_value(conf.get('daemonize', 'true')) #", "to the daemon's :meth:`run` method after fork. The length of", "daemonized, True if the worker(s) will be run once :param", "an exit code of 1. sys.exit(e) use_hub(utils.get_hub()) # once on", "(ValueError, IOError) as e: # The message will be printed", "the Apache License, Version 2.0 (the \"License\"); # you may", ":param daemon: an instance of a :class:`Daemon` (has a `run`", "in self.iter_unspawned_workers(): if self._fork(once, **per_worker_options) == 0: return 0 else:", "self.logger.debug('Cleaned up worker %s', p) def run_daemon(klass, conf_file, section_name='', once=False,", "\\ utils.config_fallocate_value(conf.get('fallocate_reserve', '1%')) # By default, disable eventlet printing stacktraces", "will be run once :param kwargs: plumbed through via command", "self.unspawned_worker_options.pop() except IndexError: return yield per_worker_options def spawned_pids(self): return list(self.options_by_pid.keys())", "knowledge about how workers should be started. 
\"\"\" def __init__(self,", "**per_worker_options) == 0: return 0 while self.running: if self.abort_workers_if_daemon_would_like(): self.ask_daemon_to_prepare_workers(once,", "A subclass of Daemon must implement :meth:`run_once` and :meth:`run_forever`. A", "os.environ['TZ'] = 'UTC+0' time.tzset() logger.notice('Starting %s', os.getpid()) try: DaemonStrategy(klass(conf), logger).run(once=once,", "Loads settings from conf, then instantiates daemon ``klass`` and runs", "logger: a logger instance \"\"\" def __init__(self, daemon, logger): self.daemon", "**kwargs): \"\"\"Override this to run forever\"\"\" raise NotImplementedError('run_forever not implemented')" ]
[ "resource_files.ResourceFiles() # sample use case of getting yamls print(resources.get_yaml(\"Pod\", \"jumpy-shark-gbapp-frontend-844fdccf55-ggkbf\",", "sample use case of getting describe info print(resources.get_logs('mycluster', 'default', \"jumpy-shark-gbapp-frontend-844fdccf55-ggkbf\"))", "yamls print(resources.get_yaml(\"Pod\", \"jumpy-shark-gbapp-frontend-844fdccf55-ggkbf\", \"default\", \"mycluster\")) # sample use case of", "print(resources.get_yaml(\"Pod\", \"jumpy-shark-gbapp-frontend-844fdccf55-ggkbf\", \"default\", \"mycluster\")) # sample use case of getting", "use case of getting events print(resources.get_events('mycluster','default','78abd8c9-ac06-11e9-b68f-0e70a6ce6d3a')) # sample use case", "\"default\", \"mycluster\")) # sample use case of getting events print(resources.get_events('mycluster','default','78abd8c9-ac06-11e9-b68f-0e70a6ce6d3a'))", "# sample use case of getting events print(resources.get_events('mycluster','default','78abd8c9-ac06-11e9-b68f-0e70a6ce6d3a')) # sample", "\"jumpy-shark-gbapp-frontend-844fdccf55-ggkbf\", \"default\", \"mycluster\")) # sample use case of getting events", "sample use case of getting yamls print(resources.get_yaml(\"Pod\", \"jumpy-shark-gbapp-frontend-844fdccf55-ggkbf\", \"default\", \"mycluster\"))", "# sample use case of getting yamls print(resources.get_yaml(\"Pod\", \"jumpy-shark-gbapp-frontend-844fdccf55-ggkbf\", \"default\",", "getting events print(resources.get_events('mycluster','default','78abd8c9-ac06-11e9-b68f-0e70a6ce6d3a')) # sample use case of getting describe", "case of getting yamls print(resources.get_yaml(\"Pod\", \"jumpy-shark-gbapp-frontend-844fdccf55-ggkbf\", \"default\", \"mycluster\")) # sample", "of getting events print(resources.get_events('mycluster','default','78abd8c9-ac06-11e9-b68f-0e70a6ce6d3a')) # sample use case of getting", "of getting yamls print(resources.get_yaml(\"Pod\", \"jumpy-shark-gbapp-frontend-844fdccf55-ggkbf\", \"default\", \"mycluster\")) # sample use", 
"events print(resources.get_events('mycluster','default','78abd8c9-ac06-11e9-b68f-0e70a6ce6d3a')) # sample use case of getting describe info", "sample use case of getting events print(resources.get_events('mycluster','default','78abd8c9-ac06-11e9-b68f-0e70a6ce6d3a')) # sample use", "print(resources.get_events('mycluster','default','78abd8c9-ac06-11e9-b68f-0e70a6ce6d3a')) # sample use case of getting describe info print(resources.get_logs('mycluster',", "\"mycluster\")) # sample use case of getting events print(resources.get_events('mycluster','default','78abd8c9-ac06-11e9-b68f-0e70a6ce6d3a')) #", "resources = resource_files.ResourceFiles() # sample use case of getting yamls", "# sample use case of getting describe info print(resources.get_logs('mycluster', 'default',", "case of getting events print(resources.get_events('mycluster','default','78abd8c9-ac06-11e9-b68f-0e70a6ce6d3a')) # sample use case of", "resource_files resources = resource_files.ResourceFiles() # sample use case of getting", "import resource_files resources = resource_files.ResourceFiles() # sample use case of", "= resource_files.ResourceFiles() # sample use case of getting yamls print(resources.get_yaml(\"Pod\",", "getting yamls print(resources.get_yaml(\"Pod\", \"jumpy-shark-gbapp-frontend-844fdccf55-ggkbf\", \"default\", \"mycluster\")) # sample use case", "use case of getting yamls print(resources.get_yaml(\"Pod\", \"jumpy-shark-gbapp-frontend-844fdccf55-ggkbf\", \"default\", \"mycluster\")) #" ]
[ "api.v1 import make_json_ok_response, SageController, SageMethod from api.v1.fundamentals import helper from", "from api.v1.fundamentals import helper from .auth_controller import AuthController def sage_auth_signup_function(self,", "= AuthController.authenticate_client() if not result: # not successful raise params", "\"\"\" Project: flask-rest Author: <NAME> Description: Handle auth endpoints such", "Handle auth endpoints such as auth/signup, auth/login \"\"\" from api.v1", "refresh token and the access token auth_controller = { 'signup':", "AuthController.authenticate_client() if not result: # not successful raise params #", "auth/signup, auth/login \"\"\" from api.v1 import make_json_ok_response, SageController, SageMethod from", "user = _UserModel(**args) # user has been created user.put() #", "if not result: # not successful user.key.delete() raise params #", "resource, **kwargs): result, params = AuthController.authenticate_client() if not result: #", "the refresh token and the access token auth_controller = {", "such as auth/signup, auth/login \"\"\" from api.v1 import make_json_ok_response, SageController,", "= AuthController.create_unique_for_user(user.key) if not result: # not successful user.key.delete() raise", "the error message else: return params # this holds accesskey", "if not result: # not successful raise params # this", "not successful raise params # this holds the error message", "the user result, params = AuthController.create_unique_for_user(user.key) if not result: #", "token and the access token auth_controller = { 'signup': SageController(sage_auth_signup_function,", "= { 'signup': SageController(sage_auth_signup_function, SageMethod.POST, authenticate=False), 'authenticate': SageController(sage_auth_authenticate_function, SageMethod.POST, authenticate=False)", "auth/login \"\"\" from api.v1 import make_json_ok_response, SageController, SageMethod from api.v1.fundamentals", "# save to get a key for the user result,", 
"api.v1.fundamentals import helper from .auth_controller import AuthController def sage_auth_signup_function(self, resource,", "error message else: return params # this holds accesskey and", "<NAME> Description: Handle auth endpoints such as auth/signup, auth/login \"\"\"", "the error message else: return params # this holds the", "args = helper.parse_args_for_model(_UserModel) user = _UserModel(**args) # user has been", "params # this holds accesskey and refresh token def sage_auth_authenticate_function(self,", "user has been created user.put() # save to get a", "import make_json_ok_response, SageController, SageMethod from api.v1.fundamentals import helper from .auth_controller", "Description: Handle auth endpoints such as auth/signup, auth/login \"\"\" from", "make_json_ok_response, SageController, SageMethod from api.v1.fundamentals import helper from .auth_controller import", "user result, params = AuthController.create_unique_for_user(user.key) if not result: # not", "Project: flask-rest Author: <NAME> Description: Handle auth endpoints such as", "created user.put() # save to get a key for the", "# not successful user.key.delete() raise params # this holds the", "SageMethod from api.v1.fundamentals import helper from .auth_controller import AuthController def", "params = AuthController.authenticate_client() if not result: # not successful raise", "access token auth_controller = { 'signup': SageController(sage_auth_signup_function, SageMethod.POST, authenticate=False), 'authenticate':", "this holds accesskey and refresh token def sage_auth_authenticate_function(self, resource, **kwargs):", "params # this holds the refresh token and the access", "helper from .auth_controller import AuthController def sage_auth_signup_function(self, resource, **kwargs): _UserModel", "\"\"\" from api.v1 import make_json_ok_response, SageController, SageMethod from api.v1.fundamentals import", "result, params = AuthController.authenticate_client() if not result: # not successful", 
"the access token auth_controller = { 'signup': SageController(sage_auth_signup_function, SageMethod.POST, authenticate=False),", "# this holds the error message else: return params #", "to get a key for the user result, params =", "AuthController.create_unique_for_user(user.key) if not result: # not successful user.key.delete() raise params", "= resource.get_account_model() args = helper.parse_args_for_model(_UserModel) user = _UserModel(**args) # user", "user.key.delete() raise params # this holds the error message else:", "helper.parse_args_for_model(_UserModel) user = _UserModel(**args) # user has been created user.put()", "holds accesskey and refresh token def sage_auth_authenticate_function(self, resource, **kwargs): result,", "from api.v1 import make_json_ok_response, SageController, SageMethod from api.v1.fundamentals import helper", "def sage_auth_signup_function(self, resource, **kwargs): _UserModel = resource.get_account_model() args = helper.parse_args_for_model(_UserModel)", "endpoints such as auth/signup, auth/login \"\"\" from api.v1 import make_json_ok_response,", "params # this holds the error message else: return params", "as auth/signup, auth/login \"\"\" from api.v1 import make_json_ok_response, SageController, SageMethod", "= _UserModel(**args) # user has been created user.put() # save", "return params # this holds the refresh token and the", "**kwargs): _UserModel = resource.get_account_model() args = helper.parse_args_for_model(_UserModel) user = _UserModel(**args)", "else: return params # this holds accesskey and refresh token", "sage_auth_authenticate_function(self, resource, **kwargs): result, params = AuthController.authenticate_client() if not result:", "import AuthController def sage_auth_signup_function(self, resource, **kwargs): _UserModel = resource.get_account_model() args", "holds the refresh token and the access token auth_controller =", "resource, **kwargs): _UserModel = resource.get_account_model() args = 
helper.parse_args_for_model(_UserModel) user =", "# this holds accesskey and refresh token def sage_auth_authenticate_function(self, resource,", "{ 'signup': SageController(sage_auth_signup_function, SageMethod.POST, authenticate=False), 'authenticate': SageController(sage_auth_authenticate_function, SageMethod.POST, authenticate=False) }", "for the user result, params = AuthController.create_unique_for_user(user.key) if not result:", "import helper from .auth_controller import AuthController def sage_auth_signup_function(self, resource, **kwargs):", "flask-rest Author: <NAME> Description: Handle auth endpoints such as auth/signup,", "# this holds the refresh token and the access token", "refresh token def sage_auth_authenticate_function(self, resource, **kwargs): result, params = AuthController.authenticate_client()", "and refresh token def sage_auth_authenticate_function(self, resource, **kwargs): result, params =", "message else: return params # this holds the refresh token", "token auth_controller = { 'signup': SageController(sage_auth_signup_function, SageMethod.POST, authenticate=False), 'authenticate': SageController(sage_auth_authenticate_function,", "from .auth_controller import AuthController def sage_auth_signup_function(self, resource, **kwargs): _UserModel =", "message else: return params # this holds accesskey and refresh", "# user has been created user.put() # save to get", "and the access token auth_controller = { 'signup': SageController(sage_auth_signup_function, SageMethod.POST,", "a key for the user result, params = AuthController.create_unique_for_user(user.key) if", "_UserModel(**args) # user has been created user.put() # save to", "# not successful raise params # this holds the error", "holds the error message else: return params # this holds", "result: # not successful raise params # this holds the", "been created user.put() # save to get a key for", "successful user.key.delete() raise params # this holds the error message", "user.put() # 
save to get a key for the user", "auth_controller = { 'signup': SageController(sage_auth_signup_function, SageMethod.POST, authenticate=False), 'authenticate': SageController(sage_auth_authenticate_function, SageMethod.POST,", "else: return params # this holds the refresh token and", "successful raise params # this holds the error message else:", ".auth_controller import AuthController def sage_auth_signup_function(self, resource, **kwargs): _UserModel = resource.get_account_model()", "sage_auth_signup_function(self, resource, **kwargs): _UserModel = resource.get_account_model() args = helper.parse_args_for_model(_UserModel) user", "key for the user result, params = AuthController.create_unique_for_user(user.key) if not", "not result: # not successful user.key.delete() raise params # this", "return params # this holds accesskey and refresh token def", "AuthController def sage_auth_signup_function(self, resource, **kwargs): _UserModel = resource.get_account_model() args =", "error message else: return params # this holds the refresh", "token def sage_auth_authenticate_function(self, resource, **kwargs): result, params = AuthController.authenticate_client() if", "this holds the refresh token and the access token auth_controller", "raise params # this holds the error message else: return", "not result: # not successful raise params # this holds", "SageController, SageMethod from api.v1.fundamentals import helper from .auth_controller import AuthController", "result: # not successful user.key.delete() raise params # this holds", "has been created user.put() # save to get a key", "this holds the error message else: return params # this", "**kwargs): result, params = AuthController.authenticate_client() if not result: # not", "save to get a key for the user result, params", "params = AuthController.create_unique_for_user(user.key) if not result: # not successful user.key.delete()", "_UserModel = resource.get_account_model() args = helper.parse_args_for_model(_UserModel) 
user = _UserModel(**args) #", "accesskey and refresh token def sage_auth_authenticate_function(self, resource, **kwargs): result, params", "resource.get_account_model() args = helper.parse_args_for_model(_UserModel) user = _UserModel(**args) # user has", "= helper.parse_args_for_model(_UserModel) user = _UserModel(**args) # user has been created", "def sage_auth_authenticate_function(self, resource, **kwargs): result, params = AuthController.authenticate_client() if not", "get a key for the user result, params = AuthController.create_unique_for_user(user.key)", "not successful user.key.delete() raise params # this holds the error", "result, params = AuthController.create_unique_for_user(user.key) if not result: # not successful", "Author: <NAME> Description: Handle auth endpoints such as auth/signup, auth/login", "auth endpoints such as auth/signup, auth/login \"\"\" from api.v1 import" ]
[ "[] singleQubit.evolutionMethod = singleQubit.openEvolution singleQubit.calculate = singleQubitDecayCalculate singleQubit.evolve() assert singleQubit.stepCount", "singleQubitDecayCalculate(qub, state, i): populations['excitedAnalytical'].append(excitedPopulation(i*qub.stepSize)) populations['excitedNumerical'].append(state[0, 0]) def test_qubitUnitaryEvolutionFromLiouville(singleQubit): for k", "system dynamics. decayRateSM = rn.random() excitedPopulation = lambda t: 0.5*np.exp(-(0.00001*(decayRateSM+1)*2+1j)*50*t)", "= lambda t: 0.5*np.exp(-(0.00001*(decayRateSM+1)*2+1j)*50*t) populations = {'excitedAnalytical':[], 'excitedNumerical':[]} # this", "t: 0.5*np.exp(-(0.00001*(decayRateSM+1)*2+1j)*50*t) populations = {'excitedAnalytical':[], 'excitedNumerical':[]} # this is used", "it actually is not testing open system dynamics. decayRateSM =", "rn import numpy as np # open system dynamics of", "into the dictionary above. def singleQubitDecayCalculate(qub, state, i): populations['excitedAnalytical'].append(excitedPopulation(i*qub.stepSize)) populations['excitedNumerical'].append(state[0,", "and compare numerical results with the analytical calculations # NOTE", "the analytical calculations # NOTE these are also TUTORIALS of", "numerical and analytical excited state populations into the dictionary above.", "# this is used as the calculate attribute of the", "TUTORIALS of the library, so see the Tutorials for what", "dynamics. decayRateSM = rn.random() excitedPopulation = lambda t: 0.5*np.exp(-(0.00001*(decayRateSM+1)*2+1j)*50*t) populations", "is not testing open system dynamics. decayRateSM = rn.random() excitedPopulation", "# TODO this is an unfinished test. below two tests", "calling Liouville method without giving # any collapse operators. For", "excited state populations # TODO this is an unfinished test.", "excited state populations into the dictionary above. 
def singleQubitDecayCalculate(qub, state,", "2 cases: (i) decay only, and (ii) unitary evolution by", "the singleQubit fixture evolve method calls this at every #", "see the Tutorials for what these are doing and analytical", "doing and analytical # calculations. # currently includes 2 cases:", "random as rn import numpy as np # open system", "and analytical excited state populations into the dictionary above. def", "for what these are doing and analytical # calculations. #", "(ii) unitary evolution by calling Liouville method without giving #", "excitedPopulation = lambda t: 0.5*np.exp(-(0.00001*(decayRateSM+1)*2+1j)*50*t) populations = {'excitedAnalytical':[], 'excitedNumerical':[]} #", "= [] singleQubit.evolutionMethod = singleQubit.openEvolution singleQubit.calculate = singleQubitDecayCalculate singleQubit.evolve() assert", "def test_qubitUnitaryEvolutionFromLiouville(singleQubit): for k in populations: populations[k] = [] singleQubit.evolutionMethod", "evolution by calling Liouville method without giving # any collapse", "It stores both numerical and analytical excited state populations into", "collapse operators. For now, only looks at excited state populations", "analytical excited state populations into the dictionary above. def singleQubitDecayCalculate(qub,", "calculations # NOTE these are also TUTORIALS of the library,", "state populations # TODO this is an unfinished test. below", "tests are the same and it actually is not testing", "singleQubit.evolutionMethod = singleQubit.openEvolution singleQubit.calculate = singleQubitDecayCalculate singleQubit.evolve() assert singleQubit.stepCount ==", "above. def singleQubitDecayCalculate(qub, state, i): populations['excitedAnalytical'].append(excitedPopulation(i*qub.stepSize)) populations['excitedNumerical'].append(state[0, 0]) def test_qubitUnitaryEvolutionFromLiouville(singleQubit):", "calculations. 
# currently includes 2 cases: (i) decay only, and", "so see the Tutorials for what these are doing and", "the library, so see the Tutorials for what these are", "two tests are the same and it actually is not", "test_qubitUnitaryEvolutionFromLiouville(singleQubit): for k in populations: populations[k] = [] singleQubit.evolutionMethod =", "def singleQubitDecayCalculate(qub, state, i): populations['excitedAnalytical'].append(excitedPopulation(i*qub.stepSize)) populations['excitedNumerical'].append(state[0, 0]) def test_qubitUnitaryEvolutionFromLiouville(singleQubit): for", "singleQubit.openEvolution singleQubit.calculate = singleQubitDecayCalculate singleQubit.evolve() assert singleQubit.stepCount == len(populations['excitedNumerical']) def", "unitary evolution by calling Liouville method without giving # any", "only looks at excited state populations # TODO this is", "test_qubitDecay(singleQubit): for k in populations: populations[k] = [] singleQubit.evolutionMethod =", "dynamics of a qubit and compare numerical results with the", "evolve method calls this at every # step of the", "an unfinished test. below two tests are the same and", "the Tutorials for what these are doing and analytical #", "singleQubit.evolve() assert singleQubit.stepCount == len(populations['excitedNumerical']) def test_qubitDecay(singleQubit): for k in", "fixture evolve method calls this at every # step of", "dictionary above. 
def singleQubitDecayCalculate(qub, state, i): populations['excitedAnalytical'].append(excitedPopulation(i*qub.stepSize)) populations['excitedNumerical'].append(state[0, 0]) def", "singleQubit fixture evolve method calls this at every # step", "singleQubitDecayCalculate singleQubit.evolve() assert singleQubit.stepCount == len(populations['excitedNumerical']) def test_qubitDecay(singleQubit): for k", "includes 2 cases: (i) decay only, and (ii) unitary evolution", "def test_qubitDecay(singleQubit): for k in populations: populations[k] = [] singleQubit.evolutionMethod", "populations: populations[k] = [] singleQubit.evolutionMethod = singleQubit.openEvolution singleQubit.calculate = singleQubitDecayCalculate", "# open system dynamics of a qubit and compare numerical", "singleQubit.calculate = singleQubitDecayCalculate singleQubit.evolve() assert singleQubit.stepCount == len(populations['excitedNumerical']) def test_qubitDecay(singleQubit):", "are doing and analytical # calculations. # currently includes 2", "# any collapse operators. For now, only looks at excited", "populations['excitedAnalytical'].append(excitedPopulation(i*qub.stepSize)) populations['excitedNumerical'].append(state[0, 0]) def test_qubitUnitaryEvolutionFromLiouville(singleQubit): for k in populations: populations[k]", "the qubit, and the singleQubit fixture evolve method calls this", "as the calculate attribute of the qubit, and the singleQubit", "method calls this at every # step of the evolution.", "lambda t: 0.5*np.exp(-(0.00001*(decayRateSM+1)*2+1j)*50*t) populations = {'excitedAnalytical':[], 'excitedNumerical':[]} # this is", "state populations into the dictionary above. def singleQubitDecayCalculate(qub, state, i):", "of the evolution. 
It stores both numerical and analytical excited", "state, i): populations['excitedAnalytical'].append(excitedPopulation(i*qub.stepSize)) populations['excitedNumerical'].append(state[0, 0]) def test_qubitUnitaryEvolutionFromLiouville(singleQubit): for k in", "i): populations['excitedAnalytical'].append(excitedPopulation(i*qub.stepSize)) populations['excitedNumerical'].append(state[0, 0]) def test_qubitUnitaryEvolutionFromLiouville(singleQubit): for k in populations:", "both numerical and analytical excited state populations into the dictionary", "{'excitedAnalytical':[], 'excitedNumerical':[]} # this is used as the calculate attribute", "calls this at every # step of the evolution. It", "method without giving # any collapse operators. For now, only", "what these are doing and analytical # calculations. # currently", "populations = {'excitedAnalytical':[], 'excitedNumerical':[]} # this is used as the", "now, only looks at excited state populations # TODO this", "these are doing and analytical # calculations. # currently includes", "singleQubit.stepCount == len(populations['excitedNumerical']) def test_qubitDecay(singleQubit): for k in populations: populations[k]", "'excitedNumerical':[]} # this is used as the calculate attribute of", "calculate attribute of the qubit, and the singleQubit fixture evolve", "for k in populations: populations[k] = [] singleQubit.evolutionMethod = singleQubit.openEvolution", "these are also TUTORIALS of the library, so see the", "of a qubit and compare numerical results with the analytical", "NOTE these are also TUTORIALS of the library, so see", "step of the evolution. It stores both numerical and analytical", "any collapse operators. For now, only looks at excited state", "as rn import numpy as np # open system dynamics", "at excited state populations # TODO this is an unfinished", "every # step of the evolution. 
It stores both numerical", "populations[k] = [] singleQubit.evolutionMethod = singleQubit.openEvolution singleQubit.calculate = singleQubitDecayCalculate singleQubit.evolve()", "currently includes 2 cases: (i) decay only, and (ii) unitary", "populations into the dictionary above. def singleQubitDecayCalculate(qub, state, i): populations['excitedAnalytical'].append(excitedPopulation(i*qub.stepSize))", "# currently includes 2 cases: (i) decay only, and (ii)", "= singleQubitDecayCalculate singleQubit.evolve() assert singleQubit.stepCount == len(populations['excitedNumerical']) def test_qubitDecay(singleQubit): for", "as np # open system dynamics of a qubit and", "evolution. It stores both numerical and analytical excited state populations", "and analytical # calculations. # currently includes 2 cases: (i)", "cases: (i) decay only, and (ii) unitary evolution by calling", "giving # any collapse operators. For now, only looks at", "and the singleQubit fixture evolve method calls this at every", "# calculations. # currently includes 2 cases: (i) decay only,", "by calling Liouville method without giving # any collapse operators.", "the same and it actually is not testing open system", "library, so see the Tutorials for what these are doing", "used as the calculate attribute of the qubit, and the", "np # open system dynamics of a qubit and compare", "0.5*np.exp(-(0.00001*(decayRateSM+1)*2+1j)*50*t) populations = {'excitedAnalytical':[], 'excitedNumerical':[]} # this is used as", "operators. 
For now, only looks at excited state populations #", "import numpy as np # open system dynamics of a", "len(populations['excitedNumerical']) def test_qubitDecay(singleQubit): for k in populations: populations[k] = []", "rn.random() excitedPopulation = lambda t: 0.5*np.exp(-(0.00001*(decayRateSM+1)*2+1j)*50*t) populations = {'excitedAnalytical':[], 'excitedNumerical':[]}", "= {'excitedAnalytical':[], 'excitedNumerical':[]} # this is used as the calculate", "in populations: populations[k] = [] singleQubit.evolutionMethod = singleQubit.openEvolution singleQubit.calculate =", "this at every # step of the evolution. It stores", "# step of the evolution. It stores both numerical and", "is used as the calculate attribute of the qubit, and", "import random as rn import numpy as np # open", "results with the analytical calculations # NOTE these are also", "this is used as the calculate attribute of the qubit,", "only, and (ii) unitary evolution by calling Liouville method without", "populations['excitedNumerical'].append(state[0, 0]) def test_qubitUnitaryEvolutionFromLiouville(singleQubit): for k in populations: populations[k] =", "and it actually is not testing open system dynamics. decayRateSM", "numpy as np # open system dynamics of a qubit", "also TUTORIALS of the library, so see the Tutorials for", "this is an unfinished test. below two tests are the", "populations # TODO this is an unfinished test. below two", "analytical # calculations. # currently includes 2 cases: (i) decay", "unfinished test. below two tests are the same and it", "same and it actually is not testing open system dynamics.", "test. below two tests are the same and it actually", "is an unfinished test. below two tests are the same", "looks at excited state populations # TODO this is an", "Liouville method without giving # any collapse operators. 
For now,", "and (ii) unitary evolution by calling Liouville method without giving", "# NOTE these are also TUTORIALS of the library, so", "of the library, so see the Tutorials for what these", "== len(populations['excitedNumerical']) def test_qubitDecay(singleQubit): for k in populations: populations[k] =", "system dynamics of a qubit and compare numerical results with", "qubit, and the singleQubit fixture evolve method calls this at", "(i) decay only, and (ii) unitary evolution by calling Liouville", "stores both numerical and analytical excited state populations into the", "the calculate attribute of the qubit, and the singleQubit fixture", "the dictionary above. def singleQubitDecayCalculate(qub, state, i): populations['excitedAnalytical'].append(excitedPopulation(i*qub.stepSize)) populations['excitedNumerical'].append(state[0, 0])", "with the analytical calculations # NOTE these are also TUTORIALS", "Tutorials for what these are doing and analytical # calculations.", "0]) def test_qubitUnitaryEvolutionFromLiouville(singleQubit): for k in populations: populations[k] = []", "compare numerical results with the analytical calculations # NOTE these", "analytical calculations # NOTE these are also TUTORIALS of the", "= rn.random() excitedPopulation = lambda t: 0.5*np.exp(-(0.00001*(decayRateSM+1)*2+1j)*50*t) populations = {'excitedAnalytical':[],", "= singleQubit.openEvolution singleQubit.calculate = singleQubitDecayCalculate singleQubit.evolve() assert singleQubit.stepCount == len(populations['excitedNumerical'])", "below two tests are the same and it actually is", "decay only, and (ii) unitary evolution by calling Liouville method", "qubit and compare numerical results with the analytical calculations #", "TODO this is an unfinished test. below two tests are", "open system dynamics. 
decayRateSM = rn.random() excitedPopulation = lambda t:", "decayRateSM = rn.random() excitedPopulation = lambda t: 0.5*np.exp(-(0.00001*(decayRateSM+1)*2+1j)*50*t) populations =", "of the qubit, and the singleQubit fixture evolve method calls", "k in populations: populations[k] = [] singleQubit.evolutionMethod = singleQubit.openEvolution singleQubit.calculate", "are the same and it actually is not testing open", "are also TUTORIALS of the library, so see the Tutorials", "numerical results with the analytical calculations # NOTE these are", "without giving # any collapse operators. For now, only looks", "For now, only looks at excited state populations # TODO", "a qubit and compare numerical results with the analytical calculations", "actually is not testing open system dynamics. decayRateSM = rn.random()", "not testing open system dynamics. decayRateSM = rn.random() excitedPopulation =", "open system dynamics of a qubit and compare numerical results", "testing open system dynamics. decayRateSM = rn.random() excitedPopulation = lambda", "at every # step of the evolution. It stores both", "assert singleQubit.stepCount == len(populations['excitedNumerical']) def test_qubitDecay(singleQubit): for k in populations:", "the evolution. It stores both numerical and analytical excited state", "attribute of the qubit, and the singleQubit fixture evolve method" ]
[ "element in vec: col.append([element]) return col else: return vec def", "row2col(state) if np.shape(state)[1]>1: raise Exception(\"invalid state, not a vector!\") if", "functions from them and dependencies def row2col(vec): if np.ndim(vec)==1: col=[]", "def row2col(vec): if np.ndim(vec)==1: col=[] for element in vec: col.append([element])", "col.append([element]) return col else: return vec def check_state(state): row2col(state) if", "return vec def check_state(state): row2col(state) if np.shape(state)[1]>1: raise Exception(\"invalid state,", "else: return vec def check_state(state): row2col(state) if np.shape(state)[1]>1: raise Exception(\"invalid", "vector state from .density import * #we may use some", "them and dependencies def row2col(vec): if np.ndim(vec)==1: col=[] for element", "row2col(vec): if np.ndim(vec)==1: col=[] for element in vec: col.append([element]) return", "col=[] for element in vec: col.append([element]) return col else: return", "may use some functions from them and dependencies def row2col(vec):", "for element in vec: col.append([element]) return col else: return vec", "check_state(state): row2col(state) if np.shape(state)[1]>1: raise Exception(\"invalid state, not a vector!\")", "* #we may use some functions from them and dependencies", "a vector!\") if schmidt_inner(state,state) !=1: raise Exception(\"invalid state, not normalized!\")", "some functions from them and dependencies def row2col(vec): if np.ndim(vec)==1:", "that related to vector state from .density import * #we", "from .density import * #we may use some functions from", "#we may use some functions from them and dependencies def", "Exception(\"invalid state, not a vector!\") if schmidt_inner(state,state) !=1: raise Exception(\"invalid", "import * #we may use some functions from them and", "and dependencies def row2col(vec): if np.ndim(vec)==1: col=[] for element in", "vec def check_state(state): row2col(state) if np.shape(state)[1]>1: raise Exception(\"invalid state, not", 
"raise Exception(\"invalid state, not a vector!\") if schmidt_inner(state,state) !=1: raise", "np.ndim(vec)==1: col=[] for element in vec: col.append([element]) return col else:", "contain function that related to vector state from .density import", "return col else: return vec def check_state(state): row2col(state) if np.shape(state)[1]>1:", "function that related to vector state from .density import *", "related to vector state from .density import * #we may", "#this file will contain function that related to vector state", "dependencies def row2col(vec): if np.ndim(vec)==1: col=[] for element in vec:", "use some functions from them and dependencies def row2col(vec): if", "if np.shape(state)[1]>1: raise Exception(\"invalid state, not a vector!\") if schmidt_inner(state,state)", "in vec: col.append([element]) return col else: return vec def check_state(state):", "np.shape(state)[1]>1: raise Exception(\"invalid state, not a vector!\") if schmidt_inner(state,state) !=1:", "if np.ndim(vec)==1: col=[] for element in vec: col.append([element]) return col", "to vector state from .density import * #we may use", "file will contain function that related to vector state from", "from them and dependencies def row2col(vec): if np.ndim(vec)==1: col=[] for", "will contain function that related to vector state from .density", "state, not a vector!\") if schmidt_inner(state,state) !=1: raise Exception(\"invalid state,", "vec: col.append([element]) return col else: return vec def check_state(state): row2col(state)", "not a vector!\") if schmidt_inner(state,state) !=1: raise Exception(\"invalid state, not", ".density import * #we may use some functions from them", "state from .density import * #we may use some functions", "def check_state(state): row2col(state) if np.shape(state)[1]>1: raise Exception(\"invalid state, not a", "<reponame>Mohamed-ShehabEldin/QuGraphy #this file will contain function that related to vector", "col else: return vec def check_state(state): row2col(state) 
if np.shape(state)[1]>1: raise" ]
[ "value) 7. Optimisation output 8. Others ?? SB Notes: Not", "is read as a python dictionary in uncoverml which can", "metadata. CreationDate: 31/05/19 Developer: <EMAIL> Revision History: LastUpdate: 31/05/19 FZ", "and path to shapefile is not required as this is", "= ((-10, 100),(-40, 140)) if config.cross_validate and os.path.exists(config.crossval_scores_file): with open(config.crossval_scores_file)", "in each of the output geotif file. Model parameters: 1.", "###########\\n\\n\") conf_str = ppretty(self.config, indent=' ', width=200, seq_length=200, show_protected=True, show_static=True,", "values in self.model_performance_metrics.items(): outf.write(\"%s = %s\\n\"%(keys, values)) outf.write(\"\\n\\n############ Configuration ###########\\n\\n\")", "the model) Model; Name: Type and date: Algorithm: Extent: Lat/long", "shapefile: csv file SB Notes: Only covaraite list file. Targets", "import getpass import socket from ppretty import ppretty import uncoverml", "%s\\n\"%(keys, values)) outf.write(\"\\n\\n############ Configuration ###########\\n\\n\") conf_str = ppretty(self.config, indent=' ',", "- covariate value) 7. Optimisation output 8. Others ?? SB", "', show_protected=True, show_static=True, show_address=False, str_length=50) outf.write(model_str) outf.write(\"\\n\\n############ The End of", "if config.cross_validate and os.path.exists(config.crossval_scores_file): with open(config.crossval_scores_file) as sf: self.model_performance_metrics =", "ppretty import uncoverml class MetadataSummary(): \"\"\" Summary Description of the", "file 6. Raw covariates file (target value - covariate value)", "file. 
Targets and path to shapefile is not required as", "email 2019-05-24 Overview Creator: (person who generated the model) Model;", "outf.write(\"\\n\\n############ Performance Matrics ###########\\n\\n\") if self.model_performance_metrics: for keys, values in", "ppretty(self.model, indent=' ', show_protected=True, show_static=True, show_address=False, str_length=50) self.config = config", "some merit as one can specify partial path. Model performance", "in the yaml file. Model inputs: 1. Covariates - list", "1. Covariates - list (in full) 2. Targets: path to", "dictionary in uncoverml which can be dumped in the metadata.", "#! /usr/bin/env python \"\"\" Description: Gather Metadata for the uncover-ml", "import datetime import getpass import socket from ppretty import ppretty", "= \"Metadata for the ML results\" username = getpass.getuser() hostname", "be dumped in the metadata. CreationDate: 31/05/19 Developer: <EMAIL> Revision", "is required as this information will be captured in the", ".SH file (in full) SB Notes: The .sh file is", "= config self.name = self.config.name # 'demo_regression' self.algorithm = self.config.algorithm", "as this is available in the yaml file. May be", "((-10, 100),(-40, 140)) if config.cross_validate and os.path.exists(config.crossval_scores_file): with open(config.crossval_scores_file) as", "###########\\n\\n\") model_str = ppretty(self.model, indent=' ', show_protected=True, show_static=True, show_address=False, str_length=50)", "the output geotif file. Model parameters: 1. YAML file (in", "open(config.crossval_scores_file) as sf: self.model_performance_metrics = json.load(sf) else: self.model_performance_metrics = None", "keys, values in self.model_performance_metrics.items(): outf.write(\"%s = %s\\n\"%(keys, values)) outf.write(\"\\n\\n############ Configuration", "values)) outf.write(\"\\n\\n############ Configuration ###########\\n\\n\") conf_str = ppretty(self.config, indent=' ', width=200,", "full) 2. 
.SH file (in full) SB Notes: The .sh", "= uncoverml.__version__ model_str = ppretty(self.model, indent=' ', show_protected=True, show_static=True, show_address=False,", "outf: outf.write(\"# Metadata Profile for the Prediction Results\") outf.write(\"\\n\\n############ Software", "of the output geotif file. Model parameters: 1. YAML file", "prediction output \"\"\" def __init__(self, model, config): self.model = model", "traceable and reproduceable (provenance) \"\"\" with open(out_filename, 'w') as outf:", "6. Raw covariates file (target value - covariate value) 7.", "History: LastUpdate: 31/05/19 FZ LastUpdate: dd/mm/yyyy Who Optional description \"\"\"", "rank file 6. Raw covariates file (target value - covariate", "= %s\\n\"%self.version) outf.write(\"Datetime = %s \\n\"%self.datetime) outf.write(\"\\n\\n############ Performance Matrics ###########\\n\\n\")", "contained in each of the output geotif file. Model parameters:", "# 'svr' self.extent = ((-10, 100),(-40, 140)) if config.cross_validate and", "uncover-ml prediction output results: Reference: email 2019-05-24 Overview Creator: (person", "self.computename = hostname self.datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") self.version = uncoverml.__version__", "Yes Model outputs 1. Prediction grid including path 2. Quantiles", "= self.config.algorithm # 'svr' self.extent = ((-10, 100),(-40, 140)) if", "5. Feature rank file 6. Raw covariates file (target value", "Model outputs 1. Prediction grid including path 2. Quantiles Q5;", "class MetadataSummary(): \"\"\" Summary Description of the ML prediction output", "in self.model_performance_metrics.items(): outf.write(\"%s = %s\\n\"%(keys, values)) outf.write(\"\\n\\n############ Configuration ###########\\n\\n\") conf_str", "list file. Targets and path to shapefile is not required", "Description: Gather Metadata for the uncover-ml prediction output results: Reference:", "full) 2. 
Targets: path to shapefile: csv file SB Notes:", "these are model dependent, and the metadata will be contained", "datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") self.version = uncoverml.__version__ model_str = ppretty(self.model, indent=' ',", "to shapefile: csv file SB Notes: Only covaraite list file.", "path. Model performance JSON file (in full) SB Notes: Yes", "outf.write(\"\\n\\n############ Model ###########\\n\\n\") model_str = ppretty(self.model, indent=' ', show_protected=True, show_static=True,", "in uncoverml which can be dumped in the metadata. CreationDate:", "= %s \\n\"%self.datetime) outf.write(\"\\n\\n############ Performance Matrics ###########\\n\\n\") if self.model_performance_metrics: for", "2. .SH file (in full) SB Notes: The .sh file", "Covariates - list (in full) 2. Targets: path to shapefile:", "100),(-40, 140)) if config.cross_validate and os.path.exists(config.crossval_scores_file): with open(config.crossval_scores_file) as sf:", "file. Model inputs: 1. Covariates - list (in full) 2.", "uncoverml.__version__ model_str = ppretty(self.model, indent=' ', show_protected=True, show_static=True, show_address=False, str_length=50)", "8. Others ?? SB Notes: Not required as these are", "Software Environment ###########\\n\\n\") outf.write(\"Creator = %s \\n\"%self.creator) outf.write(\"Computer = %s", "show_address=False, str_length=200) outf.write(conf_str) outf.write(\"\\n\\n############ Model ###########\\n\\n\") model_str = ppretty(self.model, indent='", "(person who generated the model) Model; Name: Type and date:", "str_length=50) self.config = config self.name = self.config.name # 'demo_regression' self.algorithm", "will be contained in each of the output geotif file.", "indent=' ', show_protected=True, show_static=True, show_address=False, str_length=50) outf.write(model_str) outf.write(\"\\n\\n############ The End", "as one can specify partial path. Model performance JSON file", "prediction result, into a human-readable txt file. 
in order to", "as this information will be captured in the yaml file.", "self.model_performance_metrics.items(): outf.write(\"%s = %s\\n\"%(keys, values)) outf.write(\"\\n\\n############ Configuration ###########\\n\\n\") conf_str =", "(in full) 2. .SH file (in full) SB Notes: The", "specify partial path. Model performance JSON file (in full) SB", "Overview Creator: (person who generated the model) Model; Name: Type", "model_str = ppretty(self.model, indent=' ', show_protected=True, show_static=True, show_address=False, str_length=50) self.config", "'w') as outf: outf.write(\"# Metadata Profile for the Prediction Results\")", "a python dictionary in uncoverml which can be dumped in", "import uncoverml class MetadataSummary(): \"\"\" Summary Description of the ML", "def __init__(self, model, config): self.model = model self.description = \"Metadata", "above is required as this information will be captured in", "str_length=200) outf.write(conf_str) outf.write(\"\\n\\n############ Model ###########\\n\\n\") model_str = ppretty(self.model, indent=' ',", "self.config = config self.name = self.config.name # 'demo_regression' self.algorithm =", "###########\\n\\n\") if self.model_performance_metrics: for keys, values in self.model_performance_metrics.items(): outf.write(\"%s =", "%s \\n\"%self.creator) outf.write(\"Computer = %s \\n\"%self.computename) outf.write(\"ML Algorithm = %s", "%s \\n\"%self.algorithm) outf.write(\"Version = %s\\n\"%self.version) outf.write(\"Datetime = %s \\n\"%self.datetime) outf.write(\"\\n\\n############", "= json.load(sf) else: self.model_performance_metrics = None def write_metadata(self, out_filename): \"\"\"", "Optional description \"\"\" # import section import os import sys", "= None def write_metadata(self, out_filename): \"\"\" write the metadata for", "merit as one can specify partial path. 
Model performance JSON", "= hostname self.datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") self.version = uncoverml.__version__ model_str", "\\n\"%self.datetime) outf.write(\"\\n\\n############ Performance Matrics ###########\\n\\n\") if self.model_performance_metrics: for keys, values", "\"\"\" def __init__(self, model, config): self.model = model self.description =", "output \"\"\" def __init__(self, model, config): self.model = model self.description", "Q95 3. Variance: 4. Entropy: 5. Feature rank file 6.", "yaml file. May be the full path to the shapefile", "file (in full) SB Notes: Yes Model outputs 1. Prediction", "which can be dumped in the metadata. CreationDate: 31/05/19 Developer:", "import pickle import datetime import getpass import socket from ppretty", "to make the ML results traceable and reproduceable (provenance) \"\"\"", "The .sh file is not required. YAML file is read", "write_metadata(self, out_filename): \"\"\" write the metadata for this prediction result,", "Extent: Lat/long - location on Australia map? SB Notes: None", "Environment ###########\\n\\n\") outf.write(\"Creator = %s \\n\"%self.creator) outf.write(\"Computer = %s \\n\"%self.computename)", "socket from ppretty import ppretty import uncoverml class MetadataSummary(): \"\"\"", "'svr' self.extent = ((-10, 100),(-40, 140)) if config.cross_validate and os.path.exists(config.crossval_scores_file):", "be contained in each of the output geotif file. Model", "dd/mm/yyyy Who Optional description \"\"\" # import section import os", "map? SB Notes: None of the above is required as", "(target value - covariate value) 7. Optimisation output 8. 
Others", "\"\"\" write the metadata for this prediction result, into a", "outf.write(\"# Metadata Profile for the Prediction Results\") outf.write(\"\\n\\n############ Software Environment", "LastUpdate: dd/mm/yyyy Who Optional description \"\"\" # import section import", "config): self.model = model self.description = \"Metadata for the ML", "will be captured in the yaml file. Model inputs: 1.", "Prediction grid including path 2. Quantiles Q5; Q95 3. Variance:", "outputs 1. Prediction grid including path 2. Quantiles Q5; Q95", "self.version = uncoverml.__version__ model_str = ppretty(self.model, indent=' ', show_protected=True, show_static=True,", "self.algorithm = self.config.algorithm # 'svr' self.extent = ((-10, 100),(-40, 140))", "result, into a human-readable txt file. in order to make", "dependent, and the metadata will be contained in each of", "value - covariate value) 7. Optimisation output 8. Others ??", "who generated the model) Model; Name: Type and date: Algorithm:", "Reference: email 2019-05-24 Overview Creator: (person who generated the model)", "as sf: self.model_performance_metrics = json.load(sf) else: self.model_performance_metrics = None def", "in the yaml file. May be the full path to", "geotif file. Model parameters: 1. YAML file (in full) 2.", "2. Targets: path to shapefile: csv file SB Notes: Only", "for the Prediction Results\") outf.write(\"\\n\\n############ Software Environment ###########\\n\\n\") outf.write(\"Creator =", "2. Quantiles Q5; Q95 3. Variance: 4. Entropy: 5. 
Feature", "model dependent, and the metadata will be contained in each", "python \"\"\" Description: Gather Metadata for the uncover-ml prediction output", "%s\\n\"%self.version) outf.write(\"Datetime = %s \\n\"%self.datetime) outf.write(\"\\n\\n############ Performance Matrics ###########\\n\\n\") if", "\"\"\" with open(out_filename, 'w') as outf: outf.write(\"# Metadata Profile for", "file is read as a python dictionary in uncoverml which", "sys import json import pickle import datetime import getpass import", "Notes: Not required as these are model dependent, and the", "YAML file (in full) 2. .SH file (in full) SB", "outf.write(\"ML Algorithm = %s \\n\"%self.algorithm) outf.write(\"Version = %s\\n\"%self.version) outf.write(\"Datetime =", "results\" username = getpass.getuser() hostname = socket.gethostname() self.creator = username", "of the ML prediction output \"\"\" def __init__(self, model, config):", "socket.gethostname() self.creator = username self.computename = hostname self.datetime = datetime.datetime.now().strftime(\"%Y-%m-%d", "full) SB Notes: Yes Model outputs 1. Prediction grid including", "username self.computename = hostname self.datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") self.version =", "Prediction Results\") outf.write(\"\\n\\n############ Software Environment ###########\\n\\n\") outf.write(\"Creator = %s \\n\"%self.creator)", "human-readable txt file. 
in order to make the ML results", "Profile for the Prediction Results\") outf.write(\"\\n\\n############ Software Environment ###########\\n\\n\") outf.write(\"Creator", "model_str = ppretty(self.model, indent=' ', show_protected=True, show_static=True, show_address=False, str_length=50) outf.write(model_str)", "os import sys import json import pickle import datetime import", "= datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") self.version = uncoverml.__version__ model_str = ppretty(self.model, indent='", "for the uncover-ml prediction output results: Reference: email 2019-05-24 Overview", "the uncover-ml prediction output results: Reference: email 2019-05-24 Overview Creator:", "generated the model) Model; Name: Type and date: Algorithm: Extent:", "', show_protected=True, show_static=True, show_address=False, str_length=50) self.config = config self.name =", "in the metadata. CreationDate: 31/05/19 Developer: <EMAIL> Revision History: LastUpdate:", "required as this information will be captured in the yaml", "show_protected=True, show_static=True, show_address=False, str_length=50) self.config = config self.name = self.config.name", "with open(config.crossval_scores_file) as sf: self.model_performance_metrics = json.load(sf) else: self.model_performance_metrics =", "140)) if config.cross_validate and os.path.exists(config.crossval_scores_file): with open(config.crossval_scores_file) as sf: self.model_performance_metrics", "indent=' ', width=200, seq_length=200, show_protected=True, show_static=True, show_properties=True, show_address=False, str_length=200) outf.write(conf_str)", "required. 
YAML file is read as a python dictionary in", "make the ML results traceable and reproduceable (provenance) \"\"\" with", "to the shapefile has some merit as one can specify", "config.cross_validate and os.path.exists(config.crossval_scores_file): with open(config.crossval_scores_file) as sf: self.model_performance_metrics = json.load(sf)", "the Prediction Results\") outf.write(\"\\n\\n############ Software Environment ###########\\n\\n\") outf.write(\"Creator = %s", "information will be captured in the yaml file. Model inputs:", "MetadataSummary(): \"\"\" Summary Description of the ML prediction output \"\"\"", "(in full) SB Notes: Yes Model outputs 1. Prediction grid", "import os import sys import json import pickle import datetime", "SB Notes: Only covaraite list file. Targets and path to", "the yaml file. May be the full path to the", "file (in full) 2. .SH file (in full) SB Notes:", "= socket.gethostname() self.creator = username self.computename = hostname self.datetime =", "= %s \\n\"%self.algorithm) outf.write(\"Version = %s\\n\"%self.version) outf.write(\"Datetime = %s \\n\"%self.datetime)", "and os.path.exists(config.crossval_scores_file): with open(config.crossval_scores_file) as sf: self.model_performance_metrics = json.load(sf) else:", "uncoverml class MetadataSummary(): \"\"\" Summary Description of the ML prediction", "\"\"\" Summary Description of the ML prediction output \"\"\" def", "file (target value - covariate value) 7. Optimisation output 8.", "the metadata. CreationDate: 31/05/19 Developer: <EMAIL> Revision History: LastUpdate: 31/05/19", "%s \\n\"%self.computename) outf.write(\"ML Algorithm = %s \\n\"%self.algorithm) outf.write(\"Version = %s\\n\"%self.version)", "not required as this is available in the yaml file.", "# 'demo_regression' self.algorithm = self.config.algorithm # 'svr' self.extent = ((-10,", "Quantiles Q5; Q95 3. Variance: 4. Entropy: 5. 
Feature rank", "###########\\n\\n\") outf.write(\"Creator = %s \\n\"%self.creator) outf.write(\"Computer = %s \\n\"%self.computename) outf.write(\"ML", "ML results\" username = getpass.getuser() hostname = socket.gethostname() self.creator =", "= username self.computename = hostname self.datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") self.version", "description \"\"\" # import section import os import sys import", "path to the shapefile has some merit as one can", "show_address=False, str_length=50) self.config = config self.name = self.config.name # 'demo_regression'", "Not required as these are model dependent, and the metadata", "\\n\"%self.creator) outf.write(\"Computer = %s \\n\"%self.computename) outf.write(\"ML Algorithm = %s \\n\"%self.algorithm)", "\"Metadata for the ML results\" username = getpass.getuser() hostname =", "grid including path 2. Quantiles Q5; Q95 3. Variance: 4.", "not required. YAML file is read as a python dictionary", "Developer: <EMAIL> Revision History: LastUpdate: 31/05/19 FZ LastUpdate: dd/mm/yyyy Who", "Metadata Profile for the Prediction Results\") outf.write(\"\\n\\n############ Software Environment ###########\\n\\n\")", "Model inputs: 1. Covariates - list (in full) 2. Targets:", "Metadata for the uncover-ml prediction output results: Reference: email 2019-05-24", "outf.write(\"\\n\\n############ Software Environment ###########\\n\\n\") outf.write(\"Creator = %s \\n\"%self.creator) outf.write(\"Computer =", "Raw covariates file (target value - covariate value) 7. Optimisation", "one can specify partial path. Model performance JSON file (in", "Variance: 4. Entropy: 5. Feature rank file 6. Raw covariates", "1. YAML file (in full) 2. 
.SH file (in full)", "the ML results\" username = getpass.getuser() hostname = socket.gethostname() self.creator", "Type and date: Algorithm: Extent: Lat/long - location on Australia", "sf: self.model_performance_metrics = json.load(sf) else: self.model_performance_metrics = None def write_metadata(self,", "= ppretty(self.model, indent=' ', show_protected=True, show_static=True, show_address=False, str_length=50) self.config =", "else: self.model_performance_metrics = None def write_metadata(self, out_filename): \"\"\" write the", "reproduceable (provenance) \"\"\" with open(out_filename, 'w') as outf: outf.write(\"# Metadata", "is not required. YAML file is read as a python", "1. Prediction grid including path 2. Quantiles Q5; Q95 3.", "Notes: None of the above is required as this information", "= %s \\n\"%self.computename) outf.write(\"ML Algorithm = %s \\n\"%self.algorithm) outf.write(\"Version =", "(in full) 2. Targets: path to shapefile: csv file SB", "read as a python dictionary in uncoverml which can be", "ppretty import ppretty import uncoverml class MetadataSummary(): \"\"\" Summary Description", "SB Notes: Not required as these are model dependent, and", "import section import os import sys import json import pickle", "Algorithm = %s \\n\"%self.algorithm) outf.write(\"Version = %s\\n\"%self.version) outf.write(\"Datetime = %s", "(provenance) \"\"\" with open(out_filename, 'w') as outf: outf.write(\"# Metadata Profile", "self.model = model self.description = \"Metadata for the ML results\"", ".sh file is not required. YAML file is read as", "2019-05-24 Overview Creator: (person who generated the model) Model; Name:", "None of the above is required as this information will", "SB Notes: The .sh file is not required. YAML file", "txt file. in order to make the ML results traceable", "= %s \\n\"%self.creator) outf.write(\"Computer = %s \\n\"%self.computename) outf.write(\"ML Algorithm =", "?? 
SB Notes: Not required as these are model dependent,", "<filename>uncoverml/metadata_profiler.py #! /usr/bin/env python \"\"\" Description: Gather Metadata for the", "can specify partial path. Model performance JSON file (in full)", "yaml file. Model inputs: 1. Covariates - list (in full)", "= getpass.getuser() hostname = socket.gethostname() self.creator = username self.computename =", "self.extent = ((-10, 100),(-40, 140)) if config.cross_validate and os.path.exists(config.crossval_scores_file): with", "Performance Matrics ###########\\n\\n\") if self.model_performance_metrics: for keys, values in self.model_performance_metrics.items():", "31/05/19 Developer: <EMAIL> Revision History: LastUpdate: 31/05/19 FZ LastUpdate: dd/mm/yyyy", "username = getpass.getuser() hostname = socket.gethostname() self.creator = username self.computename", "Only covaraite list file. Targets and path to shapefile is", "dumped in the metadata. CreationDate: 31/05/19 Developer: <EMAIL> Revision History:", "covaraite list file. Targets and path to shapefile is not", "outf.write(\"\\n\\n############ Configuration ###########\\n\\n\") conf_str = ppretty(self.config, indent=' ', width=200, seq_length=200,", "None def write_metadata(self, out_filename): \"\"\" write the metadata for this", "this is available in the yaml file. May be the", "for this prediction result, into a human-readable txt file. in", "this information will be captured in the yaml file. Model", "output 8. Others ?? SB Notes: Not required as these", "Entropy: 5. Feature rank file 6. Raw covariates file (target", "shapefile is not required as this is available in the", "Model parameters: 1. YAML file (in full) 2. .SH file", "file. May be the full path to the shapefile has", "file. Model parameters: 1. YAML file (in full) 2. 
.SH", "FZ LastUpdate: dd/mm/yyyy Who Optional description \"\"\" # import section", "the ML results traceable and reproduceable (provenance) \"\"\" with open(out_filename,", "indent=' ', show_protected=True, show_static=True, show_address=False, str_length=50) self.config = config self.name", "str_length=50) outf.write(model_str) outf.write(\"\\n\\n############ The End of Metadata ###########\\n\\n\") return out_filename", "open(out_filename, 'w') as outf: outf.write(\"# Metadata Profile for the Prediction", "Algorithm: Extent: Lat/long - location on Australia map? SB Notes:", "of the above is required as this information will be", "Targets: path to shapefile: csv file SB Notes: Only covaraite", "available in the yaml file. May be the full path", "self.model_performance_metrics = json.load(sf) else: self.model_performance_metrics = None def write_metadata(self, out_filename):", "order to make the ML results traceable and reproduceable (provenance)", "Gather Metadata for the uncover-ml prediction output results: Reference: email", "/usr/bin/env python \"\"\" Description: Gather Metadata for the uncover-ml prediction", "Feature rank file 6. Raw covariates file (target value -", "python dictionary in uncoverml which can be dumped in the", "# import section import os import sys import json import", "datetime import getpass import socket from ppretty import ppretty import", "= self.config.name # 'demo_regression' self.algorithm = self.config.algorithm # 'svr' self.extent", "uncoverml which can be dumped in the metadata. CreationDate: 31/05/19", "show_address=False, str_length=50) outf.write(model_str) outf.write(\"\\n\\n############ The End of Metadata ###########\\n\\n\") return", "be captured in the yaml file. Model inputs: 1. 
Covariates", "pickle import datetime import getpass import socket from ppretty import", "seq_length=200, show_protected=True, show_static=True, show_properties=True, show_address=False, str_length=200) outf.write(conf_str) outf.write(\"\\n\\n############ Model ###########\\n\\n\")", "list (in full) 2. Targets: path to shapefile: csv file", "date: Algorithm: Extent: Lat/long - location on Australia map? SB", "covariates file (target value - covariate value) 7. Optimisation output", "Notes: Only covaraite list file. Targets and path to shapefile", "json import pickle import datetime import getpass import socket from", "Q5; Q95 3. Variance: 4. Entropy: 5. Feature rank file", "self.name = self.config.name # 'demo_regression' self.algorithm = self.config.algorithm # 'svr'", "results traceable and reproduceable (provenance) \"\"\" with open(out_filename, 'w') as", "= ppretty(self.config, indent=' ', width=200, seq_length=200, show_protected=True, show_static=True, show_properties=True, show_address=False,", "os.path.exists(config.crossval_scores_file): with open(config.crossval_scores_file) as sf: self.model_performance_metrics = json.load(sf) else: self.model_performance_metrics", "can be dumped in the metadata. 
CreationDate: 31/05/19 Developer: <EMAIL>", "', width=200, seq_length=200, show_protected=True, show_static=True, show_properties=True, show_address=False, str_length=200) outf.write(conf_str) outf.write(\"\\n\\n############", "self.model_performance_metrics = None def write_metadata(self, out_filename): \"\"\" write the metadata", "for keys, values in self.model_performance_metrics.items(): outf.write(\"%s = %s\\n\"%(keys, values)) outf.write(\"\\n\\n############", "file (in full) SB Notes: The .sh file is not", "are model dependent, and the metadata will be contained in", "outf.write(\"Datetime = %s \\n\"%self.datetime) outf.write(\"\\n\\n############ Performance Matrics ###########\\n\\n\") if self.model_performance_metrics:", "\"\"\" # import section import os import sys import json", "- list (in full) 2. Targets: path to shapefile: csv", "covariate value) 7. Optimisation output 8. Others ?? SB Notes:", "\"\"\" Description: Gather Metadata for the uncover-ml prediction output results:", "each of the output geotif file. Model parameters: 1. YAML", "Model ###########\\n\\n\") model_str = ppretty(self.model, indent=' ', show_protected=True, show_static=True, show_address=False,", "prediction output results: Reference: email 2019-05-24 Overview Creator: (person who", "and the metadata will be contained in each of the", "ML results traceable and reproduceable (provenance) \"\"\" with open(out_filename, 'w')", "%H:%M:%S\") self.version = uncoverml.__version__ model_str = ppretty(self.model, indent=' ', show_protected=True,", "has some merit as one can specify partial path. Model", "3. Variance: 4. Entropy: 5. Feature rank file 6. Raw", "shapefile has some merit as one can specify partial path.", "model self.description = \"Metadata for the ML results\" username =", "Australia map? 
SB Notes: None of the above is required", "self.datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") self.version = uncoverml.__version__ model_str = ppretty(self.model,", "def write_metadata(self, out_filename): \"\"\" write the metadata for this prediction", "the metadata will be contained in each of the output", "file SB Notes: Only covaraite list file. Targets and path", "SB Notes: None of the above is required as this", "%s \\n\"%self.datetime) outf.write(\"\\n\\n############ Performance Matrics ###########\\n\\n\") if self.model_performance_metrics: for keys,", "31/05/19 FZ LastUpdate: dd/mm/yyyy Who Optional description \"\"\" # import", "7. Optimisation output 8. Others ?? SB Notes: Not required", "metadata for this prediction result, into a human-readable txt file.", "<EMAIL> Revision History: LastUpdate: 31/05/19 FZ LastUpdate: dd/mm/yyyy Who Optional", "self.creator = username self.computename = hostname self.datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")", "self.config.algorithm # 'svr' self.extent = ((-10, 100),(-40, 140)) if config.cross_validate", "\\n\"%self.algorithm) outf.write(\"Version = %s\\n\"%self.version) outf.write(\"Datetime = %s \\n\"%self.datetime) outf.write(\"\\n\\n############ Performance", "= ppretty(self.model, indent=' ', show_protected=True, show_static=True, show_address=False, str_length=50) outf.write(model_str) outf.write(\"\\n\\n############", "inputs: 1. Covariates - list (in full) 2. Targets: path", "Others ?? SB Notes: Not required as these are model", "output results: Reference: email 2019-05-24 Overview Creator: (person who generated", "Notes: Yes Model outputs 1. Prediction grid including path 2.", "4. Entropy: 5. Feature rank file 6. 
Raw covariates file", "Who Optional description \"\"\" # import section import os import", "performance JSON file (in full) SB Notes: Yes Model outputs", "hostname = socket.gethostname() self.creator = username self.computename = hostname self.datetime", "section import os import sys import json import pickle import", "import ppretty import uncoverml class MetadataSummary(): \"\"\" Summary Description of", "full path to the shapefile has some merit as one", "the metadata for this prediction result, into a human-readable txt", "and date: Algorithm: Extent: Lat/long - location on Australia map?", "model, config): self.model = model self.description = \"Metadata for the", "hostname self.datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") self.version = uncoverml.__version__ model_str =", "full) SB Notes: The .sh file is not required. YAML", "import socket from ppretty import ppretty import uncoverml class MetadataSummary():", "outf.write(\"%s = %s\\n\"%(keys, values)) outf.write(\"\\n\\n############ Configuration ###########\\n\\n\") conf_str = ppretty(self.config,", "YAML file is read as a python dictionary in uncoverml", "Configuration ###########\\n\\n\") conf_str = ppretty(self.config, indent=' ', width=200, seq_length=200, show_protected=True,", "the shapefile has some merit as one can specify partial", "Name: Type and date: Algorithm: Extent: Lat/long - location on", "Optimisation output 8. Others ?? SB Notes: Not required as", "a human-readable txt file. in order to make the ML", "model) Model; Name: Type and date: Algorithm: Extent: Lat/long -", "metadata will be contained in each of the output geotif", "\\n\"%self.computename) outf.write(\"ML Algorithm = %s \\n\"%self.algorithm) outf.write(\"Version = %s\\n\"%self.version) outf.write(\"Datetime", "as outf: outf.write(\"# Metadata Profile for the Prediction Results\") outf.write(\"\\n\\n############", "import sys import json import pickle import datetime import getpass", "the yaml file. 
Model inputs: 1. Covariates - list (in", "- location on Australia map? SB Notes: None of the", "is available in the yaml file. May be the full", "Model performance JSON file (in full) SB Notes: Yes Model", "ppretty(self.model, indent=' ', show_protected=True, show_static=True, show_address=False, str_length=50) outf.write(model_str) outf.write(\"\\n\\n############ The", "path to shapefile: csv file SB Notes: Only covaraite list", "May be the full path to the shapefile has some", "Matrics ###########\\n\\n\") if self.model_performance_metrics: for keys, values in self.model_performance_metrics.items(): outf.write(\"%s", "self.config.name # 'demo_regression' self.algorithm = self.config.algorithm # 'svr' self.extent =", "= %s\\n\"%(keys, values)) outf.write(\"\\n\\n############ Configuration ###########\\n\\n\") conf_str = ppretty(self.config, indent='", "required as these are model dependent, and the metadata will", "the full path to the shapefile has some merit as", "results: Reference: email 2019-05-24 Overview Creator: (person who generated the", "this prediction result, into a human-readable txt file. in order", "as these are model dependent, and the metadata will be", "partial path. Model performance JSON file (in full) SB Notes:", "parameters: 1. YAML file (in full) 2. .SH file (in", "ML prediction output \"\"\" def __init__(self, model, config): self.model =", "show_protected=True, show_static=True, show_address=False, str_length=50) outf.write(model_str) outf.write(\"\\n\\n############ The End of Metadata", "output geotif file. Model parameters: 1. 
YAML file (in full)", "Results\") outf.write(\"\\n\\n############ Software Environment ###########\\n\\n\") outf.write(\"Creator = %s \\n\"%self.creator) outf.write(\"Computer", "Targets and path to shapefile is not required as this", "LastUpdate: 31/05/19 FZ LastUpdate: dd/mm/yyyy Who Optional description \"\"\" #", "import json import pickle import datetime import getpass import socket", "width=200, seq_length=200, show_protected=True, show_static=True, show_properties=True, show_address=False, str_length=200) outf.write(conf_str) outf.write(\"\\n\\n############ Model", "json.load(sf) else: self.model_performance_metrics = None def write_metadata(self, out_filename): \"\"\" write", "out_filename): \"\"\" write the metadata for this prediction result, into", "SB Notes: Yes Model outputs 1. Prediction grid including path", "Creator: (person who generated the model) Model; Name: Type and", "is not required as this is available in the yaml", "from ppretty import ppretty import uncoverml class MetadataSummary(): \"\"\" Summary", "Revision History: LastUpdate: 31/05/19 FZ LastUpdate: dd/mm/yyyy Who Optional description", "(in full) SB Notes: The .sh file is not required.", "file. in order to make the ML results traceable and", "conf_str = ppretty(self.config, indent=' ', width=200, seq_length=200, show_protected=True, show_static=True, show_properties=True,", "self.model_performance_metrics: for keys, values in self.model_performance_metrics.items(): outf.write(\"%s = %s\\n\"%(keys, values))", "= model self.description = \"Metadata for the ML results\" username", "if self.model_performance_metrics: for keys, values in self.model_performance_metrics.items(): outf.write(\"%s = %s\\n\"%(keys,", "Description of the ML prediction output \"\"\" def __init__(self, model,", "path 2. Quantiles Q5; Q95 3. Variance: 4. 
Entropy: 5.", "show_static=True, show_properties=True, show_address=False, str_length=200) outf.write(conf_str) outf.write(\"\\n\\n############ Model ###########\\n\\n\") model_str =", "config self.name = self.config.name # 'demo_regression' self.algorithm = self.config.algorithm #", "show_protected=True, show_static=True, show_properties=True, show_address=False, str_length=200) outf.write(conf_str) outf.write(\"\\n\\n############ Model ###########\\n\\n\") model_str", "the above is required as this information will be captured", "self.description = \"Metadata for the ML results\" username = getpass.getuser()", "path to shapefile is not required as this is available", "outf.write(\"Computer = %s \\n\"%self.computename) outf.write(\"ML Algorithm = %s \\n\"%self.algorithm) outf.write(\"Version", "Notes: The .sh file is not required. YAML file is", "on Australia map? SB Notes: None of the above is", "show_static=True, show_address=False, str_length=50) outf.write(model_str) outf.write(\"\\n\\n############ The End of Metadata ###########\\n\\n\")", "outf.write(conf_str) outf.write(\"\\n\\n############ Model ###########\\n\\n\") model_str = ppretty(self.model, indent=' ', show_protected=True,", "into a human-readable txt file. in order to make the", "in order to make the ML results traceable and reproduceable", "including path 2. Quantiles Q5; Q95 3. Variance: 4. Entropy:", "file is not required. YAML file is read as a", "as a python dictionary in uncoverml which can be dumped", "write the metadata for this prediction result, into a human-readable", "captured in the yaml file. Model inputs: 1. 
Covariates -", "outf.write(\"Version = %s\\n\"%self.version) outf.write(\"Datetime = %s \\n\"%self.datetime) outf.write(\"\\n\\n############ Performance Matrics", "to shapefile is not required as this is available in", "for the ML results\" username = getpass.getuser() hostname = socket.gethostname()", "with open(out_filename, 'w') as outf: outf.write(\"# Metadata Profile for the", "JSON file (in full) SB Notes: Yes Model outputs 1.", "be the full path to the shapefile has some merit", "show_static=True, show_address=False, str_length=50) self.config = config self.name = self.config.name #", "csv file SB Notes: Only covaraite list file. Targets and", "CreationDate: 31/05/19 Developer: <EMAIL> Revision History: LastUpdate: 31/05/19 FZ LastUpdate:", "and reproduceable (provenance) \"\"\" with open(out_filename, 'w') as outf: outf.write(\"#", "__init__(self, model, config): self.model = model self.description = \"Metadata for", "the ML prediction output \"\"\" def __init__(self, model, config): self.model", "Summary Description of the ML prediction output \"\"\" def __init__(self,", "ppretty(self.config, indent=' ', width=200, seq_length=200, show_protected=True, show_static=True, show_properties=True, show_address=False, str_length=200)", "getpass import socket from ppretty import ppretty import uncoverml class", "outf.write(\"Creator = %s \\n\"%self.creator) outf.write(\"Computer = %s \\n\"%self.computename) outf.write(\"ML Algorithm", "required as this is available in the yaml file. May", "show_properties=True, show_address=False, str_length=200) outf.write(conf_str) outf.write(\"\\n\\n############ Model ###########\\n\\n\") model_str = ppretty(self.model,", "'demo_regression' self.algorithm = self.config.algorithm # 'svr' self.extent = ((-10, 100),(-40,", "getpass.getuser() hostname = socket.gethostname() self.creator = username self.computename = hostname", "Lat/long - location on Australia map? 
SB Notes: None of", "Model; Name: Type and date: Algorithm: Extent: Lat/long - location", "location on Australia map? SB Notes: None of the above" ]
[]
[ "= 100000 self.int_unique = pd.Int64Index(np.arange(N * 5)) # cache is_unique", "day', freq='s', periods=N), }) self.df['C'] = self.df['B'].astype('category') self.df.iloc[10:20] = np.nan", "time_add_overflow_neg_arr(self): self.checked_add(self.arr, self.arrneg) def time_add_overflow_mixed_arr(self): self.checked_add(self.arr, self.arrmixed) class hashing(object): goal_time", "def time_float_factorize(self): self.int.factorize() def time_int_unique_duplicated(self): self.int_unique.duplicated() def time_int_duplicated(self): self.int.duplicated() def", "def time_add_overflow_pos_scalar(self): self.checked_add(self.arr, 1) def time_add_overflow_neg_scalar(self): self.checked_add(self.arr, -1) def time_add_overflow_zero_scalar(self):", "time_add_overflow_neg_scalar(self): self.checked_add(self.arr, -1) def time_add_overflow_zero_scalar(self): self.checked_add(self.arr, 0) def time_add_overflow_pos_arr(self): self.checked_add(self.arr,", "'D': np.random.randn(N), 'E': np.arange(N), 'F': pd.date_range('20110101', freq='s', periods=N), 'G': pd.timedelta_range('1", "self.df['C'] = self.df['B'].astype('category') self.df.iloc[10:20] = np.nan def time_frame(self): self.df.hash() def", "freq='s', periods=N), }) self.df['C'] = self.df['B'].astype('category') self.df.iloc[10:20] = np.nan def", "self.int_unique = pd.Int64Index(np.arange(N * 5)) # cache is_unique self.int_unique.is_unique self.int", "0.2 def setup(self): N = 100000 self.int_unique = pd.Int64Index(np.arange(N *", "np.random.randint(0, 10000, size=N))), 'D': np.random.randn(N), 'E': np.arange(N), 'F': pd.date_range('20110101', freq='s',", "def time_frame(self): self.df.hash() def time_series_int(self): self.df.E.hash() def time_series_string(self): self.df.B.hash() def", "5)) # cache is_unique self.int_unique.is_unique self.int = pd.Int64Index(np.arange(N).repeat(5)) self.float =", "= np.arange(1000000) self.arrpos = np.arange(1000000) self.arrneg = np.arange(-1000000, 0) self.arrmixed", "self.df = 
pd.DataFrame( {'A': pd.Series(tm.makeStringIndex(100).take( np.random.randint(0, 100, size=N))), 'B': pd.Series(tm.makeStringIndex(10000).take(", "10000, size=N))), 'D': np.random.randn(N), 'E': np.arange(N), 'F': pd.date_range('20110101', freq='s', periods=N),", "np.random.randn(N), 'E': np.arange(N), 'F': pd.date_range('20110101', freq='s', periods=N), 'G': pd.timedelta_range('1 day',", "periods=N), }) self.df['C'] = self.df['B'].astype('category') self.df.iloc[10:20] = np.nan def time_frame(self):", "pd.Series(tm.makeStringIndex(100).take( np.random.randint(0, 100, size=N))), 'B': pd.Series(tm.makeStringIndex(10000).take( np.random.randint(0, 10000, size=N))), 'D':", "is_unique self.int_unique.is_unique self.int = pd.Int64Index(np.arange(N).repeat(5)) self.float = pd.Float64Index(np.random.randn(N).repeat(5)) # Convenience", "0.2 def setup(self): N = 100000 self.df = pd.DataFrame( {'A':", "-1]).repeat(500000) def time_int_factorize(self): self.int.factorize() def time_float_factorize(self): self.int.factorize() def time_int_unique_duplicated(self): self.int_unique.duplicated()", "self.checked_add(self.arr, self.arrpos) def time_add_overflow_neg_arr(self): self.checked_add(self.arr, self.arrneg) def time_add_overflow_mixed_arr(self): self.checked_add(self.arr, self.arrmixed)", "= np.nan def time_frame(self): self.df.hash() def time_series_int(self): self.df.E.hash() def time_series_string(self):", "np.arange(-1000000, 0) self.arrmixed = np.array([1, -1]).repeat(500000) def time_int_factorize(self): self.int.factorize() def", "0) def time_add_overflow_pos_arr(self): self.checked_add(self.arr, self.arrpos) def time_add_overflow_neg_arr(self): self.checked_add(self.arr, self.arrneg) def", "np.random.randint(0, 100, size=N))), 'B': pd.Series(tm.makeStringIndex(10000).take( np.random.randint(0, 10000, size=N))), 'D': np.random.randn(N),", "as np import pandas as pd from pandas.util import testing", "numpy as np import pandas as pd from pandas.util import", 
"self.int.factorize() def time_int_unique_duplicated(self): self.int_unique.duplicated() def time_int_duplicated(self): self.int.duplicated() def time_float_duplicated(self): self.float.duplicated()", "N = 100000 self.df = pd.DataFrame( {'A': pd.Series(tm.makeStringIndex(100).take( np.random.randint(0, 100,", "# cache is_unique self.int_unique.is_unique self.int = pd.Int64Index(np.arange(N).repeat(5)) self.float = pd.Float64Index(np.random.randn(N).repeat(5))", "self.arrmixed) class hashing(object): goal_time = 0.2 def setup(self): N =", "as pd from pandas.util import testing as tm class algorithm(object):", "self.float = pd.Float64Index(np.random.randn(N).repeat(5)) # Convenience naming. self.checked_add = pd.core.nanops._checked_add_with_arr self.arr", "self.float.duplicated() def time_add_overflow_pos_scalar(self): self.checked_add(self.arr, 1) def time_add_overflow_neg_scalar(self): self.checked_add(self.arr, -1) def", "0) self.arrmixed = np.array([1, -1]).repeat(500000) def time_int_factorize(self): self.int.factorize() def time_float_factorize(self):", "pd.Int64Index(np.arange(N).repeat(5)) self.float = pd.Float64Index(np.random.randn(N).repeat(5)) # Convenience naming. self.checked_add = pd.core.nanops._checked_add_with_arr", "def time_add_overflow_zero_scalar(self): self.checked_add(self.arr, 0) def time_add_overflow_pos_arr(self): self.checked_add(self.arr, self.arrpos) def time_add_overflow_neg_arr(self):", "time_frame(self): self.df.hash() def time_series_int(self): self.df.E.hash() def time_series_string(self): self.df.B.hash() def time_series_categorical(self):", "def setup(self): N = 100000 self.df = pd.DataFrame( {'A': pd.Series(tm.makeStringIndex(100).take(", "periods=N), 'G': pd.timedelta_range('1 day', freq='s', periods=N), }) self.df['C'] = self.df['B'].astype('category')", "self.int = pd.Int64Index(np.arange(N).repeat(5)) self.float = pd.Float64Index(np.random.randn(N).repeat(5)) # Convenience naming. 
self.checked_add", "np.arange(1000000) self.arrpos = np.arange(1000000) self.arrneg = np.arange(-1000000, 0) self.arrmixed =", "self.checked_add(self.arr, 0) def time_add_overflow_pos_arr(self): self.checked_add(self.arr, self.arrpos) def time_add_overflow_neg_arr(self): self.checked_add(self.arr, self.arrneg)", "<reponame>raspbian-packages/pandas<filename>asv_bench/benchmarks/algorithms.py import numpy as np import pandas as pd from", "'G': pd.timedelta_range('1 day', freq='s', periods=N), }) self.df['C'] = self.df['B'].astype('category') self.df.iloc[10:20]", "import numpy as np import pandas as pd from pandas.util", "size=N))), 'B': pd.Series(tm.makeStringIndex(10000).take( np.random.randint(0, 10000, size=N))), 'D': np.random.randn(N), 'E': np.arange(N),", "self.df.iloc[10:20] = np.nan def time_frame(self): self.df.hash() def time_series_int(self): self.df.E.hash() def", "self.int.factorize() def time_float_factorize(self): self.int.factorize() def time_int_unique_duplicated(self): self.int_unique.duplicated() def time_int_duplicated(self): self.int.duplicated()", "self.checked_add(self.arr, 1) def time_add_overflow_neg_scalar(self): self.checked_add(self.arr, -1) def time_add_overflow_zero_scalar(self): self.checked_add(self.arr, 0)", "100000 self.int_unique = pd.Int64Index(np.arange(N * 5)) # cache is_unique self.int_unique.is_unique", "np.array([1, -1]).repeat(500000) def time_int_factorize(self): self.int.factorize() def time_float_factorize(self): self.int.factorize() def time_int_unique_duplicated(self):", "self.int_unique.duplicated() def time_int_duplicated(self): self.int.duplicated() def time_float_duplicated(self): self.float.duplicated() def time_add_overflow_pos_scalar(self): self.checked_add(self.arr,", "time_add_overflow_pos_arr(self): self.checked_add(self.arr, self.arrpos) def time_add_overflow_neg_arr(self): self.checked_add(self.arr, self.arrneg) def time_add_overflow_mixed_arr(self): self.checked_add(self.arr,", 
"time_int_unique_duplicated(self): self.int_unique.duplicated() def time_int_duplicated(self): self.int.duplicated() def time_float_duplicated(self): self.float.duplicated() def time_add_overflow_pos_scalar(self):", "= pd.Int64Index(np.arange(N).repeat(5)) self.float = pd.Float64Index(np.random.randn(N).repeat(5)) # Convenience naming. self.checked_add =", "'B': pd.Series(tm.makeStringIndex(10000).take( np.random.randint(0, 10000, size=N))), 'D': np.random.randn(N), 'E': np.arange(N), 'F':", "pd.core.nanops._checked_add_with_arr self.arr = np.arange(1000000) self.arrpos = np.arange(1000000) self.arrneg = np.arange(-1000000,", "from pandas.util import testing as tm class algorithm(object): goal_time =", "{'A': pd.Series(tm.makeStringIndex(100).take( np.random.randint(0, 100, size=N))), 'B': pd.Series(tm.makeStringIndex(10000).take( np.random.randint(0, 10000, size=N))),", "* 5)) # cache is_unique self.int_unique.is_unique self.int = pd.Int64Index(np.arange(N).repeat(5)) self.float", "time_add_overflow_mixed_arr(self): self.checked_add(self.arr, self.arrmixed) class hashing(object): goal_time = 0.2 def setup(self):", "freq='s', periods=N), 'G': pd.timedelta_range('1 day', freq='s', periods=N), }) self.df['C'] =", "self.checked_add(self.arr, self.arrmixed) class hashing(object): goal_time = 0.2 def setup(self): N", "pd.DataFrame( {'A': pd.Series(tm.makeStringIndex(100).take( np.random.randint(0, 100, size=N))), 'B': pd.Series(tm.makeStringIndex(10000).take( np.random.randint(0, 10000,", "np.arange(1000000) self.arrneg = np.arange(-1000000, 0) self.arrmixed = np.array([1, -1]).repeat(500000) def", "self.arrmixed = np.array([1, -1]).repeat(500000) def time_int_factorize(self): self.int.factorize() def time_float_factorize(self): self.int.factorize()", "def time_int_unique_duplicated(self): self.int_unique.duplicated() def time_int_duplicated(self): self.int.duplicated() def time_float_duplicated(self): self.float.duplicated() def", "def 
time_add_overflow_neg_scalar(self): self.checked_add(self.arr, -1) def time_add_overflow_zero_scalar(self): self.checked_add(self.arr, 0) def time_add_overflow_pos_arr(self):", "time_float_factorize(self): self.int.factorize() def time_int_unique_duplicated(self): self.int_unique.duplicated() def time_int_duplicated(self): self.int.duplicated() def time_float_duplicated(self):", "testing as tm class algorithm(object): goal_time = 0.2 def setup(self):", "pandas.util import testing as tm class algorithm(object): goal_time = 0.2", "naming. self.checked_add = pd.core.nanops._checked_add_with_arr self.arr = np.arange(1000000) self.arrpos = np.arange(1000000)", "= np.arange(1000000) self.arrneg = np.arange(-1000000, 0) self.arrmixed = np.array([1, -1]).repeat(500000)", "= self.df['B'].astype('category') self.df.iloc[10:20] = np.nan def time_frame(self): self.df.hash() def time_series_int(self):", "def setup(self): N = 100000 self.int_unique = pd.Int64Index(np.arange(N * 5))", "self.arrpos = np.arange(1000000) self.arrneg = np.arange(-1000000, 0) self.arrmixed = np.array([1,", "Convenience naming. 
self.checked_add = pd.core.nanops._checked_add_with_arr self.arr = np.arange(1000000) self.arrpos =", "pandas as pd from pandas.util import testing as tm class", "'F': pd.date_range('20110101', freq='s', periods=N), 'G': pd.timedelta_range('1 day', freq='s', periods=N), })", "np.arange(N), 'F': pd.date_range('20110101', freq='s', periods=N), 'G': pd.timedelta_range('1 day', freq='s', periods=N),", "time_int_factorize(self): self.int.factorize() def time_float_factorize(self): self.int.factorize() def time_int_unique_duplicated(self): self.int_unique.duplicated() def time_int_duplicated(self):", "pd.date_range('20110101', freq='s', periods=N), 'G': pd.timedelta_range('1 day', freq='s', periods=N), }) self.df['C']", "algorithm(object): goal_time = 0.2 def setup(self): N = 100000 self.int_unique", "-1) def time_add_overflow_zero_scalar(self): self.checked_add(self.arr, 0) def time_add_overflow_pos_arr(self): self.checked_add(self.arr, self.arrpos) def", "pd.Series(tm.makeStringIndex(10000).take( np.random.randint(0, 10000, size=N))), 'D': np.random.randn(N), 'E': np.arange(N), 'F': pd.date_range('20110101',", "pd from pandas.util import testing as tm class algorithm(object): goal_time", "np import pandas as pd from pandas.util import testing as", "time_add_overflow_pos_scalar(self): self.checked_add(self.arr, 1) def time_add_overflow_neg_scalar(self): self.checked_add(self.arr, -1) def time_add_overflow_zero_scalar(self): self.checked_add(self.arr,", "as tm class algorithm(object): goal_time = 0.2 def setup(self): N", "self.arrneg = np.arange(-1000000, 0) self.arrmixed = np.array([1, -1]).repeat(500000) def time_int_factorize(self):", "= np.array([1, -1]).repeat(500000) def time_int_factorize(self): self.int.factorize() def time_float_factorize(self): self.int.factorize() def", "class hashing(object): goal_time = 0.2 def setup(self): N = 100000", "}) self.df['C'] = self.df['B'].astype('category') self.df.iloc[10:20] = np.nan def time_frame(self): self.df.hash()", 
"self.checked_add = pd.core.nanops._checked_add_with_arr self.arr = np.arange(1000000) self.arrpos = np.arange(1000000) self.arrneg", "import testing as tm class algorithm(object): goal_time = 0.2 def", "def time_add_overflow_neg_arr(self): self.checked_add(self.arr, self.arrneg) def time_add_overflow_mixed_arr(self): self.checked_add(self.arr, self.arrmixed) class hashing(object):", "hashing(object): goal_time = 0.2 def setup(self): N = 100000 self.df", "cache is_unique self.int_unique.is_unique self.int = pd.Int64Index(np.arange(N).repeat(5)) self.float = pd.Float64Index(np.random.randn(N).repeat(5)) #", "self.checked_add(self.arr, self.arrneg) def time_add_overflow_mixed_arr(self): self.checked_add(self.arr, self.arrmixed) class hashing(object): goal_time =", "= 100000 self.df = pd.DataFrame( {'A': pd.Series(tm.makeStringIndex(100).take( np.random.randint(0, 100, size=N))),", "self.checked_add(self.arr, -1) def time_add_overflow_zero_scalar(self): self.checked_add(self.arr, 0) def time_add_overflow_pos_arr(self): self.checked_add(self.arr, self.arrpos)", "= pd.Int64Index(np.arange(N * 5)) # cache is_unique self.int_unique.is_unique self.int =", "pd.Int64Index(np.arange(N * 5)) # cache is_unique self.int_unique.is_unique self.int = pd.Int64Index(np.arange(N).repeat(5))", "# Convenience naming. 
self.checked_add = pd.core.nanops._checked_add_with_arr self.arr = np.arange(1000000) self.arrpos", "pd.timedelta_range('1 day', freq='s', periods=N), }) self.df['C'] = self.df['B'].astype('category') self.df.iloc[10:20] =", "self.int.duplicated() def time_float_duplicated(self): self.float.duplicated() def time_add_overflow_pos_scalar(self): self.checked_add(self.arr, 1) def time_add_overflow_neg_scalar(self):", "def time_int_factorize(self): self.int.factorize() def time_float_factorize(self): self.int.factorize() def time_int_unique_duplicated(self): self.int_unique.duplicated() def", "N = 100000 self.int_unique = pd.Int64Index(np.arange(N * 5)) # cache", "def time_add_overflow_pos_arr(self): self.checked_add(self.arr, self.arrpos) def time_add_overflow_neg_arr(self): self.checked_add(self.arr, self.arrneg) def time_add_overflow_mixed_arr(self):", "time_int_duplicated(self): self.int.duplicated() def time_float_duplicated(self): self.float.duplicated() def time_add_overflow_pos_scalar(self): self.checked_add(self.arr, 1) def", "setup(self): N = 100000 self.df = pd.DataFrame( {'A': pd.Series(tm.makeStringIndex(100).take( np.random.randint(0,", "= 0.2 def setup(self): N = 100000 self.int_unique = pd.Int64Index(np.arange(N", "self.df['B'].astype('category') self.df.iloc[10:20] = np.nan def time_frame(self): self.df.hash() def time_series_int(self): self.df.E.hash()", "np.nan def time_frame(self): self.df.hash() def time_series_int(self): self.df.E.hash() def time_series_string(self): self.df.B.hash()", "self.arr = np.arange(1000000) self.arrpos = np.arange(1000000) self.arrneg = np.arange(-1000000, 0)", "'E': np.arange(N), 'F': pd.date_range('20110101', freq='s', periods=N), 'G': pd.timedelta_range('1 day', freq='s',", "def time_int_duplicated(self): self.int.duplicated() def time_float_duplicated(self): self.float.duplicated() def time_add_overflow_pos_scalar(self): self.checked_add(self.arr, 1)", "goal_time = 0.2 def setup(self): N = 100000 self.int_unique =", "= 
0.2 def setup(self): N = 100000 self.df = pd.DataFrame(", "time_float_duplicated(self): self.float.duplicated() def time_add_overflow_pos_scalar(self): self.checked_add(self.arr, 1) def time_add_overflow_neg_scalar(self): self.checked_add(self.arr, -1)", "goal_time = 0.2 def setup(self): N = 100000 self.df =", "size=N))), 'D': np.random.randn(N), 'E': np.arange(N), 'F': pd.date_range('20110101', freq='s', periods=N), 'G':", "class algorithm(object): goal_time = 0.2 def setup(self): N = 100000", "pd.Float64Index(np.random.randn(N).repeat(5)) # Convenience naming. self.checked_add = pd.core.nanops._checked_add_with_arr self.arr = np.arange(1000000)", "self.df.hash() def time_series_int(self): self.df.E.hash() def time_series_string(self): self.df.B.hash() def time_series_categorical(self): self.df.C.hash()", "= pd.core.nanops._checked_add_with_arr self.arr = np.arange(1000000) self.arrpos = np.arange(1000000) self.arrneg =", "setup(self): N = 100000 self.int_unique = pd.Int64Index(np.arange(N * 5)) #", "100000 self.df = pd.DataFrame( {'A': pd.Series(tm.makeStringIndex(100).take( np.random.randint(0, 100, size=N))), 'B':", "100, size=N))), 'B': pd.Series(tm.makeStringIndex(10000).take( np.random.randint(0, 10000, size=N))), 'D': np.random.randn(N), 'E':", "self.arrneg) def time_add_overflow_mixed_arr(self): self.checked_add(self.arr, self.arrmixed) class hashing(object): goal_time = 0.2", "self.arrpos) def time_add_overflow_neg_arr(self): self.checked_add(self.arr, self.arrneg) def time_add_overflow_mixed_arr(self): self.checked_add(self.arr, self.arrmixed) class", "tm class algorithm(object): goal_time = 0.2 def setup(self): N =", "self.int_unique.is_unique self.int = pd.Int64Index(np.arange(N).repeat(5)) self.float = pd.Float64Index(np.random.randn(N).repeat(5)) # Convenience naming.", "= pd.Float64Index(np.random.randn(N).repeat(5)) # Convenience naming. 
self.checked_add = pd.core.nanops._checked_add_with_arr self.arr =", "import pandas as pd from pandas.util import testing as tm", "= np.arange(-1000000, 0) self.arrmixed = np.array([1, -1]).repeat(500000) def time_int_factorize(self): self.int.factorize()", "def time_add_overflow_mixed_arr(self): self.checked_add(self.arr, self.arrmixed) class hashing(object): goal_time = 0.2 def", "1) def time_add_overflow_neg_scalar(self): self.checked_add(self.arr, -1) def time_add_overflow_zero_scalar(self): self.checked_add(self.arr, 0) def", "= pd.DataFrame( {'A': pd.Series(tm.makeStringIndex(100).take( np.random.randint(0, 100, size=N))), 'B': pd.Series(tm.makeStringIndex(10000).take( np.random.randint(0,", "time_add_overflow_zero_scalar(self): self.checked_add(self.arr, 0) def time_add_overflow_pos_arr(self): self.checked_add(self.arr, self.arrpos) def time_add_overflow_neg_arr(self): self.checked_add(self.arr,", "def time_float_duplicated(self): self.float.duplicated() def time_add_overflow_pos_scalar(self): self.checked_add(self.arr, 1) def time_add_overflow_neg_scalar(self): self.checked_add(self.arr," ]
[ "containing Stokes spectra & errors.\") parser.add_argument(\"-t\", dest=\"fitRMSF\", action=\"store_true\", help=\"fit a", "included in # # all copies or substantial portions of", "# # MODIFIED: 23-October-2019 by <NAME> # # # #=============================================================================#", "dict() aDict[\"phiArr_radm2\"] = phiArr_radm2 aDict[\"phi2Arr_radm2\"] = phi2Arr_radm2 aDict[\"RMSFArr\"] = RMSFArr", "uArr, dqArr, duArr, fitDict = \\ create_frac_spectra(freqArr = freqArr_GHz, IArr", "phiArr_radm2[-1], float(dPhi_radm2), nChanRM)) # Calculate the weighting as 1/sigma^2 or", "nBits = nBits, verbose = verbose, log = log) fwhmRMSF", "np.savetxt(outFile, list(zip(arrdict[\"freqArr_Hz\"], arrdict[\"weightArr\"]))) # Save the measurements to a \"key=value\"", "in a space separated format: [freq_Hz, I, Q, U, I_err,", "frequency and polarization data as either: [freq_Hz, I, Q, U,", "PI = %.4g (+/-%.4g) %s' % (mDict[\"ampPeakPIfit\"], mDict[\"dAmpPeakPIfit\"],units)) log('QU Noise", "%.4g (+/-%.4g) deg' % (mDict[\"polAngle0Fit_deg\"], mDict[\"dPolAngle0Fit_deg\"])) log('Peak FD = %.4g", "in Stokes I intensity in each channel. dQ (array_like): Error", "DAMAGES OR OTHER # # LIABILITY, WHETHER IN AN ACTION", "import matplotlib.pyplot as plt from RMutils.util_RM import do_rmsynth from RMutils.util_RM", "I, Q, U, dI, dQ, dU try: if verbose: log(\">", "RM synthesis results. aDict (dict): Data output by RM synthesis.", "endTime = time.time() cputime = (endTime - startTime) if verbose:", "toscalar(Ifreq0) mDict[\"polyCoeffs\"] = \",\".join([str(x) for x in fitDict[\"p\"]]) mDict[\"IfitStat\"] =", "FDF, RMSF and weight array to ASCII files if verbose:", "columns found in the file. If Stokes I is present,", "each channel. Kwargs: polyOrd (int): Order of polynomial to fit", "Q & U data from the ASCII file. 
Inputs: datafile", "= IArr, QArr = QArr, UArr = UArr, dIArr =", "to the following conditions: # # # # The above", "= phiArr_radm2, weightArr = weightArr, mskArr = ~np.isfinite(qArr), lam0Sq_m2 =", "of the Software. # # # # THE SOFTWARE IS", "from RMutils.util_RM import do_rmsynth from RMutils.util_RM import do_rmsynth_planes from RMutils.util_RM", "file containing Stokes spectra & errors.\") parser.add_argument(\"-t\", dest=\"fitRMSF\", action=\"store_true\", help=\"fit", "signal-to-noise.\") #Add information on nature of channels: good_channels=np.where(np.logical_and(weightArr != 0,np.isfinite(qArr)))[0]", "parameters for RM spectrum _weight.dat: Calculated channel weights [freq_Hz, weight]", "(freqArr_Hz, QArr, UArr, dQArr, dUArr) = \\ np.loadtxt(dataFile, unpack=True, dtype=dtFloat)", "noStokesI = args.noStokesI, nBits = nBits, showPlots = args.showPlots, debug", "(uniform) if weightType==\"variance\": weightArr = 1.0 / np.power(dQUArr, 2.0) else:", "the resulting model used to create fractional q = Q/I", "noise in the FDF !!Old formula only works for wariance", "License (MIT) # # # # Copyright (c) 2015 -", "/ dLambdaSqMax_m2 phiMax_radm2 = max(phiMax_radm2, fwhmRMSF_radm2*10.) # Force the minimum", "= \\ get_rmsf_planes(lambdaSqArr_m2 = lambdaSqArr_m2, phiArr_radm2 = phiArr_radm2, weightArr =", "Spread Function and dirty FDF if showPlots or saveOutput: fdfFig", "# DEALINGS IN THE SOFTWARE. 
# # # #=============================================================================# import", "phiArr_radm2, fwhmRMSF = fwhmRMSF, dFDF = dFDFth, lamSqArr_m2 = lambdaSqArr_m2,", "by Ifreq0 to recover the PI freq0_Hz = C /", "= \"complex\" + str(2*nBits) # freq_Hz, I, Q, U, dI,", "exist: '%s'.\" % args.dataFile[0]) sys.exit() prefixOut, ext = os.path.splitext(args.dataFile[0]) dataDir,", "= specFig, units = units) # Use the custom navigation", "= np.nanmin(np.abs(np.diff(freqArr_Hz))) lambdaSqRange_m2 = ( np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2) ) dLambdaSqMin_m2", "+ \"_RMSF.dat\" if verbose: print(\"> %s\" % outFile) np.savetxt(outFile, list(zip(arrdict[\"phi2Arr_radm2\"],", "I_err, Q_err, U_err] OR [freq_Hz, Q, U, Q_err, U_err] To", "Q, U, dI, dQ, dU try: if verbose: log(\"> Trying", "# # NAME: do_RMsynth_1D.py # # # # PURPOSE: API", "dummy spectrum = unity if noStokesI: if verbose: log(\"Warn: no", "freqHirArr_Hz = freqHirArr_Hz, IModArr = IModHirArr, fig = specFig, units", "weight array to ASCII files if verbose: print(\"Saving the dirty", "import do_rmsynth from RMutils.util_RM import do_rmsynth_planes from RMutils.util_RM import get_rmsf_planes", "WARRANTIES OF MERCHANTABILITY, # # FITNESS FOR A PARTICULAR PURPOSE", "= mDict[\"phiPeakPIfit_rm2\"]) mDict.update(mD) # Debugging plots for spectral complexity measure", "%.4g (+/-%.4g) deg' % (mDict[\"polAngleFit_deg\"], mDict[\"dPolAngleFit_deg\"])) log('Pol Angle 0 =", "of floating point numbers. showPlots (bool): Show plots? 
debug (bool):", "log('Peak PI = %.4g (+/-%.4g) %s' % (mDict[\"ampPeakPIfit\"], mDict[\"dAmpPeakPIfit\"],units)) log('QU", "(uses 32-bit)]\") parser.add_argument(\"-p\", dest=\"showPlots\", action=\"store_true\", help=\"show the plots [False].\") parser.add_argument(\"-v\",", "= freqHirArr_Hz, IModArr = IModHirArr, fig = specFig, units =", "#-----------------------------------------------------------------------------# def main(): import argparse \"\"\" Start the function to", "Plot the RM Spread Function and dirty FDF if showPlots", "= phi2Arr_radm2 aDict[\"RMSFArr\"] = RMSFArr aDict[\"freqArr_Hz\"] = freqArr_Hz aDict[\"weightArr\"]=weightArr aDict[\"dirtyFDF\"]=dirtyFDF", "the following flags must be set: -S, -p, -v. \"\"\"", "action=\"store_true\", help=\"save the arrays and plots [False].\") parser.add_argument(\"-D\", dest=\"debug\", action=\"store_true\",", "the command line options parser = argparse.ArgumentParser(description=descStr,epilog=epilog_text, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"dataFile\", metavar=\"dataFile.dat\",", "do_rmsynth from RMutils.util_RM import do_rmsynth_planes from RMutils.util_RM import get_rmsf_planes from", "Noise (rms) = %.4g %s' % (mDict[\"dFDFrms\"],units)) log('FDF SNR =", "Spectrum [Phi, Q, U] _RMSF.dat: Computed RMSF [Phi, Q, U]", "args.nSamples, weightType = args.weightType, fitRMSF = args.fitRMSF, noStokesI = args.noStokesI,", "weights [freq_Hz, weight] \"\"\" # Parse the command line options", "# # all copies or substantial portions of the Software.", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #", "dIArr = dIArr, dQArr = dQArr, dUArr = dUArr, polyOrd", "in fracpol units initially, convert back to flux # Calculate", "fit with a polynomial and the resulting model used to", "color='k', lw=0.5, label='rms <QU>') ax.plot(freqArr_Hz/1e9, dQArr, marker='o', color='b', lw=0.5, label='rms", "mDict (dict): Summary of RM synthesis results. 
aDict (dict): Data", "np.nanmax(np.abs(np.diff(lambdaSqArr_m2))) # Set the Faraday depth range fwhmRMSF_radm2 = 2.0", "[freq_Hz, weight] \"\"\" # Parse the command line options parser", "= data if verbose: log(\"... success.\") noStokesI = True except", "#-------------------------------------------------------------------------# endTime = time.time() cputime = (endTime - startTime) if", "/ 1e9 dQUArr = (dQArr + dUArr)/2.0 # Fit the", "% (mDict[\"sigmaAddU\"], mDict[\"dSigmaAddPlusU\"], mDict[\"dSigmaAddMinusU\"])) log() log('-'*80) # Plot the RM", "Stokes I present, create a dummy spectrum = unity if", "= do_rmsynth_planes(dataQ = qArr, dataU = uArr, lambdaSqArr_m2 = lambdaSqArr_m2,", "portions of the Software. # # # # THE SOFTWARE", "outputs, one or more of the following flags must be", "\" + outFilePlot) tmpFig.savefig(outFilePlot, bbox_inches = 'tight') else: tmpFig.show() #add", "0,np.isfinite(qArr)))[0] mDict[\"min_freq\"]=float(np.min(freqArr_Hz[good_channels])) mDict[\"max_freq\"]=float(np.max(freqArr_Hz[good_channels])) mDict[\"N_channels\"]=good_channels.size mDict[\"median_channel_width\"]=float(np.median(np.diff(freqArr_Hz))) # Measure the complexity of", "log('FDF Noise (Corrected MAD) = %.4g %s' % (mDict[\"dFDFcorMAD\"],units)) log('FDF", "I spectrum is first fit with a polynomial and the", "# Multiply the dirty FDF by Ifreq0 to recover the", "# # # #=============================================================================# # # # The MIT License", "dUArr) = \\ np.loadtxt(dataFile, unpack=True, dtype=dtFloat) if verbose: print(\"... 
success.\")", "if verbose: print(\"> \" + outFilePlot) fdfFig.savefig(outFilePlot, bbox_inches = 'tight')", "Q and U spectra (1D) stored in an ASCII file.", "mDict[\"max_freq\"]=float(np.max(freqArr_Hz[good_channels])) mDict[\"N_channels\"]=good_channels.size mDict[\"median_channel_width\"]=float(np.median(np.diff(freqArr_Hz))) # Measure the complexity of the q", "-v flag output) _RMsynth.json: dictionary of derived parameters for RM", "2.0 + 1.0) startPhi_radm2 = - (nChanRM-1.0) * dPhi_radm2 /", "Args: data (list): Contains frequency and polarization data as either:", "- 2018 <NAME> # # # # Permission is hereby", "plt.figure(figsize=(12.0, 8)) plot_rmsf_fdf_fig(phiArr = phiArr_radm2, FDF = dirtyFDF, phi2Arr =", "weight] \"\"\" # Parse the command line options parser =", "RMutils.util_misc import toscalar from RMutils.util_misc import create_frac_spectra from RMutils.util_misc import", "specFig = plt.figure(figsize=(12.0, 8)) plot_Ipqu_spectra_fig(freqArr_Hz = freqArr_Hz, IArr = IArr,", "debug: if verbose: print(\"Saving RMSF and dirty FDF plot:\") outFilePlot", "Stokes I, Q and U spectra (1D) stored in an", "create fractional q = Q/I and u = U/I spectra.", "dQArr, dUArr] noStokesI = True except Exception: if verbose: print(\"...failed.\")", "I spectrum [2].\") parser.add_argument(\"-i\", dest=\"noStokesI\", action=\"store_true\", help=\"ignore the Stokes I", "the custom navigation toolbar (does not work on Mac OS", "Trying [freq_Hz, q, u, dq, du]\", end=' ') (freqArr_Hz, QArr,", "file. 
If Stokes I is present, this will be [freq_Hz,", "ext = os.path.splitext(args.dataFile[0]) # Default data types dtFloat = \"float\"", "(mDict[\"ampPeakPIfit\"], mDict[\"dAmpPeakPIfit\"],units)) log('QU Noise = %.4g %s' % (mDict[\"dQU\"],units)) log('FDF", "= run_rmsynth(data = data, polyOrd = args.polyOrd, phiMax_radm2 = args.phiMax_radm2,", "args.phiMax_radm2, dPhi_radm2 = args.dPhi_radm2, nSamples = args.nSamples, weightType = args.weightType,", "qArr, dataU = uArr, lambdaSqArr_m2 = lambdaSqArr_m2, phiArr_radm2 = phiArr_radm2,", "from RMutils.util_plotTk import plot_Ipqu_spectra_fig from RMutils.util_plotTk import plot_rmsf_fdf_fig from RMutils.util_plotTk", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "not os.path.exists(args.dataFile[0]): print(\"File does not exist: '%s'.\" % args.dataFile[0]) sys.exit()", "label='rms Q') ax.plot(freqArr_Hz/1e9, dUArr, marker='o', color='r', lw=0.5, label='rms U') xRange", "= lam0Sq_m2, double = True, fitRMSF = fitRMSF, fitRMSFreal =", "dUArr, marker='o', color='r', lw=0.5, label='rms U') xRange = (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9 ax.set_xlim(", "if fitDict[\"fitStatus\"] >= 128: log(\"WARNING: Stokes I model contains negative", "# try: # fdfFig.canvas.toolbar.pack_forget() # CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window) # except Exception:", "Contains frequency and polarization data as either: [freq_Hz, I, Q,", "nBits, verbose = verbose, log = log) # Calculate the", "os.path.splitext(args.dataFile[0]) dataDir, dummy = os.path.split(args.dataFile[0]) # Set the floating point", "(mDict[\"dFDFrms\"],units)) log('FDF SNR = %.4g ' % (mDict[\"snrPIfit\"])) log('sigma_add(q) =", "log(\"PhiArr = %.2f to %.2f by %.2f (%d chans).\" %", "QArr, UArr, dQArr, dUArr] noStokesI = True except Exception: if", "\"\"\" # Help string to be shown using the -h", "I, Q, U, I_err, Q_err, U_err] OR [freq_Hz, Q, U,", "(mDict[\"polAngleFit_deg\"], mDict[\"dPolAngleFit_deg\"])) log('Pol Angle 0 = %.4g (+/-%.4g) 
deg' %", "Error in Stokes I intensity in each channel. dQ (array_like):", "Stokes I is present, this will be [freq_Hz, I, Q,", "fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2 if dPhi_radm2 is", "to store the data as. verbose (bool): Print verbose messages", "\\ np.loadtxt(dataFile, unpack=True, dtype=dtFloat) if verbose: print(\"... success.\") data=[freqArr_Hz, IArr,", "= phi2Arr_radm2, RMSFArr = RMSFArr, fwhmRMSF = fwhmRMSF, vLine =", "<NAME> # # # # Permission is hereby granted, free", "elif saveOutput or debug: if verbose: print(\"Saving RMSF and dirty", "log('I freq0 = %.4g %s' % (mDict[\"Ifreq0\"],units)) log('Peak PI =", "file. Inputs: datafile (str): relative or absolute path to file.", "outFile) np.savetxt(outFile, list(zip(arrdict[\"freqArr_Hz\"], arrdict[\"weightArr\"]))) # Save the measurements to a", "= args.weightType, fitRMSF = args.fitRMSF, noStokesI = args.noStokesI, nBits =", "\\ np.loadtxt(dataFile, unpack=True, dtype=dtFloat) if verbose: print(\"... success.\") data=[freqArr_Hz, QArr,", "= %.4g %s' % (mDict[\"dFDFrms\"],units)) log('FDF SNR = %.4g '", "do_rmsynth_planes from RMutils.util_RM import get_rmsf_planes from RMutils.util_RM import measure_FDF_parms from", "IArr = IArr, QArr = QArr, UArr = UArr, dIArr", "mDict[\"dAmpPeakPIfit\"],units)) log('QU Noise = %.4g %s' % (mDict[\"dQU\"],units)) log('FDF Noise", "# # to deal in the Software without restriction, including", "debug: rmsFig = plt.figure(figsize=(12.0, 8)) ax = rmsFig.add_subplot(111) ax.plot(freqArr_Hz/1e9, dQUArr,", "prefixOut + \"_RMsynth.json\" if verbose: print(\"> %s\" % outFile) json.dump(dict(outdict),", "the Software. 
# # # # THE SOFTWARE IS PROVIDED", "Parse the command line options parser = argparse.ArgumentParser(description=descStr,epilog=epilog_text, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"dataFile\",", "the screen log() log('-'*80) log('RESULTS:\\n') log('FWHM RMSF = %.4g rad/m^2'", "# pass # Display the figure # fdfFig.show() # Pause", "(nChanRM-1.0) * dPhi_radm2 / 2.0 stopPhi_radm2 = + (nChanRM-1.0) *", "nSamples=10.0, weightType=\"variance\", fitRMSF=False, noStokesI=False, phiNoise_radm2=1e6, nBits=32, showPlots=False, debug=False, verbose=False, log=print,units='Jy/beam',", "plt.figure(figsize=(12.0, 8)) plot_Ipqu_spectra_fig(freqArr_Hz = freqArr_Hz, IArr = IArr, qArr =", "phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \\ get_rmsf_planes(lambdaSqArr_m2 = lambdaSqArr_m2, phiArr_radm2 =", "nBits, verbose = verbose, log = log) fwhmRMSF = float(fwhmRMSFArr)", "2.997924538e8 # Speed of light [m/s] #-----------------------------------------------------------------------------# def run_rmsynth(data, polyOrd=3,", "cputime = (endTime - startTime) if verbose: log(\"> RM-synthesis completed", "(float): Number of samples across the RMSF. weightType (str): Can", "verbose = verbose, log = log) fwhmRMSF = float(fwhmRMSFArr) #", "Function and dirty FDF if showPlots or saveOutput: fdfFig =", "<RETURN> to exit ...\", # input() return mDict, aDict def", "= np.ones_like(QArr) dIArr = np.zeros_like(QArr) # Convert to GHz for", "intensity in each channel. 
or [freq_Hz, q, u, dq, du]", "prefixOut + \"_RMsynth.dat\" if verbose: print(\"Saving the measurements on the", "# #=============================================================================# # # # The MIT License (MIT) #", "U, dI, dQ, dU try: if verbose: log(\"> Trying [freq_Hz,", "sys.exit() if verbose: log(\"Successfully read in the Stokes spectra.\") #", "low signal-to-noise.\") #Add information on nature of channels: good_channels=np.where(np.logical_and(weightArr !=", "# Pause if plotting enabled if showPlots: plt.show() elif saveOutput", "freq_Hz (array_like): Frequency of each channel in Hz. q (array_like):", "str(2*nBits) # Output prefix is derived from the input file", "(str): Can be \"variance\" or \"uniform\" \"variance\" -- Weight by", "from RMutils.util_plotTk import plot_rmsf_fdf_fig from RMutils.util_plotTk import plot_complexity_fig from RMutils.util_plotTk", "Q, U, dI, dQ, dU], else [freq_Hz, q, u, dq,", "verbose: print(\"Saving the dirty FDF, RMSF weight arrays to ASCII", "for spectral complexity measure if debug: tmpFig = plot_complexity_fig(xArr=pD[\"xArrQ\"], qArr=pD[\"yArrQ\"],", "[Auto].\") parser.add_argument(\"-d\", dest=\"dPhi_radm2\", type=float, default=None, help=\"width of Faraday depth channel", "/ lambdaSqRange_m2 if dPhi_radm2 is None: dPhi_radm2 = fwhmRMSF_radm2 /", "I spectrum and create the fractional spectra IModArr, qArr, uArr,", "[freq_Hz, I, Q, U, I_err, Q_err, U_err] OR [freq_Hz, Q,", "[freq_Hz, q, u, dq, du]\", end=' ') (freqArr_Hz, QArr, UArr,", "deg' % (mDict[\"polAngleFit_deg\"], mDict[\"dPolAngleFit_deg\"])) log('Pol Angle 0 = %.4g (+/-%.4g)", "data from the ASCII file. Inputs: datafile (str): relative or", "uniformly (i.e. 
with 1s) fitRMSF (bool): Fit a Gaussian to", "= RMSFArr aDict[\"freqArr_Hz\"] = freqArr_Hz aDict[\"weightArr\"]=weightArr aDict[\"dirtyFDF\"]=dirtyFDF if verbose: #", "= debug) # Plot the data and the Stokes I", "Display the figure # fdfFig.show() # Pause if plotting enabled", "a # # copy of this software and associated documentation", "the rights to use, copy, modify, merge, publish, distribute, sublicense,", "U data from the ASCII file. Inputs: datafile (str): relative", "m.sqrt(3.0) / dLambdaSqMax_m2 phiMax_radm2 = max(phiMax_radm2, fwhmRMSF_radm2*10.) # Force the", "ASCII file. Inputs: datafile (str): relative or absolute path to", "the Software, and to permit persons to whom the #", "or substantial portions of the Software. # # # #", "Trying [freq_Hz, I, Q, U, dI, dQ, dU]\", end=' ')", "= prefixOut + \"_FDFdirty.dat\" if verbose: print(\"> %s\" % outFile)", "verbose: log(\"PhiArr = %.2f to %.2f by %.2f (%d chans).\"", "spectrum _weight.dat: Calculated channel weights [freq_Hz, weight] \"\"\" # Parse", "%.4g ' % (mDict[\"snrPIfit\"])) log('sigma_add(q) = %.4g (+%.4g, -%.4g)' %", "absolute path to file. 
nBits (int): number of bits to", "QArr = QArr, UArr = UArr, dIArr = dIArr, dQArr", "is hereby granted, free of charge, to any person obtaining", "from RMutils.util_RM import do_rmsynth_planes from RMutils.util_RM import get_rmsf_planes from RMutils.util_RM", "# specFig.canvas.toolbar.pack_forget() # CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window) # except Exception: # pass", "\"complex\" + str(2*nBits) # Output prefix is derived from the", "exist: '%s'.\" % args.dataFile[0]) sys.exit() prefixOut, ext = os.path.splitext(args.dataFile[0]) #", "phiArr = phiArr_radm2, fwhmRMSF = fwhmRMSF, dFDF = dFDFth, lamSqArr_m2", "str(2*nBits) # freq_Hz, I, Q, U, dI, dQ, dU try:", "+ \"_RMsynth.dat\" if verbose: print(\"Saving the measurements on the FDF", "np.savetxt(outFile, list(zip(arrdict[\"phiArr_radm2\"], arrdict[\"dirtyFDF\"].real, arrdict[\"dirtyFDF\"].imag))) outFile = prefixOut + \"_RMSF.dat\" if", "verbose=False, log=print,units='Jy/beam', prefixOut=\"prefixOut\", args=None): \"\"\"Run RM synthesis on 1D data.", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # # LIABILITY,", "Show plots? 
debug (bool): Turn on debugging messages & plots?", "debug=args.debug) # Run RM-synthesis on the spectra mDict, aDict =", "= dIArr, dqArr = dqArr, duArr = duArr, freqHirArr_Hz =", "(mDict[\"fwhmRMSF\"])) log('Pol Angle = %.4g (+/-%.4g) deg' % (mDict[\"polAngleFit_deg\"], mDict[\"dPolAngleFit_deg\"]))", "aDict[\"phi2Arr_radm2\"] = phi2Arr_radm2 aDict[\"RMSFArr\"] = RMSFArr aDict[\"freqArr_Hz\"] = freqArr_Hz aDict[\"weightArr\"]=weightArr", "freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000) IModHirArr = poly5(fitDict[\"p\"])(freqHirArr_Hz/1e9) specFig =", "lam0Sq_m2, fwhmRMSF = \\ # do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr)", "# freq_Hz, I, Q, U, dI, dQ, dU try: if", "# # copy of this software and associated documentation files", "# Debugging plots for spectral complexity measure if debug: tmpFig", "the dirty FDF, RMSF weight arrays to ASCII files.\") outFile", "lobe [10].\") parser.add_argument(\"-w\", dest=\"weightType\", default=\"variance\", help=\"weighting [inverse variance] or 'uniform'", "startPhi_radm2 = - (nChanRM-1.0) * dPhi_radm2 / 2.0 stopPhi_radm2 =", "args.noStokesI, nBits = nBits, showPlots = args.showPlots, debug = args.debug,", "u, dq, du try: if verbose: log(\"> Trying [freq_Hz, q,", "of channels: good_channels=np.where(np.logical_and(weightArr != 0,np.isfinite(qArr)))[0] mDict[\"min_freq\"]=float(np.min(freqArr_Hz[good_channels])) mDict[\"max_freq\"]=float(np.max(freqArr_Hz[good_channels])) mDict[\"N_channels\"]=good_channels.size mDict[\"median_channel_width\"]=float(np.median(np.diff(freqArr_Hz))) #", "containing the columns found in the file. If Stokes I", "custom navigation toolbar (does not work on Mac OS X)", "Gaussian to the RMSF? noStokesI (bool: Is Stokes I data", "+ dUArr)/2.0 # Fit the Stokes I spectrum and create", "Fit the Stokes I spectrum and create the fractional spectra", "of polynomial to fit to Stokes I spectrum. 
phiMax_radm2 (float):", "on Stokes I, Q and U spectra (1D) stored in", "[10].\") parser.add_argument(\"-w\", dest=\"weightType\", default=\"variance\", help=\"weighting [inverse variance] or 'uniform' (all", "if verbose: print(\"... success.\") data=[freqArr_Hz, QArr, UArr, dQArr, dUArr] noStokesI", "(array_like): Fractional Stokes U intensity (U/I) in each channel. dq", "fdfFig.show() # Pause if plotting enabled if showPlots: plt.show() elif", "if verbose: log(\"Weight type is '%s'.\" % weightType) startTime =", "import time import traceback import json import math as m", "= %.4g ' % (mDict[\"freq0_Hz\"]/1e9)) log('I freq0 = %.4g %s'", "RMutils.util_RM import measure_fdf_complexity from RMutils.util_misc import nanmedian from RMutils.util_misc import", "dest=\"phiMax_radm2\", type=float, default=None, help=\"absolute max Faraday depth sampled [Auto].\") parser.add_argument(\"-d\",", "Stokes U intensity (U/I) in each channel. dq (array_like): Error", "person obtaining a # # copy of this software and", "parser.add_argument(\"-s\", dest=\"nSamples\", type=float, default=10, help=\"number of samples across the RMSF", "[False].\") parser.add_argument(\"-S\", dest=\"saveOutput\", action=\"store_true\", help=\"save the arrays and plots [False].\")", "= \",\".join([str(x) for x in fitDict[\"p\"]]) mDict[\"IfitStat\"] = fitDict[\"fitStatus\"] mDict[\"IfitChiSqRed\"]", "(np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9 ax.set_xlim( np.min(freqArr_Hz)/1e9 - xRange*0.05, np.max(freqArr_Hz)/1e9 + xRange*0.05) ax.set_xlabel('$\\\\nu$ (GHz)')", "#add array dictionary aDict = dict() aDict[\"phiArr_radm2\"] = phiArr_radm2 aDict[\"phi2Arr_radm2\"]", "the RMSF lobe [10].\") parser.add_argument(\"-w\", dest=\"weightType\", default=\"variance\", help=\"weighting [inverse variance]", "stopPhi_radm2, nChanRM) phiArr_radm2 = phiArr_radm2.astype(dtFloat) if verbose: log(\"PhiArr = %.2f", "uArr = uArr, dqArr = dqArr, duArr = duArr, fracPol", "# Speed of light [m/s] 
#-----------------------------------------------------------------------------# def run_rmsynth(data, polyOrd=3, phiMax_radm2=None,", "# Use the custom navigation toolbar (does not work on", "this will be [freq_Hz, I, Q, U, dI, dQ, dU],", "noStokesI = True except Exception: if verbose: print(\"...failed.\") if debug:", "granted, free of charge, to any person obtaining a #", "space separated format: [freq_Hz, I, Q, U, I_err, Q_err, U_err]", "if phiMax_radm2 is None: phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2 phiMax_radm2", "# # The MIT License (MIT) # # # #", "= args, ) if args.saveOutput: saveOutput(mDict, aDict, prefixOut, verbose) #-----------------------------------------------------------------------------#", "dataDir, dummy = os.path.split(args.dataFile[0]) # Set the floating point precision", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #", "average RMS spectrum) if debug: rmsFig = plt.figure(figsize=(12.0, 8)) ax", "% outFile) FH = open(outFile, \"w\") for k, v in", "intensity in each channel. Q (array_like): Stokes Q intensity in", "fractional q = Q/I and u = U/I spectra. The", "np.loadtxt(dataFile, unpack=True, dtype=dtFloat) if verbose: print(\"... success.\") data=[freqArr_Hz, IArr, QArr,", "# # # # Permission is hereby granted, free of", "TORT OR OTHERWISE, ARISING # # FROM, OUT OF OR", "open(outFile, \"w\")) #-----------------------------------------------------------------------------# def main(): import argparse \"\"\" Start the", "each channel in Hz. 
I (array_like): Stokes I intensity in", "errors.\") parser.add_argument(\"-t\", dest=\"fitRMSF\", action=\"store_true\", help=\"fit a Gaussian to the RMSF", "dQ (array_like): Error in Stokes Q intensity in each channel.", "if showPlots: plt.show() elif saveOutput or debug: if verbose: print(\"Saving", "(c) 2015 - 2018 <NAME> # # # # Permission", "# Permission is hereby granted, free of charge, to any", "\" + outFilePlot) fdfFig.savefig(outFilePlot, bbox_inches = 'tight') # #if verbose:", "FH.write(\"%s=%s\\n\" % (k, v)) FH.close() outFile = prefixOut + \"_RMsynth.json\"", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #", "log('FDF Noise (theory) = %.4g %s' % (mDict[\"dFDFth\"],units)) log('FDF Noise", "RMSF [Phi, Q, U] _RMsynth.dat: list of derived parameters for", "units = units) # Use the custom navigation toolbar #", "main(): import argparse \"\"\" Start the function to perform RM-synthesis", "floating point precision [False (uses 32-bit)]\") parser.add_argument(\"-p\", dest=\"showPlots\", action=\"store_true\", help=\"show", "phiMax to 10 FWHM # Faraday depth sampling. Zero always", "to I spectrum [2].\") parser.add_argument(\"-i\", dest=\"noStokesI\", action=\"store_true\", help=\"ignore the Stokes", "dQ, dU], else [freq_Hz, q, u, dq, du]. \"\"\" #", "function to perform RM-synthesis if called from the command line.", "the Software without restriction, including without limitation # # the", "log(\"... success.\") except Exception: if verbose: log(\"...failed.\") # freq_Hz, q,", "# Determine the Stokes I value at lam0Sq_m2 from the", "dest=\"noStokesI\", action=\"store_true\", help=\"ignore the Stokes I spectrum [False].\") parser.add_argument(\"-b\", dest=\"bit64\",", "Determine the Stokes I value at lam0Sq_m2 from the Stokes", "(freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = \\ np.loadtxt(dataFile,", "1D data. 
Args: data (list): Contains frequency and polarization data", "ax.set_title(\"RMS noise in Stokes Q, U and <Q,U> spectra\") #", "is '%s'.\" % weightType) startTime = time.time() # Perform RM-synthesis", "depth channel [Auto].\\n(overrides -s NSAMPLES flag)\") parser.add_argument(\"-s\", dest=\"nSamples\", type=float, default=10,", "UArr, dQArr, dUArr) = \\ np.loadtxt(dataFile, unpack=True, dtype=dtFloat) if verbose:", "-- Weight by uncertainty in Q and U. \"uniform\" --", "phiArr_radm2, weightArr) #-------------------------------------------------------------------------# endTime = time.time() cputime = (endTime -", "weightArr = weightArr, mskArr = ~np.isfinite(qArr), lam0Sq_m2 = lam0Sq_m2, double", "formula only works for wariance weights! weightArr = np.where(np.isnan(weightArr), 0.0,", "spectrum (approximately equivalent to -v flag output) _RMsynth.json: dictionary of", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # # FROM,", "= dqArr, duArr = duArr, freqHirArr_Hz = freqHirArr_Hz, IModArr =", "else [freq_Hz, q, u, dq, du]. \"\"\" # Default data", "8)) plot_Ipqu_spectra_fig(freqArr_Hz = freqArr_Hz, IArr = IArr, qArr = qArr,", "Stokes spectra & errors.\") parser.add_argument(\"-t\", dest=\"fitRMSF\", action=\"store_true\", help=\"fit a Gaussian", "messages to terminal? debug (bool): Print full traceback in case", "floating point precision nBits = 32 if args.bit64: nBits =", "each channel. dU (array_like): Error in Stokes U intensity in", "os import time import traceback import json import math as", "numbers. showPlots (bool): Show plots? 
debug (bool): Turn on debugging", "freqArr_Hz[-1], 10000) IModHirArr = poly5(fitDict[\"p\"])(freqHirArr_Hz/1e9) specFig = plt.figure(figsize=(12.0, 8)) plot_Ipqu_spectra_fig(freqArr_Hz", "to %.2f by %.2f (%d chans).\" % (phiArr_radm2[0], phiArr_radm2[-1], float(dPhi_radm2),", "% dataFile) # freq_Hz, I, Q, U, dI, dQ, dU", "if verbose: log(\"...failed.\") # freq_Hz, q, u, dq, du try:", "units = args.units, prefixOut = prefixOut, args = args, )", "import plot_Ipqu_spectra_fig from RMutils.util_plotTk import plot_rmsf_fdf_fig from RMutils.util_plotTk import plot_complexity_fig", "# specFig.show() # DEBUG (plot the Q, U and average", "EXPRESS OR # # IMPLIED, INCLUDING BUT NOT LIMITED TO", "verbose: print(\"Saving RMSF and dirty FDF plot:\") outFilePlot = prefixOut", "and u spectra mDict[\"fracPol\"] = mDict[\"ampPeakPIfit\"]/(Ifreq0) mD, pD = measure_qu_complexity(freqArr_Hz", "RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \\ get_rmsf_planes(lambdaSqArr_m2 = lambdaSqArr_m2, phiArr_radm2", "prefixOut + \"_FDFdirty.dat\" if verbose: print(\"> %s\" % outFile) np.savetxt(outFile,", "and associated documentation files (the \"Software\"), # # to deal", "\"complex\" + str(2*nBits) # freq_Hz, I, Q, U, dI, dQ,", "# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE", "args.dPhi_radm2, nSamples = args.nSamples, weightType = args.weightType, fitRMSF = args.fitRMSF,", "floating point numbers. showPlots (bool): Show plots? debug (bool): Turn", "# # PURPOSE: API for runnning RM-synthesis on an ASCII", "channel nChanRM = int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0", "prefixOut + \"_RMSF.dat\" if verbose: print(\"> %s\" % outFile) np.savetxt(outFile,", "help=\"use 64-bit floating point precision [False (uses 32-bit)]\") parser.add_argument(\"-p\", dest=\"showPlots\",", "if verbose: print(\"... success.\") data=[freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr,", "debug = args.debug, verbose = verbose, units = args.units, prefixOut", "wariance weights! 
weightArr = np.where(np.isnan(weightArr), 0.0, weightArr) dFDFth = np.sqrt(", "on debugging messages & plots [False].\") parser.add_argument(\"-U\", dest=\"units\", type=str, default=\"Jy/beam\",", "= verbose, log = log) fwhmRMSF = float(fwhmRMSFArr) # ALTERNATE", "= data if verbose: log(\"... success.\") except Exception: if verbose:", "log = log) # Calculate the Rotation Measure Spread Function", "plot_rmsf_fdf_fig from RMutils.util_plotTk import plot_complexity_fig from RMutils.util_plotTk import CustomNavbar from", "lambdaSqArr_m2, phiArr_radm2, weightArr) #-------------------------------------------------------------------------# endTime = time.time() cputime = (endTime", "the figure # if not plt.isinteractive(): # specFig.show() # DEBUG", "if verbose: print(\"Successfully read in the Stokes spectra.\") return data", "xRange*0.05) ax.set_xlabel('$\\\\nu$ (GHz)') ax.set_ylabel('RMS '+units) ax.set_title(\"RMS noise in Stokes Q,", "+ \".RMSF-dirtyFDF-plots.pdf\" if verbose: print(\"> \" + outFilePlot) fdfFig.savefig(outFilePlot, bbox_inches", "RMSF and dirty FDF plot:\") outFilePlot = prefixOut + \".RMSF-dirtyFDF-plots.pdf\"", "Set the Faraday depth range fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0)", "# copy of this software and associated documentation files (the", "RMutils.util_plotTk import plot_complexity_fig from RMutils.util_plotTk import CustomNavbar from RMutils.util_plotTk import", "log('Peak FD = %.4g (+/-%.4g) rad/m^2' % (mDict[\"phiPeakPIfit_rm2\"], mDict[\"dPhiPeakPIfit_rm2\"])) log('freq0_GHz", "Is Stokes I data provided? phiNoise_radm2 (float): ???? nBits (int):", "FDF = dirtyFDF, phi2Arr = phi2Arr_radm2, RMSFArr = RMSFArr, fwhmRMSF", "# PURPOSE: API for runnning RM-synthesis on an ASCII Stokes", "if noStokesI: if verbose: log(\"Warn: no Stokes I data in", "verbose: log(\"Warn: no Stokes I data in use.\") IArr =", "substantial portions of the Software. 
# # # # THE", "dqArr, duArr, fitDict = \\ create_frac_spectra(freqArr = freqArr_GHz, IArr =", "fit to Stokes I spectrum. phiMax_radm2 (float): Maximum absolute Faraday", "np.sqrt( np.sum(weightArr**2 * np.nan_to_num(dQUArr)**2) / (np.sum(weightArr))**2 ) # Measure the", "UArr, dIArr, dQArr, dUArr) = \\ np.loadtxt(dataFile, unpack=True, dtype=dtFloat) if", "file name # Read the data-file. Format=space-delimited, comments=\"#\". if verbose:", "(mDict[\"dQU\"],units)) log('FDF Noise (theory) = %.4g %s' % (mDict[\"dFDFth\"],units)) log('FDF", "U and <Q,U> spectra\") # rmsFig.show() #-------------------------------------------------------------------------# # Calculate some", "model # Multiply the dirty FDF by Ifreq0 to recover", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "complexity measure if debug: tmpFig = plot_complexity_fig(xArr=pD[\"xArrQ\"], qArr=pD[\"yArrQ\"], dqArr=pD[\"dyArrQ\"], sigmaAddqArr=pD[\"sigmaAddArrQ\"],", "# # # # THE SOFTWARE IS PROVIDED \"AS IS\",", "I, Q, U, dI, dQ, dU try: if verbose: print(\">", "aDict[\"weightArr\"]=weightArr aDict[\"dirtyFDF\"]=dirtyFDF if verbose: # Print the results to the", "outFilePlot) fdfFig.savefig(outFilePlot, bbox_inches = 'tight') # #if verbose: print \"Press", "output [False].\") parser.add_argument(\"-S\", dest=\"saveOutput\", action=\"store_true\", help=\"save the arrays and plots", "Convert to GHz for convenience freqArr_GHz = freqArr_Hz / 1e9", "= args.noStokesI, nBits = nBits, showPlots = args.showPlots, debug =", "intensity in each channel. 
U (array_like): Stokes U intensity in", "nSamples if phiMax_radm2 is None: phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2", "FDF plot:\") outFilePlot = prefixOut + \".RMSF-dirtyFDF-plots.pdf\" if verbose: print(\">", "create_frac_spectra from RMutils.util_misc import poly5 from RMutils.util_misc import MAD from", "import measure_fdf_complexity from RMutils.util_misc import nanmedian from RMutils.util_misc import toscalar", "input data and spectral index fit.\") freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1],", "mDict[\"dSigmaAddMinusQ\"])) log('sigma_add(u) = %.4g (+%.4g, -%.4g)' % (mDict[\"sigmaAddU\"], mDict[\"dSigmaAddPlusU\"], mDict[\"dSigmaAddMinusU\"]))", "of derived parameters for RM spectrum (approximately equivalent to -v", "time.time() # Perform RM-synthesis on the spectrum dirtyFDF, lam0Sq_m2 =", "Noise (Corrected MAD) = %.4g %s' % (mDict[\"dFDFcorMAD\"],units)) log('FDF Noise", "RMSF lobe [10].\") parser.add_argument(\"-w\", dest=\"weightType\", default=\"variance\", help=\"weighting [inverse variance] or", "dU]\", end=' ') (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr)", "'%s'.\" % args.dataFile[0]) sys.exit() prefixOut, ext = os.path.splitext(args.dataFile[0]) dataDir, dummy", "absolute Faraday depth (rad/m^2). dPhi_radm2 (float): Faraday depth channel size", "permit persons to whom the # # Software is furnished", "%.2f to %.2f by %.2f (%d chans).\" % (phiArr_radm2[0], phiArr_radm2[-1],", "flags must be set: -S, -p, -v. 
\"\"\" epilog_text=\"\"\" Outputs", "* dPhi_radm2 / 2.0 phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM) phiArr_radm2", "= (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9 ax.set_xlim( np.min(freqArr_Hz)/1e9 - xRange*0.05, np.max(freqArr_Hz)/1e9 + xRange*0.05) ax.set_xlabel('$\\\\nu$", "polyOrd, verbose = True, debug = debug) # Plot the", "prefixOut, args = args, ) if args.saveOutput: saveOutput(mDict, aDict, prefixOut,", "Software, and to permit persons to whom the # #", "'+units) ax.set_title(\"RMS noise in Stokes Q, U and <Q,U> spectra\")", "spectrum is first fit with a polynomial and the resulting", "import nanmedian from RMutils.util_misc import toscalar from RMutils.util_misc import create_frac_spectra", "# DEBUG (plot the Q, U and average RMS spectrum)", "saveOutput or debug: if verbose: print(\"Saving RMSF and dirty FDF", "Precision of floating point numbers. showPlots (bool): Show plots? debug", "json.dump(dict(outdict), open(outFile, \"w\")) #-----------------------------------------------------------------------------# def main(): import argparse \"\"\" Start", "fwhmRMSF_radm2 / nSamples if phiMax_radm2 is None: phiMax_radm2 = m.sqrt(3.0)", "minimum phiMax to 10 FWHM # Faraday depth sampling. Zero", "if verbose: print(\"> \" + outFilePlot) tmpFig.savefig(outFilePlot, bbox_inches = 'tight')", "of bits to store the data as. verbose (bool): Print", "np.loadtxt(dataFile, unpack=True, dtype=dtFloat) if verbose: print(\"... 
success.\") data=[freqArr_Hz, QArr, UArr,", "initially, convert back to flux # Calculate the theoretical noise", "on Mac OS X) # try: # specFig.canvas.toolbar.pack_forget() # CustomNavbar(specFig.canvas,", "64 verbose=args.verbose data = readFile(args.dataFile[0],nBits, verbose=verbose, debug=args.debug) # Run RM-synthesis", "import measure_FDF_parms from RMutils.util_RM import measure_qu_complexity from RMutils.util_RM import measure_fdf_complexity", "IArr, QArr, UArr, dIArr, dQArr, dUArr) = data if verbose:", "= plt.figure(figsize=(12.0, 8)) plot_Ipqu_spectra_fig(freqArr_Hz = freqArr_Hz, IArr = IArr, qArr", "as 1/sigma^2 or all 1s (uniform) if weightType==\"variance\": weightArr =", "= True, fitRMSF = fitRMSF, fitRMSFreal = False, nBits =", "the RM Spread Function and dirty FDF if showPlots or", "verbose, units = args.units, prefixOut = prefixOut, args = args,", "formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"dataFile\", metavar=\"dataFile.dat\", nargs=1, help=\"ASCII file containing Stokes spectra &", "equivalent to -v flag output) _RMsynth.json: dictionary of derived parameters", "# Display the figure # fdfFig.show() # Pause if plotting", "no Stokes I data in use.\") IArr = np.ones_like(QArr) dIArr", "\"key=value\" text file outFile = prefixOut + \"_RMsynth.dat\" if verbose:", "print(\"... success.\") data=[freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr] except", "= 64 verbose=args.verbose data = readFile(args.dataFile[0],nBits, verbose=verbose, debug=args.debug) # Run", "spectrum dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ = qArr, dataU = uArr,", "-S, -p, -v. \"\"\" epilog_text=\"\"\" Outputs with -S flag: _FDFdirty.dat:", "\"variance\" -- Weight by uncertainty in Q and U. 
\"uniform\"", "color='b', lw=0.5, label='rms Q') ax.plot(freqArr_Hz/1e9, dUArr, marker='o', color='r', lw=0.5, label='rms", "+ str(nBits) dtComplex = \"complex\" + str(2*nBits) # Output prefix", "# Display the figure # if not plt.isinteractive(): # specFig.show()", "(Ifreq0) # FDF is in fracpol units initially, convert back", "log('RESULTS:\\n') log('FWHM RMSF = %.4g rad/m^2' % (mDict[\"fwhmRMSF\"])) log('Pol Angle", "fitRMSF (bool): Fit a Gaussian to the RMSF? noStokesI (bool:", "weighting as 1/sigma^2 or all 1s (uniform) if weightType==\"variance\": weightArr", "mDict.update(mD) # Debugging plots for spectral complexity measure if debug:", "verbose): # Save the dirty FDF, RMSF and weight array", "mDict = measure_FDF_parms(FDF = dirtyFDF, phiArr = phiArr_radm2, fwhmRMSF =", "help=\"save the arrays and plots [False].\") parser.add_argument(\"-D\", dest=\"debug\", action=\"store_true\", help=\"turn", "model fit if verbose: log(\"Plotting the input data and spectral", "= prefixOut + \"_RMsynth.dat\" if verbose: print(\"Saving the measurements on", "Q_err, U_err] To get outputs, one or more of the", "always centred on middle channel nChanRM = int(round(abs((phiMax_radm2 - 0.0)", "dq, du try: if verbose: print(\"> Trying [freq_Hz, q, u,", "dPhi_radm2 (float): Faraday depth channel size (rad/m^2). nSamples (float): Number", "= lambdaSqArr_m2, lam0Sq = lam0Sq_m2) mDict[\"Ifreq0\"] = toscalar(Ifreq0) mDict[\"polyCoeffs\"] =", "in outdict.items(): FH.write(\"%s=%s\\n\" % (k, v)) FH.close() outFile = prefixOut", "dqArr, duArr = duArr, freqHirArr_Hz = freqHirArr_Hz, IModArr = IModHirArr,", "figure # if not plt.isinteractive(): # specFig.show() # DEBUG (plot", "1/sigma^2 or all 1s (uniform) if weightType==\"variance\": weightArr = 1.0", "on 1D data. 
Args: data (list): Contains frequency and polarization", "# and/or sell copies of the Software, and to permit", "= %.4g (+%.4g, -%.4g)' % (mDict[\"sigmaAddU\"], mDict[\"dSigmaAddPlusU\"], mDict[\"dSigmaAddMinusU\"])) log() log('-'*80)", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "has low signal-to-noise.\") #Add information on nature of channels: good_channels=np.where(np.logical_and(weightArr", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # # FITNESS", "if not os.path.exists(args.dataFile[0]): print(\"File does not exist: '%s'.\" % args.dataFile[0])", "int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0) startPhi_radm2", "% (mDict[\"freq0_Hz\"]/1e9)) log('I freq0 = %.4g %s' % (mDict[\"Ifreq0\"],units)) log('Peak", "fdfFig.canvas.toolbar.window) # except Exception: # pass # Display the figure", "help=\"ignore the Stokes I spectrum [False].\") parser.add_argument(\"-b\", dest=\"bit64\", action=\"store_true\", help=\"use", "spectra mDict, aDict = run_rmsynth(data = data, polyOrd = args.polyOrd,", "precision nBits = 32 if args.bit64: nBits = 64 verbose=args.verbose", "# # # The above copyright notice and this permission", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # #", "no Stokes I present, create a dummy spectrum = unity", "rmsFig.add_subplot(111) ax.plot(freqArr_Hz/1e9, dQUArr, marker='o', color='k', lw=0.5, label='rms <QU>') ax.plot(freqArr_Hz/1e9, dQArr,", "du try: if verbose: print(\"> Trying [freq_Hz, q, u, dq,", "UArr, dQArr, dUArr) = data if verbose: log(\"... success.\") noStokesI", "marker='o', color='k', lw=0.5, label='rms <QU>') ax.plot(freqArr_Hz/1e9, dQArr, marker='o', color='b', lw=0.5,", "back to flux # Calculate the theoretical noise in the", "Verbosity. log (function): Which logging function to use. 
units (str):", "Exception: if verbose: log(\"...failed.\") if debug: log(traceback.format_exc()) sys.exit() if verbose:", "# Save the dirty FDF, RMSF and weight array to", "mD, pD = measure_qu_complexity(freqArr_Hz = freqArr_Hz, qArr = qArr, uArr", "np.nanmin(lambdaSqArr_m2) ) dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2))) dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2))) # Set", "Q (array_like): Stokes Q intensity in each channel. U (array_like):", "\"w\")) #-----------------------------------------------------------------------------# def main(): import argparse \"\"\" Start the function", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "print(\"> %s\" % outFile) json.dump(dict(outdict), open(outFile, \"w\")) #-----------------------------------------------------------------------------# def main():", "software and associated documentation files (the \"Software\"), # # to", "RMutils.util_RM import measure_FDF_parms from RMutils.util_RM import measure_qu_complexity from RMutils.util_RM import", "aDict = dict() aDict[\"phiArr_radm2\"] = phiArr_radm2 aDict[\"phi2Arr_radm2\"] = phi2Arr_radm2 aDict[\"RMSFArr\"]", "data if verbose: log(\"... success.\") noStokesI = True except Exception:", "U spectrum.# # # # MODIFIED: 16-Nov-2018 by <NAME> #", "chiSqReduArr=pD[\"chiSqRedArrU\"], probuArr=pD[\"probArrU\"], mDict=mDict) if saveOutput: if verbose: print(\"Saving debug plots:\")", "0.0, weightArr) dFDFth = np.sqrt( np.sum(weightArr**2 * np.nan_to_num(dQUArr)**2) / (np.sum(weightArr))**2", "to Stokes I spectrum. phiMax_radm2 (float): Maximum absolute Faraday depth", "= np.nanmax(np.abs(np.diff(lambdaSqArr_m2))) # Set the Faraday depth range fwhmRMSF_radm2 =", "light [m/s] #-----------------------------------------------------------------------------# def run_rmsynth(data, polyOrd=3, phiMax_radm2=None, dPhi_radm2=None, nSamples=10.0, weightType=\"variance\",", "dLambdaSqMax_m2 phiMax_radm2 = max(phiMax_radm2, fwhmRMSF_radm2*10.) 
# Force the minimum phiMax", "file outFile = prefixOut + \"_RMsynth.dat\" if verbose: print(\"Saving the", "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "samples across the RMSF lobe [10].\") parser.add_argument(\"-w\", dest=\"weightType\", default=\"variance\", help=\"weighting", "marker='o', color='b', lw=0.5, label='rms Q') ax.plot(freqArr_Hz/1e9, dUArr, marker='o', color='r', lw=0.5,", "# Read the data-file. Format=space-delimited, comments=\"#\". if verbose: print(\"Reading the", "u, dq, du]. \"\"\" # Default data types dtFloat =", "data. Returns: mDict (dict): Summary of RM synthesis results. aDict", "dUArr) = data if verbose: log(\"... success.\") except Exception: if", "log(\"...failed.\") if debug: log(traceback.format_exc()) sys.exit() if verbose: log(\"Successfully read in", "dQArr = dQArr, dUArr = dUArr, polyOrd = polyOrd, verbose", "print(\"> \" + outFilePlot) fdfFig.savefig(outFilePlot, bbox_inches = 'tight') # #if", "= %.4g (+/-%.4g) deg' % (mDict[\"polAngle0Fit_deg\"], mDict[\"dPolAngle0Fit_deg\"])) log('Peak FD =", "x in fitDict[\"p\"]]) mDict[\"IfitStat\"] = fitDict[\"fitStatus\"] mDict[\"IfitChiSqRed\"] = fitDict[\"chiSqRed\"] mDict[\"lam0Sq_m2\"]", "aDict[\"dirtyFDF\"]=dirtyFDF if verbose: # Print the results to the screen", "= \"uniform\" weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat) if verbose: log(\"Weight type", "help=\"absolute max Faraday depth sampled [Auto].\") parser.add_argument(\"-d\", dest=\"dPhi_radm2\", type=float, default=None,", "verbose=verbose, debug=args.debug) # Run RM-synthesis on the spectra mDict, aDict", "# #if verbose: print \"Press <RETURN> to exit ...\", #", "Units of data. Returns: mDict (dict): Summary of RM synthesis", "on the spectra mDict, aDict = run_rmsynth(data = data, polyOrd", "* dPhi_radm2 / 2.0 stopPhi_radm2 = + (nChanRM-1.0) * dPhi_radm2", "(array_like): Stokes U intensity in each channel. 
dI (array_like): Error", "dU (array_like): Error in Stokes U intensity in each channel.", "%.2f seconds.\" % cputime) # Determine the Stokes I value", "freqArr_Hz, qArr = qArr, uArr = uArr, dqArr = dqArr,", "QArr, UArr, dIArr, dQArr, dUArr) = \\ np.loadtxt(dataFile, unpack=True, dtype=dtFloat)", "phiArr_radm2.astype(dtFloat) if verbose: log(\"PhiArr = %.2f to %.2f by %.2f", "except Exception: if verbose: print(\"...failed.\") if debug: print(traceback.format_exc()) sys.exit() if", "= dIArr, dQArr = dQArr, dUArr = dUArr, polyOrd =", "fracPol = mDict[\"fracPol\"], psi0_deg = mDict[\"polAngle0Fit_deg\"], RM_radm2 = mDict[\"phiPeakPIfit_rm2\"]) mDict.update(mD)", "-%.4g)' % (mDict[\"sigmaAddQ\"], mDict[\"dSigmaAddPlusQ\"], mDict[\"dSigmaAddMinusQ\"])) log('sigma_add(u) = %.4g (+%.4g, -%.4g)'", "[Phi, Q, U] _RMsynth.dat: list of derived parameters for RM", "\"\"\" Read the I, Q & U data from the", "U') xRange = (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9 ax.set_xlim( np.min(freqArr_Hz)/1e9 - xRange*0.05, np.max(freqArr_Hz)/1e9 +", "SOFTWARE. 
# # # #=============================================================================# import sys import os import", "lw=0.5, label='rms <QU>') ax.plot(freqArr_Hz/1e9, dQArr, marker='o', color='b', lw=0.5, label='rms Q')", "= 32 if args.bit64: nBits = 64 verbose=args.verbose data =", "matplotlib.pyplot as plt from RMutils.util_RM import do_rmsynth from RMutils.util_RM import", "point precision [False (uses 32-bit)]\") parser.add_argument(\"-p\", dest=\"showPlots\", action=\"store_true\", help=\"show the", "#Add information on nature of channels: good_channels=np.where(np.logical_and(weightArr != 0,np.isfinite(qArr)))[0] mDict[\"min_freq\"]=float(np.min(freqArr_Hz[good_channels]))", "U, dI, dQ, dU]\", end=' ') (freqArr_Hz, IArr, QArr, UArr,", "dqArr, duArr = duArr, fracPol = mDict[\"fracPol\"], psi0_deg = mDict[\"polAngle0Fit_deg\"],", "#!/usr/bin/env python #=============================================================================# # # # NAME: do_RMsynth_1D.py # #", "help=\"width of Faraday depth channel [Auto].\\n(overrides -s NSAMPLES flag)\") parser.add_argument(\"-s\",", "is furnished to do so, subject to the following conditions:", "each channel in Hz. q (array_like): Fractional Stokes Q intensity", "default=10, help=\"number of samples across the RMSF lobe [10].\") parser.add_argument(\"-w\",", "U intensity in each channel. 
Kwargs: polyOrd (int): Order of", "= plt.figure(figsize=(12.0, 8)) plot_rmsf_fdf_fig(phiArr = phiArr_radm2, FDF = dirtyFDF, phi2Arr", "from RMutils.util_misc import MAD from RMutils.util_plotTk import plot_Ipqu_spectra_fig from RMutils.util_plotTk", "Q, U and <Q,U> spectra\") # rmsFig.show() #-------------------------------------------------------------------------# # Calculate", "True except Exception: if verbose: log(\"...failed.\") if debug: log(traceback.format_exc()) sys.exit()", "SOFTWARE OR THE USE OR OTHER # # DEALINGS IN", "verbose: print(\"> \" + outFilePlot) tmpFig.savefig(outFilePlot, bbox_inches = 'tight') else:", "dictionary of derived parameters for RM spectrum _weight.dat: Calculated channel", "to whom the # # Software is furnished to do", "dirty FDF # Use the theoretical noise to calculate uncertainties", "lw=0.5, label='rms U') xRange = (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9 ax.set_xlim( np.min(freqArr_Hz)/1e9 - xRange*0.05,", "uArr, dIArr = dIArr, dqArr = dqArr, duArr = duArr,", "nChanRM = int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 +", "Faraday depth channel [Auto].\\n(overrides -s NSAMPLES flag)\") parser.add_argument(\"-s\", dest=\"nSamples\", type=float,", "terminal? 
debug (bool): Print full traceback in case of failure?", "= toscalar(freq0_Hz) mDict[\"fwhmRMSF\"] = toscalar(fwhmRMSF) mDict[\"dQU\"] = toscalar(nanmedian(dQUArr)) mDict[\"dFDFth\"] =", "...\", # input() return mDict, aDict def readFile(dataFile, nBits, verbose=True,", "CustomNavbar from RMutils.util_plotTk import plot_rmsIQU_vs_nu_ax if sys.version_info.major == 2: print('RM-tools", "= phiArr_radm2.astype(dtFloat) if verbose: log(\"PhiArr = %.2f to %.2f by", "(array_like): Error in fractional Stokes Q intensity in each channel.", "mDict[\"N_channels\"]=good_channels.size mDict[\"median_channel_width\"]=float(np.median(np.diff(freqArr_Hz))) # Measure the complexity of the q and", "log('FDF Noise (rms) = %.4g %s' % (mDict[\"dFDFrms\"],units)) log('FDF SNR", "except Exception: if verbose: print(\"...failed.\") # freq_Hz, q, u, dq,", "[Auto].\\n(overrides -s NSAMPLES flag)\") parser.add_argument(\"-s\", dest=\"nSamples\", type=float, default=10, help=\"number of", "~np.isfinite(qArr), lam0Sq_m2 = lam0Sq_m2, double = True, fitRMSF = fitRMSF,", "type is '%s'.\" % weightType) startTime = time.time() # Perform", "to use, copy, modify, merge, publish, distribute, sublicense, # #", "polynomial to fit to Stokes I spectrum. phiMax_radm2 (float): Maximum", "(array_like): Frequency of each channel in Hz. I (array_like): Stokes", "U intensity in each channel. dI (array_like): Error in Stokes", "function to use. units (str): Units of data. 
Returns: mDict", "verbose: log(\"...failed.\") if debug: log(traceback.format_exc()) sys.exit() if verbose: log(\"Successfully read", "runnning RM-synthesis on an ASCII Stokes I, Q & U", "= QArr, UArr = UArr, dIArr = dIArr, dQArr =", "of derived parameters for RM spectrum _weight.dat: Calculated channel weights", "data=[freqArr_Hz, QArr, UArr, dQArr, dUArr] noStokesI = True except Exception:", "dQArr, marker='o', color='b', lw=0.5, label='rms Q') ax.plot(freqArr_Hz/1e9, dUArr, marker='o', color='r',", "fitRMSF = fitRMSF, fitRMSFreal = False, nBits = nBits, verbose", "parser.add_argument(\"-U\", dest=\"units\", type=str, default=\"Jy/beam\", help=\"Intensity units of the data. [Jy/beam]\")", "_weight.dat: Calculated channel weights [freq_Hz, weight] \"\"\" # Parse the", "exit() C = 2.997924538e8 # Speed of light [m/s] #-----------------------------------------------------------------------------#", "OTHERWISE, ARISING # # FROM, OUT OF OR IN CONNECTION", "= True except Exception: if verbose: print(\"...failed.\") if debug: print(traceback.format_exc())", "of arrays): List containing the columns found in the file.", "data (list of arrays): List containing the columns found in", "Calculate some wavelength parameters lambdaSqArr_m2 = np.power(C/freqArr_Hz, 2.0) dFreq_Hz =", "log(\"Weight type is '%s'.\" % weightType) startTime = time.time() #", "mDict=mDict) if saveOutput: if verbose: print(\"Saving debug plots:\") outFilePlot =", "(freqArr_Hz, QArr, UArr, dQArr, dUArr) = data if verbose: log(\"...", "copy, modify, merge, publish, distribute, sublicense, # # and/or sell", "channel. 
dI (array_like): Error in Stokes I intensity in each", "% (mDict[\"polAngle0Fit_deg\"], mDict[\"dPolAngle0Fit_deg\"])) log('Peak FD = %.4g (+/-%.4g) rad/m^2' %", "dirty FDF if showPlots or saveOutput: fdfFig = plt.figure(figsize=(12.0, 8))", "showPlots or saveOutput: fdfFig = plt.figure(figsize=(12.0, 8)) plot_rmsf_fdf_fig(phiArr = phiArr_radm2,", "128: log(\"WARNING: Stokes I model contains negative values!\") elif fitDict[\"fitStatus\"]", "and u = U/I spectra. The ASCII file should the", "RMSF? noStokesI (bool: Is Stokes I data provided? phiNoise_radm2 (float):", "specFig, units = units) # Use the custom navigation toolbar", "type=float, default=None, help=\"absolute max Faraday depth sampled [Auto].\") parser.add_argument(\"-d\", dest=\"dPhi_radm2\",", "args.showPlots, debug = args.debug, verbose = verbose, units = args.units,", "# Help string to be shown using the -h option", "in 'key=val' and JSON formats.\") print(\"> %s\" % outFile) FH", "import do_rmsynth_planes from RMutils.util_RM import get_rmsf_planes from RMutils.util_RM import measure_FDF_parms", "mskArr = ~np.isfinite(qArr), lam0Sq_m2 = lam0Sq_m2, double = True, fitRMSF", "(str): relative or absolute path to file. nBits (int): number", "# Use the theoretical noise to calculate uncertainties mDict =", "log(\"Warn: no Stokes I data in use.\") IArr = np.ones_like(QArr)", "results. aDict (dict): Data output by RM synthesis. \"\"\" #", "to terminal? debug (bool): Print full traceback in case of", "more of the following flags must be set: -S, -p,", "np.nanmin(np.abs(np.diff(lambdaSqArr_m2))) dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2))) # Set the Faraday depth range", "\"_weight.dat\" if verbose: print(\"> %s\" % outFile) np.savetxt(outFile, list(zip(arrdict[\"freqArr_Hz\"], arrdict[\"weightArr\"])))", "OR OTHER # # LIABILITY, WHETHER IN AN ACTION OF", "from the input file name # Read the data-file. Format=space-delimited,", "traceback in case of failure? 
Returns: data (list of arrays):", "Returns: mDict (dict): Summary of RM synthesis results. aDict (dict):", "dest=\"units\", type=str, default=\"Jy/beam\", help=\"Intensity units of the data. [Jy/beam]\") args", "args.bit64: nBits = 64 verbose=args.verbose data = readFile(args.dataFile[0],nBits, verbose=verbose, debug=args.debug)", "+ xRange*0.05) ax.set_xlabel('$\\\\nu$ (GHz)') ax.set_ylabel('RMS '+units) ax.set_title(\"RMS noise in Stokes", "fdfFig, units = units) # Use the custom navigation toolbar", "dU], else [freq_Hz, q, u, dq, du]. \"\"\" # Default", "Stokes Q intensity (Q/I) in each channel. u (array_like): Fractional", "# # DEALINGS IN THE SOFTWARE. # # # #=============================================================================#", "channel. U (array_like): Stokes U intensity in each channel. dI", "the Stokes spectra.\") # If no Stokes I present, create", "weightType==\"variance\": weightArr = 1.0 / np.power(dQUArr, 2.0) else: weightType =", "nargs=1, help=\"ASCII file containing Stokes spectra & errors.\") parser.add_argument(\"-t\", dest=\"fitRMSF\",", "= %.4g %s' % (mDict[\"Ifreq0\"],units)) log('Peak PI = %.4g (+/-%.4g)", "OTHER # # LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "= uArr, dqArr = dqArr, duArr = duArr, fracPol =", "outFile = prefixOut + \"_RMsynth.json\" if verbose: print(\"> %s\" %", "<Q,U> spectra\") # rmsFig.show() #-------------------------------------------------------------------------# # Calculate some wavelength parameters", "return data def saveOutput(outdict, arrdict, prefixOut, verbose): # Save the", "List containing the columns found in the file. 
If Stokes", "import math as m import numpy as np import matplotlib.pyplot", "index fit.\") freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000) IModHirArr = poly5(fitDict[\"p\"])(freqHirArr_Hz/1e9)", "DEBUG (plot the Q, U and average RMS spectrum) if", "RM spectrum _weight.dat: Calculated channel weights [freq_Hz, weight] \"\"\" #", "Run RM-synthesis on Stokes I, Q and U spectra (1D)", "log('sigma_add(q) = %.4g (+%.4g, -%.4g)' % (mDict[\"sigmaAddQ\"], mDict[\"dSigmaAddPlusQ\"], mDict[\"dSigmaAddMinusQ\"])) log('sigma_add(u)", "(U/I) in each channel. dq (array_like): Error in fractional Stokes", "of samples across the RMSF. weightType (str): Can be \"variance\"", "1e9 dQUArr = (dQArr + dUArr)/2.0 # Fit the Stokes", "try: # fdfFig.canvas.toolbar.pack_forget() # CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window) # except Exception: #", "phi2Arr_radm2 aDict[\"RMSFArr\"] = RMSFArr aDict[\"freqArr_Hz\"] = freqArr_Hz aDict[\"weightArr\"]=weightArr aDict[\"dirtyFDF\"]=dirtyFDF if", "of each channel in Hz. q (array_like): Fractional Stokes Q", "Faraday depth channel size (rad/m^2). nSamples (float): Number of samples", "from RMutils.util_RM import measure_FDF_parms from RMutils.util_RM import measure_qu_complexity from RMutils.util_RM", "(the \"Software\"), # # to deal in the Software without", "verbose: print(\"... success.\") data=[freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr]", "#-----------------------------------------------------------------------------# def run_rmsynth(data, polyOrd=3, phiMax_radm2=None, dPhi_radm2=None, nSamples=10.0, weightType=\"variance\", fitRMSF=False, noStokesI=False,", "dq, du]. \"\"\" # Default data types dtFloat = \"float\"", "verbose: print(\"Saving the measurements on the FDF in 'key=val' and", "Debugging plots for spectral complexity measure if debug: tmpFig =", "seconds.\" % cputime) # Determine the Stokes I value at", "Computed RMSF [Phi, Q, U] _RMsynth.dat: list of derived parameters", "-p, -v. 
\"\"\" epilog_text=\"\"\" Outputs with -S flag: _FDFdirty.dat: Dirty", "NSAMPLES flag)\") parser.add_argument(\"-s\", dest=\"nSamples\", type=float, default=10, help=\"number of samples across", "math as m import numpy as np import matplotlib.pyplot as", "as. verbose (bool): Print verbose messages to terminal? debug (bool):", "TO THE WARRANTIES OF MERCHANTABILITY, # # FITNESS FOR A", "Q and U. \"uniform\" -- Weight uniformly (i.e. with 1s)", "outFile) np.savetxt(outFile, list(zip(arrdict[\"phi2Arr_radm2\"], arrdict[\"RMSFArr\"].real, arrdict[\"RMSFArr\"].imag))) outFile = prefixOut + \"_weight.dat\"", "Stokes U intensity in each channel. or [freq_Hz, q, u,", "I value at lam0Sq_m2 from the Stokes I model #", "(array_like): Fractional Stokes Q intensity (Q/I) in each channel. u", "fig = specFig, units = units) # Use the custom", "'%s'.\" % args.dataFile[0]) sys.exit() prefixOut, ext = os.path.splitext(args.dataFile[0]) # Default", "verbose: print(\"> %s\" % outFile) np.savetxt(outFile, list(zip(arrdict[\"phi2Arr_radm2\"], arrdict[\"RMSFArr\"].real, arrdict[\"RMSFArr\"].imag))) outFile", "depth channel size (rad/m^2). nSamples (float): Number of samples across", "GHz for convenience freqArr_GHz = freqArr_Hz / 1e9 dQUArr =", "plt.figure(figsize=(12.0, 8)) ax = rmsFig.add_subplot(111) ax.plot(freqArr_Hz/1e9, dQUArr, marker='o', color='k', lw=0.5,", "= np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM) phiArr_radm2 = phiArr_radm2.astype(dtFloat) if verbose: log(\"PhiArr", "double = True, fitRMSF = fitRMSF, fitRMSFreal = False, nBits", "Python 3.') exit() C = 2.997924538e8 # Speed of light", "(list of arrays): List containing the columns found in the", "conditions: # # # # The above copyright notice and", "= m.sqrt(3.0) / dLambdaSqMax_m2 phiMax_radm2 = max(phiMax_radm2, fwhmRMSF_radm2*10.) # Force", "verbose (bool): Print verbose messages to terminal? 
debug (bool): Print", "help=\"number of samples across the RMSF lobe [10].\") parser.add_argument(\"-w\", dest=\"weightType\",", "traceback import json import math as m import numpy as", "the input file name # Read the data-file. Format=space-delimited, comments=\"#\".", "dQArr, dUArr] except Exception: if verbose: print(\"...failed.\") # freq_Hz, q,", "np.ones(freqArr_Hz.shape, dtype=dtFloat) if verbose: log(\"Weight type is '%s'.\" % weightType)", "types dtFloat = \"float\" + str(nBits) dtComplex = \"complex\" +", "= fitRMSF, fitRMSFreal = False, nBits = nBits, verbose =", "= int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0)", "import json import math as m import numpy as np", "(bool): Print full traceback in case of failure? Returns: data", "this software and associated documentation files (the \"Software\"), # #", "end=' ') (freqArr_Hz, QArr, UArr, dQArr, dUArr) = data if", "# # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "spectral complexity measure if debug: tmpFig = plot_complexity_fig(xArr=pD[\"xArrQ\"], qArr=pD[\"yArrQ\"], dqArr=pD[\"dyArrQ\"],", "MAD) = %.4g %s' % (mDict[\"dFDFcorMAD\"],units)) log('FDF Noise (rms) =", "psi0_deg = mDict[\"polAngle0Fit_deg\"], RM_radm2 = mDict[\"phiPeakPIfit_rm2\"]) mDict.update(mD) # Debugging plots", "+ \".debug-plots.pdf\" if verbose: print(\"> \" + outFilePlot) tmpFig.savefig(outFilePlot, bbox_inches", "uArr = uArr, dIArr = dIArr, dqArr = dqArr, duArr", "U. \"uniform\" -- Weight uniformly (i.e. with 1s) fitRMSF (bool):", "OR [freq_Hz, Q, U, Q_err, U_err] To get outputs, one", "Exception: # pass # Display the figure # fdfFig.show() #", "Stokes I intensity in each channel. 
Q (array_like): Stokes Q", "toscalar(dFDFth) mDict[\"units\"] = units if fitDict[\"fitStatus\"] >= 128: log(\"WARNING: Stokes", "the spectrum dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ = qArr, dataU =", "IArr, QArr = QArr, UArr = UArr, dIArr = dIArr,", "m.sqrt(lam0Sq_m2) Ifreq0 = poly5(fitDict[\"p\"])(freq0_Hz/1e9) dirtyFDF *= (Ifreq0) # FDF is", "Q, U, dI, dQ, dU try: if verbose: print(\"> Trying", "Error in fractional Stokes U intensity in each channel. Kwargs:", "C / m.sqrt(lam0Sq_m2) Ifreq0 = poly5(fitDict[\"p\"])(freq0_Hz/1e9) dirtyFDF *= (Ifreq0) #", "= duArr, freqHirArr_Hz = freqHirArr_Hz, IModArr = IModHirArr, fig =", "= time.time() cputime = (endTime - startTime) if verbose: log(\">", "(mDict[\"sigmaAddU\"], mDict[\"dSigmaAddPlusU\"], mDict[\"dSigmaAddMinusU\"])) log() log('-'*80) # Plot the RM Spread", "# MODIFIED: 23-October-2019 by <NAME> # # # #=============================================================================# #", "OF MERCHANTABILITY, # # FITNESS FOR A PARTICULAR PURPOSE AND", "probqArr=pD[\"probArrQ\"], uArr=pD[\"yArrU\"], duArr=pD[\"dyArrU\"], sigmaAdduArr=pD[\"sigmaAddArrU\"], chiSqReduArr=pD[\"chiSqRedArrU\"], probuArr=pD[\"probArrU\"], mDict=mDict) if saveOutput: if", "saveOutput(mDict, aDict, prefixOut, verbose) #-----------------------------------------------------------------------------# if __name__ == \"__main__\": main()", "a polynomial and the resulting model used to create fractional", "import measure_qu_complexity from RMutils.util_RM import measure_fdf_complexity from RMutils.util_misc import nanmedian", "print(\"Reading the data file '%s':\" % dataFile) # freq_Hz, I,", "np.power(C/freqArr_Hz, 2.0) dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz))) lambdaSqRange_m2 = ( np.nanmax(lambdaSqArr_m2) -", "try: if verbose: log(\"> Trying [freq_Hz, q, u, dq, du]\",", "startTime) if verbose: log(\"> RM-synthesis completed in %.2f seconds.\" %", "& U data from the ASCII file. 
Inputs: datafile (str):", "verbose: log(\"> Trying [freq_Hz, q, u, dq, du]\", end=' ')", "verbose=args.verbose data = readFile(args.dataFile[0],nBits, verbose=verbose, debug=args.debug) # Run RM-synthesis on", "and U. \"uniform\" -- Weight uniformly (i.e. with 1s) fitRMSF", "the Stokes I model fit if verbose: log(\"Plotting the input", "units (str): Units of data. Returns: mDict (dict): Summary of", "in case of failure? Returns: data (list of arrays): List", "debug=False, verbose=False, log=print,units='Jy/beam', prefixOut=\"prefixOut\", args=None): \"\"\"Run RM synthesis on 1D", "information on nature of channels: good_channels=np.where(np.logical_and(weightArr != 0,np.isfinite(qArr)))[0] mDict[\"min_freq\"]=float(np.min(freqArr_Hz[good_channels])) mDict[\"max_freq\"]=float(np.max(freqArr_Hz[good_channels]))", "fdfFig.canvas.toolbar.pack_forget() # CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window) # except Exception: # pass #", "MERCHANTABILITY, # # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "Display the figure # if not plt.isinteractive(): # specFig.show() #", "' % (mDict[\"freq0_Hz\"]/1e9)) log('I freq0 = %.4g %s' % (mDict[\"Ifreq0\"],units))", "AND NONINFRINGEMENT. IN NO EVENT SHALL THE # # AUTHORS", "(bool): Print verbose messages to terminal? debug (bool): Print full", "dPhi_radm2 is None: dPhi_radm2 = fwhmRMSF_radm2 / nSamples if phiMax_radm2", "channel size (rad/m^2). nSamples (float): Number of samples across the", "data as either: [freq_Hz, I, Q, U, dI, dQ, dU]", "ASCII Stokes I, Q & U spectrum.# # # #", "number of bits to store the data as. verbose (bool):", "Inputs: datafile (str): relative or absolute path to file. 
nBits", "plots for spectral complexity measure if debug: tmpFig = plot_complexity_fig(xArr=pD[\"xArrQ\"],", "# # # # Copyright (c) 2015 - 2018 <NAME>", "either: [freq_Hz, I, Q, U, dI, dQ, dU] freq_Hz (array_like):", "the theoretical noise to calculate uncertainties mDict = measure_FDF_parms(FDF =", "args.polyOrd, phiMax_radm2 = args.phiMax_radm2, dPhi_radm2 = args.dPhi_radm2, nSamples = args.nSamples,", "of charge, to any person obtaining a # # copy", "with 1s) fitRMSF (bool): Fit a Gaussian to the RMSF?", "mDict[\"polyCoeffs\"] = \",\".join([str(x) for x in fitDict[\"p\"]]) mDict[\"IfitStat\"] = fitDict[\"fitStatus\"]", "notice and this permission notice shall be included in #", "= np.ones(freqArr_Hz.shape, dtype=dtFloat) if verbose: log(\"Weight type is '%s'.\" %", "to exit ...\", # input() return mDict, aDict def readFile(dataFile,", "spectrum) if debug: rmsFig = plt.figure(figsize=(12.0, 8)) ax = rmsFig.add_subplot(111)", "to ASCII files if verbose: print(\"Saving the dirty FDF, RMSF", "rad/m^2' % (mDict[\"fwhmRMSF\"])) log('Pol Angle = %.4g (+/-%.4g) deg' %", "in Q and U. \"uniform\" -- Weight uniformly (i.e. with", "Can be \"variance\" or \"uniform\" \"variance\" -- Weight by uncertainty", "(function): Which logging function to use. 
units (str): Units of", "data (list): Contains frequency and polarization data as either: [freq_Hz,", "shown using the -h option descStr = \"\"\" Run RM-synthesis", "one or more of the following flags must be set:", "print(\"> %s\" % outFile) np.savetxt(outFile, list(zip(arrdict[\"phi2Arr_radm2\"], arrdict[\"RMSFArr\"].real, arrdict[\"RMSFArr\"].imag))) outFile =", "the Stokes spectra.\") return data def saveOutput(outdict, arrdict, prefixOut, verbose):", "spectra IModArr, qArr, uArr, dqArr, duArr, fitDict = \\ create_frac_spectra(freqArr", "q and u spectra mDict[\"fracPol\"] = mDict[\"ampPeakPIfit\"]/(Ifreq0) mD, pD =", "else: tmpFig.show() #add array dictionary aDict = dict() aDict[\"phiArr_radm2\"] =", "or [freq_Hz, q, u, dq, du] freq_Hz (array_like): Frequency of", "to the RMSF? noStokesI (bool: Is Stokes I data provided?", "log(\"> Trying [freq_Hz, I, Q, U, dI, dQ, dU]\", end='", "( np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2) ) dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2))) dLambdaSqMax_m2 =", "<QU>') ax.plot(freqArr_Hz/1e9, dQArr, marker='o', color='b', lw=0.5, label='rms Q') ax.plot(freqArr_Hz/1e9, dUArr,", "du]\", end=' ') (freqArr_Hz, QArr, UArr, dQArr, dUArr) = data", "by <NAME> # # MODIFIED: 23-October-2019 by <NAME> # #", "measure_fdf_complexity from RMutils.util_misc import nanmedian from RMutils.util_misc import toscalar from", "at lam0Sq_m2 from the Stokes I model # Multiply the", "create the fractional spectra IModArr, qArr, uArr, dqArr, duArr, fitDict", "the input data and spectral index fit.\") freqHirArr_Hz = np.linspace(freqArr_Hz[0],", "os.path.split(args.dataFile[0]) # Set the floating point precision nBits = 32", "Q & U spectrum.# # # # MODIFIED: 16-Nov-2018 by", "and JSON formats.\") print(\"> %s\" % outFile) FH = open(outFile,", "spectra (1D) stored in an ASCII file. 
The Stokes I", "arrdict[\"RMSFArr\"].real, arrdict[\"RMSFArr\"].imag))) outFile = prefixOut + \"_weight.dat\" if verbose: print(\">", "sell copies of the Software, and to permit persons to", "on an ASCII Stokes I, Q & U spectrum.# #", "poly5(fitDict[\"p\"])(freqHirArr_Hz/1e9) specFig = plt.figure(figsize=(12.0, 8)) plot_Ipqu_spectra_fig(freqArr_Hz = freqArr_Hz, IArr =", "duArr, fracPol = mDict[\"fracPol\"], psi0_deg = mDict[\"polAngle0Fit_deg\"], RM_radm2 = mDict[\"phiPeakPIfit_rm2\"])", "each channel. dq (array_like): Error in fractional Stokes Q intensity", "or \"uniform\" \"variance\" -- Weight by uncertainty in Q and", "prefix is derived from the input file name # Read", "mDict[\"phiPeakPIfit_rm2\"]) mDict.update(mD) # Debugging plots for spectral complexity measure if", "= fwhmRMSF, dFDF = dFDFth, lamSqArr_m2 = lambdaSqArr_m2, lam0Sq =", "phiMax_radm2 is None: phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2 phiMax_radm2 =", "Q intensity in each channel. dU (array_like): Error in Stokes", "JSON formats.\") print(\"> %s\" % outFile) FH = open(outFile, \"w\")", "#=============================================================================# # # # NAME: do_RMsynth_1D.py # # # #", "dest=\"bit64\", action=\"store_true\", help=\"use 64-bit floating point precision [False (uses 32-bit)]\")", "polyOrd=3, phiMax_radm2=None, dPhi_radm2=None, nSamples=10.0, weightType=\"variance\", fitRMSF=False, noStokesI=False, phiNoise_radm2=1e6, nBits=32, showPlots=False,", "debug) # Plot the data and the Stokes I model", "(mDict[\"Ifreq0\"],units)) log('Peak PI = %.4g (+/-%.4g) %s' % (mDict[\"ampPeakPIfit\"], mDict[\"dAmpPeakPIfit\"],units))", "dq (array_like): Error in fractional Stokes Q intensity in each", "phiNoise_radm2=1e6, nBits=32, showPlots=False, debug=False, verbose=False, log=print,units='Jy/beam', prefixOut=\"prefixOut\", args=None): \"\"\"Run RM", "(Q/I) in each channel. 
u (array_like): Fractional Stokes U intensity", "= False, nBits = nBits, verbose = verbose, log =", "use, copy, modify, merge, publish, distribute, sublicense, # # and/or", "%.4g %s' % (mDict[\"dFDFth\"],units)) log('FDF Noise (Corrected MAD) = %.4g", "Run RM-synthesis on the spectra mDict, aDict = run_rmsynth(data =", "the Rotation Measure Spread Function RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr =", "is present, this will be [freq_Hz, I, Q, U, dI,", "IArr, qArr = qArr, uArr = uArr, dIArr = dIArr,", "= fdfFig, units = units) # Use the custom navigation", "do_rmsynth_planes(dataQ = qArr, dataU = uArr, lambdaSqArr_m2 = lambdaSqArr_m2, phiArr_radm2", "= data, polyOrd = args.polyOrd, phiMax_radm2 = args.phiMax_radm2, dPhi_radm2 =", "dUArr)/2.0 # Fit the Stokes I spectrum and create the", "Stokes I spectrum. phiMax_radm2 (float): Maximum absolute Faraday depth (rad/m^2).", "= %.4g (+/-%.4g) deg' % (mDict[\"polAngleFit_deg\"], mDict[\"dPolAngleFit_deg\"])) log('Pol Angle 0", "FDF if showPlots or saveOutput: fdfFig = plt.figure(figsize=(12.0, 8)) plot_rmsf_fdf_fig(phiArr", "q, u, dq, du try: if verbose: print(\"> Trying [freq_Hz,", "outFile = prefixOut + \"_RMsynth.dat\" if verbose: print(\"Saving the measurements", "parser.add_argument(\"-l\", dest=\"phiMax_radm2\", type=float, default=None, help=\"absolute max Faraday depth sampled [Auto].\")", "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO", "= mDict[\"polAngle0Fit_deg\"], RM_radm2 = mDict[\"phiPeakPIfit_rm2\"]) mDict.update(mD) # Debugging plots for", "and create the fractional spectra IModArr, qArr, uArr, dqArr, duArr,", "dirty FDF plot:\") outFilePlot = prefixOut + \".RMSF-dirtyFDF-plots.pdf\" if verbose:", "u = U/I spectra. The ASCII file should the following", "(i.e. 
with 1s) fitRMSF (bool): Fit a Gaussian to the", "+ str(2*nBits) # Output prefix is derived from the input", "'%s'.\" % weightType) startTime = time.time() # Perform RM-synthesis on", "import CustomNavbar from RMutils.util_plotTk import plot_rmsIQU_vs_nu_ax if sys.version_info.major == 2:", "dIArr, dQArr, dUArr) = data if verbose: log(\"... success.\") except", "ASCII file. The Stokes I spectrum is first fit with", "or 'uniform' (all 1s).\") parser.add_argument(\"-o\", dest=\"polyOrd\", type=int, default=2, help=\"polynomial order", "phiMax_radm2 = args.phiMax_radm2, dPhi_radm2 = args.dPhi_radm2, nSamples = args.nSamples, weightType", "in each channel. or [freq_Hz, q, u, dq, du] freq_Hz", "RMutils.util_misc import create_frac_spectra from RMutils.util_misc import poly5 from RMutils.util_misc import", "nBits, verbose=True, debug=False): \"\"\" Read the I, Q & U", "and this permission notice shall be included in # #", "Q, U] _RMSF.dat: Computed RMSF [Phi, Q, U] _RMsynth.dat: list", "synthesis on 1D data. Args: data (list): Contains frequency and", "the function to perform RM-synthesis if called from the command", "verbose, log = log) # Calculate the Rotation Measure Spread", "Calculated channel weights [freq_Hz, weight] \"\"\" # Parse the command", "lam0Sq_m2, double = True, fitRMSF = fitRMSF, fitRMSFreal = False,", "RM-SYNTHESIS CODE --------------------------------------------# #dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \\", "perform RM-synthesis if called from the command line. \"\"\" #", "OR # # IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "weightType=\"variance\", fitRMSF=False, noStokesI=False, phiNoise_radm2=1e6, nBits=32, showPlots=False, debug=False, verbose=False, log=print,units='Jy/beam', prefixOut=\"prefixOut\",", "dU] freq_Hz (array_like): Frequency of each channel in Hz. I", "the RMSF? noStokesI (bool: Is Stokes I data provided? phiNoise_radm2", "logging function to use. units (str): Units of data. 
Returns:", "a \"key=value\" text file outFile = prefixOut + \"_RMsynth.dat\" if", "without limitation # # the rights to use, copy, modify,", "or more of the following flags must be set: -S,", "If no Stokes I present, create a dummy spectrum =", "dest=\"weightType\", default=\"variance\", help=\"weighting [inverse variance] or 'uniform' (all 1s).\") parser.add_argument(\"-o\",", "polyOrd = polyOrd, verbose = True, debug = debug) #", "for x in fitDict[\"p\"]]) mDict[\"IfitStat\"] = fitDict[\"fitStatus\"] mDict[\"IfitChiSqRed\"] = fitDict[\"chiSqRed\"]", "dIArr, dQArr = dQArr, dUArr = dUArr, polyOrd = polyOrd,", "% (mDict[\"ampPeakPIfit\"], mDict[\"dAmpPeakPIfit\"],units)) log('QU Noise = %.4g %s' % (mDict[\"dQU\"],units))", "Q intensity in each channel. du (array_like): Error in fractional", "np.ones_like(QArr) dIArr = np.zeros_like(QArr) # Convert to GHz for convenience", "rights to use, copy, modify, merge, publish, distribute, sublicense, #", "deg' % (mDict[\"polAngle0Fit_deg\"], mDict[\"dPolAngle0Fit_deg\"])) log('Peak FD = %.4g (+/-%.4g) rad/m^2'", "nSamples = args.nSamples, weightType = args.weightType, fitRMSF = args.fitRMSF, noStokesI", "fractional Stokes U intensity in each channel. Kwargs: polyOrd (int):", "+ outFilePlot) fdfFig.savefig(outFilePlot, bbox_inches = 'tight') # #if verbose: print", "freq_Hz, I, Q, U, dI, dQ, dU try: if verbose:", "= prefixOut + \".debug-plots.pdf\" if verbose: print(\"> \" + outFilePlot)", "Fractional Stokes Q intensity (Q/I) in each channel. u (array_like):", "fwhmRMSF, dFDF = dFDFth, lamSqArr_m2 = lambdaSqArr_m2, lam0Sq = lam0Sq_m2)", "saveOutput: if verbose: print(\"Saving debug plots:\") outFilePlot = prefixOut +", "failure? 
Returns: data (list of arrays): List containing the columns", "= uArr, dIArr = dIArr, dqArr = dqArr, duArr =", "CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window) # except Exception: # pass # Display the", "# # the rights to use, copy, modify, merge, publish,", "the PI freq0_Hz = C / m.sqrt(lam0Sq_m2) Ifreq0 = poly5(fitDict[\"p\"])(freq0_Hz/1e9)", "dirtyFDF, phi2Arr = phi2Arr_radm2, RMSFArr = RMSFArr, fwhmRMSF = fwhmRMSF,", "X) # try: # specFig.canvas.toolbar.pack_forget() # CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window) # except", "os.path.exists(args.dataFile[0]): print(\"File does not exist: '%s'.\" % args.dataFile[0]) sys.exit() prefixOut,", "I model fit if verbose: log(\"Plotting the input data and", "%s\" % outFile) np.savetxt(outFile, list(zip(arrdict[\"phi2Arr_radm2\"], arrdict[\"RMSFArr\"].real, arrdict[\"RMSFArr\"].imag))) outFile = prefixOut", "(mDict[\"polAngle0Fit_deg\"], mDict[\"dPolAngle0Fit_deg\"])) log('Peak FD = %.4g (+/-%.4g) rad/m^2' % (mDict[\"phiPeakPIfit_rm2\"],", ") dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2))) dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2))) # Set the", "in each channel. Q (array_like): Stokes Q intensity in each", "mDict[\"fracPol\"] = mDict[\"ampPeakPIfit\"]/(Ifreq0) mD, pD = measure_qu_complexity(freqArr_Hz = freqArr_Hz, qArr", "Force the minimum phiMax to 10 FWHM # Faraday depth", "noStokesI: if verbose: log(\"Warn: no Stokes I data in use.\")", "by %.2f (%d chans).\" % (phiArr_radm2[0], phiArr_radm2[-1], float(dPhi_radm2), nChanRM)) #", "OTHER # # DEALINGS IN THE SOFTWARE. 
# # #", "in # # all copies or substantial portions of the", "#if verbose: print \"Press <RETURN> to exit ...\", # input()", "dQArr, dUArr = dUArr, polyOrd = polyOrd, verbose = True,", "(MIT) # # # # Copyright (c) 2015 - 2018", "run_rmsynth(data = data, polyOrd = args.polyOrd, phiMax_radm2 = args.phiMax_radm2, dPhi_radm2", "RM-synthesis on the spectra mDict, aDict = run_rmsynth(data = data,", "Function RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \\ get_rmsf_planes(lambdaSqArr_m2 = lambdaSqArr_m2,", "dUArr] except Exception: if verbose: print(\"...failed.\") # freq_Hz, q, u,", "dq, du]\", end=' ') (freqArr_Hz, QArr, UArr, dQArr, dUArr) =", "prefixOut = prefixOut, args = args, ) if args.saveOutput: saveOutput(mDict,", "23-October-2019 by <NAME> # # # #=============================================================================# # # #", "path to file. nBits (int): number of bits to store", "Stokes I spectrum [False].\") parser.add_argument(\"-b\", dest=\"bit64\", action=\"store_true\", help=\"use 64-bit floating", "the data-file. Format=space-delimited, comments=\"#\". if verbose: print(\"Reading the data file", "(int): Precision of floating point numbers. showPlots (bool): Show plots?", "the data as. 
verbose (bool): Print verbose messages to terminal?", "'uniform' (all 1s).\") parser.add_argument(\"-o\", dest=\"polyOrd\", type=int, default=2, help=\"polynomial order to", "= dUArr, polyOrd = polyOrd, verbose = True, debug =", "(does not work on Mac OS X) # try: #", "Ifreq0 to recover the PI freq0_Hz = C / m.sqrt(lam0Sq_m2)", "order to fit to I spectrum [2].\") parser.add_argument(\"-i\", dest=\"noStokesI\", action=\"store_true\",", "sys.version_info.major == 2: print('RM-tools will no longer run with Python", "(theory) = %.4g %s' % (mDict[\"dFDFth\"],units)) log('FDF Noise (Corrected MAD)", "max Faraday depth sampled [Auto].\") parser.add_argument(\"-d\", dest=\"dPhi_radm2\", type=float, default=None, help=\"width", "centred on middle channel nChanRM = int(round(abs((phiMax_radm2 - 0.0) /", "argparse \"\"\" Start the function to perform RM-synthesis if called", "# Copyright (c) 2015 - 2018 <NAME> # # #", "Weight by uncertainty in Q and U. \"uniform\" -- Weight", "[False].\") parser.add_argument(\"-v\", dest=\"verbose\", action=\"store_true\", help=\"verbose output [False].\") parser.add_argument(\"-S\", dest=\"saveOutput\", action=\"store_true\",", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "= fitDict[\"chiSqRed\"] mDict[\"lam0Sq_m2\"] = toscalar(lam0Sq_m2) mDict[\"freq0_Hz\"] = toscalar(freq0_Hz) mDict[\"fwhmRMSF\"] =", "32-bit)]\") parser.add_argument(\"-p\", dest=\"showPlots\", action=\"store_true\", help=\"show the plots [False].\") parser.add_argument(\"-v\", dest=\"verbose\",", "in the Software without restriction, including without limitation # #", "= plot_complexity_fig(xArr=pD[\"xArrQ\"], qArr=pD[\"yArrQ\"], dqArr=pD[\"dyArrQ\"], sigmaAddqArr=pD[\"sigmaAddArrQ\"], chiSqRedqArr=pD[\"chiSqRedArrQ\"], probqArr=pD[\"probArrQ\"], uArr=pD[\"yArrU\"], duArr=pD[\"dyArrU\"], sigmaAdduArr=pD[\"sigmaAddArrU\"],", "sigmaAddqArr=pD[\"sigmaAddArrQ\"], chiSqRedqArr=pD[\"chiSqRedArrQ\"], probqArr=pD[\"probArrQ\"], uArr=pD[\"yArrU\"], duArr=pD[\"dyArrU\"], 
sigmaAdduArr=pD[\"sigmaAddArrU\"], chiSqReduArr=pD[\"chiSqRedArrU\"], probuArr=pD[\"probArrU\"], mDict=mDict) if", "no longer run with Python 2! Please use Python 3.')", "dtComplex = \"complex\" + str(2*nBits) # freq_Hz, I, Q, U,", "if args.saveOutput: saveOutput(mDict, aDict, prefixOut, verbose) #-----------------------------------------------------------------------------# if __name__ ==", "fitDict[\"p\"]]) mDict[\"IfitStat\"] = fitDict[\"fitStatus\"] mDict[\"IfitChiSqRed\"] = fitDict[\"chiSqRed\"] mDict[\"lam0Sq_m2\"] = toscalar(lam0Sq_m2)", "Frequency of each channel in Hz. I (array_like): Stokes I", "du try: if verbose: log(\"> Trying [freq_Hz, q, u, dq,", "PI freq0_Hz = C / m.sqrt(lam0Sq_m2) Ifreq0 = poly5(fitDict[\"p\"])(freq0_Hz/1e9) dirtyFDF", "text file outFile = prefixOut + \"_RMsynth.dat\" if verbose: print(\"Saving", "[freq_Hz, I, Q, U, dI, dQ, dU] freq_Hz (array_like): Frequency", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "phiMax_radm2 = max(phiMax_radm2, fwhmRMSF_radm2*10.) # Force the minimum phiMax to", "phiArr_radm2, FDF = dirtyFDF, phi2Arr = phi2Arr_radm2, RMSFArr = RMSFArr,", "be \"variance\" or \"uniform\" \"variance\" -- Weight by uncertainty in", "(phiArr_radm2[0], phiArr_radm2[-1], float(dPhi_radm2), nChanRM)) # Calculate the weighting as 1/sigma^2", "= lam0Sq_m2) mDict[\"Ifreq0\"] = toscalar(Ifreq0) mDict[\"polyCoeffs\"] = \",\".join([str(x) for x", "[False].\") parser.add_argument(\"-b\", dest=\"bit64\", action=\"store_true\", help=\"use 64-bit floating point precision [False", "\"w\") for k, v in outdict.items(): FH.write(\"%s=%s\\n\" % (k, v))", "the Stokes I spectrum and create the fractional spectra IModArr,", "weightArr, nBits = nBits, verbose = verbose, log = log)", "I, Q & U spectrum.# # # # MODIFIED: 16-Nov-2018", "in each channel. 
dU (array_like): Error in Stokes U intensity", "dFDFth, lamSqArr_m2 = lambdaSqArr_m2, lam0Sq = lam0Sq_m2) mDict[\"Ifreq0\"] = toscalar(Ifreq0)", "read in the Stokes spectra.\") return data def saveOutput(outdict, arrdict,", "# Software is furnished to do so, subject to the", "args=None): \"\"\"Run RM synthesis on 1D data. Args: data (list):", "time.time() cputime = (endTime - startTime) if verbose: log(\"> RM-synthesis", "ext = os.path.splitext(args.dataFile[0]) dataDir, dummy = os.path.split(args.dataFile[0]) # Set the", "Hz. I (array_like): Stokes I intensity in each channel. Q", "verbose = verbose, log = log) # Calculate the Rotation", "from RMutils.util_RM import get_rmsf_planes from RMutils.util_RM import measure_FDF_parms from RMutils.util_RM", "weightType (str): Can be \"variance\" or \"uniform\" \"variance\" -- Weight", "= prefixOut + \"_RMSF.dat\" if verbose: print(\"> %s\" % outFile)", "Python 2! Please use Python 3.') exit() C = 2.997924538e8", "RMutils.util_plotTk import CustomNavbar from RMutils.util_plotTk import plot_rmsIQU_vs_nu_ax if sys.version_info.major ==", "to create fractional q = Q/I and u = U/I", "fdfFig = plt.figure(figsize=(12.0, 8)) plot_rmsf_fdf_fig(phiArr = phiArr_radm2, FDF = dirtyFDF,", "merge, publish, distribute, sublicense, # # and/or sell copies of", "poly5 from RMutils.util_misc import MAD from RMutils.util_plotTk import plot_Ipqu_spectra_fig from", "FDF !!Old formula only works for wariance weights! weightArr =", "RMutils.util_RM import do_rmsynth_planes from RMutils.util_RM import get_rmsf_planes from RMutils.util_RM import", "exit ...\", # input() return mDict, aDict def readFile(dataFile, nBits,", "the minimum phiMax to 10 FWHM # Faraday depth sampling.", "The Stokes I spectrum is first fit with a polynomial", "in each channel. 
du (array_like): Error in fractional Stokes U", "xRange*0.05, np.max(freqArr_Hz)/1e9 + xRange*0.05) ax.set_xlabel('$\\\\nu$ (GHz)') ax.set_ylabel('RMS '+units) ax.set_title(\"RMS noise", "nBits, showPlots = args.showPlots, debug = args.debug, verbose = verbose,", "input() return mDict, aDict def readFile(dataFile, nBits, verbose=True, debug=False): \"\"\"", "THE WARRANTIES OF MERCHANTABILITY, # # FITNESS FOR A PARTICULAR", "= np.sqrt( np.sum(weightArr**2 * np.nan_to_num(dQUArr)**2) / (np.sum(weightArr))**2 ) # Measure", "dIArr, dqArr = dqArr, duArr = duArr, freqHirArr_Hz = freqHirArr_Hz,", "ax.plot(freqArr_Hz/1e9, dQArr, marker='o', color='b', lw=0.5, label='rms Q') ax.plot(freqArr_Hz/1e9, dUArr, marker='o',", "found in the file. If Stokes I is present, this", "print(\"Saving the dirty FDF, RMSF weight arrays to ASCII files.\")", "plots? debug (bool): Turn on debugging messages & plots? verbose", "if verbose: log(\"Plotting the input data and spectral index fit.\")", "* 2.0 + 1.0) startPhi_radm2 = - (nChanRM-1.0) * dPhi_radm2", "'key=val' and JSON formats.\") print(\"> %s\" % outFile) FH =", "(all 1s).\") parser.add_argument(\"-o\", dest=\"polyOrd\", type=int, default=2, help=\"polynomial order to fit", "Weight uniformly (i.e. with 1s) fitRMSF (bool): Fit a Gaussian", "channel. 
dq (array_like): Error in fractional Stokes Q intensity in", "from RMutils.util_misc import toscalar from RMutils.util_misc import create_frac_spectra from RMutils.util_misc", "to flux # Calculate the theoretical noise in the FDF", "# ALTERNATE RM-SYNTHESIS CODE --------------------------------------------# #dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF", "fitRMSF=False, noStokesI=False, phiNoise_radm2=1e6, nBits=32, showPlots=False, debug=False, verbose=False, log=print,units='Jy/beam', prefixOut=\"prefixOut\", args=None):", "[freq_Hz, q, u, dq, du] freq_Hz (array_like): Frequency of each", "theoretical noise in the FDF !!Old formula only works for", "is in fracpol units initially, convert back to flux #", "aDict def readFile(dataFile, nBits, verbose=True, debug=False): \"\"\" Read the I,", "u, dq, du] freq_Hz (array_like): Frequency of each channel in", "QArr, UArr = UArr, dIArr = dIArr, dQArr = dQArr,", "dIArr, dQArr, dUArr] except Exception: if verbose: print(\"...failed.\") # freq_Hz,", "WARRANTY OF ANY KIND, EXPRESS OR # # IMPLIED, INCLUDING", "measure if debug: tmpFig = plot_complexity_fig(xArr=pD[\"xArrQ\"], qArr=pD[\"yArrQ\"], dqArr=pD[\"dyArrQ\"], sigmaAddqArr=pD[\"sigmaAddArrQ\"], chiSqRedqArr=pD[\"chiSqRedArrQ\"],", "ARISING # # FROM, OUT OF OR IN CONNECTION WITH", "sys import os import time import traceback import json import", "# fdfFig.show() # Pause if plotting enabled if showPlots: plt.show()", "== 2: print('RM-tools will no longer run with Python 2!", "dest=\"verbose\", action=\"store_true\", help=\"verbose output [False].\") parser.add_argument(\"-S\", dest=\"saveOutput\", action=\"store_true\", help=\"save the", "spectrum = unity if noStokesI: if verbose: log(\"Warn: no Stokes", "# Force the minimum phiMax to 10 FWHM # Faraday", "measure_FDF_parms(FDF = dirtyFDF, phiArr = phiArr_radm2, fwhmRMSF = fwhmRMSF, dFDF", "10000) IModHirArr = poly5(fitDict[\"p\"])(freqHirArr_Hz/1e9) specFig = plt.figure(figsize=(12.0, 8)) 
plot_Ipqu_spectra_fig(freqArr_Hz =", "log('FWHM RMSF = %.4g rad/m^2' % (mDict[\"fwhmRMSF\"])) log('Pol Angle =", "showPlots: plt.show() elif saveOutput or debug: if verbose: print(\"Saving RMSF", "for RM spectrum _weight.dat: Calculated channel weights [freq_Hz, weight] \"\"\"", "m.sqrt(3.0) / lambdaSqRange_m2 if dPhi_radm2 is None: dPhi_radm2 = fwhmRMSF_radm2", "return mDict, aDict def readFile(dataFile, nBits, verbose=True, debug=False): \"\"\" Read", "whom the # # Software is furnished to do so,", "= %.4g %s' % (mDict[\"dFDFth\"],units)) log('FDF Noise (Corrected MAD) =", "if debug: log(traceback.format_exc()) sys.exit() if verbose: log(\"Successfully read in the", "# Print the results to the screen log() log('-'*80) log('RESULTS:\\n')", "any person obtaining a # # copy of this software", "(mDict[\"dFDFth\"],units)) log('FDF Noise (Corrected MAD) = %.4g %s' % (mDict[\"dFDFcorMAD\"],units))", "Print verbose messages to terminal? debug (bool): Print full traceback", "if verbose: print(\"Saving the dirty FDF, RMSF weight arrays to", "mDict[\"IfitChiSqRed\"] = fitDict[\"chiSqRed\"] mDict[\"lam0Sq_m2\"] = toscalar(lam0Sq_m2) mDict[\"freq0_Hz\"] = toscalar(freq0_Hz) mDict[\"fwhmRMSF\"]", "dIArr, dQArr, dUArr) = \\ np.loadtxt(dataFile, unpack=True, dtype=dtFloat) if verbose:", "+ \"_FDFdirty.dat\" if verbose: print(\"> %s\" % outFile) np.savetxt(outFile, list(zip(arrdict[\"phiArr_radm2\"],", "% outFile) np.savetxt(outFile, list(zip(arrdict[\"freqArr_Hz\"], arrdict[\"weightArr\"]))) # Save the measurements to", "Stokes I model # Multiply the dirty FDF by Ifreq0", "% (phiArr_radm2[0], phiArr_radm2[-1], float(dPhi_radm2), nChanRM)) # Calculate the weighting as", "except Exception: # pass # Display the figure # if", "aDict[\"phiArr_radm2\"] = phiArr_radm2 aDict[\"phi2Arr_radm2\"] = phi2Arr_radm2 aDict[\"RMSFArr\"] = RMSFArr aDict[\"freqArr_Hz\"]", "lambdaSqArr_m2, lam0Sq = lam0Sq_m2) mDict[\"Ifreq0\"] = toscalar(Ifreq0) mDict[\"polyCoeffs\"] = \",\".join([str(x)", "if 
verbose: print(\"> %s\" % outFile) json.dump(dict(outdict), open(outFile, \"w\")) #-----------------------------------------------------------------------------#", "' % (mDict[\"snrPIfit\"])) log('sigma_add(q) = %.4g (+%.4g, -%.4g)' % (mDict[\"sigmaAddQ\"],", "verbose: log(\"Weight type is '%s'.\" % weightType) startTime = time.time()", "# Parse the command line options parser = argparse.ArgumentParser(description=descStr,epilog=epilog_text, formatter_class=argparse.RawTextHelpFormatter)", "of the Software, and to permit persons to whom the", "Perform RM-synthesis on the spectrum dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ =", "aDict = run_rmsynth(data = data, polyOrd = args.polyOrd, phiMax_radm2 =", "log('freq0_GHz = %.4g ' % (mDict[\"freq0_Hz\"]/1e9)) log('I freq0 = %.4g", "range fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2 if dPhi_radm2", "(array_like): Error in Stokes U intensity in each channel. or", "for k, v in outdict.items(): FH.write(\"%s=%s\\n\" % (k, v)) FH.close()", "= dQArr, dUArr = dUArr, polyOrd = polyOrd, verbose =", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "duArr, freqHirArr_Hz = freqHirArr_Hz, IModArr = IModHirArr, fig = specFig,", "dIArr = np.zeros_like(QArr) # Convert to GHz for convenience freqArr_GHz", "# Fit the Stokes I spectrum and create the fractional", "the I, Q & U data from the ASCII file.", "= args.dPhi_radm2, nSamples = args.nSamples, weightType = args.weightType, fitRMSF =", "weightArr, mskArr = ~np.isfinite(qArr), lam0Sq_m2 = lam0Sq_m2, double = True,", "v in outdict.items(): FH.write(\"%s=%s\\n\" % (k, v)) FH.close() outFile =", "/ np.power(dQUArr, 2.0) else: weightType = \"uniform\" weightArr = np.ones(freqArr_Hz.shape,", "[inverse variance] or 'uniform' (all 1s).\") parser.add_argument(\"-o\", dest=\"polyOrd\", type=int, default=2,", "= verbose, units = args.units, prefixOut = prefixOut, args =", "units initially, convert back to flux # Calculate the theoretical", "ax.plot(freqArr_Hz/1e9, 
dUArr, marker='o', color='r', lw=0.5, label='rms U') xRange = (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9", "import plot_rmsf_fdf_fig from RMutils.util_plotTk import plot_complexity_fig from RMutils.util_plotTk import CustomNavbar", "command line. \"\"\" # Help string to be shown using", "np.nan_to_num(dQUArr)**2) / (np.sum(weightArr))**2 ) # Measure the parameters of the", "to use. units (str): Units of data. Returns: mDict (dict):", "%s\" % outFile) json.dump(dict(outdict), open(outFile, \"w\")) #-----------------------------------------------------------------------------# def main(): import", "depth sampling. Zero always centred on middle channel nChanRM =", "custom navigation toolbar # try: # fdfFig.canvas.toolbar.pack_forget() # CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window)", "I model has low signal-to-noise.\") #Add information on nature of", "USE OR OTHER # # DEALINGS IN THE SOFTWARE. #", "of this software and associated documentation files (the \"Software\"), #", "u (array_like): Fractional Stokes U intensity (U/I) in each channel.", "I data provided? phiNoise_radm2 (float): ???? nBits (int): Precision of", "Which logging function to use. units (str): Units of data.", "& plots? verbose (bool): Verbosity. log (function): Which logging function", "mDict[\"dSigmaAddMinusU\"])) log() log('-'*80) # Plot the RM Spread Function and", "\\ create_frac_spectra(freqArr = freqArr_GHz, IArr = IArr, QArr = QArr,", "verbose: print(\"Saving debug plots:\") outFilePlot = prefixOut + \".debug-plots.pdf\" if", "data types dtFloat = \"float\" + str(nBits) dtComplex = \"complex\"", "& plots [False].\") parser.add_argument(\"-U\", dest=\"units\", type=str, default=\"Jy/beam\", help=\"Intensity units of", "input file name # Read the data-file. Format=space-delimited, comments=\"#\". 
if", "(Corrected MAD) = %.4g %s' % (mDict[\"dFDFcorMAD\"],units)) log('FDF Noise (rms)", "action=\"store_true\", help=\"fit a Gaussian to the RMSF [False]\") parser.add_argument(\"-l\", dest=\"phiMax_radm2\",", "= unity if noStokesI: if verbose: log(\"Warn: no Stokes I", "for convenience freqArr_GHz = freqArr_Hz / 1e9 dQUArr = (dQArr", "action=\"store_true\", help=\"ignore the Stokes I spectrum [False].\") parser.add_argument(\"-b\", dest=\"bit64\", action=\"store_true\",", "to fit to Stokes I spectrum. phiMax_radm2 (float): Maximum absolute", "= weightArr, nBits = nBits, verbose = verbose, log =", "on middle channel nChanRM = int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2))", "to fit to I spectrum [2].\") parser.add_argument(\"-i\", dest=\"noStokesI\", action=\"store_true\", help=\"ignore", "if dPhi_radm2 is None: dPhi_radm2 = fwhmRMSF_radm2 / nSamples if", "probuArr=pD[\"probArrU\"], mDict=mDict) if saveOutput: if verbose: print(\"Saving debug plots:\") outFilePlot", "fitDict[\"fitStatus\"] >= 64: log(\"Caution: Stokes I model has low signal-to-noise.\")", "weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat) if verbose: log(\"Weight type is '%s'.\"", "= args.polyOrd, phiMax_radm2 = args.phiMax_radm2, dPhi_radm2 = args.dPhi_radm2, nSamples =", "completed in %.2f seconds.\" % cputime) # Determine the Stokes", "if verbose: print(\"> %s\" % outFile) np.savetxt(outFile, list(zip(arrdict[\"phi2Arr_radm2\"], arrdict[\"RMSFArr\"].real, arrdict[\"RMSFArr\"].imag)))", "dest=\"showPlots\", action=\"store_true\", help=\"show the plots [False].\") parser.add_argument(\"-v\", dest=\"verbose\", action=\"store_true\", help=\"verbose", "modify, merge, publish, distribute, sublicense, # # and/or sell copies", "fitDict[\"fitStatus\"] >= 128: log(\"WARNING: Stokes I model contains negative values!\")", "mDict[\"dPolAngleFit_deg\"])) log('Pol Angle 0 = %.4g (+/-%.4g) deg' % (mDict[\"polAngle0Fit_deg\"],", "import argparse \"\"\" Start the function to perform RM-synthesis if", 
"import sys import os import time import traceback import json", "in Stokes Q intensity in each channel. dU (array_like): Error", "log (function): Which logging function to use. units (str): Units", "UArr, dIArr, dQArr, dUArr) = data if verbose: log(\"... success.\")", "the fractional spectra IModArr, qArr, uArr, dqArr, duArr, fitDict =", "= %.4g rad/m^2' % (mDict[\"fwhmRMSF\"])) log('Pol Angle = %.4g (+/-%.4g)", "dest=\"saveOutput\", action=\"store_true\", help=\"save the arrays and plots [False].\") parser.add_argument(\"-D\", dest=\"debug\",", "log('sigma_add(u) = %.4g (+%.4g, -%.4g)' % (mDict[\"sigmaAddU\"], mDict[\"dSigmaAddPlusU\"], mDict[\"dSigmaAddMinusU\"])) log()", "run with Python 2! Please use Python 3.') exit() C", "np.zeros_like(QArr) # Convert to GHz for convenience freqArr_GHz = freqArr_Hz", "freqArr_GHz = freqArr_Hz / 1e9 dQUArr = (dQArr + dUArr)/2.0", "- np.nanmin(lambdaSqArr_m2) ) dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2))) dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2))) #", "dI, dQ, dU try: if verbose: print(\"> Trying [freq_Hz, I,", "verbose: print(\"> %s\" % outFile) np.savetxt(outFile, list(zip(arrdict[\"freqArr_Hz\"], arrdict[\"weightArr\"]))) # Save", "I, Q, U, dI, dQ, dU] freq_Hz (array_like): Frequency of", "os.path.splitext(args.dataFile[0]) # Default data types dtFloat = \"float\" + str(nBits)", "IModArr, qArr, uArr, dqArr, duArr, fitDict = \\ create_frac_spectra(freqArr =", "float(dPhi_radm2), nChanRM)) # Calculate the weighting as 1/sigma^2 or all", "each channel. 
du (array_like): Error in fractional Stokes U intensity", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "wavelength parameters lambdaSqArr_m2 = np.power(C/freqArr_Hz, 2.0) dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz))) lambdaSqRange_m2", "showPlots = args.showPlots, debug = args.debug, verbose = verbose, units", "and weight array to ASCII files if verbose: print(\"Saving the", "# # # The MIT License (MIT) # # #", "depth sampled [Auto].\") parser.add_argument(\"-d\", dest=\"dPhi_radm2\", type=float, default=None, help=\"width of Faraday", "(int): number of bits to store the data as. verbose", "IArr = np.ones_like(QArr) dIArr = np.zeros_like(QArr) # Convert to GHz", "EVENT SHALL THE # # AUTHORS OR COPYRIGHT HOLDERS BE", "list(zip(arrdict[\"phiArr_radm2\"], arrdict[\"dirtyFDF\"].real, arrdict[\"dirtyFDF\"].imag))) outFile = prefixOut + \"_RMSF.dat\" if verbose:", "# to deal in the Software without restriction, including without", "stored in an ASCII file. The Stokes I spectrum is", "nBits (int): number of bits to store the data as.", "freqArr_Hz, IArr = IArr, qArr = qArr, uArr = uArr,", "= float(fwhmRMSFArr) # ALTERNATE RM-SYNTHESIS CODE --------------------------------------------# #dirtyFDF, [phi2Arr_radm2, RMSFArr],", "a Gaussian to the RMSF [False]\") parser.add_argument(\"-l\", dest=\"phiMax_radm2\", type=float, default=None,", "Print the results to the screen log() log('-'*80) log('RESULTS:\\n') log('FWHM", "data, polyOrd = args.polyOrd, phiMax_radm2 = args.phiMax_radm2, dPhi_radm2 = args.dPhi_radm2,", "IArr, QArr, UArr, dIArr, dQArr, dUArr] except Exception: if verbose:", "2.0 * m.sqrt(3.0) / lambdaSqRange_m2 if dPhi_radm2 is None: dPhi_radm2", "qArr=pD[\"yArrQ\"], dqArr=pD[\"dyArrQ\"], sigmaAddqArr=pD[\"sigmaAddArrQ\"], chiSqRedqArr=pD[\"chiSqRedArrQ\"], probqArr=pD[\"probArrQ\"], uArr=pD[\"yArrU\"], duArr=pD[\"dyArrU\"], sigmaAdduArr=pD[\"sigmaAddArrU\"], chiSqReduArr=pD[\"chiSqRedArrU\"], probuArr=pD[\"probArrU\"],", "bits to store the data as. 
verbose (bool): Print verbose", "the theoretical noise in the FDF !!Old formula only works", "numpy as np import matplotlib.pyplot as plt from RMutils.util_RM import", "default=None, help=\"width of Faraday depth channel [Auto].\\n(overrides -s NSAMPLES flag)\")", "I intensity in each channel. Q (array_like): Stokes Q intensity", "units) # Use the custom navigation toolbar # try: #", "derived from the input file name # Read the data-file.", "of RM synthesis results. aDict (dict): Data output by RM", "be shown using the -h option descStr = \"\"\" Run", "mDict[\"lam0Sq_m2\"] = toscalar(lam0Sq_m2) mDict[\"freq0_Hz\"] = toscalar(freq0_Hz) mDict[\"fwhmRMSF\"] = toscalar(fwhmRMSF) mDict[\"dQU\"]", "# # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "lambdaSqArr_m2 = np.power(C/freqArr_Hz, 2.0) dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz))) lambdaSqRange_m2 = (", "<NAME> # # # #=============================================================================# # # # The MIT", "fractional spectra IModArr, qArr, uArr, dqArr, duArr, fitDict = \\", "type=float, default=None, help=\"width of Faraday depth channel [Auto].\\n(overrides -s NSAMPLES", "the floating point precision nBits = 32 if args.bit64: nBits", "plots [False].\") parser.add_argument(\"-D\", dest=\"debug\", action=\"store_true\", help=\"turn on debugging messages &", "U, Q_err, U_err] To get outputs, one or more of", "outFile = prefixOut + \"_weight.dat\" if verbose: print(\"> %s\" %", "% outFile) json.dump(dict(outdict), open(outFile, \"w\")) #-----------------------------------------------------------------------------# def main(): import argparse", "above copyright notice and this permission notice shall be included", "if verbose: print(\"> %s\" % outFile) np.savetxt(outFile, list(zip(arrdict[\"freqArr_Hz\"], arrdict[\"weightArr\"]))) #", "\"_FDFdirty.dat\" if verbose: print(\"> %s\" % outFile) np.savetxt(outFile, list(zip(arrdict[\"phiArr_radm2\"], arrdict[\"dirtyFDF\"].real,", "Stokes I spectrum is 
first fit with a polynomial and", "CODE --------------------------------------------# #dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \\ #", "'tight') else: tmpFig.show() #add array dictionary aDict = dict() aDict[\"phiArr_radm2\"]", "args.saveOutput: saveOutput(mDict, aDict, prefixOut, verbose) #-----------------------------------------------------------------------------# if __name__ == \"__main__\":", "permission notice shall be included in # # all copies", "to be shown using the -h option descStr = \"\"\"", "- (nChanRM-1.0) * dPhi_radm2 / 2.0 stopPhi_radm2 = + (nChanRM-1.0)", "q = Q/I and u = U/I spectra. The ASCII", "output) _RMsynth.json: dictionary of derived parameters for RM spectrum _weight.dat:", "= True except Exception: if verbose: log(\"...failed.\") if debug: log(traceback.format_exc())", "U, I_err, Q_err, U_err] OR [freq_Hz, Q, U, Q_err, U_err]", "# NAME: do_RMsynth_1D.py # # # # PURPOSE: API for", "debug (bool): Turn on debugging messages & plots? verbose (bool):", "Stokes I spectrum and create the fractional spectra IModArr, qArr,", "fitDict[\"fitStatus\"] mDict[\"IfitChiSqRed\"] = fitDict[\"chiSqRed\"] mDict[\"lam0Sq_m2\"] = toscalar(lam0Sq_m2) mDict[\"freq0_Hz\"] = toscalar(freq0_Hz)", "= verbose, log = log) # Calculate the Rotation Measure", "\"uniform\" weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat) if verbose: log(\"Weight type is", "uArr, lambdaSqArr_m2 = lambdaSqArr_m2, phiArr_radm2 = phiArr_radm2, weightArr = weightArr,", "in each channel. dq (array_like): Error in fractional Stokes Q", "the RMSF. 
weightType (str): Can be \"variance\" or \"uniform\" \"variance\"", "log('Pol Angle = %.4g (+/-%.4g) deg' % (mDict[\"polAngleFit_deg\"], mDict[\"dPolAngleFit_deg\"])) log('Pol", "saveOutput(outdict, arrdict, prefixOut, verbose): # Save the dirty FDF, RMSF", "the arrays and plots [False].\") parser.add_argument(\"-D\", dest=\"debug\", action=\"store_true\", help=\"turn on", "if sys.version_info.major == 2: print('RM-tools will no longer run with", "= args.fitRMSF, noStokesI = args.noStokesI, nBits = nBits, showPlots =", "= prefixOut, args = args, ) if args.saveOutput: saveOutput(mDict, aDict,", "CLAIM, DAMAGES OR OTHER # # LIABILITY, WHETHER IN AN", "(array_like): Error in fractional Stokes U intensity in each channel.", "args = parser.parse_args() # Sanity checks if not os.path.exists(args.dataFile[0]): print(\"File", "toscalar(freq0_Hz) mDict[\"fwhmRMSF\"] = toscalar(fwhmRMSF) mDict[\"dQU\"] = toscalar(nanmedian(dQUArr)) mDict[\"dFDFth\"] = toscalar(dFDFth)", "np.savetxt(outFile, list(zip(arrdict[\"phi2Arr_radm2\"], arrdict[\"RMSFArr\"].real, arrdict[\"RMSFArr\"].imag))) outFile = prefixOut + \"_weight.dat\" if", "% (mDict[\"phiPeakPIfit_rm2\"], mDict[\"dPhiPeakPIfit_rm2\"])) log('freq0_GHz = %.4g ' % (mDict[\"freq0_Hz\"]/1e9)) log('I", "# # and/or sell copies of the Software, and to", "Q, U and average RMS spectrum) if debug: rmsFig =", "def readFile(dataFile, nBits, verbose=True, debug=False): \"\"\" Read the I, Q", "list(zip(arrdict[\"phi2Arr_radm2\"], arrdict[\"RMSFArr\"].real, arrdict[\"RMSFArr\"].imag))) outFile = prefixOut + \"_weight.dat\" if verbose:", "= args.showPlots, debug = args.debug, verbose = verbose, units =", "C = 2.997924538e8 # Speed of light [m/s] #-----------------------------------------------------------------------------# def", "WITH THE SOFTWARE OR THE USE OR OTHER # #", "aDict (dict): Data output by RM synthesis. 
\"\"\" # Sanity", "Pause if plotting enabled if showPlots: plt.show() elif saveOutput or", "# # # # The above copyright notice and this", "args.fitRMSF, noStokesI = args.noStokesI, nBits = nBits, showPlots = args.showPlots,", "array dictionary aDict = dict() aDict[\"phiArr_radm2\"] = phiArr_radm2 aDict[\"phi2Arr_radm2\"] =", "Faraday depth sampled [Auto].\") parser.add_argument(\"-d\", dest=\"dPhi_radm2\", type=float, default=None, help=\"width of", "notice shall be included in # # all copies or", "present, this will be [freq_Hz, I, Q, U, dI, dQ,", "from RMutils.util_plotTk import plot_complexity_fig from RMutils.util_plotTk import CustomNavbar from RMutils.util_plotTk", "= fitDict[\"fitStatus\"] mDict[\"IfitChiSqRed\"] = fitDict[\"chiSqRed\"] mDict[\"lam0Sq_m2\"] = toscalar(lam0Sq_m2) mDict[\"freq0_Hz\"] =", "= measure_FDF_parms(FDF = dirtyFDF, phiArr = phiArr_radm2, fwhmRMSF = fwhmRMSF,", "prefixOut + \".debug-plots.pdf\" if verbose: print(\"> \" + outFilePlot) tmpFig.savefig(outFilePlot,", "import plot_complexity_fig from RMutils.util_plotTk import CustomNavbar from RMutils.util_plotTk import plot_rmsIQU_vs_nu_ax", "freq_Hz, q, u, dq, du try: if verbose: log(\"> Trying", "mDict[\"dPolAngle0Fit_deg\"])) log('Peak FD = %.4g (+/-%.4g) rad/m^2' % (mDict[\"phiPeakPIfit_rm2\"], mDict[\"dPhiPeakPIfit_rm2\"]))", "aDict[\"RMSFArr\"] = RMSFArr aDict[\"freqArr_Hz\"] = freqArr_Hz aDict[\"weightArr\"]=weightArr aDict[\"dirtyFDF\"]=dirtyFDF if verbose:", "duArr=pD[\"dyArrU\"], sigmaAdduArr=pD[\"sigmaAddArrU\"], chiSqReduArr=pD[\"chiSqRedArrU\"], probuArr=pD[\"probArrU\"], mDict=mDict) if saveOutput: if verbose: print(\"Saving", "sampled [Auto].\") parser.add_argument(\"-d\", dest=\"dPhi_radm2\", type=float, default=None, help=\"width of Faraday depth", "uncertainty in Q and U. \"uniform\" -- Weight uniformly (i.e.", "(bool): Fit a Gaussian to the RMSF? noStokesI (bool: Is", "the FDF !!Old formula only works for wariance weights! 
weightArr", "arrdict[\"dirtyFDF\"].imag))) outFile = prefixOut + \"_RMSF.dat\" if verbose: print(\"> %s\"", "(+%.4g, -%.4g)' % (mDict[\"sigmaAddQ\"], mDict[\"dSigmaAddPlusQ\"], mDict[\"dSigmaAddMinusQ\"])) log('sigma_add(u) = %.4g (+%.4g,", "(array_like): Error in Stokes I intensity in each channel. dQ", "OR THE USE OR OTHER # # DEALINGS IN THE", "called from the command line. \"\"\" # Help string to", "try: if verbose: print(\"> Trying [freq_Hz, q, u, dq, du]\",", "Dirty FDF/RM Spectrum [Phi, Q, U] _RMSF.dat: Computed RMSF [Phi,", "[freq_Hz, I, Q, U, dI, dQ, dU], else [freq_Hz, q,", "not exist: '%s'.\" % args.dataFile[0]) sys.exit() prefixOut, ext = os.path.splitext(args.dataFile[0])", "provided? phiNoise_radm2 (float): ???? nBits (int): Precision of floating point", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "= dirtyFDF, phi2Arr = phi2Arr_radm2, RMSFArr = RMSFArr, fwhmRMSF =", "parser.add_argument(\"-i\", dest=\"noStokesI\", action=\"store_true\", help=\"ignore the Stokes I spectrum [False].\") parser.add_argument(\"-b\",", "the file. If Stokes I is present, this will be", "convert back to flux # Calculate the theoretical noise in", "in Hz. I (array_like): Stokes I intensity in each channel.", "# Convert to GHz for convenience freqArr_GHz = freqArr_Hz /", "= dqArr, duArr = duArr, fracPol = mDict[\"fracPol\"], psi0_deg =", "from the Stokes I model # Multiply the dirty FDF", "each channel. Q (array_like): Stokes Q intensity in each channel.", "(bool): Show plots? 
debug (bool): Turn on debugging messages &", "in %.2f seconds.\" % cputime) # Determine the Stokes I", "model contains negative values!\") elif fitDict[\"fitStatus\"] >= 64: log(\"Caution: Stokes", "sys.exit() prefixOut, ext = os.path.splitext(args.dataFile[0]) dataDir, dummy = os.path.split(args.dataFile[0]) #", "log(\"Successfully read in the Stokes spectra.\") # If no Stokes", "except Exception: if verbose: log(\"...failed.\") # freq_Hz, q, u, dq,", "print(\"Saving debug plots:\") outFilePlot = prefixOut + \".debug-plots.pdf\" if verbose:", "MODIFIED: 16-Nov-2018 by <NAME> # # MODIFIED: 23-October-2019 by <NAME>", "plot_rmsIQU_vs_nu_ax if sys.version_info.major == 2: print('RM-tools will no longer run", "help=\"verbose output [False].\") parser.add_argument(\"-S\", dest=\"saveOutput\", action=\"store_true\", help=\"save the arrays and", "RMSF = %.4g rad/m^2' % (mDict[\"fwhmRMSF\"])) log('Pol Angle = %.4g", "plots [False].\") parser.add_argument(\"-U\", dest=\"units\", type=str, default=\"Jy/beam\", help=\"Intensity units of the", "do_RMsynth_1D.py # # # # PURPOSE: API for runnning RM-synthesis", "debug: tmpFig = plot_complexity_fig(xArr=pD[\"xArrQ\"], qArr=pD[\"yArrQ\"], dqArr=pD[\"dyArrQ\"], sigmaAddqArr=pD[\"sigmaAddArrQ\"], chiSqRedqArr=pD[\"chiSqRedArrQ\"], probqArr=pD[\"probArrQ\"], uArr=pD[\"yArrU\"],", "(mDict[\"phiPeakPIfit_rm2\"], mDict[\"dPhiPeakPIfit_rm2\"])) log('freq0_GHz = %.4g ' % (mDict[\"freq0_Hz\"]/1e9)) log('I freq0", "print(traceback.format_exc()) sys.exit() if verbose: print(\"Successfully read in the Stokes spectra.\")", "stopPhi_radm2 = + (nChanRM-1.0) * dPhi_radm2 / 2.0 phiArr_radm2 =", "nChanRM)) # Calculate the weighting as 1/sigma^2 or all 1s", "intensity (U/I) in each channel. 
dq (array_like): Error in fractional", "ax.set_ylabel('RMS '+units) ax.set_title(\"RMS noise in Stokes Q, U and <Q,U>", "= open(outFile, \"w\") for k, v in outdict.items(): FH.write(\"%s=%s\\n\" %", "shall be included in # # all copies or substantial", "2: print('RM-tools will no longer run with Python 2! Please", "Copyright (c) 2015 - 2018 <NAME> # # # #", "in fractional Stokes U intensity in each channel. Kwargs: polyOrd", "log(\"Caution: Stokes I model has low signal-to-noise.\") #Add information on", "(bool): Verbosity. log (function): Which logging function to use. units", "dIArr = dIArr, dqArr = dqArr, duArr = duArr, freqHirArr_Hz", "log) fwhmRMSF = float(fwhmRMSFArr) # ALTERNATE RM-SYNTHESIS CODE --------------------------------------------# #dirtyFDF,", "# the rights to use, copy, modify, merge, publish, distribute,", "# except Exception: # pass # Display the figure #", "phiMax_radm2 (float): Maximum absolute Faraday depth (rad/m^2). dPhi_radm2 (float): Faraday", "fitDict = \\ create_frac_spectra(freqArr = freqArr_GHz, IArr = IArr, QArr", "aDict[\"freqArr_Hz\"] = freqArr_Hz aDict[\"weightArr\"]=weightArr aDict[\"dirtyFDF\"]=dirtyFDF if verbose: # Print the", "store the data as. verbose (bool): Print verbose messages to", "point numbers. showPlots (bool): Show plots? debug (bool): Turn on", "descStr = \"\"\" Run RM-synthesis on Stokes I, Q and", "# # #=============================================================================# # # # The MIT License (MIT)", "Default data types dtFloat = \"float\" + str(nBits) dtComplex =", "(float): Faraday depth channel size (rad/m^2). 
nSamples (float): Number of", "and the Stokes I model fit if verbose: log(\"Plotting the", "calculate uncertainties mDict = measure_FDF_parms(FDF = dirtyFDF, phiArr = phiArr_radm2,", "= readFile(args.dataFile[0],nBits, verbose=verbose, debug=args.debug) # Run RM-synthesis on the spectra", "dFDF = dFDFth, lamSqArr_m2 = lambdaSqArr_m2, lam0Sq = lam0Sq_m2) mDict[\"Ifreq0\"]", "Mac OS X) # try: # specFig.canvas.toolbar.pack_forget() # CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)", "% (mDict[\"dQU\"],units)) log('FDF Noise (theory) = %.4g %s' % (mDict[\"dFDFth\"],units))", "the following conditions: # # # # The above copyright", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "action=\"store_true\", help=\"turn on debugging messages & plots [False].\") parser.add_argument(\"-U\", dest=\"units\",", "OR OTHER # # DEALINGS IN THE SOFTWARE. # #", "To get outputs, one or more of the following flags", "Fractional Stokes U intensity (U/I) in each channel. dq (array_like):", "import poly5 from RMutils.util_misc import MAD from RMutils.util_plotTk import plot_Ipqu_spectra_fig", "(float): ???? nBits (int): Precision of floating point numbers. showPlots", "False, nBits = nBits, verbose = verbose, log = log)", "the q and u spectra mDict[\"fracPol\"] = mDict[\"ampPeakPIfit\"]/(Ifreq0) mD, pD", "log('QU Noise = %.4g %s' % (mDict[\"dQU\"],units)) log('FDF Noise (theory)", "(array_like): Stokes Q intensity in each channel. 
U (array_like): Stokes", "(+/-%.4g) %s' % (mDict[\"ampPeakPIfit\"], mDict[\"dAmpPeakPIfit\"],units)) log('QU Noise = %.4g %s'", "screen log() log('-'*80) log('RESULTS:\\n') log('FWHM RMSF = %.4g rad/m^2' %", "= prefixOut + \"_weight.dat\" if verbose: print(\"> %s\" % outFile)", "RMSF and weight array to ASCII files if verbose: print(\"Saving", "help=\"ASCII file containing Stokes spectra & errors.\") parser.add_argument(\"-t\", dest=\"fitRMSF\", action=\"store_true\",", "weightArr = np.where(np.isnan(weightArr), 0.0, weightArr) dFDFth = np.sqrt( np.sum(weightArr**2 *", "of data. Returns: mDict (dict): Summary of RM synthesis results.", "# # MODIFIED: 16-Nov-2018 by <NAME> # # MODIFIED: 23-October-2019", "fitRMSF = args.fitRMSF, noStokesI = args.noStokesI, nBits = nBits, showPlots", "in use.\") IArr = np.ones_like(QArr) dIArr = np.zeros_like(QArr) # Convert", "time import traceback import json import math as m import", "= np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000) IModHirArr = poly5(fitDict[\"p\"])(freqHirArr_Hz/1e9) specFig = plt.figure(figsize=(12.0,", "if debug: rmsFig = plt.figure(figsize=(12.0, 8)) ax = rmsFig.add_subplot(111) ax.plot(freqArr_Hz/1e9,", "verbose: log(\"... success.\") except Exception: if verbose: log(\"...failed.\") # freq_Hz,", "default=None, help=\"absolute max Faraday depth sampled [Auto].\") parser.add_argument(\"-d\", dest=\"dPhi_radm2\", type=float,", "is None: dPhi_radm2 = fwhmRMSF_radm2 / nSamples if phiMax_radm2 is", "# Faraday depth sampling. 
Zero always centred on middle channel", "8)) plot_rmsf_fdf_fig(phiArr = phiArr_radm2, FDF = dirtyFDF, phi2Arr = phi2Arr_radm2,", "deal in the Software without restriction, including without limitation #", "phi2Arr = phi2Arr_radm2, RMSFArr = RMSFArr, fwhmRMSF = fwhmRMSF, vLine", "point precision nBits = 32 if args.bit64: nBits = 64", "RMutils.util_RM import do_rmsynth from RMutils.util_RM import do_rmsynth_planes from RMutils.util_RM import", "of light [m/s] #-----------------------------------------------------------------------------# def run_rmsynth(data, polyOrd=3, phiMax_radm2=None, dPhi_radm2=None, nSamples=10.0,", "dQUArr = (dQArr + dUArr)/2.0 # Fit the Stokes I", "phiArr_radm2 aDict[\"phi2Arr_radm2\"] = phi2Arr_radm2 aDict[\"RMSFArr\"] = RMSFArr aDict[\"freqArr_Hz\"] = freqArr_Hz", "RMutils.util_misc import MAD from RMutils.util_plotTk import plot_Ipqu_spectra_fig from RMutils.util_plotTk import", "full traceback in case of failure? Returns: data (list of", "log(\"WARNING: Stokes I model contains negative values!\") elif fitDict[\"fitStatus\"] >=", "# # #=============================================================================# import sys import os import time import", "by uncertainty in Q and U. 
\"uniform\" -- Weight uniformly", "FDF by Ifreq0 to recover the PI freq0_Hz = C", "log(traceback.format_exc()) sys.exit() if verbose: log(\"Successfully read in the Stokes spectra.\")", "# # The above copyright notice and this permission notice", "nature of channels: good_channels=np.where(np.logical_and(weightArr != 0,np.isfinite(qArr)))[0] mDict[\"min_freq\"]=float(np.min(freqArr_Hz[good_channels])) mDict[\"max_freq\"]=float(np.max(freqArr_Hz[good_channels])) mDict[\"N_channels\"]=good_channels.size mDict[\"median_channel_width\"]=float(np.median(np.diff(freqArr_Hz)))", "= \\ # do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr) #-------------------------------------------------------------------------# endTime", "# MODIFIED: 16-Nov-2018 by <NAME> # # MODIFIED: 23-October-2019 by", "marker='o', color='r', lw=0.5, label='rms U') xRange = (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9 ax.set_xlim( np.min(freqArr_Hz)/1e9", "print(\"> Trying [freq_Hz, I, Q, U, dI, dQ, dU]\", end='", "# # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN", "(rms) = %.4g %s' % (mDict[\"dFDFrms\"],units)) log('FDF SNR = %.4g", "precision [False (uses 32-bit)]\") parser.add_argument(\"-p\", dest=\"showPlots\", action=\"store_true\", help=\"show the plots", "channel in Hz. 
q (array_like): Fractional Stokes Q intensity (Q/I)", "open(outFile, \"w\") for k, v in outdict.items(): FH.write(\"%s=%s\\n\" % (k,", "QArr, UArr, dQArr, dUArr) = \\ np.loadtxt(dataFile, unpack=True, dtype=dtFloat) if", "copies of the Software, and to permit persons to whom", "np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2) ) dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2))) dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2)))", "to deal in the Software without restriction, including without limitation", "to GHz for convenience freqArr_GHz = freqArr_Hz / 1e9 dQUArr", "log('FDF SNR = %.4g ' % (mDict[\"snrPIfit\"])) log('sigma_add(q) = %.4g", "2015 - 2018 <NAME> # # # # Permission is", "phi2Arr_radm2, RMSFArr = RMSFArr, fwhmRMSF = fwhmRMSF, vLine = mDict[\"phiPeakPIfit_rm2\"],", "ASCII files if verbose: print(\"Saving the dirty FDF, RMSF weight", "ASCII files.\") outFile = prefixOut + \"_FDFdirty.dat\" if verbose: print(\">", "Save the dirty FDF, RMSF and weight array to ASCII", "metavar=\"dataFile.dat\", nargs=1, help=\"ASCII file containing Stokes spectra & errors.\") parser.add_argument(\"-t\",", "fwhmRMSF = fwhmRMSF, dFDF = dFDFth, lamSqArr_m2 = lambdaSqArr_m2, lam0Sq", "# CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window) # except Exception: # pass # Display", "Stokes spectra.\") return data def saveOutput(outdict, arrdict, prefixOut, verbose): #", "(+/-%.4g) deg' % (mDict[\"polAngle0Fit_deg\"], mDict[\"dPolAngle0Fit_deg\"])) log('Peak FD = %.4g (+/-%.4g)", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # # FITNESS FOR", "the Q, U and average RMS spectrum) if debug: rmsFig", "log(\"> RM-synthesis completed in %.2f seconds.\" % cputime) # Determine", "duArr, fitDict = \\ create_frac_spectra(freqArr = freqArr_GHz, IArr = IArr,", "Gaussian to the RMSF [False]\") parser.add_argument(\"-l\", dest=\"phiMax_radm2\", type=float, default=None, help=\"absolute", "--------------------------------------------# #dirtyFDF, [phi2Arr_radm2, 
RMSFArr], lam0Sq_m2, fwhmRMSF = \\ # do_rmsynth(qArr,", "and dirty FDF plot:\") outFilePlot = prefixOut + \".RMSF-dirtyFDF-plots.pdf\" if", "Stokes I data in use.\") IArr = np.ones_like(QArr) dIArr =", "/ nSamples if phiMax_radm2 is None: phiMax_radm2 = m.sqrt(3.0) /", "in each channel. Kwargs: polyOrd (int): Order of polynomial to", "MODIFIED: 23-October-2019 by <NAME> # # # #=============================================================================# # #", "= 'tight') # #if verbose: print \"Press <RETURN> to exit", "I, Q and U spectra (1D) stored in an ASCII", "model used to create fractional q = Q/I and u", "\".RMSF-dirtyFDF-plots.pdf\" if verbose: print(\"> \" + outFilePlot) fdfFig.savefig(outFilePlot, bbox_inches =", "%s' % (mDict[\"Ifreq0\"],units)) log('Peak PI = %.4g (+/-%.4g) %s' %", "the data. [Jy/beam]\") args = parser.parse_args() # Sanity checks if", "duArr = duArr, freqHirArr_Hz = freqHirArr_Hz, IModArr = IModHirArr, fig", "outdict.items(): FH.write(\"%s=%s\\n\" % (k, v)) FH.close() outFile = prefixOut +", "the Stokes I value at lam0Sq_m2 from the Stokes I", "# input() return mDict, aDict def readFile(dataFile, nBits, verbose=True, debug=False):", "U spectra (1D) stored in an ASCII file. The Stokes", "following flags must be set: -S, -p, -v. \"\"\" epilog_text=\"\"\"", "(+/-%.4g) deg' % (mDict[\"polAngleFit_deg\"], mDict[\"dPolAngleFit_deg\"])) log('Pol Angle 0 = %.4g", "verbose: log(\"> Trying [freq_Hz, I, Q, U, dI, dQ, dU]\",", "import get_rmsf_planes from RMutils.util_RM import measure_FDF_parms from RMutils.util_RM import measure_qu_complexity", "np.where(np.isnan(weightArr), 0.0, weightArr) dFDFth = np.sqrt( np.sum(weightArr**2 * np.nan_to_num(dQUArr)**2) /", "U intensity (U/I) in each channel. 
dq (array_like): Error in", "Q, U, I_err, Q_err, U_err] OR [freq_Hz, Q, U, Q_err,", "KIND, EXPRESS OR # # IMPLIED, INCLUDING BUT NOT LIMITED", "= freqArr_Hz, IArr = IArr, qArr = qArr, uArr =", "plot_complexity_fig(xArr=pD[\"xArrQ\"], qArr=pD[\"yArrQ\"], dqArr=pD[\"dyArrQ\"], sigmaAddqArr=pD[\"sigmaAddArrQ\"], chiSqRedqArr=pD[\"chiSqRedArrQ\"], probqArr=pD[\"probArrQ\"], uArr=pD[\"yArrU\"], duArr=pD[\"dyArrU\"], sigmaAdduArr=pD[\"sigmaAddArrU\"], chiSqReduArr=pD[\"chiSqRedArrU\"],", "# If no Stokes I present, create a dummy spectrum", "weightArr = weightArr, nBits = nBits, verbose = verbose, log", "import create_frac_spectra from RMutils.util_misc import poly5 from RMutils.util_misc import MAD", "Please use Python 3.') exit() C = 2.997924538e8 # Speed", "Calculate the weighting as 1/sigma^2 or all 1s (uniform) if", "+ str(nBits) dtComplex = \"complex\" + str(2*nBits) # freq_Hz, I,", "str(nBits) dtComplex = \"complex\" + str(2*nBits) # Output prefix is", "nBits = nBits, verbose = verbose, log = log) #", "NAME: do_RMsynth_1D.py # # # # PURPOSE: API for runnning", "I model contains negative values!\") elif fitDict[\"fitStatus\"] >= 64: log(\"Caution:", "lam0Sq_m2 = lam0Sq_m2, double = True, fitRMSF = fitRMSF, fitRMSFreal", "or saveOutput: fdfFig = plt.figure(figsize=(12.0, 8)) plot_rmsf_fdf_fig(phiArr = phiArr_radm2, FDF", "(bool): Turn on debugging messages & plots? verbose (bool): Verbosity.", "RM synthesis on 1D data. 
Args: data (list): Contains frequency", "noStokesI = True except Exception: if verbose: log(\"...failed.\") if debug:", "# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "spectrum.# # # # MODIFIED: 16-Nov-2018 by <NAME> # #", "Faraday depth range fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #", "python #=============================================================================# # # # NAME: do_RMsynth_1D.py # # #", "try: # specFig.canvas.toolbar.pack_forget() # CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window) # except Exception: #", "sigmaAdduArr=pD[\"sigmaAddArrU\"], chiSqReduArr=pD[\"chiSqRedArrU\"], probuArr=pD[\"probArrU\"], mDict=mDict) if saveOutput: if verbose: print(\"Saving debug", "2018 <NAME> # # # # Permission is hereby granted,", "= %.4g ' % (mDict[\"snrPIfit\"])) log('sigma_add(q) = %.4g (+%.4g, -%.4g)'", "specFig.show() # DEBUG (plot the Q, U and average RMS", "epilog_text=\"\"\" Outputs with -S flag: _FDFdirty.dat: Dirty FDF/RM Spectrum [Phi,", "used to create fractional q = Q/I and u =", "% (mDict[\"snrPIfit\"])) log('sigma_add(q) = %.4g (+%.4g, -%.4g)' % (mDict[\"sigmaAddQ\"], mDict[\"dSigmaAddPlusQ\"],", "intensity in each channel. 
dI (array_like): Error in Stokes I", "phiArr_radm2, weightArr = weightArr, nBits = nBits, verbose = verbose,", "IArr, QArr, UArr, dIArr, dQArr, dUArr) = \\ np.loadtxt(dataFile, unpack=True,", "args = args, ) if args.saveOutput: saveOutput(mDict, aDict, prefixOut, verbose)", "# # # Permission is hereby granted, free of charge,", "UArr, dIArr = dIArr, dQArr = dQArr, dUArr = dUArr,", "furnished to do so, subject to the following conditions: #", "(+%.4g, -%.4g)' % (mDict[\"sigmaAddU\"], mDict[\"dSigmaAddPlusU\"], mDict[\"dSigmaAddMinusU\"])) log() log('-'*80) # Plot", "(dQArr + dUArr)/2.0 # Fit the Stokes I spectrum and", "# The above copyright notice and this permission notice shall", "get_rmsf_planes from RMutils.util_RM import measure_FDF_parms from RMutils.util_RM import measure_qu_complexity from", "on nature of channels: good_channels=np.where(np.logical_and(weightArr != 0,np.isfinite(qArr)))[0] mDict[\"min_freq\"]=float(np.min(freqArr_Hz[good_channels])) mDict[\"max_freq\"]=float(np.max(freqArr_Hz[good_channels])) mDict[\"N_channels\"]=good_channels.size", "RMSF [False]\") parser.add_argument(\"-l\", dest=\"phiMax_radm2\", type=float, default=None, help=\"absolute max Faraday depth", "fracpol units initially, convert back to flux # Calculate the", "/ m.sqrt(lam0Sq_m2) Ifreq0 = poly5(fitDict[\"p\"])(freq0_Hz/1e9) dirtyFDF *= (Ifreq0) # FDF", "= nBits, verbose = verbose, log = log) fwhmRMSF =", "OF CONTRACT, TORT OR OTHERWISE, ARISING # # FROM, OUT", "parameters lambdaSqArr_m2 = np.power(C/freqArr_Hz, 2.0) dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz))) lambdaSqRange_m2 =", "\"\"\" # Parse the command line options parser = argparse.ArgumentParser(description=descStr,epilog=epilog_text,", "= mDict[\"phiPeakPIfit_rm2\"], fig = fdfFig, units = units) # Use", "dataU = uArr, lambdaSqArr_m2 = lambdaSqArr_m2, phiArr_radm2 = phiArr_radm2, weightArr", "ASCII file should the following columns, in a space separated", "args.units, prefixOut = prefixOut, args = args, ) if 
args.saveOutput:", "works for wariance weights! weightArr = np.where(np.isnan(weightArr), 0.0, weightArr) dFDFth", "file '%s':\" % dataFile) # freq_Hz, I, Q, U, dI,", "the -h option descStr = \"\"\" Run RM-synthesis on Stokes", "%s' % (mDict[\"ampPeakPIfit\"], mDict[\"dAmpPeakPIfit\"],units)) log('QU Noise = %.4g %s' %", "Q, U, Q_err, U_err] To get outputs, one or more", "[Phi, Q, U] _RMSF.dat: Computed RMSF [Phi, Q, U] _RMsynth.dat:", "# # Software is furnished to do so, subject to", "including without limitation # # the rights to use, copy,", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "chans).\" % (phiArr_radm2[0], phiArr_radm2[-1], float(dPhi_radm2), nChanRM)) # Calculate the weighting", "RM-synthesis on an ASCII Stokes I, Q & U spectrum.#", "= uArr, lambdaSqArr_m2 = lambdaSqArr_m2, phiArr_radm2 = phiArr_radm2, weightArr =", "as plt from RMutils.util_RM import do_rmsynth from RMutils.util_RM import do_rmsynth_planes", "outFilePlot = prefixOut + \".debug-plots.pdf\" if verbose: print(\"> \" +", "nChanRM) phiArr_radm2 = phiArr_radm2.astype(dtFloat) if verbose: log(\"PhiArr = %.2f to", "verbose: print(\"Successfully read in the Stokes spectra.\") return data def", "RMSF. weightType (str): Can be \"variance\" or \"uniform\" \"variance\" --", "ax.plot(freqArr_Hz/1e9, dQUArr, marker='o', color='k', lw=0.5, label='rms <QU>') ax.plot(freqArr_Hz/1e9, dQArr, marker='o',", "dQUArr, marker='o', color='k', lw=0.5, label='rms <QU>') ax.plot(freqArr_Hz/1e9, dQArr, marker='o', color='b',", "intensity in each channel. 
Kwargs: polyOrd (int): Order of polynomial", "flux # Calculate the theoretical noise in the FDF !!Old", "np import matplotlib.pyplot as plt from RMutils.util_RM import do_rmsynth from", "np.min(freqArr_Hz)/1e9 - xRange*0.05, np.max(freqArr_Hz)/1e9 + xRange*0.05) ax.set_xlabel('$\\\\nu$ (GHz)') ax.set_ylabel('RMS '+units)", "qArr = qArr, uArr = uArr, dqArr = dqArr, duArr", "RM spectrum (approximately equivalent to -v flag output) _RMsynth.json: dictionary", "file. nBits (int): number of bits to store the data", "THE USE OR OTHER # # DEALINGS IN THE SOFTWARE.", "= lambdaSqArr_m2, phiArr_radm2 = phiArr_radm2, weightArr = weightArr, mskArr =", "dtype=dtFloat) if verbose: print(\"... success.\") data=[freqArr_Hz, QArr, UArr, dQArr, dUArr]", "rad/m^2' % (mDict[\"phiPeakPIfit_rm2\"], mDict[\"dPhiPeakPIfit_rm2\"])) log('freq0_GHz = %.4g ' % (mDict[\"freq0_Hz\"]/1e9))", "parser.add_argument(\"-p\", dest=\"showPlots\", action=\"store_true\", help=\"show the plots [False].\") parser.add_argument(\"-v\", dest=\"verbose\", action=\"store_true\",", "= rmsFig.add_subplot(111) ax.plot(freqArr_Hz/1e9, dQUArr, marker='o', color='k', lw=0.5, label='rms <QU>') ax.plot(freqArr_Hz/1e9,", "!= 0,np.isfinite(qArr)))[0] mDict[\"min_freq\"]=float(np.min(freqArr_Hz[good_channels])) mDict[\"max_freq\"]=float(np.max(freqArr_Hz[good_channels])) mDict[\"N_channels\"]=good_channels.size mDict[\"median_channel_width\"]=float(np.median(np.diff(freqArr_Hz))) # Measure the complexity", "list(zip(arrdict[\"freqArr_Hz\"], arrdict[\"weightArr\"]))) # Save the measurements to a \"key=value\" text", "plots [False].\") parser.add_argument(\"-v\", dest=\"verbose\", action=\"store_true\", help=\"verbose output [False].\") parser.add_argument(\"-S\", dest=\"saveOutput\",", "and <Q,U> spectra\") # rmsFig.show() #-------------------------------------------------------------------------# # Calculate some wavelength", "-s NSAMPLES flag)\") parser.add_argument(\"-s\", dest=\"nSamples\", type=float, default=10, 
help=\"number of samples", "dirty FDF by Ifreq0 to recover the PI freq0_Hz =", "lam0Sq_m2) mDict[\"Ifreq0\"] = toscalar(Ifreq0) mDict[\"polyCoeffs\"] = \",\".join([str(x) for x in", "help=\"weighting [inverse variance] or 'uniform' (all 1s).\") parser.add_argument(\"-o\", dest=\"polyOrd\", type=int,", "phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM) phiArr_radm2 = phiArr_radm2.astype(dtFloat) if verbose:", "and to permit persons to whom the # # Software", "Error in Stokes U intensity in each channel. or [freq_Hz,", "dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2))) # Set the Faraday depth range fwhmRMSF_radm2", "= nBits, showPlots = args.showPlots, debug = args.debug, verbose =", "channel. Kwargs: polyOrd (int): Order of polynomial to fit to", "mDict[\"dQU\"] = toscalar(nanmedian(dQUArr)) mDict[\"dFDFth\"] = toscalar(dFDFth) mDict[\"units\"] = units if", "obtaining a # # copy of this software and associated", "and polarization data as either: [freq_Hz, I, Q, U, dI,", "log('-'*80) log('RESULTS:\\n') log('FWHM RMSF = %.4g rad/m^2' % (mDict[\"fwhmRMSF\"])) log('Pol", "# # # #=============================================================================# import sys import os import time", "channel. dQ (array_like): Error in Stokes Q intensity in each", "tmpFig.savefig(outFilePlot, bbox_inches = 'tight') else: tmpFig.show() #add array dictionary aDict", "in each channel. u (array_like): Fractional Stokes U intensity (U/I)", "data-file. Format=space-delimited, comments=\"#\". if verbose: print(\"Reading the data file '%s':\"", "%s\" % outFile) FH = open(outFile, \"w\") for k, v", "RM-synthesis on the spectrum dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ = qArr,", "or all 1s (uniform) if weightType==\"variance\": weightArr = 1.0 /", "debug=False): \"\"\" Read the I, Q & U data from", "file should the following columns, in a space separated format:", "If Stokes I is present, this will be [freq_Hz, I,", "(rad/m^2). 
dPhi_radm2 (float): Faraday depth channel size (rad/m^2). nSamples (float):", "each channel. U (array_like): Stokes U intensity in each channel.", "* m.sqrt(3.0) / lambdaSqRange_m2 if dPhi_radm2 is None: dPhi_radm2 =", "verbose: print(\"> Trying [freq_Hz, q, u, dq, du]\", end=' ')", "IModHirArr = poly5(fitDict[\"p\"])(freqHirArr_Hz/1e9) specFig = plt.figure(figsize=(12.0, 8)) plot_Ipqu_spectra_fig(freqArr_Hz = freqArr_Hz,", "intensity in each channel. dU (array_like): Error in Stokes U", "with a polynomial and the resulting model used to create", "\"_RMsynth.json\" if verbose: print(\"> %s\" % outFile) json.dump(dict(outdict), open(outFile, \"w\"))", "dUArr, polyOrd = polyOrd, verbose = True, debug = debug)", "dest=\"polyOrd\", type=int, default=2, help=\"polynomial order to fit to I spectrum", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # #", "IN NO EVENT SHALL THE # # AUTHORS OR COPYRIGHT", "Plot the data and the Stokes I model fit if", "data in use.\") IArr = np.ones_like(QArr) dIArr = np.zeros_like(QArr) #", "# # Permission is hereby granted, free of charge, to", "work on Mac OS X) # try: # specFig.canvas.toolbar.pack_forget() #", "ALTERNATE RM-SYNTHESIS CODE --------------------------------------------# #dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF =", "# Sanity checks if not os.path.exists(args.dataFile[0]): print(\"File does not exist:", "prefixOut, verbose): # Save the dirty FDF, RMSF and weight", "Order of polynomial to fit to Stokes I spectrum. 
phiMax_radm2", "parser.add_argument(\"-w\", dest=\"weightType\", default=\"variance\", help=\"weighting [inverse variance] or 'uniform' (all 1s).\")", "import MAD from RMutils.util_plotTk import plot_Ipqu_spectra_fig from RMutils.util_plotTk import plot_rmsf_fdf_fig", "dest=\"nSamples\", type=float, default=10, help=\"number of samples across the RMSF lobe", "help=\"turn on debugging messages & plots [False].\") parser.add_argument(\"-U\", dest=\"units\", type=str,", "Outputs with -S flag: _FDFdirty.dat: Dirty FDF/RM Spectrum [Phi, Q,", "the command line. \"\"\" # Help string to be shown", "try: if verbose: print(\"> Trying [freq_Hz, I, Q, U, dI,", "lambdaSqArr_m2, phiArr_radm2 = phiArr_radm2, weightArr = weightArr, nBits = nBits,", "RM-synthesis on Stokes I, Q and U spectra (1D) stored", "depth (rad/m^2). dPhi_radm2 (float): Faraday depth channel size (rad/m^2). nSamples", "weightType = args.weightType, fitRMSF = args.fitRMSF, noStokesI = args.noStokesI, nBits", "in fitDict[\"p\"]]) mDict[\"IfitStat\"] = fitDict[\"fitStatus\"] mDict[\"IfitChiSqRed\"] = fitDict[\"chiSqRed\"] mDict[\"lam0Sq_m2\"] =", "phiArr_radm2, weightArr = weightArr, mskArr = ~np.isfinite(qArr), lam0Sq_m2 = lam0Sq_m2,", "= RMSFArr, fwhmRMSF = fwhmRMSF, vLine = mDict[\"phiPeakPIfit_rm2\"], fig =", "% args.dataFile[0]) sys.exit() prefixOut, ext = os.path.splitext(args.dataFile[0]) # Default data", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "data provided? phiNoise_radm2 (float): ???? nBits (int): Precision of floating", "if verbose: log(\"...failed.\") if debug: log(traceback.format_exc()) sys.exit() if verbose: log(\"Successfully", "verbose: print(\"Reading the data file '%s':\" % dataFile) # freq_Hz,", "# freq_Hz, q, u, dq, du try: if verbose: print(\">", "from RMutils.util_RM import measure_fdf_complexity from RMutils.util_misc import nanmedian from RMutils.util_misc", "of each channel in Hz. 
I (array_like): Stokes I intensity", "0 = %.4g (+/-%.4g) deg' % (mDict[\"polAngle0Fit_deg\"], mDict[\"dPolAngle0Fit_deg\"])) log('Peak FD", "Faraday depth (rad/m^2). dPhi_radm2 (float): Faraday depth channel size (rad/m^2).", "messages & plots [False].\") parser.add_argument(\"-U\", dest=\"units\", type=str, default=\"Jy/beam\", help=\"Intensity units", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "MIT License (MIT) # # # # Copyright (c) 2015", "print(\"...failed.\") if debug: print(traceback.format_exc()) sys.exit() if verbose: print(\"Successfully read in", "in each channel. U (array_like): Stokes U intensity in each", "if debug: tmpFig = plot_complexity_fig(xArr=pD[\"xArrQ\"], qArr=pD[\"yArrQ\"], dqArr=pD[\"dyArrQ\"], sigmaAddqArr=pD[\"sigmaAddArrQ\"], chiSqRedqArr=pD[\"chiSqRedArrQ\"], probqArr=pD[\"probArrQ\"],", "an ASCII file. The Stokes I spectrum is first fit", "mDict[\"polAngle0Fit_deg\"], RM_radm2 = mDict[\"phiPeakPIfit_rm2\"]) mDict.update(mD) # Debugging plots for spectral", "toolbar (does not work on Mac OS X) # try:", "= IArr, qArr = qArr, uArr = uArr, dIArr =", "and average RMS spectrum) if debug: rmsFig = plt.figure(figsize=(12.0, 8))", "= dict() aDict[\"phiArr_radm2\"] = phiArr_radm2 aDict[\"phi2Arr_radm2\"] = phi2Arr_radm2 aDict[\"RMSFArr\"] =", "plotting enabled if showPlots: plt.show() elif saveOutput or debug: if", "to -v flag output) _RMsynth.json: dictionary of derived parameters for", "Spread Function RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \\ get_rmsf_planes(lambdaSqArr_m2 =", "= np.where(np.isnan(weightArr), 0.0, weightArr) dFDFth = np.sqrt( np.sum(weightArr**2 * np.nan_to_num(dQUArr)**2)", "= 1.0 / np.power(dQUArr, 2.0) else: weightType = \"uniform\" weightArr", "log() log('-'*80) # Plot the RM Spread Function and dirty", "verbose: print(\"...failed.\") if debug: print(traceback.format_exc()) sys.exit() if verbose: print(\"Successfully read", "from the ASCII file. 
Inputs: datafile (str): relative or absolute", "dtComplex = \"complex\" + str(2*nBits) # Output prefix is derived", "present, create a dummy spectrum = unity if noStokesI: if", "unity if noStokesI: if verbose: log(\"Warn: no Stokes I data", "dirtyFDF *= (Ifreq0) # FDF is in fracpol units initially,", "derived parameters for RM spectrum (approximately equivalent to -v flag", "- xRange*0.05, np.max(freqArr_Hz)/1e9 + xRange*0.05) ax.set_xlabel('$\\\\nu$ (GHz)') ax.set_ylabel('RMS '+units) ax.set_title(\"RMS", "to perform RM-synthesis if called from the command line. \"\"\"", "%.4g (+/-%.4g) %s' % (mDict[\"ampPeakPIfit\"], mDict[\"dAmpPeakPIfit\"],units)) log('QU Noise = %.4g", "% (k, v)) FH.close() outFile = prefixOut + \"_RMsynth.json\" if", "file. The Stokes I spectrum is first fit with a", "on debugging messages & plots? verbose (bool): Verbosity. log (function):", "across the RMSF. weightType (str): Can be \"variance\" or \"uniform\"", "\"float\" + str(nBits) dtComplex = \"complex\" + str(2*nBits) # Output", "= toscalar(fwhmRMSF) mDict[\"dQU\"] = toscalar(nanmedian(dQUArr)) mDict[\"dFDFth\"] = toscalar(dFDFth) mDict[\"units\"] =", "\\ get_rmsf_planes(lambdaSqArr_m2 = lambdaSqArr_m2, phiArr_radm2 = phiArr_radm2, weightArr = weightArr,", "= phiArr_radm2, weightArr = weightArr, nBits = nBits, verbose =", "RMutils.util_misc import poly5 from RMutils.util_misc import MAD from RMutils.util_plotTk import", "showPlots=False, debug=False, verbose=False, log=print,units='Jy/beam', prefixOut=\"prefixOut\", args=None): \"\"\"Run RM synthesis on", "= phiArr_radm2, fwhmRMSF = fwhmRMSF, dFDF = dFDFth, lamSqArr_m2 =", "(rad/m^2). nSamples (float): Number of samples across the RMSF. 
weightType", "q, u, dq, du]\", end=' ') (freqArr_Hz, QArr, UArr, dQArr,", "lam0Sq = lam0Sq_m2) mDict[\"Ifreq0\"] = toscalar(Ifreq0) mDict[\"polyCoeffs\"] = \",\".join([str(x) for", "channel [Auto].\\n(overrides -s NSAMPLES flag)\") parser.add_argument(\"-s\", dest=\"nSamples\", type=float, default=10, help=\"number", "model has low signal-to-noise.\") #Add information on nature of channels:", "# Default data types dtFloat = \"float\" + str(nBits) dtComplex", "Angle 0 = %.4g (+/-%.4g) deg' % (mDict[\"polAngle0Fit_deg\"], mDict[\"dPolAngle0Fit_deg\"])) log('Peak", "import os import time import traceback import json import math", "\"float\" + str(nBits) dtComplex = \"complex\" + str(2*nBits) # freq_Hz,", "log() log('-'*80) log('RESULTS:\\n') log('FWHM RMSF = %.4g rad/m^2' % (mDict[\"fwhmRMSF\"]))", "Q/I and u = U/I spectra. The ASCII file should", "= freqArr_Hz aDict[\"weightArr\"]=weightArr aDict[\"dirtyFDF\"]=dirtyFDF if verbose: # Print the results", "channels: good_channels=np.where(np.logical_and(weightArr != 0,np.isfinite(qArr)))[0] mDict[\"min_freq\"]=float(np.min(freqArr_Hz[good_channels])) mDict[\"max_freq\"]=float(np.max(freqArr_Hz[good_channels])) mDict[\"N_channels\"]=good_channels.size mDict[\"median_channel_width\"]=float(np.median(np.diff(freqArr_Hz))) # Measure", "0.0) / dPhi_radm2)) * 2.0 + 1.0) startPhi_radm2 = -", "polarization data as either: [freq_Hz, I, Q, U, dI, dQ,", "NO EVENT SHALL THE # # AUTHORS OR COPYRIGHT HOLDERS", "RMutils.util_RM import get_rmsf_planes from RMutils.util_RM import measure_FDF_parms from RMutils.util_RM import", "dPhi_radm2 / 2.0 stopPhi_radm2 = + (nChanRM-1.0) * dPhi_radm2 /", "[False].\") parser.add_argument(\"-D\", dest=\"debug\", action=\"store_true\", help=\"turn on debugging messages & plots", "lambdaSqRange_m2 if dPhi_radm2 is None: dPhi_radm2 = fwhmRMSF_radm2 / nSamples", "Multiply the dirty FDF by Ifreq0 to recover the PI", "\\ # do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr) 
#-------------------------------------------------------------------------# endTime =", "# Measure the parameters of the dirty FDF # Use", "dtype=dtFloat) if verbose: log(\"Weight type is '%s'.\" % weightType) startTime", "FH.close() outFile = prefixOut + \"_RMsynth.json\" if verbose: print(\"> %s\"", "from RMutils.util_RM import measure_qu_complexity from RMutils.util_RM import measure_fdf_complexity from RMutils.util_misc", "print(\"File does not exist: '%s'.\" % args.dataFile[0]) sys.exit() prefixOut, ext", "np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000) IModHirArr = poly5(fitDict[\"p\"])(freqHirArr_Hz/1e9) specFig = plt.figure(figsize=(12.0, 8))", "= 2.997924538e8 # Speed of light [m/s] #-----------------------------------------------------------------------------# def run_rmsynth(data,", "& errors.\") parser.add_argument(\"-t\", dest=\"fitRMSF\", action=\"store_true\", help=\"fit a Gaussian to the", "will no longer run with Python 2! Please use Python", "variance] or 'uniform' (all 1s).\") parser.add_argument(\"-o\", dest=\"polyOrd\", type=int, default=2, help=\"polynomial", "if verbose: log(\"... success.\") noStokesI = True except Exception: if", "lamSqArr_m2 = lambdaSqArr_m2, lam0Sq = lam0Sq_m2) mDict[\"Ifreq0\"] = toscalar(Ifreq0) mDict[\"polyCoeffs\"]", "the RMSF [False]\") parser.add_argument(\"-l\", dest=\"phiMax_radm2\", type=float, default=None, help=\"absolute max Faraday", "noStokesI (bool: Is Stokes I data provided? 
phiNoise_radm2 (float): ????", "units) # Use the custom navigation toolbar (does not work", "= + (nChanRM-1.0) * dPhi_radm2 / 2.0 phiArr_radm2 = np.linspace(startPhi_radm2,", "uncertainties mDict = measure_FDF_parms(FDF = dirtyFDF, phiArr = phiArr_radm2, fwhmRMSF", "U_err] OR [freq_Hz, Q, U, Q_err, U_err] To get outputs,", "log(\"Plotting the input data and spectral index fit.\") freqHirArr_Hz =", "nBits=32, showPlots=False, debug=False, verbose=False, log=print,units='Jy/beam', prefixOut=\"prefixOut\", args=None): \"\"\"Run RM synthesis", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # #", "in each channel. dQ (array_like): Error in Stokes Q intensity", "= \"\"\" Run RM-synthesis on Stokes I, Q and U", "is first fit with a polynomial and the resulting model", "channel. dU (array_like): Error in Stokes U intensity in each", "Use the custom navigation toolbar # try: # fdfFig.canvas.toolbar.pack_forget() #", "= os.path.splitext(args.dataFile[0]) dataDir, dummy = os.path.split(args.dataFile[0]) # Set the floating", "create_frac_spectra(freqArr = freqArr_GHz, IArr = IArr, QArr = QArr, UArr", "is derived from the input file name # Read the", "') (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = \\", "U, dI, dQ, dU], else [freq_Hz, q, u, dq, du].", "10 FWHM # Faraday depth sampling. Zero always centred on", "data def saveOutput(outdict, arrdict, prefixOut, verbose): # Save the dirty", "arrdict[\"dirtyFDF\"].real, arrdict[\"dirtyFDF\"].imag))) outFile = prefixOut + \"_RMSF.dat\" if verbose: print(\">", "= freqArr_Hz / 1e9 dQUArr = (dQArr + dUArr)/2.0 #", "debug = debug) # Plot the data and the Stokes", "on the spectrum dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ = qArr, dataU", "RM synthesis. 
\"\"\" # Sanity checks if not os.path.exists(args.dataFile[0]): print(\"File", "option descStr = \"\"\" Run RM-synthesis on Stokes I, Q", "QArr, UArr, dIArr, dQArr, dUArr) = data if verbose: log(\"...", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #", "= weightArr, mskArr = ~np.isfinite(qArr), lam0Sq_m2 = lam0Sq_m2, double =", "verbose, log = log) fwhmRMSF = float(fwhmRMSFArr) # ALTERNATE RM-SYNTHESIS", "derived parameters for RM spectrum _weight.dat: Calculated channel weights [freq_Hz,", "as either: [freq_Hz, I, Q, U, dI, dQ, dU] freq_Hz", "freq_Hz (array_like): Frequency of each channel in Hz. I (array_like):", "Software without restriction, including without limitation # # the rights", "checks if not os.path.exists(args.dataFile[0]): print(\"File does not exist: '%s'.\" %", "readFile(dataFile, nBits, verbose=True, debug=False): \"\"\" Read the I, Q &", "dUArr] noStokesI = True except Exception: if verbose: print(\"...failed.\") if", "= %.2f to %.2f by %.2f (%d chans).\" % (phiArr_radm2[0],", "32 if args.bit64: nBits = 64 verbose=args.verbose data = readFile(args.dataFile[0],nBits,", "Number of samples across the RMSF. weightType (str): Can be", "_RMsynth.json: dictionary of derived parameters for RM spectrum _weight.dat: Calculated", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "= args.debug, verbose = verbose, units = args.units, prefixOut =", "each channel. u (array_like): Fractional Stokes U intensity (U/I) in", "values!\") elif fitDict[\"fitStatus\"] >= 64: log(\"Caution: Stokes I model has", "nBits (int): Precision of floating point numbers. showPlots (bool): Show", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "else: weightType = \"uniform\" weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat) if verbose:", "for RM spectrum (approximately equivalent to -v flag output) _RMsynth.json:", "THE SOFTWARE. 
# # # #=============================================================================# import sys import os", "dPhi_radm2 / 2.0 phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM) phiArr_radm2 =", "\"variance\" or \"uniform\" \"variance\" -- Weight by uncertainty in Q", "files if verbose: print(\"Saving the dirty FDF, RMSF weight arrays", "= phiArr_radm2 aDict[\"phi2Arr_radm2\"] = phi2Arr_radm2 aDict[\"RMSFArr\"] = RMSFArr aDict[\"freqArr_Hz\"] =", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "noise to calculate uncertainties mDict = measure_FDF_parms(FDF = dirtyFDF, phiArr", "Save the measurements to a \"key=value\" text file outFile =", "fig = fdfFig, units = units) # Use the custom", "measure_qu_complexity(freqArr_Hz = freqArr_Hz, qArr = qArr, uArr = uArr, dqArr", "a dummy spectrum = unity if noStokesI: if verbose: log(\"Warn:", "verbose: print(\"> %s\" % outFile) np.savetxt(outFile, list(zip(arrdict[\"phiArr_radm2\"], arrdict[\"dirtyFDF\"].real, arrdict[\"dirtyFDF\"].imag))) outFile", "plt.show() elif saveOutput or debug: if verbose: print(\"Saving RMSF and", "m import numpy as np import matplotlib.pyplot as plt from", "startTime = time.time() # Perform RM-synthesis on the spectrum dirtyFDF,", "= dFDFth, lamSqArr_m2 = lambdaSqArr_m2, lam0Sq = lam0Sq_m2) mDict[\"Ifreq0\"] =", "(freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = data if", "dQ, dU]\", end=' ') (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr,", "def saveOutput(outdict, arrdict, prefixOut, verbose): # Save the dirty FDF,", "to a \"key=value\" text file outFile = prefixOut + \"_RMsynth.dat\"", "resulting model used to create fractional q = Q/I and", "copyright notice and this permission notice shall be included in", ") # Measure the parameters of the dirty FDF #", "the Faraday depth range fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) /", "* np.nan_to_num(dQUArr)**2) / (np.sum(weightArr))**2 ) # Measure the parameters of", "in Stokes U intensity in each channel. 
or [freq_Hz, q,", "longer run with Python 2! Please use Python 3.') exit()", "spectra\") # rmsFig.show() #-------------------------------------------------------------------------# # Calculate some wavelength parameters lambdaSqArr_m2", "outFilePlot) tmpFig.savefig(outFilePlot, bbox_inches = 'tight') else: tmpFig.show() #add array dictionary", "+ 1.0) startPhi_radm2 = - (nChanRM-1.0) * dPhi_radm2 / 2.0", "# # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "of Faraday depth channel [Auto].\\n(overrides -s NSAMPLES flag)\") parser.add_argument(\"-s\", dest=\"nSamples\",", "phiNoise_radm2 (float): ???? nBits (int): Precision of floating point numbers.", "channel. u (array_like): Fractional Stokes U intensity (U/I) in each", "- startTime) if verbose: log(\"> RM-synthesis completed in %.2f seconds.\"", "Measure Spread Function RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \\ get_rmsf_planes(lambdaSqArr_m2", "restriction, including without limitation # # the rights to use,", "% cputime) # Determine the Stokes I value at lam0Sq_m2", "FD = %.4g (+/-%.4g) rad/m^2' % (mDict[\"phiPeakPIfit_rm2\"], mDict[\"dPhiPeakPIfit_rm2\"])) log('freq0_GHz =", "limitation # # the rights to use, copy, modify, merge,", "dU try: if verbose: log(\"> Trying [freq_Hz, I, Q, U,", "be [freq_Hz, I, Q, U, dI, dQ, dU], else [freq_Hz,", "prefixOut, ext = os.path.splitext(args.dataFile[0]) dataDir, dummy = os.path.split(args.dataFile[0]) # Set", "lam0Sq_m2 from the Stokes I model # Multiply the dirty", "debugging messages & plots? verbose (bool): Verbosity. 
log (function): Which", "freqArr_Hz / 1e9 dQUArr = (dQArr + dUArr)/2.0 # Fit", "units = units) # Use the custom navigation toolbar (does", "% (mDict[\"sigmaAddQ\"], mDict[\"dSigmaAddPlusQ\"], mDict[\"dSigmaAddMinusQ\"])) log('sigma_add(u) = %.4g (+%.4g, -%.4g)' %", "= plt.figure(figsize=(12.0, 8)) ax = rmsFig.add_subplot(111) ax.plot(freqArr_Hz/1e9, dQUArr, marker='o', color='k',", "Stokes I model has low signal-to-noise.\") #Add information on nature", "free of charge, to any person obtaining a # #", "fwhmRMSF_radm2*10.) # Force the minimum phiMax to 10 FWHM #", "nBits = nBits, showPlots = args.showPlots, debug = args.debug, verbose", "lam0Sq_m2 = do_rmsynth_planes(dataQ = qArr, dataU = uArr, lambdaSqArr_m2 =", "dest=\"dPhi_radm2\", type=float, default=None, help=\"width of Faraday depth channel [Auto].\\n(overrides -s", "= C / m.sqrt(lam0Sq_m2) Ifreq0 = poly5(fitDict[\"p\"])(freq0_Hz/1e9) dirtyFDF *= (Ifreq0)", "using the -h option descStr = \"\"\" Run RM-synthesis on", "du]\", end=' ') (freqArr_Hz, QArr, UArr, dQArr, dUArr) = \\", "parser.add_argument(\"-d\", dest=\"dPhi_radm2\", type=float, default=None, help=\"width of Faraday depth channel [Auto].\\n(overrides", "from RMutils.util_misc import create_frac_spectra from RMutils.util_misc import poly5 from RMutils.util_misc", "weightArr) #-------------------------------------------------------------------------# endTime = time.time() cputime = (endTime - startTime)", "I intensity in each channel. 
dQ (array_like): Error in Stokes", "') (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = data", "outFilePlot = prefixOut + \".RMSF-dirtyFDF-plots.pdf\" if verbose: print(\"> \" +", "value at lam0Sq_m2 from the Stokes I model # Multiply", "mDict[\"fracPol\"], psi0_deg = mDict[\"polAngle0Fit_deg\"], RM_radm2 = mDict[\"phiPeakPIfit_rm2\"]) mDict.update(mD) # Debugging", "log('-'*80) # Plot the RM Spread Function and dirty FDF", "enabled if showPlots: plt.show() elif saveOutput or debug: if verbose:", "outFile = prefixOut + \"_RMSF.dat\" if verbose: print(\"> %s\" %", "spectra.\") return data def saveOutput(outdict, arrdict, prefixOut, verbose): # Save", "json import math as m import numpy as np import", "output by RM synthesis. \"\"\" # Sanity checks if not", "log(\"...failed.\") # freq_Hz, q, u, dq, du try: if verbose:", "= UArr, dIArr = dIArr, dQArr = dQArr, dUArr =", "print(\"Successfully read in the Stokes spectra.\") return data def saveOutput(outdict,", "toolbar # try: # fdfFig.canvas.toolbar.pack_forget() # CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window) # except", "dUArr) = data if verbose: log(\"... 
success.\") noStokesI = True", "+ \"_RMsynth.json\" if verbose: print(\"> %s\" % outFile) json.dump(dict(outdict), open(outFile,", "dFDFth = np.sqrt( np.sum(weightArr**2 * np.nan_to_num(dQUArr)**2) / (np.sum(weightArr))**2 ) #", "should the following columns, in a space separated format: [freq_Hz,", "= np.nanmin(np.abs(np.diff(lambdaSqArr_m2))) dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2))) # Set the Faraday depth", "to recover the PI freq0_Hz = C / m.sqrt(lam0Sq_m2) Ifreq0", "verbose: print(\"> \" + outFilePlot) fdfFig.savefig(outFilePlot, bbox_inches = 'tight') #", "navigation toolbar (does not work on Mac OS X) #", "mDict[\"units\"] = units if fitDict[\"fitStatus\"] >= 128: log(\"WARNING: Stokes I", "toscalar(nanmedian(dQUArr)) mDict[\"dFDFth\"] = toscalar(dFDFth) mDict[\"units\"] = units if fitDict[\"fitStatus\"] >=", "subject to the following conditions: # # # # The", "(%d chans).\" % (phiArr_radm2[0], phiArr_radm2[-1], float(dPhi_radm2), nChanRM)) # Calculate the", "k, v in outdict.items(): FH.write(\"%s=%s\\n\" % (k, v)) FH.close() outFile", "polyOrd = args.polyOrd, phiMax_radm2 = args.phiMax_radm2, dPhi_radm2 = args.dPhi_radm2, nSamples", "in the Stokes spectra.\") return data def saveOutput(outdict, arrdict, prefixOut,", "= ( np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2) ) dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2))) dLambdaSqMax_m2", "the complexity of the q and u spectra mDict[\"fracPol\"] =", "debug (bool): Print full traceback in case of failure? Returns:", "to permit persons to whom the # # Software is", "a Gaussian to the RMSF? noStokesI (bool: Is Stokes I", "Stokes U intensity in each channel. 
Kwargs: polyOrd (int): Order", "I spectrum [False].\") parser.add_argument(\"-b\", dest=\"bit64\", action=\"store_true\", help=\"use 64-bit floating point", "8)) ax = rmsFig.add_subplot(111) ax.plot(freqArr_Hz/1e9, dQUArr, marker='o', color='k', lw=0.5, label='rms", "U, dI, dQ, dU try: if verbose: print(\"> Trying [freq_Hz,", "RMutils.util_plotTk import plot_Ipqu_spectra_fig from RMutils.util_plotTk import plot_rmsf_fdf_fig from RMutils.util_plotTk import", "on the FDF in 'key=val' and JSON formats.\") print(\"> %s\"", "The ASCII file should the following columns, in a space", "verbose = verbose, units = args.units, prefixOut = prefixOut, args", "# Calculate the weighting as 1/sigma^2 or all 1s (uniform)", "print(\"Saving the measurements on the FDF in 'key=val' and JSON", "%.4g %s' % (mDict[\"dFDFcorMAD\"],units)) log('FDF Noise (rms) = %.4g %s'", "2.0) else: weightType = \"uniform\" weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat) if", "Sanity checks if not os.path.exists(args.dataFile[0]): print(\"File does not exist: '%s'.\"", "# rmsFig.show() #-------------------------------------------------------------------------# # Calculate some wavelength parameters lambdaSqArr_m2 =", "with Python 2! Please use Python 3.') exit() C =", "Read the data-file. Format=space-delimited, comments=\"#\". if verbose: print(\"Reading the data", "phiMax_radm2=None, dPhi_radm2=None, nSamples=10.0, weightType=\"variance\", fitRMSF=False, noStokesI=False, phiNoise_radm2=1e6, nBits=32, showPlots=False, debug=False,", "the weighting as 1/sigma^2 or all 1s (uniform) if weightType==\"variance\":", "if verbose: print(\"Saving the measurements on the FDF in 'key=val'", "Q, U] _RMsynth.dat: list of derived parameters for RM spectrum", "in each channel. 
dI (array_like): Error in Stokes I intensity", "1s (uniform) if weightType==\"variance\": weightArr = 1.0 / np.power(dQUArr, 2.0)", "= (dQArr + dUArr)/2.0 # Fit the Stokes I spectrum", "the measurements to a \"key=value\" text file outFile = prefixOut", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "spectra.\") # If no Stokes I present, create a dummy", "= prefixOut + \".RMSF-dirtyFDF-plots.pdf\" if verbose: print(\"> \" + outFilePlot)", "# Run RM-synthesis on the spectra mDict, aDict = run_rmsynth(data", "each channel. dQ (array_like): Error in Stokes Q intensity in", "good_channels=np.where(np.logical_and(weightArr != 0,np.isfinite(qArr)))[0] mDict[\"min_freq\"]=float(np.min(freqArr_Hz[good_channels])) mDict[\"max_freq\"]=float(np.max(freqArr_Hz[good_channels])) mDict[\"N_channels\"]=good_channels.size mDict[\"median_channel_width\"]=float(np.median(np.diff(freqArr_Hz))) # Measure the", "I (array_like): Stokes I intensity in each channel. Q (array_like):", "= qArr, uArr = uArr, dqArr = dqArr, duArr =", "# FDF is in fracpol units initially, convert back to", "to calculate uncertainties mDict = measure_FDF_parms(FDF = dirtyFDF, phiArr =", "arrdict[\"weightArr\"]))) # Save the measurements to a \"key=value\" text file", "RMutils.util_plotTk import plot_rmsIQU_vs_nu_ax if sys.version_info.major == 2: print('RM-tools will no", "flag)\") parser.add_argument(\"-s\", dest=\"nSamples\", type=float, default=10, help=\"number of samples across the", "# # Copyright (c) 2015 - 2018 <NAME> # #", "(k, v)) FH.close() outFile = prefixOut + \"_RMsynth.json\" if verbose:", "spectrum. phiMax_radm2 (float): Maximum absolute Faraday depth (rad/m^2). dPhi_radm2 (float):", "FDF is in fracpol units initially, convert back to flux", "units of the data. 
[Jy/beam]\") args = parser.parse_args() # Sanity", "lambdaSqArr_m2, phiArr_radm2 = phiArr_radm2, weightArr = weightArr, mskArr = ~np.isfinite(qArr),", "RM Spread Function and dirty FDF if showPlots or saveOutput:", "verbose: print(\"> Trying [freq_Hz, I, Q, U, dI, dQ, dU]\",", "mDict, aDict def readFile(dataFile, nBits, verbose=True, debug=False): \"\"\" Read the", "to the RMSF [False]\") parser.add_argument(\"-l\", dest=\"phiMax_radm2\", type=float, default=None, help=\"absolute max", "np.power(dQUArr, 2.0) else: weightType = \"uniform\" weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat)", "spectrum [False].\") parser.add_argument(\"-b\", dest=\"bit64\", action=\"store_true\", help=\"use 64-bit floating point precision", "each channel. or [freq_Hz, q, u, dq, du] freq_Hz (array_like):", "fwhmRMSF = float(fwhmRMSFArr) # ALTERNATE RM-SYNTHESIS CODE --------------------------------------------# #dirtyFDF, [phi2Arr_radm2,", "None: dPhi_radm2 = fwhmRMSF_radm2 / nSamples if phiMax_radm2 is None:", "measure_FDF_parms from RMutils.util_RM import measure_qu_complexity from RMutils.util_RM import measure_fdf_complexity from", "arrays and plots [False].\") parser.add_argument(\"-D\", dest=\"debug\", action=\"store_true\", help=\"turn on debugging", "2.0 phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM) phiArr_radm2 = phiArr_radm2.astype(dtFloat) if", "polynomial and the resulting model used to create fractional q", "[freq_Hz, Q, U, Q_err, U_err] To get outputs, one or", "/ (np.sum(weightArr))**2 ) # Measure the parameters of the dirty", "to 10 FWHM # Faraday depth sampling. 
Zero always centred", "if verbose: log(\"PhiArr = %.2f to %.2f by %.2f (%d", "channel weights [freq_Hz, weight] \"\"\" # Parse the command line", "(+/-%.4g) rad/m^2' % (mDict[\"phiPeakPIfit_rm2\"], mDict[\"dPhiPeakPIfit_rm2\"])) log('freq0_GHz = %.4g ' %", "if verbose: # Print the results to the screen log()", "of the following flags must be set: -S, -p, -v.", "if plotting enabled if showPlots: plt.show() elif saveOutput or debug:", "#=============================================================================# import sys import os import time import traceback import", "not work on Mac OS X) # try: # specFig.canvas.toolbar.pack_forget()", "= poly5(fitDict[\"p\"])(freq0_Hz/1e9) dirtyFDF *= (Ifreq0) # FDF is in fracpol", "\",\".join([str(x) for x in fitDict[\"p\"]]) mDict[\"IfitStat\"] = fitDict[\"fitStatus\"] mDict[\"IfitChiSqRed\"] =", "documentation files (the \"Software\"), # # to deal in the", "np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM) phiArr_radm2 = phiArr_radm2.astype(dtFloat) if verbose: log(\"PhiArr =", "except Exception: if verbose: log(\"...failed.\") if debug: log(traceback.format_exc()) sys.exit() if", "= log) fwhmRMSF = float(fwhmRMSFArr) # ALTERNATE RM-SYNTHESIS CODE --------------------------------------------#", "1s) fitRMSF (bool): Fit a Gaussian to the RMSF? noStokesI", "if verbose: print(\"...failed.\") if debug: print(traceback.format_exc()) sys.exit() if verbose: print(\"Successfully", "& U spectrum.# # # # MODIFIED: 16-Nov-2018 by <NAME>", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # #", "verbose messages to terminal? 
debug (bool): Print full traceback in", "[m/s] #-----------------------------------------------------------------------------# def run_rmsynth(data, polyOrd=3, phiMax_radm2=None, dPhi_radm2=None, nSamples=10.0, weightType=\"variance\", fitRMSF=False,", "# Calculate the Rotation Measure Spread Function RMSFArr, phi2Arr_radm2, fwhmRMSFArr,", "dqArr = dqArr, duArr = duArr, freqHirArr_Hz = freqHirArr_Hz, IModArr", "np.sum(weightArr**2 * np.nan_to_num(dQUArr)**2) / (np.sum(weightArr))**2 ) # Measure the parameters", "= units) # Use the custom navigation toolbar (does not", "verbose: # Print the results to the screen log() log('-'*80)", "weightType) startTime = time.time() # Perform RM-synthesis on the spectrum", "qArr, uArr = uArr, dIArr = dIArr, dqArr = dqArr,", "= prefixOut + \"_RMsynth.json\" if verbose: print(\"> %s\" % outFile)", "saveOutput: fdfFig = plt.figure(figsize=(12.0, 8)) plot_rmsf_fdf_fig(phiArr = phiArr_radm2, FDF =", "FH = open(outFile, \"w\") for k, v in outdict.items(): FH.write(\"%s=%s\\n\"", "bbox_inches = 'tight') # #if verbose: print \"Press <RETURN> to", "FDF in 'key=val' and JSON formats.\") print(\"> %s\" % outFile)", "64-bit floating point precision [False (uses 32-bit)]\") parser.add_argument(\"-p\", dest=\"showPlots\", action=\"store_true\",", "(bool: Is Stokes I data provided? phiNoise_radm2 (float): ???? 
nBits", "cputime) # Determine the Stokes I value at lam0Sq_m2 from", "Returns: data (list of arrays): List containing the columns found", "(GHz)') ax.set_ylabel('RMS '+units) ax.set_title(\"RMS noise in Stokes Q, U and", "action=\"store_true\", help=\"use 64-bit floating point precision [False (uses 32-bit)]\") parser.add_argument(\"-p\",", "# # # PURPOSE: API for runnning RM-synthesis on an", "of the dirty FDF # Use the theoretical noise to", "import toscalar from RMutils.util_misc import create_frac_spectra from RMutils.util_misc import poly5", "*= (Ifreq0) # FDF is in fracpol units initially, convert", "1.0) startPhi_radm2 = - (nChanRM-1.0) * dPhi_radm2 / 2.0 stopPhi_radm2", "from the command line. \"\"\" # Help string to be", "to file. nBits (int): number of bits to store the", "a space separated format: [freq_Hz, I, Q, U, I_err, Q_err,", "if weightType==\"variance\": weightArr = 1.0 / np.power(dQUArr, 2.0) else: weightType", "not plt.isinteractive(): # specFig.show() # DEBUG (plot the Q, U", "% (mDict[\"dFDFrms\"],units)) log('FDF SNR = %.4g ' % (mDict[\"snrPIfit\"])) log('sigma_add(q)", "np.nanmin(np.abs(np.diff(freqArr_Hz))) lambdaSqRange_m2 = ( np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2) ) dLambdaSqMin_m2 =", "= %.4g %s' % (mDict[\"dFDFcorMAD\"],units)) log('FDF Noise (rms) = %.4g", "outFile) np.savetxt(outFile, list(zip(arrdict[\"phiArr_radm2\"], arrdict[\"dirtyFDF\"].real, arrdict[\"dirtyFDF\"].imag))) outFile = prefixOut + \"_RMSF.dat\"", "Zero always centred on middle channel nChanRM = int(round(abs((phiMax_radm2 -", "readFile(args.dataFile[0],nBits, verbose=verbose, debug=args.debug) # Run RM-synthesis on the spectra mDict,", "%.4g ' % (mDict[\"freq0_Hz\"]/1e9)) log('I freq0 = %.4g %s' %", "UArr, dIArr, dQArr, dUArr] except Exception: if verbose: print(\"...failed.\") #", "DEALINGS IN THE SOFTWARE. 
# # # #=============================================================================# import sys", "results to the screen log() log('-'*80) log('RESULTS:\\n') log('FWHM RMSF =", "arrdict[\"RMSFArr\"].imag))) outFile = prefixOut + \"_weight.dat\" if verbose: print(\"> %s\"", "help=\"show the plots [False].\") parser.add_argument(\"-v\", dest=\"verbose\", action=\"store_true\", help=\"verbose output [False].\")", "verbose: print(\"... success.\") data=[freqArr_Hz, QArr, UArr, dQArr, dUArr] noStokesI =", "Use the theoretical noise to calculate uncertainties mDict = measure_FDF_parms(FDF", "\"\"\" # Sanity checks if not os.path.exists(args.dataFile[0]): print(\"File does not", "THE # # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "= freqArr_GHz, IArr = IArr, QArr = QArr, UArr =", "U intensity in each channel. or [freq_Hz, q, u, dq,", "middle channel nChanRM = int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) *", "\"\"\" Run RM-synthesis on Stokes I, Q and U spectra", "mDict[\"freq0_Hz\"] = toscalar(freq0_Hz) mDict[\"fwhmRMSF\"] = toscalar(fwhmRMSF) mDict[\"dQU\"] = toscalar(nanmedian(dQUArr)) mDict[\"dFDFth\"]", "= ~np.isfinite(qArr), lam0Sq_m2 = lam0Sq_m2, double = True, fitRMSF =", "[Jy/beam]\") args = parser.parse_args() # Sanity checks if not os.path.exists(args.dataFile[0]):", "if verbose: print(\"Reading the data file '%s':\" % dataFile) #", "navigation toolbar # try: # fdfFig.canvas.toolbar.pack_forget() # CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window) #", "NONINFRINGEMENT. 
IN NO EVENT SHALL THE # # AUTHORS OR", "% (mDict[\"dFDFth\"],units)) log('FDF Noise (Corrected MAD) = %.4g %s' %", "ax = rmsFig.add_subplot(111) ax.plot(freqArr_Hz/1e9, dQUArr, marker='o', color='k', lw=0.5, label='rms <QU>')", "weight arrays to ASCII files.\") outFile = prefixOut + \"_FDFdirty.dat\"", "_FDFdirty.dat: Dirty FDF/RM Spectrum [Phi, Q, U] _RMSF.dat: Computed RMSF", "import plot_rmsIQU_vs_nu_ax if sys.version_info.major == 2: print('RM-tools will no longer", "= \"complex\" + str(2*nBits) # Output prefix is derived from", "I data in use.\") IArr = np.ones_like(QArr) dIArr = np.zeros_like(QArr)", "Permission is hereby granted, free of charge, to any person", "???? nBits (int): Precision of floating point numbers. showPlots (bool):", "Q intensity in each channel. U (array_like): Stokes U intensity", "toscalar(lam0Sq_m2) mDict[\"freq0_Hz\"] = toscalar(freq0_Hz) mDict[\"fwhmRMSF\"] = toscalar(fwhmRMSF) mDict[\"dQU\"] = toscalar(nanmedian(dQUArr))", "FDF # Use the theoretical noise to calculate uncertainties mDict", "default=\"variance\", help=\"weighting [inverse variance] or 'uniform' (all 1s).\") parser.add_argument(\"-o\", dest=\"polyOrd\",", "dI, dQ, dU] freq_Hz (array_like): Frequency of each channel in", "(endTime - startTime) if verbose: log(\"> RM-synthesis completed in %.2f", ") if args.saveOutput: saveOutput(mDict, aDict, prefixOut, verbose) #-----------------------------------------------------------------------------# if __name__", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # #", "for wariance weights! weightArr = np.where(np.isnan(weightArr), 0.0, weightArr) dFDFth =", "verbose: print(\"> %s\" % outFile) json.dump(dict(outdict), open(outFile, \"w\")) #-----------------------------------------------------------------------------# def", "1s).\") parser.add_argument(\"-o\", dest=\"polyOrd\", type=int, default=2, help=\"polynomial order to fit to", "spectra. 
The ASCII file should the following columns, in a", "dQArr, dUArr) = data if verbose: log(\"... success.\") noStokesI =", "help=\"Intensity units of the data. [Jy/beam]\") args = parser.parse_args() #", "intensity (Q/I) in each channel. u (array_like): Fractional Stokes U", "spectral index fit.\") freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000) IModHirArr =", "sampling. Zero always centred on middle channel nChanRM = int(round(abs((phiMax_radm2", "(dict): Summary of RM synthesis results. aDict (dict): Data output", "Stokes Q intensity in each channel. du (array_like): Error in", "debug plots:\") outFilePlot = prefixOut + \".debug-plots.pdf\" if verbose: print(\">", "u, dq, du]\", end=' ') (freqArr_Hz, QArr, UArr, dQArr, dUArr)", "must be set: -S, -p, -v. \"\"\" epilog_text=\"\"\" Outputs with", "IModHirArr, fig = specFig, units = units) # Use the", "argparse.ArgumentParser(description=descStr,epilog=epilog_text, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"dataFile\", metavar=\"dataFile.dat\", nargs=1, help=\"ASCII file containing Stokes spectra", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "FDF, RMSF weight arrays to ASCII files.\") outFile = prefixOut", "dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz))) lambdaSqRange_m2 = ( np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2) )", "Read the I, Q & U data from the ASCII", "verbose = True, debug = debug) # Plot the data", "array to ASCII files if verbose: print(\"Saving the dirty FDF,", "\"uniform\" \"variance\" -- Weight by uncertainty in Q and U.", "channel. du (array_like): Error in fractional Stokes U intensity in", "plot_Ipqu_spectra_fig from RMutils.util_plotTk import plot_rmsf_fdf_fig from RMutils.util_plotTk import plot_complexity_fig from", "Stokes I model fit if verbose: log(\"Plotting the input data", "ANY KIND, EXPRESS OR # # IMPLIED, INCLUDING BUT NOT", "Turn on debugging messages & plots? verbose (bool): Verbosity. 
log", "measure_qu_complexity from RMutils.util_RM import measure_fdf_complexity from RMutils.util_misc import nanmedian from", "(array_like): Stokes I intensity in each channel. Q (array_like): Stokes", "by RM synthesis. \"\"\" # Sanity checks if not os.path.exists(args.dataFile[0]):", "RM_radm2 = mDict[\"phiPeakPIfit_rm2\"]) mDict.update(mD) # Debugging plots for spectral complexity", "2! Please use Python 3.') exit() C = 2.997924538e8 #", "dQ, dU try: if verbose: print(\"> Trying [freq_Hz, I, Q,", "channel. or [freq_Hz, q, u, dq, du] freq_Hz (array_like): Frequency", "the Stokes I model # Multiply the dirty FDF by", "= np.zeros_like(QArr) # Convert to GHz for convenience freqArr_GHz =", "xRange = (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9 ax.set_xlim( np.min(freqArr_Hz)/1e9 - xRange*0.05, np.max(freqArr_Hz)/1e9 + xRange*0.05)", "#=============================================================================# # # # The MIT License (MIT) # #", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # # IMPLIED,", "Error in Stokes Q intensity in each channel. dU (array_like):", "= \\ np.loadtxt(dataFile, unpack=True, dtype=dtFloat) if verbose: print(\"... success.\") data=[freqArr_Hz,", "verbose: log(\"...failed.\") # freq_Hz, q, u, dq, du try: if", "the plots [False].\") parser.add_argument(\"-v\", dest=\"verbose\", action=\"store_true\", help=\"verbose output [False].\") parser.add_argument(\"-S\",", "relative or absolute path to file. nBits (int): number of", "verbose=True, debug=False): \"\"\" Read the I, Q & U data", "du]. 
\"\"\" # Default data types dtFloat = \"float\" +", "dqArr=pD[\"dyArrQ\"], sigmaAddqArr=pD[\"sigmaAddArrQ\"], chiSqRedqArr=pD[\"chiSqRedArrQ\"], probqArr=pD[\"probArrQ\"], uArr=pD[\"yArrU\"], duArr=pD[\"dyArrU\"], sigmaAdduArr=pD[\"sigmaAddArrU\"], chiSqReduArr=pD[\"chiSqRedArrU\"], probuArr=pD[\"probArrU\"], mDict=mDict)", "Exception: # pass # Display the figure # if not", "persons to whom the # # Software is furnished to", "plots:\") outFilePlot = prefixOut + \".debug-plots.pdf\" if verbose: print(\"> \"", "to the screen log() log('-'*80) log('RESULTS:\\n') log('FWHM RMSF = %.4g", "Calculate the Rotation Measure Spread Function RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr", "Q intensity (Q/I) in each channel. u (array_like): Fractional Stokes", "dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2))) dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2))) # Set the Faraday", "SHALL THE # # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "so, subject to the following conditions: # # # #", "as m import numpy as np import matplotlib.pyplot as plt", "U] _RMsynth.dat: list of derived parameters for RM spectrum (approximately", "_RMSF.dat: Computed RMSF [Phi, Q, U] _RMsynth.dat: list of derived", "plt.isinteractive(): # specFig.show() # DEBUG (plot the Q, U and", "%.2f (%d chans).\" % (phiArr_radm2[0], phiArr_radm2[-1], float(dPhi_radm2), nChanRM)) # Calculate", "in the Stokes spectra.\") # If no Stokes I present,", "QArr, UArr, dQArr, dUArr) = data if verbose: log(\"... success.\")", "separated format: [freq_Hz, I, Q, U, I_err, Q_err, U_err] OR", "dtFloat = \"float\" + str(nBits) dtComplex = \"complex\" + str(2*nBits)", "= U/I spectra. 
The ASCII file should the following columns,", "64: log(\"Caution: Stokes I model has low signal-to-noise.\") #Add information", "SNR = %.4g ' % (mDict[\"snrPIfit\"])) log('sigma_add(q) = %.4g (+%.4g,", "PURPOSE: API for runnning RM-synthesis on an ASCII Stokes I,", "hereby granted, free of charge, to any person obtaining a", "weightArr = 1.0 / np.power(dQUArr, 2.0) else: weightType = \"uniform\"", "True, fitRMSF = fitRMSF, fitRMSFreal = False, nBits = nBits,", "fitRMSFreal = False, nBits = nBits, verbose = verbose, log", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "print('RM-tools will no longer run with Python 2! Please use", "type=int, default=2, help=\"polynomial order to fit to I spectrum [2].\")", "prefixOut=\"prefixOut\", args=None): \"\"\"Run RM synthesis on 1D data. Args: data", "# all copies or substantial portions of the Software. #", "dU try: if verbose: print(\"> Trying [freq_Hz, I, Q, U,", "dest=\"fitRMSF\", action=\"store_true\", help=\"fit a Gaussian to the RMSF [False]\") parser.add_argument(\"-l\",", "U] _RMSF.dat: Computed RMSF [Phi, Q, U] _RMsynth.dat: list of", "mDict[\"IfitStat\"] = fitDict[\"fitStatus\"] mDict[\"IfitChiSqRed\"] = fitDict[\"chiSqRed\"] mDict[\"lam0Sq_m2\"] = toscalar(lam0Sq_m2) mDict[\"freq0_Hz\"]", "depth range fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2 if", "synthesis results. aDict (dict): Data output by RM synthesis. \"\"\"", "QArr, UArr, dIArr, dQArr, dUArr] except Exception: if verbose: print(\"...failed.\")", "use Python 3.') exit() C = 2.997924538e8 # Speed of", "Stokes I model contains negative values!\") elif fitDict[\"fitStatus\"] >= 64:", "dest=\"debug\", action=\"store_true\", help=\"turn on debugging messages & plots [False].\") parser.add_argument(\"-U\",", "Format=space-delimited, comments=\"#\". 
if verbose: print(\"Reading the data file '%s':\" %", "freq0_Hz = C / m.sqrt(lam0Sq_m2) Ifreq0 = poly5(fitDict[\"p\"])(freq0_Hz/1e9) dirtyFDF *=", "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "q (array_like): Fractional Stokes Q intensity (Q/I) in each channel.", "RMSFArr, fwhmRMSF = fwhmRMSF, vLine = mDict[\"phiPeakPIfit_rm2\"], fig = fdfFig,", "parser.parse_args() # Sanity checks if not os.path.exists(args.dataFile[0]): print(\"File does not", "all 1s (uniform) if weightType==\"variance\": weightArr = 1.0 / np.power(dQUArr,", "dirty FDF, RMSF and weight array to ASCII files if", "the data file '%s':\" % dataFile) # freq_Hz, I, Q,", "tmpFig = plot_complexity_fig(xArr=pD[\"xArrQ\"], qArr=pD[\"yArrQ\"], dqArr=pD[\"dyArrQ\"], sigmaAddqArr=pD[\"sigmaAddArrQ\"], chiSqRedqArr=pD[\"chiSqRedArrQ\"], probqArr=pD[\"probArrQ\"], uArr=pD[\"yArrU\"], duArr=pD[\"dyArrU\"],", "publish, distribute, sublicense, # # and/or sell copies of the", "elif fitDict[\"fitStatus\"] >= 64: log(\"Caution: Stokes I model has low", "%.4g (+%.4g, -%.4g)' % (mDict[\"sigmaAddQ\"], mDict[\"dSigmaAddPlusQ\"], mDict[\"dSigmaAddMinusQ\"])) log('sigma_add(u) = %.4g", "= Q/I and u = U/I spectra. The ASCII file", "the Stokes I spectrum [False].\") parser.add_argument(\"-b\", dest=\"bit64\", action=\"store_true\", help=\"use 64-bit", "the ASCII file. Inputs: datafile (str): relative or absolute path", "% weightType) startTime = time.time() # Perform RM-synthesis on the", "#dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \\ # do_rmsynth(qArr, uArr,", "fractional Stokes Q intensity in each channel. du (array_like): Error", "dictionary aDict = dict() aDict[\"phiArr_radm2\"] = phiArr_radm2 aDict[\"phi2Arr_radm2\"] = phi2Arr_radm2", "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "Measure the parameters of the dirty FDF # Use the", "channel in Hz. 
I (array_like): Stokes I intensity in each", "by <NAME> # # # #=============================================================================# # # # The", "all copies or substantial portions of the Software. # #", "Calculate the theoretical noise in the FDF !!Old formula only", "pass # Display the figure # fdfFig.show() # Pause if", "weightType = \"uniform\" weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat) if verbose: log(\"Weight", "args.dataFile[0]) sys.exit() prefixOut, ext = os.path.splitext(args.dataFile[0]) # Default data types", "FWHM # Faraday depth sampling. Zero always centred on middle", "of the q and u spectra mDict[\"fracPol\"] = mDict[\"ampPeakPIfit\"]/(Ifreq0) mD,", "debugging messages & plots [False].\") parser.add_argument(\"-U\", dest=\"units\", type=str, default=\"Jy/beam\", help=\"Intensity", "I model # Multiply the dirty FDF by Ifreq0 to", "data and the Stokes I model fit if verbose: log(\"Plotting", "# # # NAME: do_RMsynth_1D.py # # # # PURPOSE:", "Software is furnished to do so, subject to the following", "polyOrd (int): Order of polynomial to fit to Stokes I", "the dirty FDF by Ifreq0 to recover the PI freq0_Hz", "Stokes I data provided? phiNoise_radm2 (float): ???? nBits (int): Precision", "associated documentation files (the \"Software\"), # # to deal in", "dirty FDF, RMSF weight arrays to ASCII files.\") outFile =", "verbose: log(\"... success.\") noStokesI = True except Exception: if verbose:", "'%s':\" % dataFile) # freq_Hz, I, Q, U, dI, dQ,", "success.\") noStokesI = True except Exception: if verbose: log(\"...failed.\") if", "type=str, default=\"Jy/beam\", help=\"Intensity units of the data. [Jy/beam]\") args =", "plot:\") outFilePlot = prefixOut + \".RMSF-dirtyFDF-plots.pdf\" if verbose: print(\"> \"", "duArr = duArr, fracPol = mDict[\"fracPol\"], psi0_deg = mDict[\"polAngle0Fit_deg\"], RM_radm2", "arrays): List containing the columns found in the file. 
If", "phiArr_radm2 = phiArr_radm2.astype(dtFloat) if verbose: log(\"PhiArr = %.2f to %.2f", "= 2.0 * m.sqrt(3.0) / lambdaSqRange_m2 if dPhi_radm2 is None:", "OR OTHERWISE, ARISING # # FROM, OUT OF OR IN", "-v. \"\"\" epilog_text=\"\"\" Outputs with -S flag: _FDFdirty.dat: Dirty FDF/RM", "2.0 stopPhi_radm2 = + (nChanRM-1.0) * dPhi_radm2 / 2.0 phiArr_radm2", "data = readFile(args.dataFile[0],nBits, verbose=verbose, debug=args.debug) # Run RM-synthesis on the", "data if verbose: log(\"... success.\") except Exception: if verbose: log(\"...failed.\")", "# fdfFig.canvas.toolbar.pack_forget() # CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window) # except Exception: # pass", "<NAME> # # MODIFIED: 23-October-2019 by <NAME> # # #", "ANY CLAIM, DAMAGES OR OTHER # # LIABILITY, WHETHER IN", "to do so, subject to the following conditions: # #", "= dirtyFDF, phiArr = phiArr_radm2, fwhmRMSF = fwhmRMSF, dFDF =", "= freqArr_Hz, qArr = qArr, uArr = uArr, dqArr =", "data and spectral index fit.\") freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)", "MAD from RMutils.util_plotTk import plot_Ipqu_spectra_fig from RMutils.util_plotTk import plot_rmsf_fdf_fig from", "Faraday depth sampling. Zero always centred on middle channel nChanRM", "(nChanRM-1.0) * dPhi_radm2 / 2.0 phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM)", "= log) # Calculate the Rotation Measure Spread Function RMSFArr,", "qArr, uArr, dqArr, duArr, fitDict = \\ create_frac_spectra(freqArr = freqArr_GHz,", "def main(): import argparse \"\"\" Start the function to perform", "and spectral index fit.\") freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000) IModHirArr", "[freq_Hz, I, Q, U, dI, dQ, dU]\", end=' ') (freqArr_Hz,", "RMSFArr], lam0Sq_m2, fwhmRMSF = \\ # do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2,", "end=' ') (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) =", "copies or substantial portions of the Software. # # #", "intensity in each channel. 
du (array_like): Error in fractional Stokes", "(int): Order of polynomial to fit to Stokes I spectrum.", "print(\"> %s\" % outFile) np.savetxt(outFile, list(zip(arrdict[\"phiArr_radm2\"], arrdict[\"dirtyFDF\"].real, arrdict[\"dirtyFDF\"].imag))) outFile =", "default=\"Jy/beam\", help=\"Intensity units of the data. [Jy/beam]\") args = parser.parse_args()", "= mDict[\"ampPeakPIfit\"]/(Ifreq0) mD, pD = measure_qu_complexity(freqArr_Hz = freqArr_Hz, qArr =", "# # # # PURPOSE: API for runnning RM-synthesis on", "-- Weight uniformly (i.e. with 1s) fitRMSF (bool): Fit a", "phiArr_radm2 = phiArr_radm2, weightArr = weightArr, mskArr = ~np.isfinite(qArr), lam0Sq_m2", "I, Q & U data from the ASCII file. Inputs:", "print(\"> \" + outFilePlot) tmpFig.savefig(outFilePlot, bbox_inches = 'tight') else: tmpFig.show()", "line options parser = argparse.ArgumentParser(description=descStr,epilog=epilog_text, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"dataFile\", metavar=\"dataFile.dat\", nargs=1, help=\"ASCII", "= max(phiMax_radm2, fwhmRMSF_radm2*10.) # Force the minimum phiMax to 10", "= duArr, fracPol = mDict[\"fracPol\"], psi0_deg = mDict[\"polAngle0Fit_deg\"], RM_radm2 =", "action=\"store_true\", help=\"verbose output [False].\") parser.add_argument(\"-S\", dest=\"saveOutput\", action=\"store_true\", help=\"save the arrays", "data. [Jy/beam]\") args = parser.parse_args() # Sanity checks if not", "weightArr) dFDFth = np.sqrt( np.sum(weightArr**2 * np.nan_to_num(dQUArr)**2) / (np.sum(weightArr))**2 )", "log(\"... success.\") noStokesI = True except Exception: if verbose: log(\"...failed.\")", "dummy = os.path.split(args.dataFile[0]) # Set the floating point precision nBits", "intensity in each channel. 
dQ (array_like): Error in Stokes Q", "RMSF weight arrays to ASCII files.\") outFile = prefixOut +", "dPhi_radm2)) * 2.0 + 1.0) startPhi_radm2 = - (nChanRM-1.0) *", "type=float, default=10, help=\"number of samples across the RMSF lobe [10].\")", "distribute, sublicense, # # and/or sell copies of the Software,", "outFile) FH = open(outFile, \"w\") for k, v in outdict.items():", "the # # Software is furnished to do so, subject", "Kwargs: polyOrd (int): Order of polynomial to fit to Stokes", "and the resulting model used to create fractional q =", "do so, subject to the following conditions: # # #", "spectrum and create the fractional spectra IModArr, qArr, uArr, dqArr,", "dI, dQ, dU], else [freq_Hz, q, u, dq, du]. \"\"\"", "log) # Calculate the Rotation Measure Spread Function RMSFArr, phi2Arr_radm2,", "prefixOut + \"_weight.dat\" if verbose: print(\"> %s\" % outFile) np.savetxt(outFile,", "= \\ create_frac_spectra(freqArr = freqArr_GHz, IArr = IArr, QArr =", "= units) # Use the custom navigation toolbar # try:", "color='r', lw=0.5, label='rms U') xRange = (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9 ax.set_xlim( np.min(freqArr_Hz)/1e9 -", "# freq_Hz, q, u, dq, du try: if verbose: log(\">", "\"Software\"), # # to deal in the Software without restriction,", "= mDict[\"fracPol\"], psi0_deg = mDict[\"polAngle0Fit_deg\"], RM_radm2 = mDict[\"phiPeakPIfit_rm2\"]) mDict.update(mD) #", "= %.4g (+/-%.4g) %s' % (mDict[\"ampPeakPIfit\"], mDict[\"dAmpPeakPIfit\"],units)) log('QU Noise =", "du] freq_Hz (array_like): Frequency of each channel in Hz. q", "# if not plt.isinteractive(): # specFig.show() # DEBUG (plot the", "Fit a Gaussian to the RMSF? 
noStokesI (bool: Is Stokes", "%.2f by %.2f (%d chans).\" % (phiArr_radm2[0], phiArr_radm2[-1], float(dPhi_radm2), nChanRM))", "toscalar from RMutils.util_misc import create_frac_spectra from RMutils.util_misc import poly5 from", "= args.nSamples, weightType = args.weightType, fitRMSF = args.fitRMSF, noStokesI =", "fwhmRMSF = \\ # do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr) #-------------------------------------------------------------------------#", "[False].\") parser.add_argument(\"-U\", dest=\"units\", type=str, default=\"Jy/beam\", help=\"Intensity units of the data.", "options parser = argparse.ArgumentParser(description=descStr,epilog=epilog_text, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"dataFile\", metavar=\"dataFile.dat\", nargs=1, help=\"ASCII file", "Exception: if verbose: print(\"...failed.\") if debug: print(traceback.format_exc()) sys.exit() if verbose:", "or absolute path to file. nBits (int): number of bits", "= toscalar(dFDFth) mDict[\"units\"] = units if fitDict[\"fitStatus\"] >= 128: log(\"WARNING:", "files.\") outFile = prefixOut + \"_FDFdirty.dat\" if verbose: print(\"> %s\"", "set: -S, -p, -v. 
\"\"\" epilog_text=\"\"\" Outputs with -S flag:", "dQ, dU try: if verbose: log(\"> Trying [freq_Hz, I, Q,", "spectrum [2].\") parser.add_argument(\"-i\", dest=\"noStokesI\", action=\"store_true\", help=\"ignore the Stokes I spectrum", "spectra mDict[\"fracPol\"] = mDict[\"ampPeakPIfit\"]/(Ifreq0) mD, pD = measure_qu_complexity(freqArr_Hz = freqArr_Hz,", "IModArr = IModHirArr, fig = specFig, units = units) #", "= measure_qu_complexity(freqArr_Hz = freqArr_Hz, qArr = qArr, uArr = uArr,", "parser.add_argument(\"-b\", dest=\"bit64\", action=\"store_true\", help=\"use 64-bit floating point precision [False (uses", "rmsFig = plt.figure(figsize=(12.0, 8)) ax = rmsFig.add_subplot(111) ax.plot(freqArr_Hz/1e9, dQUArr, marker='o',", "log = log) fwhmRMSF = float(fwhmRMSFArr) # ALTERNATE RM-SYNTHESIS CODE", "fit.\") freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000) IModHirArr = poly5(fitDict[\"p\"])(freqHirArr_Hz/1e9) specFig", "UArr = UArr, dIArr = dIArr, dQArr = dQArr, dUArr", "plt from RMutils.util_RM import do_rmsynth from RMutils.util_RM import do_rmsynth_planes from", "if showPlots or saveOutput: fdfFig = plt.figure(figsize=(12.0, 8)) plot_rmsf_fdf_fig(phiArr =", "use. units (str): Units of data. Returns: mDict (dict): Summary", "freqHirArr_Hz, IModArr = IModHirArr, fig = specFig, units = units)", "mDict[\"dPhiPeakPIfit_rm2\"])) log('freq0_GHz = %.4g ' % (mDict[\"freq0_Hz\"]/1e9)) log('I freq0 =", "dQ, dU] freq_Hz (array_like): Frequency of each channel in Hz.", "%s\" % outFile) np.savetxt(outFile, list(zip(arrdict[\"freqArr_Hz\"], arrdict[\"weightArr\"]))) # Save the measurements", "# Perform RM-synthesis on the spectrum dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ", "channel. Q (array_like): Stokes Q intensity in each channel. U", "if args.bit64: nBits = 64 verbose=args.verbose data = readFile(args.dataFile[0],nBits, verbose=verbose,", "qArr = qArr, uArr = uArr, dIArr = dIArr, dqArr", "nSamples (float): Number of samples across the RMSF. 
weightType (str):", "spectra & errors.\") parser.add_argument(\"-t\", dest=\"fitRMSF\", action=\"store_true\", help=\"fit a Gaussian to", "to any person obtaining a # # copy of this", "RMutils.util_misc import nanmedian from RMutils.util_misc import toscalar from RMutils.util_misc import", "%.4g rad/m^2' % (mDict[\"fwhmRMSF\"])) log('Pol Angle = %.4g (+/-%.4g) deg'", "if verbose: print(\"...failed.\") # freq_Hz, q, u, dq, du try:", "pass # Display the figure # if not plt.isinteractive(): #", "# Output prefix is derived from the input file name", "mDict[\"fwhmRMSF\"] = toscalar(fwhmRMSF) mDict[\"dQU\"] = toscalar(nanmedian(dQUArr)) mDict[\"dFDFth\"] = toscalar(dFDFth) mDict[\"units\"]", "= toscalar(nanmedian(dQUArr)) mDict[\"dFDFth\"] = toscalar(dFDFth) mDict[\"units\"] = units if fitDict[\"fitStatus\"]", "%s' % (mDict[\"dFDFcorMAD\"],units)) log('FDF Noise (rms) = %.4g %s' %", "mDict[\"dSigmaAddPlusQ\"], mDict[\"dSigmaAddMinusQ\"])) log('sigma_add(u) = %.4g (+%.4g, -%.4g)' % (mDict[\"sigmaAddU\"], mDict[\"dSigmaAddPlusU\"],", "parser.add_argument(\"-v\", dest=\"verbose\", action=\"store_true\", help=\"verbose output [False].\") parser.add_argument(\"-S\", dest=\"saveOutput\", action=\"store_true\", help=\"save", "and/or sell copies of the Software, and to permit persons", "% (mDict[\"fwhmRMSF\"])) log('Pol Angle = %.4g (+/-%.4g) deg' % (mDict[\"polAngleFit_deg\"],", "\"_RMsynth.dat\" if verbose: print(\"Saving the measurements on the FDF in", "for runnning RM-synthesis on an ASCII Stokes I, Q &", "Frequency of each channel in Hz. 
q (array_like): Fractional Stokes", "print(\"> %s\" % outFile) np.savetxt(outFile, list(zip(arrdict[\"freqArr_Hz\"], arrdict[\"weightArr\"]))) # Save the", "Help string to be shown using the -h option descStr", "+ (nChanRM-1.0) * dPhi_radm2 / 2.0 phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2,", "lambdaSqRange_m2 = ( np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2) ) dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2)))", "I present, create a dummy spectrum = unity if noStokesI:", "dPhi_radm2=None, nSamples=10.0, weightType=\"variance\", fitRMSF=False, noStokesI=False, phiNoise_radm2=1e6, nBits=32, showPlots=False, debug=False, verbose=False,", "import traceback import json import math as m import numpy", "_RMsynth.dat: list of derived parameters for RM spectrum (approximately equivalent", "CONTRACT, TORT OR OTHERWISE, ARISING # # FROM, OUT OF", "size (rad/m^2). nSamples (float): Number of samples across the RMSF.", "Stokes spectra.\") # If no Stokes I present, create a", "chiSqRedqArr=pD[\"chiSqRedArrQ\"], probqArr=pD[\"probArrQ\"], uArr=pD[\"yArrU\"], duArr=pD[\"dyArrU\"], sigmaAdduArr=pD[\"sigmaAddArrU\"], chiSqReduArr=pD[\"chiSqRedArrU\"], probuArr=pD[\"probArrU\"], mDict=mDict) if saveOutput:", "Output prefix is derived from the input file name #", "v)) FH.close() outFile = prefixOut + \"_RMsynth.json\" if verbose: print(\">", "in Stokes Q, U and <Q,U> spectra\") # rmsFig.show() #-------------------------------------------------------------------------#", "in the file. If Stokes I is present, this will", "(mDict[\"sigmaAddQ\"], mDict[\"dSigmaAddPlusQ\"], mDict[\"dSigmaAddMinusQ\"])) log('sigma_add(u) = %.4g (+%.4g, -%.4g)' % (mDict[\"sigmaAddU\"],", "U, dI, dQ, dU] freq_Hz (array_like): Frequency of each channel", "max(phiMax_radm2, fwhmRMSF_radm2*10.) # Force the minimum phiMax to 10 FWHM", "') (freqArr_Hz, QArr, UArr, dQArr, dUArr) = data if verbose:", "Error in fractional Stokes Q intensity in each channel. 
du", "%s\" % outFile) np.savetxt(outFile, list(zip(arrdict[\"phiArr_radm2\"], arrdict[\"dirtyFDF\"].real, arrdict[\"dirtyFDF\"].imag))) outFile = prefixOut", "fitRMSF, fitRMSFreal = False, nBits = nBits, verbose = verbose,", "copy of this software and associated documentation files (the \"Software\"),", "[phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \\ # do_rmsynth(qArr, uArr, lambdaSqArr_m2,", "u, dq, du try: if verbose: print(\"> Trying [freq_Hz, q,", "Measure the complexity of the q and u spectra mDict[\"fracPol\"]", "ax.set_xlabel('$\\\\nu$ (GHz)') ax.set_ylabel('RMS '+units) ax.set_title(\"RMS noise in Stokes Q, U", "dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ = qArr, dataU = uArr, lambdaSqArr_m2", "be set: -S, -p, -v. \"\"\" epilog_text=\"\"\" Outputs with -S", "contains negative values!\") elif fitDict[\"fitStatus\"] >= 64: log(\"Caution: Stokes I", "unpack=True, dtype=dtFloat) if verbose: print(\"... success.\") data=[freqArr_Hz, IArr, QArr, UArr,", "plots? verbose (bool): Verbosity. log (function): Which logging function to", "of the data. 
[Jy/beam]\") args = parser.parse_args() # Sanity checks", "verbose: log(\"> RM-synthesis completed in %.2f seconds.\" % cputime) #", "U_err] To get outputs, one or more of the following", "from RMutils.util_plotTk import plot_rmsIQU_vs_nu_ax if sys.version_info.major == 2: print('RM-tools will", "+ \"_weight.dat\" if verbose: print(\"> %s\" % outFile) np.savetxt(outFile, list(zip(arrdict[\"freqArr_Hz\"],", "if verbose: print(\"> Trying [freq_Hz, I, Q, U, dI, dQ,", "True except Exception: if verbose: print(\"...failed.\") if debug: print(traceback.format_exc()) sys.exit()", "(mDict[\"freq0_Hz\"]/1e9)) log('I freq0 = %.4g %s' % (mDict[\"Ifreq0\"],units)) log('Peak PI", "files (the \"Software\"), # # to deal in the Software", "negative values!\") elif fitDict[\"fitStatus\"] >= 64: log(\"Caution: Stokes I model", "') (freqArr_Hz, QArr, UArr, dQArr, dUArr) = \\ np.loadtxt(dataFile, unpack=True,", "freq0 = %.4g %s' % (mDict[\"Ifreq0\"],units)) log('Peak PI = %.4g", "= %.4g %s' % (mDict[\"dQU\"],units)) log('FDF Noise (theory) = %.4g", "1.0 / np.power(dQUArr, 2.0) else: weightType = \"uniform\" weightArr =", "RMutils.util_RM import measure_qu_complexity from RMutils.util_RM import measure_fdf_complexity from RMutils.util_misc import", "= polyOrd, verbose = True, debug = debug) # Plot", "(float): Maximum absolute Faraday depth (rad/m^2). dPhi_radm2 (float): Faraday depth", "= phiArr_radm2, FDF = dirtyFDF, phi2Arr = phi2Arr_radm2, RMSFArr =", "data as. verbose (bool): Print verbose messages to terminal? debug", "action=\"store_true\", help=\"show the plots [False].\") parser.add_argument(\"-v\", dest=\"verbose\", action=\"store_true\", help=\"verbose output", "sublicense, # # and/or sell copies of the Software, and", "phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2 phiMax_radm2 = max(phiMax_radm2, fwhmRMSF_radm2*10.) 
#", "as np import matplotlib.pyplot as plt from RMutils.util_RM import do_rmsynth", "try: if verbose: log(\"> Trying [freq_Hz, I, Q, U, dI,", "# # # MODIFIED: 16-Nov-2018 by <NAME> # # MODIFIED:", "parser = argparse.ArgumentParser(description=descStr,epilog=epilog_text, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"dataFile\", metavar=\"dataFile.dat\", nargs=1, help=\"ASCII file containing", "dI, dQ, dU]\", end=' ') (freqArr_Hz, IArr, QArr, UArr, dIArr,", "Print full traceback in case of failure? Returns: data (list", "!!Old formula only works for wariance weights! weightArr = np.where(np.isnan(weightArr),", "fit to I spectrum [2].\") parser.add_argument(\"-i\", dest=\"noStokesI\", action=\"store_true\", help=\"ignore the", "if debug: print(traceback.format_exc()) sys.exit() if verbose: print(\"Successfully read in the", "will be [freq_Hz, I, Q, U, dI, dQ, dU], else", "RMS spectrum) if debug: rmsFig = plt.figure(figsize=(12.0, 8)) ax =", "IN THE SOFTWARE. # # # #=============================================================================# import sys import", "[2].\") parser.add_argument(\"-i\", dest=\"noStokesI\", action=\"store_true\", help=\"ignore the Stokes I spectrum [False].\")", "Rotation Measure Spread Function RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \\", "\"\"\"Run RM synthesis on 1D data. Args: data (list): Contains", "mDict[\"dFDFth\"] = toscalar(dFDFth) mDict[\"units\"] = units if fitDict[\"fitStatus\"] >= 128:", "dUArr = dUArr, polyOrd = polyOrd, verbose = True, debug", "I is present, this will be [freq_Hz, I, Q, U,", "3.') exit() C = 2.997924538e8 # Speed of light [m/s]", "verbose: log(\"Successfully read in the Stokes spectra.\") # If no", "Stokes I intensity in each channel. 
dQ (array_like): Error in", "import numpy as np import matplotlib.pyplot as plt from RMutils.util_RM", "prefixOut, ext = os.path.splitext(args.dataFile[0]) # Default data types dtFloat =", "Use the custom navigation toolbar (does not work on Mac", "FOR ANY CLAIM, DAMAGES OR OTHER # # LIABILITY, WHETHER", "The MIT License (MIT) # # # # Copyright (c)", "Noise = %.4g %s' % (mDict[\"dQU\"],units)) log('FDF Noise (theory) =", "Stokes U intensity in each channel. dI (array_like): Error in", "= %.4g (+/-%.4g) rad/m^2' % (mDict[\"phiPeakPIfit_rm2\"], mDict[\"dPhiPeakPIfit_rm2\"])) log('freq0_GHz = %.4g", "datafile (str): relative or absolute path to file. nBits (int):", "mDict, aDict = run_rmsynth(data = data, polyOrd = args.polyOrd, phiMax_radm2", "= units if fitDict[\"fitStatus\"] >= 128: log(\"WARNING: Stokes I model", "Q_err, U_err] OR [freq_Hz, Q, U, Q_err, U_err] To get", "parameters of the dirty FDF # Use the theoretical noise", "= toscalar(Ifreq0) mDict[\"polyCoeffs\"] = \",\".join([str(x) for x in fitDict[\"p\"]]) mDict[\"IfitStat\"]", "nanmedian from RMutils.util_misc import toscalar from RMutils.util_misc import create_frac_spectra from", "mDict[\"Ifreq0\"] = toscalar(Ifreq0) mDict[\"polyCoeffs\"] = \",\".join([str(x) for x in fitDict[\"p\"]])", "%s' % (mDict[\"dQU\"],units)) log('FDF Noise (theory) = %.4g %s' %", "read in the Stokes spectra.\") # If no Stokes I", "if verbose: print(\"Saving debug plots:\") outFilePlot = prefixOut + \".debug-plots.pdf\"", "I spectrum. phiMax_radm2 (float): Maximum absolute Faraday depth (rad/m^2). dPhi_radm2", "dtype=dtFloat) if verbose: print(\"... success.\") data=[freqArr_Hz, IArr, QArr, UArr, dIArr,", "lambdaSqArr_m2 = lambdaSqArr_m2, phiArr_radm2 = phiArr_radm2, weightArr = weightArr, nBits", "create a dummy spectrum = unity if noStokesI: if verbose:", "# Save the measurements to a \"key=value\" text file outFile", "data=[freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr] except Exception: if", "if verbose: log(\"... 
success.\") except Exception: if verbose: log(\"...failed.\") #", "(mDict[\"snrPIfit\"])) log('sigma_add(q) = %.4g (+%.4g, -%.4g)' % (mDict[\"sigmaAddQ\"], mDict[\"dSigmaAddPlusQ\"], mDict[\"dSigmaAddMinusQ\"]))", "measurements to a \"key=value\" text file outFile = prefixOut +", "success.\") except Exception: if verbose: log(\"...failed.\") # freq_Hz, q, u,", "bbox_inches = 'tight') else: tmpFig.show() #add array dictionary aDict =", "# pass # Display the figure # if not plt.isinteractive():", "(1D) stored in an ASCII file. The Stokes I spectrum", "if saveOutput: if verbose: print(\"Saving debug plots:\") outFilePlot = prefixOut", "= poly5(fitDict[\"p\"])(freqHirArr_Hz/1e9) specFig = plt.figure(figsize=(12.0, 8)) plot_Ipqu_spectra_fig(freqArr_Hz = freqArr_Hz, IArr", "and U spectra (1D) stored in an ASCII file. The", "log('Pol Angle 0 = %.4g (+/-%.4g) deg' % (mDict[\"polAngle0Fit_deg\"], mDict[\"dPolAngle0Fit_deg\"]))", "\"Press <RETURN> to exit ...\", # input() return mDict, aDict", "arrdict, prefixOut, verbose): # Save the dirty FDF, RMSF and", "from RMutils.util_misc import poly5 from RMutils.util_misc import MAD from RMutils.util_plotTk", "Q, U, dI, dQ, dU] freq_Hz (array_like): Frequency of each", "toscalar(fwhmRMSF) mDict[\"dQU\"] = toscalar(nanmedian(dQUArr)) mDict[\"dFDFth\"] = toscalar(dFDFth) mDict[\"units\"] = units", "args.weightType, fitRMSF = args.fitRMSF, noStokesI = args.noStokesI, nBits = nBits,", "fdfFig.savefig(outFilePlot, bbox_inches = 'tight') # #if verbose: print \"Press <RETURN>", "if verbose: log(\"> Trying [freq_Hz, q, u, dq, du]\", end='", "u spectra mDict[\"fracPol\"] = mDict[\"ampPeakPIfit\"]/(Ifreq0) mD, pD = measure_qu_complexity(freqArr_Hz =", "dI, dQ, dU try: if verbose: log(\"> Trying [freq_Hz, I,", "if verbose: print(\"> Trying [freq_Hz, q, u, dq, du]\", end='", "only works for wariance weights! 
weightArr = np.where(np.isnan(weightArr), 0.0, weightArr)", "uArr, lambdaSqArr_m2, phiArr_radm2, weightArr) #-------------------------------------------------------------------------# endTime = time.time() cputime =", "parser.add_argument(\"-t\", dest=\"fitRMSF\", action=\"store_true\", help=\"fit a Gaussian to the RMSF [False]\")", "print(\"Saving RMSF and dirty FDF plot:\") outFilePlot = prefixOut +", "= qArr, dataU = uArr, lambdaSqArr_m2 = lambdaSqArr_m2, phiArr_radm2 =", "# Set the floating point precision nBits = 32 if", "\"_RMSF.dat\" if verbose: print(\"> %s\" % outFile) np.savetxt(outFile, list(zip(arrdict[\"phi2Arr_radm2\"], arrdict[\"RMSFArr\"].real,", "fitStatArr = \\ get_rmsf_planes(lambdaSqArr_m2 = lambdaSqArr_m2, phiArr_radm2 = phiArr_radm2, weightArr", "%.4g %s' % (mDict[\"dQU\"],units)) log('FDF Noise (theory) = %.4g %s'", "OF ANY KIND, EXPRESS OR # # IMPLIED, INCLUDING BUT", "= fwhmRMSF_radm2 / nSamples if phiMax_radm2 is None: phiMax_radm2 =", "is None: phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2 phiMax_radm2 = max(phiMax_radm2,", "% (mDict[\"polAngleFit_deg\"], mDict[\"dPolAngleFit_deg\"])) log('Pol Angle 0 = %.4g (+/-%.4g) deg'", "(np.sum(weightArr))**2 ) # Measure the parameters of the dirty FDF", "Set the floating point precision nBits = 32 if args.bit64:", "and plots [False].\") parser.add_argument(\"-D\", dest=\"debug\", action=\"store_true\", help=\"turn on debugging messages", "= \"float\" + str(nBits) dtComplex = \"complex\" + str(2*nBits) #", "theoretical noise to calculate uncertainties mDict = measure_FDF_parms(FDF = dirtyFDF,", "mDict[\"min_freq\"]=float(np.min(freqArr_Hz[good_channels])) mDict[\"max_freq\"]=float(np.max(freqArr_Hz[good_channels])) mDict[\"N_channels\"]=good_channels.size mDict[\"median_channel_width\"]=float(np.median(np.diff(freqArr_Hz))) # Measure the complexity of the", "flag output) _RMsynth.json: dictionary of derived parameters for RM spectrum", "if verbose: log(\"> RM-synthesis completed in %.2f seconds.\" % 
cputime)", "mDict[\"median_channel_width\"]=float(np.median(np.diff(freqArr_Hz))) # Measure the complexity of the q and u", "np.max(freqArr_Hz)/1e9 + xRange*0.05) ax.set_xlabel('$\\\\nu$ (GHz)') ax.set_ylabel('RMS '+units) ax.set_title(\"RMS noise in", ">= 128: log(\"WARNING: Stokes I model contains negative values!\") elif", "Maximum absolute Faraday depth (rad/m^2). dPhi_radm2 (float): Faraday depth channel", "# try: # specFig.canvas.toolbar.pack_forget() # CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window) # except Exception:", "/ dPhi_radm2)) * 2.0 + 1.0) startPhi_radm2 = - (nChanRM-1.0)", "if called from the command line. \"\"\" # Help string", "specFig.canvas.toolbar.window) # except Exception: # pass # Display the figure", "the parameters of the dirty FDF # Use the theoretical", "the dirty FDF # Use the theoretical noise to calculate", "nBits = 32 if args.bit64: nBits = 64 verbose=args.verbose data", "synthesis. \"\"\" # Sanity checks if not os.path.exists(args.dataFile[0]): print(\"File does", "# Calculate some wavelength parameters lambdaSqArr_m2 = np.power(C/freqArr_Hz, 2.0) dFreq_Hz", "/ 2.0 stopPhi_radm2 = + (nChanRM-1.0) * dPhi_radm2 / 2.0", "plot_Ipqu_spectra_fig(freqArr_Hz = freqArr_Hz, IArr = IArr, qArr = qArr, uArr", "an ASCII Stokes I, Q & U spectrum.# # #", "%s' % (mDict[\"dFDFth\"],units)) log('FDF Noise (Corrected MAD) = %.4g %s'", "= fwhmRMSF, vLine = mDict[\"phiPeakPIfit_rm2\"], fig = fdfFig, units =", "dataFile) # freq_Hz, I, Q, U, dI, dQ, dU try:", "print(\"> Trying [freq_Hz, q, u, dq, du]\", end=' ') (freqArr_Hz,", "parser.add_argument(\"-o\", dest=\"polyOrd\", type=int, default=2, help=\"polynomial order to fit to I", "label='rms <QU>') ax.plot(freqArr_Hz/1e9, dQArr, marker='o', color='b', lw=0.5, label='rms Q') ax.plot(freqArr_Hz/1e9,", "# Plot the data and the Stokes I model fit", "# Use the custom navigation toolbar # try: # fdfFig.canvas.toolbar.pack_forget()", "%s' % (mDict[\"dFDFrms\"],units)) log('FDF SNR = %.4g ' % 
(mDict[\"snrPIfit\"]))", "Speed of light [m/s] #-----------------------------------------------------------------------------# def run_rmsynth(data, polyOrd=3, phiMax_radm2=None, dPhi_radm2=None,", "plot_complexity_fig from RMutils.util_plotTk import CustomNavbar from RMutils.util_plotTk import plot_rmsIQU_vs_nu_ax if", "%.4g (+%.4g, -%.4g)' % (mDict[\"sigmaAddU\"], mDict[\"dSigmaAddPlusU\"], mDict[\"dSigmaAddMinusU\"])) log() log('-'*80) #", "/ 2.0 phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM) phiArr_radm2 = phiArr_radm2.astype(dtFloat)", "dqArr = dqArr, duArr = duArr, fracPol = mDict[\"fracPol\"], psi0_deg", "# Calculate the theoretical noise in the FDF !!Old formula", "# Set the Faraday depth range fwhmRMSF_radm2 = 2.0 *", "= argparse.ArgumentParser(description=descStr,epilog=epilog_text, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"dataFile\", metavar=\"dataFile.dat\", nargs=1, help=\"ASCII file containing Stokes", "samples across the RMSF. weightType (str): Can be \"variance\" or", "(plot the Q, U and average RMS spectrum) if debug:", "[freq_Hz, q, u, dq, du]. \"\"\" # Default data types", "dQArr, dUArr) = data if verbose: log(\"... success.\") except Exception:", "print(\"> %s\" % outFile) FH = open(outFile, \"w\") for k,", "UArr, dQArr, dUArr] noStokesI = True except Exception: if verbose:", "\"\"\" epilog_text=\"\"\" Outputs with -S flag: _FDFdirty.dat: Dirty FDF/RM Spectrum", "RMSFArr = RMSFArr, fwhmRMSF = fwhmRMSF, vLine = mDict[\"phiPeakPIfit_rm2\"], fig", "freqArr_Hz aDict[\"weightArr\"]=weightArr aDict[\"dirtyFDF\"]=dirtyFDF if verbose: # Print the results to", "comments=\"#\". 
if verbose: print(\"Reading the data file '%s':\" % dataFile)", "from RMutils.util_misc import nanmedian from RMutils.util_misc import toscalar from RMutils.util_misc", "= (endTime - startTime) if verbose: log(\"> RM-synthesis completed in", "Stokes Q, U and <Q,U> spectra\") # rmsFig.show() #-------------------------------------------------------------------------# #", "freq_Hz, q, u, dq, du try: if verbose: print(\"> Trying", "columns, in a space separated format: [freq_Hz, I, Q, U,", "showPlots (bool): Show plots? debug (bool): Turn on debugging messages", "= parser.parse_args() # Sanity checks if not os.path.exists(args.dataFile[0]): print(\"File does", "and dirty FDF if showPlots or saveOutput: fdfFig = plt.figure(figsize=(12.0,", "convenience freqArr_GHz = freqArr_Hz / 1e9 dQUArr = (dQArr +", "across the RMSF lobe [10].\") parser.add_argument(\"-w\", dest=\"weightType\", default=\"variance\", help=\"weighting [inverse", "q, u, dq, du try: if verbose: log(\"> Trying [freq_Hz,", "the measurements on the FDF in 'key=val' and JSON formats.\")", "CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window) # except Exception: # pass # Display the", "the figure # fdfFig.show() # Pause if plotting enabled if", "q, u, dq, du] freq_Hz (array_like): Frequency of each channel", "(array_like): Frequency of each channel in Hz. q (array_like): Fractional", "the spectra mDict, aDict = run_rmsynth(data = data, polyOrd =", "def run_rmsynth(data, polyOrd=3, phiMax_radm2=None, dPhi_radm2=None, nSamples=10.0, weightType=\"variance\", fitRMSF=False, noStokesI=False, phiNoise_radm2=1e6,", "Stokes Q intensity in each channel. 
U (array_like): Stokes U", "% outFile) np.savetxt(outFile, list(zip(arrdict[\"phi2Arr_radm2\"], arrdict[\"RMSFArr\"].real, arrdict[\"RMSFArr\"].imag))) outFile = prefixOut +", "dirtyFDF, phiArr = phiArr_radm2, fwhmRMSF = fwhmRMSF, dFDF = dFDFth,", "RMSFArr aDict[\"freqArr_Hz\"] = freqArr_Hz aDict[\"weightArr\"]=weightArr aDict[\"dirtyFDF\"]=dirtyFDF if verbose: # Print", "mDict[\"ampPeakPIfit\"]/(Ifreq0) mD, pD = measure_qu_complexity(freqArr_Hz = freqArr_Hz, qArr = qArr,", "data. Args: data (list): Contains frequency and polarization data as", "# # # Copyright (c) 2015 - 2018 <NAME> #", "command line options parser = argparse.ArgumentParser(description=descStr,epilog=epilog_text, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument(\"dataFile\", metavar=\"dataFile.dat\", nargs=1,", "debug: log(traceback.format_exc()) sys.exit() if verbose: log(\"Successfully read in the Stokes", "debug: print(traceback.format_exc()) sys.exit() if verbose: print(\"Successfully read in the Stokes", "Q, U, dI, dQ, dU]\", end=' ') (freqArr_Hz, IArr, QArr,", "fitDict[\"chiSqRed\"] mDict[\"lam0Sq_m2\"] = toscalar(lam0Sq_m2) mDict[\"freq0_Hz\"] = toscalar(freq0_Hz) mDict[\"fwhmRMSF\"] = toscalar(fwhmRMSF)", "- 0.0) / dPhi_radm2)) * 2.0 + 1.0) startPhi_radm2 =", "weights! weightArr = np.where(np.isnan(weightArr), 0.0, weightArr) dFDFth = np.sqrt( np.sum(weightArr**2", "(dict): Data output by RM synthesis. \"\"\" # Sanity checks", "args, ) if args.saveOutput: saveOutput(mDict, aDict, prefixOut, verbose) #-----------------------------------------------------------------------------# if", "U/I spectra. 
The ASCII file should the following columns, in", "default=2, help=\"polynomial order to fit to I spectrum [2].\") parser.add_argument(\"-i\",", "RMutils.util_plotTk import plot_rmsf_fdf_fig from RMutils.util_plotTk import plot_complexity_fig from RMutils.util_plotTk import", "= os.path.splitext(args.dataFile[0]) # Default data types dtFloat = \"float\" +", "\"\"\" # Default data types dtFloat = \"float\" + str(nBits)", "without restriction, including without limitation # # the rights to", "%.4g (+/-%.4g) rad/m^2' % (mDict[\"phiPeakPIfit_rm2\"], mDict[\"dPhiPeakPIfit_rm2\"])) log('freq0_GHz = %.4g '", "string to be shown using the -h option descStr =", "parser.add_argument(\"-S\", dest=\"saveOutput\", action=\"store_true\", help=\"save the arrays and plots [False].\") parser.add_argument(\"-D\",", "parser.add_argument(\"-D\", dest=\"debug\", action=\"store_true\", help=\"turn on debugging messages & plots [False].\")", "= IModHirArr, fig = specFig, units = units) # Use", "dq, du try: if verbose: log(\"> Trying [freq_Hz, q, u,", "first fit with a polynomial and the resulting model used", "Stokes I value at lam0Sq_m2 from the Stokes I model", ">= 64: log(\"Caution: Stokes I model has low signal-to-noise.\") #Add", "end=' ') (freqArr_Hz, QArr, UArr, dQArr, dUArr) = \\ np.loadtxt(dataFile,", "in the FDF !!Old formula only works for wariance weights!", "fwhmRMSF = fwhmRMSF, vLine = mDict[\"phiPeakPIfit_rm2\"], fig = fdfFig, units", "the custom navigation toolbar # try: # fdfFig.canvas.toolbar.pack_forget() # CustomNavbar(fdfFig.canvas,", "of failure? 
Returns: data (list of arrays): List containing the", "args.debug, verbose = verbose, units = args.units, prefixOut = prefixOut,", "= os.path.split(args.dataFile[0]) # Set the floating point precision nBits =", "(list): Contains frequency and polarization data as either: [freq_Hz, I,", "complexity of the q and u spectra mDict[\"fracPol\"] = mDict[\"ampPeakPIfit\"]/(Ifreq0)", "to ASCII files.\") outFile = prefixOut + \"_FDFdirty.dat\" if verbose:", "dPhi_radm2 = args.dPhi_radm2, nSamples = args.nSamples, weightType = args.weightType, fitRMSF", "from RMutils.util_plotTk import CustomNavbar from RMutils.util_plotTk import plot_rmsIQU_vs_nu_ax if sys.version_info.major", "mDict[\"phiPeakPIfit_rm2\"], fig = fdfFig, units = units) # Use the", "messages & plots? verbose (bool): Verbosity. log (function): Which logging", "following columns, in a space separated format: [freq_Hz, I, Q,", "help=\"fit a Gaussian to the RMSF [False]\") parser.add_argument(\"-l\", dest=\"phiMax_radm2\", type=float,", "verbose: print \"Press <RETURN> to exit ...\", # input() return", "flag: _FDFdirty.dat: Dirty FDF/RM Spectrum [Phi, Q, U] _RMSF.dat: Computed", "\".debug-plots.pdf\" if verbose: print(\"> \" + outFilePlot) tmpFig.savefig(outFilePlot, bbox_inches =", "fit if verbose: log(\"Plotting the input data and spectral index", "dPhi_radm2 = fwhmRMSF_radm2 / nSamples if phiMax_radm2 is None: phiMax_radm2", "# CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window) # except Exception: # pass # Display", "parameters for RM spectrum (approximately equivalent to -v flag output)", "each channel. dI (array_like): Error in Stokes I intensity in", "data file '%s':\" % dataFile) # freq_Hz, I, Q, U,", "verbose (bool): Verbosity. 
log (function): Which logging function to use.", "2.0) dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz))) lambdaSqRange_m2 = ( np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2)", "run_rmsynth(data, polyOrd=3, phiMax_radm2=None, dPhi_radm2=None, nSamples=10.0, weightType=\"variance\", fitRMSF=False, noStokesI=False, phiNoise_radm2=1e6, nBits=32,", "print \"Press <RETURN> to exit ...\", # input() return mDict,", "Summary of RM synthesis results. aDict (dict): Data output by", "noStokesI=False, phiNoise_radm2=1e6, nBits=32, showPlots=False, debug=False, verbose=False, log=print,units='Jy/beam', prefixOut=\"prefixOut\", args=None): \"\"\"Run", "outFile = prefixOut + \"_FDFdirty.dat\" if verbose: print(\"> %s\" %", "sys.exit() if verbose: print(\"Successfully read in the Stokes spectra.\") return", "lw=0.5, label='rms Q') ax.plot(freqArr_Hz/1e9, dUArr, marker='o', color='r', lw=0.5, label='rms U')", "use.\") IArr = np.ones_like(QArr) dIArr = np.zeros_like(QArr) # Convert to", "#-------------------------------------------------------------------------# # Calculate some wavelength parameters lambdaSqArr_m2 = np.power(C/freqArr_Hz, 2.0)", "# do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr) #-------------------------------------------------------------------------# endTime = time.time()", "this permission notice shall be included in # # all", "None: phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2 phiMax_radm2 = max(phiMax_radm2, fwhmRMSF_radm2*10.)", "PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE #", "following conditions: # # # # The above copyright notice", "API for runnning RM-synthesis on an ASCII Stokes I, Q", "-%.4g)' % (mDict[\"sigmaAddU\"], mDict[\"dSigmaAddPlusU\"], mDict[\"dSigmaAddMinusU\"])) log() log('-'*80) # Plot the", "nBits = 64 verbose=args.verbose data = readFile(args.dataFile[0],nBits, verbose=verbose, debug=args.debug) #", "16-Nov-2018 by <NAME> # # MODIFIED: 23-October-2019 by <NAME> #", "if verbose: log(\"Warn: no Stokes I data in use.\") IArr", "= qArr, uArr = uArr, dIArr = dIArr, dqArr =", "phiArr_radm2 = phiArr_radm2, weightArr = weightArr, nBits = nBits, verbose", "pD = measure_qu_complexity(freqArr_Hz = freqArr_Hz, qArr = qArr, uArr =", "Stokes Q intensity in each channel. dU (array_like): Error in", "RM-synthesis completed in %.2f seconds.\" % cputime) # Determine the", "except Exception: # pass # Display the figure # fdfFig.show()", "\"\"\" Start the function to perform RM-synthesis if called from", "some wavelength parameters lambdaSqArr_m2 = np.power(C/freqArr_Hz, 2.0) dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz)))", "verbose: print(\"...failed.\") # freq_Hz, q, u, dq, du try: if", "success.\") data=[freqArr_Hz, QArr, UArr, dQArr, dUArr] noStokesI = True except", "Noise (theory) = %.4g %s' % (mDict[\"dFDFth\"],units)) log('FDF Noise (Corrected", "fwhmRMSFArr, fitStatArr = \\ get_rmsf_planes(lambdaSqArr_m2 = lambdaSqArr_m2, phiArr_radm2 = phiArr_radm2,", "= toscalar(lam0Sq_m2) mDict[\"freq0_Hz\"] = toscalar(freq0_Hz) mDict[\"fwhmRMSF\"] = toscalar(fwhmRMSF) mDict[\"dQU\"] =", "parser.add_argument(\"dataFile\", metavar=\"dataFile.dat\", nargs=1, help=\"ASCII file containing Stokes spectra & errors.\")", "IArr = IArr, qArr = qArr, uArr = uArr, dIArr", "if verbose: log(\"Successfully read in the Stokes spectra.\") # If", "poly5(fitDict[\"p\"])(freq0_Hz/1e9) dirtyFDF *= (Ifreq0) # FDF is in fracpol units", "Software. 
# # # # THE SOFTWARE IS PROVIDED \"AS", "# #=============================================================================# import sys import os import time import traceback", "do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr) #-------------------------------------------------------------------------# endTime = time.time() cputime", "OS X) # try: # specFig.canvas.toolbar.pack_forget() # CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window) #", "list of derived parameters for RM spectrum (approximately equivalent to", "True, debug = debug) # Plot the data and the", "print(\"... success.\") data=[freqArr_Hz, QArr, UArr, dQArr, dUArr] noStokesI = True", "U (array_like): Stokes U intensity in each channel. dI (array_like):", "(approximately equivalent to -v flag output) _RMsynth.json: dictionary of derived", "q, u, dq, du]. \"\"\" # Default data types dtFloat", "Start the function to perform RM-synthesis if called from the", "= np.power(C/freqArr_Hz, 2.0) dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz))) lambdaSqRange_m2 = ( np.nanmax(lambdaSqArr_m2)", "get outputs, one or more of the following flags must", "[False]\") parser.add_argument(\"-l\", dest=\"phiMax_radm2\", type=float, default=None, help=\"absolute max Faraday depth sampled", "get_rmsf_planes(lambdaSqArr_m2 = lambdaSqArr_m2, phiArr_radm2 = phiArr_radm2, weightArr = weightArr, mskArr", "be included in # # all copies or substantial portions", "measurements on the FDF in 'key=val' and JSON formats.\") print(\">", "Exception: if verbose: print(\"...failed.\") # freq_Hz, q, u, dq, du", "recover the PI freq0_Hz = C / m.sqrt(lam0Sq_m2) Ifreq0 =", "% (mDict[\"dFDFcorMAD\"],units)) log('FDF Noise (rms) = %.4g %s' % (mDict[\"dFDFrms\"],units))", "the data and the Stokes I model fit if verbose:", "= nBits, verbose = verbose, log = log) # Calculate", "the results to the screen log() log('-'*80) log('RESULTS:\\n') log('FWHM RMSF", "success.\") data=[freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr] 
except Exception:", "rmsFig.show() #-------------------------------------------------------------------------# # Calculate some wavelength parameters lambdaSqArr_m2 = np.power(C/freqArr_Hz,", "the FDF in 'key=val' and JSON formats.\") print(\"> %s\" %", "uArr=pD[\"yArrU\"], duArr=pD[\"dyArrU\"], sigmaAdduArr=pD[\"sigmaAddArrU\"], chiSqReduArr=pD[\"chiSqRedArrU\"], probuArr=pD[\"probArrU\"], mDict=mDict) if saveOutput: if verbose:", "uArr, dqArr = dqArr, duArr = duArr, fracPol = mDict[\"fracPol\"],", "= - (nChanRM-1.0) * dPhi_radm2 / 2.0 stopPhi_radm2 = +", "+ str(2*nBits) # freq_Hz, I, Q, U, dI, dQ, dU", "unpack=True, dtype=dtFloat) if verbose: print(\"... success.\") data=[freqArr_Hz, QArr, UArr, dQArr,", "if verbose: log(\"> Trying [freq_Hz, I, Q, U, dI, dQ,", "the dirty FDF, RMSF and weight array to ASCII files", "does not exist: '%s'.\" % args.dataFile[0]) sys.exit() prefixOut, ext =", "float(fwhmRMSFArr) # ALTERNATE RM-SYNTHESIS CODE --------------------------------------------# #dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2,", "with -S flag: _FDFdirty.dat: Dirty FDF/RM Spectrum [Phi, Q, U]", "'tight') # #if verbose: print \"Press <RETURN> to exit ...\",", "or debug: if verbose: print(\"Saving RMSF and dirty FDF plot:\")", "fwhmRMSF, vLine = mDict[\"phiPeakPIfit_rm2\"], fig = fdfFig, units = units)", "Hz. q (array_like): Fractional Stokes Q intensity (Q/I) in each", "charge, to any person obtaining a # # copy of", "formats.\") print(\"> %s\" % outFile) FH = open(outFile, \"w\") for", "%.4g %s' % (mDict[\"dFDFrms\"],units)) log('FDF SNR = %.4g ' %", "prefixOut + \".RMSF-dirtyFDF-plots.pdf\" if verbose: print(\"> \" + outFilePlot) fdfFig.savefig(outFilePlot,", "+ outFilePlot) tmpFig.savefig(outFilePlot, bbox_inches = 'tight') else: tmpFig.show() #add array", "FDF/RM Spectrum [Phi, Q, U] _RMSF.dat: Computed RMSF [Phi, Q,", "Data output by RM synthesis. 
\"\"\" # Sanity checks if", "arrays to ASCII files.\") outFile = prefixOut + \"_FDFdirty.dat\" if", "%.4g %s' % (mDict[\"Ifreq0\"],units)) log('Peak PI = %.4g (+/-%.4g) %s'", "U and average RMS spectrum) if debug: rmsFig = plt.figure(figsize=(12.0,", "the columns found in the file. If Stokes I is", "of samples across the RMSF lobe [10].\") parser.add_argument(\"-w\", dest=\"weightType\", default=\"variance\",", "I, Q, U, dI, dQ, dU]\", end=' ') (freqArr_Hz, IArr,", "units if fitDict[\"fitStatus\"] >= 128: log(\"WARNING: Stokes I model contains", "= time.time() # Perform RM-synthesis on the spectrum dirtyFDF, lam0Sq_m2", "ax.set_xlim( np.min(freqArr_Hz)/1e9 - xRange*0.05, np.max(freqArr_Hz)/1e9 + xRange*0.05) ax.set_xlabel('$\\\\nu$ (GHz)') ax.set_ylabel('RMS", "if verbose: print(\"> %s\" % outFile) np.savetxt(outFile, list(zip(arrdict[\"phiArr_radm2\"], arrdict[\"dirtyFDF\"].real, arrdict[\"dirtyFDF\"].imag)))", "= args.units, prefixOut = prefixOut, args = args, ) if", "noise in Stokes Q, U and <Q,U> spectra\") # rmsFig.show()", "= True, debug = debug) # Plot the data and", "if not plt.isinteractive(): # specFig.show() # DEBUG (plot the Q,", "du (array_like): Error in fractional Stokes U intensity in each", "qArr, uArr = uArr, dqArr = dqArr, duArr = duArr,", "print(\"...failed.\") # freq_Hz, q, u, dq, du try: if verbose:", "help=\"polynomial order to fit to I spectrum [2].\") parser.add_argument(\"-i\", dest=\"noStokesI\",", "Exception: if verbose: log(\"...failed.\") # freq_Hz, q, u, dq, du", "if verbose: print(\"Saving RMSF and dirty FDF plot:\") outFilePlot =", "Stokes I, Q & U spectrum.# # # # MODIFIED:", "% args.dataFile[0]) sys.exit() prefixOut, ext = os.path.splitext(args.dataFile[0]) dataDir, dummy =", "= 'tight') else: tmpFig.show() #add array dictionary aDict = dict()", "dQArr, dUArr) = \\ np.loadtxt(dataFile, unpack=True, dtype=dtFloat) if verbose: print(\"...", "-h option descStr = \"\"\" Run RM-synthesis on Stokes I,", "mDict[\"dSigmaAddPlusU\"], 
mDict[\"dSigmaAddMinusU\"])) log() log('-'*80) # Plot the RM Spread Function", "-S flag: _FDFdirty.dat: Dirty FDF/RM Spectrum [Phi, Q, U] _RMSF.dat:", "verbose: log(\"Plotting the input data and spectral index fit.\") freqHirArr_Hz", "(array_like): Error in Stokes Q intensity in each channel. dU", "# The MIT License (MIT) # # # # Copyright", "outFile) json.dump(dict(outdict), open(outFile, \"w\")) #-----------------------------------------------------------------------------# def main(): import argparse \"\"\"", "vLine = mDict[\"phiPeakPIfit_rm2\"], fig = fdfFig, units = units) #", "args.dataFile[0]) sys.exit() prefixOut, ext = os.path.splitext(args.dataFile[0]) dataDir, dummy = os.path.split(args.dataFile[0])", "Angle = %.4g (+/-%.4g) deg' % (mDict[\"polAngleFit_deg\"], mDict[\"dPolAngleFit_deg\"])) log('Pol Angle", "log=print,units='Jy/beam', prefixOut=\"prefixOut\", args=None): \"\"\"Run RM synthesis on 1D data. Args:", "= lambdaSqArr_m2, phiArr_radm2 = phiArr_radm2, weightArr = weightArr, nBits =", "= args.phiMax_radm2, dPhi_radm2 = args.dPhi_radm2, nSamples = args.nSamples, weightType =", "RM-synthesis if called from the command line. \"\"\" # Help", "THE SOFTWARE OR THE USE OR OTHER # # DEALINGS", "specFig.canvas.toolbar.pack_forget() # CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window) # except Exception: # pass #", "case of failure? Returns: data (list of arrays): List containing", "# Plot the RM Spread Function and dirty FDF if", "(str): Units of data. Returns: mDict (dict): Summary of RM", "The above copyright notice and this permission notice shall be", "sys.exit() prefixOut, ext = os.path.splitext(args.dataFile[0]) # Default data types dtFloat", "log(\"> Trying [freq_Hz, q, u, dq, du]\", end=' ') (freqArr_Hz,", "label='rms U') xRange = (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9 ax.set_xlim( np.min(freqArr_Hz)/1e9 - xRange*0.05, np.max(freqArr_Hz)/1e9", "\"uniform\" -- Weight uniformly (i.e. 
with 1s) fitRMSF (bool): Fit", "% (mDict[\"Ifreq0\"],units)) log('Peak PI = %.4g (+/-%.4g) %s' % (mDict[\"ampPeakPIfit\"],", "tmpFig.show() #add array dictionary aDict = dict() aDict[\"phiArr_radm2\"] = phiArr_radm2", "str(nBits) dtComplex = \"complex\" + str(2*nBits) # freq_Hz, I, Q,", "# # FROM, OUT OF OR IN CONNECTION WITH THE", "(mDict[\"dFDFcorMAD\"],units)) log('FDF Noise (rms) = %.4g %s' % (mDict[\"dFDFrms\"],units)) log('FDF", "freqArr_GHz, IArr = IArr, QArr = QArr, UArr = UArr,", "line. \"\"\" # Help string to be shown using the", "dq, du] freq_Hz (array_like): Frequency of each channel in Hz.", "in Hz. q (array_like): Fractional Stokes Q intensity (Q/I) in", "plot_rmsf_fdf_fig(phiArr = phiArr_radm2, FDF = dirtyFDF, phi2Arr = phi2Arr_radm2, RMSFArr", "dI (array_like): Error in Stokes I intensity in each channel.", "format: [freq_Hz, I, Q, U, I_err, Q_err, U_err] OR [freq_Hz,", "# Measure the complexity of the q and u spectra", "= %.4g (+%.4g, -%.4g)' % (mDict[\"sigmaAddQ\"], mDict[\"dSigmaAddPlusQ\"], mDict[\"dSigmaAddMinusQ\"])) log('sigma_add(u) =", "[False (uses 32-bit)]\") parser.add_argument(\"-p\", dest=\"showPlots\", action=\"store_true\", help=\"show the plots [False].\")", "the following columns, in a space separated format: [freq_Hz, I,", "figure # fdfFig.show() # Pause if plotting enabled if showPlots:", "Ifreq0 = poly5(fitDict[\"p\"])(freq0_Hz/1e9) dirtyFDF *= (Ifreq0) # FDF is in", "I, Q, U, dI, dQ, dU], else [freq_Hz, q, u,", "# # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "in an ASCII file. The Stokes I spectrum is first", "in fractional Stokes Q intensity in each channel. du (array_like):", "% outFile) np.savetxt(outFile, list(zip(arrdict[\"phiArr_radm2\"], arrdict[\"dirtyFDF\"].real, arrdict[\"dirtyFDF\"].imag))) outFile = prefixOut +", "Q') ax.plot(freqArr_Hz/1e9, dUArr, marker='o', color='r', lw=0.5, label='rms U') xRange =", "name # Read the data-file. Format=space-delimited, comments=\"#\". if verbose: print(\"Reading" ]
[ "import MessagePassing from .gcn_conv import GCNConv from .gat_conv import GATConv", "from .gat_conv import GATConv from .se_layer import SELayer from .aggregator", ".gat_conv import GATConv from .se_layer import SELayer from .aggregator import", "SELayer from .aggregator import Meanaggregator from .maggregator import meanaggr __all__", "import meanaggr __all__ = [ 'MessagePassing', 'GCNConv', 'GATConv', 'SELayer', 'Meanaggregator'", "MessagePassing from .gcn_conv import GCNConv from .gat_conv import GATConv from", "import Meanaggregator from .maggregator import meanaggr __all__ = [ 'MessagePassing',", "import SELayer from .aggregator import Meanaggregator from .maggregator import meanaggr", ".gcn_conv import GCNConv from .gat_conv import GATConv from .se_layer import", "from .gcn_conv import GCNConv from .gat_conv import GATConv from .se_layer", ".aggregator import Meanaggregator from .maggregator import meanaggr __all__ = [", "import GATConv from .se_layer import SELayer from .aggregator import Meanaggregator", "meanaggr __all__ = [ 'MessagePassing', 'GCNConv', 'GATConv', 'SELayer', 'Meanaggregator' ]", "GATConv from .se_layer import SELayer from .aggregator import Meanaggregator from", "GCNConv from .gat_conv import GATConv from .se_layer import SELayer from", ".message_passing import MessagePassing from .gcn_conv import GCNConv from .gat_conv import", "Meanaggregator from .maggregator import meanaggr __all__ = [ 'MessagePassing', 'GCNConv',", "from .se_layer import SELayer from .aggregator import Meanaggregator from .maggregator", "import GCNConv from .gat_conv import GATConv from .se_layer import SELayer", ".se_layer import SELayer from .aggregator import Meanaggregator from .maggregator import", "from .maggregator import meanaggr __all__ = [ 'MessagePassing', 'GCNConv', 'GATConv',", "from .message_passing import MessagePassing from .gcn_conv import GCNConv from .gat_conv", ".maggregator import meanaggr __all__ = [ 'MessagePassing', 'GCNConv', 'GATConv', 
'SELayer',", "from .aggregator import Meanaggregator from .maggregator import meanaggr __all__ =" ]
[ "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "- All rights reserved. # # Licensed under the Apache", "a module. Does nothing otherwise.\"\"\" if hasattr(mod_or_fun, \"apply\"): return mod_or_fun.apply", "module-like object with the method `apply`.\"\"\" apply: Callable \"\"\"The wrapped", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "2.0 (the \"License\"); # you may not use this file", "agreed to in writing, software # distributed under the License", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Unless required by applicable law or agreed to in writing,", "distributed under the License is distributed on an \"AS IS\"", "already has an apply method. \"\"\" if hasattr(mod_or_fun, \"apply\"): return", "# limitations under the License. from typing import Callable from", "nothing if it already has an apply method. \"\"\" if", "the specific language governing permissions and # limitations under the", "with the method `apply`. Does nothing if it already has", "callable to be a module-like object with the method `apply`.\"\"\"", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "nothing otherwise.\"\"\" if hasattr(mod_or_fun, \"apply\"): return mod_or_fun.apply else: return mod_or_fun", "a module-like object with the method `apply`.\"\"\" apply: Callable \"\"\"The", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "except in compliance with the License. 
# You may obtain", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "\"apply\"): return mod_or_fun.apply else: return mod_or_fun @struct.dataclass class WrappedApplyFun: \"\"\"Wraps", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "and # limitations under the License. from typing import Callable", "not use this file except in compliance with the License.", "Authors - All rights reserved. # # Licensed under the", "rights reserved. # # Licensed under the Apache License, Version", "apply function if it's a module. Does nothing otherwise.\"\"\" if", "method `apply`. Does nothing if it already has an apply", "def get_afun_if_module(mod_or_fun) -> Callable: \"\"\"Returns the apply function if it's", "from . import struct def get_afun_if_module(mod_or_fun) -> Callable: \"\"\"Returns the", "writing, software # distributed under the License is distributed on", "Callable: \"\"\"Returns the apply function if it's a module. Does", "it's a module. Does nothing otherwise.\"\"\" if hasattr(mod_or_fun, \"apply\"): return", "in writing, software # distributed under the License is distributed", "you may not use this file except in compliance with", "Callable from . import struct def get_afun_if_module(mod_or_fun) -> Callable: \"\"\"Returns", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "language governing permissions and # limitations under the License. from", "import Callable from . import struct def get_afun_if_module(mod_or_fun) -> Callable:", "return f\"{type(self).__name__}(apply={self.apply}, hash={hash(self)})\" def wrap_afun(mod_or_fun): \"\"\"Wraps a callable to be", "Copyright 2021 The NetKet Authors - All rights reserved. 
#", "apply: Callable \"\"\"The wrapped callable.\"\"\" def __repr__(self): return f\"{type(self).__name__}(apply={self.apply}, hash={hash(self)})\"", "mod_or_fun.apply else: return mod_or_fun @struct.dataclass class WrappedApplyFun: \"\"\"Wraps a callable", "hasattr(mod_or_fun, \"apply\"): return mod_or_fun.apply else: return mod_or_fun @struct.dataclass class WrappedApplyFun:", "governing permissions and # limitations under the License. from typing", "has an apply method. \"\"\" if hasattr(mod_or_fun, \"apply\"): return mod_or_fun", "use this file except in compliance with the License. #", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "if hasattr(mod_or_fun, \"apply\"): return mod_or_fun.apply else: return mod_or_fun @struct.dataclass class", "reserved. # # Licensed under the Apache License, Version 2.0", "apply method. \"\"\" if hasattr(mod_or_fun, \"apply\"): return mod_or_fun else: return", "struct def get_afun_if_module(mod_or_fun) -> Callable: \"\"\"Returns the apply function if", "the License. from typing import Callable from . import struct", "CONDITIONS OF ANY KIND, either express or implied. # See", "wrapped callable.\"\"\" def __repr__(self): return f\"{type(self).__name__}(apply={self.apply}, hash={hash(self)})\" def wrap_afun(mod_or_fun): \"\"\"Wraps", "Does nothing if it already has an apply method. \"\"\"", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "or implied. # See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "typing import Callable from . import struct def get_afun_if_module(mod_or_fun) ->", "License. # You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License, Version 2.0 (the \"License\"); # you may not use", "# You may obtain a copy of the License at", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "under the License is distributed on an \"AS IS\" BASIS,", "callable to be a module-like object with the method `apply`.", "NetKet Authors - All rights reserved. # # Licensed under", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "License for the specific language governing permissions and # limitations", "# Copyright 2021 The NetKet Authors - All rights reserved.", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "with the method `apply`.\"\"\" apply: Callable \"\"\"The wrapped callable.\"\"\" def", "hash={hash(self)})\" def wrap_afun(mod_or_fun): \"\"\"Wraps a callable to be a module-like", "to be a module-like object with the method `apply`.\"\"\" apply:", "The NetKet Authors - All rights reserved. # # Licensed", "else: return mod_or_fun @struct.dataclass class WrappedApplyFun: \"\"\"Wraps a callable to", "the License for the specific language governing permissions and #", "under the License. from typing import Callable from . import", "f\"{type(self).__name__}(apply={self.apply}, hash={hash(self)})\" def wrap_afun(mod_or_fun): \"\"\"Wraps a callable to be a", "(the \"License\"); # you may not use this file except", "be a module-like object with the method `apply`.\"\"\" apply: Callable", "`apply`.\"\"\" apply: Callable \"\"\"The wrapped callable.\"\"\" def __repr__(self): return f\"{type(self).__name__}(apply={self.apply},", "Apache License, Version 2.0 (the \"License\"); # you may not", "the method `apply`.\"\"\" apply: Callable \"\"\"The wrapped callable.\"\"\" def __repr__(self):", "# you may not use this file except in compliance", "def wrap_afun(mod_or_fun): \"\"\"Wraps a callable to be a module-like object", "either express or implied. # See the License for the", "object with the method `apply`. Does nothing if it already", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "mod_or_fun @struct.dataclass class WrappedApplyFun: \"\"\"Wraps a callable to be a", "__repr__(self): return f\"{type(self).__name__}(apply={self.apply}, hash={hash(self)})\" def wrap_afun(mod_or_fun): \"\"\"Wraps a callable to", "def __repr__(self): return f\"{type(self).__name__}(apply={self.apply}, hash={hash(self)})\" def wrap_afun(mod_or_fun): \"\"\"Wraps a callable", "2021 The NetKet Authors - All rights reserved. # #", "the License is distributed on an \"AS IS\" BASIS, #", "-> Callable: \"\"\"Returns the apply function if it's a module.", "in compliance with the License. # You may obtain a", "software # distributed under the License is distributed on an", "it already has an apply method. \"\"\" if hasattr(mod_or_fun, \"apply\"):", "import struct def get_afun_if_module(mod_or_fun) -> Callable: \"\"\"Returns the apply function", "permissions and # limitations under the License. from typing import", "from typing import Callable from . import struct def get_afun_if_module(mod_or_fun)", "a callable to be a module-like object with the method", "# # Unless required by applicable law or agreed to", "wrap_afun(mod_or_fun): \"\"\"Wraps a callable to be a module-like object with", "Does nothing otherwise.\"\"\" if hasattr(mod_or_fun, \"apply\"): return mod_or_fun.apply else: return", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "Version 2.0 (the \"License\"); # you may not use this", "law or agreed to in writing, software # distributed under", "class WrappedApplyFun: \"\"\"Wraps a callable to be a module-like object", "WrappedApplyFun: \"\"\"Wraps a callable to be a module-like object with", "if it's a module. Does nothing otherwise.\"\"\" if hasattr(mod_or_fun, \"apply\"):", "an apply method. 
\"\"\" if hasattr(mod_or_fun, \"apply\"): return mod_or_fun else:", "License. from typing import Callable from . import struct def", "callable.\"\"\" def __repr__(self): return f\"{type(self).__name__}(apply={self.apply}, hash={hash(self)})\" def wrap_afun(mod_or_fun): \"\"\"Wraps a", "method. \"\"\" if hasattr(mod_or_fun, \"apply\"): return mod_or_fun else: return WrappedApplyFun(mod_or_fun)", "implied. # See the License for the specific language governing", "`apply`. Does nothing if it already has an apply method.", "\"\"\"Returns the apply function if it's a module. Does nothing", "under the Apache License, Version 2.0 (the \"License\"); # you", "\"License\"); # you may not use this file except in", "Callable \"\"\"The wrapped callable.\"\"\" def __repr__(self): return f\"{type(self).__name__}(apply={self.apply}, hash={hash(self)})\" def", "function if it's a module. Does nothing otherwise.\"\"\" if hasattr(mod_or_fun,", "the apply function if it's a module. Does nothing otherwise.\"\"\"", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "return mod_or_fun @struct.dataclass class WrappedApplyFun: \"\"\"Wraps a callable to be", "All rights reserved. # # Licensed under the Apache License,", "method `apply`.\"\"\" apply: Callable \"\"\"The wrapped callable.\"\"\" def __repr__(self): return", "object with the method `apply`.\"\"\" apply: Callable \"\"\"The wrapped callable.\"\"\"", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "to be a module-like object with the method `apply`. Does", "if it already has an apply method. \"\"\" if hasattr(mod_or_fun,", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. 
# See the License", "See the License for the specific language governing permissions and", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "to in writing, software # distributed under the License is", "get_afun_if_module(mod_or_fun) -> Callable: \"\"\"Returns the apply function if it's a", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "return mod_or_fun.apply else: return mod_or_fun @struct.dataclass class WrappedApplyFun: \"\"\"Wraps a", "You may obtain a copy of the License at #", ". import struct def get_afun_if_module(mod_or_fun) -> Callable: \"\"\"Returns the apply", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "a module-like object with the method `apply`. Does nothing if", "@struct.dataclass class WrappedApplyFun: \"\"\"Wraps a callable to be a module-like", "required by applicable law or agreed to in writing, software", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "with the License. # You may obtain a copy of", "this file except in compliance with the License. # You", "\"\"\"The wrapped callable.\"\"\" def __repr__(self): return f\"{type(self).__name__}(apply={self.apply}, hash={hash(self)})\" def wrap_afun(mod_or_fun):", "module-like object with the method `apply`. Does nothing if it", "otherwise.\"\"\" if hasattr(mod_or_fun, \"apply\"): return mod_or_fun.apply else: return mod_or_fun @struct.dataclass", "the Apache License, Version 2.0 (the \"License\"); # you may", "the method `apply`. Does nothing if it already has an", "limitations under the License. 
from typing import Callable from .", "\"\"\"Wraps a callable to be a module-like object with the", "module. Does nothing otherwise.\"\"\" if hasattr(mod_or_fun, \"apply\"): return mod_or_fun.apply else:", "be a module-like object with the method `apply`. Does nothing" ]
[ "'EPSG': crs = '{}:{}'.format(cleancrs[-2], cleancrs[-1]) else: raise ValueError('{} not recognized'.format(name))", "field in fields] types = dict(zip(field_names, field_types)) features = []", "'GeometryCollection': geometries = geom['geometries'] for g in geometries: c =", "filter records with start and end start = start if", "fld_type in ['C', 'N', 'F']: value = rec else: continue", "= 'EPSG:4326' for n, feat in enumerate(geodict.get('features')): properties = feat.get('properties')", "sr.record): fld_type = types[fld] if fld_type == 'D': value =", "If not specified it will try to get it from", "if ty == 'GeometryCollection': geometries = geom['geometries'] for g in", "properties = feature['properties'] fid = feature['id'] geom = feature['geometry']['type'] #", "= name.split(':') cleancrs = [part for part in splitcrs if", "fields) writer.writeheader() # write rows for feature in features: properties", "local file a CSV or geoJSON file. This uses a", "return ee.FeatureCollection(features) def fromGeoJSON(filename=None, data=None, crs=None): \"\"\" Create a list", "of download, either CSV or JSON. Defaults to CSV. :param", "GeoJSON (.geojson) file. :rtype: file \"\"\" import json import os", "of the resulting file :type name: str :param path: The", "return thefile def toCSV(collection, filename, split_at=4000): \"\"\" Alternative to download", "ee.Feature inside. This is due to failing when attempting to", "not specified it will try to get it from the", "json import csv from .. import tools def fromShapefile(filename, crs=None,", "process more than 1000 records at a time. 
Found {}\"", "ee.List([collection]))) collections_size = collections.size().getInfo() col = ee.FeatureCollection(collections.get(0)) content = col.getInfo()", "= dict(zip(field_names, field_types)) features = [] projection = utils.getProjection(filename) if", "uses a different method than `toGeoJSON` and `toCSV` :param filetype:", "selectors, filename) thefile = utils.downloadFile(url, filename, filetype, path) return thefile", "CSV or JSON. Defaults to CSV. :param selectors: The selectors", "= utils.GEOMETRY_TYPES.get(ty)(coords, proj=ee.Projection(crs)) ee_feat = ee.feature.Feature(ee_geom, properties) features.append(ee_feat) return tuple(features)", "geom['geometries'] for g in geometries: c = g['coordinates'] utils.removeZ(c) else:", "(only PATH, without filename) :type assetPath: str :param name: filename", "FeatureCollection to a GeoJSON file :param collection: The collection to", "else: if ty == 'Polygon': coords = utils.removeZ(coords) if utils.hasZ(coords)", "col = ee.FeatureCollection(collections.get(0)) content = col.getInfo() feats = content['features'] for", "[] projection = utils.getProjection(filename) if not crs else crs #", "crs=None, encoding=None): \"\"\" Create a list of Features from a", "utils.removeZ(c) else: coords = geom['coordinates'] utils.removeZ(coords) return fromGeoJSON(data=geojsondict, crs=crs) def", "fields.append('geometry') features = d['features'] ext = filename[-4:] if ext !=", "range(start, end): # atr = dict(zip(field_names, sr.record)) sr = reader.shapeRecord(i)", "# atr = dict(zip(field_names, sr.record)) sr = reader.shapeRecord(i) atr =", "a tuple of features. 
\"\"\" if filename: with open(filename, 'r')", "filename: str :param start: :return: the FeatureCollection :rtype: ee.FeatureCollection \"\"\"", "== 'Polygon': coords = utils.removeZ(coords) if utils.hasZ(coords) else coords ee_geom", "pass the same params as the original function :param table:", "upload the image (only PATH, without filename) :type assetPath: str", "= feat['geometry'] ty = geom['type'] if ty == 'GeometryCollection': geometries", ".dbf must be present) to a ee.FeatureCollection At the moment", "FeatureCollection :rtype: ee.FeatureCollection \"\"\" import shapefile wgs84 = ee.Projection('EPSG:4326') #", "sr.shape.__geo_interface__ if projection is not None: geometry = ee.Geometry(geom, projection)", "ty == 'Polygon': coords = utils.removeZ(coords) if utils.hasZ(coords) else coords", "= feat['properties'] if 'styleUrl' in prop: prop.pop('styleUrl') # remove Z", "list(d['columns'].keys()) fields.append('geometry') features = d['features'] ext = filename[-4:] if ext", "less than 1000 records and doesn't handle complex shapes. :param", "raise ValueError(msg.format(end-start)) for i in range(start, end): # atr =", "thecsv: writer = csv.DictWriter(thecsv, fields) writer.writeheader() # write rows for", "value geom = sr.shape.__geo_interface__ if projection is not None: geometry", "str :return: a tuple of features. \"\"\" geojsondict = utils.kmlToGeoJsonDict(filename,", "thecsv def toLocal(collection, filename, filetype=None, selectors=None, path=None): \"\"\" Download a", "of features. \"\"\" if filename: with open(filename, 'r') as geoj:", "split_at) limits = ee.List.zip(seq.slice(1), seq) def over_limits(n): n = ee.List(n)", "recognized'.format(name)) else: crs = 'EPSG:4326' for n, feat in enumerate(geodict.get('features')):", "a dict object \"\"\" size = collection.size() condition = size.gte(4999)", "to avoid an EE Exception :type split_at: int :return: A", "filename. 
If the shape is not in the same path", "= feature['geometry']['type'] # match fields properties['system:index'] = fid properties['geometry'] =", "collection to export :type collection: ee.FeatureCollection :param name: name of", "FeatureCollection as a CSV \"\"\" d = toDict(collection, split_at) fields", "ee.Number(n.get(1)) return ee.FeatureCollection(collection.toList(ini, end)) return limits.map(over_limits) collections = ee.List( ee.Algorithms.If(condition,", "path: path = os.getcwd() # name if name[-8:-1] != '.geojson':", ":return: A GeoJSON (.geojson) file. :rtype: file \"\"\" import json", "the file to load :type filename: str :param crs: a", "be downloaded \"\"\" if not filetype: filetype = 'CSV' url", "name) assetId = '/'.join([assetPath, name]) # Description description = utils.matchDescription(name)", "**kwargs): \"\"\" This function can create folders and ImageCollections on", "content = col.getInfo() feats = content['features'] for i in range(0,", "and doesn't handle complex shapes. :param filename: the name of", "as the original function :param table: the feature collection to", "== 'GeometryCollection': ee_geom = utils.GEOMETRY_TYPES.get(ty)(geom, opt_proj=crs) else: if ty ==", "to be downloaded \"\"\" if not filetype: filetype = 'CSV'", "utils.GEOMETRY_TYPES.get(ty)(coords, proj=ee.Projection(crs)) ee_feat = ee.feature.Feature(ee_geom, properties) features.append(ee_feat) return tuple(features) def", "projection is not None: geometry = ee.Geometry(geom, projection) \\ .transform(wgs84,", "Pipe ERROR) out of the list. 
You can try creating", "ValueError('{} not recognized'.format(name)) else: crs = 'EPSG:4326' for n, feat", "= geom['coordinates'] utils.removeZ(coords) return fromGeoJSON(data=geojsondict, crs=crs) def toDict(collection, split_at=4000): \"\"\"", "coords = geom['coordinates'] utils.removeZ(coords) return fromGeoJSON(data=geojsondict, crs=crs) def toDict(collection, split_at=4000):", "a string with format \"EPSG:XXX\" if isinstance(projection, str): if 'EPSG:'", "format \"EPSG:XXX\" if isinstance(projection, str): if 'EPSG:' in projection: projection", "This is due to failing when attempting to create a", "start else 0 if not end: records = reader.shapeRecords() end", "collection.size() seq = tools.ee_list.sequence(0, size, split_at) limits = ee.List.zip(seq.slice(1), seq)", "ee from . import utils import json import csv from", "cleancrs = [part for part in splitcrs if part] try:", "file. If None, will be saved in the current folder", "feat = ee.Feature(geometry, atr) features.append(feat) return ee.FeatureCollection(features) def fromGeoJSON(filename=None, data=None,", "feat.get('geometry') ty = geom.get('type') coords = geom.get('coordinates') if ty ==", "= list(d['columns'].keys()) fields.append('geometry') features = d['features'] ext = filename[-4:] if", "for feature in features: properties = feature['properties'] fid = feature['id']", "= feature['id'] geom = feature['geometry']['type'] # match fields properties['system:index'] =", "that should be used to determine which attributes will be", "\"{}/{}\".format(user, assetPath) if create: # Recrusive create path path2create =", "def toGeoJSON(collection, name, path=None, split_at=4000): \"\"\" Export a FeatureCollection to", "atr[fld] = value geom = sr.shape.__geo_interface__ if projection is not", "in the same path than the script, specify a path", "projection.split(':')[1] projection = 'EPSG:{}'.format(projection) # filter records with start and", "else 0 if not end: records = reader.shapeRecords() end =", "of 
Features from a KML file. Return a python tuple", "n, feat in enumerate(geodict.get('features')): properties = feat.get('properties') geom = feat.get('geometry')", "name]) # Description description = utils.matchDescription(name) # Init task task", "c = g['coordinates'] utils.removeZ(c) else: coords = geom['coordinates'] utils.removeZ(coords) return", "'.csv' with open(filename, 'w') as thecsv: writer = csv.DictWriter(thecsv, fields)", "'styleUrl' in prop: prop.pop('styleUrl') # remove Z value if needed", "= feature['properties'] fid = feature['id'] geom = feature['geometry']['type'] # match", "if name[-8:-1] != '.geojson': fname = name+'.geojson' content = toDict(collection,", "cleancrs[-1] == 'CRS84': crs = 'EPSG:4326' elif cleancrs[-2] == 'EPSG':", "assetId = '/'.join([assetPath, name]) # Description description = utils.matchDescription(name) #", "path = os.getcwd() # name if name[-8:-1] != '.geojson': fname", "If None, will be saved in the current folder :type", "crs: a coordinate reference system in EPSG format. If not", "value = ee.Date(rec.isoformat()).millis().getInfo() elif fld_type in ['C', 'N', 'F']: value", "= geom # write row writer.writerow(properties) return thecsv def toLocal(collection,", "# read the filename reader = shapefile.Reader(filename) fields = reader.fields[1:]", "crs: str :return: a tuple of features. \"\"\" if filename:", "Exception :type split_at: int :return: A GeoJSON (.geojson) file. :rtype:", "= \"Can't process more than 1000 records at a time.", "for the image (AssetID will be assetPath + name) :type", "string with format \"EPSG:XXX\" if isinstance(projection, str): if 'EPSG:' in", "the asset path is_user = (assetPath.split('/')[0] == 'users') if not", "is not None: geometry = ee.Geometry(geom, projection) \\ .transform(wgs84, 1)", "None: geometry = ee.Geometry(geom, projection) \\ .transform(wgs84, 1) else: geometry", "The filetype of download, either CSV or JSON. Defaults to", "to determine which attributes will be downloaded. 
:param filename: The", "assetPath + name) :type name: str :return: the tasks :rtype:", "the result of this function to a ee.List or using", "using it directly as a FeatureCollection argument. :param filename: the", "path: str :param split_at: limit to avoid an EE Exception", "filename) :type assetPath: str :param name: filename for the image", "return tuple(features) def fromKML(filename=None, data=None, crs=None, encoding=None): \"\"\" Create a", "fid properties['geometry'] = geom # write row writer.writerow(properties) return thecsv", "utils.removeZ(coords) return fromGeoJSON(data=geojsondict, crs=crs) def toDict(collection, split_at=4000): \"\"\" Get the", "end): # atr = dict(zip(field_names, sr.record)) sr = reader.shapeRecord(i) atr", "name[-8:-1] != '.geojson': fname = name+'.geojson' content = toDict(collection, split_at)", "fromGeoJSON(filename=None, data=None, crs=None): \"\"\" Create a list of Features from", "a time. Found {}\" raise ValueError(msg.format(end-start)) for i in range(start,", "to a ee.List or using it directly as a FeatureCollection", "split_at) with open(os.path.join(path, fname), 'w') as thefile: thefile.write(json.dumps(content)) return thefile", "saved in the current folder :type path: str :param split_at:", "thefile def toAsset(table, assetPath, name=None, create=True, verbose=False, **kwargs): \"\"\" This", "geom = feat.get('geometry') ty = geom.get('type') coords = geom.get('coordinates') if", "# write row writer.writerow(properties) return thecsv def toLocal(collection, filename, filetype=None,", "content['features'] = feats return content def toGeoJSON(collection, name, path=None, split_at=4000):", "limits = ee.List.zip(seq.slice(1), seq) def over_limits(n): n = ee.List(n) ini", "of the list. 
You can try creating it yourself casting", "size = collection.size() seq = tools.ee_list.sequence(0, size, split_at) limits =", "{} for fld, rec in zip(field_names, sr.record): fld_type = types[fld]", "name, path=None, split_at=4000): \"\"\" Export a FeatureCollection to a GeoJSON", "= ee.Number(n.get(0)) end = ee.Number(n.get(1)) return ee.FeatureCollection(collection.toList(ini, end)) return limits.map(over_limits)", "d = toDict(collection, split_at) fields = list(d['columns'].keys()) fields.append('geometry') features =", "str :param crs: a coordinate reference system in EPSG format.", "the shape is not in the same path than the", "content def toGeoJSON(collection, name, path=None, split_at=4000): \"\"\" Export a FeatureCollection", "records = reader.shapeRecords() end = len(records) else: end = end", "if isinstance(projection, str): if 'EPSG:' in projection: projection = projection.split(':')[1]", "projection: projection = projection.split(':')[1] projection = 'EPSG:{}'.format(projection) # filter records", "= 'EPSG:4326' elif cleancrs[-2] == 'EPSG': crs = '{}:{}'.format(cleancrs[-2], cleancrs[-1])", "ee.batch.data.getAssetRoots()[0]['id'] assetPath = \"{}/{}\".format(user, assetPath) if create: # Recrusive create", "ee.feature.Feature(ee_geom, properties) features.append(ee_feat) return tuple(features) def fromKML(filename=None, data=None, crs=None, encoding=None):", "if 'EPSG:' in projection: projection = projection.split(':')[1] projection = 'EPSG:{}'.format(projection)", "\"\"\" Alternative to download a FeatureCollection as a CSV \"\"\"", "0 if not end: records = reader.shapeRecords() end = len(records)", "tuple of features. \"\"\" geojsondict = utils.kmlToGeoJsonDict(filename, data, encoding) features", "in EPSG format. 
If not specified it will try to", "'{}:{}'.format(cleancrs[-2], cleancrs[-1]) else: raise ValueError('{} not recognized'.format(name)) except IndexError: raise", ":rtype: file \"\"\" import json import os if not path:", "(assetPath.split('/')[0] == 'users') if not is_user: user = ee.batch.data.getAssetRoots()[0]['id'] assetPath", "failing when attempting to create a FeatureCollection (Broken Pipe ERROR)", "crs else crs # catch a string with format \"EPSG:XXX\"", "limits.map(over_limits) collections = ee.List( ee.Algorithms.If(condition, greater(), ee.List([collection]))) collections_size = collections.size().getInfo()", "= fid properties['geometry'] = geom # write row writer.writerow(properties) return", "than the script, specify a path instead. :type filename: str", "[field[1] for field in fields] types = dict(zip(field_names, field_types)) features", "reader.shapeRecords() end = len(records) else: end = end + 1", "The path where to save the file. If None, will", "not crs: filecrs = geodict.get('crs') if filecrs: name = filecrs.get('properties').get('name')", "name.split(':') cleancrs = [part for part in splitcrs if part]", "features: properties = feature['properties'] fid = feature['id'] geom = feature['geometry']['type']", "in the asset path is_user = (assetPath.split('/')[0] == 'users') if", "'w') as thefile: thefile.write(json.dumps(content)) return thefile def toCSV(collection, filename, split_at=4000):", "remove styleUrl prop = feat['properties'] if 'styleUrl' in prop: prop.pop('styleUrl')", "size.gte(4999) def greater(): size = collection.size() seq = tools.ee_list.sequence(0, size,", "greater(): size = collection.size() seq = tools.ee_list.sequence(0, size, split_at) limits", "atr) features.append(feat) return ee.FeatureCollection(features) def fromGeoJSON(filename=None, data=None, crs=None): \"\"\" Create", "# Init task task = ee.batch.Export.table.toAsset(table, assetId=assetId, description=description, **kwargs) task.start()", "not crs else crs # catch 
a string with format", "= '{}:{}'.format(cleancrs[-2], cleancrs[-1]) else: raise ValueError('{} not recognized'.format(name)) except IndexError:", "open(filename, 'w') as thecsv: writer = csv.DictWriter(thecsv, fields) writer.writeheader() #", "Defaults to CSV. :param selectors: The selectors that should be", "due to failing when attempting to create a FeatureCollection (Broken", "# remove Z value if needed geom = feat['geometry'] ty", "if not end: records = reader.shapeRecords() end = len(records) else:", "of Features from a GeoJSON file. Return a python tuple", "= {} for fld, rec in zip(field_names, sr.record): fld_type =", "splitcrs = name.split(':') cleancrs = [part for part in splitcrs", "of this function to a ee.List or using it directly", "= c.getInfo() feats_c = content_c['features'] feats = feats + feats_c", "= ee.List( ee.Algorithms.If(condition, greater(), ee.List([collection]))) collections_size = collections.size().getInfo() col =", "'D': value = ee.Date(rec.isoformat()).millis().getInfo() elif fld_type in ['C', 'N', 'F']:", "a FeatureCollection as a CSV \"\"\" d = toDict(collection, split_at)", "is specified in the asset path is_user = (assetPath.split('/')[0] ==", ":type name: str :return: the tasks :rtype: ee.batch.Task \"\"\" #", "list. 
You can try creating it yourself casting the result", "to load :type filename: str :param crs: a coordinate reference", "cleancrs[-1]) else: raise ValueError('{} not recognized'.format(name)) except IndexError: raise ValueError('{}", "raise ValueError('{} not recognized'.format(name)) except IndexError: raise ValueError('{} not recognized'.format(name))", "shapefile wgs84 = ee.Projection('EPSG:4326') # read the filename reader =", "= feat.get('geometry') ty = geom.get('type') coords = geom.get('coordinates') if ty", "== 'EPSG': crs = '{}:{}'.format(cleancrs[-2], cleancrs[-1]) else: raise ValueError('{} not", "features = d['features'] ext = filename[-4:] if ext != '.csv':", "a coordinate reference system in EPSG format. If not specified", "to CSV. :param selectors: The selectors that should be used", "utils.createAssets([path2create], 'Folder', True) # Asset ID (Path + name) assetId", "name of the file to be downloaded \"\"\" if not", "with less than 1000 records and doesn't handle complex shapes.", "\"Can't process more than 1000 records at a time. 
Found", "geom = feat['geometry'] ty = geom['type'] if ty == 'GeometryCollection':", "ee.Date(rec.isoformat()).millis().getInfo() elif fld_type in ['C', 'N', 'F']: value = rec", "return limits.map(over_limits) collections = ee.List( ee.Algorithms.If(condition, greater(), ee.List([collection]))) collections_size =", "features = [] # Get crs from GeoJSON if not", "= feats + feats_c content['features'] = feats return content def", "size = collection.size() condition = size.gte(4999) def greater(): size =", "write row writer.writerow(properties) return thecsv def toLocal(collection, filename, filetype=None, selectors=None,", "'.csv': filename += '.csv' with open(filename, 'w') as thecsv: writer", ":type filename: str :param crs: a coordinate reference system in", "thefile: thefile.write(json.dumps(content)) return thefile def toCSV(collection, filename, split_at=4000): \"\"\" Alternative", "creating it yourself casting the result of this function to", "name of the file to load :type filename: str :param", "create: # Recrusive create path path2create = assetPath # '/'.join(assetPath.split('/')[:-1])", "geoj.read() geodict = json.loads(content) else: geodict = data features =", "opt_proj=crs) else: if ty == 'Polygon': coords = utils.removeZ(coords) if", "= geojsondict['features'] for feat in features: # remove styleUrl prop", "= filename[-4:] if ext != '.csv': filename += '.csv' with", "feat.get('properties') geom = feat.get('geometry') ty = geom.get('type') coords = geom.get('coordinates')", "in fields] types = dict(zip(field_names, field_types)) features = [] projection", "properties['system:index'] = fid properties['geometry'] = geom # write row writer.writerow(properties)", "reference system in EPSG format. 
If not specified it will", "GeoJSON if not crs: filecrs = geodict.get('crs') if filecrs: name", "split_at) fields = list(d['columns'].keys()) fields.append('geometry') features = d['features'] ext =", "[] # Get crs from GeoJSON if not crs: filecrs", "create path path2create = assetPath # '/'.join(assetPath.split('/')[:-1]) utils.createAssets([path2create], 'Folder', True)", "in features: # remove styleUrl prop = feat['properties'] if 'styleUrl'", "Get the FeatureCollection as a dict object \"\"\" size =", "description = utils.matchDescription(name) # Init task task = ee.batch.Export.table.toAsset(table, assetId=assetId,", "ee_feat = ee.feature.Feature(ee_geom, properties) features.append(ee_feat) return tuple(features) def fromKML(filename=None, data=None,", "\\ .transform(wgs84, 1) else: geometry = ee.Geometry(geom) feat = ee.Feature(geometry,", "= (assetPath.split('/')[0] == 'users') if not is_user: user = ee.batch.data.getAssetRoots()[0]['id']", "projection) \\ .transform(wgs84, 1) else: geometry = ee.Geometry(geom) feat =", ":param collection: The collection to export :type collection: ee.FeatureCollection :param", "filename reader = shapefile.Reader(filename) fields = reader.fields[1:] field_names = [field[0]", "styleUrl prop = feat['properties'] if 'styleUrl' in prop: prop.pop('styleUrl') #", "geodict = json.loads(content) else: geodict = data features = []", "split_at: int :return: A GeoJSON (.geojson) file. :rtype: file \"\"\"", "ee.FeatureCollection :param name: name of the resulting file :type name:", "assetId=assetId, description=description, **kwargs) task.start() if verbose: print('Exporting {} to {}'.format(name,", "file a CSV or geoJSON file. This uses a different", "= [field[1] for field in fields] types = dict(zip(field_names, field_types))", "list of Features from a GeoJSON file. 
Return a python", "assetPath) if create: # Recrusive create path path2create = assetPath", "split_at: limit to avoid an EE Exception :type split_at: int", "+ name) :type name: str :return: the tasks :rtype: ee.batch.Task", "import shapefile wgs84 = ee.Projection('EPSG:4326') # read the filename reader", "task = ee.batch.Export.table.toAsset(table, assetId=assetId, description=description, **kwargs) task.start() if verbose: print('Exporting", "name: str :param path: The path where to save the", "= utils.getProjection(filename) if not crs else crs # catch a", "as geoj: content = geoj.read() geodict = json.loads(content) else: geodict", "content_c = c.getInfo() feats_c = content_c['features'] feats = feats +", "than 1000 records and doesn't handle complex shapes. :param filename:", "downloaded. :param filename: The name of the file to be", "else crs # catch a string with format \"EPSG:XXX\" if", "attributes will be downloaded. :param filename: The name of the", "ext = filename[-4:] if ext != '.csv': filename += '.csv'", "EE Exception :type split_at: int :return: A GeoJSON (.geojson) file.", "'users') if not is_user: user = ee.batch.data.getAssetRoots()[0]['id'] assetPath = \"{}/{}\".format(user,", "Export a FeatureCollection to a GeoJSON file :param collection: The", "collections_size): c = ee.FeatureCollection(collections.get(i)) content_c = c.getInfo() feats_c = content_c['features']", "\"\"\" Download a FeatureCollection to a local file a CSV", "collection.size() condition = size.gte(4999) def greater(): size = collection.size() seq", "either CSV or JSON. Defaults to CSV. 
:param selectors: The", "name: filename for the image (AssetID will be assetPath +", ":return: the tasks :rtype: ee.batch.Task \"\"\" # Check if the", "ee.Feature(geometry, atr) features.append(feat) return ee.FeatureCollection(features) def fromGeoJSON(filename=None, data=None, crs=None): \"\"\"", "geoj: content = geoj.read() geodict = json.loads(content) else: geodict =", "= len(records) else: end = end + 1 if (end-start)>1000:", "'w') as thecsv: writer = csv.DictWriter(thecsv, fields) writer.writeheader() # write", "if (end-start)>1000: msg = \"Can't process more than 1000 records", "crs=crs) def toDict(collection, split_at=4000): \"\"\" Get the FeatureCollection as a", "the filename reader = shapefile.Reader(filename) fields = reader.fields[1:] field_names =", "filename) thefile = utils.downloadFile(url, filename, filetype, path) return thefile def", "assetPath = \"{}/{}\".format(user, assetPath) if create: # Recrusive create path", "end + 1 if (end-start)>1000: msg = \"Can't process more", "# Asset ID (Path + name) assetId = '/'.join([assetPath, name])", "If the shape is not in the same path than", "create folders and ImageCollections on the fly. The rest is", "can try creating it yourself casting the result of this", "it from the geoJSON, and if not there it will", ":param filetype: The filetype of download, either CSV or JSON.", "str :param path: The path where to save the file.", "to a GeoJSON file :param collection: The collection to export", "row writer.writerow(properties) return thecsv def toLocal(collection, filename, filetype=None, selectors=None, path=None):", "the list. You can try creating it yourself casting the", "You can pass the same params as the original function", "KML file. Return a python tuple with ee.Feature inside. 
This", "= 'EPSG:{}'.format(projection) # filter records with start and end start", "= filecrs.get('properties').get('name') splitcrs = name.split(':') cleancrs = [part for part", "start=None, end=None): \"\"\" Convert an ESRI file (.shp and .dbf", "can pass the same params as the original function :param", "geodict.get('crs') if filecrs: name = filecrs.get('properties').get('name') splitcrs = name.split(':') cleancrs", "ty = geom.get('type') coords = geom.get('coordinates') if ty == 'GeometryCollection':", "the file. If None, will be saved in the current", "file (.shp and .dbf must be present) to a ee.FeatureCollection", "(end-start)>1000: msg = \"Can't process more than 1000 records at", "'EPSG:4326' elif cleancrs[-2] == 'EPSG': crs = '{}:{}'.format(cleancrs[-2], cleancrs[-1]) else:", "file \"\"\" import json import os if not path: path", "selectors that should be used to determine which attributes will", "path path2create = assetPath # '/'.join(assetPath.split('/')[:-1]) utils.createAssets([path2create], 'Folder', True) #", "ImageCollections on the fly. The rest is the same to", "= [] projection = utils.getProjection(filename) if not crs else crs", "isinstance(projection, str): if 'EPSG:' in projection: projection = projection.split(':')[1] projection", "if not filetype: filetype = 'CSV' url = collection.getDownloadURL(filetype, selectors,", "= ee.FeatureCollection(collections.get(0)) content = col.getInfo() feats = content['features'] for i", "asset path is_user = (assetPath.split('/')[0] == 'users') if not is_user:", "fly. The rest is the same to Export.image.toAsset. You can", "the name of the file to load :type filename: str", "writer.writeheader() # write rows for feature in features: properties =", "'Folder', True) # Asset ID (Path + name) assetId =", "coding=utf-8 import ee from . 
import utils import json import", "in enumerate(geodict.get('features')): properties = feat.get('properties') geom = feat.get('geometry') ty =", "= toDict(collection, split_at) fields = list(d['columns'].keys()) fields.append('geometry') features = d['features']", "split_at=4000): \"\"\" Get the FeatureCollection as a dict object \"\"\"", "in projection: projection = projection.split(':')[1] projection = 'EPSG:{}'.format(projection) # filter", "collections = ee.List( ee.Algorithms.If(condition, greater(), ee.List([collection]))) collections_size = collections.size().getInfo() col", "import os if not path: path = os.getcwd() # name", "same params as the original function :param table: the feature", "'/'.join(assetPath.split('/')[:-1]) utils.createAssets([path2create], 'Folder', True) # Asset ID (Path + name)", "At the moment only works for shapes with less than", "get it from the geoJSON, and if not there it", "g in geometries: c = g['coordinates'] utils.removeZ(c) else: coords =", "table: the feature collection to upload :type table: ee.FeatureCollection :param", "on the fly. The rest is the same to Export.image.toAsset.", "csv from .. import tools def fromShapefile(filename, crs=None, start=None, end=None):", "download, either CSV or JSON. Defaults to CSV. :param selectors:", "not end: records = reader.shapeRecords() end = len(records) else: end", ":type name: str :param path: The path where to save", "ValueError(msg.format(end-start)) for i in range(start, end): # atr = dict(zip(field_names,", "or geoJSON file. This uses a different method than `toGeoJSON`", "can create folders and ImageCollections on the fly. 
The rest", "'GeometryCollection': ee_geom = utils.GEOMETRY_TYPES.get(ty)(geom, opt_proj=crs) else: if ty == 'Polygon':", "toAsset(table, assetPath, name=None, create=True, verbose=False, **kwargs): \"\"\" This function can", "limit to avoid an EE Exception :type split_at: int :return:", "== 'users') if not is_user: user = ee.batch.data.getAssetRoots()[0]['id'] assetPath =", "= collections.size().getInfo() col = ee.FeatureCollection(collections.get(0)) content = col.getInfo() feats =", "shapefile.Reader(filename) fields = reader.fields[1:] field_names = [field[0] for field in", "write rows for feature in features: properties = feature['properties'] fid", "be present) to a ee.FeatureCollection At the moment only works", "data=None, crs=None): \"\"\" Create a list of Features from a", "else: crs = 'EPSG:4326' for n, feat in enumerate(geodict.get('features')): properties", "\"\"\" # Check if the user is specified in the", "a CSV or geoJSON file. This uses a different method", "be downloaded. :param filename: The name of the file to", "\"\"\" if filename: with open(filename, 'r') as geoj: content =", "an ESRI file (.shp and .dbf must be present) to", "for shapes with less than 1000 records and doesn't handle", "geojsondict['features'] for feat in features: # remove styleUrl prop =", "rest is the same to Export.image.toAsset. You can pass the", "Found {}\" raise ValueError(msg.format(end-start)) for i in range(start, end): #", "Return a python tuple with ee.Feature inside. 
This is due", "def toDict(collection, split_at=4000): \"\"\" Get the FeatureCollection as a dict", "utils.kmlToGeoJsonDict(filename, data, encoding) features = geojsondict['features'] for feat in features:", "i in range(start, end): # atr = dict(zip(field_names, sr.record)) sr", "raise ValueError('{} not recognized'.format(name)) else: crs = 'EPSG:4326' for n,", "if not crs else crs # catch a string with", "else: continue atr[fld] = value geom = sr.shape.__geo_interface__ if projection", "# Get crs from GeoJSON if not crs: filecrs =", "= ee.feature.Feature(ee_geom, properties) features.append(ee_feat) return tuple(features) def fromKML(filename=None, data=None, crs=None,", "GeoJSON file :param collection: The collection to export :type collection:", ":type path: str :param split_at: limit to avoid an EE", "tuple of features. \"\"\" if filename: with open(filename, 'r') as", "= 'CSV' url = collection.getDownloadURL(filetype, selectors, filename) thefile = utils.downloadFile(url,", "c.getInfo() feats_c = content_c['features'] feats = feats + feats_c content['features']", "and ImageCollections on the fly. 
The rest is the same", "The name of the file to be downloaded \"\"\" if", "file :type name: str :param path: The path where to", ":param assetPath: path to upload the image (only PATH, without", "filename: with open(filename, 'r') as geoj: content = geoj.read() geodict", "coords = geom.get('coordinates') if ty == 'GeometryCollection': ee_geom = utils.GEOMETRY_TYPES.get(ty)(geom,", "!= '.geojson': fname = name+'.geojson' content = toDict(collection, split_at) with", "= utils.removeZ(coords) if utils.hasZ(coords) else coords ee_geom = utils.GEOMETRY_TYPES.get(ty)(coords, proj=ee.Projection(crs))", "from GeoJSON if not crs: filecrs = geodict.get('crs') if filecrs:", "in splitcrs if part] try: if cleancrs[-1] == 'CRS84': crs", "feat in enumerate(geodict.get('features')): properties = feat.get('properties') geom = feat.get('geometry') ty", "a list of Features from a KML file. Return a", "prop: prop.pop('styleUrl') # remove Z value if needed geom =", "ee.Algorithms.If(condition, greater(), ee.List([collection]))) collections_size = collections.size().getInfo() col = ee.FeatureCollection(collections.get(0)) content", "if not path: path = os.getcwd() # name if name[-8:-1]", "will rise an error :type: crs: str :return: a tuple", "filename += '.csv' with open(filename, 'w') as thecsv: writer =", ":param path: The path where to save the file. 
If", "crs from GeoJSON if not crs: filecrs = geodict.get('crs') if", "'.geojson': fname = name+'.geojson' content = toDict(collection, split_at) with open(os.path.join(path,", "str :param name: filename for the image (AssetID will be", "\"\"\" This function can create folders and ImageCollections on the", "feats + feats_c content['features'] = feats return content def toGeoJSON(collection,", "str :param split_at: limit to avoid an EE Exception :type", "ee.FeatureCollection At the moment only works for shapes with less", "rec in zip(field_names, sr.record): fld_type = types[fld] if fld_type ==", "start and end start = start if start else 0", "toDict(collection, split_at=4000): \"\"\" Get the FeatureCollection as a dict object", "condition = size.gte(4999) def greater(): size = collection.size() seq =", "read the filename reader = shapefile.Reader(filename) fields = reader.fields[1:] field_names", "to upload :type table: ee.FeatureCollection :param assetPath: path to upload", "file. :rtype: file \"\"\" import json import os if not", "content = toDict(collection, split_at) with open(os.path.join(path, fname), 'w') as thefile:", "ee.Geometry(geom, projection) \\ .transform(wgs84, 1) else: geometry = ee.Geometry(geom) feat", "filename: the name of the file to load :type filename:", "geometry = ee.Geometry(geom) feat = ee.Feature(geometry, atr) features.append(feat) return ee.FeatureCollection(features)", "= collection.size() condition = size.gte(4999) def greater(): size = collection.size()", "CSV. :param selectors: The selectors that should be used to", "'N', 'F']: value = rec else: continue atr[fld] = value", "def fromKML(filename=None, data=None, crs=None, encoding=None): \"\"\" Create a list of", ":return: a tuple of features. 
\"\"\" if filename: with open(filename,", "enumerate(geodict.get('features')): properties = feat.get('properties') geom = feat.get('geometry') ty = geom.get('type')", "except IndexError: raise ValueError('{} not recognized'.format(name)) else: crs = 'EPSG:4326'", "value if needed geom = feat['geometry'] ty = geom['type'] if", "fname = name+'.geojson' content = toDict(collection, split_at) with open(os.path.join(path, fname),", "as thecsv: writer = csv.DictWriter(thecsv, fields) writer.writeheader() # write rows", "geoJSON file. This uses a different method than `toGeoJSON` and", "content = geoj.read() geodict = json.loads(content) else: geodict = data", "= ee.Number(n.get(1)) return ee.FeatureCollection(collection.toList(ini, end)) return limits.map(over_limits) collections = ee.List(", "= ee.batch.Export.table.toAsset(table, assetId=assetId, description=description, **kwargs) task.start() if verbose: print('Exporting {}", "from . import utils import json import csv from ..", "= collection.getDownloadURL(filetype, selectors, filename) thefile = utils.downloadFile(url, filename, filetype, path)", "the image (only PATH, without filename) :type assetPath: str :param", "= sr.shape.__geo_interface__ if projection is not None: geometry = ee.Geometry(geom,", "toDict(collection, split_at) fields = list(d['columns'].keys()) fields.append('geometry') features = d['features'] ext", ":rtype: ee.batch.Task \"\"\" # Check if the user is specified", "from a GeoJSON file. Return a python tuple with ee.Feature", "to save the file. If None, will be saved in", "n = ee.List(n) ini = ee.Number(n.get(0)) end = ee.Number(n.get(1)) return", "not in the same path than the script, specify a", "a python tuple with ee.Feature inside. 
This is due to", "\"EPSG:XXX\" if isinstance(projection, str): if 'EPSG:' in projection: projection =", "tuple(features) def fromKML(filename=None, data=None, crs=None, encoding=None): \"\"\" Create a list", "= json.loads(content) else: geodict = data features = [] #", "as a FeatureCollection argument. :param filename: the name of the", "ee.List( ee.Algorithms.If(condition, greater(), ee.List([collection]))) collections_size = collections.size().getInfo() col = ee.FeatureCollection(collections.get(0))", "None, will be saved in the current folder :type path:", "= '/'.join([assetPath, name]) # Description description = utils.matchDescription(name) # Init", ":param start: :return: the FeatureCollection :rtype: ee.FeatureCollection \"\"\" import shapefile", "current folder :type path: str :param split_at: limit to avoid", "'/'.join([assetPath, name]) # Description description = utils.matchDescription(name) # Init task", "filetype=None, selectors=None, path=None): \"\"\" Download a FeatureCollection to a local", "to get it from the geoJSON, and if not there", "not is_user: user = ee.batch.data.getAssetRoots()[0]['id'] assetPath = \"{}/{}\".format(user, assetPath) if", "ee_geom = utils.GEOMETRY_TYPES.get(ty)(geom, opt_proj=crs) else: if ty == 'Polygon': coords", "Features from a KML file. Return a python tuple with", "ty = geom['type'] if ty == 'GeometryCollection': geometries = geom['geometries']", "ee.List(n) ini = ee.Number(n.get(0)) end = ee.Number(n.get(1)) return ee.FeatureCollection(collection.toList(ini, end))", "= collection.size() seq = tools.ee_list.sequence(0, size, split_at) limits = ee.List.zip(seq.slice(1),", "A GeoJSON (.geojson) file. :rtype: file \"\"\" import json import", "from the geoJSON, and if not there it will rise", "= ee.batch.data.getAssetRoots()[0]['id'] assetPath = \"{}/{}\".format(user, assetPath) if create: # Recrusive", "of the filename. 
If the shape is not in the", "= ee.Geometry(geom, projection) \\ .transform(wgs84, 1) else: geometry = ee.Geometry(geom)", "a GeoJSON file :param collection: The collection to export :type", "url = collection.getDownloadURL(filetype, selectors, filename) thefile = utils.downloadFile(url, filename, filetype,", "image (only PATH, without filename) :type assetPath: str :param name:", "fromGeoJSON(data=geojsondict, crs=crs) def toDict(collection, split_at=4000): \"\"\" Get the FeatureCollection as", "feats = feats + feats_c content['features'] = feats return content", "= types[fld] if fld_type == 'D': value = ee.Date(rec.isoformat()).millis().getInfo() elif", "the current folder :type path: str :param split_at: limit to", "Download a FeatureCollection to a local file a CSV or", "object \"\"\" size = collection.size() condition = size.gte(4999) def greater():", "return ee.FeatureCollection(collection.toList(ini, end)) return limits.map(over_limits) collections = ee.List( ee.Algorithms.If(condition, greater(),", "fld, rec in zip(field_names, sr.record): fld_type = types[fld] if fld_type", "download a FeatureCollection as a CSV \"\"\" d = toDict(collection,", ":param filename: The name of the file to be downloaded", "['C', 'N', 'F']: value = rec else: continue atr[fld] =", "not there it will rise an error :type: crs: str", "geom['coordinates'] utils.removeZ(coords) return fromGeoJSON(data=geojsondict, crs=crs) def toDict(collection, split_at=4000): \"\"\" Get", "\"\"\" geojsondict = utils.kmlToGeoJsonDict(filename, data, encoding) features = geojsondict['features'] for", "collections_size = collections.size().getInfo() col = ee.FeatureCollection(collections.get(0)) content = col.getInfo() feats", "path where to save the file. 
If None, will be", "+ feats_c content['features'] = feats return content def toGeoJSON(collection, name,", "= feat.get('properties') geom = feat.get('geometry') ty = geom.get('type') coords =", "= geom.get('type') coords = geom.get('coordinates') if ty == 'GeometryCollection': ee_geom", "seq) def over_limits(n): n = ee.List(n) ini = ee.Number(n.get(0)) end", "# coding=utf-8 import ee from . import utils import json", "features.append(feat) return ee.FeatureCollection(features) def fromGeoJSON(filename=None, data=None, crs=None): \"\"\" Create a", "feats_c = content_c['features'] feats = feats + feats_c content['features'] =", "moment only works for shapes with less than 1000 records", "data=None, crs=None, encoding=None): \"\"\" Create a list of Features from", "save the file. If None, will be saved in the", "# '/'.join(assetPath.split('/')[:-1]) utils.createAssets([path2create], 'Folder', True) # Asset ID (Path +", "= start if start else 0 if not end: records", "= geoj.read() geodict = json.loads(content) else: geodict = data features", "filename for the image (AssetID will be assetPath + name)", "# filter records with start and end start = start", "with format \"EPSG:XXX\" if isinstance(projection, str): if 'EPSG:' in projection:", "proj=ee.Projection(crs)) ee_feat = ee.feature.Feature(ee_geom, properties) features.append(ee_feat) return tuple(features) def fromKML(filename=None,", "in range(0, collections_size): c = ee.FeatureCollection(collections.get(i)) content_c = c.getInfo() feats_c", "This uses a different method than `toGeoJSON` and `toCSV` :param", "# remove styleUrl prop = feat['properties'] if 'styleUrl' in prop:", "geometries: c = g['coordinates'] utils.removeZ(c) else: coords = geom['coordinates'] utils.removeZ(coords)", "will be saved in the current folder :type path: str", "used to determine which attributes will be downloaded. 
:param filename:", "open(filename, 'r') as geoj: content = geoj.read() geodict = json.loads(content)", "ee.batch.Export.table.toAsset(table, assetId=assetId, description=description, **kwargs) task.start() if verbose: print('Exporting {} to", "projection = utils.getProjection(filename) if not crs else crs # catch", "def greater(): size = collection.size() seq = tools.ee_list.sequence(0, size, split_at)", "= csv.DictWriter(thecsv, fields) writer.writeheader() # write rows for feature in", "ee.Number(n.get(0)) end = ee.Number(n.get(1)) return ee.FeatureCollection(collection.toList(ini, end)) return limits.map(over_limits) collections", "types[fld] if fld_type == 'D': value = ee.Date(rec.isoformat()).millis().getInfo() elif fld_type", ":param filename: the name of the filename. If the shape", "`toGeoJSON` and `toCSV` :param filetype: The filetype of download, either", "a list of Features from a GeoJSON file. Return a", "filetype of download, either CSV or JSON. Defaults to CSV.", "JSON. Defaults to CSV. :param selectors: The selectors that should", "geom # write row writer.writerow(properties) return thecsv def toLocal(collection, filename,", ":param filename: the name of the file to load :type", "ERROR) out of the list. 
You can try creating it", "result of this function to a ee.List or using it", "remove Z value if needed geom = feat['geometry'] ty =", "in prop: prop.pop('styleUrl') # remove Z value if needed geom", "in features: properties = feature['properties'] fid = feature['id'] geom =", "file :param collection: The collection to export :type collection: ee.FeatureCollection", "name=None, create=True, verbose=False, **kwargs): \"\"\" This function can create folders", "must be present) to a ee.FeatureCollection At the moment only", "utils.matchDescription(name) # Init task task = ee.batch.Export.table.toAsset(table, assetId=assetId, description=description, **kwargs)", "str): if 'EPSG:' in projection: projection = projection.split(':')[1] projection =", "directly as a FeatureCollection argument. :param filename: the name of", "= ee.Geometry(geom) feat = ee.Feature(geometry, atr) features.append(feat) return ee.FeatureCollection(features) def", "or using it directly as a FeatureCollection argument. 
:param filename:", "# catch a string with format \"EPSG:XXX\" if isinstance(projection, str):", "it will try to get it from the geoJSON, and", "to a ee.FeatureCollection At the moment only works for shapes", "try creating it yourself casting the result of this function", "not recognized'.format(name)) except IndexError: raise ValueError('{} not recognized'.format(name)) else: crs", "feature['properties'] fid = feature['id'] geom = feature['geometry']['type'] # match fields", "start if start else 0 if not end: records =", "end = end + 1 if (end-start)>1000: msg = \"Can't", "ee.FeatureCollection(collections.get(0)) content = col.getInfo() feats = content['features'] for i in", "coords ee_geom = utils.GEOMETRY_TYPES.get(ty)(coords, proj=ee.Projection(crs)) ee_feat = ee.feature.Feature(ee_geom, properties) features.append(ee_feat)", "of the file to be downloaded \"\"\" if not filetype:", "crs = 'EPSG:4326' for n, feat in enumerate(geodict.get('features')): properties =", "upload :type table: ee.FeatureCollection :param assetPath: path to upload the", ":rtype: ee.FeatureCollection \"\"\" import shapefile wgs84 = ee.Projection('EPSG:4326') # read", "Recrusive create path path2create = assetPath # '/'.join(assetPath.split('/')[:-1]) utils.createAssets([path2create], 'Folder',", "be used to determine which attributes will be downloaded. :param", "field_types = [field[1] for field in fields] types = dict(zip(field_names,", "an error :type: crs: str :return: a tuple of features.", "it will rise an error :type: crs: str :return: a", "user is specified in the asset path is_user = (assetPath.split('/')[0]", "return fromGeoJSON(data=geojsondict, crs=crs) def toDict(collection, split_at=4000): \"\"\" Get the FeatureCollection", "or JSON. Defaults to CSV. 
:param selectors: The selectors that", "You can try creating it yourself casting the result of", "[field[0] for field in fields] field_types = [field[1] for field", "greater(), ee.List([collection]))) collections_size = collections.size().getInfo() col = ee.FeatureCollection(collections.get(0)) content =", "continue atr[fld] = value geom = sr.shape.__geo_interface__ if projection is", "the fly. The rest is the same to Export.image.toAsset. You", "(AssetID will be assetPath + name) :type name: str :return:", "start: :return: the FeatureCollection :rtype: ee.FeatureCollection \"\"\" import shapefile wgs84", "filename[-4:] if ext != '.csv': filename += '.csv' with open(filename,", "crs=None, start=None, end=None): \"\"\" Convert an ESRI file (.shp and", "Create a list of Features from a GeoJSON file. Return", "a FeatureCollection argument. :param filename: the name of the file", "import json import os if not path: path = os.getcwd()", "the geoJSON, and if not there it will rise an", "zip(field_names, sr.record): fld_type = types[fld] if fld_type == 'D': value", "the original function :param table: the feature collection to upload", "name: str :return: the tasks :rtype: ee.batch.Task \"\"\" # Check", "as a CSV \"\"\" d = toDict(collection, split_at) fields =", "\"\"\" d = toDict(collection, split_at) fields = list(d['columns'].keys()) fields.append('geometry') features", "the image (AssetID will be assetPath + name) :type name:", "= content['features'] for i in range(0, collections_size): c = ee.FeatureCollection(collections.get(i))", "atr = dict(zip(field_names, sr.record)) sr = reader.shapeRecord(i) atr = {}", "\"\"\" if not filetype: filetype = 'CSV' url = collection.getDownloadURL(filetype,", "for feat in features: # remove styleUrl prop = feat['properties']", "Get crs from GeoJSON if not crs: filecrs = geodict.get('crs')", "reader.shapeRecord(i) atr = {} for fld, rec in zip(field_names, sr.record):", "doesn't handle complex shapes. 
:param filename: the name of the", "not path: path = os.getcwd() # name if name[-8:-1] !=", "'Polygon': coords = utils.removeZ(coords) if utils.hasZ(coords) else coords ee_geom =", "collection: ee.FeatureCollection :param name: name of the resulting file :type", "for field in fields] types = dict(zip(field_names, field_types)) features =", "the filename. If the shape is not in the same", "to upload the image (only PATH, without filename) :type assetPath:", "encoding=None): \"\"\" Create a list of Features from a KML", "= ee.FeatureCollection(collections.get(i)) content_c = c.getInfo() feats_c = content_c['features'] feats =", "file to load :type filename: str :param crs: a coordinate", "# write rows for feature in features: properties = feature['properties']", "if ty == 'Polygon': coords = utils.removeZ(coords) if utils.hasZ(coords) else", "dict(zip(field_names, field_types)) features = [] projection = utils.getProjection(filename) if not", "+ 1 if (end-start)>1000: msg = \"Can't process more than", "of the file to load :type filename: str :param crs:", "fields properties['system:index'] = fid properties['geometry'] = geom # write row", "properties = feat.get('properties') geom = feat.get('geometry') ty = geom.get('type') coords", "= os.getcwd() # name if name[-8:-1] != '.geojson': fname =", "ee.FeatureCollection \"\"\" import shapefile wgs84 = ee.Projection('EPSG:4326') # read the", "FeatureCollection as a dict object \"\"\" size = collection.size() condition", "not filetype: filetype = 'CSV' url = collection.getDownloadURL(filetype, selectors, filename)", "'r') as geoj: content = geoj.read() geodict = json.loads(content) else:", "if create: # Recrusive create path path2create = assetPath #", "sr = reader.shapeRecord(i) atr = {} for fld, rec in", "elif fld_type in ['C', 'N', 'F']: value = rec else:", "= ee.Feature(geometry, atr) features.append(feat) return ee.FeatureCollection(features) def fromGeoJSON(filename=None, data=None, crs=None):", "geodict = data 
features = [] # Get crs from", "\"\"\" import json import os if not path: path =", "CSV or geoJSON file. This uses a different method than", "specified in the asset path is_user = (assetPath.split('/')[0] == 'users')", "[part for part in splitcrs if part] try: if cleancrs[-1]", "as a dict object \"\"\" size = collection.size() condition =", "\"\"\" Create a list of Features from a KML file.", "sr.record)) sr = reader.shapeRecord(i) atr = {} for fld, rec", "to Export.image.toAsset. You can pass the same params as the", "and .dbf must be present) to a ee.FeatureCollection At the", "utils import json import csv from .. import tools def", "script, specify a path instead. :type filename: str :param start:", "table: ee.FeatureCollection :param assetPath: path to upload the image (only", "fields] types = dict(zip(field_names, field_types)) features = [] projection =", "fields = list(d['columns'].keys()) fields.append('geometry') features = d['features'] ext = filename[-4:]", "if projection is not None: geometry = ee.Geometry(geom, projection) \\", "dict object \"\"\" size = collection.size() condition = size.gte(4999) def", "content_c['features'] feats = feats + feats_c content['features'] = feats return", "return content def toGeoJSON(collection, name, path=None, split_at=4000): \"\"\" Export a", "in ['C', 'N', 'F']: value = rec else: continue atr[fld]", "= geom['type'] if ty == 'GeometryCollection': geometries = geom['geometries'] for", "the tasks :rtype: ee.batch.Task \"\"\" # Check if the user", "if ty == 'GeometryCollection': ee_geom = utils.GEOMETRY_TYPES.get(ty)(geom, opt_proj=crs) else: if", "GeoJSON file. Return a python tuple with ee.Feature inside. This", "ee.FeatureCollection(features) def fromGeoJSON(filename=None, data=None, crs=None): \"\"\" Create a list of", "filename: The name of the file to be downloaded \"\"\"", "Export.image.toAsset. 
You can pass the same params as the original", "features = geojsondict['features'] for feat in features: # remove styleUrl", "task task = ee.batch.Export.table.toAsset(table, assetId=assetId, description=description, **kwargs) task.start() if verbose:", "Z value if needed geom = feat['geometry'] ty = geom['type']", "features = [] projection = utils.getProjection(filename) if not crs else", "end = len(records) else: end = end + 1 if", "range(0, collections_size): c = ee.FeatureCollection(collections.get(i)) content_c = c.getInfo() feats_c =", "else: coords = geom['coordinates'] utils.removeZ(coords) return fromGeoJSON(data=geojsondict, crs=crs) def toDict(collection,", "= [] # Get crs from GeoJSON if not crs:", "+ name) assetId = '/'.join([assetPath, name]) # Description description =", "an EE Exception :type split_at: int :return: A GeoJSON (.geojson)", "tools def fromShapefile(filename, crs=None, start=None, end=None): \"\"\" Convert an ESRI", "argument. :param filename: the name of the file to load", "the resulting file :type name: str :param path: The path", "ee_geom = utils.GEOMETRY_TYPES.get(ty)(coords, proj=ee.Projection(crs)) ee_feat = ee.feature.Feature(ee_geom, properties) features.append(ee_feat) return", "= utils.downloadFile(url, filename, filetype, path) return thefile def toAsset(table, assetPath,", "folders and ImageCollections on the fly. The rest is the", "if part] try: if cleancrs[-1] == 'CRS84': crs = 'EPSG:4326'", "features: # remove styleUrl prop = feat['properties'] if 'styleUrl' in", "\"\"\" size = collection.size() condition = size.gte(4999) def greater(): size", "end start = start if start else 0 if not", "1 if (end-start)>1000: msg = \"Can't process more than 1000", "int :return: A GeoJSON (.geojson) file. 
:rtype: file \"\"\" import", "as thefile: thefile.write(json.dumps(content)) return thefile def toCSV(collection, filename, split_at=4000): \"\"\"", "description=description, **kwargs) task.start() if verbose: print('Exporting {} to {}'.format(name, assetPath))", "CSV \"\"\" d = toDict(collection, split_at) fields = list(d['columns'].keys()) fields.append('geometry')", "method than `toGeoJSON` and `toCSV` :param filetype: The filetype of", "ValueError('{} not recognized'.format(name)) except IndexError: raise ValueError('{} not recognized'.format(name)) else:", "geometries = geom['geometries'] for g in geometries: c = g['coordinates']", "feat['geometry'] ty = geom['type'] if ty == 'GeometryCollection': geometries =", "filetype: filetype = 'CSV' url = collection.getDownloadURL(filetype, selectors, filename) thefile", "function can create folders and ImageCollections on the fly. The", "for fld, rec in zip(field_names, sr.record): fld_type = types[fld] if", "file. This uses a different method than `toGeoJSON` and `toCSV`", "else: raise ValueError('{} not recognized'.format(name)) except IndexError: raise ValueError('{} not", "there it will rise an error :type: crs: str :return:", "params as the original function :param table: the feature collection", "name of the filename. If the shape is not in", "filecrs = geodict.get('crs') if filecrs: name = filecrs.get('properties').get('name') splitcrs =", "Check if the user is specified in the asset path", "catch a string with format \"EPSG:XXX\" if isinstance(projection, str): if", "path is_user = (assetPath.split('/')[0] == 'users') if not is_user: user", "g['coordinates'] utils.removeZ(c) else: coords = geom['coordinates'] utils.removeZ(coords) return fromGeoJSON(data=geojsondict, crs=crs)", "= ee.Projection('EPSG:4326') # read the filename reader = shapefile.Reader(filename) fields", "import json import csv from .. 
import tools def fromShapefile(filename,", "= reader.fields[1:] field_names = [field[0] for field in fields] field_types", "filecrs: name = filecrs.get('properties').get('name') splitcrs = name.split(':') cleancrs = [part", "c = ee.FeatureCollection(collections.get(i)) content_c = c.getInfo() feats_c = content_c['features'] feats", ":type collection: ee.FeatureCollection :param name: name of the resulting file", "works for shapes with less than 1000 records and doesn't", "This function can create folders and ImageCollections on the fly.", "Convert an ESRI file (.shp and .dbf must be present)", ":type: crs: str :return: a tuple of features. \"\"\" if", "= toDict(collection, split_at) with open(os.path.join(path, fname), 'w') as thefile: thefile.write(json.dumps(content))", "PATH, without filename) :type assetPath: str :param name: filename for", "wgs84 = ee.Projection('EPSG:4326') # read the filename reader = shapefile.Reader(filename)", "time. Found {}\" raise ValueError(msg.format(end-start)) for i in range(start, end):", "json.loads(content) else: geodict = data features = [] # Get", "folder :type path: str :param split_at: limit to avoid an", "coordinate reference system in EPSG format. If not specified it", "match fields properties['system:index'] = fid properties['geometry'] = geom # write", "format. If not specified it will try to get it", "rows for feature in features: properties = feature['properties'] fid =", "import utils import json import csv from .. import tools", "attempting to create a FeatureCollection (Broken Pipe ERROR) out of", "if not crs: filecrs = geodict.get('crs') if filecrs: name =", "json import os if not path: path = os.getcwd() #", "+= '.csv' with open(filename, 'w') as thecsv: writer = csv.DictWriter(thecsv,", "try: if cleancrs[-1] == 'CRS84': crs = 'EPSG:4326' elif cleancrs[-2]", "ee.List or using it directly as a FeatureCollection argument. 
:param", "filetype = 'CSV' url = collection.getDownloadURL(filetype, selectors, filename) thefile =", "ee.FeatureCollection :param assetPath: path to upload the image (only PATH,", "1000 records and doesn't handle complex shapes. :param filename: the", "a FeatureCollection to a local file a CSV or geoJSON", "ESRI file (.shp and .dbf must be present) to a", "the same to Export.image.toAsset. You can pass the same params", "def over_limits(n): n = ee.List(n) ini = ee.Number(n.get(0)) end =", "if start else 0 if not end: records = reader.shapeRecords()", "Alternative to download a FeatureCollection as a CSV \"\"\" d", "in fields] field_types = [field[1] for field in fields] types", "split_at=4000): \"\"\" Alternative to download a FeatureCollection as a CSV", "feature in features: properties = feature['properties'] fid = feature['id'] geom", "(.geojson) file. :rtype: file \"\"\" import json import os if", "feats = content['features'] for i in range(0, collections_size): c =", "assetPath: path to upload the image (only PATH, without filename)", "(Path + name) assetId = '/'.join([assetPath, name]) # Description description", "return thefile def toAsset(table, assetPath, name=None, create=True, verbose=False, **kwargs): \"\"\"", "a different method than `toGeoJSON` and `toCSV` :param filetype: The", "if utils.hasZ(coords) else coords ee_geom = utils.GEOMETRY_TYPES.get(ty)(coords, proj=ee.Projection(crs)) ee_feat =", "records at a time. Found {}\" raise ValueError(msg.format(end-start)) for i", ":param name: name of the resulting file :type name: str", "= rec else: continue atr[fld] = value geom = sr.shape.__geo_interface__", "str :return: a tuple of features. \"\"\" if filename: with", "a GeoJSON file. Return a python tuple with ee.Feature inside.", "with ee.Feature inside. This is due to failing when attempting", "shape is not in the same path than the script,", "= geom['geometries'] for g in geometries: c = g['coordinates'] utils.removeZ(c)", "out of the list. 
You can try creating it yourself", "= content_c['features'] feats = feats + feats_c content['features'] = feats", "export :type collection: ee.FeatureCollection :param name: name of the resulting", "for g in geometries: c = g['coordinates'] utils.removeZ(c) else: coords", "features. \"\"\" if filename: with open(filename, 'r') as geoj: content", "= feats return content def toGeoJSON(collection, name, path=None, split_at=4000): \"\"\"", "= value geom = sr.shape.__geo_interface__ if projection is not None:", "True) # Asset ID (Path + name) assetId = '/'.join([assetPath,", "else coords ee_geom = utils.GEOMETRY_TYPES.get(ty)(coords, proj=ee.Projection(crs)) ee_feat = ee.feature.Feature(ee_geom, properties)", "assetPath: str :param name: filename for the image (AssetID will", "downloaded \"\"\" if not filetype: filetype = 'CSV' url =", "else: geometry = ee.Geometry(geom) feat = ee.Feature(geometry, atr) features.append(feat) return", "1) else: geometry = ee.Geometry(geom) feat = ee.Feature(geometry, atr) features.append(feat)", "'F']: value = rec else: continue atr[fld] = value geom", "is due to failing when attempting to create a FeatureCollection", "end: records = reader.shapeRecords() end = len(records) else: end =", "ee.Projection('EPSG:4326') # read the filename reader = shapefile.Reader(filename) fields =", "feat['properties'] if 'styleUrl' in prop: prop.pop('styleUrl') # remove Z value", ":type split_at: int :return: A GeoJSON (.geojson) file. :rtype: file", "user = ee.batch.data.getAssetRoots()[0]['id'] assetPath = \"{}/{}\".format(user, assetPath) if create: #", "a FeatureCollection (Broken Pipe ERROR) out of the list. You", "toLocal(collection, filename, filetype=None, selectors=None, path=None): \"\"\" Download a FeatureCollection to", "!= '.csv': filename += '.csv' with open(filename, 'w') as thecsv:", ":param table: the feature collection to upload :type table: ee.FeatureCollection", "handle complex shapes. 
:param filename: the name of the filename.", "feats_c content['features'] = feats return content def toGeoJSON(collection, name, path=None,", "it directly as a FeatureCollection argument. :param filename: the name", "reader = shapefile.Reader(filename) fields = reader.fields[1:] field_names = [field[0] for", "if needed geom = feat['geometry'] ty = geom['type'] if ty", "be saved in the current folder :type path: str :param", "Description description = utils.matchDescription(name) # Init task task = ee.batch.Export.table.toAsset(table,", "system in EPSG format. If not specified it will try", "toGeoJSON(collection, name, path=None, split_at=4000): \"\"\" Export a FeatureCollection to a", "the same params as the original function :param table: the", "where to save the file. If None, will be saved", "the feature collection to upload :type table: ee.FeatureCollection :param assetPath:", "writer = csv.DictWriter(thecsv, fields) writer.writeheader() # write rows for feature", "collection.getDownloadURL(filetype, selectors, filename) thefile = utils.downloadFile(url, filename, filetype, path) return", "is the same to Export.image.toAsset. You can pass the same", "projection = projection.split(':')[1] projection = 'EPSG:{}'.format(projection) # filter records with", "feature['id'] geom = feature['geometry']['type'] # match fields properties['system:index'] = fid", "len(records) else: end = end + 1 if (end-start)>1000: msg", "= assetPath # '/'.join(assetPath.split('/')[:-1]) utils.createAssets([path2create], 'Folder', True) # Asset ID", "ee.FeatureCollection(collection.toList(ini, end)) return limits.map(over_limits) collections = ee.List( ee.Algorithms.If(condition, greater(), ee.List([collection])))", "if ext != '.csv': filename += '.csv' with open(filename, 'w')", "if not is_user: user = ee.batch.data.getAssetRoots()[0]['id'] assetPath = \"{}/{}\".format(user, assetPath)", ":return: a tuple of features. 
\"\"\" geojsondict = utils.kmlToGeoJsonDict(filename, data,", "geojsondict = utils.kmlToGeoJsonDict(filename, data, encoding) features = geojsondict['features'] for feat", "Asset ID (Path + name) assetId = '/'.join([assetPath, name]) #", "start = start if start else 0 if not end:", "\"\"\" Export a FeatureCollection to a GeoJSON file :param collection:", "name: name of the resulting file :type name: str :param", "for i in range(start, end): # atr = dict(zip(field_names, sr.record))", "field_types)) features = [] projection = utils.getProjection(filename) if not crs", "EPSG format. If not specified it will try to get", "a CSV \"\"\" d = toDict(collection, split_at) fields = list(d['columns'].keys())", ":param crs: a coordinate reference system in EPSG format. If", "assetPath, name=None, create=True, verbose=False, **kwargs): \"\"\" This function can create", "'CRS84': crs = 'EPSG:4326' elif cleancrs[-2] == 'EPSG': crs =", "name of the resulting file :type name: str :param path:", "= shapefile.Reader(filename) fields = reader.fields[1:] field_names = [field[0] for field", "= utils.kmlToGeoJsonDict(filename, data, encoding) features = geojsondict['features'] for feat in", "feat in features: # remove styleUrl prop = feat['properties'] if", "ty == 'GeometryCollection': geometries = geom['geometries'] for g in geometries:", "size, split_at) limits = ee.List.zip(seq.slice(1), seq) def over_limits(n): n =", "d['features'] ext = filename[-4:] if ext != '.csv': filename +=", "properties['geometry'] = geom # write row writer.writerow(properties) return thecsv def", "toCSV(collection, filename, split_at=4000): \"\"\" Alternative to download a FeatureCollection as", "reader.fields[1:] field_names = [field[0] for field in fields] field_types =", "this function to a ee.List or using it directly as", "dict(zip(field_names, sr.record)) sr = reader.shapeRecord(i) atr = {} for fld,", ":return: the FeatureCollection :rtype: ee.FeatureCollection \"\"\" import shapefile wgs84 =", "path: 
The path where to save the file. If None,", "FeatureCollection (Broken Pipe ERROR) out of the list. You can", "and `toCSV` :param filetype: The filetype of download, either CSV", "if not there it will rise an error :type: crs:", "is_user = (assetPath.split('/')[0] == 'users') if not is_user: user =", "the FeatureCollection :rtype: ee.FeatureCollection \"\"\" import shapefile wgs84 = ee.Projection('EPSG:4326')", "for field in fields] field_types = [field[1] for field in", "at a time. Found {}\" raise ValueError(msg.format(end-start)) for i in", "import tools def fromShapefile(filename, crs=None, start=None, end=None): \"\"\" Convert an", "crs: str :return: a tuple of features. \"\"\" geojsondict =", "FeatureCollection argument. :param filename: the name of the file to", "value = rec else: continue atr[fld] = value geom =", "== 'GeometryCollection': geometries = geom['geometries'] for g in geometries: c", "thefile = utils.downloadFile(url, filename, filetype, path) return thefile def toAsset(table,", "end)) return limits.map(over_limits) collections = ee.List( ee.Algorithms.If(condition, greater(), ee.List([collection]))) collections_size", "with open(filename, 'w') as thecsv: writer = csv.DictWriter(thecsv, fields) writer.writeheader()", "part in splitcrs if part] try: if cleancrs[-1] == 'CRS84':", "and end start = start if start else 0 if", "with open(filename, 'r') as geoj: content = geoj.read() geodict =", "with start and end start = start if start else", "\"\"\" Create a list of Features from a GeoJSON file.", "part] try: if cleancrs[-1] == 'CRS84': crs = 'EPSG:4326' elif", "= tools.ee_list.sequence(0, size, split_at) limits = ee.List.zip(seq.slice(1), seq) def over_limits(n):", "= projection.split(':')[1] projection = 'EPSG:{}'.format(projection) # filter records with start", "prop.pop('styleUrl') # remove Z value if needed geom = feat['geometry']", "properties) features.append(ee_feat) return tuple(features) def fromKML(filename=None, data=None, crs=None, 
encoding=None): \"\"\"", "a ee.List or using it directly as a FeatureCollection argument.", "resulting file :type name: str :param path: The path where", "= \"{}/{}\".format(user, assetPath) if create: # Recrusive create path path2create", "= utils.GEOMETRY_TYPES.get(ty)(geom, opt_proj=crs) else: if ty == 'Polygon': coords =", "to export :type collection: ee.FeatureCollection :param name: name of the", "utils.hasZ(coords) else coords ee_geom = utils.GEOMETRY_TYPES.get(ty)(coords, proj=ee.Projection(crs)) ee_feat = ee.feature.Feature(ee_geom,", "path to upload the image (only PATH, without filename) :type", "tasks :rtype: ee.batch.Task \"\"\" # Check if the user is", "ee.batch.Task \"\"\" # Check if the user is specified in", "filetype: The filetype of download, either CSV or JSON. Defaults", "than `toGeoJSON` and `toCSV` :param filetype: The filetype of download,", "utils.getProjection(filename) if not crs else crs # catch a string", "is_user: user = ee.batch.data.getAssetRoots()[0]['id'] assetPath = \"{}/{}\".format(user, assetPath) if create:", "same path than the script, specify a path instead. :type", "utils.GEOMETRY_TYPES.get(ty)(geom, opt_proj=crs) else: if ty == 'Polygon': coords = utils.removeZ(coords)", "features.append(ee_feat) return tuple(features) def fromKML(filename=None, data=None, crs=None, encoding=None): \"\"\" Create", "path than the script, specify a path instead. :type filename:", "same to Export.image.toAsset. You can pass the same params as", "the same path than the script, specify a path instead.", "crs = 'EPSG:4326' elif cleancrs[-2] == 'EPSG': crs = '{}:{}'.format(cleancrs[-2],", "recognized'.format(name)) except IndexError: raise ValueError('{} not recognized'.format(name)) else: crs =", "= col.getInfo() feats = content['features'] for i in range(0, collections_size):", "a path instead. :type filename: str :param start: :return: the", "The rest is the same to Export.image.toAsset. You can pass", "more than 1000 records at a time. 
Found {}\" raise", "atr = {} for fld, rec in zip(field_names, sr.record): fld_type", "if 'styleUrl' in prop: prop.pop('styleUrl') # remove Z value if", "from .. import tools def fromShapefile(filename, crs=None, start=None, end=None): \"\"\"", "name+'.geojson' content = toDict(collection, split_at) with open(os.path.join(path, fname), 'w') as", "Features from a GeoJSON file. Return a python tuple with", "filename: str :param crs: a coordinate reference system in EPSG", "rec else: continue atr[fld] = value geom = sr.shape.__geo_interface__ if", "without filename) :type assetPath: str :param name: filename for the", "= [part for part in splitcrs if part] try: if", "in zip(field_names, sr.record): fld_type = types[fld] if fld_type == 'D':", "**kwargs) task.start() if verbose: print('Exporting {} to {}'.format(name, assetPath)) return", "load :type filename: str :param crs: a coordinate reference system", "FeatureCollection to a local file a CSV or geoJSON file.", "which attributes will be downloaded. 
:param filename: The name of", "present) to a ee.FeatureCollection At the moment only works for", "assetPath # '/'.join(assetPath.split('/')[:-1]) utils.createAssets([path2create], 'Folder', True) # Asset ID (Path", "else: end = end + 1 if (end-start)>1000: msg =", "= ee.List.zip(seq.slice(1), seq) def over_limits(n): n = ee.List(n) ini =", "crs # catch a string with format \"EPSG:XXX\" if isinstance(projection,", "over_limits(n): n = ee.List(n) ini = ee.Number(n.get(0)) end = ee.Number(n.get(1))", "data, encoding) features = geojsondict['features'] for feat in features: #", "fname), 'w') as thefile: thefile.write(json.dumps(content)) return thefile def toCSV(collection, filename,", "= g['coordinates'] utils.removeZ(c) else: coords = geom['coordinates'] utils.removeZ(coords) return fromGeoJSON(data=geojsondict,", "path) return thefile def toAsset(table, assetPath, name=None, create=True, verbose=False, **kwargs):", "if fld_type == 'D': value = ee.Date(rec.isoformat()).millis().getInfo() elif fld_type in", "thefile def toCSV(collection, filename, split_at=4000): \"\"\" Alternative to download a", "IndexError: raise ValueError('{} not recognized'.format(name)) else: crs = 'EPSG:4326' for", "import ee from . import utils import json import csv", "complex shapes. :param filename: the name of the filename. If", "'EPSG:4326' for n, feat in enumerate(geodict.get('features')): properties = feat.get('properties') geom", "of features. 
\"\"\" geojsondict = utils.kmlToGeoJsonDict(filename, data, encoding) features =", "ee.List.zip(seq.slice(1), seq) def over_limits(n): n = ee.List(n) ini = ee.Number(n.get(0))", "the file to be downloaded \"\"\" if not filetype: filetype", "task.start() if verbose: print('Exporting {} to {}'.format(name, assetPath)) return task", "filetype, path) return thefile def toAsset(table, assetPath, name=None, create=True, verbose=False,", "to create a FeatureCollection (Broken Pipe ERROR) out of the", "function :param table: the feature collection to upload :type table:", "geom.get('coordinates') if ty == 'GeometryCollection': ee_geom = utils.GEOMETRY_TYPES.get(ty)(geom, opt_proj=crs) else:", "to download a FeatureCollection as a CSV \"\"\" d =", "(Broken Pipe ERROR) out of the list. You can try", "open(os.path.join(path, fname), 'w') as thefile: thefile.write(json.dumps(content)) return thefile def toCSV(collection,", "should be used to determine which attributes will be downloaded.", "records with start and end start = start if start", "1000 records at a time. 
Found {}\" raise ValueError(msg.format(end-start)) for", "seq = tools.ee_list.sequence(0, size, split_at) limits = ee.List.zip(seq.slice(1), seq) def", "Init task task = ee.batch.Export.table.toAsset(table, assetId=assetId, description=description, **kwargs) task.start() if", "= geodict.get('crs') if filecrs: name = filecrs.get('properties').get('name') splitcrs = name.split(':')", "= name+'.geojson' content = toDict(collection, split_at) with open(os.path.join(path, fname), 'w')", "will try to get it from the geoJSON, and if", "selectors: The selectors that should be used to determine which", "geoJSON, and if not there it will rise an error", "ee.FeatureCollection(collections.get(i)) content_c = c.getInfo() feats_c = content_c['features'] feats = feats", ".transform(wgs84, 1) else: geometry = ee.Geometry(geom) feat = ee.Feature(geometry, atr)", "os if not path: path = os.getcwd() # name if", ":type assetPath: str :param name: filename for the image (AssetID", "# Check if the user is specified in the asset", "= reader.shapeRecords() end = len(records) else: end = end +", "error :type: crs: str :return: a tuple of features. \"\"\"", ":type: crs: str :return: a tuple of features. \"\"\" geojsondict", "will be downloaded. :param filename: The name of the file", "a ee.FeatureCollection At the moment only works for shapes with", "ee.Geometry(geom) feat = ee.Feature(geometry, atr) features.append(feat) return ee.FeatureCollection(features) def fromGeoJSON(filename=None,", "yourself casting the result of this function to a ee.List", "shapes with less than 1000 records and doesn't handle complex", "= data features = [] # Get crs from GeoJSON", "features. 
\"\"\" geojsondict = utils.kmlToGeoJsonDict(filename, data, encoding) features = geojsondict['features']", ":type table: ee.FeatureCollection :param assetPath: path to upload the image", "will be assetPath + name) :type name: str :return: the", "path2create = assetPath # '/'.join(assetPath.split('/')[:-1]) utils.createAssets([path2create], 'Folder', True) # Asset", "crs: filecrs = geodict.get('crs') if filecrs: name = filecrs.get('properties').get('name') splitcrs", "data features = [] # Get crs from GeoJSON if", ":param name: filename for the image (AssetID will be assetPath", "\"\"\" import shapefile wgs84 = ee.Projection('EPSG:4326') # read the filename", "feature collection to upload :type table: ee.FeatureCollection :param assetPath: path", "tools.ee_list.sequence(0, size, split_at) limits = ee.List.zip(seq.slice(1), seq) def over_limits(n): n", "for n, feat in enumerate(geodict.get('features')): properties = feat.get('properties') geom =", "geom['type'] if ty == 'GeometryCollection': geometries = geom['geometries'] for g", "def toAsset(table, assetPath, name=None, create=True, verbose=False, **kwargs): \"\"\" This function", "# Recrusive create path path2create = assetPath # '/'.join(assetPath.split('/')[:-1]) utils.createAssets([path2create],", "tuple with ee.Feature inside. This is due to failing when", "for i in range(0, collections_size): c = ee.FeatureCollection(collections.get(i)) content_c =", "if the user is specified in the asset path is_user", "specified it will try to get it from the geoJSON,", ".. 
import tools def fromShapefile(filename, crs=None, start=None, end=None): \"\"\" Convert", "end=None): \"\"\" Convert an ESRI file (.shp and .dbf must", "str :return: the tasks :rtype: ee.batch.Task \"\"\" # Check if", "crs = '{}:{}'.format(cleancrs[-2], cleancrs[-1]) else: raise ValueError('{} not recognized'.format(name)) except", "= utils.matchDescription(name) # Init task task = ee.batch.Export.table.toAsset(table, assetId=assetId, description=description,", "specify a path instead. :type filename: str :param start: :return:", "types = dict(zip(field_names, field_types)) features = [] projection = utils.getProjection(filename)", "and if not there it will rise an error :type:", "determine which attributes will be downloaded. :param filename: The name", "filename, split_at=4000): \"\"\" Alternative to download a FeatureCollection as a", "splitcrs if part] try: if cleancrs[-1] == 'CRS84': crs =", "= size.gte(4999) def greater(): size = collection.size() seq = tools.ee_list.sequence(0,", "The selectors that should be used to determine which attributes", "def fromGeoJSON(filename=None, data=None, crs=None): \"\"\" Create a list of Features", "ty == 'GeometryCollection': ee_geom = utils.GEOMETRY_TYPES.get(ty)(geom, opt_proj=crs) else: if ty", "is not in the same path than the script, specify", "the moment only works for shapes with less than 1000", "path instead. :type filename: str :param start: :return: the FeatureCollection", "geom.get('type') coords = geom.get('coordinates') if ty == 'GeometryCollection': ee_geom =", "= ee.Date(rec.isoformat()).millis().getInfo() elif fld_type in ['C', 'N', 'F']: value =", "image (AssetID will be assetPath + name) :type name: str", "than 1000 records at a time. 
Found {}\" raise ValueError(msg.format(end-start))", "with open(os.path.join(path, fname), 'w') as thefile: thefile.write(json.dumps(content)) return thefile def", "crs=None): \"\"\" Create a list of Features from a GeoJSON", "prop = feat['properties'] if 'styleUrl' in prop: prop.pop('styleUrl') # remove", ". import utils import json import csv from .. import", "not recognized'.format(name)) else: crs = 'EPSG:4326' for n, feat in", "geometry = ee.Geometry(geom, projection) \\ .transform(wgs84, 1) else: geometry =", "casting the result of this function to a ee.List or", "fromKML(filename=None, data=None, crs=None, encoding=None): \"\"\" Create a list of Features", "The collection to export :type collection: ee.FeatureCollection :param name: name", "os.getcwd() # name if name[-8:-1] != '.geojson': fname = name+'.geojson'", "# match fields properties['system:index'] = fid properties['geometry'] = geom #", "path=None, split_at=4000): \"\"\" Export a FeatureCollection to a GeoJSON file", "the user is specified in the asset path is_user =", "= geom.get('coordinates') if ty == 'GeometryCollection': ee_geom = utils.GEOMETRY_TYPES.get(ty)(geom, opt_proj=crs)", "try to get it from the geoJSON, and if not", "\"\"\" Convert an ESRI file (.shp and .dbf must be", "collection: The collection to export :type collection: ee.FeatureCollection :param name:", "col.getInfo() feats = content['features'] for i in range(0, collections_size): c", "a FeatureCollection to a GeoJSON file :param collection: The collection", "ini = ee.Number(n.get(0)) end = ee.Number(n.get(1)) return ee.FeatureCollection(collection.toList(ini, end)) return", "python tuple with ee.Feature inside. This is due to failing", "csv.DictWriter(thecsv, fields) writer.writeheader() # write rows for feature in features:", "list of Features from a KML file. Return a python", "in range(start, end): # atr = dict(zip(field_names, sr.record)) sr =", "a KML file. 
Return a python tuple with ee.Feature inside.", "'EPSG:{}'.format(projection) # filter records with start and end start =", "utils.downloadFile(url, filename, filetype, path) return thefile def toAsset(table, assetPath, name=None,", "if filename: with open(filename, 'r') as geoj: content = geoj.read()", "filecrs.get('properties').get('name') splitcrs = name.split(':') cleancrs = [part for part in", "be assetPath + name) :type name: str :return: the tasks", "a local file a CSV or geoJSON file. This uses", "field in fields] field_types = [field[1] for field in fields]", "split_at=4000): \"\"\" Export a FeatureCollection to a GeoJSON file :param", "verbose=False, **kwargs): \"\"\" This function can create folders and ImageCollections", "def fromShapefile(filename, crs=None, start=None, end=None): \"\"\" Convert an ESRI file", "encoding) features = geojsondict['features'] for feat in features: # remove", "msg = \"Can't process more than 1000 records at a", "collections.size().getInfo() col = ee.FeatureCollection(collections.get(0)) content = col.getInfo() feats = content['features']", "import csv from .. import tools def fromShapefile(filename, crs=None, start=None,", "name if name[-8:-1] != '.geojson': fname = name+'.geojson' content =", "content['features'] for i in range(0, collections_size): c = ee.FeatureCollection(collections.get(i)) content_c", "instead. :type filename: str :param start: :return: the FeatureCollection :rtype:", "if filecrs: name = filecrs.get('properties').get('name') splitcrs = name.split(':') cleancrs =", "= reader.shapeRecord(i) atr = {} for fld, rec in zip(field_names,", "= end + 1 if (end-start)>1000: msg = \"Can't process", "(.shp and .dbf must be present) to a ee.FeatureCollection At", "field_names = [field[0] for field in fields] field_types = [field[1]", "inside. 
This is due to failing when attempting to create", "function to a ee.List or using it directly as a", "collection to upload :type table: ee.FeatureCollection :param assetPath: path to", "elif cleancrs[-2] == 'EPSG': crs = '{}:{}'.format(cleancrs[-2], cleancrs[-1]) else: raise", "== 'CRS84': crs = 'EPSG:4326' elif cleancrs[-2] == 'EPSG': crs", "i in range(0, collections_size): c = ee.FeatureCollection(collections.get(i)) content_c = c.getInfo()", "str :param start: :return: the FeatureCollection :rtype: ee.FeatureCollection \"\"\" import", "create=True, verbose=False, **kwargs): \"\"\" This function can create folders and", "in geometries: c = g['coordinates'] utils.removeZ(c) else: coords = geom['coordinates']", "geom = sr.shape.__geo_interface__ if projection is not None: geometry =", "== 'D': value = ee.Date(rec.isoformat()).millis().getInfo() elif fld_type in ['C', 'N',", "for part in splitcrs if part] try: if cleancrs[-1] ==", "create a FeatureCollection (Broken Pipe ERROR) out of the list.", "records and doesn't handle complex shapes. :param filename: the name", "shapes. :param filename: the name of the filename. If the", "fields] field_types = [field[1] for field in fields] types =", "return thecsv def toLocal(collection, filename, filetype=None, selectors=None, path=None): \"\"\" Download", ":param split_at: limit to avoid an EE Exception :type split_at:", "fld_type == 'D': value = ee.Date(rec.isoformat()).millis().getInfo() elif fld_type in ['C',", "end = ee.Number(n.get(1)) return ee.FeatureCollection(collection.toList(ini, end)) return limits.map(over_limits) collections =", "toDict(collection, split_at) with open(os.path.join(path, fname), 'w') as thefile: thefile.write(json.dumps(content)) return", ":param selectors: The selectors that should be used to determine", "to failing when attempting to create a FeatureCollection (Broken Pipe", "a tuple of features. 
\"\"\" geojsondict = utils.kmlToGeoJsonDict(filename, data, encoding)", "`toCSV` :param filetype: The filetype of download, either CSV or", "the FeatureCollection as a dict object \"\"\" size = collection.size()", "file to be downloaded \"\"\" if not filetype: filetype =", "path=None): \"\"\" Download a FeatureCollection to a local file a", "fld_type = types[fld] if fld_type == 'D': value = ee.Date(rec.isoformat()).millis().getInfo()", "avoid an EE Exception :type split_at: int :return: A GeoJSON", "geom = feature['geometry']['type'] # match fields properties['system:index'] = fid properties['geometry']", "selectors=None, path=None): \"\"\" Download a FeatureCollection to a local file", "def toCSV(collection, filename, split_at=4000): \"\"\" Alternative to download a FeatureCollection", "the name of the filename. If the shape is not", "not None: geometry = ee.Geometry(geom, projection) \\ .transform(wgs84, 1) else:", "= ee.List(n) ini = ee.Number(n.get(0)) end = ee.Number(n.get(1)) return ee.FeatureCollection(collection.toList(ini,", ":type filename: str :param start: :return: the FeatureCollection :rtype: ee.FeatureCollection", "only works for shapes with less than 1000 records and", "cleancrs[-2] == 'EPSG': crs = '{}:{}'.format(cleancrs[-2], cleancrs[-1]) else: raise ValueError('{}", "it yourself casting the result of this function to a", "ID (Path + name) assetId = '/'.join([assetPath, name]) # Description", "file. Return a python tuple with ee.Feature inside. 
This is", "\"\"\" Get the FeatureCollection as a dict object \"\"\" size", "utils.removeZ(coords) if utils.hasZ(coords) else coords ee_geom = utils.GEOMETRY_TYPES.get(ty)(coords, proj=ee.Projection(crs)) ee_feat", "'CSV' url = collection.getDownloadURL(filetype, selectors, filename) thefile = utils.downloadFile(url, filename,", "when attempting to create a FeatureCollection (Broken Pipe ERROR) out", "name = filecrs.get('properties').get('name') splitcrs = name.split(':') cleancrs = [part for", "ext != '.csv': filename += '.csv' with open(filename, 'w') as", "needed geom = feat['geometry'] ty = geom['type'] if ty ==", "filename: the name of the filename. If the shape is", "in the current folder :type path: str :param split_at: limit", "def toLocal(collection, filename, filetype=None, selectors=None, path=None): \"\"\" Download a FeatureCollection", "feature['geometry']['type'] # match fields properties['system:index'] = fid properties['geometry'] = geom", "to a local file a CSV or geoJSON file. This", "# Description description = utils.matchDescription(name) # Init task task =", "name) :type name: str :return: the tasks :rtype: ee.batch.Task \"\"\"", "fields = reader.fields[1:] field_names = [field[0] for field in fields]", "coords = utils.removeZ(coords) if utils.hasZ(coords) else coords ee_geom = utils.GEOMETRY_TYPES.get(ty)(coords,", "original function :param table: the feature collection to upload :type", "from a KML file. 
Return a python tuple with ee.Feature", "# name if name[-8:-1] != '.geojson': fname = name+'.geojson' content", "filename, filetype=None, selectors=None, path=None): \"\"\" Download a FeatureCollection to a", "'EPSG:' in projection: projection = projection.split(':')[1] projection = 'EPSG:{}'.format(projection) #", "feats return content def toGeoJSON(collection, name, path=None, split_at=4000): \"\"\" Export", "different method than `toGeoJSON` and `toCSV` :param filetype: The filetype", "filename, filetype, path) return thefile def toAsset(table, assetPath, name=None, create=True,", "the script, specify a path instead. :type filename: str :param", "projection = 'EPSG:{}'.format(projection) # filter records with start and end", "else: geodict = data features = [] # Get crs", "thefile.write(json.dumps(content)) return thefile def toCSV(collection, filename, split_at=4000): \"\"\" Alternative to", "if cleancrs[-1] == 'CRS84': crs = 'EPSG:4326' elif cleancrs[-2] ==", "= d['features'] ext = filename[-4:] if ext != '.csv': filename", "= dict(zip(field_names, sr.record)) sr = reader.shapeRecord(i) atr = {} for", "{}\" raise ValueError(msg.format(end-start)) for i in range(start, end): # atr", "fid = feature['id'] geom = feature['geometry']['type'] # match fields properties['system:index']", "Create a list of Features from a KML file. Return", "writer.writerow(properties) return thecsv def toLocal(collection, filename, filetype=None, selectors=None, path=None): \"\"\"", "rise an error :type: crs: str :return: a tuple of", "= [field[0] for field in fields] field_types = [field[1] for", "fromShapefile(filename, crs=None, start=None, end=None): \"\"\" Convert an ESRI file (.shp" ]
[ "# The key idea of this program is to equate", "if name == \"rock\": return 0 elif name == 'Spock':", "(diff == 1) or (diff == 2): print \"Player wins!\"", "\"rock\" elif number == 1: return 'Spock' elif number ==", "scissors import random def name_to_number(name): if name == \"rock\": return", "elif number == 3: return 'lizard' elif number == 4:", "return 'Spock' elif number == 2: return 'paper' elif number", "return 0 elif name == 'Spock': return 1 elif name", "paper # 3 - lizard # 4 - scissors import", "# Rock-paper-scissors-lizard-Spock template # The key idea of this program", "- scissors import random def name_to_number(name): if name == \"rock\":", "return None def number_to_name(number): if number == 0: return \"rock\"", "== 3) or (diff == 4): print \"Computer wins!\" else", "== \"rock\": return 0 elif name == 'Spock': return 1", "\"rock\": return 0 elif name == 'Spock': return 1 elif", "follows: # # 0 - rock # 1 - Spock", "= random.randrange(5) comp_choice = number_to_name(comp_number) print \"Computer chooses\",comp_choice diff =", "print \"Computer wins!\" else : print \"Tie!\" rpsls(\"rock\") rpsls(\"Spock\") rpsls(\"paper\")", "chooses\",player_choice player_number = name_to_number(player_choice) comp_number = random.randrange(5) comp_choice = number_to_name(comp_number)", "3) or (diff == 4): print \"Computer wins!\" else :", "2 - paper # 3 - lizard # 4 -", "\"Spock\" to numbers # as follows: # # 0 -", "number == 4: return 'scissors' else : return None def", ": return None def rpsls(player_choice): print \"\" print \"Player chooses\",player_choice", "2): print \"Player wins!\" elif (diff == 3) or (diff", "print \"Player wins!\" elif (diff == 3) or (diff ==", "number == 0: return \"rock\" elif number == 1: return", "print \"\" print \"Player chooses\",player_choice player_number = name_to_number(player_choice) comp_number =", "elif number == 4: return 'scissors' else : return None", "= number_to_name(comp_number) print \"Computer chooses\",comp_choice 
diff = (player_number - comp_number)%5", "\"\" print \"Player chooses\",player_choice player_number = name_to_number(player_choice) comp_number = random.randrange(5)", "== 2): print \"Player wins!\" elif (diff == 3) or", "== 4): print \"Computer wins!\" else : print \"Tie!\" rpsls(\"rock\")", "name == \"rock\": return 0 elif name == 'Spock': return", "equate the strings # \"rock\", \"paper\", \"scissors\", \"lizard\", \"Spock\" to", "return 2 elif name == 'lizard': return 3 elif name", ": return None def number_to_name(number): if number == 0: return", "return None def rpsls(player_choice): print \"\" print \"Player chooses\",player_choice player_number", "0 elif name == 'Spock': return 1 elif name ==", "- comp_number)%5 if (diff == 1) or (diff == 2):", "1 elif name == 'paper': return 2 elif name ==", "elif name == 'paper': return 2 elif name == 'lizard':", "3 elif name == 'scissors': return 4 else : return", "number == 2: return 'paper' elif number == 3: return", "return 3 elif name == 'scissors': return 4 else :", "else : return None def number_to_name(number): if number == 0:", "elif name == 'scissors': return 4 else : return None", "number_to_name(comp_number) print \"Computer chooses\",comp_choice diff = (player_number - comp_number)%5 if", "# 1 - Spock # 2 - paper # 3", "Rock-paper-scissors-lizard-Spock template # The key idea of this program is", "\"paper\", \"scissors\", \"lizard\", \"Spock\" to numbers # as follows: #", "program is to equate the strings # \"rock\", \"paper\", \"scissors\",", "numbers # as follows: # # 0 - rock #", "1) or (diff == 2): print \"Player wins!\" elif (diff", "of this program is to equate the strings # \"rock\",", "print \"Computer chooses\",comp_choice diff = (player_number - comp_number)%5 if (diff", "comp_number)%5 if (diff == 1) or (diff == 2): print", "number == 1: return 'Spock' elif number == 2: return", "else : return None def rpsls(player_choice): print \"\" print \"Player", "def name_to_number(name): if name == 
\"rock\": return 0 elif name", "== 1) or (diff == 2): print \"Player wins!\" elif", "(player_number - comp_number)%5 if (diff == 1) or (diff ==", "4): print \"Computer wins!\" else : print \"Tie!\" rpsls(\"rock\") rpsls(\"Spock\")", "wins!\" else : print \"Tie!\" rpsls(\"rock\") rpsls(\"Spock\") rpsls(\"paper\") rpsls(\"lizard\") rpsls(\"scissors\")", "'Spock' elif number == 2: return 'paper' elif number ==", "0 - rock # 1 - Spock # 2 -", "\"lizard\", \"Spock\" to numbers # as follows: # # 0", "# # 0 - rock # 1 - Spock #", "elif number == 1: return 'Spock' elif number == 2:", "\"Computer wins!\" else : print \"Tie!\" rpsls(\"rock\") rpsls(\"Spock\") rpsls(\"paper\") rpsls(\"lizard\")", "2 elif name == 'lizard': return 3 elif name ==", "# as follows: # # 0 - rock # 1", "key idea of this program is to equate the strings", "print \"Player chooses\",player_choice player_number = name_to_number(player_choice) comp_number = random.randrange(5) comp_choice", "# 0 - rock # 1 - Spock # 2", "template # The key idea of this program is to", "# 4 - scissors import random def name_to_number(name): if name", "return 1 elif name == 'paper': return 2 elif name", "or (diff == 2): print \"Player wins!\" elif (diff ==", "4 else : return None def number_to_name(number): if number ==", "is to equate the strings # \"rock\", \"paper\", \"scissors\", \"lizard\",", "'paper' elif number == 3: return 'lizard' elif number ==", "strings # \"rock\", \"paper\", \"scissors\", \"lizard\", \"Spock\" to numbers #", "to equate the strings # \"rock\", \"paper\", \"scissors\", \"lizard\", \"Spock\"", "# 2 - paper # 3 - lizard # 4", "chooses\",comp_choice diff = (player_number - comp_number)%5 if (diff == 1)", "1: return 'Spock' elif number == 2: return 'paper' elif", "name_to_number(player_choice) comp_number = random.randrange(5) comp_choice = number_to_name(comp_number) print \"Computer chooses\",comp_choice", "rpsls(player_choice): print \"\" print \"Player chooses\",player_choice player_number = 
name_to_number(player_choice) comp_number", "== 'scissors': return 4 else : return None def number_to_name(number):", "# 3 - lizard # 4 - scissors import random", "3: return 'lizard' elif number == 4: return 'scissors' else", "(diff == 4): print \"Computer wins!\" else : print \"Tie!\"", "name == 'lizard': return 3 elif name == 'scissors': return", "comp_number = random.randrange(5) comp_choice = number_to_name(comp_number) print \"Computer chooses\",comp_choice diff", "'Spock': return 1 elif name == 'paper': return 2 elif", "wins!\" elif (diff == 3) or (diff == 4): print", "return 'scissors' else : return None def rpsls(player_choice): print \"\"", "player_number = name_to_number(player_choice) comp_number = random.randrange(5) comp_choice = number_to_name(comp_number) print", "elif number == 2: return 'paper' elif number == 3:", "- lizard # 4 - scissors import random def name_to_number(name):", "'scissors': return 4 else : return None def number_to_name(number): if", "\"scissors\", \"lizard\", \"Spock\" to numbers # as follows: # #", "== 'paper': return 2 elif name == 'lizard': return 3", "== 3: return 'lizard' elif number == 4: return 'scissors'", "idea of this program is to equate the strings #", "(diff == 2): print \"Player wins!\" elif (diff == 3)", "'lizard': return 3 elif name == 'scissors': return 4 else", "the strings # \"rock\", \"paper\", \"scissors\", \"lizard\", \"Spock\" to numbers", "this program is to equate the strings # \"rock\", \"paper\",", "0: return \"rock\" elif number == 1: return 'Spock' elif", "return 4 else : return None def number_to_name(number): if number", "\"Player chooses\",player_choice player_number = name_to_number(player_choice) comp_number = random.randrange(5) comp_choice =", "4 - scissors import random def name_to_number(name): if name ==", "import random def name_to_number(name): if name == \"rock\": return 0", "name_to_number(name): if name == \"rock\": return 0 elif name ==", "== 0: return \"rock\" elif number == 1: 
return 'Spock'", "None def rpsls(player_choice): print \"\" print \"Player chooses\",player_choice player_number =", "if number == 0: return \"rock\" elif number == 1:", "== 'Spock': return 1 elif name == 'paper': return 2", "elif name == 'lizard': return 3 elif name == 'scissors':", "random def name_to_number(name): if name == \"rock\": return 0 elif", "to numbers # as follows: # # 0 - rock", "4: return 'scissors' else : return None def rpsls(player_choice): print", "lizard # 4 - scissors import random def name_to_number(name): if", "= (player_number - comp_number)%5 if (diff == 1) or (diff", "name == 'Spock': return 1 elif name == 'paper': return", "== 4: return 'scissors' else : return None def rpsls(player_choice):", "number == 3: return 'lizard' elif number == 4: return", "\"rock\", \"paper\", \"scissors\", \"lizard\", \"Spock\" to numbers # as follows:", "diff = (player_number - comp_number)%5 if (diff == 1) or", "elif name == 'Spock': return 1 elif name == 'paper':", "2: return 'paper' elif number == 3: return 'lizard' elif", "def number_to_name(number): if number == 0: return \"rock\" elif number", "\"Computer chooses\",comp_choice diff = (player_number - comp_number)%5 if (diff ==", "= name_to_number(player_choice) comp_number = random.randrange(5) comp_choice = number_to_name(comp_number) print \"Computer", "(diff == 3) or (diff == 4): print \"Computer wins!\"", "or (diff == 4): print \"Computer wins!\" else : print", "- Spock # 2 - paper # 3 - lizard", "== 2: return 'paper' elif number == 3: return 'lizard'", "1 - Spock # 2 - paper # 3 -", "return 'lizard' elif number == 4: return 'scissors' else :", "number_to_name(number): if number == 0: return \"rock\" elif number ==", "elif (diff == 3) or (diff == 4): print \"Computer", "random.randrange(5) comp_choice = number_to_name(comp_number) print \"Computer chooses\",comp_choice diff = (player_number", "return \"rock\" elif number == 1: return 'Spock' elif number", "as follows: # # 0 - rock # 1 -", 
"comp_choice = number_to_name(comp_number) print \"Computer chooses\",comp_choice diff = (player_number -", "'lizard' elif number == 4: return 'scissors' else : return", "- rock # 1 - Spock # 2 - paper", "3 - lizard # 4 - scissors import random def", "== 1: return 'Spock' elif number == 2: return 'paper'", "\"Player wins!\" elif (diff == 3) or (diff == 4):", "None def number_to_name(number): if number == 0: return \"rock\" elif", "name == 'scissors': return 4 else : return None def", "def rpsls(player_choice): print \"\" print \"Player chooses\",player_choice player_number = name_to_number(player_choice)", "# \"rock\", \"paper\", \"scissors\", \"lizard\", \"Spock\" to numbers # as", "name == 'paper': return 2 elif name == 'lizard': return", "if (diff == 1) or (diff == 2): print \"Player", "'scissors' else : return None def rpsls(player_choice): print \"\" print", "'paper': return 2 elif name == 'lizard': return 3 elif", "== 'lizard': return 3 elif name == 'scissors': return 4", "rock # 1 - Spock # 2 - paper #", "- paper # 3 - lizard # 4 - scissors", "return 'paper' elif number == 3: return 'lizard' elif number", "The key idea of this program is to equate the", "Spock # 2 - paper # 3 - lizard #" ]
[ "function returns the combined partition that maximizes the selection criterion.", "sklearn.metrics import pairwise_distances from sklearn.metrics import adjusted_rand_score as ari from", "combined partition. methods: a list of methods to apply on", "ProcessPoolExecutor(max_workers=n_jobs) as executor: tasks = {executor.submit(m, ensemble, k): m.__name__ for", "clus_name, \"clusterer_params\": str(clus_obj.get_params()), \"partition\": partition, } ) for attr in", "algorithm is AgglomerativeClustering (from sklearn) and the linkage method is", "The number of jobs used by the pairwise_distance matrix from", "(coassociation matrix). \"\"\" def _compare(x, y): xy = np.array([x, y]).T", "`method_func`. Returns: It returns a tuple with the data partition", "part = method_func(ensemble_data, k, **kwargs) nmi_values = np.array( [ compare_arrays(ensemble_member,", "a fixed set of parameters). Args: data: A numpy array,", "using DBSCAN) partition[partition < 0] = np.nan # get number", "distance), the affinity_matrix is given as data input to the", "pairwise_distances( ensemble.T, metric=_compare, n_jobs=n_jobs, force_all_finite=\"allow-nan\" ) def supraconsensus(ensemble, k, methods,", "partition # # for agglomerative clustering both data and affinity_matrix", "\"\"\" def _compare(x, y): xy = np.array([x, y]).T xy =", "np.array( [ compare_arrays(ensemble_member, part, ami, use_weighting=True) for ensemble_member in ensemble", "specified. \"\"\" ensemble = [] for clus_name, clus_obj in tqdm(clusterers.items(),", "pairwise_distance matrix from sklearn. Returns: A numpy array representing a", "the ensemble. 
Args: ensemble: A numpy array representing a set", "coassociation matrix contains the percentage of times the pair of", "computes the coassociation matrix (a distance matrix for all objects", "criterion_value = selection_criterion(ensemble, part) methods_results[method_name] = { \"partition\": part, \"criterion_value\":", "ensemble from the data given a set of clusterers (a", "data input. clusterers: A dictionary with clusterers specified in this", "partition = clus_obj.fit_predict(data).astype(float) # remove from partition noisy points (for", "columns are objects. n_jobs: The number of jobs used by", "best method name, best criterion value) \"\"\" from concurrent.futures import", "disable=(not use_tqdm), ncols=100, ): method_name = tasks[future] part = future.result()", "number of jobs used by the pairwise_distance matrix from sklearn.", "data using the specified method, and some performance measures of", "xy = xy[~np.isnan(xy).any(axis=1)] return (xy[:, 0] != xy[:, 1]).sum() /", "clustering algorithm with a fixed set of parameters). Args: data:", "the coassociation matrix contains the percentage of times the pair", "to `method_func`. Returns: It returns a tuple with the data", "including \"n_clusters\" will extract this attribute from the estimator and", "(partitions in rows, objects in columns). k: The number of", "all objects (coassociation matrix). \"\"\" def _compare(x, y): xy =", "and the linkage method is different than ward (which only", "of huge data structures not needed in # this context", "from partition noisy points (for example, if using DBSCAN) partition[partition", "other linkage # methods the affinity_matrix is used if (type(clus_obj).__name__", "use_weighting=True) for ensemble_member in ensemble ] ) ari_values = np.array(", "points (for example, if using DBSCAN) partition[partition < 0] =", "representing the ensemble (partitions in rows, objects in columns). 
k:", "np.mean(ari_values), \"ari_median\": np.median(ari_values), \"ari_std\": np.std(ari_values), \"ami_mean\": np.mean(ami_values), \"ami_median\": np.median(ami_values), \"ami_std\":", "was clustered together in the ensemble. Args: ensemble: A numpy", "clustering solution (partition) and columns are objects. n_jobs: The number", "clusterers: dict, attributes: list, affinity_matrix=None): \"\"\" It generates an ensemble", "method. For evidence accumulation methods, this is the coassociation matrix", "an instance of a clustering algorithm with a fixed set", "ensemble). ensemble_data: A numpy array with the ensemble data that", "for attr in attributes: if attr == \"n_clusters\" and not", "euclidean distance), the affinity_matrix is given as data input to", "by the clusterers. Columns include the clusterer name/id, the partition,", "ensemble_member in ensemble ] ) ami_values = np.array( [ compare_arrays(ensemble_member,", "np.median(ami_values), \"ami_std\": np.std(ami_values), \"nmi_mean\": np.mean(nmi_values), \"nmi_median\": np.median(nmi_values), \"nmi_std\": np.std(nmi_values), }", "\"ari_std\": np.std(ari_values), \"ami_mean\": np.mean(ami_values), \"ami_median\": np.median(ami_values), \"ami_std\": np.std(ami_values), \"nmi_mean\": np.mean(nmi_values),", "this is needed, because otherwise # the estimator saves references", "and affinity_matrix should be # given; for ward linkage, data", "kwargs: Other parameters passed to `method_func`. Returns: It returns a", "the final dataframe returned. affinity_matrix: If the clustering algorithm is", "if using DBSCAN) partition[partition < 0] = np.nan # get", "Returns: A pandas DataFrame with all the partitions generated by", "ensemble (partitions in rows, objects in columns). 
k: The number", "**kwargs): \"\"\" Runs a consensus clustering method on the ensemble", "is used, and for the other linkage # methods the", "Returns a tuple: (partition, best method name, best criterion value)", "and computes a series of performance measures. Args: method_func: A", "a square distance matrix for all objects (coassociation matrix). \"\"\"", "for future in tqdm( as_completed(tasks), total=len(tasks), disable=(not use_tqdm), ncols=100, ):", "A numpy array representing a set of clustering solutions on", "sklearn.metrics import normalized_mutual_info_score as nmi from tqdm import tqdm from", ") def run_method_and_compute_agreement(method_func, ensemble_data, ensemble, k, **kwargs): \"\"\" Runs a", "jobs used by the pairwise_distance matrix from sklearn. Returns: A", "a clustering algorithm with a fixed set of parameters). Args:", "input to the estimator instead of data. Returns: A pandas", "res[attr] = n_clusters else: res[attr] = getattr(clus_obj, attr) ensemble.append(res) #", "# the estimator saves references of huge data structures not", "# get partition # # for agglomerative clustering both data", "the data given a set of clusterers (a clusterer is", "xy = np.array([x, y]).T xy = xy[~np.isnan(xy).any(axis=1)] return (xy[:, 0]", "getattr(clus_obj, attr) ensemble.append(res) # for some estimators such as DBSCAN", "other structure supported by the clusterers as data input. clusterers:", "ensemble data, obtains the consolidated partition with the desired number", "get_params() method) and any other attribute specified. \"\"\" ensemble =", "the ensemble). ensemble_data: A numpy array with the ensemble data", "clusterers specified in this format: { 'k-means #1': KMeans(n_clusters=2), ...", "this context reset_estimator(clus_obj) return pd.DataFrame(ensemble).set_index(\"clusterer_id\") def get_ensemble_distance_matrix(ensemble, n_jobs=1): \"\"\" Given", "method. kwargs: Other parameters passed to `method_func`. 
Returns: It returns", "both data and affinity_matrix should be # given; for ward", "the selection criterion. Args: ensemble: a clustering ensemble (rows are", "the second one. n_jobs: number of jobs. use_tqdm: ensembles/disables the", "input. clusterers: A dictionary with clusterers specified in this format:", "def supraconsensus(ensemble, k, methods, selection_criterion, n_jobs=1, use_tqdm=False): \"\"\" It combines", "for some estimators such as DBSCAN this is needed, because", "and include it in the final dataframe returned. affinity_matrix: If", "columns). k: The number of clusters to obtain from the", "return pairwise_distances( ensemble.T, metric=_compare, n_jobs=n_jobs, force_all_finite=\"allow-nan\" ) def supraconsensus(ensemble, k,", "all objects using the ensemble information). For each object pair,", "def run_method_and_compute_agreement(method_func, ensemble_data, ensemble, k, **kwargs): \"\"\" Runs a consensus", "For evidence accumulation methods, this is the coassociation matrix (a", "evidence accumulation methods, this is the coassociation matrix (a square", "it computes the coassociation matrix (a distance matrix for all", "that the user can specify. Each of these methods combines", "and a partition as the second one. n_jobs: number of", "extract this attribute from the estimator and include it in", "with the get_params() method) and any other attribute specified. \"\"\"", "noisy points (for example, if using DBSCAN) partition[partition < 0]", "is the coassociation matrix (a square matrix with the distance", "only support euclidean distance), the affinity_matrix is given as data", "a set of clusterers (a clusterer is an instance of", "functions to generate and combine a clustering ensemble. 
\"\"\" import", "clusterer is an instance of a clustering algorithm with a", "= [] for clus_name, clus_obj in tqdm(clusterers.items(), total=len(clusterers)): # get", "A consensus function (first argument is either the ensemble or", "ensemble_data, ensemble, k, **kwargs): \"\"\" Runs a consensus clustering method", "used, and for the other linkage # methods the affinity_matrix", "= n_clusters else: res[attr] = getattr(clus_obj, attr) ensemble.append(res) # for", "derived from the ensemble). ensemble_data: A numpy array with the", "Columns include the clusterer name/id, the partition, the estimator parameters", "def get_ensemble_distance_matrix(ensemble, n_jobs=1): \"\"\" Given an ensemble, it computes the", ") def supraconsensus(ensemble, k, methods, selection_criterion, n_jobs=1, use_tqdm=False): \"\"\" It", "[ compare_arrays(ensemble_member, part, ami, use_weighting=True) for ensemble_member in ensemble ]", "= { \"ari_mean\": np.mean(ari_values), \"ari_median\": np.median(ari_values), \"ari_std\": np.std(ari_values), \"ami_mean\": np.mean(ami_values),", "function has to accept an ensemble as the first argument,", "objects in columns). k: The number of clusters to obtain", "methods: a list of methods to apply on the ensemble;", "pandas as pd from sklearn.metrics import pairwise_distances from sklearn.metrics import", "_compare(x, y): xy = np.array([x, y]).T xy = xy[~np.isnan(xy).any(axis=1)] return", "save in the final dataframe; for example, including \"n_clusters\" will", "of clustering solutions on the same data. Each row is", "= xy[~np.isnan(xy).any(axis=1)] return (xy[:, 0] != xy[:, 1]).sum() / xy.shape[0]", "tqdm from clustering.utils import reset_estimator, compare_arrays def generate_ensemble(data, clusterers: dict,", "value) \"\"\" from concurrent.futures import ProcessPoolExecutor, as_completed methods_results = {}", "distance matrix for all objects using the ensemble information). 
For", "is used if (type(clus_obj).__name__ == \"AgglomerativeClustering\") and ( clus_obj.linkage !=", "from concurrent.futures import ProcessPoolExecutor, as_completed methods_results = {} with ProcessPoolExecutor(max_workers=n_jobs)", "ensemble, k): m.__name__ for m in methods} for future in", "for m in methods} for future in tqdm( as_completed(tasks), total=len(tasks),", "import adjusted_rand_score as ari from sklearn.metrics import adjusted_mutual_info_score as ami", "selection_criterion, n_jobs=1, use_tqdm=False): \"\"\" It combines a clustering ensemble using", "coassociation matrix derived from the ensemble). ensemble_data: A numpy array", "with clusterers specified in this format: { 'k-means #1': KMeans(n_clusters=2),", "the selection criterion; this function has to accept an ensemble", "in ensemble ] ) performance_values = { \"ari_mean\": np.mean(ari_values), \"ari_median\":", "ensemble and returns a single partition. This function returns the", "in tqdm( as_completed(tasks), total=len(tasks), disable=(not use_tqdm), ncols=100, ): method_name =", "progress bar. Returns: Returns a tuple: (partition, best method name,", "the clusterers. Columns include the clusterer name/id, the partition, the", "criterion value) \"\"\" from concurrent.futures import ProcessPoolExecutor, as_completed methods_results =", "measures of this partition. \"\"\" part = method_func(ensemble_data, k, **kwargs)", "ensemble. Args: ensemble: A numpy array representing a set of", "partition. selection_criterion: a function that represents the selection criterion; this", "should be # given; for ward linkage, data is used,", "= max( methods_results, key=lambda x: methods_results[x][\"criterion_value\"] ) best_method_results = methods_results[best_method]", "adjusted_rand_score as ari from sklearn.metrics import adjusted_mutual_info_score as ami from", "of clusters, and computes a series of performance measures. 
Args:", ") ami_values = np.array( [ compare_arrays(ensemble_member, part, ami, use_weighting=True) for", "parameters). Args: data: A numpy array, pandas dataframe, or any", "data input to the estimator instead of data. Returns: A", "the affinity_matrix is used if (type(clus_obj).__name__ == \"AgglomerativeClustering\") and (", "attr) ensemble.append(res) # for some estimators such as DBSCAN this", "to show a progress bar. Returns: Returns a tuple: (partition,", "from the ensemble). ensemble_data: A numpy array with the ensemble", "from sklearn.metrics import adjusted_rand_score as ari from sklearn.metrics import adjusted_mutual_info_score", "\"ami_std\": np.std(ami_values), \"nmi_mean\": np.mean(nmi_values), \"nmi_median\": np.median(nmi_values), \"nmi_std\": np.std(nmi_values), } return", "is an instance of a clustering algorithm with a fixed", "list, affinity_matrix=None): \"\"\" It generates an ensemble from the data", "a clustering ensemble (rows are partitions, columns are objects). k:", "executor: tasks = {executor.submit(m, ensemble, k): m.__name__ for m in", "pd.Series( { \"clusterer_id\": clus_name, \"clusterer_params\": str(clus_obj.get_params()), \"partition\": partition, } )", "(a square matrix with the distance between object pairs derived", "tqdm to show a progress bar. Returns: Returns a tuple:", "# get number of clusters partition_no_nan = partition[~np.isnan(partition)] n_clusters =", "generates an ensemble from the data given a set of", "be # given; for ward linkage, data is used, and", "from sklearn.metrics import adjusted_mutual_info_score as ami from sklearn.metrics import normalized_mutual_info_score", "performance_values = { \"ari_mean\": np.mean(ari_values), \"ari_median\": np.median(ari_values), \"ari_std\": np.std(ari_values), \"ami_mean\":", "normalized_mutual_info_score as nmi from tqdm import tqdm from clustering.utils import", "on the ensemble data, obtains the consolidated partition with the", "ensemble information). 
For each object pair, the coassociation matrix contains", "list of attributes to save in the final dataframe; for", "the user can specify. Each of these methods combines the", "will extract this attribute from the estimator and include it", "the partition, the estimator parameters (obtained with the get_params() method)", "with ProcessPoolExecutor(max_workers=n_jobs) as executor: tasks = {executor.submit(m, ensemble, k): m.__name__", "matrix for all objects using the ensemble information). For each", "partition that maximizes the selection criterion. Args: ensemble: a clustering", "{ \"clusterer_id\": clus_name, \"clusterer_params\": str(clus_obj.get_params()), \"partition\": partition, } ) for", "attribute from the estimator and include it in the final", "\"\"\" It generates an ensemble from the data given a", "array, pandas dataframe, or any other structure supported by the", "= pd.Series( { \"clusterer_id\": clus_name, \"clusterer_params\": str(clus_obj.get_params()), \"partition\": partition, }", "= tasks[future] part = future.result() criterion_value = selection_criterion(ensemble, part) methods_results[method_name]", "a consensus clustering method on the ensemble data, obtains the", "the specified method. kwargs: Other parameters passed to `method_func`. Returns:", "from the data given a set of clusterers (a clusterer", "\"partition\": part, \"criterion_value\": criterion_value, } # select the best performing", "get number of clusters partition_no_nan = partition[~np.isnan(partition)] n_clusters = np.unique(partition_no_nan).shape[0]", "A numpy array with the ensemble data that will be", "n_jobs=n_jobs, force_all_finite=\"allow-nan\" ) def supraconsensus(ensemble, k, methods, selection_criterion, n_jobs=1, use_tqdm=False):", "\"partition\": partition, } ) for attr in attributes: if attr", "\"\"\" ensemble = [] for clus_name, clus_obj in tqdm(clusterers.items(), total=len(clusterers)):", "as data input. 
clusterers: A dictionary with clusterers specified in", "to the selection criterion best_method = max( methods_results, key=lambda x:", "use of tqdm to show a progress bar. Returns: Returns", "[] for clus_name, clus_obj in tqdm(clusterers.items(), total=len(clusterers)): # get partition", "a set of methods that the user can specify. Each", "best performing method according to the selection criterion best_method =", "(a clusterer is an instance of a clustering algorithm with", "show a progress bar. Returns: Returns a tuple: (partition, best", "data and affinity_matrix should be # given; for ward linkage,", "numpy array, pandas dataframe, or any other structure supported by", "not hasattr(clus_obj, attr): res[attr] = n_clusters else: res[attr] = getattr(clus_obj,", "tqdm( as_completed(tasks), total=len(tasks), disable=(not use_tqdm), ncols=100, ): method_name = tasks[future]", "selection_criterion(ensemble, part) methods_results[method_name] = { \"partition\": part, \"criterion_value\": criterion_value, }", "argument, and a partition as the second one. n_jobs: number", "method on the ensemble data, obtains the consolidated partition with", "columns are objects). k: the final number of clusters for", "= method_func(ensemble_data, k, **kwargs) nmi_values = np.array( [ compare_arrays(ensemble_member, part,", "clus_obj.fit_predict(affinity_matrix).astype(float) else: partition = clus_obj.fit_predict(data).astype(float) # remove from partition noisy", "total=len(tasks), disable=(not use_tqdm), ncols=100, ): method_name = tasks[future] part =", "if (type(clus_obj).__name__ == \"AgglomerativeClustering\") and ( clus_obj.linkage != \"ward\" ):", "clusterers: A dictionary with clusterers specified in this format: {", "on the ensemble; each returns a combined partition. 
selection_criterion: a", "linkage method is different than ward (which only support euclidean", "and for the other linkage # methods the affinity_matrix is", "ward (which only support euclidean distance), the affinity_matrix is given", "needed in # this context reset_estimator(clus_obj) return pd.DataFrame(ensemble).set_index(\"clusterer_id\") def get_ensemble_distance_matrix(ensemble,", "tasks[future] part = future.result() criterion_value = selection_criterion(ensemble, part) methods_results[method_name] =", "= future.result() criterion_value = selection_criterion(ensemble, part) methods_results[method_name] = { \"partition\":", "percentage of times the pair of objects was clustered together", "ensemble; each returns a combined partition. selection_criterion: a function that", "context reset_estimator(clus_obj) return pd.DataFrame(ensemble).set_index(\"clusterer_id\") def get_ensemble_distance_matrix(ensemble, n_jobs=1): \"\"\" Given an", "(a distance matrix for all objects using the ensemble information).", "matrix for all objects (coassociation matrix). \"\"\" def _compare(x, y):", "method name, best criterion value) \"\"\" from concurrent.futures import ProcessPoolExecutor,", "(obtained with the get_params() method) and any other attribute specified.", "combines the ensemble and returns a single partition. This function", "} ) for attr in attributes: if attr == \"n_clusters\"", "part, nmi, use_weighting=True) for ensemble_member in ensemble ] ) ami_values", "dataframe, or any other structure supported by the clusterers as", "n_jobs: number of jobs. use_tqdm: ensembles/disables the use of tqdm", "clusters for the combined partition. methods: a list of methods", "clusterers (a clusterer is an instance of a clustering algorithm", "ProcessPoolExecutor, as_completed methods_results = {} with ProcessPoolExecutor(max_workers=n_jobs) as executor: tasks", "ensemble. 
\"\"\" import numpy as np import pandas as pd", "\"ami_mean\": np.mean(ami_values), \"ami_median\": np.median(ami_values), \"ami_std\": np.std(ami_values), \"nmi_mean\": np.mean(nmi_values), \"nmi_median\": np.median(nmi_values),", "(which only support euclidean distance), the affinity_matrix is given as", "as_completed methods_results = {} with ProcessPoolExecutor(max_workers=n_jobs) as executor: tasks =", "partitions, columns are objects). k: the final number of clusters", "a single partition. This function returns the combined partition that", "an ensemble, it computes the coassociation matrix (a distance matrix", "n_jobs=1, use_tqdm=False): \"\"\" It combines a clustering ensemble using a", "accumulation methods, this is the coassociation matrix (a square matrix", "!= \"ward\" ): partition = clus_obj.fit_predict(affinity_matrix).astype(float) else: partition = clus_obj.fit_predict(data).astype(float)", "sklearn. Returns: A numpy array representing a square distance matrix", "# methods the affinity_matrix is used if (type(clus_obj).__name__ == \"AgglomerativeClustering\")", "estimator saves references of huge data structures not needed in", "select the best performing method according to the selection criterion", "np.unique(partition_no_nan).shape[0] # stop if n_clusters <= 1 if n_clusters <=", ") performance_values = { \"ari_mean\": np.mean(ari_values), \"ari_median\": np.median(ari_values), \"ari_std\": np.std(ari_values),", "of jobs. use_tqdm: ensembles/disables the use of tqdm to show", "in ensemble ] ) ami_values = np.array( [ compare_arrays(ensemble_member, part,", "== \"n_clusters\" and not hasattr(clus_obj, attr): res[attr] = n_clusters else:", "if n_clusters <= 1 if n_clusters <= 1: reset_estimator(clus_obj) continue", "attributes: list, affinity_matrix=None): \"\"\" It generates an ensemble from the", "of methods that the user can specify. 
Each of these", "example, if using DBSCAN) partition[partition < 0] = np.nan #", "a tuple: (partition, best method name, best criterion value) \"\"\"", "performance measures. Args: method_func: A consensus function (first argument is", "methods that the user can specify. Each of these methods", "support euclidean distance), the affinity_matrix is given as data input", "(partition) and columns are objects. n_jobs: The number of jobs", "set of clustering solutions on the same data. Each row", "Runs a consensus clustering method on the ensemble data, obtains", "part) methods_results[method_name] = { \"partition\": part, \"criterion_value\": criterion_value, } #", "in rows, objects in columns). k: The number of clusters", "list of methods to apply on the ensemble; each returns", "of parameters). Args: data: A numpy array, pandas dataframe, or", "name/id, the partition, the estimator parameters (obtained with the get_params()", "such as DBSCAN this is needed, because otherwise # the", "one. n_jobs: number of jobs. use_tqdm: ensembles/disables the use of", "numpy as np import pandas as pd from sklearn.metrics import", "compare_arrays(ensemble_member, part, ari, use_weighting=True) for ensemble_member in ensemble ] )", "\"ari_median\": np.median(ari_values), \"ari_std\": np.std(ari_values), \"ami_mean\": np.mean(ami_values), \"ami_median\": np.median(ami_values), \"ami_std\": np.std(ami_values),", "np import pandas as pd from sklearn.metrics import pairwise_distances from", "best_method_results[\"criterion_value\"], ) def run_method_and_compute_agreement(method_func, ensemble_data, ensemble, k, **kwargs): \"\"\" Runs", "method_func(ensemble_data, k, **kwargs) nmi_values = np.array( [ compare_arrays(ensemble_member, part, nmi,", "the ensemble information). For each object pair, the coassociation matrix", "(rows are partitions, columns are objects). 
k: the final number", "\"\"\" import numpy as np import pandas as pd from", "= clus_obj.fit_predict(affinity_matrix).astype(float) else: partition = clus_obj.fit_predict(data).astype(float) # remove from partition", "as executor: tasks = {executor.submit(m, ensemble, k): m.__name__ for m", "object pair, the coassociation matrix contains the percentage of times", "methods_results[x][\"criterion_value\"] ) best_method_results = methods_results[best_method] return ( best_method_results[\"partition\"], best_method, best_method_results[\"criterion_value\"],", "\"\"\" It combines a clustering ensemble using a set of", "the coassociation matrix (a distance matrix for all objects using", "method is different than ward (which only support euclidean distance),", "together in the ensemble. Args: ensemble: A numpy array representing", "is a clustering solution (partition) and columns are objects. n_jobs:", "matrix contains the percentage of times the pair of objects", "criterion best_method = max( methods_results, key=lambda x: methods_results[x][\"criterion_value\"] ) best_method_results", "this format: { 'k-means #1': KMeans(n_clusters=2), ... } attributes: A", "clustering solutions on the same data. Each row is a", "ensemble data that will be given to the specified method.", "for all objects (coassociation matrix). \"\"\" def _compare(x, y): xy", "in the final dataframe; for example, including \"n_clusters\" will extract", "number of clusters for the combined partition. methods: a list", "attributes to save in the final dataframe; for example, including", "times the pair of objects was clustered together in the", "A numpy array representing the ensemble (partitions in rows, objects", "ensemble.append(res) # for some estimators such as DBSCAN this is", "references of huge data structures not needed in # this", "row is a clustering solution (partition) and columns are objects.", "partition. 
\"\"\" part = method_func(ensemble_data, k, **kwargs) nmi_values = np.array(", "sklearn) and the linkage method is different than ward (which", "future in tqdm( as_completed(tasks), total=len(tasks), disable=(not use_tqdm), ncols=100, ): method_name", "import adjusted_mutual_info_score as ami from sklearn.metrics import normalized_mutual_info_score as nmi", "returns the combined partition that maximizes the selection criterion. Args:", "\"\"\" from concurrent.futures import ProcessPoolExecutor, as_completed methods_results = {} with", "estimator and include it in the final dataframe returned. affinity_matrix:", "of data. Returns: A pandas DataFrame with all the partitions", "and any other attribute specified. \"\"\" ensemble = [] for", "a tuple with the data partition derived from the ensemble", "coassociation matrix (a distance matrix for all objects using the", "clus_obj in tqdm(clusterers.items(), total=len(clusterers)): # get partition # # for", "It returns a tuple with the data partition derived from", "parameters passed to `method_func`. Returns: It returns a tuple with", "as np import pandas as pd from sklearn.metrics import pairwise_distances", "\"n_clusters\" and not hasattr(clus_obj, attr): res[attr] = n_clusters else: res[attr]", "Contains functions to generate and combine a clustering ensemble. \"\"\"", "instance of a clustering algorithm with a fixed set of", "= partition[~np.isnan(partition)] n_clusters = np.unique(partition_no_nan).shape[0] # stop if n_clusters <=", "Args: ensemble: A numpy array representing a set of clustering", "some estimators such as DBSCAN this is needed, because otherwise", "tasks = {executor.submit(m, ensemble, k): m.__name__ for m in methods}", "compare_arrays def generate_ensemble(data, clusterers: dict, attributes: list, affinity_matrix=None): \"\"\" It", "bar. Returns: Returns a tuple: (partition, best method name, best", "square distance matrix for all objects (coassociation matrix). 
\"\"\" def", "first argument, and a partition as the second one. n_jobs:", "using the specified method, and some performance measures of this", "affinity_matrix=None): \"\"\" It generates an ensemble from the data given", "returns a tuple with the data partition derived from the", "because otherwise # the estimator saves references of huge data", "matrix). \"\"\" def _compare(x, y): xy = np.array([x, y]).T xy", "combined partition that maximizes the selection criterion. Args: ensemble: a", "final dataframe; for example, including \"n_clusters\" will extract this attribute", "from clustering.utils import reset_estimator, compare_arrays def generate_ensemble(data, clusterers: dict, attributes:", "# stop if n_clusters <= 1 if n_clusters <= 1:", "A pandas DataFrame with all the partitions generated by the", "ensemble: A numpy array representing the ensemble (partitions in rows,", "with all the partitions generated by the clusterers. Columns include", "): method_name = tasks[future] part = future.result() criterion_value = selection_criterion(ensemble,", "The number of clusters to obtain from the ensemble data", "method) and any other attribute specified. \"\"\" ensemble = []", "y]).T xy = xy[~np.isnan(xy).any(axis=1)] return (xy[:, 0] != xy[:, 1]).sum()", "ensemble using a set of methods that the user can", "data. Returns: A pandas DataFrame with all the partitions generated", "format: { 'k-means #1': KMeans(n_clusters=2), ... } attributes: A list", "the ensemble (partitions in rows, objects in columns). k: The", "'k-means #1': KMeans(n_clusters=2), ... } attributes: A list of attributes", "= clus_obj.fit_predict(data).astype(float) # remove from partition noisy points (for example,", "x: methods_results[x][\"criterion_value\"] ) best_method_results = methods_results[best_method] return ( best_method_results[\"partition\"], best_method,", "specified method. For evidence accumulation methods, this is the coassociation", "performance measures of this partition. 
\"\"\" part = method_func(ensemble_data, k,", "in ensemble ] ) ari_values = np.array( [ compare_arrays(ensemble_member, part,", "pd.DataFrame(ensemble).set_index(\"clusterer_id\") def get_ensemble_distance_matrix(ensemble, n_jobs=1): \"\"\" Given an ensemble, it computes", "for ward linkage, data is used, and for the other", "objects (coassociation matrix). \"\"\" def _compare(x, y): xy = np.array([x,", "= np.unique(partition_no_nan).shape[0] # stop if n_clusters <= 1 if n_clusters", "the clusterers as data input. clusterers: A dictionary with clusterers", "dataframe returned. affinity_matrix: If the clustering algorithm is AgglomerativeClustering (from", "the data partition derived from the ensemble data using the", "0] != xy[:, 1]).sum() / xy.shape[0] return pairwise_distances( ensemble.T, metric=_compare,", "Returns: A numpy array representing a square distance matrix for", "ensemble, k, **kwargs): \"\"\" Runs a consensus clustering method on", "nmi from tqdm import tqdm from clustering.utils import reset_estimator, compare_arrays", "np.std(ami_values), \"nmi_mean\": np.mean(nmi_values), \"nmi_median\": np.median(nmi_values), \"nmi_std\": np.std(nmi_values), } return part,", "contains the percentage of times the pair of objects was", "needed, because otherwise # the estimator saves references of huge", "function that represents the selection criterion; this function has to", "number of jobs. use_tqdm: ensembles/disables the use of tqdm to", "= { \"partition\": part, \"criterion_value\": criterion_value, } # select the", "else: partition = clus_obj.fit_predict(data).astype(float) # remove from partition noisy points", "partition. methods: a list of methods to apply on the", "0] = np.nan # get number of clusters partition_no_nan =", "of this partition. 
\"\"\" part = method_func(ensemble_data, k, **kwargs) nmi_values", "function (first argument is either the ensemble or the coassociation", "of a clustering algorithm with a fixed set of parameters).", "solution (partition) and columns are objects. n_jobs: The number of", "ensemble data using the specified method, and some performance measures", "clusterers. Columns include the clusterer name/id, the partition, the estimator", "methods} for future in tqdm( as_completed(tasks), total=len(tasks), disable=(not use_tqdm), ncols=100,", "clustering both data and affinity_matrix should be # given; for", "= np.array( [ compare_arrays(ensemble_member, part, ari, use_weighting=True) for ensemble_member in", "derived from the ensemble). ensemble: A numpy array representing the", "structure supported by the clusterers as data input. clusterers: A", "(from sklearn) and the linkage method is different than ward", "to obtain from the ensemble data using the specified method.", "objects). k: the final number of clusters for the combined", "the ensemble; each returns a combined partition. selection_criterion: a function", "k): m.__name__ for m in methods} for future in tqdm(", "is given as data input to the estimator instead of", "data, obtains the consolidated partition with the desired number of", "include the clusterer name/id, the partition, the estimator parameters (obtained", "# for some estimators such as DBSCAN this is needed,", "include it in the final dataframe returned. affinity_matrix: If the", "linkage # methods the affinity_matrix is used if (type(clus_obj).__name__ ==", "in this format: { 'k-means #1': KMeans(n_clusters=2), ... 
} attributes:", "selection criterion best_method = max( methods_results, key=lambda x: methods_results[x][\"criterion_value\"] )", "part, ari, use_weighting=True) for ensemble_member in ensemble ] ) performance_values", "reset_estimator(clus_obj) continue res = pd.Series( { \"clusterer_id\": clus_name, \"clusterer_params\": str(clus_obj.get_params()),", "and columns are objects. n_jobs: The number of jobs used", "that represents the selection criterion; this function has to accept", "to the specified method. For evidence accumulation methods, this is", "data is used, and for the other linkage # methods", "using a set of methods that the user can specify.", "partition = clus_obj.fit_predict(affinity_matrix).astype(float) else: partition = clus_obj.fit_predict(data).astype(float) # remove from", "dictionary with clusterers specified in this format: { 'k-means #1':", "dataframe; for example, including \"n_clusters\" will extract this attribute from", "== \"AgglomerativeClustering\") and ( clus_obj.linkage != \"ward\" ): partition =", "in methods} for future in tqdm( as_completed(tasks), total=len(tasks), disable=(not use_tqdm),", "all the partitions generated by the clusterers. Columns include the", "consolidated partition with the desired number of clusters, and computes", "the estimator saves references of huge data structures not needed", "a progress bar. Returns: Returns a tuple: (partition, best method", "clustering algorithm is AgglomerativeClustering (from sklearn) and the linkage method", "np.mean(ami_values), \"ami_median\": np.median(ami_values), \"ami_std\": np.std(ami_values), \"nmi_mean\": np.mean(nmi_values), \"nmi_median\": np.median(nmi_values), \"nmi_std\":", "returns a combined partition. selection_criterion: a function that represents the", "estimator instead of data. 
Returns: A pandas DataFrame with all", "affinity_matrix is given as data input to the estimator instead", "max( methods_results, key=lambda x: methods_results[x][\"criterion_value\"] ) best_method_results = methods_results[best_method] return", "square matrix with the distance between object pairs derived from", "given to the specified method. For evidence accumulation methods, this", "!= xy[:, 1]).sum() / xy.shape[0] return pairwise_distances( ensemble.T, metric=_compare, n_jobs=n_jobs,", "get_ensemble_distance_matrix(ensemble, n_jobs=1): \"\"\" Given an ensemble, it computes the coassociation", "For each object pair, the coassociation matrix contains the percentage", "has to accept an ensemble as the first argument, and", "method, and some performance measures of this partition. \"\"\" part", "of tqdm to show a progress bar. Returns: Returns a", "numpy array with the ensemble data that will be given", "# remove from partition noisy points (for example, if using", "some performance measures of this partition. \"\"\" part = method_func(ensemble_data,", ") best_method_results = methods_results[best_method] return ( best_method_results[\"partition\"], best_method, best_method_results[\"criterion_value\"], )", "accept an ensemble as the first argument, and a partition", "np.array( [ compare_arrays(ensemble_member, part, ari, use_weighting=True) for ensemble_member in ensemble", "attribute specified. \"\"\" ensemble = [] for clus_name, clus_obj in", "clustering ensemble (rows are partitions, columns are objects). k: the", "an ensemble as the first argument, and a partition as", "ensemble (rows are partitions, columns are objects). 
k: the final", "= {} with ProcessPoolExecutor(max_workers=n_jobs) as executor: tasks = {executor.submit(m, ensemble,", "concurrent.futures import ProcessPoolExecutor, as_completed methods_results = {} with ProcessPoolExecutor(max_workers=n_jobs) as", "import reset_estimator, compare_arrays def generate_ensemble(data, clusterers: dict, attributes: list, affinity_matrix=None):", "between object pairs derived from the ensemble). ensemble: A numpy", "future.result() criterion_value = selection_criterion(ensemble, part) methods_results[method_name] = { \"partition\": part,", "example, including \"n_clusters\" will extract this attribute from the estimator", "numpy array representing a set of clustering solutions on the", "the estimator instead of data. Returns: A pandas DataFrame with", "distance matrix for all objects (coassociation matrix). \"\"\" def _compare(x,", "using the specified method. kwargs: Other parameters passed to `method_func`.", "best_method, best_method_results[\"criterion_value\"], ) def run_method_and_compute_agreement(method_func, ensemble_data, ensemble, k, **kwargs): \"\"\"", "n_jobs: The number of jobs used by the pairwise_distance matrix", "partition, } ) for attr in attributes: if attr ==", "import normalized_mutual_info_score as nmi from tqdm import tqdm from clustering.utils", "as_completed(tasks), total=len(tasks), disable=(not use_tqdm), ncols=100, ): method_name = tasks[future] part", "xy[~np.isnan(xy).any(axis=1)] return (xy[:, 0] != xy[:, 1]).sum() / xy.shape[0] return", "def generate_ensemble(data, clusterers: dict, attributes: list, affinity_matrix=None): \"\"\" It generates", "the best performing method according to the selection criterion best_method", "of objects was clustered together in the ensemble. Args: ensemble:", "clus_obj.fit_predict(data).astype(float) # remove from partition noisy points (for example, if", "are partitions, columns are objects). 
k: the final number of", "pandas DataFrame with all the partitions generated by the clusterers.", "this is the coassociation matrix (a square matrix with the", "data. Each row is a clustering solution (partition) and columns", "objects using the ensemble information). For each object pair, the", "... } attributes: A list of attributes to save in", "[ compare_arrays(ensemble_member, part, nmi, use_weighting=True) for ensemble_member in ensemble ]", "pandas dataframe, or any other structure supported by the clusterers", "this attribute from the estimator and include it in the", "in tqdm(clusterers.items(), total=len(clusterers)): # get partition # # for agglomerative", "combined partition. selection_criterion: a function that represents the selection criterion;", "if n_clusters <= 1: reset_estimator(clus_obj) continue res = pd.Series( {", ") ari_values = np.array( [ compare_arrays(ensemble_member, part, ari, use_weighting=True) for", "estimators such as DBSCAN this is needed, because otherwise #", "jobs. use_tqdm: ensembles/disables the use of tqdm to show a", "res = pd.Series( { \"clusterer_id\": clus_name, \"clusterer_params\": str(clus_obj.get_params()), \"partition\": partition,", "\"clusterer_id\": clus_name, \"clusterer_params\": str(clus_obj.get_params()), \"partition\": partition, } ) for attr", "methods combines the ensemble and returns a single partition. This", "with a fixed set of parameters). Args: data: A numpy", "to apply on the ensemble; each returns a combined partition.", "series of performance measures. Args: method_func: A consensus function (first", "measures. 
Args: method_func: A consensus function (first argument is either", "{ \"ari_mean\": np.mean(ari_values), \"ari_median\": np.median(ari_values), \"ari_std\": np.std(ari_values), \"ami_mean\": np.mean(ami_values), \"ami_median\":", "represents the selection criterion; this function has to accept an", "import tqdm from clustering.utils import reset_estimator, compare_arrays def generate_ensemble(data, clusterers:", "partitions generated by the clusterers. Columns include the clusterer name/id,", "1 if n_clusters <= 1: reset_estimator(clus_obj) continue res = pd.Series(", "if attr == \"n_clusters\" and not hasattr(clus_obj, attr): res[attr] =", "in the final dataframe returned. affinity_matrix: If the clustering algorithm", "use_weighting=True) for ensemble_member in ensemble ] ) ami_values = np.array(", "ensemble, it computes the coassociation matrix (a distance matrix for", "# this context reset_estimator(clus_obj) return pd.DataFrame(ensemble).set_index(\"clusterer_id\") def get_ensemble_distance_matrix(ensemble, n_jobs=1): \"\"\"", "methods_results[best_method] return ( best_method_results[\"partition\"], best_method, best_method_results[\"criterion_value\"], ) def run_method_and_compute_agreement(method_func, ensemble_data,", "It generates an ensemble from the data given a set", "ami from sklearn.metrics import normalized_mutual_info_score as nmi from tqdm import", "reset_estimator(clus_obj) return pd.DataFrame(ensemble).set_index(\"clusterer_id\") def get_ensemble_distance_matrix(ensemble, n_jobs=1): \"\"\" Given an ensemble,", "data that will be given to the specified method. For", "the ensemble data using the specified method, and some performance", "get partition # # for agglomerative clustering both data and", "pair of objects was clustered together in the ensemble. 
Args:", "from the estimator and include it in the final dataframe", "import ProcessPoolExecutor, as_completed methods_results = {} with ProcessPoolExecutor(max_workers=n_jobs) as executor:", "sklearn.metrics import adjusted_mutual_info_score as ami from sklearn.metrics import normalized_mutual_info_score as", "is different than ward (which only support euclidean distance), the", "criterion_value, } # select the best performing method according to", "use_weighting=True) for ensemble_member in ensemble ] ) performance_values = {", "nmi, use_weighting=True) for ensemble_member in ensemble ] ) ami_values =", "data given a set of clusterers (a clusterer is an", "of jobs used by the pairwise_distance matrix from sklearn. Returns:", "ensemble: a clustering ensemble (rows are partitions, columns are objects).", "in attributes: if attr == \"n_clusters\" and not hasattr(clus_obj, attr):", "= selection_criterion(ensemble, part) methods_results[method_name] = { \"partition\": part, \"criterion_value\": criterion_value,", "the distance between object pairs derived from the ensemble). ensemble:", "of these methods combines the ensemble and returns a single", "of methods to apply on the ensemble; each returns a", "(type(clus_obj).__name__ == \"AgglomerativeClustering\") and ( clus_obj.linkage != \"ward\" ): partition", "and not hasattr(clus_obj, attr): res[attr] = n_clusters else: res[attr] =", "numpy array representing a square distance matrix for all objects", "on the same data. Each row is a clustering solution", "consensus function (first argument is either the ensemble or the", "ensemble data using the specified method. kwargs: Other parameters passed", "the ensemble). 
ensemble: A numpy array representing the ensemble (partitions", "the specified method, and some performance measures of this partition.", "np.std(ari_values), \"ami_mean\": np.mean(ami_values), \"ami_median\": np.median(ami_values), \"ami_std\": np.std(ami_values), \"nmi_mean\": np.mean(nmi_values), \"nmi_median\":", "data structures not needed in # this context reset_estimator(clus_obj) return", "are objects). k: the final number of clusters for the", "desired number of clusters, and computes a series of performance", "clus_name, clus_obj in tqdm(clusterers.items(), total=len(clusterers)): # get partition # #", "\"\"\" Contains functions to generate and combine a clustering ensemble.", "by the clusterers as data input. clusterers: A dictionary with", "xy[:, 1]).sum() / xy.shape[0] return pairwise_distances( ensemble.T, metric=_compare, n_jobs=n_jobs, force_all_finite=\"allow-nan\"", "criterion; this function has to accept an ensemble as the", "m.__name__ for m in methods} for future in tqdm( as_completed(tasks),", "use_tqdm), ncols=100, ): method_name = tasks[future] part = future.result() criterion_value", "partition_no_nan = partition[~np.isnan(partition)] n_clusters = np.unique(partition_no_nan).shape[0] # stop if n_clusters", "any other attribute specified. \"\"\" ensemble = [] for clus_name,", "the specified method. For evidence accumulation methods, this is the", "clustering ensemble. 
\"\"\" import numpy as np import pandas as", "Returns: Returns a tuple: (partition, best method name, best criterion", "method according to the selection criterion best_method = max( methods_results,", "stop if n_clusters <= 1 if n_clusters <= 1: reset_estimator(clus_obj)", "{executor.submit(m, ensemble, k): m.__name__ for m in methods} for future", "str(clus_obj.get_params()), \"partition\": partition, } ) for attr in attributes: if", "\"nmi_mean\": np.mean(nmi_values), \"nmi_median\": np.median(nmi_values), \"nmi_std\": np.std(nmi_values), } return part, performance_values", "than ward (which only support euclidean distance), the affinity_matrix is", "to the estimator instead of data. Returns: A pandas DataFrame", "the ensemble or the coassociation matrix derived from the ensemble).", "selection criterion; this function has to accept an ensemble as", "] ) ari_values = np.array( [ compare_arrays(ensemble_member, part, ari, use_weighting=True)", "# # for agglomerative clustering both data and affinity_matrix should", "from sklearn.metrics import normalized_mutual_info_score as nmi from tqdm import tqdm", "as the second one. n_jobs: number of jobs. use_tqdm: ensembles/disables", "other attribute specified. \"\"\" ensemble = [] for clus_name, clus_obj", "generated by the clusterers. Columns include the clusterer name/id, the", "def _compare(x, y): xy = np.array([x, y]).T xy = xy[~np.isnan(xy).any(axis=1)]", "Each row is a clustering solution (partition) and columns are", "partition, the estimator parameters (obtained with the get_params() method) and", "m in methods} for future in tqdm( as_completed(tasks), total=len(tasks), disable=(not", "for clus_name, clus_obj in tqdm(clusterers.items(), total=len(clusterers)): # get partition #", "AgglomerativeClustering (from sklearn) and the linkage method is different than", "affinity_matrix should be # given; for ward linkage, data is", "same data. 
Each row is a clustering solution (partition) and", "a partition as the second one. n_jobs: number of jobs.", "hasattr(clus_obj, attr): res[attr] = n_clusters else: res[attr] = getattr(clus_obj, attr)", "ensemble_member in ensemble ] ) ari_values = np.array( [ compare_arrays(ensemble_member,", "number of clusters partition_no_nan = partition[~np.isnan(partition)] n_clusters = np.unique(partition_no_nan).shape[0] #", "derived from the ensemble data using the specified method, and", "partition[partition < 0] = np.nan # get number of clusters", "and returns a single partition. This function returns the combined", "sklearn.metrics import adjusted_rand_score as ari from sklearn.metrics import adjusted_mutual_info_score as", "maximizes the selection criterion. Args: ensemble: a clustering ensemble (rows", "as ami from sklearn.metrics import normalized_mutual_info_score as nmi from tqdm", "continue res = pd.Series( { \"clusterer_id\": clus_name, \"clusterer_params\": str(clus_obj.get_params()), \"partition\":", "the consolidated partition with the desired number of clusters, and", "ensemble). ensemble: A numpy array representing the ensemble (partitions in", "with the data partition derived from the ensemble data using", "A numpy array, pandas dataframe, or any other structure supported", "best_method = max( methods_results, key=lambda x: methods_results[x][\"criterion_value\"] ) best_method_results =", "(first argument is either the ensemble or the coassociation matrix", "\"ari_mean\": np.mean(ari_values), \"ari_median\": np.median(ari_values), \"ari_std\": np.std(ari_values), \"ami_mean\": np.mean(ami_values), \"ami_median\": np.median(ami_values),", "methods to apply on the ensemble; each returns a combined", "n_clusters else: res[attr] = getattr(clus_obj, attr) ensemble.append(res) # for some", "from the ensemble). 
ensemble: A numpy array representing the ensemble", "to accept an ensemble as the first argument, and a", "agglomerative clustering both data and affinity_matrix should be # given;", "part, \"criterion_value\": criterion_value, } # select the best performing method", "is either the ensemble or the coassociation matrix derived from", "np.array( [ compare_arrays(ensemble_member, part, nmi, use_weighting=True) for ensemble_member in ensemble", "is AgglomerativeClustering (from sklearn) and the linkage method is different", "clusters to obtain from the ensemble data using the specified", "= np.array([x, y]).T xy = xy[~np.isnan(xy).any(axis=1)] return (xy[:, 0] !=", "(for example, if using DBSCAN) partition[partition < 0] = np.nan", "are objects. n_jobs: The number of jobs used by the", "supported by the clusterers as data input. clusterers: A dictionary", "for agglomerative clustering both data and affinity_matrix should be #", "= getattr(clus_obj, attr) ensemble.append(res) # for some estimators such as", "clustering.utils import reset_estimator, compare_arrays def generate_ensemble(data, clusterers: dict, attributes: list,", "ensemble = [] for clus_name, clus_obj in tqdm(clusterers.items(), total=len(clusterers)): #", "consensus clustering method on the ensemble data, obtains the consolidated", "from sklearn. Returns: A numpy array representing a square distance", "a series of performance measures. Args: method_func: A consensus function", "the final dataframe; for example, including \"n_clusters\" will extract this", "Each of these methods combines the ensemble and returns a", "array representing the ensemble (partitions in rows, objects in columns).", "for all objects using the ensemble information). 
For each object", "representing a set of clustering solutions on the same data.", "supraconsensus(ensemble, k, methods, selection_criterion, n_jobs=1, use_tqdm=False): \"\"\" It combines a", "ensemble ] ) ari_values = np.array( [ compare_arrays(ensemble_member, part, ari,", "the pairwise_distance matrix from sklearn. Returns: A numpy array representing", "reset_estimator, compare_arrays def generate_ensemble(data, clusterers: dict, attributes: list, affinity_matrix=None): \"\"\"", "partition as the second one. n_jobs: number of jobs. use_tqdm:", "a list of methods to apply on the ensemble; each", "algorithm with a fixed set of parameters). Args: data: A", "\"criterion_value\": criterion_value, } # select the best performing method according", "= methods_results[best_method] return ( best_method_results[\"partition\"], best_method, best_method_results[\"criterion_value\"], ) def run_method_and_compute_agreement(method_func,", "methods, this is the coassociation matrix (a square matrix with", "compare_arrays(ensemble_member, part, nmi, use_weighting=True) for ensemble_member in ensemble ] )", "argument is either the ensemble or the coassociation matrix derived", "specified method. kwargs: Other parameters passed to `method_func`. Returns: It", "the clusterer name/id, the partition, the estimator parameters (obtained with", "representing a square distance matrix for all objects (coassociation matrix).", "(partition, best method name, best criterion value) \"\"\" from concurrent.futures", "for example, including \"n_clusters\" will extract this attribute from the", "as DBSCAN this is needed, because otherwise # the estimator", "of clusters for the combined partition. methods: a list of", "\"ward\" ): partition = clus_obj.fit_predict(affinity_matrix).astype(float) else: partition = clus_obj.fit_predict(data).astype(float) #", "n_clusters <= 1: reset_estimator(clus_obj) continue res = pd.Series( { \"clusterer_id\":", "set of methods that the user can specify. 
Each of", "# given; for ward linkage, data is used, and for", "clusterers as data input. clusterers: A dictionary with clusterers specified", "that maximizes the selection criterion. Args: ensemble: a clustering ensemble", "dict, attributes: list, affinity_matrix=None): \"\"\" It generates an ensemble from", "ensemble_data: A numpy array with the ensemble data that will", "the use of tqdm to show a progress bar. Returns:", "tqdm import tqdm from clustering.utils import reset_estimator, compare_arrays def generate_ensemble(data,", "method_func: A consensus function (first argument is either the ensemble", "= {executor.submit(m, ensemble, k): m.__name__ for m in methods} for", "rows, objects in columns). k: The number of clusters to", "affinity_matrix is used if (type(clus_obj).__name__ == \"AgglomerativeClustering\") and ( clus_obj.linkage", ") for attr in attributes: if attr == \"n_clusters\" and", "each object pair, the coassociation matrix contains the percentage of", "the get_params() method) and any other attribute specified. \"\"\" ensemble", "import pairwise_distances from sklearn.metrics import adjusted_rand_score as ari from sklearn.metrics", "n_clusters = np.unique(partition_no_nan).shape[0] # stop if n_clusters <= 1 if", "not needed in # this context reset_estimator(clus_obj) return pd.DataFrame(ensemble).set_index(\"clusterer_id\") def", "run_method_and_compute_agreement(method_func, ensemble_data, ensemble, k, **kwargs): \"\"\" Runs a consensus clustering", "nmi_values = np.array( [ compare_arrays(ensemble_member, part, nmi, use_weighting=True) for ensemble_member", "returns a single partition. This function returns the combined partition", "use_tqdm=False): \"\"\" It combines a clustering ensemble using a set", "and combine a clustering ensemble. \"\"\" import numpy as np", "these methods combines the ensemble and returns a single partition.", "this partition. 
\"\"\" part = method_func(ensemble_data, k, **kwargs) nmi_values =", "according to the selection criterion best_method = max( methods_results, key=lambda", "ensemble.T, metric=_compare, n_jobs=n_jobs, force_all_finite=\"allow-nan\" ) def supraconsensus(ensemble, k, methods, selection_criterion,", "<= 1: reset_estimator(clus_obj) continue res = pd.Series( { \"clusterer_id\": clus_name,", "\"\"\" Given an ensemble, it computes the coassociation matrix (a", "of attributes to save in the final dataframe; for example,", "information). For each object pair, the coassociation matrix contains the", "( best_method_results[\"partition\"], best_method, best_method_results[\"criterion_value\"], ) def run_method_and_compute_agreement(method_func, ensemble_data, ensemble, k,", "} # select the best performing method according to the", "of clusters partition_no_nan = partition[~np.isnan(partition)] n_clusters = np.unique(partition_no_nan).shape[0] # stop", "performing method according to the selection criterion best_method = max(", "ari_values = np.array( [ compare_arrays(ensemble_member, part, ari, use_weighting=True) for ensemble_member", "combine a clustering ensemble. \"\"\" import numpy as np import", "selection criterion. Args: ensemble: a clustering ensemble (rows are partitions,", "): partition = clus_obj.fit_predict(affinity_matrix).astype(float) else: partition = clus_obj.fit_predict(data).astype(float) # remove", "k, **kwargs): \"\"\" Runs a consensus clustering method on the", "Other parameters passed to `method_func`. Returns: It returns a tuple", "of times the pair of objects was clustered together in", "the ensemble data using the specified method. kwargs: Other parameters", "KMeans(n_clusters=2), ... 
} attributes: A list of attributes to save", "clus_obj.linkage != \"ward\" ): partition = clus_obj.fit_predict(affinity_matrix).astype(float) else: partition =", "for ensemble_member in ensemble ] ) ari_values = np.array( [", "{} with ProcessPoolExecutor(max_workers=n_jobs) as executor: tasks = {executor.submit(m, ensemble, k):", "the estimator parameters (obtained with the get_params() method) and any", "key=lambda x: methods_results[x][\"criterion_value\"] ) best_method_results = methods_results[best_method] return ( best_method_results[\"partition\"],", "clusters, and computes a series of performance measures. Args: method_func:", "returned. affinity_matrix: If the clustering algorithm is AgglomerativeClustering (from sklearn)", "that will be given to the specified method. For evidence", "ensemble ] ) ami_values = np.array( [ compare_arrays(ensemble_member, part, ami,", "[ compare_arrays(ensemble_member, part, ari, use_weighting=True) for ensemble_member in ensemble ]", "and ( clus_obj.linkage != \"ward\" ): partition = clus_obj.fit_predict(affinity_matrix).astype(float) else:", "matrix (a square matrix with the distance between object pairs", "matrix from sklearn. Returns: A numpy array representing a square", "ami_values = np.array( [ compare_arrays(ensemble_member, part, ami, use_weighting=True) for ensemble_member", "either the ensemble or the coassociation matrix derived from the", "of clusters to obtain from the ensemble data using the", "pairwise_distances from sklearn.metrics import adjusted_rand_score as ari from sklearn.metrics import", "as pd from sklearn.metrics import pairwise_distances from sklearn.metrics import adjusted_rand_score", "specified in this format: { 'k-means #1': KMeans(n_clusters=2), ... 
}", "data partition derived from the ensemble data using the specified", "clusterer name/id, the partition, the estimator parameters (obtained with the", "methods_results, key=lambda x: methods_results[x][\"criterion_value\"] ) best_method_results = methods_results[best_method] return (", "best criterion value) \"\"\" from concurrent.futures import ProcessPoolExecutor, as_completed methods_results", "for the combined partition. methods: a list of methods to", "as ari from sklearn.metrics import adjusted_mutual_info_score as ami from sklearn.metrics", "= np.nan # get number of clusters partition_no_nan = partition[~np.isnan(partition)]", "partition with the desired number of clusters, and computes a", "given; for ward linkage, data is used, and for the", "best_method_results[\"partition\"], best_method, best_method_results[\"criterion_value\"], ) def run_method_and_compute_agreement(method_func, ensemble_data, ensemble, k, **kwargs):", "{ 'k-means #1': KMeans(n_clusters=2), ... } attributes: A list of", "estimator parameters (obtained with the get_params() method) and any other", "as the first argument, and a partition as the second", "otherwise # the estimator saves references of huge data structures", "generate and combine a clustering ensemble. \"\"\" import numpy as", "a clustering ensemble using a set of methods that the", "the final number of clusters for the combined partition. methods:", "apply on the ensemble; each returns a combined partition. selection_criterion:", "method_name = tasks[future] part = future.result() criterion_value = selection_criterion(ensemble, part)", "the estimator and include it in the final dataframe returned.", "the coassociation matrix (a square matrix with the distance between", "of performance measures. Args: method_func: A consensus function (first argument", "user can specify. 
Each of these methods combines the ensemble", "A list of attributes to save in the final dataframe;", "A dictionary with clusterers specified in this format: { 'k-means", "pd from sklearn.metrics import pairwise_distances from sklearn.metrics import adjusted_rand_score as", "set of parameters). Args: data: A numpy array, pandas dataframe,", "partition noisy points (for example, if using DBSCAN) partition[partition <", "any other structure supported by the clusterers as data input.", "selection_criterion: a function that represents the selection criterion; this function", "clustered together in the ensemble. Args: ensemble: A numpy array", "part = future.result() criterion_value = selection_criterion(ensemble, part) methods_results[method_name] = {", "use_tqdm: ensembles/disables the use of tqdm to show a progress", "with the desired number of clusters, and computes a series", "xy.shape[0] return pairwise_distances( ensemble.T, metric=_compare, n_jobs=n_jobs, force_all_finite=\"allow-nan\" ) def supraconsensus(ensemble,", "be given to the specified method. For evidence accumulation methods,", "k, methods, selection_criterion, n_jobs=1, use_tqdm=False): \"\"\" It combines a clustering", "huge data structures not needed in # this context reset_estimator(clus_obj)", "objects. n_jobs: The number of jobs used by the pairwise_distance", "and some performance measures of this partition. \"\"\" part =", "remove from partition noisy points (for example, if using DBSCAN)", "an ensemble from the data given a set of clusterers", "a combined partition. selection_criterion: a function that represents the selection", "metric=_compare, n_jobs=n_jobs, force_all_finite=\"allow-nan\" ) def supraconsensus(ensemble, k, methods, selection_criterion, n_jobs=1,", "number of clusters to obtain from the ensemble data using", "object pairs derived from the ensemble). ensemble: A numpy array", "in columns). k: The number of clusters to obtain from", "will be given to the specified method. 
For evidence accumulation", "ensemble_member in ensemble ] ) performance_values = { \"ari_mean\": np.mean(ari_values),", "is needed, because otherwise # the estimator saves references of", "methods_results = {} with ProcessPoolExecutor(max_workers=n_jobs) as executor: tasks = {executor.submit(m,", "attr == \"n_clusters\" and not hasattr(clus_obj, attr): res[attr] = n_clusters", "#1': KMeans(n_clusters=2), ... } attributes: A list of attributes to", "tuple: (partition, best method name, best criterion value) \"\"\" from", "different than ward (which only support euclidean distance), the affinity_matrix", "fixed set of parameters). Args: data: A numpy array, pandas", "(xy[:, 0] != xy[:, 1]).sum() / xy.shape[0] return pairwise_distances( ensemble.T,", "{ \"partition\": part, \"criterion_value\": criterion_value, } # select the best", "pair, the coassociation matrix contains the percentage of times the", "1]).sum() / xy.shape[0] return pairwise_distances( ensemble.T, metric=_compare, n_jobs=n_jobs, force_all_finite=\"allow-nan\" )", "return pd.DataFrame(ensemble).set_index(\"clusterer_id\") def get_ensemble_distance_matrix(ensemble, n_jobs=1): \"\"\" Given an ensemble, it", "numpy array representing the ensemble (partitions in rows, objects in", "to generate and combine a clustering ensemble. \"\"\" import numpy", "generate_ensemble(data, clusterers: dict, attributes: list, affinity_matrix=None): \"\"\" It generates an", "array with the ensemble data that will be given to", "obtains the consolidated partition with the desired number of clusters,", "] ) ami_values = np.array( [ compare_arrays(ensemble_member, part, ami, use_weighting=True)", "from the ensemble data using the specified method, and some", "return (xy[:, 0] != xy[:, 1]).sum() / xy.shape[0] return pairwise_distances(", "] ) performance_values = { \"ari_mean\": np.mean(ari_values), \"ari_median\": np.median(ari_values), \"ari_std\":", "distance between object pairs derived from the ensemble). 
ensemble: A", "partition[~np.isnan(partition)] n_clusters = np.unique(partition_no_nan).shape[0] # stop if n_clusters <= 1", "can specify. Each of these methods combines the ensemble and", "criterion. Args: ensemble: a clustering ensemble (rows are partitions, columns", "ensemble as the first argument, and a partition as the", "with the distance between object pairs derived from the ensemble).", "Returns: It returns a tuple with the data partition derived", "used if (type(clus_obj).__name__ == \"AgglomerativeClustering\") and ( clus_obj.linkage != \"ward\"", "\"ami_median\": np.median(ami_values), \"ami_std\": np.std(ami_values), \"nmi_mean\": np.mean(nmi_values), \"nmi_median\": np.median(nmi_values), \"nmi_std\": np.std(nmi_values),", "array representing a set of clustering solutions on the same", "If the clustering algorithm is AgglomerativeClustering (from sklearn) and the", "data using the specified method. kwargs: Other parameters passed to", "= np.array( [ compare_arrays(ensemble_member, part, nmi, use_weighting=True) for ensemble_member in", "res[attr] = getattr(clus_obj, attr) ensemble.append(res) # for some estimators such", "attributes: A list of attributes to save in the final", "computes a series of performance measures. Args: method_func: A consensus", "<= 1 if n_clusters <= 1: reset_estimator(clus_obj) continue res =", "ensembles/disables the use of tqdm to show a progress bar.", "ari, use_weighting=True) for ensemble_member in ensemble ] ) performance_values =", "1: reset_estimator(clus_obj) continue res = pd.Series( { \"clusterer_id\": clus_name, \"clusterer_params\":", "the pair of objects was clustered together in the ensemble.", "a clustering solution (partition) and columns are objects. n_jobs: The", "tqdm(clusterers.items(), total=len(clusterers)): # get partition # # for agglomerative clustering", "each returns a combined partition. 
selection_criterion: a function that represents", "with the ensemble data that will be given to the", "the other linkage # methods the affinity_matrix is used if", "methods the affinity_matrix is used if (type(clus_obj).__name__ == \"AgglomerativeClustering\") and", "k: The number of clusters to obtain from the ensemble", "obtain from the ensemble data using the specified method. kwargs:", "\"\"\" Runs a consensus clustering method on the ensemble data,", "parameters (obtained with the get_params() method) and any other attribute", "combines a clustering ensemble using a set of methods that", "for the other linkage # methods the affinity_matrix is used", "import pandas as pd from sklearn.metrics import pairwise_distances from sklearn.metrics", "data: A numpy array, pandas dataframe, or any other structure", "\"AgglomerativeClustering\") and ( clus_obj.linkage != \"ward\" ): partition = clus_obj.fit_predict(affinity_matrix).astype(float)", "array representing a square distance matrix for all objects (coassociation", "partition derived from the ensemble data using the specified method,", "import numpy as np import pandas as pd from sklearn.metrics", "structures not needed in # this context reset_estimator(clus_obj) return pd.DataFrame(ensemble).set_index(\"clusterer_id\")", "the ensemble and returns a single partition. This function returns", "coassociation matrix (a square matrix with the distance between object", "It combines a clustering ensemble using a set of methods", "used by the pairwise_distance matrix from sklearn. Returns: A numpy", "name, best criterion value) \"\"\" from concurrent.futures import ProcessPoolExecutor, as_completed", "y): xy = np.array([x, y]).T xy = xy[~np.isnan(xy).any(axis=1)] return (xy[:,", "part, ami, use_weighting=True) for ensemble_member in ensemble ] ) ari_values", "A numpy array representing a square distance matrix for all", "the combined partition. 
methods: a list of methods to apply", "from sklearn.metrics import pairwise_distances from sklearn.metrics import adjusted_rand_score as ari", "final dataframe returned. affinity_matrix: If the clustering algorithm is AgglomerativeClustering", "# for agglomerative clustering both data and affinity_matrix should be", "ensemble ] ) performance_values = { \"ari_mean\": np.mean(ari_values), \"ari_median\": np.median(ari_values),", "the partitions generated by the clusterers. Columns include the clusterer", "the linkage method is different than ward (which only support", "return ( best_method_results[\"partition\"], best_method, best_method_results[\"criterion_value\"], ) def run_method_and_compute_agreement(method_func, ensemble_data, ensemble,", "tuple with the data partition derived from the ensemble data", "n_clusters <= 1 if n_clusters <= 1: reset_estimator(clus_obj) continue res", "} attributes: A list of attributes to save in the", "np.nan # get number of clusters partition_no_nan = partition[~np.isnan(partition)] n_clusters", "final number of clusters for the combined partition. methods: a", "for ensemble_member in ensemble ] ) ami_values = np.array( [", "to save in the final dataframe; for example, including \"n_clusters\"", "the same data. Each row is a clustering solution (partition)", "np.array([x, y]).T xy = xy[~np.isnan(xy).any(axis=1)] return (xy[:, 0] != xy[:,", "given as data input to the estimator instead of data.", "This function returns the combined partition that maximizes the selection", "matrix derived from the ensemble). ensemble_data: A numpy array with", "using the ensemble information). 
For each object pair, the coassociation", "attr in attributes: if attr == \"n_clusters\" and not hasattr(clus_obj,", "ari from sklearn.metrics import adjusted_mutual_info_score as ami from sklearn.metrics import", "k: the final number of clusters for the combined partition.", "# select the best performing method according to the selection", "k, **kwargs) nmi_values = np.array( [ compare_arrays(ensemble_member, part, nmi, use_weighting=True)", "the affinity_matrix is given as data input to the estimator", "the ensemble data that will be given to the specified", "Args: data: A numpy array, pandas dataframe, or any other", "Given an ensemble, it computes the coassociation matrix (a distance", "ami, use_weighting=True) for ensemble_member in ensemble ] ) ari_values =", "total=len(clusterers)): # get partition # # for agglomerative clustering both", "adjusted_mutual_info_score as ami from sklearn.metrics import normalized_mutual_info_score as nmi from", "force_all_finite=\"allow-nan\" ) def supraconsensus(ensemble, k, methods, selection_criterion, n_jobs=1, use_tqdm=False): \"\"\"", "by the pairwise_distance matrix from sklearn. 
Returns: A numpy array", "/ xy.shape[0] return pairwise_distances( ensemble.T, metric=_compare, n_jobs=n_jobs, force_all_finite=\"allow-nan\" ) def", "np.median(ari_values), \"ari_std\": np.std(ari_values), \"ami_mean\": np.mean(ami_values), \"ami_median\": np.median(ami_values), \"ami_std\": np.std(ami_values), \"nmi_mean\":", "saves references of huge data structures not needed in #", "the percentage of times the pair of objects was clustered", "ncols=100, ): method_name = tasks[future] part = future.result() criterion_value =", "best_method_results = methods_results[best_method] return ( best_method_results[\"partition\"], best_method, best_method_results[\"criterion_value\"], ) def", "clustering method on the ensemble data, obtains the consolidated partition", "clusters partition_no_nan = partition[~np.isnan(partition)] n_clusters = np.unique(partition_no_nan).shape[0] # stop if", "in # this context reset_estimator(clus_obj) return pd.DataFrame(ensemble).set_index(\"clusterer_id\") def get_ensemble_distance_matrix(ensemble, n_jobs=1):", "set of clusterers (a clusterer is an instance of a", "ensemble: A numpy array representing a set of clustering solutions", "this function has to accept an ensemble as the first", "pairs derived from the ensemble). ensemble: A numpy array representing", "**kwargs) nmi_values = np.array( [ compare_arrays(ensemble_member, part, nmi, use_weighting=True) for", "= np.array( [ compare_arrays(ensemble_member, part, ami, use_weighting=True) for ensemble_member in", "given a set of clusterers (a clusterer is an instance", "attr): res[attr] = n_clusters else: res[attr] = getattr(clus_obj, attr) ensemble.append(res)", "n_jobs=1): \"\"\" Given an ensemble, it computes the coassociation matrix", "( clus_obj.linkage != \"ward\" ): partition = clus_obj.fit_predict(affinity_matrix).astype(float) else: partition", "in the ensemble. Args: ensemble: A numpy array representing a", "specified method, and some performance measures of this partition. 
\"\"\"", "DBSCAN this is needed, because otherwise # the estimator saves", "specify. Each of these methods combines the ensemble and returns", "methods_results[method_name] = { \"partition\": part, \"criterion_value\": criterion_value, } # select", "\"clusterer_params\": str(clus_obj.get_params()), \"partition\": partition, } ) for attr in attributes:", "\"\"\" part = method_func(ensemble_data, k, **kwargs) nmi_values = np.array( [", "passed to `method_func`. Returns: It returns a tuple with the", "a set of clustering solutions on the same data. Each", "clustering ensemble using a set of methods that the user", "ensemble or the coassociation matrix derived from the ensemble). ensemble_data:", "from the ensemble data using the specified method. kwargs: Other", "matrix with the distance between object pairs derived from the", "< 0] = np.nan # get number of clusters partition_no_nan", "linkage, data is used, and for the other linkage #", "objects was clustered together in the ensemble. Args: ensemble: A", "instead of data. Returns: A pandas DataFrame with all the", "the first argument, and a partition as the second one.", "as nmi from tqdm import tqdm from clustering.utils import reset_estimator,", "affinity_matrix: If the clustering algorithm is AgglomerativeClustering (from sklearn) and", "Args: ensemble: a clustering ensemble (rows are partitions, columns are", "matrix (a distance matrix for all objects using the ensemble", "number of clusters, and computes a series of performance measures.", "from tqdm import tqdm from clustering.utils import reset_estimator, compare_arrays def", "as data input to the estimator instead of data. Returns:", "ward linkage, data is used, and for the other linkage", "compare_arrays(ensemble_member, part, ami, use_weighting=True) for ensemble_member in ensemble ] )", "Args: method_func: A consensus function (first argument is either the", "partition. 
This function returns the combined partition that maximizes the", "or the coassociation matrix derived from the ensemble). ensemble_data: A", "it in the final dataframe returned. affinity_matrix: If the clustering", "\"n_clusters\" will extract this attribute from the estimator and include", "methods, selection_criterion, n_jobs=1, use_tqdm=False): \"\"\" It combines a clustering ensemble", "attributes: if attr == \"n_clusters\" and not hasattr(clus_obj, attr): res[attr]", "for ensemble_member in ensemble ] ) performance_values = { \"ari_mean\":", "a clustering ensemble. \"\"\" import numpy as np import pandas", "solutions on the same data. Each row is a clustering", "second one. n_jobs: number of jobs. use_tqdm: ensembles/disables the use", "the selection criterion best_method = max( methods_results, key=lambda x: methods_results[x][\"criterion_value\"]", "DataFrame with all the partitions generated by the clusterers. Columns", "DBSCAN) partition[partition < 0] = np.nan # get number of", "else: res[attr] = getattr(clus_obj, attr) ensemble.append(res) # for some estimators", "the combined partition that maximizes the selection criterion. Args: ensemble:", "a function that represents the selection criterion; this function has", "the ensemble data, obtains the consolidated partition with the desired", "the desired number of clusters, and computes a series of", "or any other structure supported by the clusterers as data", "the coassociation matrix derived from the ensemble). ensemble_data: A numpy", "the clustering algorithm is AgglomerativeClustering (from sklearn) and the linkage", "single partition. This function returns the combined partition that maximizes", "of clusterers (a clusterer is an instance of a clustering" ]
[ "# データを登録する service._regist_departments(department_no, department_type.en_name) @staticmethod @transaction.atomic() def update_employees(): \"\"\" Employeesを更新する", "= Employees() self.regist_model.emp_no = emp_no self.regist_model.department_id = department_id self.regist_model.first_name =", "select_employees(): \"\"\" Employeesを検索する \"\"\" # テーブル名__項目名で指定するとINNER JOINになる # Queryは参照先のテーブルを参照する度に発行されます for", "for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).select_related(\"department\"): logging.debug(\"select_related:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"select_related:first_name={}\".format(employees_item.first_name)) logging.debug(\"select_related:last_name={}\".format(employees_item.last_name)) logging.debug(\"select_related:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"select_related:department_name={}\".format(employees_item.department.department_name))", "self.regist_model.update_dt = localtime(timezone.now()) self.regist_model.save() def _update_employees_department(self, employees_id, department_id, department_date_from): \"\"\"", "\"\"\" departmentsを登録する \"\"\" self.regist_model = Departments() self.regist_model.department_no = department_no self.regist_model.department_name", "department_no self.regist_model.department_name = department_name self.regist_model.delete_flag = 0 self.regist_model.regist_dt = localtime(timezone.now())", "import logging from django.db import transaction, connection from django.utils import", "self.regist_model.save() return self.regist_model.id def _regist_departments(self, department_no, department_name): \"\"\" departmentsを登録する \"\"\"", "localtime from chart.application.enums.department_type import DepartmentType from chart.application.enums.gender_type import GenderType from", "import transaction, connection from django.utils import timezone from django.utils.timezone import", "employees_item in Employees.objects.filter(emp_no__gte=7, 
emp_no__lte=9, delete_flag=0): employees_id = employees_item.id select_model =", "= \"20190902\" self.regist_model.delete_flag = 0 self.regist_model.regist_dt = localtime(timezone.now()) self.regist_model.update_dt =", "Departments \"\"\" employeesテーブルを操作するクラスです。 \"\"\" class EmployeesService(AppLogicBaseService): def __init__(self): super().__init__() @staticmethod", "<= 5: department_no = DepartmentType.SALES.value else: department_no = DepartmentType.MARKETING.value select_model", "def create_employees(): \"\"\" Employeesを作成する \"\"\" service = EmployeesService() for emp_no", "= DepartmentType.MARKETING.value select_model = Departments.objects.filter(department_no=department_no).values(\"id\").first() # データを登録する service._regist_employees(select_model['id'], emp_no) @staticmethod", "filterによる絞込を行う # gte:...以上(>=),lte:...以下(<=)になる for employees_item in Employees.objects.filter(emp_no__gte=7, emp_no__lte=9, delete_flag=0): employees_id", "self.regist_model.last_name = \"last_name_\" + str(emp_no).zfill(3) self.regist_model.gender = GenderType.MAN.value self.regist_model.department_date_from =", "logging from django.db import transaction, connection from django.utils import timezone", "= employees_item.id select_model = Departments.objects.filter(department_no=DepartmentType.SALES.value).values(\"id\").first() department_id = select_model['id'] department_date_from =", "self.update_model.department_id = department_id self.update_model.department_date_from = department_date_from self.update_model.update_dt = localtime(timezone.now()) self.update_model.save(update_fields=['department_id',", "for department_type in DepartmentType: department_no = department_type.value if Departments.objects.filter(department_no=department_no, delete_flag=0).count()", "employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).select_related(\"department\"): logging.debug(\"select_related:emp_no={}\".format(employees_item.emp_no)) 
logging.debug(\"select_related:first_name={}\".format(employees_item.first_name)) logging.debug(\"select_related:last_name={}\".format(employees_item.last_name)) logging.debug(\"select_related:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"select_related:department_name={}\".format(employees_item.department.department_name)) #", "EmployeesService() # filterによる絞込を行う # gt:...より大きい(>),lt:...より小さい(<)になる for employees_item in Employees.objects.filter(emp_no__gt=1, emp_no__lt=3,", "= select_model['id'] department_date_from = 20190903 # データを更新する service._update_employees_department(employees_id, department_id, department_date_from)", "department_id self.update_model.department_date_from = department_date_from self.update_model.update_dt = localtime(timezone.now()) self.update_model.save(update_fields=['department_id', 'department_date_from', 'update_dt'])", "def _regist_employees(self, department_id, emp_no): \"\"\" employeesを登録する \"\"\" self.regist_model = Employees()", "= localtime(timezone.now()) self.regist_model.update_dt = localtime(timezone.now()) self.regist_model.save() return self.regist_model.id def _regist_departments(self,", "range(1, 11): if Employees.objects.filter(emp_no=emp_no, delete_flag=0).count() == 0: if emp_no <=", "employees_id = employees_item.id select_model = Departments.objects.filter(department_no=DepartmentType.SALES.value).values(\"id\").first() department_id = select_model['id'] department_date_from", "\"\"\" cursor = connection.cursor() cursor.execute('TRUNCATE TABLE {0}'.format(Employees._meta.db_table)) def _regist_employees(self, department_id,", "localtime(timezone.now()) self.regist_model.save() def _update_employees_department(self, employees_id, department_id, department_date_from): \"\"\" 配属情報を更新する \"\"\"", "from chart.models import Employees, Departments \"\"\" employeesテーブルを操作するクラスです。 \"\"\" class EmployeesService(AppLogicBaseService):", "Employeesを更新する \"\"\" service = EmployeesService() # 
filterによる絞込を行う # gt:...より大きい(>),lt:...より小さい(<)になる for", "logging.debug(\"prefetch_related:last_name={}\".format(employees_item.last_name)) logging.debug(\"prefetch_related:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"prefetch_related:department_name={}\".format(employees_item.department.department_name)) @staticmethod @transaction.atomic() def truncate_employees(): \"\"\" トランケートを行う \"\"\"", "self.regist_model.department_no = department_no self.regist_model.department_name = department_name self.regist_model.delete_flag = 0 self.regist_model.regist_dt", "= department_id self.update_model.department_date_from = department_date_from self.update_model.update_dt = localtime(timezone.now()) self.update_model.save(update_fields=['department_id', 'department_date_from',", "ForeignKeyが指定されているためdeleteコマンドを実行する Departments.objects.all().delete() for department_type in DepartmentType: department_no = department_type.value if", "self.regist_model = Employees() self.regist_model.emp_no = emp_no self.regist_model.department_id = department_id self.regist_model.first_name", "delete_flag=0).count() == 0: if emp_no <= 5: department_no = DepartmentType.SALES.value", "= GenderType.MAN.value self.regist_model.department_date_from = \"20190902\" self.regist_model.delete_flag = 0 self.regist_model.regist_dt =", "department_name): \"\"\" departmentsを登録する \"\"\" self.regist_model = Departments() self.regist_model.department_no = department_no", "AppLogicBaseService from chart.models import Employees, Departments \"\"\" employeesテーブルを操作するクラスです。 \"\"\" class", "select_model['id'] department_date_from = 20190905 # データを更新する service._update_employees_department(employees_id, department_id, department_date_from) @staticmethod", "transaction, connection from django.utils import timezone from django.utils.timezone import localtime", "def update_employees(): \"\"\" Employeesを更新する \"\"\" service = EmployeesService() # filterによる絞込を行う", "employees_item.id 
select_model = Departments.objects.filter(department_no=DepartmentType.SALES.value).values(\"id\").first() department_id = select_model['id'] department_date_from = 20190905", "# Queryは1回のみ発行されます for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).select_related(\"department\"): logging.debug(\"select_related:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"select_related:first_name={}\".format(employees_item.first_name)) logging.debug(\"select_related:last_name={}\".format(employees_item.last_name))", "\"\"\" service = EmployeesService() for emp_no in range(1, 11): if", "in range(1, 11): if Employees.objects.filter(emp_no=emp_no, delete_flag=0).count() == 0: if emp_no", "connection.cursor() cursor.execute('TRUNCATE TABLE {0}'.format(Employees._meta.db_table)) def _regist_employees(self, department_id, emp_no): \"\"\" employeesを登録する", "in DepartmentType: department_no = department_type.value if Departments.objects.filter(department_no=department_no, delete_flag=0).count() == 0:", "django.utils.timezone import localtime from chart.application.enums.department_type import DepartmentType from chart.application.enums.gender_type import", "employees_item in Employees.objects.filter(emp_no__gt=1, emp_no__lt=3, delete_flag=0): employees_id = employees_item.id select_model =", "self.regist_model.regist_dt = localtime(timezone.now()) self.regist_model.update_dt = localtime(timezone.now()) self.regist_model.save() def _update_employees_department(self, employees_id,", "select_model = Departments.objects.filter(department_no=DepartmentType.SALES.value).values(\"id\").first() department_id = select_model['id'] department_date_from = 20190905 #", "return self.regist_model.id def _regist_departments(self, department_no, department_name): \"\"\" departmentsを登録する \"\"\" self.regist_model", "self.regist_model.gender = GenderType.MAN.value self.regist_model.department_date_from = \"20190902\" self.regist_model.delete_flag = 0 
self.regist_model.regist_dt", "EmployeesService() # データをすべて削除する # ForeignKeyが指定されているためdeleteコマンドを実行する Departments.objects.all().delete() for department_type in DepartmentType:", "chart.application.enums.gender_type import GenderType from chart.application.service.app_logic_base import AppLogicBaseService from chart.models import", "Departments.objects.filter(department_no=DepartmentType.SALES.value).values(\"id\").first() department_id = select_model['id'] department_date_from = 20190905 # データを更新する service._update_employees_department(employees_id,", "Departments.objects.filter(department_no=department_no, delete_flag=0).count() == 0: # データを登録する service._regist_departments(department_no, department_type.en_name) @staticmethod @transaction.atomic()", "for emp_no in range(1, 11): if Employees.objects.filter(emp_no=emp_no, delete_flag=0).count() == 0:", "_update_employees_department(self, employees_id, department_id, department_date_from): \"\"\" 配属情報を更新する \"\"\" self.update_model = Employees()", "Employees.objects.filter(emp_no__gte=7, delete_flag=0).select_related(\"department\"): logging.debug(\"select_related:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"select_related:first_name={}\".format(employees_item.first_name)) logging.debug(\"select_related:last_name={}\".format(employees_item.last_name)) logging.debug(\"select_related:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"select_related:department_name={}\".format(employees_item.department.department_name)) # prefetch_relatedを使用した参照先情報を取得してキャッシュします #", "= department_type.value if Departments.objects.filter(department_no=department_no, delete_flag=0).count() == 0: # データを登録する service._regist_departments(department_no,", "from django.utils.timezone import localtime from chart.application.enums.department_type import DepartmentType from chart.application.enums.gender_type", "Employeesを検索する \"\"\" # テーブル名__項目名で指定するとINNER JOINになる # Queryは参照先のテーブルを参照する度に発行されます for 
employees_item in", "# Queryは参照先のテーブルを参照する度に発行されます for employees_item in Employees.objects.filter(department__department_no=DepartmentType.SALES.value, delete_flag=0): logging.debug(\"reference:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"reference:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"reference:department_name={}\".format(employees_item.department.department_name))", "logging.debug(\"reference:first_name={}\".format(employees_item.first_name)) logging.debug(\"reference:last_name={}\".format(employees_item.last_name)) # select_relatedを使用した参照先情報を取得してキャッシュします # Queryは1回のみ発行されます for employees_item in Employees.objects.filter(emp_no__gte=7,", "employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).prefetch_related( \"department__employees_set\"): logging.debug(\"prefetch_related:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"prefetch_related:first_name={}\".format(employees_item.first_name)) logging.debug(\"prefetch_related:last_name={}\".format(employees_item.last_name)) logging.debug(\"prefetch_related:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"prefetch_related:department_name={}\".format(employees_item.department.department_name))", "emp_no__lt=3, delete_flag=0): employees_id = employees_item.id select_model = Departments.objects.filter(department_no=DepartmentType.PRODUCTION.value).values( \"id\").first() department_id", "= 20190905 # データを更新する service._update_employees_department(employees_id, department_id, department_date_from) @staticmethod def select_employees():", "emp_no) @staticmethod @transaction.atomic() def create_departments(): \"\"\" Departmentsを作成する \"\"\" service =", "= 0 self.regist_model.regist_dt = localtime(timezone.now()) self.regist_model.update_dt = localtime(timezone.now()) self.regist_model.save() def", "truncate_employees(): \"\"\" トランケートを行う \"\"\" cursor = connection.cursor() cursor.execute('TRUNCATE TABLE 
{0}'.format(Employees._meta.db_table))", "department_id, department_date_from): \"\"\" 配属情報を更新する \"\"\" self.update_model = Employees() self.update_model.pk =", "@staticmethod def select_employees(): \"\"\" Employeesを検索する \"\"\" # テーブル名__項目名で指定するとINNER JOINになる #", "django.db import transaction, connection from django.utils import timezone from django.utils.timezone", "__init__(self): super().__init__() @staticmethod @transaction.atomic() def create_employees(): \"\"\" Employeesを作成する \"\"\" service", "def truncate_employees(): \"\"\" トランケートを行う \"\"\" cursor = connection.cursor() cursor.execute('TRUNCATE TABLE", "delete_flag=0): employees_id = employees_item.id select_model = Departments.objects.filter(department_no=DepartmentType.SALES.value).values(\"id\").first() department_id = select_model['id']", "if Employees.objects.filter(emp_no=emp_no, delete_flag=0).count() == 0: if emp_no <= 5: department_no", "# Queryは2回発行されてForeignKeyで結合します for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).prefetch_related( \"department__employees_set\"): logging.debug(\"prefetch_related:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"prefetch_related:first_name={}\".format(employees_item.first_name))", "department_date_from): \"\"\" 配属情報を更新する \"\"\" self.update_model = Employees() self.update_model.pk = employees_id", "select_relatedを使用した参照先情報を取得してキャッシュします # Queryは1回のみ発行されます for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).select_related(\"department\"): logging.debug(\"select_related:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"select_related:first_name={}\".format(employees_item.first_name))", "配属情報を更新する \"\"\" self.update_model = Employees() self.update_model.pk = employees_id self.update_model.department_id =", "filterによる絞込を行う # gt:...より大きい(>),lt:...より小さい(<)になる for employees_item in Employees.objects.filter(emp_no__gt=1, emp_no__lt=3, delete_flag=0): employees_id", 
"logging.debug(\"select_related:last_name={}\".format(employees_item.last_name)) logging.debug(\"select_related:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"select_related:department_name={}\".format(employees_item.department.department_name)) # prefetch_relatedを使用した参照先情報を取得してキャッシュします # Queryは2回発行されてForeignKeyで結合します for employees_item in", "from chart.application.enums.gender_type import GenderType from chart.application.service.app_logic_base import AppLogicBaseService from chart.models", "= Departments.objects.filter(department_no=department_no).values(\"id\").first() # データを登録する service._regist_employees(select_model['id'], emp_no) @staticmethod @transaction.atomic() def create_departments():", "= Employees() self.update_model.pk = employees_id self.update_model.department_id = department_id self.update_model.department_date_from =", "logging.debug(\"prefetch_related:first_name={}\".format(employees_item.first_name)) logging.debug(\"prefetch_related:last_name={}\".format(employees_item.last_name)) logging.debug(\"prefetch_related:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"prefetch_related:department_name={}\".format(employees_item.department.department_name)) @staticmethod @transaction.atomic() def truncate_employees(): \"\"\" トランケートを行う", "timezone from django.utils.timezone import localtime from chart.application.enums.department_type import DepartmentType from", "# テーブル名__項目名で指定するとINNER JOINになる # Queryは参照先のテーブルを参照する度に発行されます for employees_item in Employees.objects.filter(department__department_no=DepartmentType.SALES.value, delete_flag=0):", "# prefetch_relatedを使用した参照先情報を取得してキャッシュします # Queryは2回発行されてForeignKeyで結合します for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).prefetch_related( \"department__employees_set\"):", "employees_id, department_id, department_date_from): \"\"\" 配属情報を更新する \"\"\" self.update_model = Employees() self.update_model.pk", 
"_regist_departments(self, department_no, department_name): \"\"\" departmentsを登録する \"\"\" self.regist_model = Departments() self.regist_model.department_no", "service = EmployeesService() # filterによる絞込を行う # gt:...より大きい(>),lt:...より小さい(<)になる for employees_item in", "Employees.objects.filter(department__department_no=DepartmentType.SALES.value, delete_flag=0): logging.debug(\"reference:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"reference:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"reference:department_name={}\".format(employees_item.department.department_name)) logging.debug(\"reference:first_name={}\".format(employees_item.first_name)) logging.debug(\"reference:last_name={}\".format(employees_item.last_name)) # select_relatedを使用した参照先情報を取得してキャッシュします #", "= DepartmentType.SALES.value else: department_no = DepartmentType.MARKETING.value select_model = Departments.objects.filter(department_no=department_no).values(\"id\").first() #", "for employees_item in Employees.objects.filter(emp_no__gte=7, emp_no__lte=9, delete_flag=0): employees_id = employees_item.id select_model", "logging.debug(\"select_related:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"select_related:department_name={}\".format(employees_item.department.department_name)) # prefetch_relatedを使用した参照先情報を取得してキャッシュします # Queryは2回発行されてForeignKeyで結合します for employees_item in Employees.objects.filter(emp_no__gte=7,", "+ str(emp_no).zfill(3) self.regist_model.gender = GenderType.MAN.value self.regist_model.department_date_from = \"20190902\" self.regist_model.delete_flag =", "logging.debug(\"prefetch_related:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"prefetch_related:first_name={}\".format(employees_item.first_name)) logging.debug(\"prefetch_related:last_name={}\".format(employees_item.last_name)) logging.debug(\"prefetch_related:department_no={}\".format(employees_item.department.department_no)) 
logging.debug(\"prefetch_related:department_name={}\".format(employees_item.department.department_name)) @staticmethod @transaction.atomic() def truncate_employees(): \"\"\"", "self.regist_model.department_name = department_name self.regist_model.delete_flag = 0 self.regist_model.regist_dt = localtime(timezone.now()) self.regist_model.update_dt", "\"\"\" Employeesを更新する \"\"\" service = EmployeesService() # filterによる絞込を行う # gt:...より大きい(>),lt:...より小さい(<)になる", "# データを更新する service._update_employees_department(employees_id, department_id, department_date_from) # filterによる絞込を行う # gte:...以上(>=),lte:...以下(<=)になる for", "localtime(timezone.now()) self.regist_model.update_dt = localtime(timezone.now()) self.regist_model.save() return self.regist_model.id def _regist_departments(self, department_no,", "= employees_id self.update_model.department_id = department_id self.update_model.department_date_from = department_date_from self.update_model.update_dt =", "self.regist_model.save() def _update_employees_department(self, employees_id, department_id, department_date_from): \"\"\" 配属情報を更新する \"\"\" self.update_model", "department_name self.regist_model.delete_flag = 0 self.regist_model.regist_dt = localtime(timezone.now()) self.regist_model.update_dt = localtime(timezone.now())", "department_date_from = 20190905 # データを更新する service._update_employees_department(employees_id, department_id, department_date_from) @staticmethod def", "service._regist_departments(department_no, department_type.en_name) @staticmethod @transaction.atomic() def update_employees(): \"\"\" Employeesを更新する \"\"\" service", "def select_employees(): \"\"\" Employeesを検索する \"\"\" # テーブル名__項目名で指定するとINNER JOINになる # Queryは参照先のテーブルを参照する度に発行されます", "Employees.objects.filter(emp_no__gte=7, emp_no__lte=9, delete_flag=0): employees_id = employees_item.id select_model = Departments.objects.filter(department_no=DepartmentType.SALES.value).values(\"id\").first() department_id", "department_no = department_type.value if 
Departments.objects.filter(department_no=department_no, delete_flag=0).count() == 0: # データを登録する", "in Employees.objects.filter(emp_no__gt=1, emp_no__lt=3, delete_flag=0): employees_id = employees_item.id select_model = Departments.objects.filter(department_no=DepartmentType.PRODUCTION.value).values(", "@staticmethod @transaction.atomic() def create_departments(): \"\"\" Departmentsを作成する \"\"\" service = EmployeesService()", "== 0: if emp_no <= 5: department_no = DepartmentType.SALES.value else:", "connection from django.utils import timezone from django.utils.timezone import localtime from", "# データを更新する service._update_employees_department(employees_id, department_id, department_date_from) @staticmethod def select_employees(): \"\"\" Employeesを検索する", "= EmployeesService() # データをすべて削除する # ForeignKeyが指定されているためdeleteコマンドを実行する Departments.objects.all().delete() for department_type in", "# gte:...以上(>=),lte:...以下(<=)になる for employees_item in Employees.objects.filter(emp_no__gte=7, emp_no__lte=9, delete_flag=0): employees_id =", "def create_departments(): \"\"\" Departmentsを作成する \"\"\" service = EmployeesService() # データをすべて削除する", "in Employees.objects.filter(emp_no__gte=7, emp_no__lte=9, delete_flag=0): employees_id = employees_item.id select_model = Departments.objects.filter(department_no=DepartmentType.SALES.value).values(\"id\").first()", "department_type.en_name) @staticmethod @transaction.atomic() def update_employees(): \"\"\" Employeesを更新する \"\"\" service =", "\"\"\" self.regist_model = Departments() self.regist_model.department_no = department_no self.regist_model.department_name = department_name", "0 self.regist_model.regist_dt = localtime(timezone.now()) self.regist_model.update_dt = localtime(timezone.now()) self.regist_model.save() def _update_employees_department(self,", "Employees() self.update_model.pk = employees_id self.update_model.department_id = department_id self.update_model.department_date_from = department_date_from", "department_date_from) 
@staticmethod def select_employees(): \"\"\" Employeesを検索する \"\"\" # テーブル名__項目名で指定するとINNER JOINになる", "# filterによる絞込を行う # gte:...以上(>=),lte:...以下(<=)になる for employees_item in Employees.objects.filter(emp_no__gte=7, emp_no__lte=9, delete_flag=0):", "Queryは参照先のテーブルを参照する度に発行されます for employees_item in Employees.objects.filter(department__department_no=DepartmentType.SALES.value, delete_flag=0): logging.debug(\"reference:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"reference:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"reference:department_name={}\".format(employees_item.department.department_name)) logging.debug(\"reference:first_name={}\".format(employees_item.first_name))", "DepartmentType.SALES.value else: department_no = DepartmentType.MARKETING.value select_model = Departments.objects.filter(department_no=department_no).values(\"id\").first() # データを登録する", "logging.debug(\"select_related:department_name={}\".format(employees_item.department.department_name)) # prefetch_relatedを使用した参照先情報を取得してキャッシュします # Queryは2回発行されてForeignKeyで結合します for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).prefetch_related(", "employees_item in Employees.objects.filter(department__department_no=DepartmentType.SALES.value, delete_flag=0): logging.debug(\"reference:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"reference:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"reference:department_name={}\".format(employees_item.department.department_name)) logging.debug(\"reference:first_name={}\".format(employees_item.first_name)) logging.debug(\"reference:last_name={}\".format(employees_item.last_name)) #", "cursor.execute('TRUNCATE TABLE {0}'.format(Employees._meta.db_table)) def _regist_employees(self, department_id, emp_no): \"\"\" employeesを登録する \"\"\"", "== 0: # データを登録する service._regist_departments(department_no, department_type.en_name) @staticmethod 
@transaction.atomic() def update_employees():", "= localtime(timezone.now()) self.regist_model.save() return self.regist_model.id def _regist_departments(self, department_no, department_name): \"\"\"", "\"\"\" employeesテーブルを操作するクラスです。 \"\"\" class EmployeesService(AppLogicBaseService): def __init__(self): super().__init__() @staticmethod @transaction.atomic()", "Departments.objects.filter(department_no=department_no).values(\"id\").first() # データを登録する service._regist_employees(select_model['id'], emp_no) @staticmethod @transaction.atomic() def create_departments(): \"\"\"", "select_model = Departments.objects.filter(department_no=department_no).values(\"id\").first() # データを登録する service._regist_employees(select_model['id'], emp_no) @staticmethod @transaction.atomic() def", "_regist_employees(self, department_id, emp_no): \"\"\" employeesを登録する \"\"\" self.regist_model = Employees() self.regist_model.emp_no", "create_employees(): \"\"\" Employeesを作成する \"\"\" service = EmployeesService() for emp_no in", "service._regist_employees(select_model['id'], emp_no) @staticmethod @transaction.atomic() def create_departments(): \"\"\" Departmentsを作成する \"\"\" service", "\"department__employees_set\"): logging.debug(\"prefetch_related:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"prefetch_related:first_name={}\".format(employees_item.first_name)) logging.debug(\"prefetch_related:last_name={}\".format(employees_item.last_name)) logging.debug(\"prefetch_related:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"prefetch_related:department_name={}\".format(employees_item.department.department_name)) @staticmethod @transaction.atomic() def truncate_employees():", "self.regist_model.department_id = department_id self.regist_model.first_name = \"first_name_\" + str(emp_no).zfill(3) self.regist_model.last_name =", "Employeesを作成する \"\"\" service = EmployeesService() for emp_no in range(1, 11):", "self.regist_model.id def 
_regist_departments(self, department_no, department_name): \"\"\" departmentsを登録する \"\"\" self.regist_model =", "logging.debug(\"reference:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"reference:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"reference:department_name={}\".format(employees_item.department.department_name)) logging.debug(\"reference:first_name={}\".format(employees_item.first_name)) logging.debug(\"reference:last_name={}\".format(employees_item.last_name)) # select_relatedを使用した参照先情報を取得してキャッシュします # Queryは1回のみ発行されます for", "logging.debug(\"select_related:first_name={}\".format(employees_item.first_name)) logging.debug(\"select_related:last_name={}\".format(employees_item.last_name)) logging.debug(\"select_related:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"select_related:department_name={}\".format(employees_item.department.department_name)) # prefetch_relatedを使用した参照先情報を取得してキャッシュします # Queryは2回発行されてForeignKeyで結合します for employees_item", "Employees, Departments \"\"\" employeesテーブルを操作するクラスです。 \"\"\" class EmployeesService(AppLogicBaseService): def __init__(self): super().__init__()", "chart.application.enums.department_type import DepartmentType from chart.application.enums.gender_type import GenderType from chart.application.service.app_logic_base import", "データを更新する service._update_employees_department(employees_id, department_id, department_date_from) # filterによる絞込を行う # gte:...以上(>=),lte:...以下(<=)になる for employees_item", "else: department_no = DepartmentType.MARKETING.value select_model = Departments.objects.filter(department_no=department_no).values(\"id\").first() # データを登録する service._regist_employees(select_model['id'],", "delete_flag=0).prefetch_related( \"department__employees_set\"): logging.debug(\"prefetch_related:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"prefetch_related:first_name={}\".format(employees_item.first_name)) 
logging.debug(\"prefetch_related:last_name={}\".format(employees_item.last_name)) logging.debug(\"prefetch_related:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"prefetch_related:department_name={}\".format(employees_item.department.department_name)) @staticmethod @transaction.atomic() def", "super().__init__() @staticmethod @transaction.atomic() def create_employees(): \"\"\" Employeesを作成する \"\"\" service =", "self.update_model = Employees() self.update_model.pk = employees_id self.update_model.department_id = department_id self.update_model.department_date_from", "\"\"\" class EmployeesService(AppLogicBaseService): def __init__(self): super().__init__() @staticmethod @transaction.atomic() def create_employees():", "gte:...以上(>=),lte:...以下(<=)になる for employees_item in Employees.objects.filter(emp_no__gte=7, emp_no__lte=9, delete_flag=0): employees_id = employees_item.id", "\"\"\" Employeesを作成する \"\"\" service = EmployeesService() for emp_no in range(1,", "# filterによる絞込を行う # gt:...より大きい(>),lt:...より小さい(<)になる for employees_item in Employees.objects.filter(emp_no__gt=1, emp_no__lt=3, delete_flag=0):", "emp_no self.regist_model.department_id = department_id self.regist_model.first_name = \"first_name_\" + str(emp_no).zfill(3) self.regist_model.last_name", "@transaction.atomic() def create_departments(): \"\"\" Departmentsを作成する \"\"\" service = EmployeesService() #", "\"id\").first() department_id = select_model['id'] department_date_from = 20190903 # データを更新する service._update_employees_department(employees_id,", "@staticmethod @transaction.atomic() def update_employees(): \"\"\" Employeesを更新する \"\"\" service = EmployeesService()", "department_type in DepartmentType: department_no = department_type.value if Departments.objects.filter(department_no=department_no, delete_flag=0).count() ==", "11): if Employees.objects.filter(emp_no=emp_no, delete_flag=0).count() == 0: if emp_no <= 5:", 
"logging.debug(\"reference:department_name={}\".format(employees_item.department.department_name)) logging.debug(\"reference:first_name={}\".format(employees_item.first_name)) logging.debug(\"reference:last_name={}\".format(employees_item.last_name)) # select_relatedを使用した参照先情報を取得してキャッシュします # Queryは1回のみ発行されます for employees_item in", "5: department_no = DepartmentType.SALES.value else: department_no = DepartmentType.MARKETING.value select_model =", "= 20190903 # データを更新する service._update_employees_department(employees_id, department_id, department_date_from) # filterによる絞込を行う #", "department_id = select_model['id'] department_date_from = 20190905 # データを更新する service._update_employees_department(employees_id, department_id,", "chart.application.service.app_logic_base import AppLogicBaseService from chart.models import Employees, Departments \"\"\" employeesテーブルを操作するクラスです。", "emp_no in range(1, 11): if Employees.objects.filter(emp_no=emp_no, delete_flag=0).count() == 0: if", "department_id, emp_no): \"\"\" employeesを登録する \"\"\" self.regist_model = Employees() self.regist_model.emp_no =", "= Departments() self.regist_model.department_no = department_no self.regist_model.department_name = department_name self.regist_model.delete_flag =", "\"last_name_\" + str(emp_no).zfill(3) self.regist_model.gender = GenderType.MAN.value self.regist_model.department_date_from = \"20190902\" self.regist_model.delete_flag", "= \"last_name_\" + str(emp_no).zfill(3) self.regist_model.gender = GenderType.MAN.value self.regist_model.department_date_from = \"20190902\"", "from chart.application.enums.department_type import DepartmentType from chart.application.enums.gender_type import GenderType from chart.application.service.app_logic_base", "0 self.regist_model.regist_dt = localtime(timezone.now()) self.regist_model.update_dt = localtime(timezone.now()) self.regist_model.save() return self.regist_model.id", "self.regist_model.delete_flag = 0 self.regist_model.regist_dt = 
localtime(timezone.now()) self.regist_model.update_dt = localtime(timezone.now()) self.regist_model.save()", "\"\"\" service = EmployeesService() # filterによる絞込を行う # gt:...より大きい(>),lt:...より小さい(<)になる for employees_item", "\"first_name_\" + str(emp_no).zfill(3) self.regist_model.last_name = \"last_name_\" + str(emp_no).zfill(3) self.regist_model.gender =", "self.regist_model.regist_dt = localtime(timezone.now()) self.regist_model.update_dt = localtime(timezone.now()) self.regist_model.save() return self.regist_model.id def", "employees_id self.update_model.department_id = department_id self.update_model.department_date_from = department_date_from self.update_model.update_dt = localtime(timezone.now())", "create_departments(): \"\"\" Departmentsを作成する \"\"\" service = EmployeesService() # データをすべて削除する #", "Employees() self.regist_model.emp_no = emp_no self.regist_model.department_id = department_id self.regist_model.first_name = \"first_name_\"", "\"\"\" self.regist_model = Employees() self.regist_model.emp_no = emp_no self.regist_model.department_id = department_id", "20190905 # データを更新する service._update_employees_department(employees_id, department_id, department_date_from) @staticmethod def select_employees(): \"\"\"", "employeesテーブルを操作するクラスです。 \"\"\" class EmployeesService(AppLogicBaseService): def __init__(self): super().__init__() @staticmethod @transaction.atomic() def", "EmployeesService() for emp_no in range(1, 11): if Employees.objects.filter(emp_no=emp_no, delete_flag=0).count() ==", "chart.models import Employees, Departments \"\"\" employeesテーブルを操作するクラスです。 \"\"\" class EmployeesService(AppLogicBaseService): def", "トランケートを行う \"\"\" cursor = connection.cursor() cursor.execute('TRUNCATE TABLE {0}'.format(Employees._meta.db_table)) def _regist_employees(self,", "def _update_employees_department(self, employees_id, department_id, department_date_from): \"\"\" 配属情報を更新する \"\"\" self.update_model =", "in Employees.objects.filter(emp_no__gte=7, 
delete_flag=0).select_related(\"department\"): logging.debug(\"select_related:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"select_related:first_name={}\".format(employees_item.first_name)) logging.debug(\"select_related:last_name={}\".format(employees_item.last_name)) logging.debug(\"select_related:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"select_related:department_name={}\".format(employees_item.department.department_name)) # prefetch_relatedを使用した参照先情報を取得してキャッシュします", "= select_model['id'] department_date_from = 20190905 # データを更新する service._update_employees_department(employees_id, department_id, department_date_from)", "service = EmployeesService() # データをすべて削除する # ForeignKeyが指定されているためdeleteコマンドを実行する Departments.objects.all().delete() for department_type", "if emp_no <= 5: department_no = DepartmentType.SALES.value else: department_no =", "select_model['id'] department_date_from = 20190903 # データを更新する service._update_employees_department(employees_id, department_id, department_date_from) #", "self.regist_model.emp_no = emp_no self.regist_model.department_id = department_id self.regist_model.first_name = \"first_name_\" +", "department_id, department_date_from) # filterによる絞込を行う # gte:...以上(>=),lte:...以下(<=)になる for employees_item in Employees.objects.filter(emp_no__gte=7,", "Employees.objects.filter(emp_no=emp_no, delete_flag=0).count() == 0: if emp_no <= 5: department_no =", "DepartmentType.MARKETING.value select_model = Departments.objects.filter(department_no=department_no).values(\"id\").first() # データを登録する service._regist_employees(select_model['id'], emp_no) @staticmethod @transaction.atomic()", "\"\"\" service = EmployeesService() # データをすべて削除する # ForeignKeyが指定されているためdeleteコマンドを実行する Departments.objects.all().delete() for", "\"\"\" Employeesを検索する \"\"\" # テーブル名__項目名で指定するとINNER JOINになる # Queryは参照先のテーブルを参照する度に発行されます for employees_item", "delete_flag=0): employees_id = employees_item.id select_model = 
Departments.objects.filter(department_no=DepartmentType.PRODUCTION.value).values( \"id\").first() department_id =", "\"\"\" # テーブル名__項目名で指定するとINNER JOINになる # Queryは参照先のテーブルを参照する度に発行されます for employees_item in Employees.objects.filter(department__department_no=DepartmentType.SALES.value,", "class EmployeesService(AppLogicBaseService): def __init__(self): super().__init__() @staticmethod @transaction.atomic() def create_employees(): \"\"\"", "データを登録する service._regist_employees(select_model['id'], emp_no) @staticmethod @transaction.atomic() def create_departments(): \"\"\" Departmentsを作成する \"\"\"", "= department_name self.regist_model.delete_flag = 0 self.regist_model.regist_dt = localtime(timezone.now()) self.regist_model.update_dt =", "department_date_from = 20190903 # データを更新する service._update_employees_department(employees_id, department_id, department_date_from) # filterによる絞込を行う", "prefetch_relatedを使用した参照先情報を取得してキャッシュします # Queryは2回発行されてForeignKeyで結合します for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).prefetch_related( \"department__employees_set\"): logging.debug(\"prefetch_related:emp_no={}\".format(employees_item.emp_no))", "employees_id = employees_item.id select_model = Departments.objects.filter(department_no=DepartmentType.PRODUCTION.value).values( \"id\").first() department_id = select_model['id']", "self.regist_model.department_date_from = \"20190902\" self.regist_model.delete_flag = 0 self.regist_model.regist_dt = localtime(timezone.now()) self.regist_model.update_dt", "department_no = DepartmentType.SALES.value else: department_no = DepartmentType.MARKETING.value select_model = Departments.objects.filter(department_no=department_no).values(\"id\").first()", "= localtime(timezone.now()) self.regist_model.save() def _update_employees_department(self, employees_id, department_id, department_date_from): \"\"\" 配属情報を更新する", "import DepartmentType from chart.application.enums.gender_type import GenderType from 
chart.application.service.app_logic_base import AppLogicBaseService", "DepartmentType: department_no = department_type.value if Departments.objects.filter(department_no=department_no, delete_flag=0).count() == 0: #", "service = EmployeesService() for emp_no in range(1, 11): if Employees.objects.filter(emp_no=emp_no,", "import AppLogicBaseService from chart.models import Employees, Departments \"\"\" employeesテーブルを操作するクラスです。 \"\"\"", "= employees_item.id select_model = Departments.objects.filter(department_no=DepartmentType.PRODUCTION.value).values( \"id\").first() department_id = select_model['id'] department_date_from", "in Employees.objects.filter(department__department_no=DepartmentType.SALES.value, delete_flag=0): logging.debug(\"reference:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"reference:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"reference:department_name={}\".format(employees_item.department.department_name)) logging.debug(\"reference:first_name={}\".format(employees_item.first_name)) logging.debug(\"reference:last_name={}\".format(employees_item.last_name)) # select_relatedを使用した参照先情報を取得してキャッシュします", "department_date_from) # filterによる絞込を行う # gte:...以上(>=),lte:...以下(<=)になる for employees_item in Employees.objects.filter(emp_no__gte=7, emp_no__lte=9,", "cursor = connection.cursor() cursor.execute('TRUNCATE TABLE {0}'.format(Employees._meta.db_table)) def _regist_employees(self, department_id, emp_no):", "from django.db import transaction, connection from django.utils import timezone from", "@staticmethod @transaction.atomic() def create_employees(): \"\"\" Employeesを作成する \"\"\" service = EmployeesService()", "データを登録する service._regist_departments(department_no, department_type.en_name) @staticmethod @transaction.atomic() def update_employees(): \"\"\" Employeesを更新する \"\"\"", "self.regist_model = Departments() self.regist_model.department_no = department_no self.regist_model.department_name = 
department_name self.regist_model.delete_flag", "for employees_item in Employees.objects.filter(emp_no__gt=1, emp_no__lt=3, delete_flag=0): employees_id = employees_item.id select_model", "\"\"\" Departmentsを作成する \"\"\" service = EmployeesService() # データをすべて削除する # ForeignKeyが指定されているためdeleteコマンドを実行する", "= connection.cursor() cursor.execute('TRUNCATE TABLE {0}'.format(Employees._meta.db_table)) def _regist_employees(self, department_id, emp_no): \"\"\"", "\"\"\" 配属情報を更新する \"\"\" self.update_model = Employees() self.update_model.pk = employees_id self.update_model.department_id", "Queryは1回のみ発行されます for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).select_related(\"department\"): logging.debug(\"select_related:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"select_related:first_name={}\".format(employees_item.first_name)) logging.debug(\"select_related:last_name={}\".format(employees_item.last_name)) logging.debug(\"select_related:department_no={}\".format(employees_item.department.department_no))", "= EmployeesService() for emp_no in range(1, 11): if Employees.objects.filter(emp_no=emp_no, delete_flag=0).count()", "@transaction.atomic() def create_employees(): \"\"\" Employeesを作成する \"\"\" service = EmployeesService() for", "@staticmethod @transaction.atomic() def truncate_employees(): \"\"\" トランケートを行う \"\"\" cursor = connection.cursor()", "import localtime from chart.application.enums.department_type import DepartmentType from chart.application.enums.gender_type import GenderType", "GenderType from chart.application.service.app_logic_base import AppLogicBaseService from chart.models import Employees, Departments", "= 0 self.regist_model.regist_dt = localtime(timezone.now()) self.regist_model.update_dt = localtime(timezone.now()) self.regist_model.save() return", "@transaction.atomic() def update_employees(): \"\"\" Employeesを更新する \"\"\" service = EmployeesService() #", "str(emp_no).zfill(3) self.regist_model.last_name = 
\"last_name_\" + str(emp_no).zfill(3) self.regist_model.gender = GenderType.MAN.value self.regist_model.department_date_from", "JOINになる # Queryは参照先のテーブルを参照する度に発行されます for employees_item in Employees.objects.filter(department__department_no=DepartmentType.SALES.value, delete_flag=0): logging.debug(\"reference:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"reference:department_no={}\".format(employees_item.department.department_no))", "department_no = DepartmentType.MARKETING.value select_model = Departments.objects.filter(department_no=department_no).values(\"id\").first() # データを登録する service._regist_employees(select_model['id'], emp_no)", "Employees.objects.filter(emp_no__gte=7, delete_flag=0).prefetch_related( \"department__employees_set\"): logging.debug(\"prefetch_related:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"prefetch_related:first_name={}\".format(employees_item.first_name)) logging.debug(\"prefetch_related:last_name={}\".format(employees_item.last_name)) logging.debug(\"prefetch_related:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"prefetch_related:department_name={}\".format(employees_item.department.department_name)) @staticmethod @transaction.atomic()", "self.regist_model.update_dt = localtime(timezone.now()) self.regist_model.save() return self.regist_model.id def _regist_departments(self, department_no, department_name):", "\"\"\" self.update_model = Employees() self.update_model.pk = employees_id self.update_model.department_id = department_id", "Employees.objects.filter(emp_no__gt=1, emp_no__lt=3, delete_flag=0): employees_id = employees_item.id select_model = Departments.objects.filter(department_no=DepartmentType.PRODUCTION.value).values( \"id\").first()", "def __init__(self): super().__init__() @staticmethod @transaction.atomic() def create_employees(): \"\"\" Employeesを作成する \"\"\"", "department_no, department_name): \"\"\" departmentsを登録する \"\"\" self.regist_model = Departments() 
self.regist_model.department_no =", "self.regist_model.first_name = \"first_name_\" + str(emp_no).zfill(3) self.regist_model.last_name = \"last_name_\" + str(emp_no).zfill(3)", "service._update_employees_department(employees_id, department_id, department_date_from) @staticmethod def select_employees(): \"\"\" Employeesを検索する \"\"\" #", "\"\"\" トランケートを行う \"\"\" cursor = connection.cursor() cursor.execute('TRUNCATE TABLE {0}'.format(Employees._meta.db_table)) def", "delete_flag=0).count() == 0: # データを登録する service._regist_departments(department_no, department_type.en_name) @staticmethod @transaction.atomic() def", "import timezone from django.utils.timezone import localtime from chart.application.enums.department_type import DepartmentType", "emp_no <= 5: department_no = DepartmentType.SALES.value else: department_no = DepartmentType.MARKETING.value", "Departments.objects.all().delete() for department_type in DepartmentType: department_no = department_type.value if Departments.objects.filter(department_no=department_no,", "テーブル名__項目名で指定するとINNER JOINになる # Queryは参照先のテーブルを参照する度に発行されます for employees_item in Employees.objects.filter(department__department_no=DepartmentType.SALES.value, delete_flag=0): logging.debug(\"reference:emp_no={}\".format(employees_item.emp_no))", "logging.debug(\"reference:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"reference:department_name={}\".format(employees_item.department.department_name)) logging.debug(\"reference:first_name={}\".format(employees_item.first_name)) logging.debug(\"reference:last_name={}\".format(employees_item.last_name)) # select_relatedを使用した参照先情報を取得してキャッシュします # Queryは1回のみ発行されます for employees_item", "for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).prefetch_related( \"department__employees_set\"): logging.debug(\"prefetch_related:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"prefetch_related:first_name={}\".format(employees_item.first_name)) 
logging.debug(\"prefetch_related:last_name={}\".format(employees_item.last_name)) logging.debug(\"prefetch_related:department_no={}\".format(employees_item.department.department_no))", "logging.debug(\"prefetch_related:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"prefetch_related:department_name={}\".format(employees_item.department.department_name)) @staticmethod @transaction.atomic() def truncate_employees(): \"\"\" トランケートを行う \"\"\" cursor", "+ str(emp_no).zfill(3) self.regist_model.last_name = \"last_name_\" + str(emp_no).zfill(3) self.regist_model.gender = GenderType.MAN.value", "emp_no): \"\"\" employeesを登録する \"\"\" self.regist_model = Employees() self.regist_model.emp_no = emp_no", "Departments.objects.filter(department_no=DepartmentType.PRODUCTION.value).values( \"id\").first() department_id = select_model['id'] department_date_from = 20190903 # データを更新する", "department_id, department_date_from) @staticmethod def select_employees(): \"\"\" Employeesを検索する \"\"\" # テーブル名__項目名で指定するとINNER", "def _regist_departments(self, department_no, department_name): \"\"\" departmentsを登録する \"\"\" self.regist_model = Departments()", "EmployeesService(AppLogicBaseService): def __init__(self): super().__init__() @staticmethod @transaction.atomic() def create_employees(): \"\"\" Employeesを作成する", "0: # データを登録する service._regist_departments(department_no, department_type.en_name) @staticmethod @transaction.atomic() def update_employees(): \"\"\"", "localtime(timezone.now()) self.regist_model.update_dt = localtime(timezone.now()) self.regist_model.save() def _update_employees_department(self, employees_id, department_id, department_date_from):", "# gt:...より大きい(>),lt:...より小さい(<)になる for employees_item in Employees.objects.filter(emp_no__gt=1, emp_no__lt=3, delete_flag=0): employees_id =", "データを更新する service._update_employees_department(employees_id, department_id, department_date_from) @staticmethod def select_employees(): \"\"\" Employeesを検索する 
\"\"\"", "Queryは2回発行されてForeignKeyで結合します for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).prefetch_related( \"department__employees_set\"): logging.debug(\"prefetch_related:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"prefetch_related:first_name={}\".format(employees_item.first_name)) logging.debug(\"prefetch_related:last_name={}\".format(employees_item.last_name))", "service._update_employees_department(employees_id, department_id, department_date_from) # filterによる絞込を行う # gte:...以上(>=),lte:...以下(<=)になる for employees_item in", "logging.debug(\"prefetch_related:department_name={}\".format(employees_item.department.department_name)) @staticmethod @transaction.atomic() def truncate_employees(): \"\"\" トランケートを行う \"\"\" cursor =", "logging.debug(\"reference:last_name={}\".format(employees_item.last_name)) # select_relatedを使用した参照先情報を取得してキャッシュします # Queryは1回のみ発行されます for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).select_related(\"department\"):", "データをすべて削除する # ForeignKeyが指定されているためdeleteコマンドを実行する Departments.objects.all().delete() for department_type in DepartmentType: department_no =", "department_type.value if Departments.objects.filter(department_no=department_no, delete_flag=0).count() == 0: # データを登録する service._regist_departments(department_no, department_type.en_name)", "20190903 # データを更新する service._update_employees_department(employees_id, department_id, department_date_from) # filterによる絞込を行う # gte:...以上(>=),lte:...以下(<=)になる", "self.update_model.pk = employees_id self.update_model.department_id = department_id self.update_model.department_date_from = department_date_from self.update_model.update_dt", "TABLE {0}'.format(Employees._meta.db_table)) def _regist_employees(self, department_id, emp_no): \"\"\" employeesを登録する \"\"\" self.regist_model", "employees_item.id select_model = Departments.objects.filter(department_no=DepartmentType.PRODUCTION.value).values( \"id\").first() department_id = 
select_model['id'] department_date_from =", "import Employees, Departments \"\"\" employeesテーブルを操作するクラスです。 \"\"\" class EmployeesService(AppLogicBaseService): def __init__(self):", "Departments() self.regist_model.department_no = department_no self.regist_model.department_name = department_name self.regist_model.delete_flag = 0", "from django.utils import timezone from django.utils.timezone import localtime from chart.application.enums.department_type", "emp_no__lte=9, delete_flag=0): employees_id = employees_item.id select_model = Departments.objects.filter(department_no=DepartmentType.SALES.value).values(\"id\").first() department_id =", "# データを登録する service._regist_employees(select_model['id'], emp_no) @staticmethod @transaction.atomic() def create_departments(): \"\"\" Departmentsを作成する", "str(emp_no).zfill(3) self.regist_model.gender = GenderType.MAN.value self.regist_model.department_date_from = \"20190902\" self.regist_model.delete_flag = 0", "select_model = Departments.objects.filter(department_no=DepartmentType.PRODUCTION.value).values( \"id\").first() department_id = select_model['id'] department_date_from = 20190903", "delete_flag=0).select_related(\"department\"): logging.debug(\"select_related:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"select_related:first_name={}\".format(employees_item.first_name)) logging.debug(\"select_related:last_name={}\".format(employees_item.last_name)) logging.debug(\"select_related:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"select_related:department_name={}\".format(employees_item.department.department_name)) # prefetch_relatedを使用した参照先情報を取得してキャッシュします # Queryは2回発行されてForeignKeyで結合します", "update_employees(): \"\"\" Employeesを更新する \"\"\" service = EmployeesService() # filterによる絞込を行う #", "DepartmentType from chart.application.enums.gender_type import GenderType from chart.application.service.app_logic_base import AppLogicBaseService from", "delete_flag=0): 
logging.debug(\"reference:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"reference:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"reference:department_name={}\".format(employees_item.department.department_name)) logging.debug(\"reference:first_name={}\".format(employees_item.first_name)) logging.debug(\"reference:last_name={}\".format(employees_item.last_name)) # select_relatedを使用した参照先情報を取得してキャッシュします # Queryは1回のみ発行されます", "# データをすべて削除する # ForeignKeyが指定されているためdeleteコマンドを実行する Departments.objects.all().delete() for department_type in DepartmentType: department_no", "import GenderType from chart.application.service.app_logic_base import AppLogicBaseService from chart.models import Employees,", "= Departments.objects.filter(department_no=DepartmentType.SALES.value).values(\"id\").first() department_id = select_model['id'] department_date_from = 20190905 # データを更新する", "logging.debug(\"select_related:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"select_related:first_name={}\".format(employees_item.first_name)) logging.debug(\"select_related:last_name={}\".format(employees_item.last_name)) logging.debug(\"select_related:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"select_related:department_name={}\".format(employees_item.department.department_name)) # prefetch_relatedを使用した参照先情報を取得してキャッシュします # Queryは2回発行されてForeignKeyで結合します for", "GenderType.MAN.value self.regist_model.department_date_from = \"20190902\" self.regist_model.delete_flag = 0 self.regist_model.regist_dt = localtime(timezone.now())", "for employees_item in Employees.objects.filter(department__department_no=DepartmentType.SALES.value, delete_flag=0): logging.debug(\"reference:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"reference:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"reference:department_name={}\".format(employees_item.department.department_name)) 
logging.debug(\"reference:first_name={}\".format(employees_item.first_name)) logging.debug(\"reference:last_name={}\".format(employees_item.last_name))", "@transaction.atomic() def truncate_employees(): \"\"\" トランケートを行う \"\"\" cursor = connection.cursor() cursor.execute('TRUNCATE", "= \"first_name_\" + str(emp_no).zfill(3) self.regist_model.last_name = \"last_name_\" + str(emp_no).zfill(3) self.regist_model.gender", "department_id = select_model['id'] department_date_from = 20190903 # データを更新する service._update_employees_department(employees_id, department_id,", "= department_no self.regist_model.department_name = department_name self.regist_model.delete_flag = 0 self.regist_model.regist_dt =", "django.utils import timezone from django.utils.timezone import localtime from chart.application.enums.department_type import", "employeesを登録する \"\"\" self.regist_model = Employees() self.regist_model.emp_no = emp_no self.regist_model.department_id =", "from chart.application.service.app_logic_base import AppLogicBaseService from chart.models import Employees, Departments \"\"\"", "departmentsを登録する \"\"\" self.regist_model = Departments() self.regist_model.department_no = department_no self.regist_model.department_name =", "if Departments.objects.filter(department_no=department_no, delete_flag=0).count() == 0: # データを登録する service._regist_departments(department_no, department_type.en_name) @staticmethod", "# select_relatedを使用した参照先情報を取得してキャッシュします # Queryは1回のみ発行されます for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).select_related(\"department\"): logging.debug(\"select_related:emp_no={}\".format(employees_item.emp_no))", "= emp_no self.regist_model.department_id = department_id self.regist_model.first_name = \"first_name_\" + str(emp_no).zfill(3)", "# ForeignKeyが指定されているためdeleteコマンドを実行する Departments.objects.all().delete() for department_type in DepartmentType: department_no = department_type.value", "department_id self.regist_model.first_name = 
\"first_name_\" + str(emp_no).zfill(3) self.regist_model.last_name = \"last_name_\" +", "= EmployeesService() # filterによる絞込を行う # gt:...より大きい(>),lt:...より小さい(<)になる for employees_item in Employees.objects.filter(emp_no__gt=1,", "= Departments.objects.filter(department_no=DepartmentType.PRODUCTION.value).values( \"id\").first() department_id = select_model['id'] department_date_from = 20190903 #", "= localtime(timezone.now()) self.regist_model.update_dt = localtime(timezone.now()) self.regist_model.save() def _update_employees_department(self, employees_id, department_id,", "= department_id self.regist_model.first_name = \"first_name_\" + str(emp_no).zfill(3) self.regist_model.last_name = \"last_name_\"", "localtime(timezone.now()) self.regist_model.save() return self.regist_model.id def _regist_departments(self, department_no, department_name): \"\"\" departmentsを登録する", "in Employees.objects.filter(emp_no__gte=7, delete_flag=0).prefetch_related( \"department__employees_set\"): logging.debug(\"prefetch_related:emp_no={}\".format(employees_item.emp_no)) logging.debug(\"prefetch_related:first_name={}\".format(employees_item.first_name)) logging.debug(\"prefetch_related:last_name={}\".format(employees_item.last_name)) logging.debug(\"prefetch_related:department_no={}\".format(employees_item.department.department_no)) logging.debug(\"prefetch_related:department_name={}\".format(employees_item.department.department_name)) @staticmethod", "Departmentsを作成する \"\"\" service = EmployeesService() # データをすべて削除する # ForeignKeyが指定されているためdeleteコマンドを実行する Departments.objects.all().delete()", "\"20190902\" self.regist_model.delete_flag = 0 self.regist_model.regist_dt = localtime(timezone.now()) self.regist_model.update_dt = localtime(timezone.now())", "{0}'.format(Employees._meta.db_table)) def _regist_employees(self, department_id, emp_no): \"\"\" employeesを登録する \"\"\" self.regist_model =", "0: if emp_no <= 5: department_no = DepartmentType.SALES.value else: department_no", 
"gt:...より大きい(>),lt:...より小さい(<)になる for employees_item in Employees.objects.filter(emp_no__gt=1, emp_no__lt=3, delete_flag=0): employees_id = employees_item.id", "\"\"\" employeesを登録する \"\"\" self.regist_model = Employees() self.regist_model.emp_no = emp_no self.regist_model.department_id" ]
[ "from flask import render_template def home(): return render_template('upload.html') def about():", "import render_template def home(): return render_template('upload.html') def about(): return render_template('about.html')", "flask import render_template def home(): return render_template('upload.html') def about(): return" ]
[ "alınan verileri sorguya gönderir. h_tc=self.lineEdit.text() h_ads=self.lineEdit_2.text() h_csyt=self.lineEdit_3.text() h_dt=self.dt_hdt.text() icon =", "self.btn_kayit = QtWidgets.QPushButton(self.centralwidget) self.btn_kayit.setGeometry(QtCore.QRect(180, 150, 121, 31)) icon1 = QtGui.QIcon()", "Qt, QDate, QDateTime # Veritabanı bağlantısı için sql cümleciği oluşturuldu.", "lost! from PyQt5 import QtCore, QtGui, QtWidgets import mysql.connector from", "= mysql.connector.connect( host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", database=\"cilth_vt\" ) cursor = db.cursor()", "= QtWidgets.QPushButton(self.centralwidget) self.btn_cikis.setGeometry(QtCore.QRect(310, 150, 121, 31)) self.btn_cikis.setObjectName(\"btn_cikis\") self.btn_cikis.clicked.connect(self.close) self.gridLayoutWidget_2 =", "QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def kayitekle(self): # k_ad/k_sfire lineedit'ten", "self.lbl_htc = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_htc.setObjectName(\"lbl_htc\") self.gridLayout_3.addWidget(self.lbl_htc, 0, 0, 1, 1) self.lbl_hadsoyad", "= QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hdt.setObjectName(\"lbl_hdt\") self.gridLayout_3.addWidget(self.lbl_hdt, 3, 0, 1, 1) self.dt_hdt =", "icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"../heartbeat.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) MainWindow.setWindowIcon(icon) MainWindow.setTabShape(QtWidgets.QTabWidget.Triangular) self.centralwidget =", "1, 1) MainWindow.setCentralWidget(self.centralwidget) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow)", "QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.btn_kayit = 
QtWidgets.QPushButton(self.centralwidget) self.btn_kayit.setGeometry(QtCore.QRect(180, 150, 121, 31)) icon1", "\"ONAYLA\")) self.btn_cikis.setText(_translate(\"MainWindow\", \"İPTAL\")) self.lbl_htc.setText(_translate(\"MainWindow\", \"TC Kimlik No:\")) self.lbl_hadsoyad.setText(_translate(\"MainWindow\", \"Hasta Adı", "utf-8 -*- # Form implementation generated from reading ui file", "= QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hadsoyad.setObjectName(\"lbl_hadsoyad\") self.gridLayout_3.addWidget(self.lbl_hadsoyad, 1, 0, 1, 1) self.lbl_hcinsiyet =", "1, 1) self.lbl_hdt = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hdt.setObjectName(\"lbl_hdt\") self.gridLayout_3.addWidget(self.lbl_hdt, 3, 0, 1,", "\"Doğum Tarihi:\")) self.dt_hdt.setDisplayFormat(_translate(\"MainWindow\", \"yyyy.MM.dd\")) if __name__ == \"__main__\": import sys", "QtWidgets.QPushButton(self.centralwidget) self.btn_cikis.setGeometry(QtCore.QRect(310, 150, 121, 31)) self.btn_cikis.setObjectName(\"btn_cikis\") self.btn_cikis.clicked.connect(self.close) self.gridLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget)", "QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_htc.setObjectName(\"lbl_htc\") self.gridLayout_3.addWidget(self.lbl_htc, 0, 0, 1, 1) self.lbl_hadsoyad = QtWidgets.QLabel(self.gridLayoutWidget_2)", "121, 31)) icon1 = QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap(\"../avatar.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) self.btn_kayit.setIcon(icon1) self.btn_kayit.setObjectName(\"btn_kayit\")", ") cursor = db.cursor() class Ui_MainWindow2(QMainWindow): def setupUi2(self, MainWindow): MainWindow.setObjectName(\"MainWindow\")", "QtWidgets.QGridLayout(self.gridLayoutWidget_2) self.gridLayout_3.setContentsMargins(0, 0, 0, 0) self.gridLayout_3.setObjectName(\"gridLayout_3\") self.lbl_htc = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_htc.setObjectName(\"lbl_htc\")", "1) self.lbl_hadsoyad = QtWidgets.QLabel(self.gridLayoutWidget_2) 
self.lbl_hadsoyad.setObjectName(\"lbl_hadsoyad\") self.gridLayout_3.addWidget(self.lbl_hadsoyad, 1, 0, 1, 1)", "be lost! from PyQt5 import QtCore, QtGui, QtWidgets import mysql.connector", "1, 1) self.lineEdit_3 = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit_3.setObjectName(\"lineEdit_3\") self.gridLayout_3.addWidget(self.lineEdit_3, 2, 1, 1,", "self.gridLayout_3.addWidget(self.lbl_htc, 0, 0, 1, 1) self.lbl_hadsoyad = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hadsoyad.setObjectName(\"lbl_hadsoyad\") self.gridLayout_3.addWidget(self.lbl_hadsoyad,", "1) self.lineEdit_3 = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit_3.setObjectName(\"lineEdit_3\") self.gridLayout_3.addWidget(self.lineEdit_3, 2, 1, 1, 1)", "0, 0, 0) self.gridLayout_3.setObjectName(\"gridLayout_3\") self.lbl_htc = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_htc.setObjectName(\"lbl_htc\") self.gridLayout_3.addWidget(self.lbl_htc, 0,", "if (veri == 1): QMessageBox.information(self, 'BİLGİLENDİRME', \"İşlem Başarılı.\") else: QMessageBox.information(self,", "this file will be lost! 
from PyQt5 import QtCore, QtGui,", "QtWidgets import mysql.connector from PyQt5.QtWidgets import QMessageBox,QWidget,QMainWindow from PyQt5.QtCore import", "= QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hcinsiyet.setObjectName(\"lbl_hcinsiyet\") self.gridLayout_3.addWidget(self.lbl_hcinsiyet, 2, 0, 1, 1) self.lineEdit_2 =", "150, 121, 31)) self.btn_cikis.setObjectName(\"btn_cikis\") self.btn_cikis.clicked.connect(self.close) self.gridLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget) self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10, 10,", "1, 1) self.dt_hdt = QtWidgets.QDateEdit(self.gridLayoutWidget_2) self.dt_hdt.setObjectName(\"dt_hdt\") self.dt_hdt.setDateTime(QtCore.QDateTime(QtCore.QDate(2019, 1, 1), QtCore.QTime(0,", "hasta(h_tc,h_ad_sad,h_cins,h_dt) VALUES (%s,%s,%s,%s)\") cursor.execute(hasta_ekle,(h_tc,h_ads,h_csyt,h_dt)) db.commit() veri = cursor.rowcount except: veri=2", "\"İşlem Başarısız\") def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Cilt", "self.btn_cikis.setObjectName(\"btn_cikis\") self.btn_cikis.clicked.connect(self.close) self.gridLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget) self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10, 10, 571, 128)) self.gridLayoutWidget_2.setObjectName(\"gridLayoutWidget_2\")", "self.gridLayout_3.setContentsMargins(0, 0, 0, 0) self.gridLayout_3.setObjectName(\"gridLayout_3\") self.lbl_htc = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_htc.setObjectName(\"lbl_htc\") self.gridLayout_3.addWidget(self.lbl_htc,", "will be lost! 
from PyQt5 import QtCore, QtGui, QtWidgets import", "-*- # Form implementation generated from reading ui file '.\\hastakayit_gui.ui'", "setupUi2(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(600, 205) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"../heartbeat.png\"), QtGui.QIcon.Normal,", "def setupUi2(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(600, 205) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"../heartbeat.png\"),", "\"Hasta Adı Soyadı:\")) self.lbl_hcinsiyet.setText(_translate(\"MainWindow\", \"Cinsiyet: \")) self.lbl_hdt.setText(_translate(\"MainWindow\", \"Doğum Tarihi:\")) self.dt_hdt.setDisplayFormat(_translate(\"MainWindow\",", "self.gridLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget) self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10, 10, 571, 128)) self.gridLayoutWidget_2.setObjectName(\"gridLayoutWidget_2\") self.gridLayout_3 =", "0, 0) self.gridLayout_3.setObjectName(\"gridLayout_3\") self.lbl_htc = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_htc.setObjectName(\"lbl_htc\") self.gridLayout_3.addWidget(self.lbl_htc, 0, 0,", "(%s,%s,%s,%s)\") cursor.execute(hasta_ekle,(h_tc,h_ads,h_csyt,h_dt)) db.commit() veri = cursor.rowcount except: veri=2 if (veri", "= cursor.rowcount except: veri=2 if (veri == 1): QMessageBox.information(self, 'BİLGİLENDİRME',", "icon1 = QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap(\"../avatar.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) self.btn_kayit.setIcon(icon1) self.btn_kayit.setObjectName(\"btn_kayit\") self.btn_kayit.clicked.connect(self.kayitekle) self.btn_cikis", "file will be lost! 
from PyQt5 import QtCore, QtGui, QtWidgets", "1, 0, 1, 1) self.lbl_hcinsiyet = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hcinsiyet.setObjectName(\"lbl_hcinsiyet\") self.gridLayout_3.addWidget(self.lbl_hcinsiyet, 2,", "Created by: PyQt5 UI code generator 5.11.3 # # WARNING!", "self.btn_kayit.setIcon(icon1) self.btn_kayit.setObjectName(\"btn_kayit\") self.btn_kayit.clicked.connect(self.kayitekle) self.btn_cikis = QtWidgets.QPushButton(self.centralwidget) self.btn_cikis.setGeometry(QtCore.QRect(310, 150, 121, 31))", "0, 1, 1) self.dt_hdt = QtWidgets.QDateEdit(self.gridLayoutWidget_2) self.dt_hdt.setObjectName(\"dt_hdt\") self.dt_hdt.setDateTime(QtCore.QDateTime(QtCore.QDate(2019, 1, 1),", "QtGui.QIcon.Normal, QtGui.QIcon.On) MainWindow.setWindowIcon(icon) MainWindow.setTabShape(QtWidgets.QTabWidget.Triangular) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.btn_kayit =", "Tespit Uygulaması-Hasta Kayıt Ekranı\")) self.btn_kayit.setText(_translate(\"MainWindow\", \"ONAYLA\")) self.btn_cikis.setText(_translate(\"MainWindow\", \"İPTAL\")) self.lbl_htc.setText(_translate(\"MainWindow\", \"TC", "1, 1) self.lbl_hadsoyad = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hadsoyad.setObjectName(\"lbl_hadsoyad\") self.gridLayout_3.addWidget(self.lbl_hadsoyad, 1, 0, 1,", "MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Cilt Hastalıkları Tespit Uygulaması-Hasta Kayıt Ekranı\")) self.btn_kayit.setText(_translate(\"MainWindow\", \"ONAYLA\")) self.btn_cikis.setText(_translate(\"MainWindow\",", "1, 1, 1) self.lineEdit_3 = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit_3.setObjectName(\"lineEdit_3\") self.gridLayout_3.addWidget(self.lineEdit_3, 2, 1,", "= QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit.setObjectName(\"lineEdit\") self.gridLayout_3.addWidget(self.lineEdit, 0, 1, 1, 1) self.lbl_hdt =", "MainWindow.setWindowIcon(icon) 
MainWindow.setTabShape(QtWidgets.QTabWidget.Triangular) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.btn_kayit = QtWidgets.QPushButton(self.centralwidget) self.btn_kayit.setGeometry(QtCore.QRect(180,", "# # Created by: PyQt5 UI code generator 5.11.3 #", "PyQt5.QtWidgets import QMessageBox,QWidget,QMainWindow from PyQt5.QtCore import Qt, QDate, QDateTime #", "changes made in this file will be lost! from PyQt5", "0, 1, 1) self.lbl_hadsoyad = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hadsoyad.setObjectName(\"lbl_hadsoyad\") self.gridLayout_3.addWidget(self.lbl_hadsoyad, 1, 0,", "def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Cilt Hastalıkları Tespit", "self.btn_kayit.setGeometry(QtCore.QRect(180, 150, 121, 31)) icon1 = QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap(\"../avatar.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On)", "icon.addPixmap(QtGui.QPixmap(\"../heartbeat.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) MainWindow.setWindowIcon(icon) MainWindow.setTabShape(QtWidgets.QTabWidget.Triangular) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.btn_kayit", "coding: utf-8 -*- # Form implementation generated from reading ui", "icon.addPixmap(QtGui.QPixmap(\"heartbeat.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) QMessageBox.setWindowIcon(self, icon) try: hasta_ekle = (\"INSERT INTO", "1) self.dt_hdt = QtWidgets.QDateEdit(self.gridLayoutWidget_2) self.dt_hdt.setObjectName(\"dt_hdt\") self.dt_hdt.setDateTime(QtCore.QDateTime(QtCore.QDate(2019, 1, 1), QtCore.QTime(0, 0,", "-*- coding: utf-8 -*- # Form implementation generated from reading", "WARNING! 
All changes made in this file will be lost!", "self.gridLayout_3.addWidget(self.lineEdit_2, 1, 1, 1, 1) self.lineEdit_3 = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit_3.setObjectName(\"lineEdit_3\") self.gridLayout_3.addWidget(self.lineEdit_3,", "passwd=\"<PASSWORD>\", database=\"cilth_vt\" ) cursor = db.cursor() class Ui_MainWindow2(QMainWindow): def setupUi2(self,", "self.lbl_htc.setText(_translate(\"MainWindow\", \"TC Kimlik No:\")) self.lbl_hadsoyad.setText(_translate(\"MainWindow\", \"Hasta Adı Soyadı:\")) self.lbl_hcinsiyet.setText(_translate(\"MainWindow\", \"Cinsiyet:", "Kayıt Ekranı\")) self.btn_kayit.setText(_translate(\"MainWindow\", \"ONAYLA\")) self.btn_cikis.setText(_translate(\"MainWindow\", \"İPTAL\")) self.lbl_htc.setText(_translate(\"MainWindow\", \"TC Kimlik No:\"))", "QDateTime # Veritabanı bağlantısı için sql cümleciği oluşturuldu. db =", "kayitekle(self): # k_ad/k_sfire lineedit'ten alınan verileri sorguya gönderir. h_tc=self.lineEdit.text() h_ads=self.lineEdit_2.text()", "self.btn_cikis = QtWidgets.QPushButton(self.centralwidget) self.btn_cikis.setGeometry(QtCore.QRect(310, 150, 121, 31)) self.btn_cikis.setObjectName(\"btn_cikis\") self.btn_cikis.clicked.connect(self.close) self.gridLayoutWidget_2", "cümleciği oluşturuldu. 
db = mysql.connector.connect( host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", database=\"cilth_vt\" )", "__name__ == \"__main__\": import sys app = QtWidgets.QApplication(sys.argv) MainWindow =", "MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def kayitekle(self): # k_ad/k_sfire lineedit'ten alınan verileri", "1, 1, 1) self.lineEdit = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit.setObjectName(\"lineEdit\") self.gridLayout_3.addWidget(self.lineEdit, 0, 1,", "= QtWidgets.QWidget(self.centralwidget) self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10, 10, 571, 128)) self.gridLayoutWidget_2.setObjectName(\"gridLayoutWidget_2\") self.gridLayout_3 = QtWidgets.QGridLayout(self.gridLayoutWidget_2)", "QtGui.QIcon.On) self.btn_kayit.setIcon(icon1) self.btn_kayit.setObjectName(\"btn_kayit\") self.btn_kayit.clicked.connect(self.kayitekle) self.btn_cikis = QtWidgets.QPushButton(self.centralwidget) self.btn_cikis.setGeometry(QtCore.QRect(310, 150, 121,", "1, 1, 1) MainWindow.setCentralWidget(self.centralwidget) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow)", "1) self.lineEdit = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit.setObjectName(\"lineEdit\") self.gridLayout_3.addWidget(self.lineEdit, 0, 1, 1, 1)", "10, 571, 128)) self.gridLayoutWidget_2.setObjectName(\"gridLayoutWidget_2\") self.gridLayout_3 = QtWidgets.QGridLayout(self.gridLayoutWidget_2) self.gridLayout_3.setContentsMargins(0, 0, 0,", "QtGui.QIcon.Normal, QtGui.QIcon.On) QMessageBox.setWindowIcon(self, icon) try: hasta_ekle = (\"INSERT INTO hasta(h_tc,h_ad_sad,h_cins,h_dt)", "self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def kayitekle(self): # k_ad/k_sfire lineedit'ten alınan verileri sorguya", "import sys app = QtWidgets.QApplication(sys.argv) MainWindow = 
QtWidgets.QMainWindow() ui =", "self.lineEdit = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit.setObjectName(\"lineEdit\") self.gridLayout_3.addWidget(self.lineEdit, 0, 1, 1, 1) self.lbl_hdt", "QtGui.QIcon.Normal, QtGui.QIcon.On) self.btn_kayit.setIcon(icon1) self.btn_kayit.setObjectName(\"btn_kayit\") self.btn_kayit.clicked.connect(self.kayitekle) self.btn_cikis = QtWidgets.QPushButton(self.centralwidget) self.btn_cikis.setGeometry(QtCore.QRect(310, 150,", "# Form implementation generated from reading ui file '.\\hastakayit_gui.ui' #", "made in this file will be lost! from PyQt5 import", "QtGui, QtWidgets import mysql.connector from PyQt5.QtWidgets import QMessageBox,QWidget,QMainWindow from PyQt5.QtCore", "QtCore.QTime(0, 0, 0))) self.gridLayout_3.addWidget(self.dt_hdt, 3, 1, 1, 1) MainWindow.setCentralWidget(self.centralwidget) self.statusbar", "'BİLGİLENDİRME', \"İşlem Başarılı.\") else: QMessageBox.information(self, 'BİLGİLENDİRME', \"İşlem Başarısız\") def retranslateUi(self,", "QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit_3.setObjectName(\"lineEdit_3\") self.gridLayout_3.addWidget(self.lineEdit_3, 2, 1, 1, 1) self.lineEdit = QtWidgets.QLineEdit(self.gridLayoutWidget_2)", "1) self.lbl_hdt = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hdt.setObjectName(\"lbl_hdt\") self.gridLayout_3.addWidget(self.lbl_hdt, 3, 0, 1, 1)", "sql cümleciği oluşturuldu. db = mysql.connector.connect( host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", database=\"cilth_vt\"", "\"yyyy.MM.dd\")) if __name__ == \"__main__\": import sys app = QtWidgets.QApplication(sys.argv)", "QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui = Ui_MainWindow2() ui.setupUi2(MainWindow) MainWindow.show() sys.exit(app.exec_())", "# Veritabanı bağlantısı için sql cümleciği oluşturuldu. 
db = mysql.connector.connect(", "self.gridLayout_3.addWidget(self.lineEdit_3, 2, 1, 1, 1) self.lineEdit = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit.setObjectName(\"lineEdit\") self.gridLayout_3.addWidget(self.lineEdit,", "sorguya gönderir. h_tc=self.lineEdit.text() h_ads=self.lineEdit_2.text() h_csyt=self.lineEdit_3.text() h_dt=self.dt_hdt.text() icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"heartbeat.png\"),", "self.lbl_hcinsiyet.setObjectName(\"lbl_hcinsiyet\") self.gridLayout_3.addWidget(self.lbl_hcinsiyet, 2, 0, 1, 1) self.lineEdit_2 = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit_2.setObjectName(\"lineEdit_2\")", "QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hadsoyad.setObjectName(\"lbl_hadsoyad\") self.gridLayout_3.addWidget(self.lbl_hadsoyad, 1, 0, 1, 1) self.lbl_hcinsiyet = QtWidgets.QLabel(self.gridLayoutWidget_2)", "MainWindow.setCentralWidget(self.centralwidget) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def kayitekle(self):", "self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def kayitekle(self): # k_ad/k_sfire lineedit'ten alınan", "self.dt_hdt = QtWidgets.QDateEdit(self.gridLayoutWidget_2) self.dt_hdt.setObjectName(\"dt_hdt\") self.dt_hdt.setDateTime(QtCore.QDateTime(QtCore.QDate(2019, 1, 1), QtCore.QTime(0, 0, 0)))", "571, 128)) self.gridLayoutWidget_2.setObjectName(\"gridLayoutWidget_2\") self.gridLayout_3 = QtWidgets.QGridLayout(self.gridLayoutWidget_2) self.gridLayout_3.setContentsMargins(0, 0, 0, 0)", "== 1): QMessageBox.information(self, 'BİLGİLENDİRME', \"İşlem Başarılı.\") else: QMessageBox.information(self, 'BİLGİLENDİRME', \"İşlem", "self.gridLayout_3 = QtWidgets.QGridLayout(self.gridLayoutWidget_2) 
self.gridLayout_3.setContentsMargins(0, 0, 0, 0) self.gridLayout_3.setObjectName(\"gridLayout_3\") self.lbl_htc =", "2, 0, 1, 1) self.lineEdit_2 = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit_2.setObjectName(\"lineEdit_2\") self.gridLayout_3.addWidget(self.lineEdit_2, 1,", "self.lbl_htc.setObjectName(\"lbl_htc\") self.gridLayout_3.addWidget(self.lbl_htc, 0, 0, 1, 1) self.lbl_hadsoyad = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hadsoyad.setObjectName(\"lbl_hadsoyad\")", "cursor.execute(hasta_ekle,(h_tc,h_ads,h_csyt,h_dt)) db.commit() veri = cursor.rowcount except: veri=2 if (veri ==", "self.gridLayout_3.addWidget(self.lineEdit, 0, 1, 1, 1) self.lbl_hdt = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hdt.setObjectName(\"lbl_hdt\") self.gridLayout_3.addWidget(self.lbl_hdt,", "QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"../heartbeat.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) MainWindow.setWindowIcon(icon) MainWindow.setTabShape(QtWidgets.QTabWidget.Triangular) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\")", "0, 0))) self.gridLayout_3.addWidget(self.dt_hdt, 3, 1, 1, 1) MainWindow.setCentralWidget(self.centralwidget) self.statusbar =", "Başarısız\") def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Cilt Hastalıkları", "Form implementation generated from reading ui file '.\\hastakayit_gui.ui' # #", "QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit_2.setObjectName(\"lineEdit_2\") self.gridLayout_3.addWidget(self.lineEdit_2, 1, 1, 1, 1) self.lineEdit_3 = QtWidgets.QLineEdit(self.gridLayoutWidget_2)", "retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Cilt Hastalıkları Tespit Uygulaması-Hasta", "1, 1) self.lineEdit_2 = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit_2.setObjectName(\"lineEdit_2\") 
self.gridLayout_3.addWidget(self.lineEdit_2, 1, 1, 1,", "# # WARNING! All changes made in this file will", "from PyQt5 import QtCore, QtGui, QtWidgets import mysql.connector from PyQt5.QtWidgets", "QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hcinsiyet.setObjectName(\"lbl_hcinsiyet\") self.gridLayout_3.addWidget(self.lbl_hcinsiyet, 2, 0, 1, 1) self.lineEdit_2 = QtWidgets.QLineEdit(self.gridLayoutWidget_2)", "implementation generated from reading ui file '.\\hastakayit_gui.ui' # # Created", "def kayitekle(self): # k_ad/k_sfire lineedit'ten alınan verileri sorguya gönderir. h_tc=self.lineEdit.text()", "class Ui_MainWindow2(QMainWindow): def setupUi2(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(600, 205) icon =", "self.gridLayout_3.addWidget(self.lbl_hcinsiyet, 2, 0, 1, 1) self.lineEdit_2 = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit_2.setObjectName(\"lineEdit_2\") self.gridLayout_3.addWidget(self.lineEdit_2,", "No:\")) self.lbl_hadsoyad.setText(_translate(\"MainWindow\", \"Hasta Adı Soyadı:\")) self.lbl_hcinsiyet.setText(_translate(\"MainWindow\", \"Cinsiyet: \")) self.lbl_hdt.setText(_translate(\"MainWindow\", \"Doğum", "0, 1, 1) self.lbl_hcinsiyet = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hcinsiyet.setObjectName(\"lbl_hcinsiyet\") self.gridLayout_3.addWidget(self.lbl_hcinsiyet, 2, 0,", "== \"__main__\": import sys app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow()", "self.gridLayoutWidget_2.setObjectName(\"gridLayoutWidget_2\") self.gridLayout_3 = QtWidgets.QGridLayout(self.gridLayoutWidget_2) self.gridLayout_3.setContentsMargins(0, 0, 0, 0) self.gridLayout_3.setObjectName(\"gridLayout_3\") self.lbl_htc", "1, 1), QtCore.QTime(0, 0, 0))) self.gridLayout_3.addWidget(self.dt_hdt, 3, 1, 1, 1)", "QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap(\"../avatar.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) self.btn_kayit.setIcon(icon1) self.btn_kayit.setObjectName(\"btn_kayit\") 
self.btn_kayit.clicked.connect(self.kayitekle) self.btn_cikis = QtWidgets.QPushButton(self.centralwidget)", "self.btn_cikis.setText(_translate(\"MainWindow\", \"İPTAL\")) self.lbl_htc.setText(_translate(\"MainWindow\", \"TC Kimlik No:\")) self.lbl_hadsoyad.setText(_translate(\"MainWindow\", \"Hasta Adı Soyadı:\"))", "'BİLGİLENDİRME', \"İşlem Başarısız\") def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\",", "31)) icon1 = QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap(\"../avatar.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) self.btn_kayit.setIcon(icon1) self.btn_kayit.setObjectName(\"btn_kayit\") self.btn_kayit.clicked.connect(self.kayitekle)", "cursor = db.cursor() class Ui_MainWindow2(QMainWindow): def setupUi2(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(600,", "self.lbl_hdt = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hdt.setObjectName(\"lbl_hdt\") self.gridLayout_3.addWidget(self.lbl_hdt, 3, 0, 1, 1) self.dt_hdt", "Soyadı:\")) self.lbl_hcinsiyet.setText(_translate(\"MainWindow\", \"Cinsiyet: \")) self.lbl_hdt.setText(_translate(\"MainWindow\", \"Doğum Tarihi:\")) self.dt_hdt.setDisplayFormat(_translate(\"MainWindow\", \"yyyy.MM.dd\")) if", "QMessageBox.information(self, 'BİLGİLENDİRME', \"İşlem Başarısız\") def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate", "All changes made in this file will be lost! from", "Veritabanı bağlantısı için sql cümleciği oluşturuldu. db = mysql.connector.connect( host=\"localhost\",", "in this file will be lost! from PyQt5 import QtCore,", "UI code generator 5.11.3 # # WARNING! 
All changes made", "0, 0, 1, 1) self.lbl_hadsoyad = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hadsoyad.setObjectName(\"lbl_hadsoyad\") self.gridLayout_3.addWidget(self.lbl_hadsoyad, 1,", "self.lbl_hadsoyad.setText(_translate(\"MainWindow\", \"Hasta Adı Soyadı:\")) self.lbl_hcinsiyet.setText(_translate(\"MainWindow\", \"Cinsiyet: \")) self.lbl_hdt.setText(_translate(\"MainWindow\", \"Doğum Tarihi:\"))", "self.lineEdit_3 = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit_3.setObjectName(\"lineEdit_3\") self.gridLayout_3.addWidget(self.lineEdit_3, 2, 1, 1, 1) self.lineEdit", "self.lbl_hdt.setText(_translate(\"MainWindow\", \"Doğum Tarihi:\")) self.dt_hdt.setDisplayFormat(_translate(\"MainWindow\", \"yyyy.MM.dd\")) if __name__ == \"__main__\": import", "Ui_MainWindow2(QMainWindow): def setupUi2(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(600, 205) icon = QtGui.QIcon()", "app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui = Ui_MainWindow2() ui.setupUi2(MainWindow)", "= db.cursor() class Ui_MainWindow2(QMainWindow): def setupUi2(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(600, 205)", "MainWindow.resize(600, 205) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"../heartbeat.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) MainWindow.setWindowIcon(icon) MainWindow.setTabShape(QtWidgets.QTabWidget.Triangular)", "# Created by: PyQt5 UI code generator 5.11.3 # #", "1) self.lbl_hcinsiyet = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hcinsiyet.setObjectName(\"lbl_hcinsiyet\") self.gridLayout_3.addWidget(self.lbl_hcinsiyet, 2, 0, 1, 1)", "150, 121, 31)) icon1 = QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap(\"../avatar.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) self.btn_kayit.setIcon(icon1)", "self.btn_kayit.setText(_translate(\"MainWindow\", \"ONAYLA\")) self.btn_cikis.setText(_translate(\"MainWindow\", \"İPTAL\")) self.lbl_htc.setText(_translate(\"MainWindow\", 
\"TC Kimlik No:\")) self.lbl_hadsoyad.setText(_translate(\"MainWindow\", \"Hasta", "self.lbl_hadsoyad = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hadsoyad.setObjectName(\"lbl_hadsoyad\") self.gridLayout_3.addWidget(self.lbl_hadsoyad, 1, 0, 1, 1) self.lbl_hcinsiyet", "from PyQt5.QtCore import Qt, QDate, QDateTime # Veritabanı bağlantısı için", "VALUES (%s,%s,%s,%s)\") cursor.execute(hasta_ekle,(h_tc,h_ads,h_csyt,h_dt)) db.commit() veri = cursor.rowcount except: veri=2 if", "\"TC Kimlik No:\")) self.lbl_hadsoyad.setText(_translate(\"MainWindow\", \"Hasta Adı Soyadı:\")) self.lbl_hcinsiyet.setText(_translate(\"MainWindow\", \"Cinsiyet: \"))", "user=\"root\", passwd=\"<PASSWORD>\", database=\"cilth_vt\" ) cursor = db.cursor() class Ui_MainWindow2(QMainWindow): def", "mysql.connector from PyQt5.QtWidgets import QMessageBox,QWidget,QMainWindow from PyQt5.QtCore import Qt, QDate,", "Tarihi:\")) self.dt_hdt.setDisplayFormat(_translate(\"MainWindow\", \"yyyy.MM.dd\")) if __name__ == \"__main__\": import sys app", "\"İşlem Başarılı.\") else: QMessageBox.information(self, 'BİLGİLENDİRME', \"İşlem Başarısız\") def retranslateUi(self, MainWindow):", "self.lbl_hdt.setObjectName(\"lbl_hdt\") self.gridLayout_3.addWidget(self.lbl_hdt, 3, 0, 1, 1) self.dt_hdt = QtWidgets.QDateEdit(self.gridLayoutWidget_2) self.dt_hdt.setObjectName(\"dt_hdt\")", "121, 31)) self.btn_cikis.setObjectName(\"btn_cikis\") self.btn_cikis.clicked.connect(self.close) self.gridLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget) self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10, 10, 571,", "lineedit'ten alınan verileri sorguya gönderir. 
h_tc=self.lineEdit.text() h_ads=self.lineEdit_2.text() h_csyt=self.lineEdit_3.text() h_dt=self.dt_hdt.text() icon", "1, 1) self.lineEdit = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit.setObjectName(\"lineEdit\") self.gridLayout_3.addWidget(self.lineEdit, 0, 1, 1,", "0) self.gridLayout_3.setObjectName(\"gridLayout_3\") self.lbl_htc = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_htc.setObjectName(\"lbl_htc\") self.gridLayout_3.addWidget(self.lbl_htc, 0, 0, 1,", "3, 1, 1, 1) MainWindow.setCentralWidget(self.centralwidget) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar)", "h_ads=self.lineEdit_2.text() h_csyt=self.lineEdit_3.text() h_dt=self.dt_hdt.text() icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"heartbeat.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) QMessageBox.setWindowIcon(self,", "from reading ui file '.\\hastakayit_gui.ui' # # Created by: PyQt5", "= QtWidgets.QPushButton(self.centralwidget) self.btn_kayit.setGeometry(QtCore.QRect(180, 150, 121, 31)) icon1 = QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap(\"../avatar.png\"),", "veri=2 if (veri == 1): QMessageBox.information(self, 'BİLGİLENDİRME', \"İşlem Başarılı.\") else:", "5.11.3 # # WARNING! All changes made in this file", "# WARNING! All changes made in this file will be", "code generator 5.11.3 # # WARNING! 
All changes made in", "self.gridLayout_3.addWidget(self.dt_hdt, 3, 1, 1, 1) MainWindow.setCentralWidget(self.centralwidget) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\")", "MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(600, 205) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"../heartbeat.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) MainWindow.setWindowIcon(icon)", "icon) try: hasta_ekle = (\"INSERT INTO hasta(h_tc,h_ad_sad,h_cins,h_dt) VALUES (%s,%s,%s,%s)\") cursor.execute(hasta_ekle,(h_tc,h_ads,h_csyt,h_dt))", "QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hdt.setObjectName(\"lbl_hdt\") self.gridLayout_3.addWidget(self.lbl_hdt, 3, 0, 1, 1) self.dt_hdt = QtWidgets.QDateEdit(self.gridLayoutWidget_2)", "if __name__ == \"__main__\": import sys app = QtWidgets.QApplication(sys.argv) MainWindow", "1) MainWindow.setCentralWidget(self.centralwidget) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def", "QMessageBox,QWidget,QMainWindow from PyQt5.QtCore import Qt, QDate, QDateTime # Veritabanı bağlantısı", "else: QMessageBox.information(self, 'BİLGİLENDİRME', \"İşlem Başarısız\") def retranslateUi(self, MainWindow): _translate =", "veri = cursor.rowcount except: veri=2 if (veri == 1): QMessageBox.information(self,", "QMessageBox.setWindowIcon(self, icon) try: hasta_ekle = (\"INSERT INTO hasta(h_tc,h_ad_sad,h_cins,h_dt) VALUES (%s,%s,%s,%s)\")", "QMessageBox.information(self, 'BİLGİLENDİRME', \"İşlem Başarılı.\") else: QMessageBox.information(self, 'BİLGİLENDİRME', \"İşlem Başarısız\") def", "generated from reading ui file '.\\hastakayit_gui.ui' # # Created by:", "QDate, QDateTime # Veritabanı bağlantısı için sql cümleciği oluşturuldu. 
db", "h_dt=self.dt_hdt.text() icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"heartbeat.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) QMessageBox.setWindowIcon(self, icon) try:", "= QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap(\"../avatar.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) self.btn_kayit.setIcon(icon1) self.btn_kayit.setObjectName(\"btn_kayit\") self.btn_kayit.clicked.connect(self.kayitekle) self.btn_cikis =", "h_csyt=self.lineEdit_3.text() h_dt=self.dt_hdt.text() icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"heartbeat.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) QMessageBox.setWindowIcon(self, icon)", "from PyQt5.QtWidgets import QMessageBox,QWidget,QMainWindow from PyQt5.QtCore import Qt, QDate, QDateTime", "self.gridLayout_3.addWidget(self.lbl_hdt, 3, 0, 1, 1) self.dt_hdt = QtWidgets.QDateEdit(self.gridLayoutWidget_2) self.dt_hdt.setObjectName(\"dt_hdt\") self.dt_hdt.setDateTime(QtCore.QDateTime(QtCore.QDate(2019,", "MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(600, 205) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"../heartbeat.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On)", "_translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Cilt Hastalıkları Tespit Uygulaması-Hasta Kayıt Ekranı\"))", "self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def kayitekle(self): #", "PyQt5 UI code generator 5.11.3 # # WARNING! 
All changes", "205) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"../heartbeat.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) MainWindow.setWindowIcon(icon) MainWindow.setTabShape(QtWidgets.QTabWidget.Triangular) self.centralwidget", "1, 1, 1) self.lbl_hdt = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hdt.setObjectName(\"lbl_hdt\") self.gridLayout_3.addWidget(self.lbl_hdt, 3, 0,", "ui file '.\\hastakayit_gui.ui' # # Created by: PyQt5 UI code", "2, 1, 1, 1) self.lineEdit = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit.setObjectName(\"lineEdit\") self.gridLayout_3.addWidget(self.lineEdit, 0,", "QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"heartbeat.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) QMessageBox.setWindowIcon(self, icon) try: hasta_ekle = (\"INSERT", "\"İPTAL\")) self.lbl_htc.setText(_translate(\"MainWindow\", \"TC Kimlik No:\")) self.lbl_hadsoyad.setText(_translate(\"MainWindow\", \"Hasta Adı Soyadı:\")) self.lbl_hcinsiyet.setText(_translate(\"MainWindow\",", "QtCore, QtGui, QtWidgets import mysql.connector from PyQt5.QtWidgets import QMessageBox,QWidget,QMainWindow from", "cursor.rowcount except: veri=2 if (veri == 1): QMessageBox.information(self, 'BİLGİLENDİRME', \"İşlem", "= QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit_2.setObjectName(\"lineEdit_2\") self.gridLayout_3.addWidget(self.lineEdit_2, 1, 1, 1, 1) self.lineEdit_3 =", "import mysql.connector from PyQt5.QtWidgets import QMessageBox,QWidget,QMainWindow from PyQt5.QtCore import Qt,", "3, 0, 1, 1) self.dt_hdt = QtWidgets.QDateEdit(self.gridLayoutWidget_2) self.dt_hdt.setObjectName(\"dt_hdt\") self.dt_hdt.setDateTime(QtCore.QDateTime(QtCore.QDate(2019, 1,", "= QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"heartbeat.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) QMessageBox.setWindowIcon(self, icon) try: hasta_ekle =", "self.btn_cikis.clicked.connect(self.close) self.gridLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget) 
self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10, 10, 571, 128)) self.gridLayoutWidget_2.setObjectName(\"gridLayoutWidget_2\") self.gridLayout_3", "QtWidgets.QPushButton(self.centralwidget) self.btn_kayit.setGeometry(QtCore.QRect(180, 150, 121, 31)) icon1 = QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap(\"../avatar.png\"), QtGui.QIcon.Normal,", "= QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Cilt Hastalıkları Tespit Uygulaması-Hasta Kayıt Ekranı\")) self.btn_kayit.setText(_translate(\"MainWindow\",", "except: veri=2 if (veri == 1): QMessageBox.information(self, 'BİLGİLENDİRME', \"İşlem Başarılı.\")", "= QtWidgets.QDateEdit(self.gridLayoutWidget_2) self.dt_hdt.setObjectName(\"dt_hdt\") self.dt_hdt.setDateTime(QtCore.QDateTime(QtCore.QDate(2019, 1, 1), QtCore.QTime(0, 0, 0))) self.gridLayout_3.addWidget(self.dt_hdt,", "\"Cilt Hastalıkları Tespit Uygulaması-Hasta Kayıt Ekranı\")) self.btn_kayit.setText(_translate(\"MainWindow\", \"ONAYLA\")) self.btn_cikis.setText(_translate(\"MainWindow\", \"İPTAL\"))", "host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", database=\"cilth_vt\" ) cursor = db.cursor() class Ui_MainWindow2(QMainWindow):", "self.lbl_hcinsiyet = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hcinsiyet.setObjectName(\"lbl_hcinsiyet\") self.gridLayout_3.addWidget(self.lbl_hcinsiyet, 2, 0, 1, 1) self.lineEdit_2", "QtCore.QMetaObject.connectSlotsByName(MainWindow) def kayitekle(self): # k_ad/k_sfire lineedit'ten alınan verileri sorguya gönderir.", "# -*- coding: utf-8 -*- # Form implementation generated from", "MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Cilt Hastalıkları Tespit Uygulaması-Hasta Kayıt", "= QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui = Ui_MainWindow2() ui.setupUi2(MainWindow) MainWindow.show()", "1, 1, 1, 1) self.lineEdit_3 = QtWidgets.QLineEdit(self.gridLayoutWidget_2) 
self.lineEdit_3.setObjectName(\"lineEdit_3\") self.gridLayout_3.addWidget(self.lineEdit_3, 2,", "= QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName(\"statusbar\") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def kayitekle(self): # k_ad/k_sfire", "db = mysql.connector.connect( host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", database=\"cilth_vt\" ) cursor =", "0, 1, 1, 1) self.lbl_hdt = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hdt.setObjectName(\"lbl_hdt\") self.gridLayout_3.addWidget(self.lbl_hdt, 3,", "h_tc=self.lineEdit.text() h_ads=self.lineEdit_2.text() h_csyt=self.lineEdit_3.text() h_dt=self.dt_hdt.text() icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"heartbeat.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On)", "QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit.setObjectName(\"lineEdit\") self.gridLayout_3.addWidget(self.lineEdit, 0, 1, 1, 1) self.lbl_hdt = QtWidgets.QLabel(self.gridLayoutWidget_2)", "self.dt_hdt.setDateTime(QtCore.QDateTime(QtCore.QDate(2019, 1, 1), QtCore.QTime(0, 0, 0))) self.gridLayout_3.addWidget(self.dt_hdt, 3, 1, 1,", "QtGui.QIcon.On) MainWindow.setWindowIcon(icon) MainWindow.setTabShape(QtWidgets.QTabWidget.Triangular) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.btn_kayit = QtWidgets.QPushButton(self.centralwidget)", "31)) self.btn_cikis.setObjectName(\"btn_cikis\") self.btn_cikis.clicked.connect(self.close) self.gridLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget) self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10, 10, 571, 128))", "reading ui file '.\\hastakayit_gui.ui' # # Created by: PyQt5 UI", "QtGui.QIcon.On) QMessageBox.setWindowIcon(self, icon) try: hasta_ekle = (\"INSERT INTO hasta(h_tc,h_ad_sad,h_cins,h_dt) VALUES", "oluşturuldu. 
db = mysql.connector.connect( host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", database=\"cilth_vt\" ) cursor", "QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Cilt Hastalıkları Tespit Uygulaması-Hasta Kayıt Ekranı\")) self.btn_kayit.setText(_translate(\"MainWindow\", \"ONAYLA\"))", "database=\"cilth_vt\" ) cursor = db.cursor() class Ui_MainWindow2(QMainWindow): def setupUi2(self, MainWindow):", "Adı Soyadı:\")) self.lbl_hcinsiyet.setText(_translate(\"MainWindow\", \"Cinsiyet: \")) self.lbl_hdt.setText(_translate(\"MainWindow\", \"Doğum Tarihi:\")) self.dt_hdt.setDisplayFormat(_translate(\"MainWindow\", \"yyyy.MM.dd\"))", "self.dt_hdt.setDisplayFormat(_translate(\"MainWindow\", \"yyyy.MM.dd\")) if __name__ == \"__main__\": import sys app =", "try: hasta_ekle = (\"INSERT INTO hasta(h_tc,h_ad_sad,h_cins,h_dt) VALUES (%s,%s,%s,%s)\") cursor.execute(hasta_ekle,(h_tc,h_ads,h_csyt,h_dt)) db.commit()", "self.dt_hdt.setObjectName(\"dt_hdt\") self.dt_hdt.setDateTime(QtCore.QDateTime(QtCore.QDate(2019, 1, 1), QtCore.QTime(0, 0, 0))) self.gridLayout_3.addWidget(self.dt_hdt, 3, 1,", "import QtCore, QtGui, QtWidgets import mysql.connector from PyQt5.QtWidgets import QMessageBox,QWidget,QMainWindow", "= QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_htc.setObjectName(\"lbl_htc\") self.gridLayout_3.addWidget(self.lbl_htc, 0, 0, 1, 1) self.lbl_hadsoyad =", "icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"heartbeat.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) QMessageBox.setWindowIcon(self, icon) try: hasta_ekle", "self.btn_kayit.setObjectName(\"btn_kayit\") self.btn_kayit.clicked.connect(self.kayitekle) self.btn_cikis = QtWidgets.QPushButton(self.centralwidget) self.btn_cikis.setGeometry(QtCore.QRect(310, 150, 121, 31)) self.btn_cikis.setObjectName(\"btn_cikis\")", "verileri sorguya gönderir. 
h_tc=self.lineEdit.text() h_ads=self.lineEdit_2.text() h_csyt=self.lineEdit_3.text() h_dt=self.dt_hdt.text() icon = QtGui.QIcon()", "0, 1, 1) self.lineEdit_2 = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit_2.setObjectName(\"lineEdit_2\") self.gridLayout_3.addWidget(self.lineEdit_2, 1, 1,", "self.lineEdit_3.setObjectName(\"lineEdit_3\") self.gridLayout_3.addWidget(self.lineEdit_3, 2, 1, 1, 1) self.lineEdit = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit.setObjectName(\"lineEdit\")", "self.lbl_hcinsiyet.setText(_translate(\"MainWindow\", \"Cinsiyet: \")) self.lbl_hdt.setText(_translate(\"MainWindow\", \"Doğum Tarihi:\")) self.dt_hdt.setDisplayFormat(_translate(\"MainWindow\", \"yyyy.MM.dd\")) if __name__", "<reponame>roselight/Image-Recognition-with-OpenCv<filename>hastakayit_gui.py # -*- coding: utf-8 -*- # Form implementation generated", "Ekranı\")) self.btn_kayit.setText(_translate(\"MainWindow\", \"ONAYLA\")) self.btn_cikis.setText(_translate(\"MainWindow\", \"İPTAL\")) self.lbl_htc.setText(_translate(\"MainWindow\", \"TC Kimlik No:\")) self.lbl_hadsoyad.setText(_translate(\"MainWindow\",", "self.btn_cikis.setGeometry(QtCore.QRect(310, 150, 121, 31)) self.btn_cikis.setObjectName(\"btn_cikis\") self.btn_cikis.clicked.connect(self.close) self.gridLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget) self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10,", "= QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.btn_kayit = QtWidgets.QPushButton(self.centralwidget) self.btn_kayit.setGeometry(QtCore.QRect(180, 150, 121, 31))", "\"Cinsiyet: \")) self.lbl_hdt.setText(_translate(\"MainWindow\", \"Doğum Tarihi:\")) self.dt_hdt.setDisplayFormat(_translate(\"MainWindow\", \"yyyy.MM.dd\")) if __name__ ==", "k_ad/k_sfire lineedit'ten alınan verileri sorguya gönderir. h_tc=self.lineEdit.text() h_ads=self.lineEdit_2.text() h_csyt=self.lineEdit_3.text() h_dt=self.dt_hdt.text()", "için sql cümleciği oluşturuldu. 
db = mysql.connector.connect( host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\",", "1), QtCore.QTime(0, 0, 0))) self.gridLayout_3.addWidget(self.dt_hdt, 3, 1, 1, 1) MainWindow.setCentralWidget(self.centralwidget)", "= (\"INSERT INTO hasta(h_tc,h_ad_sad,h_cins,h_dt) VALUES (%s,%s,%s,%s)\") cursor.execute(hasta_ekle,(h_tc,h_ads,h_csyt,h_dt)) db.commit() veri =", "\"__main__\": import sys app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui", "1): QMessageBox.information(self, 'BİLGİLENDİRME', \"İşlem Başarılı.\") else: QMessageBox.information(self, 'BİLGİLENDİRME', \"İşlem Başarısız\")", "1, 1) self.lbl_hcinsiyet = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hcinsiyet.setObjectName(\"lbl_hcinsiyet\") self.gridLayout_3.addWidget(self.lbl_hcinsiyet, 2, 0, 1,", "gönderir. h_tc=self.lineEdit.text() h_ads=self.lineEdit_2.text() h_csyt=self.lineEdit_3.text() h_dt=self.dt_hdt.text() icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"heartbeat.png\"), QtGui.QIcon.Normal,", "self.gridLayout_3.addWidget(self.lbl_hadsoyad, 1, 0, 1, 1) self.lbl_hcinsiyet = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hcinsiyet.setObjectName(\"lbl_hcinsiyet\") self.gridLayout_3.addWidget(self.lbl_hcinsiyet,", "PyQt5.QtCore import Qt, QDate, QDateTime # Veritabanı bağlantısı için sql", "0))) self.gridLayout_3.addWidget(self.dt_hdt, 3, 1, 1, 1) MainWindow.setCentralWidget(self.centralwidget) self.statusbar = QtWidgets.QStatusBar(MainWindow)", "(veri == 1): QMessageBox.information(self, 'BİLGİLENDİRME', \"İşlem Başarılı.\") else: QMessageBox.information(self, 'BİLGİLENDİRME',", "128)) self.gridLayoutWidget_2.setObjectName(\"gridLayoutWidget_2\") self.gridLayout_3 = QtWidgets.QGridLayout(self.gridLayoutWidget_2) self.gridLayout_3.setContentsMargins(0, 0, 0, 0) self.gridLayout_3.setObjectName(\"gridLayout_3\")", "self.lineEdit_2.setObjectName(\"lineEdit_2\") self.gridLayout_3.addWidget(self.lineEdit_2, 1, 1, 1, 1) self.lineEdit_3 = 
QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit_3.setObjectName(\"lineEdit_3\")", "QtWidgets.QDateEdit(self.gridLayoutWidget_2) self.dt_hdt.setObjectName(\"dt_hdt\") self.dt_hdt.setDateTime(QtCore.QDateTime(QtCore.QDate(2019, 1, 1), QtCore.QTime(0, 0, 0))) self.gridLayout_3.addWidget(self.dt_hdt, 3,", "self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.btn_kayit = QtWidgets.QPushButton(self.centralwidget) self.btn_kayit.setGeometry(QtCore.QRect(180, 150, 121,", "Başarılı.\") else: QMessageBox.information(self, 'BİLGİLENDİRME', \"İşlem Başarısız\") def retranslateUi(self, MainWindow): _translate", "self.lineEdit.setObjectName(\"lineEdit\") self.gridLayout_3.addWidget(self.lineEdit, 0, 1, 1, 1) self.lbl_hdt = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hdt.setObjectName(\"lbl_hdt\")", "MainWindow.setTabShape(QtWidgets.QTabWidget.Triangular) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.btn_kayit = QtWidgets.QPushButton(self.centralwidget) self.btn_kayit.setGeometry(QtCore.QRect(180, 150,", "INTO hasta(h_tc,h_ad_sad,h_cins,h_dt) VALUES (%s,%s,%s,%s)\") cursor.execute(hasta_ekle,(h_tc,h_ads,h_csyt,h_dt)) db.commit() veri = cursor.rowcount except:", "hasta_ekle = (\"INSERT INTO hasta(h_tc,h_ad_sad,h_cins,h_dt) VALUES (%s,%s,%s,%s)\") cursor.execute(hasta_ekle,(h_tc,h_ads,h_csyt,h_dt)) db.commit() veri", "db.commit() veri = cursor.rowcount except: veri=2 if (veri == 1):", "\")) self.lbl_hdt.setText(_translate(\"MainWindow\", \"Doğum Tarihi:\")) self.dt_hdt.setDisplayFormat(_translate(\"MainWindow\", \"yyyy.MM.dd\")) if __name__ == \"__main__\":", "# k_ad/k_sfire lineedit'ten alınan verileri sorguya gönderir. h_tc=self.lineEdit.text() h_ads=self.lineEdit_2.text() h_csyt=self.lineEdit_3.text()", "bağlantısı için sql cümleciği oluşturuldu. 
db = mysql.connector.connect( host=\"localhost\", user=\"root\",", "import QMessageBox,QWidget,QMainWindow from PyQt5.QtCore import Qt, QDate, QDateTime # Veritabanı", "icon1.addPixmap(QtGui.QPixmap(\"../avatar.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) self.btn_kayit.setIcon(icon1) self.btn_kayit.setObjectName(\"btn_kayit\") self.btn_kayit.clicked.connect(self.kayitekle) self.btn_cikis = QtWidgets.QPushButton(self.centralwidget) self.btn_cikis.setGeometry(QtCore.QRect(310,", "self.lbl_hadsoyad.setObjectName(\"lbl_hadsoyad\") self.gridLayout_3.addWidget(self.lbl_hadsoyad, 1, 0, 1, 1) self.lbl_hcinsiyet = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_hcinsiyet.setObjectName(\"lbl_hcinsiyet\")", "self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10, 10, 571, 128)) self.gridLayoutWidget_2.setObjectName(\"gridLayoutWidget_2\") self.gridLayout_3 = QtWidgets.QGridLayout(self.gridLayoutWidget_2) self.gridLayout_3.setContentsMargins(0, 0,", "= QtWidgets.QGridLayout(self.gridLayoutWidget_2) self.gridLayout_3.setContentsMargins(0, 0, 0, 0) self.gridLayout_3.setObjectName(\"gridLayout_3\") self.lbl_htc = QtWidgets.QLabel(self.gridLayoutWidget_2)", "Kimlik No:\")) self.lbl_hadsoyad.setText(_translate(\"MainWindow\", \"Hasta Adı Soyadı:\")) self.lbl_hcinsiyet.setText(_translate(\"MainWindow\", \"Cinsiyet: \")) self.lbl_hdt.setText(_translate(\"MainWindow\",", "db.cursor() class Ui_MainWindow2(QMainWindow): def setupUi2(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(600, 205) icon", "PyQt5 import QtCore, QtGui, QtWidgets import mysql.connector from PyQt5.QtWidgets import", "sys app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui = Ui_MainWindow2()", "file '.\\hastakayit_gui.ui' # # Created by: PyQt5 UI code generator", "QtWidgets.QWidget(self.centralwidget) self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10, 10, 571, 128)) self.gridLayoutWidget_2.setObjectName(\"gridLayoutWidget_2\") self.gridLayout_3 = 
QtWidgets.QGridLayout(self.gridLayoutWidget_2) self.gridLayout_3.setContentsMargins(0,", "1) self.lineEdit_2 = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit_2.setObjectName(\"lineEdit_2\") self.gridLayout_3.addWidget(self.lineEdit_2, 1, 1, 1, 1)", "= QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(\"../heartbeat.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On) MainWindow.setWindowIcon(icon) MainWindow.setTabShape(QtWidgets.QTabWidget.Triangular) self.centralwidget = QtWidgets.QWidget(MainWindow)", "= QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit_3.setObjectName(\"lineEdit_3\") self.gridLayout_3.addWidget(self.lineEdit_3, 2, 1, 1, 1) self.lineEdit =", "self.centralwidget.setObjectName(\"centralwidget\") self.btn_kayit = QtWidgets.QPushButton(self.centralwidget) self.btn_kayit.setGeometry(QtCore.QRect(180, 150, 121, 31)) icon1 =", "(\"INSERT INTO hasta(h_tc,h_ad_sad,h_cins,h_dt) VALUES (%s,%s,%s,%s)\") cursor.execute(hasta_ekle,(h_tc,h_ads,h_csyt,h_dt)) db.commit() veri = cursor.rowcount", "self.btn_kayit.clicked.connect(self.kayitekle) self.btn_cikis = QtWidgets.QPushButton(self.centralwidget) self.btn_cikis.setGeometry(QtCore.QRect(310, 150, 121, 31)) self.btn_cikis.setObjectName(\"btn_cikis\") self.btn_cikis.clicked.connect(self.close)", "Uygulaması-Hasta Kayıt Ekranı\")) self.btn_kayit.setText(_translate(\"MainWindow\", \"ONAYLA\")) self.btn_cikis.setText(_translate(\"MainWindow\", \"İPTAL\")) self.lbl_htc.setText(_translate(\"MainWindow\", \"TC Kimlik", "self.gridLayout_3.setObjectName(\"gridLayout_3\") self.lbl_htc = QtWidgets.QLabel(self.gridLayoutWidget_2) self.lbl_htc.setObjectName(\"lbl_htc\") self.gridLayout_3.addWidget(self.lbl_htc, 0, 0, 1, 1)", "mysql.connector.connect( host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", database=\"cilth_vt\" ) cursor = db.cursor() class", "by: PyQt5 UI code generator 5.11.3 # # WARNING! 
All", "self.lineEdit_2 = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit_2.setObjectName(\"lineEdit_2\") self.gridLayout_3.addWidget(self.lineEdit_2, 1, 1, 1, 1) self.lineEdit_3", "import Qt, QDate, QDateTime # Veritabanı bağlantısı için sql cümleciği", "Hastalıkları Tespit Uygulaması-Hasta Kayıt Ekranı\")) self.btn_kayit.setText(_translate(\"MainWindow\", \"ONAYLA\")) self.btn_cikis.setText(_translate(\"MainWindow\", \"İPTAL\")) self.lbl_htc.setText(_translate(\"MainWindow\",", "generator 5.11.3 # # WARNING! All changes made in this", "'.\\hastakayit_gui.ui' # # Created by: PyQt5 UI code generator 5.11.3" ]
[ "MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO", "a copy of this data, including any software or models", "Data without restriction, including without limitation the rights to use,", "x in assets if PRODUCT_ASSET_PATTERN.search(x.name)]) latest_build_time = build_times[-1] if build_times", "the keys to delete delete_keys = [v[0] for v in", "form, as well as any drawings, specifications, and documentation (collectively", "gh.repository(REPOSITORY_OWNER, REPOSITORY_REPO) # get list of releases releases = repository.releases()", "is %s' % (latest_build_time)) for asset in assets: match =", "notice shall be included in all copies or substantial portions", "Copyright (c) 2021, Collins Aerospace. Developed with the sponsorship of", "EVENT SHALL THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS", "% (pformat(sorted_keys))) # filter to obtain the keys to delete", "AUTH_TOKEN = os.environ['GH_TOKEN'] if 'GH_TOKEN' in os.environ.keys() else None REPOSITORY_OWNER", "assets = rel.assets() print('In release %s found assets:' % (rel.name))", "# extract keys and sort by build date release_keys =", "sorted_keys = sorted(release_keys.items(), reverse=True, key=lambda x: x[1]) print('%s' % (pformat(sorted_keys)))", "deleting the releases and corresponding tags for rel in releases:", "in the Data without restriction, including without limitation the rights", "sys from github3 import GitHub from pprint import pformat GITHUB_API", "not None else 'None')) build_times = sorted([PRODUCT_ASSET_PATTERN.search(x.name).group(1) for x in", "repository = gh.repository(REPOSITORY_OWNER, REPOSITORY_REPO) # get list of releases releases", "% (latest_build_time)) for asset in assets: match = PRODUCT_ASSET_PATTERN.search(asset.name) #", "os.environ.keys() else None REPOSITORY_OWNER = 'loonwerks' REPOSITORY_REPO = 'AGREE' PRODUCT_ASSET_PATTERN", "(pformat(sorted_keys))) # filter to obtain the keys to delete delete_keys", "None print('Lastest build 
time is %s' % (latest_build_time)) for asset", "hereby granted, free of charge, to any person obtaining a", "= gh.repository(REPOSITORY_OWNER, REPOSITORY_REPO) # get list of releases releases =", "in releases if sname in x.name} sorted_keys = sorted(release_keys.items(), reverse=True,", "manage_daily_builds(sname): print('Managing builds matching %s' % (sname)) # obtain git", "= 'loonwerks' REPOSITORY_REPO = 'AGREE' PRODUCT_ASSET_PATTERN = re.compile(r'com.rockwellcollins.atc.agree.repository-\\d+\\.\\d+\\.\\d+(-(\\d{12}))?-.*') def manage_daily_builds(sname):", "releases: print('examining rel %d from %s...' % (rel.id, str(rel.created_at))) if", "rel %d from %s...' % (rel.id, str(rel.created_at))) if rel.id in", "sort by build date release_keys = {x.id : x.created_at for", "releases: %s' % (pformat(delete_keys))) # iterate, deleting the releases and", "Agency (DARPA). Permission is hereby granted, free of charge, to", "OR OTHER DEALINGS IN THE DATA. ''' import os import", "of this data, including any software or models in source", "match = PRODUCT_ASSET_PATTERN.search(asset.name) print(' asset named %s matches %s' %", "DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "PRODUCT_ASSET_PATTERN.search(asset.name) # print(' asset named %s matches %s' % (asset.name,", "tag %s.' % (rel.id, rel.tag_name)) rel_tag_ref = repository.ref('tags/%s' % (rel.tag_name))", "assets: match = PRODUCT_ASSET_PATTERN.search(asset.name) print(' asset named %s matches %s'", "import os import re import sys from github3 import GitHub", "modify, merge, publish, distribute, sublicense, and/or sell copies of the", "OTHER DEALINGS IN THE DATA. ''' import os import re", "PRODUCT_ASSET_PATTERN = re.compile(r'com.rockwellcollins.atc.agree.repository-\\d+\\.\\d+\\.\\d+(-(\\d{12}))?-.*') def manage_daily_builds(sname): print('Managing builds matching %s' %", "2021, Collins Aerospace. 
Developed with the sponsorship of Defense Advanced", "any drawings, specifications, and documentation (collectively &quot;the Data&quot;), to deal", "limitation the rights to use, copy, modify, merge, publish, distribute,", "subject to the following conditions: The above copyright notice and", "all copies or substantial portions of the Data. THE DATA", "OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH", "whom the Data is furnished to do so, subject to", "%d and tag %s.' % (rel.id, rel.tag_name)) rel_tag_ref = repository.ref('tags/%s'", "releases = repository.releases() # extract keys and sort by build", "PROVIDED &quot;AS IS&quot;, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "(rel.id, rel.tag_name)) rel_tag_ref = repository.ref('tags/%s' % (rel.tag_name)) rel.delete() if rel_tag_ref", "CONNECTION WITH THE DATA OR THE USE OR OTHER DEALINGS", "None: print(' deleting tag %s' % (rel_tag_ref.ref)) rel_tag_ref.delete() else: #", "%s' % (sname)) # obtain git handle gh = GitHub(GITHUB_API,", "sponsorship of Defense Advanced Research Projects Agency (DARPA). Permission is", "of the Data, and to permit persons to whom the", "assets:' % (rel.name)) for asset in assets: match = PRODUCT_ASSET_PATTERN.search(asset.name)", "in releases: print('examining rel %d from %s...' % (rel.id, str(rel.created_at)))", "latest_build_time = build_times[-1] if build_times else None print('Lastest build time", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A", "as well as any drawings, specifications, and documentation (collectively &quot;the", "Projects Agency (DARPA). Permission is hereby granted, free of charge,", "of the Data. 
THE DATA IS PROVIDED &quot;AS IS&quot;, WITHOUT", "{x.id : x.created_at for x in releases if sname in", "print('Deleting releases: %s' % (pformat(delete_keys))) # iterate, deleting the releases", "included in all copies or substantial portions of the Data.", "build time is %s' % (latest_build_time)) for asset in assets:", "= match.group(1) if asset_build_time != latest_build_time: print('deleting stale asset %s'", "'AGREE' PRODUCT_ASSET_PATTERN = re.compile(r'com.rockwellcollins.atc.agree.repository-\\d+\\.\\d+\\.\\d+(-(\\d{12}))?-.*') def manage_daily_builds(sname): print('Managing builds matching %s'", "= rel.assets() print('In release %s found assets:' % (rel.name)) for", "re import sys from github3 import GitHub from pprint import", "for v in sorted_keys[2:]] print('Deleting releases: %s' % (pformat(delete_keys))) #", "copy, modify, merge, publish, distribute, sublicense, and/or sell copies of", "matches %s' % (asset.name, match.group(1) if match is not None", "source or binary form, as well as any drawings, specifications,", "!= latest_build_time: print('deleting stale asset %s' % (asset.name)) asset.delete() if", "CONTRIBUTORS, OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "delete delete_keys = [v[0] for v in sorted_keys[2:]] print('Deleting releases:", "Collins Aerospace. Developed with the sponsorship of Defense Advanced Research", "(rel.name)) for asset in assets: match = PRODUCT_ASSET_PATTERN.search(asset.name) print(' asset", "the releases and corresponding tags for rel in releases: print('examining", "in all copies or substantial portions of the Data. THE", "print('In release %s found assets:' % (rel.name)) for asset in", "REPOSITORY_REPO) # get list of releases releases = repository.releases() #", "furnished to do so, subject to the following conditions: The", "deleting release id %d and tag %s.' 
% (rel.id, rel.tag_name))", "OF OR IN CONNECTION WITH THE DATA OR THE USE", "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "print('Managing builds matching %s' % (sname)) # obtain git handle", "else None print('Lastest build time is %s' % (latest_build_time)) for", "release id %d and tag %s.' % (rel.id, rel.tag_name)) rel_tag_ref", "%s matches %s' % (asset.name, match.group(1) if match is not", "THE DATA IS PROVIDED &quot;AS IS&quot;, WITHOUT WARRANTY OF ANY", "iterate, deleting the releases and corresponding tags for rel in", "Defense Advanced Research Projects Agency (DARPA). Permission is hereby granted,", "the Data without restriction, including without limitation the rights to", "= os.environ['GH_TOKEN'] if 'GH_TOKEN' in os.environ.keys() else None REPOSITORY_OWNER =", "be included in all copies or substantial portions of the", "rel_tag_ref = repository.ref('tags/%s' % (rel.tag_name)) rel.delete() if rel_tag_ref is not", "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "assets: match = PRODUCT_ASSET_PATTERN.search(asset.name) # print(' asset named %s matches", "by build date release_keys = {x.id : x.created_at for x", "from %s...' % (rel.id, str(rel.created_at))) if rel.id in delete_keys and", "DATA. ''' import os import re import sys from github3", "in source or binary form, as well as any drawings,", "print('examining rel %d from %s...' 
% (rel.id, str(rel.created_at))) if rel.id", "pformat GITHUB_API = 'https://api.github.com/repos' GITHUB_RELEASES = 'releases' AUTH_TOKEN = os.environ['GH_TOKEN']", "keys and sort by build date release_keys = {x.id :", "x.name} sorted_keys = sorted(release_keys.items(), reverse=True, key=lambda x: x[1]) print('%s' %", "match = PRODUCT_ASSET_PATTERN.search(asset.name) # print(' asset named %s matches %s'", "(sname)) # obtain git handle gh = GitHub(GITHUB_API, token=AUTH_TOKEN) repository", "import re import sys from github3 import GitHub from pprint", "(rel_tag_ref.ref)) rel_tag_ref.delete() else: # Look for stale files in the", "else 'None')) if match is not None: asset_build_time = match.group(1)", "= sorted([PRODUCT_ASSET_PATTERN.search(x.name).group(1) for x in assets if PRODUCT_ASSET_PATTERN.search(x.name)]) latest_build_time =", "to permit persons to whom the Data is furnished to", "the following conditions: The above copyright notice and this permission", "stale asset %s' % (asset.name)) asset.delete() if __name__ == '__main__':", "is not None: print(' deleting tag %s' % (rel_tag_ref.ref)) rel_tag_ref.delete()", "x in releases if sname in x.name} sorted_keys = sorted(release_keys.items(),", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF", "= PRODUCT_ASSET_PATTERN.search(asset.name) print(' asset named %s matches %s' % (asset.name,", "% (rel.id, str(rel.created_at))) if rel.id in delete_keys and rel.tag_name is", "person obtaining a copy of this data, including any software", "the rights to use, copy, modify, merge, publish, distribute, sublicense,", "os import re import sys from github3 import GitHub from", "match is not None else 'None')) build_times = sorted([PRODUCT_ASSET_PATTERN.search(x.name).group(1) for", "build_times else None print('Lastest build time is %s' % (latest_build_time))", "THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE LIABLE", "os.environ['GH_TOKEN'] if 'GH_TOKEN' in os.environ.keys() else None 
REPOSITORY_OWNER = 'loonwerks'", "not None: print(' deleting release id %d and tag %s.'", "in x.name} sorted_keys = sorted(release_keys.items(), reverse=True, key=lambda x: x[1]) print('%s'", "def manage_daily_builds(sname): print('Managing builds matching %s' % (sname)) # obtain", "the Data, and to permit persons to whom the Data", "%s' % (asset.name, match.group(1) if match is not None else", "'None')) if match is not None: asset_build_time = match.group(1) if", "notice and this permission notice shall be included in all", "is hereby granted, free of charge, to any person obtaining", "models in source or binary form, as well as any", "&quot;AS IS&quot;, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,", "token=AUTH_TOKEN) repository = gh.repository(REPOSITORY_OWNER, REPOSITORY_REPO) # get list of releases", "(asset.name, match.group(1) if match is not None else 'None')) if", "in assets: match = PRODUCT_ASSET_PATTERN.search(asset.name) print(' asset named %s matches", "(pformat(delete_keys))) # iterate, deleting the releases and corresponding tags for", "tag %s' % (rel_tag_ref.ref)) rel_tag_ref.delete() else: # Look for stale", "NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS,", "python3 ''' Copyright (c) 2021, Collins Aerospace. 
Developed with the", "to the following conditions: The above copyright notice and this", "conditions: The above copyright notice and this permission notice shall", "THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND", "% (rel.id, rel.tag_name)) rel_tag_ref = repository.ref('tags/%s' % (rel.tag_name)) rel.delete() if", "releases releases = repository.releases() # extract keys and sort by", "do so, subject to the following conditions: The above copyright", "asset in assets: match = PRODUCT_ASSET_PATTERN.search(asset.name) print(' asset named %s", "v in sorted_keys[2:]] print('Deleting releases: %s' % (pformat(delete_keys))) # iterate,", "github3 import GitHub from pprint import pformat GITHUB_API = 'https://api.github.com/repos'", "any person obtaining a copy of this data, including any", "= [v[0] for v in sorted_keys[2:]] print('Deleting releases: %s' %", "matching %s' % (sname)) # obtain git handle gh =", "OR THE USE OR OTHER DEALINGS IN THE DATA. '''", "to whom the Data is furnished to do so, subject", "% (sname)) # obtain git handle gh = GitHub(GITHUB_API, token=AUTH_TOKEN)", "rel.tag_name is not None: print(' deleting release id %d and", "build_times[-1] if build_times else None print('Lastest build time is %s'", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT", "copies or substantial portions of the Data. THE DATA IS", "print(' deleting tag %s' % (rel_tag_ref.ref)) rel_tag_ref.delete() else: # Look", "None: print(' deleting release id %d and tag %s.' %", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL", "if build_times else None print('Lastest build time is %s' %", "if sname in x.name} sorted_keys = sorted(release_keys.items(), reverse=True, key=lambda x:", "including without limitation the rights to use, copy, modify, merge,", "else 'None')) build_times = sorted([PRODUCT_ASSET_PATTERN.search(x.name).group(1) for x in assets if", "print('Lastest build time is %s' % (latest_build_time)) for asset in", "gh = GitHub(GITHUB_API, token=AUTH_TOKEN) repository = gh.repository(REPOSITORY_OWNER, REPOSITORY_REPO) # get", "= build_times[-1] if build_times else None print('Lastest build time is", "merge, publish, distribute, sublicense, and/or sell copies of the Data,", "if asset_build_time != latest_build_time: print('deleting stale asset %s' % (asset.name))", "(DARPA). Permission is hereby granted, free of charge, to any", "reverse=True, key=lambda x: x[1]) print('%s' % (pformat(sorted_keys))) # filter to", "list of releases releases = repository.releases() # extract keys and", "(rel.id, str(rel.created_at))) if rel.id in delete_keys and rel.tag_name is not", "publish, distribute, sublicense, and/or sell copies of the Data, and", "if rel.id in delete_keys and rel.tag_name is not None: print('", "None else 'None')) if match is not None: asset_build_time =", "asset in assets: match = PRODUCT_ASSET_PATTERN.search(asset.name) # print(' asset named", "without limitation the rights to use, copy, modify, merge, publish,", "repository.ref('tags/%s' % (rel.tag_name)) rel.delete() if rel_tag_ref is not None: print('", "match is not None else 'None')) if match is not", "% (rel.name)) for asset in assets: match = PRODUCT_ASSET_PATTERN.search(asset.name) print('", "%s' % (pformat(delete_keys))) # iterate, deleting the releases and corresponding", "IN CONNECTION WITH THE DATA OR THE USE OR OTHER", "None REPOSITORY_OWNER = 'loonwerks' REPOSITORY_REPO = 'AGREE' PRODUCT_ASSET_PATTERN = 
re.compile(r'com.rockwellcollins.atc.agree.repository-\\d+\\.\\d+\\.\\d+(-(\\d{12}))?-.*')", "rel in releases: print('examining rel %d from %s...' % (rel.id,", "restriction, including without limitation the rights to use, copy, modify,", "= sorted(release_keys.items(), reverse=True, key=lambda x: x[1]) print('%s' % (pformat(sorted_keys))) #", "releases and corresponding tags for rel in releases: print('examining rel", "not None else 'None')) if match is not None: asset_build_time", "sublicense, and/or sell copies of the Data, and to permit", "= repository.releases() # extract keys and sort by build date", "OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "else None REPOSITORY_OWNER = 'loonwerks' REPOSITORY_REPO = 'AGREE' PRODUCT_ASSET_PATTERN =", "OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "FROM, OUT OF OR IN CONNECTION WITH THE DATA OR", "'https://api.github.com/repos' GITHUB_RELEASES = 'releases' AUTH_TOKEN = os.environ['GH_TOKEN'] if 'GH_TOKEN' in", "asset_build_time != latest_build_time: print('deleting stale asset %s' % (asset.name)) asset.delete()", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR", "portions of the Data. THE DATA IS PROVIDED &quot;AS IS&quot;,", "GITHUB_API = 'https://api.github.com/repos' GITHUB_RELEASES = 'releases' AUTH_TOKEN = os.environ['GH_TOKEN'] if", "%s' % (latest_build_time)) for asset in assets: match = PRODUCT_ASSET_PATTERN.search(asset.name)", "if match is not None else 'None')) if match is", "Advanced Research Projects Agency (DARPA). 
Permission is hereby granted, free", "data, including any software or models in source or binary", "or models in source or binary form, as well as", "ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO", "'loonwerks' REPOSITORY_REPO = 'AGREE' PRODUCT_ASSET_PATTERN = re.compile(r'com.rockwellcollins.atc.agree.repository-\\d+\\.\\d+\\.\\d+(-(\\d{12}))?-.*') def manage_daily_builds(sname): print('Managing", "in os.environ.keys() else None REPOSITORY_OWNER = 'loonwerks' REPOSITORY_REPO = 'AGREE'", "# iterate, deleting the releases and corresponding tags for rel", "git handle gh = GitHub(GITHUB_API, token=AUTH_TOKEN) repository = gh.repository(REPOSITORY_OWNER, REPOSITORY_REPO)", "from github3 import GitHub from pprint import pformat GITHUB_API =", "is not None: asset_build_time = match.group(1) if asset_build_time != latest_build_time:", "latest_build_time: print('deleting stale asset %s' % (asset.name)) asset.delete() if __name__", "substantial portions of the Data. THE DATA IS PROVIDED &quot;AS", "for asset in assets: match = PRODUCT_ASSET_PATTERN.search(asset.name) # print(' asset", "Data&quot;), to deal in the Data without restriction, including without", "as any drawings, specifications, and documentation (collectively &quot;the Data&quot;), to", "%s' % (rel_tag_ref.ref)) rel_tag_ref.delete() else: # Look for stale files", "if 'GH_TOKEN' in os.environ.keys() else None REPOSITORY_OWNER = 'loonwerks' REPOSITORY_REPO", "specifications, and documentation (collectively &quot;the Data&quot;), to deal in the", "build_times = sorted([PRODUCT_ASSET_PATTERN.search(x.name).group(1) for x in assets if PRODUCT_ASSET_PATTERN.search(x.name)]) latest_build_time", "asset named %s matches %s' % (asset.name, match.group(1) if match", "any software or models in source or binary form, as", "key=lambda x: x[1]) print('%s' % (pformat(sorted_keys))) # filter to obtain", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "id %d and tag %s.' 
% (rel.id, rel.tag_name)) rel_tag_ref =", "is not None else 'None')) if match is not None:", "sorted([PRODUCT_ASSET_PATTERN.search(x.name).group(1) for x in assets if PRODUCT_ASSET_PATTERN.search(x.name)]) latest_build_time = build_times[-1]", "sorted_keys[2:]] print('Deleting releases: %s' % (pformat(delete_keys))) # iterate, deleting the", "PRODUCT_ASSET_PATTERN.search(x.name)]) latest_build_time = build_times[-1] if build_times else None print('Lastest build", "# obtain git handle gh = GitHub(GITHUB_API, token=AUTH_TOKEN) repository =", "match.group(1) if asset_build_time != latest_build_time: print('deleting stale asset %s' %", "(latest_build_time)) for asset in assets: match = PRODUCT_ASSET_PATTERN.search(asset.name) # print('", "and tag %s.' % (rel.id, rel.tag_name)) rel_tag_ref = repository.ref('tags/%s' %", "AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS, SPONSORS, DEVELOPERS,", "% (rel.tag_name)) rel.delete() if rel_tag_ref is not None: print(' deleting", "WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT", "not None: print(' deleting tag %s' % (rel_tag_ref.ref)) rel_tag_ref.delete() else:", "sname in x.name} sorted_keys = sorted(release_keys.items(), reverse=True, key=lambda x: x[1])", "''' import os import re import sys from github3 import", "binary form, as well as any drawings, specifications, and documentation", "and sort by build date release_keys = {x.id : x.created_at", "= 'releases' AUTH_TOKEN = os.environ['GH_TOKEN'] if 'GH_TOKEN' in os.environ.keys() else", "TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE", "and/or sell copies of the Data, and to permit persons", "extract keys and sort by build date release_keys = {x.id", "stale files in the release assets = rel.assets() print('In release", "'releases' AUTH_TOKEN = os.environ['GH_TOKEN'] if 'GH_TOKEN' in os.environ.keys() else None", "for x in releases if sname in x.name} sorted_keys =", "obtain git handle gh = GitHub(GITHUB_API, token=AUTH_TOKEN) repository 
= gh.repository(REPOSITORY_OWNER,", "%s...' % (rel.id, str(rel.created_at))) if rel.id in delete_keys and rel.tag_name", "% (pformat(delete_keys))) # iterate, deleting the releases and corresponding tags", "for asset in assets: match = PRODUCT_ASSET_PATTERN.search(asset.name) print(' asset named", "release %s found assets:' % (rel.name)) for asset in assets:", "release assets = rel.assets() print('In release %s found assets:' %", "None: asset_build_time = match.group(1) if asset_build_time != latest_build_time: print('deleting stale", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS, SPONSORS,", "so, subject to the following conditions: The above copyright notice", "in assets: match = PRODUCT_ASSET_PATTERN.search(asset.name) # print(' asset named %s", "match.group(1) if match is not None else 'None')) build_times =", "to do so, subject to the following conditions: The above", "print(' asset named %s matches %s' % (asset.name, match.group(1) if", "WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "= GitHub(GITHUB_API, token=AUTH_TOKEN) repository = gh.repository(REPOSITORY_OWNER, REPOSITORY_REPO) # get list", "obtain the keys to delete delete_keys = [v[0] for v", "following conditions: The above copyright notice and this permission notice", "DATA OR THE USE OR OTHER DEALINGS IN THE DATA.", "documentation (collectively &quot;the Data&quot;), to deal in the Data without", "well as any drawings, specifications, and documentation (collectively &quot;the Data&quot;),", "Data. 
THE DATA IS PROVIDED &quot;AS IS&quot;, WITHOUT WARRANTY OF", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "= 'https://api.github.com/repos' GITHUB_RELEASES = 'releases' AUTH_TOKEN = os.environ['GH_TOKEN'] if 'GH_TOKEN'", "x.created_at for x in releases if sname in x.name} sorted_keys", "'GH_TOKEN' in os.environ.keys() else None REPOSITORY_OWNER = 'loonwerks' REPOSITORY_REPO =", "permission notice shall be included in all copies or substantial", "in assets if PRODUCT_ASSET_PATTERN.search(x.name)]) latest_build_time = build_times[-1] if build_times else", "copies of the Data, and to permit persons to whom", "rel.delete() if rel_tag_ref is not None: print(' deleting tag %s'", "Research Projects Agency (DARPA). Permission is hereby granted, free of", "of Defense Advanced Research Projects Agency (DARPA). Permission is hereby", "or binary form, as well as any drawings, specifications, and", "REPOSITORY_REPO = 'AGREE' PRODUCT_ASSET_PATTERN = re.compile(r'com.rockwellcollins.atc.agree.repository-\\d+\\.\\d+\\.\\d+(-(\\d{12}))?-.*') def manage_daily_builds(sname): print('Managing builds", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER", "rel.assets() print('In release %s found assets:' % (rel.name)) for asset", "asset %s' % (asset.name)) asset.delete() if __name__ == '__main__': manage_daily_builds(sys.argv[1])", "GitHub(GITHUB_API, token=AUTH_TOKEN) repository = gh.repository(REPOSITORY_OWNER, REPOSITORY_REPO) # get list of", "if PRODUCT_ASSET_PATTERN.search(x.name)]) latest_build_time = build_times[-1] if build_times else None print('Lastest", "match.group(1) if match is not None else 'None')) if match", "get list of releases releases = repository.releases() # extract keys", "Data is furnished to do so, subject to the following", "[v[0] for v in sorted_keys[2:]] print('Deleting releases: %s' % (pformat(delete_keys)))", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS", "not None: asset_build_time = 
match.group(1) if asset_build_time != latest_build_time: print('deleting", "drawings, specifications, and documentation (collectively &quot;the Data&quot;), to deal in", ": x.created_at for x in releases if sname in x.name}", "#!/usr/bin/env python3 ''' Copyright (c) 2021, Collins Aerospace. Developed with", "print('%s' % (pformat(sorted_keys))) # filter to obtain the keys to", "IS&quot;, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,", "is not None else 'None')) build_times = sorted([PRODUCT_ASSET_PATTERN.search(x.name).group(1) for x", "match is not None: asset_build_time = match.group(1) if asset_build_time !=", "copyright notice and this permission notice shall be included in", "the sponsorship of Defense Advanced Research Projects Agency (DARPA). Permission", "<filename>.travis/manage_daily_builds.py<gh_stars>1-10 #!/usr/bin/env python3 ''' Copyright (c) 2021, Collins Aerospace. Developed", "USE OR OTHER DEALINGS IN THE DATA. ''' import os", "for rel in releases: print('examining rel %d from %s...' %", "and corresponding tags for rel in releases: print('examining rel %d", "delete_keys = [v[0] for v in sorted_keys[2:]] print('Deleting releases: %s'", "and rel.tag_name is not None: print(' deleting release id %d", "= PRODUCT_ASSET_PATTERN.search(asset.name) # print(' asset named %s matches %s' %", "import sys from github3 import GitHub from pprint import pformat", "IN THE DATA. ''' import os import re import sys", "OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED", "for stale files in the release assets = rel.assets() print('In", "PRODUCT_ASSET_PATTERN.search(asset.name) print(' asset named %s matches %s' % (asset.name, match.group(1)", "from pprint import pformat GITHUB_API = 'https://api.github.com/repos' GITHUB_RELEASES = 'releases'", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "(c) 2021, Collins Aerospace. 
Developed with the sponsorship of Defense", "Look for stale files in the release assets = rel.assets()", "sell copies of the Data, and to permit persons to", "with the sponsorship of Defense Advanced Research Projects Agency (DARPA).", "tags for rel in releases: print('examining rel %d from %s...'", "software or models in source or binary form, as well", "(rel.tag_name)) rel.delete() if rel_tag_ref is not None: print(' deleting tag", "build date release_keys = {x.id : x.created_at for x in", "PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS,", "permit persons to whom the Data is furnished to do", "date release_keys = {x.id : x.created_at for x in releases", "Data, and to permit persons to whom the Data is", "''' Copyright (c) 2021, Collins Aerospace. Developed with the sponsorship", "keys to delete delete_keys = [v[0] for v in sorted_keys[2:]]", "sorted(release_keys.items(), reverse=True, key=lambda x: x[1]) print('%s' % (pformat(sorted_keys))) # filter", "str(rel.created_at))) if rel.id in delete_keys and rel.tag_name is not None:", "SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN", "%s.' % (rel.id, rel.tag_name)) rel_tag_ref = repository.ref('tags/%s' % (rel.tag_name)) rel.delete()", "Aerospace. Developed with the sponsorship of Defense Advanced Research Projects", "for x in assets if PRODUCT_ASSET_PATTERN.search(x.name)]) latest_build_time = build_times[-1] if", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "THE USE OR OTHER DEALINGS IN THE DATA. 
''' import", "assets if PRODUCT_ASSET_PATTERN.search(x.name)]) latest_build_time = build_times[-1] if build_times else None", "persons to whom the Data is furnished to do so,", "ARISING FROM, OUT OF OR IN CONNECTION WITH THE DATA", "and documentation (collectively &quot;the Data&quot;), to deal in the Data", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR", "found assets:' % (rel.name)) for asset in assets: match =", "if match is not None: asset_build_time = match.group(1) if asset_build_time", "copy of this data, including any software or models in", "to obtain the keys to delete delete_keys = [v[0] for", "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN", "deal in the Data without restriction, including without limitation the", "= re.compile(r'com.rockwellcollins.atc.agree.repository-\\d+\\.\\d+\\.\\d+(-(\\d{12}))?-.*') def manage_daily_builds(sname): print('Managing builds matching %s' % (sname))", "rel_tag_ref is not None: print(' deleting tag %s' % (rel_tag_ref.ref))", "CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF", "including any software or models in source or binary form,", "this permission notice shall be included in all copies or", "= repository.ref('tags/%s' % (rel.tag_name)) rel.delete() if rel_tag_ref is not None:", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN", "charge, to any person obtaining a copy of this data,", "above copyright notice and this permission notice shall be included", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE", "in delete_keys and rel.tag_name is not None: print(' deleting release", "of releases releases = repository.releases() # extract keys and sort", "x[1]) print('%s' % (pformat(sorted_keys))) # filter to obtain the keys", "% (rel_tag_ref.ref)) rel_tag_ref.delete() else: # Look for stale files in", "NO EVENT SHALL THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT", "to deal in the Data without restriction, including without limitation", "DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "OR IN CONNECTION WITH THE DATA OR THE USE OR", "DATA IS PROVIDED &quot;AS IS&quot;, WITHOUT WARRANTY OF ANY KIND,", "re.compile(r'com.rockwellcollins.atc.agree.repository-\\d+\\.\\d+\\.\\d+(-(\\d{12}))?-.*') def manage_daily_builds(sname): print('Managing builds matching %s' % (sname)) #", "KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "Developed with the sponsorship of Defense Advanced Research Projects Agency", "is furnished to do so, subject to the following conditions:", "release_keys = {x.id : x.created_at for x in releases if", "builds matching %s' % (sname)) # obtain git handle gh", "if rel_tag_ref is not None: print(' deleting tag %s' %", "and to permit persons to whom the Data is furnished", "named %s matches %s' % (asset.name, match.group(1) if match is", "asset_build_time = match.group(1) if asset_build_time != latest_build_time: print('deleting stale asset", "shall be included in all copies or substantial portions of", "or substantial portions of the Data. 
THE DATA IS PROVIDED", "rel_tag_ref.delete() else: # Look for stale files in the release", "FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN", "# get list of releases releases = repository.releases() # extract", "in sorted_keys[2:]] print('Deleting releases: %s' % (pformat(delete_keys))) # iterate, deleting", "IS PROVIDED &quot;AS IS&quot;, WITHOUT WARRANTY OF ANY KIND, EXPRESS", "and this permission notice shall be included in all copies", "GitHub from pprint import pformat GITHUB_API = 'https://api.github.com/repos' GITHUB_RELEASES =", "distribute, sublicense, and/or sell copies of the Data, and to", "%d from %s...' % (rel.id, str(rel.created_at))) if rel.id in delete_keys", "REPOSITORY_OWNER = 'loonwerks' REPOSITORY_REPO = 'AGREE' PRODUCT_ASSET_PATTERN = re.compile(r'com.rockwellcollins.atc.agree.repository-\\d+\\.\\d+\\.\\d+(-(\\d{12}))?-.*') def", "rel.tag_name)) rel_tag_ref = repository.ref('tags/%s' % (rel.tag_name)) rel.delete() if rel_tag_ref is", "import GitHub from pprint import pformat GITHUB_API = 'https://api.github.com/repos' GITHUB_RELEASES", "GITHUB_RELEASES = 'releases' AUTH_TOKEN = os.environ['GH_TOKEN'] if 'GH_TOKEN' in os.environ.keys()", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT", "delete_keys and rel.tag_name is not None: print(' deleting release id", "= 'AGREE' PRODUCT_ASSET_PATTERN = re.compile(r'com.rockwellcollins.atc.agree.repository-\\d+\\.\\d+\\.\\d+(-(\\d{12}))?-.*') def manage_daily_builds(sname): print('Managing builds matching", "&quot;the Data&quot;), to deal in the Data without restriction, including", "OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE", "free of charge, to any person obtaining a copy of", "OUT OF OR IN CONNECTION WITH THE DATA OR THE", "the release assets = rel.assets() print('In release %s found assets:'", "# filter to obtain the keys to delete delete_keys =", "is not None: print(' deleting release id %d and tag", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", 
"'None')) build_times = sorted([PRODUCT_ASSET_PATTERN.search(x.name).group(1) for x in assets if PRODUCT_ASSET_PATTERN.search(x.name)])", "OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "obtaining a copy of this data, including any software or", "rights to use, copy, modify, merge, publish, distribute, sublicense, and/or", "pprint import pformat GITHUB_API = 'https://api.github.com/repos' GITHUB_RELEASES = 'releases' AUTH_TOKEN", "releases if sname in x.name} sorted_keys = sorted(release_keys.items(), reverse=True, key=lambda", "print(' deleting release id %d and tag %s.' % (rel.id,", "rel.id in delete_keys and rel.tag_name is not None: print(' deleting", "corresponding tags for rel in releases: print('examining rel %d from", "without restriction, including without limitation the rights to use, copy,", "time is %s' % (latest_build_time)) for asset in assets: match", "DEALINGS IN THE DATA. ''' import os import re import", "TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION", "files in the release assets = rel.assets() print('In release %s", "x: x[1]) print('%s' % (pformat(sorted_keys))) # filter to obtain the", "%s found assets:' % (rel.name)) for asset in assets: match", "AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE LIABLE FOR", "None else 'None')) build_times = sorted([PRODUCT_ASSET_PATTERN.search(x.name).group(1) for x in assets", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "filter to obtain the keys to delete delete_keys = [v[0]", "IN NO EVENT SHALL THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR", "else: # Look for stale files in the release assets", "= {x.id : x.created_at for x in releases if sname", "# Look for stale files in the release assets =", "WITH THE DATA OR THE USE OR OTHER DEALINGS IN", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR", "granted, free of charge, to any person obtaining a copy", 
"THE DATA. ''' import os import re import sys from", "to delete delete_keys = [v[0] for v in sorted_keys[2:]] print('Deleting", "SHALL THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE", "print('deleting stale asset %s' % (asset.name)) asset.delete() if __name__ ==", "% (asset.name, match.group(1) if match is not None else 'None'))", "of charge, to any person obtaining a copy of this", "(collectively &quot;the Data&quot;), to deal in the Data without restriction,", "handle gh = GitHub(GITHUB_API, token=AUTH_TOKEN) repository = gh.repository(REPOSITORY_OWNER, REPOSITORY_REPO) #", "Permission is hereby granted, free of charge, to any person", "repository.releases() # extract keys and sort by build date release_keys", "(asset.name, match.group(1) if match is not None else 'None')) build_times", "THE DATA OR THE USE OR OTHER DEALINGS IN THE", "the Data. THE DATA IS PROVIDED &quot;AS IS&quot;, WITHOUT WARRANTY", "The above copyright notice and this permission notice shall be", "deleting tag %s' % (rel_tag_ref.ref)) rel_tag_ref.delete() else: # Look for", "to any person obtaining a copy of this data, including", "the Data is furnished to do so, subject to the", "# print(' asset named %s matches %s' % (asset.name, match.group(1)", "in the release assets = rel.assets() print('In release %s found", "import pformat GITHUB_API = 'https://api.github.com/repos' GITHUB_RELEASES = 'releases' AUTH_TOKEN =", "ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION", "this data, including any software or models in source or", "if match is not None else 'None')) build_times = sorted([PRODUCT_ASSET_PATTERN.search(x.name).group(1)", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING" ]
[ "# Drop the 'folder' column, which contains a system-dependent path", "False, \"bucket_3\": False, \"bucket_4\": False, \"none_all_good\": False, }, \"2_1_5\": {", "under the MIT license found in the # LICENSE file", "the root directory of this source tree. \"\"\" Test components", "import check_stdout class TestAnalysis(unittest.TestCase): \"\"\" Test the analysis code for", "the saved results file is what it should be sort_columns", "a file of gold annotations gold_annotations = { \"1_0_5\": {", "f: json.dump(gold_annotations, f) # Run compilation of results parser =", "TurnAnnotationsStaticResultsCompiler, ) from parlai.crowdsourcing.utils.tests import check_stdout class TestAnalysis(unittest.TestCase): \"\"\" Test", "LICENSE file in the root directory of this source tree.", "f'\\n\\n\\tActual results:\\n{actual_results.to_csv()}' ) except ImportError: pass if __name__ == \"__main__\":", "expected_results = ( pd.read_csv(expected_results_path) .drop('folder', axis=1) .sort_values(sort_columns) .reset_index(drop=True) ) #", "compiler.compile_results() actual_stdout = output.getvalue() # Check the output against what", "Run compilation of results parser = TurnAnnotationsStaticResultsCompiler.setup_args() parser.set_defaults( **{ 'results_folders':", "actual_results = ( pd.read_csv(actual_results_path) .drop('folder', axis=1) .sort_values(sort_columns) .reset_index(drop=True) ) if", "column, which contains a system-dependent path string actual_results_rel_path = [", "Check that the saved results file is what it should", "unittest import pandas as pd import parlai.utils.testing as testing_utils try:", "affiliates. 
# This source code is licensed under the MIT", "the MIT license found in the # LICENSE file in", "as pd import parlai.utils.testing as testing_utils try: from parlai.crowdsourcing.tasks.turn_annotations_static.analysis.compile_results import", "as tmpdir: # Define expected stdout # Paths analysis_samples_folder =", "\"none_all_good\": False, }, \"2_0_5\": { \"bucket_0\": False, \"bucket_1\": True, \"bucket_2\":", "= { \"1_0_5\": { \"bucket_0\": False, \"bucket_1\": False, \"bucket_2\": False,", "license found in the # LICENSE file in the root", "tasks. \"\"\" import json import os import unittest import pandas", "pd.read_csv(expected_results_path) .drop('folder', axis=1) .sort_values(sort_columns) .reset_index(drop=True) ) # Drop the 'folder'", "analysis code for the static turn annotations task. \"\"\" def", "\"bucket_1\": False, \"bucket_2\": False, \"bucket_3\": False, \"bucket_4\": True, \"none_all_good\": False,", "parlai.crowdsourcing.tasks.turn_annotations_static.analysis.compile_results import ( TurnAnnotationsStaticResultsCompiler, ) from parlai.crowdsourcing.utils.tests import check_stdout class", "False, }, \"2_0_5\": { \"bucket_0\": False, \"bucket_1\": True, \"bucket_2\": False,", "compiling results on a dummy set of data. \"\"\" with", "False, \"bucket_1\": False, \"bucket_2\": False, \"bucket_3\": False, \"bucket_4\": False, \"none_all_good\":", "expected_results_path = os.path.join( analysis_outputs_folder, 'expected_results.csv' ) expected_results = ( pd.read_csv(expected_results_path)", "testing_utils.capture_output() as output: compiler = TurnAnnotationsStaticResultsCompiler(vars(args)) compiler.NUM_SUBTASKS = 3 compiler.NUM_ANNOTATIONS", "\"\"\" import json import os import unittest import pandas as", "against what it should be check_stdout( actual_stdout=actual_stdout, expected_stdout_path=expected_stdout_path, ) #", "in the root directory of this source tree. \"\"\" Test", "\"\"\" Test components of specific crowdsourcing tasks. 
\"\"\" import json", "actual_stdout = output.getvalue() # Check the output against what it", "\"bucket_2\": False, \"bucket_3\": False, \"bucket_4\": False, \"none_all_good\": True, }, \"1_1_5\":", "class TestAnalysis(unittest.TestCase): \"\"\" Test the analysis code for the static", "= ['hit_id', 'worker_id', 'conversation_id', 'turn_idx'] expected_results_path = os.path.join( analysis_outputs_folder, 'expected_results.csv'", "it should be sort_columns = ['hit_id', 'worker_id', 'conversation_id', 'turn_idx'] expected_results_path", "is what it should be sort_columns = ['hit_id', 'worker_id', 'conversation_id',", "from parlai.crowdsourcing.tasks.turn_annotations_static.analysis.compile_results import ( TurnAnnotationsStaticResultsCompiler, ) from parlai.crowdsourcing.utils.tests import check_stdout", "parlai.crowdsourcing.utils.tests import check_stdout class TestAnalysis(unittest.TestCase): \"\"\" Test the analysis code", "False, \"bucket_4\": False, \"none_all_good\": False, }, \"2_1_5\": { \"bucket_0\": False,", "in os.listdir(tmpdir) if obj.startswith('results') ][0] actual_results_path = os.path.join(tmpdir, actual_results_rel_path) actual_results", "of gold annotations gold_annotations = { \"1_0_5\": { \"bucket_0\": False,", "\"\"\" Test compiling results on a dummy set of data.", "**{ 'results_folders': analysis_samples_folder, 'output_folder': tmpdir, 'onboarding_in_flight_data_file': os.path.join( analysis_samples_folder, 'onboarding_in_flight.jsonl' ),", "axis=1) .sort_values(sort_columns) .reset_index(drop=True) ) # Drop the 'folder' column, which", "\"bucket_0\": False, \"bucket_1\": False, \"bucket_2\": False, \"bucket_3\": False, \"bucket_4\": True,", "for the static turn annotations task. 
\"\"\" def test_compile_results(self): \"\"\"", "False, \"bucket_2\": False, \"bucket_3\": False, \"bucket_4\": False, \"none_all_good\": True, },", "analysis_outputs_folder, 'test_stdout.txt' ) temp_gold_annotations_path = os.path.join( tmpdir, 'gold_annotations.json' ) #", "\"bucket_2\": False, \"bucket_3\": False, \"bucket_4\": True, \"none_all_good\": False, }, \"2_0_5\":", "= os.path.join(tmpdir, actual_results_rel_path) actual_results = ( pd.read_csv(actual_results_path) .drop('folder', axis=1) .sort_values(sort_columns)", "Check the output against what it should be check_stdout( actual_stdout=actual_stdout,", "if not actual_results.equals(expected_results): raise ValueError( f'\\n\\n\\tExpected results:\\n{expected_results.to_csv()}' f'\\n\\n\\tActual results:\\n{actual_results.to_csv()}' )", "False, \"bucket_1\": False, \"bucket_2\": False, \"bucket_3\": False, \"bucket_4\": True, \"none_all_good\":", "not actual_results.equals(expected_results): raise ValueError( f'\\n\\n\\tExpected results:\\n{expected_results.to_csv()}' f'\\n\\n\\tActual results:\\n{actual_results.to_csv()}' ) except", "\"1_0_5\": { \"bucket_0\": False, \"bucket_1\": False, \"bucket_2\": False, \"bucket_3\": False,", "( pd.read_csv(actual_results_path) .drop('folder', axis=1) .sort_values(sort_columns) .reset_index(drop=True) ) if not actual_results.equals(expected_results):", "from parlai.crowdsourcing.utils.tests import check_stdout class TestAnalysis(unittest.TestCase): \"\"\" Test the analysis", "= 3 compiler.NUM_ANNOTATIONS = 3 compiler.compile_results() actual_stdout = output.getvalue() #", "}, \"2_1_5\": { \"bucket_0\": False, \"bucket_1\": False, \"bucket_2\": False, \"bucket_3\":", "\"bucket_0\": False, \"bucket_1\": False, \"bucket_2\": False, \"bucket_3\": False, \"bucket_4\": False,", ") temp_gold_annotations_path = os.path.join( tmpdir, 'gold_annotations.json' ) # Save a", "os.path.join( os.path.dirname(os.path.abspath(__file__)), 'test_turn_annotations_static_analysis', ) 
expected_stdout_path = os.path.join( analysis_outputs_folder, 'test_stdout.txt' )", "open(temp_gold_annotations_path, 'w') as f: json.dump(gold_annotations, f) # Run compilation of", "obj in os.listdir(tmpdir) if obj.startswith('results') ][0] actual_results_path = os.path.join(tmpdir, actual_results_rel_path)", "expected stdout # Paths analysis_samples_folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'analysis_samples' )", "task. \"\"\" def test_compile_results(self): \"\"\" Test compiling results on a", "# Run compilation of results parser = TurnAnnotationsStaticResultsCompiler.setup_args() parser.set_defaults( **{", "Test components of specific crowdsourcing tasks. \"\"\" import json import", "testing_utils try: from parlai.crowdsourcing.tasks.turn_annotations_static.analysis.compile_results import ( TurnAnnotationsStaticResultsCompiler, ) from parlai.crowdsourcing.utils.tests", "source tree. \"\"\" Test components of specific crowdsourcing tasks. \"\"\"", "= os.path.join( analysis_outputs_folder, 'test_stdout.txt' ) temp_gold_annotations_path = os.path.join( tmpdir, 'gold_annotations.json'", "of specific crowdsourcing tasks. \"\"\" import json import os import", "its affiliates. # This source code is licensed under the", "dummy set of data. 
\"\"\" with testing_utils.tempdir() as tmpdir: #", "that the saved results file is what it should be", "actual_results_path = os.path.join(tmpdir, actual_results_rel_path) actual_results = ( pd.read_csv(actual_results_path) .drop('folder', axis=1)", "the 'folder' column, which contains a system-dependent path string actual_results_rel_path", "'folder' column, which contains a system-dependent path string actual_results_rel_path =", "False, \"none_all_good\": False, }, \"2_1_5\": { \"bucket_0\": False, \"bucket_1\": False,", "as f: json.dump(gold_annotations, f) # Run compilation of results parser", "False, \"bucket_2\": False, \"bucket_3\": False, \"bucket_4\": True, \"none_all_good\": False, },", "annotations gold_annotations = { \"1_0_5\": { \"bucket_0\": False, \"bucket_1\": False,", "import pandas as pd import parlai.utils.testing as testing_utils try: from", "['hit_id', 'worker_id', 'conversation_id', 'turn_idx'] expected_results_path = os.path.join( analysis_outputs_folder, 'expected_results.csv' )", "'conversation_id', 'turn_idx'] expected_results_path = os.path.join( analysis_outputs_folder, 'expected_results.csv' ) expected_results =", "}, } with open(temp_gold_annotations_path, 'w') as f: json.dump(gold_annotations, f) #", "'onboarding_in_flight_data_file': os.path.join( analysis_samples_folder, 'onboarding_in_flight.jsonl' ), 'gold_annotations_file': temp_gold_annotations_path, } ) args", ") expected_results = ( pd.read_csv(expected_results_path) .drop('folder', axis=1) .sort_values(sort_columns) .reset_index(drop=True) )", "os.listdir(tmpdir) if obj.startswith('results') ][0] actual_results_path = os.path.join(tmpdir, actual_results_rel_path) actual_results =", "[ obj for obj in os.listdir(tmpdir) if obj.startswith('results') ][0] actual_results_path", "of this source tree. 
\"\"\" Test components of specific crowdsourcing", "results:\\n{expected_results.to_csv()}' f'\\n\\n\\tActual results:\\n{actual_results.to_csv()}' ) except ImportError: pass if __name__ ==", "False, \"bucket_3\": False, \"bucket_4\": True, \"none_all_good\": False, }, } with", "test_compile_results(self): \"\"\" Test compiling results on a dummy set of", "'output_folder': tmpdir, 'onboarding_in_flight_data_file': os.path.join( analysis_samples_folder, 'onboarding_in_flight.jsonl' ), 'gold_annotations_file': temp_gold_annotations_path, }", "'test_turn_annotations_static_analysis', ) expected_stdout_path = os.path.join( analysis_outputs_folder, 'test_stdout.txt' ) temp_gold_annotations_path =", "(c) Facebook, Inc. and its affiliates. # This source code", "False, }, \"2_1_5\": { \"bucket_0\": False, \"bucket_1\": False, \"bucket_2\": False,", "# LICENSE file in the root directory of this source", "{ \"bucket_0\": False, \"bucket_1\": False, \"bucket_2\": False, \"bucket_3\": False, \"bucket_4\":", "found in the # LICENSE file in the root directory", "f'\\n\\n\\tExpected results:\\n{expected_results.to_csv()}' f'\\n\\n\\tActual results:\\n{actual_results.to_csv()}' ) except ImportError: pass if __name__", "with testing_utils.tempdir() as tmpdir: # Define expected stdout # Paths", "be check_stdout( actual_stdout=actual_stdout, expected_stdout_path=expected_stdout_path, ) # Check that the saved", "3 compiler.compile_results() actual_stdout = output.getvalue() # Check the output against", ") analysis_outputs_folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'test_turn_annotations_static_analysis', ) expected_stdout_path = os.path.join(", "a dummy set of data. \"\"\" with testing_utils.tempdir() as tmpdir:", "\"bucket_2\": False, \"bucket_3\": False, \"bucket_4\": False, \"none_all_good\": False, }, \"2_1_5\":", "'onboarding_in_flight.jsonl' ), 'gold_annotations_file': temp_gold_annotations_path, } ) args = parser.parse_args([]) with", "of data. 
\"\"\" with testing_utils.tempdir() as tmpdir: # Define expected", "= parser.parse_args([]) with testing_utils.capture_output() as output: compiler = TurnAnnotationsStaticResultsCompiler(vars(args)) compiler.NUM_SUBTASKS", "pd import parlai.utils.testing as testing_utils try: from parlai.crowdsourcing.tasks.turn_annotations_static.analysis.compile_results import (", "os import unittest import pandas as pd import parlai.utils.testing as", "os.path.join( tmpdir, 'gold_annotations.json' ) # Save a file of gold", "tmpdir, 'gold_annotations.json' ) # Save a file of gold annotations", "Define expected stdout # Paths analysis_samples_folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'analysis_samples'", "os.path.join( analysis_outputs_folder, 'test_stdout.txt' ) temp_gold_annotations_path = os.path.join( tmpdir, 'gold_annotations.json' )", "# Paths analysis_samples_folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'analysis_samples' ) analysis_outputs_folder =", "Paths analysis_samples_folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'analysis_samples' ) analysis_outputs_folder = os.path.join(", "'analysis_samples' ) analysis_outputs_folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'test_turn_annotations_static_analysis', ) expected_stdout_path =", "this source tree. \"\"\" Test components of specific crowdsourcing tasks.", "static turn annotations task. \"\"\" def test_compile_results(self): \"\"\" Test compiling", "crowdsourcing tasks. 
\"\"\" import json import os import unittest import", "\"bucket_3\": False, \"bucket_4\": True, \"none_all_good\": False, }, } with open(temp_gold_annotations_path,", "TurnAnnotationsStaticResultsCompiler(vars(args)) compiler.NUM_SUBTASKS = 3 compiler.NUM_ANNOTATIONS = 3 compiler.compile_results() actual_stdout =", "what it should be check_stdout( actual_stdout=actual_stdout, expected_stdout_path=expected_stdout_path, ) # Check", "results:\\n{actual_results.to_csv()}' ) except ImportError: pass if __name__ == \"__main__\": unittest.main()", "compiler.NUM_ANNOTATIONS = 3 compiler.compile_results() actual_stdout = output.getvalue() # Check the", "False, }, } with open(temp_gold_annotations_path, 'w') as f: json.dump(gold_annotations, f)", "'turn_idx'] expected_results_path = os.path.join( analysis_outputs_folder, 'expected_results.csv' ) expected_results = (", "This source code is licensed under the MIT license found", "analysis_samples_folder, 'onboarding_in_flight.jsonl' ), 'gold_annotations_file': temp_gold_annotations_path, } ) args = parser.parse_args([])", "), 'gold_annotations_file': temp_gold_annotations_path, } ) args = parser.parse_args([]) with testing_utils.capture_output()", "try: from parlai.crowdsourcing.tasks.turn_annotations_static.analysis.compile_results import ( TurnAnnotationsStaticResultsCompiler, ) from parlai.crowdsourcing.utils.tests import", "False, \"bucket_3\": False, \"bucket_4\": False, \"none_all_good\": True, }, \"1_1_5\": {", "def test_compile_results(self): \"\"\" Test compiling results on a dummy set", "= os.path.join( os.path.dirname(os.path.abspath(__file__)), 'analysis_samples' ) analysis_outputs_folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'test_turn_annotations_static_analysis',", "import os import unittest import pandas as pd import parlai.utils.testing", "# Check that the saved results file is what it", "pd.read_csv(actual_results_path) .drop('folder', axis=1) .sort_values(sort_columns) 
.reset_index(drop=True) ) if not actual_results.equals(expected_results): raise", "\"none_all_good\": False, }, \"2_1_5\": { \"bucket_0\": False, \"bucket_1\": False, \"bucket_2\":", "licensed under the MIT license found in the # LICENSE", "= 3 compiler.compile_results() actual_stdout = output.getvalue() # Check the output", "}, \"1_1_5\": { \"bucket_0\": False, \"bucket_1\": False, \"bucket_2\": False, \"bucket_3\":", "\"bucket_3\": False, \"bucket_4\": False, \"none_all_good\": True, }, \"1_1_5\": { \"bucket_0\":", "True, \"none_all_good\": False, }, } with open(temp_gold_annotations_path, 'w') as f:", "= os.path.join( os.path.dirname(os.path.abspath(__file__)), 'test_turn_annotations_static_analysis', ) expected_stdout_path = os.path.join( analysis_outputs_folder, 'test_stdout.txt'", "gold_annotations = { \"1_0_5\": { \"bucket_0\": False, \"bucket_1\": False, \"bucket_2\":", "analysis_samples_folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'analysis_samples' ) analysis_outputs_folder = os.path.join( os.path.dirname(os.path.abspath(__file__)),", "with testing_utils.capture_output() as output: compiler = TurnAnnotationsStaticResultsCompiler(vars(args)) compiler.NUM_SUBTASKS = 3", "should be sort_columns = ['hit_id', 'worker_id', 'conversation_id', 'turn_idx'] expected_results_path =", "MIT license found in the # LICENSE file in the", "os.path.join( analysis_samples_folder, 'onboarding_in_flight.jsonl' ), 'gold_annotations_file': temp_gold_annotations_path, } ) args =", "False, \"none_all_good\": True, }, \"1_1_5\": { \"bucket_0\": False, \"bucket_1\": False,", ".drop('folder', axis=1) .sort_values(sort_columns) .reset_index(drop=True) ) if not actual_results.equals(expected_results): raise ValueError(", "file of gold annotations gold_annotations = { \"1_0_5\": { \"bucket_0\":", "= [ obj for obj in os.listdir(tmpdir) if obj.startswith('results') ][0]", "args = parser.parse_args([]) with testing_utils.capture_output() as output: compiler = 
TurnAnnotationsStaticResultsCompiler(vars(args))", "actual_results_rel_path = [ obj for obj in os.listdir(tmpdir) if obj.startswith('results')", "actual_stdout=actual_stdout, expected_stdout_path=expected_stdout_path, ) # Check that the saved results file", "} ) args = parser.parse_args([]) with testing_utils.capture_output() as output: compiler", "tmpdir, 'onboarding_in_flight_data_file': os.path.join( analysis_samples_folder, 'onboarding_in_flight.jsonl' ), 'gold_annotations_file': temp_gold_annotations_path, } )", "output against what it should be check_stdout( actual_stdout=actual_stdout, expected_stdout_path=expected_stdout_path, )", "if obj.startswith('results') ][0] actual_results_path = os.path.join(tmpdir, actual_results_rel_path) actual_results = (", ") expected_stdout_path = os.path.join( analysis_outputs_folder, 'test_stdout.txt' ) temp_gold_annotations_path = os.path.join(", "'gold_annotations.json' ) # Save a file of gold annotations gold_annotations", "# Save a file of gold annotations gold_annotations = {", "expected_stdout_path=expected_stdout_path, ) # Check that the saved results file is", "os.path.dirname(os.path.abspath(__file__)), 'test_turn_annotations_static_analysis', ) expected_stdout_path = os.path.join( analysis_outputs_folder, 'test_stdout.txt' ) temp_gold_annotations_path", "# Copyright (c) Facebook, Inc. and its affiliates. # This", "Copyright (c) Facebook, Inc. and its affiliates. 
# This source", "in the # LICENSE file in the root directory of", "output.getvalue() # Check the output against what it should be", "system-dependent path string actual_results_rel_path = [ obj for obj in", "TurnAnnotationsStaticResultsCompiler.setup_args() parser.set_defaults( **{ 'results_folders': analysis_samples_folder, 'output_folder': tmpdir, 'onboarding_in_flight_data_file': os.path.join( analysis_samples_folder,", "= TurnAnnotationsStaticResultsCompiler(vars(args)) compiler.NUM_SUBTASKS = 3 compiler.NUM_ANNOTATIONS = 3 compiler.compile_results() actual_stdout", "data. \"\"\" with testing_utils.tempdir() as tmpdir: # Define expected stdout", "#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates.", "\"none_all_good\": True, }, \"1_1_5\": { \"bucket_0\": False, \"bucket_1\": False, \"bucket_2\":", "( pd.read_csv(expected_results_path) .drop('folder', axis=1) .sort_values(sort_columns) .reset_index(drop=True) ) # Drop the", "\"bucket_1\": False, \"bucket_2\": False, \"bucket_3\": False, \"bucket_4\": False, \"none_all_good\": True,", "check_stdout class TestAnalysis(unittest.TestCase): \"\"\" Test the analysis code for the", ") # Save a file of gold annotations gold_annotations =", "{ \"1_0_5\": { \"bucket_0\": False, \"bucket_1\": False, \"bucket_2\": False, \"bucket_3\":", "contains a system-dependent path string actual_results_rel_path = [ obj for", "# Check the output against what it should be check_stdout(", "Drop the 'folder' column, which contains a system-dependent path string", "analysis_outputs_folder, 'expected_results.csv' ) expected_results = ( pd.read_csv(expected_results_path) .drop('folder', axis=1) .sort_values(sort_columns)", "code for the static turn annotations task. 
\"\"\" def test_compile_results(self):", "} with open(temp_gold_annotations_path, 'w') as f: json.dump(gold_annotations, f) # Run", ".drop('folder', axis=1) .sort_values(sort_columns) .reset_index(drop=True) ) # Drop the 'folder' column,", "tmpdir: # Define expected stdout # Paths analysis_samples_folder = os.path.join(", "Inc. and its affiliates. # This source code is licensed", ") args = parser.parse_args([]) with testing_utils.capture_output() as output: compiler =", "compilation of results parser = TurnAnnotationsStaticResultsCompiler.setup_args() parser.set_defaults( **{ 'results_folders': analysis_samples_folder,", "TestAnalysis(unittest.TestCase): \"\"\" Test the analysis code for the static turn", "expected_stdout_path = os.path.join( analysis_outputs_folder, 'test_stdout.txt' ) temp_gold_annotations_path = os.path.join( tmpdir,", "the analysis code for the static turn annotations task. \"\"\"", "( TurnAnnotationsStaticResultsCompiler, ) from parlai.crowdsourcing.utils.tests import check_stdout class TestAnalysis(unittest.TestCase): \"\"\"", "results parser = TurnAnnotationsStaticResultsCompiler.setup_args() parser.set_defaults( **{ 'results_folders': analysis_samples_folder, 'output_folder': tmpdir,", "path string actual_results_rel_path = [ obj for obj in os.listdir(tmpdir)", "file is what it should be sort_columns = ['hit_id', 'worker_id',", "actual_results_rel_path) actual_results = ( pd.read_csv(actual_results_path) .drop('folder', axis=1) .sort_values(sort_columns) .reset_index(drop=True) )", "be sort_columns = ['hit_id', 'worker_id', 'conversation_id', 'turn_idx'] expected_results_path = os.path.join(", "python3 # Copyright (c) Facebook, Inc. and its affiliates. #", "results file is what it should be sort_columns = ['hit_id',", "tree. \"\"\" Test components of specific crowdsourcing tasks. 
\"\"\" import", "os.path.join(tmpdir, actual_results_rel_path) actual_results = ( pd.read_csv(actual_results_path) .drop('folder', axis=1) .sort_values(sort_columns) .reset_index(drop=True)", "gold annotations gold_annotations = { \"1_0_5\": { \"bucket_0\": False, \"bucket_1\":", "json.dump(gold_annotations, f) # Run compilation of results parser = TurnAnnotationsStaticResultsCompiler.setup_args()", "the # LICENSE file in the root directory of this", "Test compiling results on a dummy set of data. \"\"\"", "os.path.join( os.path.dirname(os.path.abspath(__file__)), 'analysis_samples' ) analysis_outputs_folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'test_turn_annotations_static_analysis', )", "it should be check_stdout( actual_stdout=actual_stdout, expected_stdout_path=expected_stdout_path, ) # Check that", "annotations task. \"\"\" def test_compile_results(self): \"\"\" Test compiling results on", "\"\"\" def test_compile_results(self): \"\"\" Test compiling results on a dummy", "}, \"2_0_5\": { \"bucket_0\": False, \"bucket_1\": True, \"bucket_2\": False, \"bucket_3\":", "\"bucket_1\": True, \"bucket_2\": False, \"bucket_3\": False, \"bucket_4\": False, \"none_all_good\": False,", ".sort_values(sort_columns) .reset_index(drop=True) ) if not actual_results.equals(expected_results): raise ValueError( f'\\n\\n\\tExpected results:\\n{expected_results.to_csv()}'", "specific crowdsourcing tasks. 
\"\"\" import json import os import unittest", "obj for obj in os.listdir(tmpdir) if obj.startswith('results') ][0] actual_results_path =", "as testing_utils try: from parlai.crowdsourcing.tasks.turn_annotations_static.analysis.compile_results import ( TurnAnnotationsStaticResultsCompiler, ) from", "\"bucket_2\": False, \"bucket_3\": False, \"bucket_4\": True, \"none_all_good\": False, }, }", "import json import os import unittest import pandas as pd", "should be check_stdout( actual_stdout=actual_stdout, expected_stdout_path=expected_stdout_path, ) # Check that the", "\"1_1_5\": { \"bucket_0\": False, \"bucket_1\": False, \"bucket_2\": False, \"bucket_3\": False,", "code is licensed under the MIT license found in the", "os.path.dirname(os.path.abspath(__file__)), 'analysis_samples' ) analysis_outputs_folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'test_turn_annotations_static_analysis', ) expected_stdout_path", "\"bucket_3\": False, \"bucket_4\": False, \"none_all_good\": False, }, \"2_1_5\": { \"bucket_0\":", "'worker_id', 'conversation_id', 'turn_idx'] expected_results_path = os.path.join( analysis_outputs_folder, 'expected_results.csv' ) expected_results", "parser.set_defaults( **{ 'results_folders': analysis_samples_folder, 'output_folder': tmpdir, 'onboarding_in_flight_data_file': os.path.join( analysis_samples_folder, 'onboarding_in_flight.jsonl'", "source code is licensed under the MIT license found in", "import ( TurnAnnotationsStaticResultsCompiler, ) from parlai.crowdsourcing.utils.tests import check_stdout class TestAnalysis(unittest.TestCase):", "what it should be sort_columns = ['hit_id', 'worker_id', 'conversation_id', 'turn_idx']", "= ( pd.read_csv(actual_results_path) .drop('folder', axis=1) .sort_values(sort_columns) .reset_index(drop=True) ) if not", "\"\"\" with testing_utils.tempdir() as tmpdir: # Define expected stdout #", "as output: compiler = TurnAnnotationsStaticResultsCompiler(vars(args)) compiler.NUM_SUBTASKS = 3 
compiler.NUM_ANNOTATIONS =", "# This source code is licensed under the MIT license", "analysis_outputs_folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'test_turn_annotations_static_analysis', ) expected_stdout_path = os.path.join( analysis_outputs_folder,", "3 compiler.NUM_ANNOTATIONS = 3 compiler.compile_results() actual_stdout = output.getvalue() # Check", "][0] actual_results_path = os.path.join(tmpdir, actual_results_rel_path) actual_results = ( pd.read_csv(actual_results_path) .drop('folder',", "True, }, \"1_1_5\": { \"bucket_0\": False, \"bucket_1\": False, \"bucket_2\": False,", "parser.parse_args([]) with testing_utils.capture_output() as output: compiler = TurnAnnotationsStaticResultsCompiler(vars(args)) compiler.NUM_SUBTASKS =", "set of data. \"\"\" with testing_utils.tempdir() as tmpdir: # Define", "= TurnAnnotationsStaticResultsCompiler.setup_args() parser.set_defaults( **{ 'results_folders': analysis_samples_folder, 'output_folder': tmpdir, 'onboarding_in_flight_data_file': os.path.join(", ") from parlai.crowdsourcing.utils.tests import check_stdout class TestAnalysis(unittest.TestCase): \"\"\" Test the", "with open(temp_gold_annotations_path, 'w') as f: json.dump(gold_annotations, f) # Run compilation", "<filename>tests/crowdsourcing/tasks/turn_annotations_static/test_turn_annotations_static_analysis.py #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. 
and its", "obj.startswith('results') ][0] actual_results_path = os.path.join(tmpdir, actual_results_rel_path) actual_results = ( pd.read_csv(actual_results_path)", "\"bucket_4\": True, \"none_all_good\": False, }, } with open(temp_gold_annotations_path, 'w') as", "False, \"bucket_4\": True, \"none_all_good\": False, }, \"2_0_5\": { \"bucket_0\": False,", "analysis_samples_folder, 'output_folder': tmpdir, 'onboarding_in_flight_data_file': os.path.join( analysis_samples_folder, 'onboarding_in_flight.jsonl' ), 'gold_annotations_file': temp_gold_annotations_path,", "True, \"bucket_2\": False, \"bucket_3\": False, \"bucket_4\": False, \"none_all_good\": False, },", "'test_stdout.txt' ) temp_gold_annotations_path = os.path.join( tmpdir, 'gold_annotations.json' ) # Save", "directory of this source tree. \"\"\" Test components of specific", "os.path.join( analysis_outputs_folder, 'expected_results.csv' ) expected_results = ( pd.read_csv(expected_results_path) .drop('folder', axis=1)", "temp_gold_annotations_path = os.path.join( tmpdir, 'gold_annotations.json' ) # Save a file", "root directory of this source tree. \"\"\" Test components of", "'w') as f: json.dump(gold_annotations, f) # Run compilation of results", "= os.path.join( tmpdir, 'gold_annotations.json' ) # Save a file of", "saved results file is what it should be sort_columns =", "components of specific crowdsourcing tasks. 
\"\"\" import json import os", "\"\"\" Test the analysis code for the static turn annotations", "= ( pd.read_csv(expected_results_path) .drop('folder', axis=1) .sort_values(sort_columns) .reset_index(drop=True) ) # Drop", "json import os import unittest import pandas as pd import", "import parlai.utils.testing as testing_utils try: from parlai.crowdsourcing.tasks.turn_annotations_static.analysis.compile_results import ( TurnAnnotationsStaticResultsCompiler,", ") # Drop the 'folder' column, which contains a system-dependent", "string actual_results_rel_path = [ obj for obj in os.listdir(tmpdir) if", "sort_columns = ['hit_id', 'worker_id', 'conversation_id', 'turn_idx'] expected_results_path = os.path.join( analysis_outputs_folder,", "check_stdout( actual_stdout=actual_stdout, expected_stdout_path=expected_stdout_path, ) # Check that the saved results", "raise ValueError( f'\\n\\n\\tExpected results:\\n{expected_results.to_csv()}' f'\\n\\n\\tActual results:\\n{actual_results.to_csv()}' ) except ImportError: pass", "file in the root directory of this source tree. 
\"\"\"", "ValueError( f'\\n\\n\\tExpected results:\\n{expected_results.to_csv()}' f'\\n\\n\\tActual results:\\n{actual_results.to_csv()}' ) except ImportError: pass if", "'gold_annotations_file': temp_gold_annotations_path, } ) args = parser.parse_args([]) with testing_utils.capture_output() as", "temp_gold_annotations_path, } ) args = parser.parse_args([]) with testing_utils.capture_output() as output:", ".sort_values(sort_columns) .reset_index(drop=True) ) # Drop the 'folder' column, which contains", "parlai.utils.testing as testing_utils try: from parlai.crowdsourcing.tasks.turn_annotations_static.analysis.compile_results import ( TurnAnnotationsStaticResultsCompiler, )", "\"bucket_0\": False, \"bucket_1\": True, \"bucket_2\": False, \"bucket_3\": False, \"bucket_4\": False,", "parser = TurnAnnotationsStaticResultsCompiler.setup_args() parser.set_defaults( **{ 'results_folders': analysis_samples_folder, 'output_folder': tmpdir, 'onboarding_in_flight_data_file':", "False, \"bucket_4\": True, \"none_all_good\": False, }, } with open(temp_gold_annotations_path, 'w')", "output: compiler = TurnAnnotationsStaticResultsCompiler(vars(args)) compiler.NUM_SUBTASKS = 3 compiler.NUM_ANNOTATIONS = 3", "\"bucket_3\": False, \"bucket_4\": True, \"none_all_good\": False, }, \"2_0_5\": { \"bucket_0\":", "# Define expected stdout # Paths analysis_samples_folder = os.path.join( os.path.dirname(os.path.abspath(__file__)),", "f) # Run compilation of results parser = TurnAnnotationsStaticResultsCompiler.setup_args() parser.set_defaults(", "pandas as pd import parlai.utils.testing as testing_utils try: from parlai.crowdsourcing.tasks.turn_annotations_static.analysis.compile_results", "is licensed under the MIT license found in the #", "\"bucket_4\": False, \"none_all_good\": False, }, \"2_1_5\": { \"bucket_0\": False, \"bucket_1\":", "on a dummy set of data. 
\"\"\" with testing_utils.tempdir() as", ".reset_index(drop=True) ) if not actual_results.equals(expected_results): raise ValueError( f'\\n\\n\\tExpected results:\\n{expected_results.to_csv()}' f'\\n\\n\\tActual", "a system-dependent path string actual_results_rel_path = [ obj for obj", "axis=1) .sort_values(sort_columns) .reset_index(drop=True) ) if not actual_results.equals(expected_results): raise ValueError( f'\\n\\n\\tExpected", "which contains a system-dependent path string actual_results_rel_path = [ obj", "\"2_0_5\": { \"bucket_0\": False, \"bucket_1\": True, \"bucket_2\": False, \"bucket_3\": False,", ") if not actual_results.equals(expected_results): raise ValueError( f'\\n\\n\\tExpected results:\\n{expected_results.to_csv()}' f'\\n\\n\\tActual results:\\n{actual_results.to_csv()}'", "False, \"bucket_4\": False, \"none_all_good\": True, }, \"1_1_5\": { \"bucket_0\": False,", "the output against what it should be check_stdout( actual_stdout=actual_stdout, expected_stdout_path=expected_stdout_path,", "Save a file of gold annotations gold_annotations = { \"1_0_5\":", "= os.path.join( analysis_outputs_folder, 'expected_results.csv' ) expected_results = ( pd.read_csv(expected_results_path) .drop('folder',", ".reset_index(drop=True) ) # Drop the 'folder' column, which contains a", "Test the analysis code for the static turn annotations task.", "\"none_all_good\": False, }, } with open(temp_gold_annotations_path, 'w') as f: json.dump(gold_annotations,", "and its affiliates. 
# This source code is licensed under", ") # Check that the saved results file is what", "of results parser = TurnAnnotationsStaticResultsCompiler.setup_args() parser.set_defaults( **{ 'results_folders': analysis_samples_folder, 'output_folder':", "False, \"bucket_3\": False, \"bucket_4\": True, \"none_all_good\": False, }, \"2_0_5\": {", "compiler = TurnAnnotationsStaticResultsCompiler(vars(args)) compiler.NUM_SUBTASKS = 3 compiler.NUM_ANNOTATIONS = 3 compiler.compile_results()", "results on a dummy set of data. \"\"\" with testing_utils.tempdir()", "testing_utils.tempdir() as tmpdir: # Define expected stdout # Paths analysis_samples_folder", "'expected_results.csv' ) expected_results = ( pd.read_csv(expected_results_path) .drop('folder', axis=1) .sort_values(sort_columns) .reset_index(drop=True)", "stdout # Paths analysis_samples_folder = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'analysis_samples' ) analysis_outputs_folder", "= output.getvalue() # Check the output against what it should", "\"2_1_5\": { \"bucket_0\": False, \"bucket_1\": False, \"bucket_2\": False, \"bucket_3\": False,", "the static turn annotations task. \"\"\" def test_compile_results(self): \"\"\" Test", "'results_folders': analysis_samples_folder, 'output_folder': tmpdir, 'onboarding_in_flight_data_file': os.path.join( analysis_samples_folder, 'onboarding_in_flight.jsonl' ), 'gold_annotations_file':", "False, \"bucket_1\": True, \"bucket_2\": False, \"bucket_3\": False, \"bucket_4\": False, \"none_all_good\":", "Facebook, Inc. and its affiliates. # This source code is", "{ \"bucket_0\": False, \"bucket_1\": True, \"bucket_2\": False, \"bucket_3\": False, \"bucket_4\":", "for obj in os.listdir(tmpdir) if obj.startswith('results') ][0] actual_results_path = os.path.join(tmpdir,", "turn annotations task. 
\"\"\" def test_compile_results(self): \"\"\" Test compiling results", "import unittest import pandas as pd import parlai.utils.testing as testing_utils", "\"bucket_4\": False, \"none_all_good\": True, }, \"1_1_5\": { \"bucket_0\": False, \"bucket_1\":", "\"bucket_4\": True, \"none_all_good\": False, }, \"2_0_5\": { \"bucket_0\": False, \"bucket_1\":", "compiler.NUM_SUBTASKS = 3 compiler.NUM_ANNOTATIONS = 3 compiler.compile_results() actual_stdout = output.getvalue()", "True, \"none_all_good\": False, }, \"2_0_5\": { \"bucket_0\": False, \"bucket_1\": True,", "actual_results.equals(expected_results): raise ValueError( f'\\n\\n\\tExpected results:\\n{expected_results.to_csv()}' f'\\n\\n\\tActual results:\\n{actual_results.to_csv()}' ) except ImportError:" ]
[]
[ "not in the names list required: false default: \"no\" choices:", "is free software: you can redistribute it and/or modify #", "rabbitmq_plugins.disable(plugin) disabled.append(plugin) for name in names: if name not in", "args, run_in_check_mode=False): if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): cmd", "a custom install prefix to a Rabbit required: false version_added:", "= [] disabled = [] if state == 'enabled': if", "else: self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True) def _exec(self, args, run_in_check_mode=False): if", "the GNU General Public License # along with Ansible. If", "[enabled, disabled] prefix: description: - Specify a custom install prefix", "description: - Specify a custom install prefix to a Rabbit", "return out.splitlines() return list() def get_all(self): return self._exec(['list', '-E', '-m'],", "if plugins are to be enabled or disabled required: false", "WITHOUT ANY WARRANTY; without even the implied warranty of #", "of plugin names required: true default: null aliases: [name] new_only:", "if state == 'enabled': if not new_only: for plugin in", "under the terms of the GNU General Public License as", "] state: description: - Specify if plugins are to be", "options: names: description: - Comma-separated list of plugin names required:", "warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.", "self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True) def _exec(self, args, run_in_check_mode=False): if not", "list() def get_all(self): return self._exec(['list', '-E', '-m'], True) def enable(self,", "or # (at your option) any later version. 
# #", "in enabled_plugins: rabbitmq_plugins.enable(name) enabled.append(name) else: for plugin in enabled_plugins: if", "# Ansible is distributed in the hope that it will", "in names: rabbitmq_plugins.disable(plugin) disabled.append(plugin) changed = len(enabled) > 0 or", "module: rabbitmq_plugin short_description: Adds or removes plugins to RabbitMQ description:", "be enabled or disabled required: false default: enabled choices: [enabled,", "# GNU General Public License for more details. # #", "# -*- coding: utf-8 -*- # (c) 2013, Chatham Financial", "Does not disable plugins that are not in the names", "Foundation, either version 3 of the License, or # (at", "plugin in enabled_plugins: if plugin not in names: rabbitmq_plugins.disable(plugin) disabled.append(plugin)", "# # This file is part of Ansible # #", "in the names list required: false default: \"no\" choices: [", "type='bool'), state=dict(default='enabled', choices=['enabled', 'disabled']), prefix=dict(required=False, default=None) ) module = AnsibleModule(", "+ args, check_rc=True) return out.splitlines() return list() def get_all(self): return", "[self._rabbitmq_plugins] rc, out, err = self.module.run_command(cmd + args, check_rc=True) return", "plugin - rabbitmq_plugin: names=rabbitmq_management state=enabled ''' class RabbitMqPlugins(object): def __init__(self,", "new_only=dict(default='no', type='bool'), state=dict(default='enabled', choices=['enabled', 'disabled']), prefix=dict(required=False, default=None) ) module =", "True) def _exec(self, args, run_in_check_mode=False): if not self.module.check_mode or (self.module.check_mode", "prefix to a Rabbit required: false version_added: \"1.3\" default: null", "General Public License for more details. 
# # You should", "Specify if plugins are to be enabled or disabled required:", "arg_spec = dict( names=dict(required=True, aliases=['name']), new_only=dict(default='no', type='bool'), state=dict(default='enabled', choices=['enabled', 'disabled']),", "self.module = module if module.params['prefix']: self._rabbitmq_plugins = module.params['prefix'] + \"/sbin/rabbitmq-plugins\"", "A PARTICULAR PURPOSE. See the # GNU General Public License", "the Free Software Foundation, either version 3 of the License,", "description: - Enables or disables RabbitMQ plugins version_added: \"1.1\" author:", "state: description: - Specify if plugins are to be enabled", "name]) def disable(self, name): self._exec(['disable', name]) def main(): arg_spec =", "disabled.append(plugin) for name in names: if name not in enabled_plugins:", "AnsibleModule( argument_spec=arg_spec, supports_check_mode=True ) names = module.params['names'].split(',') new_only = module.params['new_only']", "null aliases: [name] new_only: description: - Only enable missing plugins", "def _exec(self, args, run_in_check_mode=False): if not self.module.check_mode or (self.module.check_mode and", "rabbitmq_plugins.enable(name) enabled.append(name) else: for plugin in enabled_plugins: if plugin in", "def get_all(self): return self._exec(['list', '-E', '-m'], True) def enable(self, name):", "even the implied warranty of # MERCHANTABILITY or FITNESS FOR", "''' # Enables the rabbitmq_management plugin - rabbitmq_plugin: names=rabbitmq_management state=enabled", "free software: you can redistribute it and/or modify # it", "Financial <<EMAIL>> # # This file is part of Ansible", "or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU", "the implied warranty of # MERCHANTABILITY or FITNESS FOR A", "list of plugin names required: true default: null aliases: [name]", "plugins version_added: \"1.1\" author: <NAME> options: names: description: - Comma-separated", "state == 'enabled': if not new_only: for plugin in enabled_plugins:", "of the GNU General Public License as published by #", "run_in_check_mode=False): if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): cmd =", "see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: rabbitmq_plugin short_description: Adds", "- Does not disable plugins that are not in the", "main(): arg_spec = dict( names=dict(required=True, aliases=['name']), new_only=dict(default='no', type='bool'), state=dict(default='enabled', choices=['enabled',", "can redistribute it and/or modify # it under the terms", "new_only = module.params['new_only'] state = module.params['state'] rabbitmq_plugins = RabbitMqPlugins(module) enabled_plugins", "return self._exec(['list', '-E', '-m'], True) def enable(self, name): self._exec(['enable', name])", "by # the Free Software Foundation, either version 3 of", "plugin in enabled_plugins: if plugin in names: rabbitmq_plugins.disable(plugin) disabled.append(plugin) changed", "Enables the rabbitmq_management plugin - rabbitmq_plugin: names=rabbitmq_management state=enabled ''' class", "= module.params['prefix'] + \"/sbin/rabbitmq-plugins\" else: self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True) def", "# This file is part of Ansible # # Ansible", "err = self.module.run_command(cmd + args, check_rc=True) return out.splitlines() return list()", "- rabbitmq_plugin: names=rabbitmq_management state=enabled ''' class RabbitMqPlugins(object): def __init__(self, module):", "of Ansible # # Ansible is free software: you can", "plugins to RabbitMQ description: - Enables or disables RabbitMQ plugins", "else: for plugin in enabled_plugins: if plugin in names: 
rabbitmq_plugins.disable(plugin)", "_exec(self, args, run_in_check_mode=False): if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):", "rabbitmq_management plugin - rabbitmq_plugin: names=rabbitmq_management state=enabled ''' class RabbitMqPlugins(object): def", "state=enabled ''' class RabbitMqPlugins(object): def __init__(self, module): self.module = module", "'-m'], True) def enable(self, name): self._exec(['enable', name]) def disable(self, name):", "License for more details. # # You should have received", "True) def enable(self, name): self._exec(['enable', name]) def disable(self, name): self._exec(['disable',", "''' --- module: rabbitmq_plugin short_description: Adds or removes plugins to", "custom install prefix to a Rabbit required: false version_added: \"1.3\"", "This file is part of Ansible # # Ansible is", "with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' ---", "the License, or # (at your option) any later version.", "names: if name not in enabled_plugins: rabbitmq_plugins.enable(name) enabled.append(name) else: for", "= module.params['state'] rabbitmq_plugins = RabbitMqPlugins(module) enabled_plugins = rabbitmq_plugins.get_all() enabled =", "\"yes\", \"no\" ] state: description: - Specify if plugins are", "PARTICULAR PURPOSE. See the # GNU General Public License for", "names: rabbitmq_plugins.disable(plugin) disabled.append(plugin) changed = len(enabled) > 0 or len(disabled)", "''' class RabbitMqPlugins(object): def __init__(self, module): self.module = module if", "# Enables the rabbitmq_management plugin - rabbitmq_plugin: names=rabbitmq_management state=enabled '''", "modify # it under the terms of the GNU General", "terms of the GNU General Public License as published by", "details. # # You should have received a copy of", "not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: rabbitmq_plugin short_description:", "If not, see <http://www.gnu.org/licenses/>. 
DOCUMENTATION = ''' --- module: rabbitmq_plugin", "Only enable missing plugins - Does not disable plugins that", "published by # the Free Software Foundation, either version 3", "\"/sbin/rabbitmq-plugins\" else: self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True) def _exec(self, args, run_in_check_mode=False):", "= ''' # Enables the rabbitmq_management plugin - rabbitmq_plugin: names=rabbitmq_management", "not disable plugins that are not in the names list", "module.params['prefix']: self._rabbitmq_plugins = module.params['prefix'] + \"/sbin/rabbitmq-plugins\" else: self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins',", "RabbitMQ description: - Enables or disables RabbitMQ plugins version_added: \"1.1\"", "short_description: Adds or removes plugins to RabbitMQ description: - Enables", "aliases: [name] new_only: description: - Only enable missing plugins -", "module if module.params['prefix']: self._rabbitmq_plugins = module.params['prefix'] + \"/sbin/rabbitmq-plugins\" else: self._rabbitmq_plugins", "are not in the names list required: false default: \"no\"", "your option) any later version. # # Ansible is distributed", "RabbitMqPlugins(object): def __init__(self, module): self.module = module if module.params['prefix']: self._rabbitmq_plugins", "option) any later version. 
# # Ansible is distributed in", "received a copy of the GNU General Public License #", "License as published by # the Free Software Foundation, either", "is part of Ansible # # Ansible is free software:", "See the # GNU General Public License for more details.", "def disable(self, name): self._exec(['disable', name]) def main(): arg_spec = dict(", "= ''' --- module: rabbitmq_plugin short_description: Adds or removes plugins", "- Specify if plugins are to be enabled or disabled", "description: - Only enable missing plugins - Does not disable", "Specify a custom install prefix to a Rabbit required: false", "that it will be useful, # but WITHOUT ANY WARRANTY;", "new_only: description: - Only enable missing plugins - Does not", "plugin in names: rabbitmq_plugins.disable(plugin) disabled.append(plugin) changed = len(enabled) > 0", "either version 3 of the License, or # (at your", "state=dict(default='enabled', choices=['enabled', 'disabled']), prefix=dict(required=False, default=None) ) module = AnsibleModule( argument_spec=arg_spec,", "disable plugins that are not in the names list required:", "argument_spec=arg_spec, supports_check_mode=True ) names = module.params['names'].split(',') new_only = module.params['new_only'] state", "self._exec(['enable', name]) def disable(self, name): self._exec(['disable', name]) def main(): arg_spec", "rabbitmq_plugins = RabbitMqPlugins(module) enabled_plugins = rabbitmq_plugins.get_all() enabled = [] disabled", "more details. 
# # You should have received a copy", "name not in enabled_plugins: rabbitmq_plugins.enable(name) enabled.append(name) else: for plugin in", "be useful, # but WITHOUT ANY WARRANTY; without even the", "enabled_plugins: if plugin in names: rabbitmq_plugins.disable(plugin) disabled.append(plugin) changed = len(enabled)", "not self.module.check_mode or (self.module.check_mode and run_in_check_mode): cmd = [self._rabbitmq_plugins] rc,", "enable missing plugins - Does not disable plugins that are", "- Only enable missing plugins - Does not disable plugins", "if name not in enabled_plugins: rabbitmq_plugins.enable(name) enabled.append(name) else: for plugin", "default: \"no\" choices: [ \"yes\", \"no\" ] state: description: -", "choices=['enabled', 'disabled']), prefix=dict(required=False, default=None) ) module = AnsibleModule( argument_spec=arg_spec, supports_check_mode=True", "DOCUMENTATION = ''' --- module: rabbitmq_plugin short_description: Adds or removes", "[ \"yes\", \"no\" ] state: description: - Specify if plugins", "if plugin not in names: rabbitmq_plugins.disable(plugin) disabled.append(plugin) for name in", "Comma-separated list of plugin names required: true default: null aliases:", "required: false version_added: \"1.3\" default: null ''' EXAMPLES = '''", "self.module.check_mode or (self.module.check_mode and run_in_check_mode): cmd = [self._rabbitmq_plugins] rc, out,", "# but WITHOUT ANY WARRANTY; without even the implied warranty", "Free Software Foundation, either version 3 of the License, or", "= module.get_bin_path('rabbitmq-plugins', True) def _exec(self, args, run_in_check_mode=False): if not self.module.check_mode", "and/or modify # it under the terms of the GNU", "implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR", "plugins that are not in the names list required: false", "true default: null aliases: [name] new_only: description: - Only enable", "check_rc=True) return out.splitlines() return list() def get_all(self): return 
self._exec(['list', '-E',", "> 0 module.exit_json(changed=changed, enabled=enabled, disabled=disabled) # import module snippets from", "= [] if state == 'enabled': if not new_only: for", "0 module.exit_json(changed=changed, enabled=enabled, disabled=disabled) # import module snippets from ansible.module_utils.basic", "out.splitlines() return list() def get_all(self): return self._exec(['list', '-E', '-m'], True)", "Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module:", "2013, Chatham Financial <<EMAIL>> # # This file is part", "it and/or modify # it under the terms of the", "# # Ansible is distributed in the hope that it", "# (at your option) any later version. # # Ansible", "out, err = self.module.run_command(cmd + args, check_rc=True) return out.splitlines() return", "if plugin in names: rabbitmq_plugins.disable(plugin) disabled.append(plugin) changed = len(enabled) >", "a Rabbit required: false version_added: \"1.3\" default: null ''' EXAMPLES", "enabled_plugins: if plugin not in names: rabbitmq_plugins.disable(plugin) disabled.append(plugin) for name", "Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>.", "required: false default: \"no\" choices: [ \"yes\", \"no\" ] state:", "-*- # (c) 2013, Chatham Financial <<EMAIL>> # # This", "it will be useful, # but WITHOUT ANY WARRANTY; without", "of the GNU General Public License # along with Ansible.", "GNU General Public License # along with Ansible. 
If not,", "EXAMPLES = ''' # Enables the rabbitmq_management plugin - rabbitmq_plugin:", "- Specify a custom install prefix to a Rabbit required:", "if not new_only: for plugin in enabled_plugins: if plugin not", ") names = module.params['names'].split(',') new_only = module.params['new_only'] state = module.params['state']", "in enabled_plugins: if plugin not in names: rabbitmq_plugins.disable(plugin) disabled.append(plugin) for", "args, check_rc=True) return out.splitlines() return list() def get_all(self): return self._exec(['list',", "# it under the terms of the GNU General Public", "of the License, or # (at your option) any later", "hope that it will be useful, # but WITHOUT ANY", "or (self.module.check_mode and run_in_check_mode): cmd = [self._rabbitmq_plugins] rc, out, err", "it under the terms of the GNU General Public License", ") module = AnsibleModule( argument_spec=arg_spec, supports_check_mode=True ) names = module.params['names'].split(',')", "''' EXAMPLES = ''' # Enables the rabbitmq_management plugin -", "names required: true default: null aliases: [name] new_only: description: -", "the GNU General Public License as published by # the", "required: false default: enabled choices: [enabled, disabled] prefix: description: -", "false default: \"no\" choices: [ \"yes\", \"no\" ] state: description:", "Chatham Financial <<EMAIL>> # # This file is part of", "changed = len(enabled) > 0 or len(disabled) > 0 module.exit_json(changed=changed,", "FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General", "Enables or disables RabbitMQ plugins version_added: \"1.1\" author: <NAME> options:", "that are not in the names list required: false default:", "General Public License as published by # the Free Software", "\"1.3\" default: null ''' EXAMPLES = ''' # Enables the", "names = module.params['names'].split(',') new_only = module.params['new_only'] state = module.params['state'] rabbitmq_plugins", "General Public License # along with Ansible. 
If not, see", "will be useful, # but WITHOUT ANY WARRANTY; without even", "rabbitmq_plugins.disable(plugin) disabled.append(plugin) changed = len(enabled) > 0 or len(disabled) >", "names: description: - Comma-separated list of plugin names required: true", "missing plugins - Does not disable plugins that are not", "'enabled': if not new_only: for plugin in enabled_plugins: if plugin", "__init__(self, module): self.module = module if module.params['prefix']: self._rabbitmq_plugins = module.params['prefix']", "default: null ''' EXAMPLES = ''' # Enables the rabbitmq_management", "any later version. # # Ansible is distributed in the", "enabled=enabled, disabled=disabled) # import module snippets from ansible.module_utils.basic import *", "coding: utf-8 -*- # (c) 2013, Chatham Financial <<EMAIL>> #", "# along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION =", "to RabbitMQ description: - Enables or disables RabbitMQ plugins version_added:", "PURPOSE. See the # GNU General Public License for more", "part of Ansible # # Ansible is free software: you", "(self.module.check_mode and run_in_check_mode): cmd = [self._rabbitmq_plugins] rc, out, err =", "FOR A PARTICULAR PURPOSE. See the # GNU General Public", "the # GNU General Public License for more details. #", "= module.params['names'].split(',') new_only = module.params['new_only'] state = module.params['state'] rabbitmq_plugins =", "\"no\" choices: [ \"yes\", \"no\" ] state: description: - Specify", "if module.params['prefix']: self._rabbitmq_plugins = module.params['prefix'] + \"/sbin/rabbitmq-plugins\" else: self._rabbitmq_plugins =", "[name] new_only: description: - Only enable missing plugins - Does", "RabbitMQ plugins version_added: \"1.1\" author: <NAME> options: names: description: -", "for more details. 
# # You should have received a", "self.module.run_command(cmd + args, check_rc=True) return out.splitlines() return list() def get_all(self):", "redistribute it and/or modify # it under the terms of", "to be enabled or disabled required: false default: enabled choices:", "= rabbitmq_plugins.get_all() enabled = [] disabled = [] if state", "Ansible # # Ansible is free software: you can redistribute", "<http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: rabbitmq_plugin short_description: Adds or", "plugin names required: true default: null aliases: [name] new_only: description:", "plugin not in names: rabbitmq_plugins.disable(plugin) disabled.append(plugin) for name in names:", "aliases=['name']), new_only=dict(default='no', type='bool'), state=dict(default='enabled', choices=['enabled', 'disabled']), prefix=dict(required=False, default=None) ) module", "ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY", "module.params['state'] rabbitmq_plugins = RabbitMqPlugins(module) enabled_plugins = rabbitmq_plugins.get_all() enabled = []", "rc, out, err = self.module.run_command(cmd + args, check_rc=True) return out.splitlines()", "= dict( names=dict(required=True, aliases=['name']), new_only=dict(default='no', type='bool'), state=dict(default='enabled', choices=['enabled', 'disabled']), prefix=dict(required=False,", "# Ansible is free software: you can redistribute it and/or", "for plugin in enabled_plugins: if plugin not in names: rabbitmq_plugins.disable(plugin)", "the hope that it will be useful, # but WITHOUT", "rabbitmq_plugin short_description: Adds or removes plugins to RabbitMQ description: -", "removes plugins to RabbitMQ description: - Enables or disables RabbitMQ", "software: you can redistribute it and/or modify # it under", "module = AnsibleModule( argument_spec=arg_spec, supports_check_mode=True ) names = module.params['names'].split(',') new_only", "= module if module.params['prefix']: self._rabbitmq_plugins = 
module.params['prefix'] + \"/sbin/rabbitmq-plugins\" else:", "name): self._exec(['enable', name]) def disable(self, name): self._exec(['disable', name]) def main():", "len(enabled) > 0 or len(disabled) > 0 module.exit_json(changed=changed, enabled=enabled, disabled=disabled)", "enabled_plugins = rabbitmq_plugins.get_all() enabled = [] disabled = [] if", "copy of the GNU General Public License # along with", "names: rabbitmq_plugins.disable(plugin) disabled.append(plugin) for name in names: if name not", "in enabled_plugins: if plugin in names: rabbitmq_plugins.disable(plugin) disabled.append(plugin) changed =", "not new_only: for plugin in enabled_plugins: if plugin not in", "prefix: description: - Specify a custom install prefix to a", "distributed in the hope that it will be useful, #", "License, or # (at your option) any later version. #", "version_added: \"1.3\" default: null ''' EXAMPLES = ''' # Enables", "Rabbit required: false version_added: \"1.3\" default: null ''' EXAMPLES =", "-*- coding: utf-8 -*- # (c) 2013, Chatham Financial <<EMAIL>>", "author: <NAME> options: names: description: - Comma-separated list of plugin", "len(disabled) > 0 module.exit_json(changed=changed, enabled=enabled, disabled=disabled) # import module snippets", "= [self._rabbitmq_plugins] rc, out, err = self.module.run_command(cmd + args, check_rc=True)", "You should have received a copy of the GNU General", "disabled] prefix: description: - Specify a custom install prefix to", "enabled_plugins: rabbitmq_plugins.enable(name) enabled.append(name) else: for plugin in enabled_plugins: if plugin", "useful, # but WITHOUT ANY WARRANTY; without even the implied", "or removes plugins to RabbitMQ description: - Enables or disables", "self._exec(['list', '-E', '-m'], True) def enable(self, name): self._exec(['enable', name]) def", "disabled = [] if state == 'enabled': if not new_only:", "you can redistribute it and/or modify # it under the", "- Comma-separated list of plugin names required: 
true default: null", "module.get_bin_path('rabbitmq-plugins', True) def _exec(self, args, run_in_check_mode=False): if not self.module.check_mode or", "module.exit_json(changed=changed, enabled=enabled, disabled=disabled) # import module snippets from ansible.module_utils.basic import", "false default: enabled choices: [enabled, disabled] prefix: description: - Specify", "# You should have received a copy of the GNU", "get_all(self): return self._exec(['list', '-E', '-m'], True) def enable(self, name): self._exec(['enable',", "disable(self, name): self._exec(['disable', name]) def main(): arg_spec = dict( names=dict(required=True,", "= self.module.run_command(cmd + args, check_rc=True) return out.splitlines() return list() def", "in names: rabbitmq_plugins.disable(plugin) disabled.append(plugin) for name in names: if name", "class RabbitMqPlugins(object): def __init__(self, module): self.module = module if module.params['prefix']:", "should have received a copy of the GNU General Public", "name]) def main(): arg_spec = dict( names=dict(required=True, aliases=['name']), new_only=dict(default='no', type='bool'),", "false version_added: \"1.3\" default: null ''' EXAMPLES = ''' #", "enabled or disabled required: false default: enabled choices: [enabled, disabled]", "plugins - Does not disable plugins that are not in", "> 0 or len(disabled) > 0 module.exit_json(changed=changed, enabled=enabled, disabled=disabled) #", "= RabbitMqPlugins(module) enabled_plugins = rabbitmq_plugins.get_all() enabled = [] disabled =", "default: enabled choices: [enabled, disabled] prefix: description: - Specify a", "return list() def get_all(self): return self._exec(['list', '-E', '-m'], True) def", "null ''' EXAMPLES = ''' # Enables the rabbitmq_management plugin", "GNU General Public License as published by # the Free", "<NAME> options: names: description: - Comma-separated list of plugin names", "install prefix to a Rabbit required: false version_added: \"1.3\" default:", 
"names=dict(required=True, aliases=['name']), new_only=dict(default='no', type='bool'), state=dict(default='enabled', choices=['enabled', 'disabled']), prefix=dict(required=False, default=None) )", "enabled = [] disabled = [] if state == 'enabled':", "# the Free Software Foundation, either version 3 of the", "not in enabled_plugins: rabbitmq_plugins.enable(name) enabled.append(name) else: for plugin in enabled_plugins:", "self._exec(['disable', name]) def main(): arg_spec = dict( names=dict(required=True, aliases=['name']), new_only=dict(default='no',", "of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See", "or disables RabbitMQ plugins version_added: \"1.1\" author: <NAME> options: names:", "description: - Specify if plugins are to be enabled or", "disabled=disabled) # import module snippets from ansible.module_utils.basic import * main()", "\"no\" ] state: description: - Specify if plugins are to", "or len(disabled) > 0 module.exit_json(changed=changed, enabled=enabled, disabled=disabled) # import module", "but WITHOUT ANY WARRANTY; without even the implied warranty of", "version_added: \"1.1\" author: <NAME> options: names: description: - Comma-separated list", "default: null aliases: [name] new_only: description: - Only enable missing", "choices: [ \"yes\", \"no\" ] state: description: - Specify if", "Public License for more details. # # You should have", "plugins are to be enabled or disabled required: false default:", "description: - Comma-separated list of plugin names required: true default:", "the terms of the GNU General Public License as published", "utf-8 -*- # (c) 2013, Chatham Financial <<EMAIL>> # #", "required: true default: null aliases: [name] new_only: description: - Only", "[] if state == 'enabled': if not new_only: for plugin", "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the #", "Software Foundation, either version 3 of the License, or #", "disables RabbitMQ plugins version_added: \"1.1\" author: <NAME> options: names: description:", "module): self.module = module if module.params['prefix']: self._rabbitmq_plugins = module.params['prefix'] +", "RabbitMqPlugins(module) enabled_plugins = rabbitmq_plugins.get_all() enabled = [] disabled = []", "dict( names=dict(required=True, aliases=['name']), new_only=dict(default='no', type='bool'), state=dict(default='enabled', choices=['enabled', 'disabled']), prefix=dict(required=False, default=None)", "to a Rabbit required: false version_added: \"1.3\" default: null '''", "0 or len(disabled) > 0 module.exit_json(changed=changed, enabled=enabled, disabled=disabled) # import", "+ \"/sbin/rabbitmq-plugins\" else: self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True) def _exec(self, args,", "\"1.1\" author: <NAME> options: names: description: - Comma-separated list of", "default=None) ) module = AnsibleModule( argument_spec=arg_spec, supports_check_mode=True ) names =", "supports_check_mode=True ) names = module.params['names'].split(',') new_only = module.params['new_only'] state =", "choices: [enabled, disabled] prefix: description: - Specify a custom install", "name): self._exec(['disable', name]) def main(): arg_spec = dict( names=dict(required=True, aliases=['name']),", "as published by # the Free Software Foundation, either version", "version 3 of the License, or # (at your option)", "file is part of Ansible # # Ansible is free", "enabled choices: [enabled, disabled] prefix: description: - Specify a custom", "Ansible is distributed in the hope that it will be", "# (c) 2013, Chatham Financial <<EMAIL>> # # This file", "Ansible is free software: you can redistribute it and/or modify", "(c) 2013, Chatham Financial <<EMAIL>> # # This file is", "enabled.append(name) else: for plugin in enabled_plugins: if plugin in names:", "in names: if name not in enabled_plugins: 
rabbitmq_plugins.enable(name) enabled.append(name) else:", "module.params['names'].split(',') new_only = module.params['new_only'] state = module.params['state'] rabbitmq_plugins = RabbitMqPlugins(module)", "'disabled']), prefix=dict(required=False, default=None) ) module = AnsibleModule( argument_spec=arg_spec, supports_check_mode=True )", "a copy of the GNU General Public License # along", "Adds or removes plugins to RabbitMQ description: - Enables or", "run_in_check_mode): cmd = [self._rabbitmq_plugins] rc, out, err = self.module.run_command(cmd +", "Public License as published by # the Free Software Foundation,", "(at your option) any later version. # # Ansible is", "--- module: rabbitmq_plugin short_description: Adds or removes plugins to RabbitMQ", "enable(self, name): self._exec(['enable', name]) def disable(self, name): self._exec(['disable', name]) def", "names list required: false default: \"no\" choices: [ \"yes\", \"no\"", "later version. # # Ansible is distributed in the hope", "have received a copy of the GNU General Public License", "- Enables or disables RabbitMQ plugins version_added: \"1.1\" author: <NAME>", "or disabled required: false default: enabled choices: [enabled, disabled] prefix:", "[] disabled = [] if state == 'enabled': if not", "rabbitmq_plugins.get_all() enabled = [] disabled = [] if state ==", "prefix=dict(required=False, default=None) ) module = AnsibleModule( argument_spec=arg_spec, supports_check_mode=True ) names", "in the hope that it will be useful, # but", "'-E', '-m'], True) def enable(self, name): self._exec(['enable', name]) def disable(self,", "names=rabbitmq_management state=enabled ''' class RabbitMqPlugins(object): def __init__(self, module): self.module =", "def enable(self, name): self._exec(['enable', name]) def disable(self, name): self._exec(['disable', name])", "module.params['prefix'] + \"/sbin/rabbitmq-plugins\" else: self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True) def _exec(self,", 
"along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = '''", "the names list required: false default: \"no\" choices: [ \"yes\",", "def __init__(self, module): self.module = module if module.params['prefix']: self._rabbitmq_plugins =", "rabbitmq_plugin: names=rabbitmq_management state=enabled ''' class RabbitMqPlugins(object): def __init__(self, module): self.module", "list required: false default: \"no\" choices: [ \"yes\", \"no\" ]", "for name in names: if name not in enabled_plugins: rabbitmq_plugins.enable(name)", "for plugin in enabled_plugins: if plugin in names: rabbitmq_plugins.disable(plugin) disabled.append(plugin)", "the rabbitmq_management plugin - rabbitmq_plugin: names=rabbitmq_management state=enabled ''' class RabbitMqPlugins(object):", "self._rabbitmq_plugins = module.params['prefix'] + \"/sbin/rabbitmq-plugins\" else: self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True)", "new_only: for plugin in enabled_plugins: if plugin not in names:", "<<EMAIL>> # # This file is part of Ansible #", "= AnsibleModule( argument_spec=arg_spec, supports_check_mode=True ) names = module.params['names'].split(',') new_only =", "WARRANTY; without even the implied warranty of # MERCHANTABILITY or", "module.params['new_only'] state = module.params['state'] rabbitmq_plugins = RabbitMqPlugins(module) enabled_plugins = rabbitmq_plugins.get_all()", "= module.params['new_only'] state = module.params['state'] rabbitmq_plugins = RabbitMqPlugins(module) enabled_plugins =", "state = module.params['state'] rabbitmq_plugins = RabbitMqPlugins(module) enabled_plugins = rabbitmq_plugins.get_all() enabled", "#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, Chatham", "version. # # Ansible is distributed in the hope that", "disabled.append(plugin) changed = len(enabled) > 0 or len(disabled) > 0", "GNU General Public License for more details. # # You", "License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
DOCUMENTATION", "disabled required: false default: enabled choices: [enabled, disabled] prefix: description:", "name in names: if name not in enabled_plugins: rabbitmq_plugins.enable(name) enabled.append(name)", "is distributed in the hope that it will be useful,", "3 of the License, or # (at your option) any", "= len(enabled) > 0 or len(disabled) > 0 module.exit_json(changed=changed, enabled=enabled,", "not in names: rabbitmq_plugins.disable(plugin) disabled.append(plugin) for name in names: if", "cmd = [self._rabbitmq_plugins] rc, out, err = self.module.run_command(cmd + args,", "# # You should have received a copy of the", "# # Ansible is free software: you can redistribute it", "if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): cmd = [self._rabbitmq_plugins]", "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the", "def main(): arg_spec = dict( names=dict(required=True, aliases=['name']), new_only=dict(default='no', type='bool'), state=dict(default='enabled',", "without even the implied warranty of # MERCHANTABILITY or FITNESS", "== 'enabled': if not new_only: for plugin in enabled_plugins: if", "are to be enabled or disabled required: false default: enabled", "and run_in_check_mode): cmd = [self._rabbitmq_plugins] rc, out, err = self.module.run_command(cmd" ]
[ "search_entity[StaticFields.ID]: return entity @staticmethod def _equal_entities(old_entity, new_entity): # TODO(iafek): compare", "self.make_pickleable(self._get_and_cache_changed_entities(), STATIC_DATASOURCE, datasource_action) def _get_and_cache_all_entities(self): self.entities_cache = self._get_all_entities() return self.entities_cache", "old_entity_copy = old_entity.copy() old_entity_copy[DSProps.EVENT_TYPE] = GraphAction.DELETE_ENTITY changed_entities.append(old_entity_copy) self.entities_cache = new_entities", "new_entities: old_entity = self._find_entity(new_entity, self.entities_cache) if old_entity: # Add modified", "entities for entity in entities: if entity[StaticFields.TYPE] == search_entity[StaticFields.TYPE] \\", "= {} for entity in entities: cls._pack_entity(entities_dict, entity) for rel", "from itertools import chain from six.moves import reduce from oslo_log", "entities, relationships): entities_dict = {} for entity in entities: cls._pack_entity(entities_dict,", "file_utils LOG = log.getLogger(__name__) class StaticDriver(DriverBase): # base fields are", "entities, others are treated as metadata BASE_FIELDS = {StaticFields.STATIC_ID, StaticFields.TYPE,", "'host.nova', 'id': 1} result={'relationship_type': 'attached', 'source': 's1', 'target': {'static_id': 'h1',", "as metadata BASE_FIELDS = {StaticFields.STATIC_ID, StaticFields.TYPE, StaticFields.ID} def __init__(self, conf):", "'target': 'r1', 'relationship_type': 'attached'} neighbor={'static_id': 'h1', 'vitrage_type': 'host.nova', 'id': 1}", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "static entities for entity in entities: if entity[StaticFields.TYPE] == search_entity[StaticFields.TYPE]", "conf): super(StaticDriver, self).__init__() self.cfg = conf self.entities_cache = [] @staticmethod", "specific language governing permissions and limitations # under the License.", "# not use this file except in compliance with the", "= 
file_utils.list_files(self.cfg.static.directory, '.yaml', True) return list(reduce(chain, [self._get_entities_from_file(path) for path in", "from vitrage.common.constants import DatasourceProperties as DSProps from vitrage.common.constants import GraphAction", "_get_all_entities(self): files = file_utils.list_files(self.cfg.static.directory, '.yaml', True) return list(reduce(chain, [self._get_entities_from_file(path) for", "entity[StaticFields.STATIC_ID] if static_id not in entities_dict: metadata = {key: value", "class StaticDriver(DriverBase): # base fields are required for all entities,", "in compliance with the License. You may obtain # a", "# Copyright 2016 - Nokia, ZTE # # Licensed under", "import file as file_utils LOG = log.getLogger(__name__) class StaticDriver(DriverBase): #", "cls._pack_rel(entities_dict, rel) return entities_dict.values() @classmethod def _pack_entity(cls, entities_dict, entity): static_id", "not self._equal_entities(old_entity, new_entity): changed_entities.append(new_entity.copy()) else: # Add new entities changed_entities.append(new_entity.copy())", "new_entities return changed_entities @classmethod def _get_entities_from_file(cls, path): config = file_utils.load_yaml_file(path)", "You may obtain # a copy of the License at", "# Add new entities changed_entities.append(new_entity.copy()) # Add deleted entities for", "None return rel @staticmethod def _find_entity(search_entity, entities): # naive implementation", "entity[StaticFields.ID] == \\ search_entity[StaticFields.ID]: return entity @staticmethod def _equal_entities(old_entity, new_entity):", "file_utils.load_yaml_file(path) if not cls._is_valid_config(config): LOG.warning(\"Skipped invalid config (possible obsoleted): {}\"", "reuse template validation return StaticFields.DEFINITIONS in config @staticmethod def get_event_types():", "\\ old_entity.get(StaticFields.ID) == \\ new_entity.get(StaticFields.ID) and \\ old_entity.get(StaticFields.NAME) == \\", "rel @staticmethod def 
_find_entity(search_entity, entities): # naive implementation since we", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "# Add deleted entities for old_entity in self.entities_cache: if not", "import StaticFields from vitrage.utils import file as file_utils LOG =", "= entity entity[StaticFields.RELATIONSHIPS] = [] entity[StaticFields.METADATA] = metadata else: LOG.warning(\"Skipped", "relationships): entities_dict = {} for entity in entities: cls._pack_entity(entities_dict, entity)", "files = file_utils.list_files(self.cfg.static.directory, '.yaml', True) return list(reduce(chain, [self._get_entities_from_file(path) for path", "'.yaml', True) return list(reduce(chain, [self._get_entities_from_file(path) for path in files], []))", "and entity[StaticFields.ID] == \\ search_entity[StaticFields.ID]: return entity @staticmethod def _equal_entities(old_entity,", "under the License is distributed on an \"AS IS\" BASIS,", "DSProps from vitrage.common.constants import GraphAction from vitrage.datasources.driver_base import DriverBase from", "# Add modified entities if not self._equal_entities(old_entity, new_entity): changed_entities.append(new_entity.copy()) else:", "rel = rel.copy() if rel[StaticFields.SOURCE] == neighbor[StaticFields.STATIC_ID]: rel[StaticFields.SOURCE] = neighbor", "return rel @staticmethod def _find_entity(search_entity, entities): # naive implementation since", "# naive implementation since we don't expect many static entities", "this file except in compliance with the License. You may", "language governing permissions and limitations # under the License. 
from", "@staticmethod def _expand_neighbor(rel, neighbor): \"\"\"Expand config id to neighbor entity", "metadata else: LOG.warning(\"Skipped duplicated entity: {}\".format(entity)) @classmethod def _pack_rel(cls, entities_dict,", "def enrich_event(self, event, event_type): pass def get_all(self, datasource_action): return self.make_pickleable(self._get_and_cache_all_entities(),", "chain from six.moves import reduce from oslo_log import log from", "def _equal_entities(old_entity, new_entity): # TODO(iafek): compare also the relationships return", "rel[StaticFields.SOURCE] == neighbor[StaticFields.STATIC_ID]: rel[StaticFields.SOURCE] = neighbor elif rel[StaticFields.TARGET] == neighbor[StaticFields.STATIC_ID]:", "config @staticmethod def get_event_types(): return [] def enrich_event(self, event, event_type):", "yaml schema or reuse template validation return StaticFields.DEFINITIONS in config", "fields are required for all entities, others are treated as", "= file_utils.load_yaml_file(path) if not cls._is_valid_config(config): LOG.warning(\"Skipped invalid config (possible obsoleted):", "'attached'} neighbor={'static_id': 'h1', 'vitrage_type': 'host.nova', 'id': 1} result={'relationship_type': 'attached', 'source':", "entities_dict[source_id][StaticFields.RELATIONSHIPS].append(rel) else: source, target = entities_dict[source_id], entities_dict[target_id] source[StaticFields.RELATIONSHIPS].append( cls._expand_neighbor(rel, target))", "for relationship {}\" .format(neighbor, rel)) return None return rel @staticmethod", "new entities changed_entities.append(new_entity.copy()) # Add deleted entities for old_entity in", "software # distributed under the License is distributed on an", "(the \"License\"); you may # not use this file except", "invalid config (possible obsoleted): {}\" .format(path)) return [] definitions =", "entities_dict[source_id], entities_dict[target_id] source[StaticFields.RELATIONSHIPS].append( cls._expand_neighbor(rel, target)) @staticmethod def 
_expand_neighbor(rel, neighbor): \"\"\"Expand", "enrich_event(self, event, event_type): pass def get_all(self, datasource_action): return self.make_pickleable(self._get_and_cache_all_entities(), STATIC_DATASOURCE,", "old_entity.get(StaticFields.TYPE) == \\ new_entity.get(StaticFields.TYPE) and \\ old_entity.get(StaticFields.ID) == \\ new_entity.get(StaticFields.ID)", "DatasourceProperties as DSProps from vitrage.common.constants import GraphAction from vitrage.datasources.driver_base import", "'target': {'static_id': 'h1', 'vitrage_type': 'host.nova', 'id': 1}} \"\"\" rel =", "file except in compliance with the License. You may obtain", "entity[StaticFields.TYPE] == search_entity[StaticFields.TYPE] \\ and entity[StaticFields.ID] == \\ search_entity[StaticFields.ID]: return", "= rel[StaticFields.SOURCE] target_id = rel[StaticFields.TARGET] if source_id == target_id: #", "'s1', 'target': 'r1', 'relationship_type': 'attached'} neighbor={'static_id': 'h1', 'vitrage_type': 'host.nova', 'id':", "we don't expect many static entities for entity in entities:", "self._equal_entities(old_entity, new_entity): changed_entities.append(new_entity.copy()) else: # Add new entities changed_entities.append(new_entity.copy()) #", "OR CONDITIONS OF ANY KIND, either express or implied. 
See", "the specific language governing permissions and limitations # under the", "value in entity.items() if key not in cls.BASE_FIELDS} entities_dict[static_id] =", "'id': 1} result={'relationship_type': 'attached', 'source': 's1', 'target': {'static_id': 'h1', 'vitrage_type':", "in entities: if entity[StaticFields.TYPE] == search_entity[StaticFields.TYPE] \\ and entity[StaticFields.ID] ==", "'id': 1}} \"\"\" rel = rel.copy() if rel[StaticFields.SOURCE] == neighbor[StaticFields.STATIC_ID]:", "under the Apache License, Version 2.0 (the \"License\"); you may", "entities changed_entities.append(new_entity.copy()) # Add deleted entities for old_entity in self.entities_cache:", "def get_all(self, datasource_action): return self.make_pickleable(self._get_and_cache_all_entities(), STATIC_DATASOURCE, datasource_action) def get_changes(self, datasource_action):", "= rel[StaticFields.TARGET] if source_id == target_id: # self pointing relationship", "= log.getLogger(__name__) class StaticDriver(DriverBase): # base fields are required for", "if key not in cls.BASE_FIELDS} entities_dict[static_id] = entity entity[StaticFields.RELATIONSHIPS] =", "compare also the relationships return old_entity.get(StaticFields.TYPE) == \\ new_entity.get(StaticFields.TYPE) and", "@classmethod def _pack(cls, entities, relationships): entities_dict = {} for entity", "'vitrage_type': 'host.nova', 'id': 1}} \"\"\" rel = rel.copy() if rel[StaticFields.SOURCE]", "\\ and entity[StaticFields.ID] == \\ search_entity[StaticFields.ID]: return entity @staticmethod def", "'attached', 'source': 's1', 'target': {'static_id': 'h1', 'vitrage_type': 'host.nova', 'id': 1}}", "= neighbor elif rel[StaticFields.TARGET] == neighbor[StaticFields.STATIC_ID]: rel[StaticFields.TARGET] = neighbor else:", "new_entities): old_entity_copy = old_entity.copy() old_entity_copy[DSProps.EVENT_TYPE] = GraphAction.DELETE_ENTITY changed_entities.append(old_entity_copy) self.entities_cache =", "else: # TODO(yujunz) raise exception 
and ignore invalid relationship LOG.error(\"Invalid", "return cls._pack(entities, relationships) @classmethod def _pack(cls, entities, relationships): entities_dict =", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "else: LOG.warning(\"Skipped duplicated entity: {}\".format(entity)) @classmethod def _pack_rel(cls, entities_dict, rel):", "[self._get_entities_from_file(path) for path in files], [])) def _get_and_cache_changed_entities(self): changed_entities =", "obsoleted): {}\" .format(path)) return [] definitions = config[StaticFields.DEFINITIONS] entities =", "== neighbor[StaticFields.STATIC_ID]: rel[StaticFields.TARGET] = neighbor else: # TODO(yujunz) raise exception", "cls.BASE_FIELDS} entities_dict[static_id] = entity entity[StaticFields.RELATIONSHIPS] = [] entity[StaticFields.METADATA] = metadata", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "to in writing, software # distributed under the License is", "{} for entity in entities: cls._pack_entity(entities_dict, entity) for rel in", "datasource_action): return self.make_pickleable(self._get_and_cache_changed_entities(), STATIC_DATASOURCE, datasource_action) def _get_and_cache_all_entities(self): self.entities_cache = self._get_all_entities()", "Add deleted entities for old_entity in self.entities_cache: if not self._find_entity(old_entity,", "@staticmethod def _equal_entities(old_entity, new_entity): # TODO(iafek): compare also the relationships", "== \\ new_entity.get(StaticFields.ID) and \\ old_entity.get(StaticFields.NAME) == \\ new_entity.get(StaticFields.NAME) and", "since we don't expect many static entities for entity in", "cls._expand_neighbor(rel, target)) @staticmethod def _expand_neighbor(rel, neighbor): \"\"\"Expand config id to", "from oslo_log import log from vitrage.common.constants import DatasourceProperties as DSProps", "{}\".format(entity)) @classmethod def 
_pack_rel(cls, entities_dict, rel): source_id = rel[StaticFields.SOURCE] target_id", "or agreed to in writing, software # distributed under the", "required by applicable law or agreed to in writing, software", "get_changes(self, datasource_action): return self.make_pickleable(self._get_and_cache_changed_entities(), STATIC_DATASOURCE, datasource_action) def _get_and_cache_all_entities(self): self.entities_cache =", "StaticDriver(DriverBase): # base fields are required for all entities, others", "changed_entities @classmethod def _get_entities_from_file(cls, path): config = file_utils.load_yaml_file(path) if not", "rel[StaticFields.SOURCE] = neighbor elif rel[StaticFields.TARGET] == neighbor[StaticFields.STATIC_ID]: rel[StaticFields.TARGET] = neighbor", "datasource_action) def get_changes(self, datasource_action): return self.make_pickleable(self._get_and_cache_changed_entities(), STATIC_DATASOURCE, datasource_action) def _get_and_cache_all_entities(self):", "entities_dict, rel): source_id = rel[StaticFields.SOURCE] target_id = rel[StaticFields.TARGET] if source_id", "Apache License, Version 2.0 (the \"License\"); you may # not", "neighbor[StaticFields.STATIC_ID]: rel[StaticFields.TARGET] = neighbor else: # TODO(yujunz) raise exception and", "vitrage.datasources.static import STATIC_DATASOURCE from vitrage.datasources.static import StaticFields from vitrage.utils import", "relationship entities_dict[source_id][StaticFields.RELATIONSHIPS].append(rel) else: source, target = entities_dict[source_id], entities_dict[target_id] source[StaticFields.RELATIONSHIPS].append( cls._expand_neighbor(rel,", "from vitrage.common.constants import GraphAction from vitrage.datasources.driver_base import DriverBase from vitrage.datasources.static", "@classmethod def _pack_entity(cls, entities_dict, entity): static_id = entity[StaticFields.STATIC_ID] if static_id", "agreed to in writing, software # distributed under the License", "validation return StaticFields.DEFINITIONS in config 
@staticmethod def get_event_types(): return []", "= definitions[StaticFields.RELATIONSHIPS] return cls._pack(entities, relationships) @classmethod def _pack(cls, entities, relationships):", "log.getLogger(__name__) class StaticDriver(DriverBase): # base fields are required for all", "distributed under the License is distributed on an \"AS IS\"", "event_type): pass def get_all(self, datasource_action): return self.make_pickleable(self._get_and_cache_all_entities(), STATIC_DATASOURCE, datasource_action) def", "entity entity[StaticFields.RELATIONSHIPS] = [] entity[StaticFields.METADATA] = metadata else: LOG.warning(\"Skipped duplicated", "{key: value for key, value in entity.items() if key not", "new_entity): changed_entities.append(new_entity.copy()) else: # Add new entities changed_entities.append(new_entity.copy()) # Add", "base fields are required for all entities, others are treated", "old_entity.copy() old_entity_copy[DSProps.EVENT_TYPE] = GraphAction.DELETE_ENTITY changed_entities.append(old_entity_copy) self.entities_cache = new_entities return changed_entities", "if old_entity: # Add modified entities if not self._equal_entities(old_entity, new_entity):", "License, Version 2.0 (the \"License\"); you may # not use", "CONDITIONS OF ANY KIND, either express or implied. 
See the", "for entity in entities: cls._pack_entity(entities_dict, entity) for rel in relationships:", "[])) def _get_and_cache_changed_entities(self): changed_entities = [] new_entities = self._get_all_entities() for", "= [] @staticmethod def _is_valid_config(config): \"\"\"check for validity of configuration\"\"\"", "not use this file except in compliance with the License.", "don't expect many static entities for entity in entities: if", "= old_entity.copy() old_entity_copy[DSProps.EVENT_TYPE] = GraphAction.DELETE_ENTITY changed_entities.append(old_entity_copy) self.entities_cache = new_entities return", "for key, value in entity.items() if key not in cls.BASE_FIELDS}", "writing, software # distributed under the License is distributed on", "entity in entities: cls._pack_entity(entities_dict, entity) for rel in relationships: cls._pack_rel(entities_dict,", "datasource_action): return self.make_pickleable(self._get_and_cache_all_entities(), STATIC_DATASOURCE, datasource_action) def get_changes(self, datasource_action): return self.make_pickleable(self._get_and_cache_changed_entities(),", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "GraphAction.DELETE_ENTITY changed_entities.append(old_entity_copy) self.entities_cache = new_entities return changed_entities @classmethod def _get_entities_from_file(cls,", "config = file_utils.load_yaml_file(path) if not cls._is_valid_config(config): LOG.warning(\"Skipped invalid config (possible", "= rel.copy() if rel[StaticFields.SOURCE] == neighbor[StaticFields.STATIC_ID]: rel[StaticFields.SOURCE] = neighbor elif", "the License. You may obtain # a copy of the", "vitrage.common.constants import GraphAction from vitrage.datasources.driver_base import DriverBase from vitrage.datasources.static import", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "use this file except in compliance with the License. 
You", "= config[StaticFields.DEFINITIONS] entities = definitions[StaticFields.ENTITIES] relationships = definitions[StaticFields.RELATIONSHIPS] return cls._pack(entities,", "check with yaml schema or reuse template validation return StaticFields.DEFINITIONS", "return entities_dict.values() @classmethod def _pack_entity(cls, entities_dict, entity): static_id = entity[StaticFields.STATIC_ID]", "in new_entities: old_entity = self._find_entity(new_entity, self.entities_cache) if old_entity: # Add", "changed_entities.append(new_entity.copy()) # Add deleted entities for old_entity in self.entities_cache: if", "in entities_dict: metadata = {key: value for key, value in", "self pointing relationship entities_dict[source_id][StaticFields.RELATIONSHIPS].append(rel) else: source, target = entities_dict[source_id], entities_dict[target_id]", "def _pack_entity(cls, entities_dict, entity): static_id = entity[StaticFields.STATIC_ID] if static_id not", "duplicated entity: {}\".format(entity)) @classmethod def _pack_rel(cls, entities_dict, rel): source_id =", "vitrage.utils import file as file_utils LOG = log.getLogger(__name__) class StaticDriver(DriverBase):", "as file_utils LOG = log.getLogger(__name__) class StaticDriver(DriverBase): # base fields", "entities: if entity[StaticFields.TYPE] == search_entity[StaticFields.TYPE] \\ and entity[StaticFields.ID] == \\", "= entity[StaticFields.STATIC_ID] if static_id not in entities_dict: metadata = {key:", "{}\" .format(neighbor, rel)) return None return rel @staticmethod def _find_entity(search_entity,", "STATIC_DATASOURCE, datasource_action) def get_changes(self, datasource_action): return self.make_pickleable(self._get_and_cache_changed_entities(), STATIC_DATASOURCE, datasource_action) def", "files], [])) def _get_and_cache_changed_entities(self): changed_entities = [] new_entities = self._get_all_entities()", "old_entity.get(StaticFields.NAME) == \\ new_entity.get(StaticFields.NAME) and \\ old_entity.get(StaticFields.STATE) == \\ 
new_entity.get(StaticFields.STATE)", "Nokia, ZTE # # Licensed under the Apache License, Version", "@staticmethod def _is_valid_config(config): \"\"\"check for validity of configuration\"\"\" # TODO(yujunz)", "the relationships return old_entity.get(StaticFields.TYPE) == \\ new_entity.get(StaticFields.TYPE) and \\ old_entity.get(StaticFields.ID)", "License is distributed on an \"AS IS\" BASIS, WITHOUT #", "KIND, either express or implied. See the # License for", "# TODO(yujunz) raise exception and ignore invalid relationship LOG.error(\"Invalid neighbor", "'host.nova', 'id': 1}} \"\"\" rel = rel.copy() if rel[StaticFields.SOURCE] ==", "in config @staticmethod def get_event_types(): return [] def enrich_event(self, event,", "expect many static entities for entity in entities: if entity[StaticFields.TYPE]", "neighbor else: # TODO(yujunz) raise exception and ignore invalid relationship", "\"License\"); you may # not use this file except in", "LOG.warning(\"Skipped duplicated entity: {}\".format(entity)) @classmethod def _pack_rel(cls, entities_dict, rel): source_id", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "'h1', 'vitrage_type': 'host.nova', 'id': 1}} \"\"\" rel = rel.copy() if", "entities_dict[static_id] = entity entity[StaticFields.RELATIONSHIPS] = [] entity[StaticFields.METADATA] = metadata else:", "express or implied. 
See the # License for the specific", "metadata = {key: value for key, value in entity.items() if", "the Apache License, Version 2.0 (the \"License\"); you may #", "[] entity[StaticFields.METADATA] = metadata else: LOG.warning(\"Skipped duplicated entity: {}\".format(entity)) @classmethod", "oslo_log import log from vitrage.common.constants import DatasourceProperties as DSProps from", "def _get_and_cache_changed_entities(self): changed_entities = [] new_entities = self._get_all_entities() for new_entity", "\"\"\" rel = rel.copy() if rel[StaticFields.SOURCE] == neighbor[StaticFields.STATIC_ID]: rel[StaticFields.SOURCE] =", "import DriverBase from vitrage.datasources.static import STATIC_DATASOURCE from vitrage.datasources.static import StaticFields", "_expand_neighbor(rel, neighbor): \"\"\"Expand config id to neighbor entity rel={'source': 's1',", "StaticFields.ID} def __init__(self, conf): super(StaticDriver, self).__init__() self.cfg = conf self.entities_cache", "value for key, value in entity.items() if key not in", "neighbor): \"\"\"Expand config id to neighbor entity rel={'source': 's1', 'target':", "for rel in relationships: cls._pack_rel(entities_dict, rel) return entities_dict.values() @classmethod def", "\\ new_entity.get(StaticFields.ID) and \\ old_entity.get(StaticFields.NAME) == \\ new_entity.get(StaticFields.NAME) and \\", "See the # License for the specific language governing permissions", "are required for all entities, others are treated as metadata", "relationships: cls._pack_rel(entities_dict, rel) return entities_dict.values() @classmethod def _pack_entity(cls, entities_dict, entity):", "# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "self._get_all_entities() return self.entities_cache def _get_all_entities(self): files = file_utils.list_files(self.cfg.static.directory, '.yaml', True)", "self.entities_cache def _get_all_entities(self): files = file_utils.list_files(self.cfg.static.directory, '.yaml', True) return 
list(reduce(chain,", "entity) for rel in relationships: cls._pack_rel(entities_dict, rel) return entities_dict.values() @classmethod", "in entities: cls._pack_entity(entities_dict, entity) for rel in relationships: cls._pack_rel(entities_dict, rel)", "source[StaticFields.RELATIONSHIPS].append( cls._expand_neighbor(rel, target)) @staticmethod def _expand_neighbor(rel, neighbor): \"\"\"Expand config id", "\\ new_entity.get(StaticFields.TYPE) and \\ old_entity.get(StaticFields.ID) == \\ new_entity.get(StaticFields.ID) and \\", "return [] definitions = config[StaticFields.DEFINITIONS] entities = definitions[StaticFields.ENTITIES] relationships =", "import log from vitrage.common.constants import DatasourceProperties as DSProps from vitrage.common.constants", "@staticmethod def _find_entity(search_entity, entities): # naive implementation since we don't", "law or agreed to in writing, software # distributed under", "ignore invalid relationship LOG.error(\"Invalid neighbor {} for relationship {}\" .format(neighbor,", "neighbor elif rel[StaticFields.TARGET] == neighbor[StaticFields.STATIC_ID]: rel[StaticFields.TARGET] = neighbor else: #", "old_entity_copy[DSProps.EVENT_TYPE] = GraphAction.DELETE_ENTITY changed_entities.append(old_entity_copy) self.entities_cache = new_entities return changed_entities @classmethod", "of configuration\"\"\" # TODO(yujunz) check with yaml schema or reuse", "rel) return entities_dict.values() @classmethod def _pack_entity(cls, entities_dict, entity): static_id =", "and limitations # under the License. 
from itertools import chain", "\\ search_entity[StaticFields.ID]: return entity @staticmethod def _equal_entities(old_entity, new_entity): # TODO(iafek):", "config id to neighbor entity rel={'source': 's1', 'target': 'r1', 'relationship_type':", "\\ old_entity.get(StaticFields.NAME) == \\ new_entity.get(StaticFields.NAME) and \\ old_entity.get(StaticFields.STATE) == \\", "for entity in entities: if entity[StaticFields.TYPE] == search_entity[StaticFields.TYPE] \\ and", "or reuse template validation return StaticFields.DEFINITIONS in config @staticmethod def", "implied. See the # License for the specific language governing", "@staticmethod def get_event_types(): return [] def enrich_event(self, event, event_type): pass", "itertools import chain from six.moves import reduce from oslo_log import", "relationships) @classmethod def _pack(cls, entities, relationships): entities_dict = {} for", "event, event_type): pass def get_all(self, datasource_action): return self.make_pickleable(self._get_and_cache_all_entities(), STATIC_DATASOURCE, datasource_action)", "self._get_all_entities() for new_entity in new_entities: old_entity = self._find_entity(new_entity, self.entities_cache) if", "def _expand_neighbor(rel, neighbor): \"\"\"Expand config id to neighbor entity rel={'source':", "[] @staticmethod def _is_valid_config(config): \"\"\"check for validity of configuration\"\"\" #", "entity: {}\".format(entity)) @classmethod def _pack_rel(cls, entities_dict, rel): source_id = rel[StaticFields.SOURCE]", "vitrage.common.constants import DatasourceProperties as DSProps from vitrage.common.constants import GraphAction from", "changed_entities = [] new_entities = self._get_all_entities() for new_entity in new_entities:", ".format(path)) return [] definitions = config[StaticFields.DEFINITIONS] entities = definitions[StaticFields.ENTITIES] relationships", "if rel[StaticFields.SOURCE] == neighbor[StaticFields.STATIC_ID]: rel[StaticFields.SOURCE] = neighbor elif rel[StaticFields.TARGET] 
==", "super(StaticDriver, self).__init__() self.cfg = conf self.entities_cache = [] @staticmethod def", "for all entities, others are treated as metadata BASE_FIELDS =", "# base fields are required for all entities, others are", "rel[StaticFields.SOURCE] target_id = rel[StaticFields.TARGET] if source_id == target_id: # self", "Copyright 2016 - Nokia, ZTE # # Licensed under the", "import DatasourceProperties as DSProps from vitrage.common.constants import GraphAction from vitrage.datasources.driver_base", "in files], [])) def _get_and_cache_changed_entities(self): changed_entities = [] new_entities =", "config (possible obsoleted): {}\" .format(path)) return [] definitions = config[StaticFields.DEFINITIONS]", "== neighbor[StaticFields.STATIC_ID]: rel[StaticFields.SOURCE] = neighbor elif rel[StaticFields.TARGET] == neighbor[StaticFields.STATIC_ID]: rel[StaticFields.TARGET]", "raise exception and ignore invalid relationship LOG.error(\"Invalid neighbor {} for", "self.make_pickleable(self._get_and_cache_all_entities(), STATIC_DATASOURCE, datasource_action) def get_changes(self, datasource_action): return self.make_pickleable(self._get_and_cache_changed_entities(), STATIC_DATASOURCE, datasource_action)", "validity of configuration\"\"\" # TODO(yujunz) check with yaml schema or", "return old_entity.get(StaticFields.TYPE) == \\ new_entity.get(StaticFields.TYPE) and \\ old_entity.get(StaticFields.ID) == \\", "def get_changes(self, datasource_action): return self.make_pickleable(self._get_and_cache_changed_entities(), STATIC_DATASOURCE, datasource_action) def _get_and_cache_all_entities(self): self.entities_cache", "static_id = entity[StaticFields.STATIC_ID] if static_id not in entities_dict: metadata =", "# self pointing relationship entities_dict[source_id][StaticFields.RELATIONSHIPS].append(rel) else: source, target = entities_dict[source_id],", "= {key: value for key, value in entity.items() if key", "entities_dict = {} for entity in entities: 
cls._pack_entity(entities_dict, entity) for", "= [] entity[StaticFields.METADATA] = metadata else: LOG.warning(\"Skipped duplicated entity: {}\".format(entity))", "entities_dict: metadata = {key: value for key, value in entity.items()", "def _is_valid_config(config): \"\"\"check for validity of configuration\"\"\" # TODO(yujunz) check", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "[] new_entities = self._get_all_entities() for new_entity in new_entities: old_entity =", "relationships = definitions[StaticFields.RELATIONSHIPS] return cls._pack(entities, relationships) @classmethod def _pack(cls, entities,", "elif rel[StaticFields.TARGET] == neighbor[StaticFields.STATIC_ID]: rel[StaticFields.TARGET] = neighbor else: # TODO(yujunz)", "# # Licensed under the Apache License, Version 2.0 (the", "return [] def enrich_event(self, event, event_type): pass def get_all(self, datasource_action):", "def get_event_types(): return [] def enrich_event(self, event, event_type): pass def", "self._find_entity(old_entity, new_entities): old_entity_copy = old_entity.copy() old_entity_copy[DSProps.EVENT_TYPE] = GraphAction.DELETE_ENTITY changed_entities.append(old_entity_copy) self.entities_cache", "= {StaticFields.STATIC_ID, StaticFields.TYPE, StaticFields.ID} def __init__(self, conf): super(StaticDriver, self).__init__() self.cfg", "entities: cls._pack_entity(entities_dict, entity) for rel in relationships: cls._pack_rel(entities_dict, rel) return", "path in files], [])) def _get_and_cache_changed_entities(self): changed_entities = [] new_entities", "relationship LOG.error(\"Invalid neighbor {} for relationship {}\" .format(neighbor, rel)) return", "{} for relationship {}\" .format(neighbor, rel)) return None return rel", "to neighbor entity rel={'source': 's1', 'target': 'r1', 'relationship_type': 'attached'} neighbor={'static_id':", "self.entities_cache: if not 
self._find_entity(old_entity, new_entities): old_entity_copy = old_entity.copy() old_entity_copy[DSProps.EVENT_TYPE] =", "reduce from oslo_log import log from vitrage.common.constants import DatasourceProperties as", "obtain # a copy of the License at # #", "def _get_and_cache_all_entities(self): self.entities_cache = self._get_all_entities() return self.entities_cache def _get_all_entities(self): files", "also the relationships return old_entity.get(StaticFields.TYPE) == \\ new_entity.get(StaticFields.TYPE) and \\", "else: source, target = entities_dict[source_id], entities_dict[target_id] source[StaticFields.RELATIONSHIPS].append( cls._expand_neighbor(rel, target)) @staticmethod", "required for all entities, others are treated as metadata BASE_FIELDS", "and ignore invalid relationship LOG.error(\"Invalid neighbor {} for relationship {}\"", "entities for old_entity in self.entities_cache: if not self._find_entity(old_entity, new_entities): old_entity_copy", "Version 2.0 (the \"License\"); you may # not use this", "modified entities if not self._equal_entities(old_entity, new_entity): changed_entities.append(new_entity.copy()) else: # Add", "else: # Add new entities changed_entities.append(new_entity.copy()) # Add deleted entities", "'s1', 'target': {'static_id': 'h1', 'vitrage_type': 'host.nova', 'id': 1}} \"\"\" rel", "neighbor={'static_id': 'h1', 'vitrage_type': 'host.nova', 'id': 1} result={'relationship_type': 'attached', 'source': 's1',", "and \\ old_entity.get(StaticFields.NAME) == \\ new_entity.get(StaticFields.NAME) and \\ old_entity.get(StaticFields.STATE) ==", "License for the specific language governing permissions and limitations #", "not cls._is_valid_config(config): LOG.warning(\"Skipped invalid config (possible obsoleted): {}\" .format(path)) return", "vitrage.datasources.static import StaticFields from vitrage.utils import file as file_utils LOG", "rel[StaticFields.TARGET] if source_id == target_id: # self pointing relationship 
entities_dict[source_id][StaticFields.RELATIONSHIPS].append(rel)", "under the License. from itertools import chain from six.moves import", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "many static entities for entity in entities: if entity[StaticFields.TYPE] ==", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "= self._get_all_entities() for new_entity in new_entities: old_entity = self._find_entity(new_entity, self.entities_cache)", "file_utils.list_files(self.cfg.static.directory, '.yaml', True) return list(reduce(chain, [self._get_entities_from_file(path) for path in files],", "if not cls._is_valid_config(config): LOG.warning(\"Skipped invalid config (possible obsoleted): {}\" .format(path))", "== target_id: # self pointing relationship entities_dict[source_id][StaticFields.RELATIONSHIPS].append(rel) else: source, target", "id to neighbor entity rel={'source': 's1', 'target': 'r1', 'relationship_type': 'attached'}", "schema or reuse template validation return StaticFields.DEFINITIONS in config @staticmethod", "= self._get_all_entities() return self.entities_cache def _get_all_entities(self): files = file_utils.list_files(self.cfg.static.directory, '.yaml',", "'h1', 'vitrage_type': 'host.nova', 'id': 1} result={'relationship_type': 'attached', 'source': 's1', 'target':", "for new_entity in new_entities: old_entity = self._find_entity(new_entity, self.entities_cache) if old_entity:", "deleted entities for old_entity in self.entities_cache: if not self._find_entity(old_entity, new_entities):", "new_entities = self._get_all_entities() for new_entity in new_entities: old_entity = self._find_entity(new_entity,", "invalid relationship LOG.error(\"Invalid neighbor {} for relationship {}\" .format(neighbor, rel))", "# under the License. 
from itertools import chain from six.moves", "'source': 's1', 'target': {'static_id': 'h1', 'vitrage_type': 'host.nova', 'id': 1}} \"\"\"", "from vitrage.utils import file as file_utils LOG = log.getLogger(__name__) class", "cls._is_valid_config(config): LOG.warning(\"Skipped invalid config (possible obsoleted): {}\" .format(path)) return []", "Licensed under the Apache License, Version 2.0 (the \"License\"); you", "= entities_dict[source_id], entities_dict[target_id] source[StaticFields.RELATIONSHIPS].append( cls._expand_neighbor(rel, target)) @staticmethod def _expand_neighbor(rel, neighbor):", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "for validity of configuration\"\"\" # TODO(yujunz) check with yaml schema", "TODO(yujunz) raise exception and ignore invalid relationship LOG.error(\"Invalid neighbor {}", "target_id: # self pointing relationship entities_dict[source_id][StaticFields.RELATIONSHIPS].append(rel) else: source, target =", "path): config = file_utils.load_yaml_file(path) if not cls._is_valid_config(config): LOG.warning(\"Skipped invalid config", "entities_dict.values() @classmethod def _pack_entity(cls, entities_dict, entity): static_id = entity[StaticFields.STATIC_ID] if", "definitions[StaticFields.ENTITIES] relationships = definitions[StaticFields.RELATIONSHIPS] return cls._pack(entities, relationships) @classmethod def _pack(cls,", "and \\ old_entity.get(StaticFields.ID) == \\ new_entity.get(StaticFields.ID) and \\ old_entity.get(StaticFields.NAME) ==", "new_entity.get(StaticFields.ID) and \\ old_entity.get(StaticFields.NAME) == \\ new_entity.get(StaticFields.NAME) and \\ old_entity.get(StaticFields.STATE)", "in entity.items() if key not in cls.BASE_FIELDS} entities_dict[static_id] = entity", "the License. 
from itertools import chain from six.moves import reduce", "def _get_all_entities(self): files = file_utils.list_files(self.cfg.static.directory, '.yaml', True) return list(reduce(chain, [self._get_entities_from_file(path)", "compliance with the License. You may obtain # a copy", "not in cls.BASE_FIELDS} entities_dict[static_id] = entity entity[StaticFields.RELATIONSHIPS] = [] entity[StaticFields.METADATA]", "'r1', 'relationship_type': 'attached'} neighbor={'static_id': 'h1', 'vitrage_type': 'host.nova', 'id': 1} result={'relationship_type':", "new_entity.get(StaticFields.TYPE) and \\ old_entity.get(StaticFields.ID) == \\ new_entity.get(StaticFields.ID) and \\ old_entity.get(StaticFields.NAME)", "neighbor {} for relationship {}\" .format(neighbor, rel)) return None return", "cls._pack(entities, relationships) @classmethod def _pack(cls, entities, relationships): entities_dict = {}", "STATIC_DATASOURCE from vitrage.datasources.static import StaticFields from vitrage.utils import file as", "self.entities_cache = [] @staticmethod def _is_valid_config(config): \"\"\"check for validity of", "== \\ new_entity.get(StaticFields.TYPE) and \\ old_entity.get(StaticFields.ID) == \\ new_entity.get(StaticFields.ID) and", "get_event_types(): return [] def enrich_event(self, event, event_type): pass def get_all(self,", "the # License for the specific language governing permissions and", "self.entities_cache) if old_entity: # Add modified entities if not self._equal_entities(old_entity,", "# # Unless required by applicable law or agreed to", "= [] new_entities = self._get_all_entities() for new_entity in new_entities: old_entity", "[] def enrich_event(self, event, event_type): pass def get_all(self, datasource_action): return", "return changed_entities @classmethod def _get_entities_from_file(cls, path): config = file_utils.load_yaml_file(path) if", "rel)) return None return rel @staticmethod def _find_entity(search_entity, entities): #", "result={'relationship_type': 'attached', 
'source': 's1', 'target': {'static_id': 'h1', 'vitrage_type': 'host.nova', 'id':", "BASE_FIELDS = {StaticFields.STATIC_ID, StaticFields.TYPE, StaticFields.ID} def __init__(self, conf): super(StaticDriver, self).__init__()", "== search_entity[StaticFields.TYPE] \\ and entity[StaticFields.ID] == \\ search_entity[StaticFields.ID]: return entity", "return self.entities_cache def _get_all_entities(self): files = file_utils.list_files(self.cfg.static.directory, '.yaml', True) return", "source_id == target_id: # self pointing relationship entities_dict[source_id][StaticFields.RELATIONSHIPS].append(rel) else: source,", "2.0 (the \"License\"); you may # not use this file", "STATIC_DATASOURCE, datasource_action) def _get_and_cache_all_entities(self): self.entities_cache = self._get_all_entities() return self.entities_cache def", "@classmethod def _get_entities_from_file(cls, path): config = file_utils.load_yaml_file(path) if not cls._is_valid_config(config):", "from vitrage.datasources.driver_base import DriverBase from vitrage.datasources.static import STATIC_DATASOURCE from vitrage.datasources.static", "TODO(yujunz) check with yaml schema or reuse template validation return", "by applicable law or agreed to in writing, software #", "if not self._equal_entities(old_entity, new_entity): changed_entities.append(new_entity.copy()) else: # Add new entities", "template validation return StaticFields.DEFINITIONS in config @staticmethod def get_event_types(): return", "self.entities_cache = new_entities return changed_entities @classmethod def _get_entities_from_file(cls, path): config", "\"\"\"Expand config id to neighbor entity rel={'source': 's1', 'target': 'r1',", "new_entity): # TODO(iafek): compare also the relationships return old_entity.get(StaticFields.TYPE) ==", "_pack_rel(cls, entities_dict, rel): source_id = rel[StaticFields.SOURCE] target_id = rel[StaticFields.TARGET] if", "rel={'source': 's1', 'target': 'r1', 'relationship_type': 'attached'} neighbor={'static_id': 
'h1', 'vitrage_type': 'host.nova',", "rel[StaticFields.TARGET] == neighbor[StaticFields.STATIC_ID]: rel[StaticFields.TARGET] = neighbor else: # TODO(yujunz) raise", "in cls.BASE_FIELDS} entities_dict[static_id] = entity entity[StaticFields.RELATIONSHIPS] = [] entity[StaticFields.METADATA] =", "2016 - Nokia, ZTE # # Licensed under the Apache", "old_entity.get(StaticFields.ID) == \\ new_entity.get(StaticFields.ID) and \\ old_entity.get(StaticFields.NAME) == \\ new_entity.get(StaticFields.NAME)", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "import GraphAction from vitrage.datasources.driver_base import DriverBase from vitrage.datasources.static import STATIC_DATASOURCE", "entities if not self._equal_entities(old_entity, new_entity): changed_entities.append(new_entity.copy()) else: # Add new", "with yaml schema or reuse template validation return StaticFields.DEFINITIONS in", "pointing relationship entities_dict[source_id][StaticFields.RELATIONSHIPS].append(rel) else: source, target = entities_dict[source_id], entities_dict[target_id] source[StaticFields.RELATIONSHIPS].append(", "1} result={'relationship_type': 'attached', 'source': 's1', 'target': {'static_id': 'h1', 'vitrage_type': 'host.nova',", "StaticFields from vitrage.utils import file as file_utils LOG = log.getLogger(__name__)", "return entity @staticmethod def _equal_entities(old_entity, new_entity): # TODO(iafek): compare also", "= metadata else: LOG.warning(\"Skipped duplicated entity: {}\".format(entity)) @classmethod def _pack_rel(cls,", "Add new entities changed_entities.append(new_entity.copy()) # Add deleted entities for old_entity", "def __init__(self, conf): super(StaticDriver, self).__init__() self.cfg = conf self.entities_cache =", "_find_entity(search_entity, entities): # naive implementation since we don't expect many", "(possible obsoleted): {}\" .format(path)) return [] definitions = config[StaticFields.DEFINITIONS] entities", "import chain from six.moves import reduce from 
oslo_log import log", "License. from itertools import chain from six.moves import reduce from", "not in entities_dict: metadata = {key: value for key, value", "Add modified entities if not self._equal_entities(old_entity, new_entity): changed_entities.append(new_entity.copy()) else: #", "not self._find_entity(old_entity, new_entities): old_entity_copy = old_entity.copy() old_entity_copy[DSProps.EVENT_TYPE] = GraphAction.DELETE_ENTITY changed_entities.append(old_entity_copy)", "_get_entities_from_file(cls, path): config = file_utils.load_yaml_file(path) if not cls._is_valid_config(config): LOG.warning(\"Skipped invalid", "'relationship_type': 'attached'} neighbor={'static_id': 'h1', 'vitrage_type': 'host.nova', 'id': 1} result={'relationship_type': 'attached',", "may obtain # a copy of the License at #", "rel[StaticFields.TARGET] = neighbor else: # TODO(yujunz) raise exception and ignore", "in self.entities_cache: if not self._find_entity(old_entity, new_entities): old_entity_copy = old_entity.copy() old_entity_copy[DSProps.EVENT_TYPE]", "vitrage.datasources.driver_base import DriverBase from vitrage.datasources.static import STATIC_DATASOURCE from vitrage.datasources.static import", "= neighbor else: # TODO(yujunz) raise exception and ignore invalid", "Unless required by applicable law or agreed to in writing,", "file as file_utils LOG = log.getLogger(__name__) class StaticDriver(DriverBase): # base", "new_entity in new_entities: old_entity = self._find_entity(new_entity, self.entities_cache) if old_entity: #", "if static_id not in entities_dict: metadata = {key: value for", "as DSProps from vitrage.common.constants import GraphAction from vitrage.datasources.driver_base import DriverBase", "relationship {}\" .format(neighbor, rel)) return None return rel @staticmethod def", "limitations # under the License. 
from itertools import chain from", "neighbor entity rel={'source': 's1', 'target': 'r1', 'relationship_type': 'attached'} neighbor={'static_id': 'h1',", "config[StaticFields.DEFINITIONS] entities = definitions[StaticFields.ENTITIES] relationships = definitions[StaticFields.RELATIONSHIPS] return cls._pack(entities, relationships)", "applicable law or agreed to in writing, software # distributed", "self).__init__() self.cfg = conf self.entities_cache = [] @staticmethod def _is_valid_config(config):", "{'static_id': 'h1', 'vitrage_type': 'host.nova', 'id': 1}} \"\"\" rel = rel.copy()", "== \\ search_entity[StaticFields.ID]: return entity @staticmethod def _equal_entities(old_entity, new_entity): #", "for old_entity in self.entities_cache: if not self._find_entity(old_entity, new_entities): old_entity_copy =", "OF ANY KIND, either express or implied. See the #", "entities_dict, entity): static_id = entity[StaticFields.STATIC_ID] if static_id not in entities_dict:", "neighbor[StaticFields.STATIC_ID]: rel[StaticFields.SOURCE] = neighbor elif rel[StaticFields.TARGET] == neighbor[StaticFields.STATIC_ID]: rel[StaticFields.TARGET] =", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "in writing, software # distributed under the License is distributed", "LOG.error(\"Invalid neighbor {} for relationship {}\" .format(neighbor, rel)) return None", "exception and ignore invalid relationship LOG.error(\"Invalid neighbor {} for relationship", "self._find_entity(new_entity, self.entities_cache) if old_entity: # Add modified entities if not", "changed_entities.append(old_entity_copy) self.entities_cache = new_entities return changed_entities @classmethod def _get_entities_from_file(cls, path):", "= definitions[StaticFields.ENTITIES] relationships = definitions[StaticFields.RELATIONSHIPS] return cls._pack(entities, relationships) @classmethod def", "old_entity: # Add modified entities if not self._equal_entities(old_entity, new_entity): 
changed_entities.append(new_entity.copy())", "1}} \"\"\" rel = rel.copy() if rel[StaticFields.SOURCE] == neighbor[StaticFields.STATIC_ID]: rel[StaticFields.SOURCE]", "import reduce from oslo_log import log from vitrage.common.constants import DatasourceProperties", "pass def get_all(self, datasource_action): return self.make_pickleable(self._get_and_cache_all_entities(), STATIC_DATASOURCE, datasource_action) def get_changes(self,", "{StaticFields.STATIC_ID, StaticFields.TYPE, StaticFields.ID} def __init__(self, conf): super(StaticDriver, self).__init__() self.cfg =", "entity[StaticFields.METADATA] = metadata else: LOG.warning(\"Skipped duplicated entity: {}\".format(entity)) @classmethod def", "if entity[StaticFields.TYPE] == search_entity[StaticFields.TYPE] \\ and entity[StaticFields.ID] == \\ search_entity[StaticFields.ID]:", "search_entity[StaticFields.TYPE] \\ and entity[StaticFields.ID] == \\ search_entity[StaticFields.ID]: return entity @staticmethod", ".format(neighbor, rel)) return None return rel @staticmethod def _find_entity(search_entity, entities):", "key, value in entity.items() if key not in cls.BASE_FIELDS} entities_dict[static_id]", "_pack_entity(cls, entities_dict, entity): static_id = entity[StaticFields.STATIC_ID] if static_id not in", "= conf self.entities_cache = [] @staticmethod def _is_valid_config(config): \"\"\"check for", "relationships return old_entity.get(StaticFields.TYPE) == \\ new_entity.get(StaticFields.TYPE) and \\ old_entity.get(StaticFields.ID) ==", "GraphAction from vitrage.datasources.driver_base import DriverBase from vitrage.datasources.static import STATIC_DATASOURCE from", "\"\"\"check for validity of configuration\"\"\" # TODO(yujunz) check with yaml", "definitions = config[StaticFields.DEFINITIONS] entities = definitions[StaticFields.ENTITIES] relationships = definitions[StaticFields.RELATIONSHIPS] return", "datasource_action) def _get_and_cache_all_entities(self): self.entities_cache = self._get_all_entities() return 
self.entities_cache def _get_all_entities(self):", "ZTE # # Licensed under the Apache License, Version 2.0", "- Nokia, ZTE # # Licensed under the Apache License,", "entities_dict[target_id] source[StaticFields.RELATIONSHIPS].append( cls._expand_neighbor(rel, target)) @staticmethod def _expand_neighbor(rel, neighbor): \"\"\"Expand config", "six.moves import reduce from oslo_log import log from vitrage.common.constants import", "= new_entities return changed_entities @classmethod def _get_entities_from_file(cls, path): config =", "either express or implied. See the # License for the", "entity[StaticFields.RELATIONSHIPS] = [] entity[StaticFields.METADATA] = metadata else: LOG.warning(\"Skipped duplicated entity:", "old_entity in self.entities_cache: if not self._find_entity(old_entity, new_entities): old_entity_copy = old_entity.copy()", "@classmethod def _pack_rel(cls, entities_dict, rel): source_id = rel[StaticFields.SOURCE] target_id =", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "may # not use this file except in compliance with", "key not in cls.BASE_FIELDS} entities_dict[static_id] = entity entity[StaticFields.RELATIONSHIPS] = []", "old_entity = self._find_entity(new_entity, self.entities_cache) if old_entity: # Add modified entities", "# License for the specific language governing permissions and limitations", "with the License. 
You may obtain # a copy of", "in relationships: cls._pack_rel(entities_dict, rel) return entities_dict.values() @classmethod def _pack_entity(cls, entities_dict,", "StaticFields.DEFINITIONS in config @staticmethod def get_event_types(): return [] def enrich_event(self,", "= GraphAction.DELETE_ENTITY changed_entities.append(old_entity_copy) self.entities_cache = new_entities return changed_entities @classmethod def", "[] definitions = config[StaticFields.DEFINITIONS] entities = definitions[StaticFields.ENTITIES] relationships = definitions[StaticFields.RELATIONSHIPS]", "target = entities_dict[source_id], entities_dict[target_id] source[StaticFields.RELATIONSHIPS].append( cls._expand_neighbor(rel, target)) @staticmethod def _expand_neighbor(rel,", "entities): # naive implementation since we don't expect many static", "metadata BASE_FIELDS = {StaticFields.STATIC_ID, StaticFields.TYPE, StaticFields.ID} def __init__(self, conf): super(StaticDriver,", "are treated as metadata BASE_FIELDS = {StaticFields.STATIC_ID, StaticFields.TYPE, StaticFields.ID} def", "you may # not use this file except in compliance", "return None return rel @staticmethod def _find_entity(search_entity, entities): # naive", "conf self.entities_cache = [] @staticmethod def _is_valid_config(config): \"\"\"check for validity", "entity.items() if key not in cls.BASE_FIELDS} entities_dict[static_id] = entity entity[StaticFields.RELATIONSHIPS]", "for path in files], [])) def _get_and_cache_changed_entities(self): changed_entities = []", "treated as metadata BASE_FIELDS = {StaticFields.STATIC_ID, StaticFields.TYPE, StaticFields.ID} def __init__(self,", "StaticFields.TYPE, StaticFields.ID} def __init__(self, conf): super(StaticDriver, self).__init__() self.cfg = conf", "# TODO(iafek): compare also the relationships return old_entity.get(StaticFields.TYPE) == \\", "list(reduce(chain, [self._get_entities_from_file(path) for path in files], [])) def _get_and_cache_changed_entities(self): changed_entities", 
"self.entities_cache = self._get_all_entities() return self.entities_cache def _get_all_entities(self): files = file_utils.list_files(self.cfg.static.directory,", "all entities, others are treated as metadata BASE_FIELDS = {StaticFields.STATIC_ID,", "def _pack_rel(cls, entities_dict, rel): source_id = rel[StaticFields.SOURCE] target_id = rel[StaticFields.TARGET]", "if not self._find_entity(old_entity, new_entities): old_entity_copy = old_entity.copy() old_entity_copy[DSProps.EVENT_TYPE] = GraphAction.DELETE_ENTITY", "rel): source_id = rel[StaticFields.SOURCE] target_id = rel[StaticFields.TARGET] if source_id ==", "DriverBase from vitrage.datasources.static import STATIC_DATASOURCE from vitrage.datasources.static import StaticFields from", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "target_id = rel[StaticFields.TARGET] if source_id == target_id: # self pointing", "source_id = rel[StaticFields.SOURCE] target_id = rel[StaticFields.TARGET] if source_id == target_id:", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "naive implementation since we don't expect many static entities for", "log from vitrage.common.constants import DatasourceProperties as DSProps from vitrage.common.constants import", "= self._find_entity(new_entity, self.entities_cache) if old_entity: # Add modified entities if", "the License is distributed on an \"AS IS\" BASIS, WITHOUT", "'vitrage_type': 'host.nova', 'id': 1} result={'relationship_type': 'attached', 'source': 's1', 'target': {'static_id':", "_equal_entities(old_entity, new_entity): # TODO(iafek): compare also the relationships return old_entity.get(StaticFields.TYPE)", "_get_and_cache_all_entities(self): self.entities_cache = self._get_all_entities() return self.entities_cache def _get_all_entities(self): files =", "entity in entities: if entity[StaticFields.TYPE] == search_entity[StaticFields.TYPE] \\ and entity[StaticFields.ID]", "get_all(self, datasource_action): return 
self.make_pickleable(self._get_and_cache_all_entities(), STATIC_DATASOURCE, datasource_action) def get_changes(self, datasource_action): return", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "others are treated as metadata BASE_FIELDS = {StaticFields.STATIC_ID, StaticFields.TYPE, StaticFields.ID}", "return self.make_pickleable(self._get_and_cache_all_entities(), STATIC_DATASOURCE, datasource_action) def get_changes(self, datasource_action): return self.make_pickleable(self._get_and_cache_changed_entities(), STATIC_DATASOURCE,", "governing permissions and limitations # under the License. from itertools", "from vitrage.datasources.static import StaticFields from vitrage.utils import file as file_utils", "if source_id == target_id: # self pointing relationship entities_dict[source_id][StaticFields.RELATIONSHIPS].append(rel) else:", "_get_and_cache_changed_entities(self): changed_entities = [] new_entities = self._get_all_entities() for new_entity in", "return self.make_pickleable(self._get_and_cache_changed_entities(), STATIC_DATASOURCE, datasource_action) def _get_and_cache_all_entities(self): self.entities_cache = self._get_all_entities() return", "source, target = entities_dict[source_id], entities_dict[target_id] source[StaticFields.RELATIONSHIPS].append( cls._expand_neighbor(rel, target)) @staticmethod def", "for the specific language governing permissions and limitations # under", "from vitrage.datasources.static import STATIC_DATASOURCE from vitrage.datasources.static import StaticFields from vitrage.utils", "def _pack(cls, entities, relationships): entities_dict = {} for entity in", "__init__(self, conf): super(StaticDriver, self).__init__() self.cfg = conf self.entities_cache = []", "from six.moves import reduce from oslo_log import log from vitrage.common.constants", "entity rel={'source': 's1', 'target': 'r1', 'relationship_type': 'attached'} neighbor={'static_id': 'h1', 'vitrage_type':", "entity @staticmethod def 
_equal_entities(old_entity, new_entity): # TODO(iafek): compare also the", "TODO(iafek): compare also the relationships return old_entity.get(StaticFields.TYPE) == \\ new_entity.get(StaticFields.TYPE)", "except in compliance with the License. You may obtain #", "# TODO(yujunz) check with yaml schema or reuse template validation", "LOG.warning(\"Skipped invalid config (possible obsoleted): {}\" .format(path)) return [] definitions", "entities = definitions[StaticFields.ENTITIES] relationships = definitions[StaticFields.RELATIONSHIPS] return cls._pack(entities, relationships) @classmethod", "License. You may obtain # a copy of the License", "def _get_entities_from_file(cls, path): config = file_utils.load_yaml_file(path) if not cls._is_valid_config(config): LOG.warning(\"Skipped", "entity): static_id = entity[StaticFields.STATIC_ID] if static_id not in entities_dict: metadata", "implementation since we don't expect many static entities for entity", "ANY KIND, either express or implied. See the # License", "# distributed under the License is distributed on an \"AS", "changed_entities.append(new_entity.copy()) else: # Add new entities changed_entities.append(new_entity.copy()) # Add deleted", "# Unless required by applicable law or agreed to in", "return list(reduce(chain, [self._get_entities_from_file(path) for path in files], [])) def _get_and_cache_changed_entities(self):", "LOG = log.getLogger(__name__) class StaticDriver(DriverBase): # base fields are required", "return StaticFields.DEFINITIONS in config @staticmethod def get_event_types(): return [] def", "True) return list(reduce(chain, [self._get_entities_from_file(path) for path in files], [])) def", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "configuration\"\"\" # TODO(yujunz) check with yaml schema or reuse template", "def _find_entity(search_entity, entities): # naive implementation since we don't expect", "static_id not in entities_dict: metadata = {key: value for key,", 
"definitions[StaticFields.RELATIONSHIPS] return cls._pack(entities, relationships) @classmethod def _pack(cls, entities, relationships): entities_dict", "_pack(cls, entities, relationships): entities_dict = {} for entity in entities:", "permissions and limitations # under the License. from itertools import", "import STATIC_DATASOURCE from vitrage.datasources.static import StaticFields from vitrage.utils import file", "_is_valid_config(config): \"\"\"check for validity of configuration\"\"\" # TODO(yujunz) check with", "cls._pack_entity(entities_dict, entity) for rel in relationships: cls._pack_rel(entities_dict, rel) return entities_dict.values()", "{}\" .format(path)) return [] definitions = config[StaticFields.DEFINITIONS] entities = definitions[StaticFields.ENTITIES]", "target)) @staticmethod def _expand_neighbor(rel, neighbor): \"\"\"Expand config id to neighbor", "rel.copy() if rel[StaticFields.SOURCE] == neighbor[StaticFields.STATIC_ID]: rel[StaticFields.SOURCE] = neighbor elif rel[StaticFields.TARGET]", "self.cfg = conf self.entities_cache = [] @staticmethod def _is_valid_config(config): \"\"\"check", "rel in relationships: cls._pack_rel(entities_dict, rel) return entities_dict.values() @classmethod def _pack_entity(cls,", "or implied. See the # License for the specific language" ]
[ "of each triangle. Must be a permutation of (0, ...,", "2)) self.vertices_centers = np.empty((0, 2)) self.vertices_offsets = np.empty((0, 2)) self.vertices_index", "= np.empty((0, 2), dtype=int) self.triangles = np.empty((0, 3), dtype=np.uint32) self.triangles_index", "triangles_colors : np.ndarray Px4 array of the rgba color of", "Qx2 array of vertices of all triangles for shapes including", "vertices of triangles for shapes. For vertices corresponding to faces", "of centers of vertices of triangles for shapes. For vertices", "np.ndarray Qx2 array of vertices of all triangles for shapes", "and faces vertices_centers : np.ndarray Qx2 array of centers of", "to the width of the edge vertices_offsets : np.ndarray Qx2", "dtype=int) self.triangles = np.empty((0, 3), dtype=np.uint32) self.triangles_index = np.empty((0, 2),", "be scaled and added to the `vertices_centers` to get the", "data \"\"\" self.vertices = np.empty((0, 2)) self.vertices_centers = np.empty((0, 2))", "a scaled `vertices_offsets` to get the actual vertex positions. 
The", "\"\"\"Resets mesh data \"\"\" self.vertices = np.empty((0, 2)) self.vertices_centers =", "of each shape that each triangle corresponds and the mesh", "edge vertices_index : np.ndarray Qx2 array of the index (0,", "rgba color of each triangle triangles_z_order : np.ndarray Length P", "= np.empty((0, 2)) self.vertices_offsets = np.empty((0, 2)) self.vertices_index = np.empty((0,", "array of vertex indices that form the mesh triangles triangles_index", "np.empty((0, 2), dtype=int) self.triangles_colors = np.empty((0, 4)) self.triangles_z_order = np.empty((0),", "np class Mesh: \"\"\"Contains meshses of shapes that will ultimately", "of the edge vertices_offsets : np.ndarray Qx2 array of offsets", "the edge vertices_index : np.ndarray Qx2 array of the index", "Px4 array of the rgba color of each triangle triangles_z_order", "the different mesh types corresponding to faces and edges \"\"\"", "= np.empty((0, 2)) self.vertices_index = np.empty((0, 2), dtype=int) self.triangles =", "the mesh type (0, 1) for face or edge. triangles_colors", "vertices of all triangles for shapes including edges and faces", "np.ndarray Qx2 array of offsets of vertices of triangles for", "2)) self.vertices_index = np.empty((0, 2), dtype=int) self.triangles = np.empty((0, 3),", "<reponame>marshuang80/napari import numpy as np class Mesh: \"\"\"Contains meshses of", "\"\"\" _types = ['face', 'edge'] def __init__(self): self.clear() def clear(self):", "array of centers of vertices of triangles for shapes. For", "these are the same as the actual vertices. For vertices", "corresponds and the mesh type (0, 1) for face or", "as the actual vertices. For vertices corresponding to edges these", "edge. 
triangles_colors : np.ndarray Px4 array of the rgba color", "= np.empty((0, 3), dtype=np.uint32) self.triangles_index = np.empty((0, 2), dtype=int) self.triangles_colors", "to edges these values should be added to a scaled", "= np.empty((0, 2), dtype=int) self.triangles_colors = np.empty((0, 4)) self.triangles_z_order =", "values should be added to a scaled `vertices_offsets` to get", "def __init__(self): self.clear() def clear(self): \"\"\"Resets mesh data \"\"\" self.vertices", "triangle. Must be a permutation of (0, ..., P-1) Extended", "np.ndarray Px2 array of the index (0, ..., N-1) of", "For vertices corresponding to edges these values should be scaled", "np.ndarray Qx2 array of centers of vertices of triangles for", "clear(self): \"\"\"Resets mesh data \"\"\" self.vertices = np.empty((0, 2)) self.vertices_centers", "values should be scaled and added to the `vertices_centers` to", "edges \"\"\" _types = ['face', 'edge'] def __init__(self): self.clear() def", "vertices corresponding to faces these are 0. For vertices corresponding", "form the mesh triangles triangles_index : np.ndarray Px2 array of", "self.vertices_centers = np.empty((0, 2)) self.vertices_offsets = np.empty((0, 2)) self.vertices_index =", "faces these are the same as the actual vertices. For", "shapes. For vertices corresponding to faces these are the same", "or edge. triangles_colors : np.ndarray Px4 array of the rgba", "P-1) Extended Summary ---------- _types : list Length two list", "list Length two list of the different mesh types corresponding", "a permutation of (0, ..., P-1) Extended Summary ---------- _types", "vertices corresponding to edges these values should be scaled and", "the width of the edge vertices_index : np.ndarray Qx2 array", "to get the actual vertex positions. The scaling corresponds to", "to a scaled `vertices_offsets` to get the actual vertex positions.", "mesh type (0, 1) for face or edge. 
triangles :", "edges and faces vertices_centers : np.ndarray Qx2 array of centers", "scaling corresponds to the width of the edge vertices_offsets :", "of the different mesh types corresponding to faces and edges", "1) for face or edge. triangles : np.ndarray Px3 array", "For vertices corresponding to faces these are the same as", "index (0, ..., N-1) of each shape that each vertex", "z order of each triangle. Must be a permutation of", "type (0, 1) for face or edge. triangles_colors : np.ndarray", "be added to a scaled `vertices_offsets` to get the actual", "self.clear() def clear(self): \"\"\"Resets mesh data \"\"\" self.vertices = np.empty((0,", ": np.ndarray Qx2 array of centers of vertices of triangles", "triangles for shapes including edges and faces vertices_centers : np.ndarray", "vertex corresponds and the mesh type (0, 1) for face", "mesh types corresponding to faces and edges \"\"\" _types =", "the actual vertices. For vertices corresponding to edges these values", "np.ndarray Px3 array of vertex indices that form the mesh", "corresponding to faces these are the same as the actual", "\"\"\" self.vertices = np.empty((0, 2)) self.vertices_centers = np.empty((0, 2)) self.vertices_offsets", "added to the `vertices_centers` to get the actual vertex positions.", "triangles_index : np.ndarray Px2 array of the index (0, ...,", "The scaling corresponds to the width of the edge vertices_offsets", "np.ndarray Px4 array of the rgba color of each triangle", "each triangle triangles_z_order : np.ndarray Length P array of the", "each triangle. Must be a permutation of (0, ..., P-1)", "scaled `vertices_offsets` to get the actual vertex positions. The scaling", "of the rgba color of each triangle triangles_z_order : np.ndarray", "and the mesh type (0, 1) for face or edge.", "the rgba color of each triangle triangles_z_order : np.ndarray Length", ": np.ndarray Length P array of the z order of", "0. 
For vertices corresponding to edges these values should be", "shape that each vertex corresponds and the mesh type (0,", "two list of the different mesh types corresponding to faces", "of vertices of triangles for shapes. For vertices corresponding to", "triangles for shapes. For vertices corresponding to faces these are", "the width of the edge vertices_offsets : np.ndarray Qx2 array", "scaling corresponds to the width of the edge vertices_index :", "permutation of (0, ..., P-1) Extended Summary ---------- _types :", "triangles_z_order : np.ndarray Length P array of the z order", "actual vertex positions. The scaling corresponds to the width of", "3), dtype=np.uint32) self.triangles_index = np.empty((0, 2), dtype=int) self.triangles_colors = np.empty((0,", "mesh data \"\"\" self.vertices = np.empty((0, 2)) self.vertices_centers = np.empty((0,", "to the width of the edge vertices_index : np.ndarray Qx2", "that form the mesh triangles triangles_index : np.ndarray Px2 array", "be a permutation of (0, ..., P-1) Extended Summary ----------", "types corresponding to faces and edges \"\"\" _types = ['face',", "self.triangles_index = np.empty((0, 2), dtype=int) self.triangles_colors = np.empty((0, 4)) self.triangles_z_order", "(0, ..., N-1) of each shape that each vertex corresponds", "def clear(self): \"\"\"Resets mesh data \"\"\" self.vertices = np.empty((0, 2))", "`vertices_centers` to get the actual vertex positions. The scaling corresponds", "of the z order of each triangle. 
Must be a", "should be scaled and added to the `vertices_centers` to get", "of each shape that each vertex corresponds and the mesh", "Attributes ---------- vertices : np.ndarray Qx2 array of vertices of", "Qx2 array of the index (0, ..., N-1) of each", "of the edge vertices_index : np.ndarray Qx2 array of the", "triangle corresponds and the mesh type (0, 1) for face", "width of the edge vertices_index : np.ndarray Qx2 array of", "N-1) of each shape that each vertex corresponds and the", ": np.ndarray Px4 array of the rgba color of each", ": np.ndarray Px3 array of vertex indices that form the", "= ['face', 'edge'] def __init__(self): self.clear() def clear(self): \"\"\"Resets mesh", ": np.ndarray Qx2 array of the index (0, ..., N-1)", "..., N-1) of each shape that each triangle corresponds and", "class Mesh: \"\"\"Contains meshses of shapes that will ultimately get", "the z order of each triangle. Must be a permutation", "of vertex indices that form the mesh triangles triangles_index :", "the `vertices_centers` to get the actual vertex positions. The scaling", "to faces and edges \"\"\" _types = ['face', 'edge'] def", "'edge'] def __init__(self): self.clear() def clear(self): \"\"\"Resets mesh data \"\"\"", "or edge. triangles : np.ndarray Px3 array of vertex indices", "P array of the z order of each triangle. Must", "triangles triangles_index : np.ndarray Px2 array of the index (0,", "these values should be added to a scaled `vertices_offsets` to", "vertices : np.ndarray Qx2 array of vertices of all triangles", "faces these are 0. 
For vertices corresponding to edges these", "mesh triangles triangles_index : np.ndarray Px2 array of the index", "self.vertices_index = np.empty((0, 2), dtype=int) self.triangles = np.empty((0, 3), dtype=np.uint32)", "import numpy as np class Mesh: \"\"\"Contains meshses of shapes", "_types : list Length two list of the different mesh", "each triangle corresponds and the mesh type (0, 1) for", "triangle triangles_z_order : np.ndarray Length P array of the z", "to the `vertices_centers` to get the actual vertex positions. The", "np.empty((0, 2), dtype=int) self.triangles = np.empty((0, 3), dtype=np.uint32) self.triangles_index =", "`vertices_offsets` to get the actual vertex positions. The scaling corresponds", "For vertices corresponding to faces these are 0. For vertices", "that each vertex corresponds and the mesh type (0, 1)", "each shape that each triangle corresponds and the mesh type", "array of vertices of all triangles for shapes including edges", "edge. triangles : np.ndarray Px3 array of vertex indices that", ": list Length two list of the different mesh types", "np.ndarray Qx2 array of the index (0, ..., N-1) of", "the index (0, ..., N-1) of each shape that each", "positions. The scaling corresponds to the width of the edge", "ultimately get rendered. Attributes ---------- vertices : np.ndarray Qx2 array", "= np.empty((0, 2)) self.vertices_centers = np.empty((0, 2)) self.vertices_offsets = np.empty((0,", "corresponding to edges these values should be added to a", "vertices corresponding to faces these are the same as the", "actual vertices. For vertices corresponding to edges these values should", "of offsets of vertices of triangles for shapes. 
For vertices", "added to a scaled `vertices_offsets` to get the actual vertex", "Px2 array of the index (0, ..., N-1) of each", "self.vertices = np.empty((0, 2)) self.vertices_centers = np.empty((0, 2)) self.vertices_offsets =", "self.vertices_offsets = np.empty((0, 2)) self.vertices_index = np.empty((0, 2), dtype=int) self.triangles", "vertices_index : np.ndarray Qx2 array of the index (0, ...,", ": np.ndarray Px2 array of the index (0, ..., N-1)", "color of each triangle triangles_z_order : np.ndarray Length P array", "get rendered. Attributes ---------- vertices : np.ndarray Qx2 array of", "mesh type (0, 1) for face or edge. triangles_colors :", "---------- _types : list Length two list of the different", "2), dtype=int) self.triangles = np.empty((0, 3), dtype=np.uint32) self.triangles_index = np.empty((0,", "Length P array of the z order of each triangle.", "the same as the actual vertices. For vertices corresponding to", "Summary ---------- _types : list Length two list of the", "the mesh triangles triangles_index : np.ndarray Px2 array of the", "that will ultimately get rendered. Attributes ---------- vertices : np.ndarray", "(0, 1) for face or edge. triangles_colors : np.ndarray Px4", "different mesh types corresponding to faces and edges \"\"\" _types", "including edges and faces vertices_centers : np.ndarray Qx2 array of", "centers of vertices of triangles for shapes. For vertices corresponding", "vertices_centers : np.ndarray Qx2 array of centers of vertices of", "vertices. For vertices corresponding to edges these values should be", "__init__(self): self.clear() def clear(self): \"\"\"Resets mesh data \"\"\" self.vertices =", "vertex positions. The scaling corresponds to the width of the", "array of the z order of each triangle. 
Must be", "2), dtype=int) self.triangles_colors = np.empty((0, 4)) self.triangles_z_order = np.empty((0), dtype=int)", "Must be a permutation of (0, ..., P-1) Extended Summary", "['face', 'edge'] def __init__(self): self.clear() def clear(self): \"\"\"Resets mesh data", "edges these values should be added to a scaled `vertices_offsets`", "shapes including edges and faces vertices_centers : np.ndarray Qx2 array", "to faces these are 0. For vertices corresponding to edges", "corresponds to the width of the edge vertices_index : np.ndarray", "to faces these are the same as the actual vertices.", "corresponding to edges these values should be scaled and added", "and added to the `vertices_centers` to get the actual vertex", "face or edge. triangles_colors : np.ndarray Px4 array of the", "array of the rgba color of each triangle triangles_z_order :", "corresponding to faces these are 0. For vertices corresponding to", "indices that form the mesh triangles triangles_index : np.ndarray Px2", "of shapes that will ultimately get rendered. Attributes ---------- vertices", "1) for face or edge. triangles_colors : np.ndarray Px4 array", "order of each triangle. Must be a permutation of (0,", "faces and edges \"\"\" _types = ['face', 'edge'] def __init__(self):", "all triangles for shapes including edges and faces vertices_centers :", "meshses of shapes that will ultimately get rendered. Attributes ----------", "The scaling corresponds to the width of the edge vertices_index", "to edges these values should be scaled and added to", "face or edge. triangles : np.ndarray Px3 array of vertex", "dtype=np.uint32) self.triangles_index = np.empty((0, 2), dtype=int) self.triangles_colors = np.empty((0, 4))", "vertex indices that form the mesh triangles triangles_index : np.ndarray", "corresponds to the width of the edge vertices_offsets : np.ndarray", "type (0, 1) for face or edge. 
triangles : np.ndarray", "Mesh: \"\"\"Contains meshses of shapes that will ultimately get rendered.", "of all triangles for shapes including edges and faces vertices_centers", "np.empty((0, 2)) self.vertices_index = np.empty((0, 2), dtype=int) self.triangles = np.empty((0,", "Qx2 array of offsets of vertices of triangles for shapes.", "self.triangles = np.empty((0, 3), dtype=np.uint32) self.triangles_index = np.empty((0, 2), dtype=int)", "numpy as np class Mesh: \"\"\"Contains meshses of shapes that", "of (0, ..., P-1) Extended Summary ---------- _types : list", "for shapes. For vertices corresponding to faces these are 0.", "Extended Summary ---------- _types : list Length two list of", "---------- vertices : np.ndarray Qx2 array of vertices of all", "..., P-1) Extended Summary ---------- _types : list Length two", "for shapes including edges and faces vertices_centers : np.ndarray Qx2", "the edge vertices_offsets : np.ndarray Qx2 array of offsets of", "np.ndarray Length P array of the z order of each", "for shapes. For vertices corresponding to faces these are the", "that each triangle corresponds and the mesh type (0, 1)", "should be added to a scaled `vertices_offsets` to get the", "get the actual vertex positions. The scaling corresponds to the", "for face or edge. triangles_colors : np.ndarray Px4 array of", "vertices corresponding to edges these values should be added to", "array of offsets of vertices of triangles for shapes. For", "triangles : np.ndarray Px3 array of vertex indices that form", "edge vertices_offsets : np.ndarray Qx2 array of offsets of vertices", "\"\"\"Contains meshses of shapes that will ultimately get rendered. 
Attributes", "each shape that each vertex corresponds and the mesh type", "(0, ..., P-1) Extended Summary ---------- _types : list Length", "and edges \"\"\" _types = ['face', 'edge'] def __init__(self): self.clear()", "_types = ['face', 'edge'] def __init__(self): self.clear() def clear(self): \"\"\"Resets", "np.empty((0, 2)) self.vertices_centers = np.empty((0, 2)) self.vertices_offsets = np.empty((0, 2))", "edges these values should be scaled and added to the", "corresponding to faces and edges \"\"\" _types = ['face', 'edge']", "2)) self.vertices_offsets = np.empty((0, 2)) self.vertices_index = np.empty((0, 2), dtype=int)", "faces vertices_centers : np.ndarray Qx2 array of centers of vertices", "of triangles for shapes. For vertices corresponding to faces these", "vertices_offsets : np.ndarray Qx2 array of offsets of vertices of", "..., N-1) of each shape that each vertex corresponds and", "array of the index (0, ..., N-1) of each shape", "index (0, ..., N-1) of each shape that each triangle", "(0, ..., N-1) of each shape that each triangle corresponds", "of each triangle triangles_z_order : np.ndarray Length P array of", "shape that each triangle corresponds and the mesh type (0,", "will ultimately get rendered. Attributes ---------- vertices : np.ndarray Qx2", "For vertices corresponding to edges these values should be added", "the mesh type (0, 1) for face or edge. triangles", "(0, 1) for face or edge. triangles : np.ndarray Px3", "the actual vertex positions. The scaling corresponds to the width", "same as the actual vertices. For vertices corresponding to edges", "np.empty((0, 3), dtype=np.uint32) self.triangles_index = np.empty((0, 2), dtype=int) self.triangles_colors =", "shapes that will ultimately get rendered. Attributes ---------- vertices :", "are 0. 
For vertices corresponding to edges these values should", "Px3 array of vertex indices that form the mesh triangles", "of vertices of all triangles for shapes including edges and", "these values should be scaled and added to the `vertices_centers`", "rendered. Attributes ---------- vertices : np.ndarray Qx2 array of vertices", "as np class Mesh: \"\"\"Contains meshses of shapes that will", "Qx2 array of centers of vertices of triangles for shapes.", "width of the edge vertices_offsets : np.ndarray Qx2 array of", "offsets of vertices of triangles for shapes. For vertices corresponding", "of the index (0, ..., N-1) of each shape that", "these are 0. For vertices corresponding to edges these values", ": np.ndarray Qx2 array of vertices of all triangles for", "Length two list of the different mesh types corresponding to", ": np.ndarray Qx2 array of offsets of vertices of triangles", "np.empty((0, 2)) self.vertices_offsets = np.empty((0, 2)) self.vertices_index = np.empty((0, 2),", "for face or edge. triangles : np.ndarray Px3 array of", "list of the different mesh types corresponding to faces and", "scaled and added to the `vertices_centers` to get the actual", "are the same as the actual vertices. For vertices corresponding", "N-1) of each shape that each triangle corresponds and the", "shapes. For vertices corresponding to faces these are 0. For", "each vertex corresponds and the mesh type (0, 1) for" ]
[ "<gh_stars>0 import sys def start_parameter(text, i): if len(sys.argv) > i:", "sys def start_parameter(text, i): if len(sys.argv) > i: print('{0}{1}'.format(text, sys.argv[i]))", "start_parameter(text, i): if len(sys.argv) > i: print('{0}{1}'.format(text, sys.argv[i])) return float(sys.argv[i])", "import sys def start_parameter(text, i): if len(sys.argv) > i: print('{0}{1}'.format(text,", "len(sys.argv) > i: print('{0}{1}'.format(text, sys.argv[i])) return float(sys.argv[i]) else: return float(raw_input(text))", "if len(sys.argv) > i: print('{0}{1}'.format(text, sys.argv[i])) return float(sys.argv[i]) else: return", "i): if len(sys.argv) > i: print('{0}{1}'.format(text, sys.argv[i])) return float(sys.argv[i]) else:", "def start_parameter(text, i): if len(sys.argv) > i: print('{0}{1}'.format(text, sys.argv[i])) return" ]
[ "'--dist', action = 'store_true', default = False, help = 'run", "-t para['version'] = args.upstreamversion # -u para['print_version'] = args.version #", "= args.spec # -s para['tar'] = args.tar # -t para['version']", "MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO", "the Debian package revision', metavar = 'revision') p.add_argument( '-z', '--targz',", "to \"bin\" for the compiled ELF binary.', metavar = 'binarypackage[:type]')", "default = False, # help = 'run GUI configuration') #", "action = 'store', help = 'generate extra configuration files as", "para['copyright'] >=4: para['copyright'] = 3 - para['copyright'] # 0: debian/copyright,", "def env(var): try: return os.environ[var] except KeyError: return '' #######################################################################", "'use the upstream source tarball directly (-p, -u, -z: overridden)',", "args.archive: para['archive'] = True para['tarball'] = args.archive else: para['archive'] =", "# 0: debian/copyright, +/-1: simple, +/-2: standard +/-3: extensive para['dist']", "args.license -> para['license'] as set if args.license == '': para['license']", "args.extra # -x para['yes'] = min(args.yes, 2) # -y #", "the shell. '''.format( para['program_name'], para['program_version'], para['program_copyright']), epilog='See debmake(1) manpage for", "default = '', help = 'set binary package specs as", "before creating files in the debian directory') p.add_argument( '-s', '--spec',", "and identify file paths') p.add_argument( '-l', '--license', default = '',", "substantial portions of the Software. 
THE SOFTWARE IS PROVIDED \"AS", "e-mail address', metavar = '<EMAIL>') p.add_argument( '-f', '--fullname', action =", "= pwd.getpwuid(os.getuid())[4].split(',')[0] ####################################################################### # command line setting ####################################################################### p =", "notice shall be included in all copies or substantial portions", "default = '', help = 'use the upstream source tarball", "dpkg-buildpackage (usually from its wrapper debuild or pdebuild) is invoked", "name; and optional \"type\" is chosen from \"bin\", \"data\", \"dbg\",", "OTHER DEALINGS IN THE SOFTWARE. \"\"\" import argparse import os", "The upstream tarball is downloaded as the package-version.tar.gz file. *", "'[01234]') p.add_argument( '-y', '--yes', action = 'count', default = 0,", "= 0, help = 'compare debian/copyright with the source and", "para['revision'] = args.revision # -r para['spec'] = args.spec # -s", "* The upstream tarball is downloaded as the package-version.tar.gz file.", "min(args.yes, 2) # -y # 0: ask, 1: yes, 2:", "'-x', '--extra', default = '', action = 'store', help =", "'-r', '--revision', action = 'store', default = '', help =", "0: ask, 1: yes, 2: no para['targz'] = args.targz #", "p.add_argument( '-y', '--yes', action = 'count', default = 0, help", "default = False, help = 'run \"tar\" to generate upstream", "action = 'store_true', default = False, help = 'use upstream", ".orig.tar.gz') sp.add_argument( '-a', '--archive', type = str, action = 'store',", "############################################# para['binaryspec'] = args.binaryspec # -b para['copyright'] = min(args.copyright, 6)", "False, help = 'show version information') p.add_argument( '-w', '--with', action", "= args.package.lower() # -p ############################################# para['quitearly'] = args.quitearly # -q", "possibly without any arguments. 
* Files in the package-version/debian/ directory", "default = 0, help = 'scan source for copyright+license text", "+ '@localhost' debemail = pwd.getpwuid(os.getuid())[0] + '@localhost' debfullname = env('DEBFULLNAME')", "if para['judge']: para['override'].update({'judge'}) para['kludge'] = args.kludge # -k ############################################# -l", "in the package-version/debian/ directory are manually adjusted. * dpkg-buildpackage (usually", "# -s para['tar'] = args.tar # -t para['version'] = args.upstreamversion", "default = False, help = 'run \"make dist\" equivalent first", "any arguments. * Files in the package-version/debian/ directory are manually", "source tarball directly (-p, -u, -z: overridden)', metavar = 'package-version.tar.gz')", "list of \"binarypackage\":\"type\" pairs, e.g., in full form \"foo:bin,foo-doc:doc,libfoo1:lib,libfoo1-dbg:dbg,libfoo-dev:dev\" or", "= False, help = 'run \"tar\" to generate upstream tarball", "'-a', '--archive', type = str, action = 'store', default =", "= 'read optional parameters from \"file\"', metavar = '\"file\"') p.add_argument(", "# -c if para['copyright'] >=4: para['copyright'] = 3 - para['copyright']", ">=4: para['copyright'] = 3 - para['copyright'] # 0: debian/copyright, +/-1:", "= 'run \"tar\" to generate upstream tarball and use it')", "default = False, help = 'make a native source package", "para['archive'] = True para['tarball'] = args.archive else: para['archive'] = False", "'store', default = '', help = 'set the upstream package", "manually adjusted. * dpkg-buildpackage (usually from its wrapper debuild or", "# -P para['tutorial'] = args.tutorial # -T ############################################# -o if", "THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\"\"\" import", "debmake.read ########################################################################### # undefined environment variable -> '' def env(var):", "be non-multiarch') p.add_argument( '-o', '--option', default = '', action =", "-g para['invoke'] = args.invoke # -i para['judge'] = args.judge #", "= args.kludge # -k ############################################# -l # --license: args.license ->", "it is set by \"binarypackage\". Otherwise it is set to", "-T ############################################# -o if args.option: exec(debmake.read.read(args.option)) ####################################################################### # return command", "separated list of \"binarypackage\":\"type\" pairs, e.g., in full form \"foo:bin,foo-doc:doc,libfoo1:lib,libfoo1-dbg:dbg,libfoo-dev:dev\"", "- para['copyright'] # 0: debian/copyright, +/-1: simple, +/-2: standard +/-3:", "'store', help = 'add formatted license to debian/copyright', metavar =", "= min(args.yes, 2) # -y # 0: ask, 1: yes,", "2: no para['targz'] = args.targz # -z para['local'] = args.local", "Software without restriction, including without limitation the rights to use,", "--with\" option arguments', metavar = 'args') p.add_argument( '-x', '--extra', default", "'': para['dh_with'] = set() # default is empty set else:", "args.gui # -g para['invoke'] = args.invoke # -i para['judge'] =", "0, help = 'scan source for copyright+license text and exit')", "action = 'store', default = '', help = 'set binary", "= 'count', default = 0, help = 'scan source for", "####################################################################### def para(para): debmail = env('DEBEMAIL') if not debmail: #debmail", "description = '''\\ {0}: make Debian source package Version: {1}", "p.add_mutually_exclusive_group() ck.add_argument( '-c', '--copyright', action = 'count', default = 0,", "'make a native source package without .orig.tar.gz') sp.add_argument( '-a', '--archive',", "and optional \"type\" is chosen from \"bin\", 
\"data\", \"dbg\", \"dev\",", "chosen from \"bin\", \"data\", \"dbg\", \"dev\", \"doc\", \"lib\", \"perl\", \"python\",", "= set(args.withargs.split(',')) ############################################# para['extra'] = args.extra # -x para['yes'] =", "parameters ####################################################################### return para ####################################################################### # Test code ####################################################################### if", "make debian packages. Argument may need to be quoted to", "binary package specs as comma separated list of \"binarypackage\":\"type\" pairs,", "all prompts') p.add_argument( '-L', '--local', action = 'store_true', default =", "= False, help='quit early before creating files in the debian", "NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS", "copies of the Software, and to permit persons to whom", "False, help = 'make a native source package without .orig.tar.gz')", "make Debian source package Version: {1} {2} {0} helps to", "hereby granted, free of charge, to any person obtaining a", "= 'make a native source package without .orig.tar.gz') sp.add_argument( '-a',", "to deal in the Software without restriction, including without limitation", "= 'version') p.add_argument( '-r', '--revision', action = 'store', default =", "'', help = 'set the tarball type, extension=(tar.gz|tar.bz2|tar.xz)', metavar =", "with the source and exit') sp = p.add_mutually_exclusive_group() sp.add_argument( '-n',", "'set the Debian package revision', metavar = 'revision') p.add_argument( '-z',", "package name; and optional \"type\" is chosen from \"bin\", \"data\",", "not debfullname: # os.getlogin may not work well: #769392 #debfullname", "'run \"dpkg-depcheck\" to judge build dependencies and identify file paths')", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "'--with', action = 'store', default = '', dest = 'withargs',", "= args.tar # -t para['version'] = 
args.upstreamversion # -u para['print_version']", "sts=4 ts=4 et ai: \"\"\" Copyright © 2014 <NAME> Permission", "follows: * The upstream tarball is downloaded as the package-version.tar.gz", "source package without .orig.tar.gz') sp.add_argument( '-a', '--archive', type = str,", "metavar = 'package') p.add_argument( '-u', '--upstreamversion', action = 'store', default", "'store_true', default = False, help='pedantically check auto-generated files') p.add_argument( '-T',", "para['dh_with'] as set if args.withargs == '': para['dh_with'] = set()", "for all prompts') p.add_argument( '-L', '--local', action = 'store_true', default", "= False, help = 'use upstream spec') p.add_argument( '-v', '--version',", "-o if args.option: exec(debmake.read.read(args.option)) ####################################################################### # return command line parameters", "to be non-multiarch') p.add_argument( '-o', '--option', default = '', action", "help='quit early before creating files in the debian directory') p.add_argument(", "debemail = pwd.getpwuid(os.getuid())[0] + '@localhost' debfullname = env('DEBFULLNAME') if not", "# command line setting ####################################################################### p = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description", "'generate extra configuration files as templates', metavar = '[01234]') p.add_argument(", "portions of the Software. 
THE SOFTWARE IS PROVIDED \"AS IS\",", "2014 <NAME> Permission is hereby granted, free of charge, to", "command line setting ####################################################################### p = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description =", "# -d para['email'] = args.email # -e para['fullname'] = args.fullname", "-i para['judge'] = args.judge # -j if para['judge']: para['override'].update({'judge'}) para['kludge']", "'store_true', default = False, help = 'run \"make dist\" equivalent", "default = '', action = 'store', help = 'read optional", "= args.upstreamversion # -u para['print_version'] = args.version # -v #############################################", "= args.dist # -d para['email'] = args.email # -e para['fullname']", "equivalent first to generate upstream tarball and use it') sp.add_argument(", "0, help = '\"force yes\" for all prompts') p.add_argument( '-L',", "first to generate upstream tarball and use it') sp.add_argument( '-t',", "p.add_argument( '-P', '--pedantic', action = 'store_true', default = False, help='pedantically", "= args.revision # -r para['spec'] = args.spec # -s para['tar']", "default = '', help = 'set the Debian package revision',", "DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "\"binarypackage\":\"type\" pairs, e.g., in full form \"foo:bin,foo-doc:doc,libfoo1:lib,libfoo1-dbg:dbg,libfoo-dev:dev\" or in short", "action = 'store', help = 'invoke package build tool', metavar", "= p.parse_args() ####################################################################### # Set parameter values ####################################################################### ############################################# -a", "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "= 'package') p.add_argument( '-u', '--upstreamversion', action = 'store', default =", "= '', help = 'set binary package specs as comma", "= args.invoke # -i para['judge'] = args.judge # -j if", "-h : used by 
argparse for --help ep = p.add_mutually_exclusive_group()", "modify, merge, publish, distribute, sublicense, and/or sell copies of the", "Here, \"binarypackage\" is the binary package name; and optional \"type\"", "in short form \",-doc,libfoo1,libfoo1-dbg, libfoo-dev\". Here, \"binarypackage\" is the binary", "ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE", "= pwd.getpwuid(os.getuid())[0] + '@localhost' debfullname = env('DEBFULLNAME') if not debfullname:", "metavar = 'revision') p.add_argument( '-z', '--targz', action = 'store', default", "\"file\"', metavar = '\"file\"') p.add_argument( '-q', '--quitearly', action = 'store_true',", "= 'store', default = '', help = 'use the upstream", "persons to whom the Software is furnished to do so,", "paths') p.add_argument( '-l', '--license', default = '', action = 'store',", "= '\"force yes\" for all prompts') p.add_argument( '-L', '--local', action", "limitation the rights to use, copy, modify, merge, publish, distribute,", "subject to the following conditions: The above copyright notice and", "OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH", "local package') p.add_argument( '-P', '--pedantic', action = 'store_true', default =", "+/-3: extensive para['dist'] = args.dist # -d para['email'] = args.email", "* Files in the package-version/debian/ directory are manually adjusted. *", "'count', default = 0, help = 'compare debian/copyright with the", "= args.gui # -g para['invoke'] = args.invoke # -i para['judge']", "set if args.license == '': para['license'] = set({'[Cc][Oo][Pp][Yy][Ii][Nn][Gg]*', '[Ll][Ii][Cc][Ee][Nn][Ss][Ee]*'}) #", "== '': para['license'] = set({'[Cc][Oo][Pp][Yy][Ii][Nn][Gg]*', '[Ll][Ii][Cc][Ee][Nn][Ss][Ee]*'}) # default else: para['license']", "of the Software. 
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "tool', metavar = '[debuild|pdebuild|...]') ep.add_argument( '-j', '--judge', action = 'store_true',", "debmake(1) manpage for more.') ck = p.add_mutually_exclusive_group() ck.add_argument( '-c', '--copyright',", "help = 'generate extra configuration files as templates', metavar =", "-L para['pedantic'] = args.pedantic # -P para['tutorial'] = args.tutorial #", "it') p.add_argument( '-p', '--package', action = 'store', default = '',", "\"bin\", \"data\", \"dbg\", \"dev\", \"doc\", \"lib\", \"perl\", \"python\", \"python3\", \"ruby\",", "'store', default = '', help = 'use the upstream source", "create many files under the package-version/ directory. * {0} is", "'store', default = debfullname, help = 'set the fullname', metavar", "= debfullname, help = 'set the fullname', metavar = '\"firstname", "= True para['tarball'] = args.archive else: para['archive'] = False para['tarball']", "= False, help = 'make a native source package without", "= pwd.getpwnam(os.getlogin())[4].split(',')[0] debfullname = pwd.getpwuid(os.getuid())[4].split(',')[0] ####################################################################### # command line setting", "= '' ############################################# para['binaryspec'] = args.binaryspec # -b para['copyright'] =", "auto-generated files') p.add_argument( '-T', '--tutorial', action = 'store_true', default =", "= args.binaryspec # -b para['copyright'] = min(args.copyright, 6) # -c", "default = debfullname, help = 'set the fullname', metavar =", "action = 'store_true', default = False, help='quit early before creating", "'set e-mail address', metavar = '<EMAIL>') p.add_argument( '-f', '--fullname', action", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A", "= args.email # -e para['fullname'] = args.fullname # -f #", "packages. 
Argument may need to be quoted to protect from", "fullname', metavar = '\"firstname lastname\"') # p.add_argument( # '-g', #", "'--targz', action = 'store', default = '', help = 'set", "import os import pwd import sys import time import debmake.read", "action = 'count', default = 0, help = 'scan source", "'binarypackage[:type]') p.add_argument( '-e', '--email', action = 'store', default = debmail,", "= '', action = 'store', help = 'read optional parameters", "Software is furnished to do so, subject to the following", "return command line parameters ####################################################################### return para ####################################################################### # Test", "Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "default = 0, help = 'compare debian/copyright with the source", "default = False, help = 'force packages to be non-multiarch')", "sp.add_argument( '-a', '--archive', type = str, action = 'store', default", "'package') p.add_argument( '-u', '--upstreamversion', action = 'store', default = '',", "-> '' def env(var): try: return os.environ[var] except KeyError: return", "the package-version/ directory. 
* {0} is invoked in the package-version/", "= 'store', default = debfullname, help = 'set the fullname',", "except KeyError: return '' ####################################################################### # Initialize parameters ####################################################################### def", "'scan source for copyright+license text and exit') ck.add_argument( '-k', '--kludge',", "help = 'use the upstream source tarball directly (-p, -u,", "help = 'set the tarball type, extension=(tar.gz|tar.bz2|tar.xz)', metavar = 'extension')", "is chosen from \"bin\", \"data\", \"dbg\", \"dev\", \"doc\", \"lib\", \"perl\",", "sell copies of the Software, and to permit persons to", "'--judge', action = 'store_true', default = False, help = 'run", "############################################# -l # --license: args.license -> para['license'] as set if", "para['spec'] = args.spec # -s para['tar'] = args.tar # -t", "import sys import time import debmake.read ########################################################################### # undefined environment", "Test code ####################################################################### if __name__ == '__main__': for p, v", "para['gui'] = args.gui # -g para['invoke'] = args.invoke # -i", "= str, action = 'store', default = '', help =", "more.') ck = p.add_mutually_exclusive_group() ck.add_argument( '-c', '--copyright', action = 'count',", "included in all copies or substantial portions of the Software.", "#769392 #debfullname = pwd.getpwnam(os.getlogin())[4].split(',')[0] debfullname = pwd.getpwuid(os.getuid())[4].split(',')[0] ####################################################################### # command", "manpage for more.') ck = p.add_mutually_exclusive_group() ck.add_argument( '-c', '--copyright', action", "# default else: para['license'] = set(args.copyright.split(',')) ############################################# para['monoarch'] = args.monoarch", "p.add_argument( '-w', '--with', action = 'store', default 
= '', dest", "from \"file\"', metavar = '\"file\"') p.add_argument( '-q', '--quitearly', action =", "para['dist'] = args.dist # -d para['email'] = args.email # -e", "package from the upstream source. Normally, this is done as", "help = 'set the Debian package revision', metavar = 'revision')", "= 'revision') p.add_argument( '-z', '--targz', action = 'store', default =", "# --license: args.license -> para['license'] as set if args.license ==", "= args.local # -L para['pedantic'] = args.pedantic # -P para['tutorial']", "comma separated list of \"binarypackage\":\"type\" pairs, e.g., in full form", "for the compiled ELF binary.', metavar = 'binarypackage[:type]') p.add_argument( '-e',", "upstream spec') p.add_argument( '-v', '--version', action = 'store_true', default =", "'set the tarball type, extension=(tar.gz|tar.bz2|tar.xz)', metavar = 'extension') p.add_argument( '-b',", "SOFTWARE. \"\"\" import argparse import os import pwd import sys", "= p.add_mutually_exclusive_group() ck.add_argument( '-c', '--copyright', action = 'count', default =", "copy, modify, merge, publish, distribute, sublicense, and/or sell copies of", "generate upstream tarball and use it') p.add_argument( '-p', '--package', action", "is set by \"binarypackage\". 
Otherwise it is set to \"bin\"", "help = 'set the fullname', metavar = '\"firstname lastname\"') #", "ask, 1: yes, 2: no para['targz'] = args.targz # -z", "####################################################################### p = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description = '''\\ {0}: make", "dist\" equivalent first to generate upstream tarball and use it')", "'<EMAIL>') p.add_argument( '-f', '--fullname', action = 'store', default = debfullname,", "= 'invoke package build tool', metavar = '[debuild|pdebuild|...]') ep.add_argument( '-j',", "####################################################################### # Set parameter values ####################################################################### ############################################# -a if args.archive:", "source. Normally, this is done as follows: * The upstream", "default = '', help = 'set the Debian package name',", "by argparse for --help ep = p.add_mutually_exclusive_group() ep.add_argument( '-i', '--invoke',", "yes, 2: no para['targz'] = args.targz # -z para['local'] =", "\"dh --with\" option arguments', metavar = 'args') p.add_argument( '-x', '--extra',", "sp.add_argument( '-d', '--dist', action = 'store_true', default = False, help", "obvious, it is set by \"binarypackage\". Otherwise it is set", "args.pedantic # -P para['tutorial'] = args.tutorial # -T ############################################# -o", "{0} helps to build the Debian package from the upstream", "= 'store_true', # default = False, # help = 'run", "'', action = 'store', help = 'invoke package build tool',", "import debmake.read ########################################################################### # undefined environment variable -> '' def", "furnished to do so, subject to the following conditions: The", "default = debmail, help = 'set e-mail address', metavar =", "or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED", "EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "# '--gui', # action = 'store_true', # default = False,", "= '', action = 'store', help = 'invoke package build", "= '[debuild|pdebuild|...]') ep.add_argument( '-j', '--judge', action = 'store_true', default =", "= '', dest = 'withargs', help = 'set additional \"dh", "6) # -c if para['copyright'] >=4: para['copyright'] = 3 -", "False, help='pedantically check auto-generated files') p.add_argument( '-T', '--tutorial', action =", "args.judge # -j if para['judge']: para['override'].update({'judge'}) para['kludge'] = args.kludge #", "para['dh_with'] = set(args.withargs.split(',')) ############################################# para['extra'] = args.extra # -x para['yes']", "type, extension=(tar.gz|tar.bz2|tar.xz)', metavar = 'extension') p.add_argument( '-b', '--binaryspec', action =", "Otherwise it is set to \"bin\" for the compiled ELF", "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "default = False, help = 'run \"dpkg-depcheck\" to judge build", "publish, distribute, sublicense, and/or sell copies of the Software, and", "revision', metavar = 'revision') p.add_argument( '-z', '--targz', action = 'store',", "'--upstreamversion', action = 'store', default = '', help = 'set", "extensive para['dist'] = args.dist # -d para['email'] = args.email #", "to judge build dependencies and identify file paths') p.add_argument( '-l',", "-n para['package'] = args.package.lower() # -p ############################################# para['quitearly'] = args.quitearly", "the compiled ELF binary.', metavar = 'binarypackage[:type]') p.add_argument( '-e', '--email',", "\"Software\"), to deal in the Software without restriction, including without", "\"dpkg-depcheck\" to judge build dependencies and identify file paths') p.add_argument(", "the package-version/ directory possibly without any arguments. 
* Files in", "'@localhost' debfullname = env('DEBFULLNAME') if not debfullname: # os.getlogin may", "= set(args.copyright.split(',')) ############################################# para['monoarch'] = args.monoarch # -m para['native'] =", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "'--tutorial', action = 'store_true', default = False, help='output tutorial comment", "dependencies and identify file paths') p.add_argument( '-l', '--license', default =", "'-u', '--upstreamversion', action = 'store', default = '', help =", "= 'set additional \"dh --with\" option arguments', metavar = 'args')", "debmail = env('DEBEMAIL') if not debmail: #debmail = os.getlogin() +", "= 'store', default = debmail, help = 'set e-mail address',", "standard +/-3: extensive para['dist'] = args.dist # -d para['email'] =", "a native source package without .orig.tar.gz') sp.add_argument( '-a', '--archive', type", "tarball type, extension=(tar.gz|tar.bz2|tar.xz)', metavar = 'extension') p.add_argument( '-b', '--binaryspec', action", "for --help ep = p.add_mutually_exclusive_group() ep.add_argument( '-i', '--invoke', default =", "If \"type\" is not specified but obvious, it is set", "False, help='output tutorial comment lines in template files') args =", "-x para['yes'] = min(args.yes, 2) # -y # 0: ask,", "be quoted to protect from the shell. 
'''.format( para['program_name'], para['program_version'],", "args.archive else: para['archive'] = False para['tarball'] = '' ############################################# para['binaryspec']", "-e para['fullname'] = args.fullname # -f # para['gui'] = args.gui", "help='output tutorial comment lines in template files') args = p.parse_args()", "be included in all copies or substantial portions of the", "p = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description = '''\\ {0}: make Debian", "'-i', '--invoke', default = '', action = 'store', help =", "line parameters ####################################################################### return para ####################################################################### # Test code #######################################################################", "debian directory') p.add_argument( '-s', '--spec', action = 'store_true', default =", "help = 'set binary package specs as comma separated list", "import argparse import os import pwd import sys import time", "####################################################################### # Initialize parameters ####################################################################### def para(para): debmail = env('DEBEMAIL')", "debian/copyright with the source and exit') sp = p.add_mutually_exclusive_group() sp.add_argument(", "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "downloaded as the package-version.tar.gz file. * It is untared to", "full form \"foo:bin,foo-doc:doc,libfoo1:lib,libfoo1-dbg:dbg,libfoo-dev:dev\" or in short form \",-doc,libfoo1,libfoo1-dbg, libfoo-dev\". 
Here,", "default = 0, help = '\"force yes\" for all prompts')", "for more.') ck = p.add_mutually_exclusive_group() ck.add_argument( '-c', '--copyright', action =", "para['license'] = set(args.copyright.split(',')) ############################################# para['monoarch'] = args.monoarch # -m para['native']", "2) # -y # 0: ask, 1: yes, 2: no", "'' ####################################################################### # Initialize parameters ####################################################################### def para(para): debmail =", "= argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description = '''\\ {0}: make Debian source", "directory. * {0} is invoked in the package-version/ directory possibly", "wrapper debuild or pdebuild) is invoked in the package-version/ directory", "upstream source tarball directly (-p, -u, -z: overridden)', metavar =", "address', metavar = '<EMAIL>') p.add_argument( '-f', '--fullname', action = 'store',", "= '<EMAIL>') p.add_argument( '-f', '--fullname', action = 'store', default =", "# vim:se tw=0 sts=4 ts=4 et ai: \"\"\" Copyright ©", "metavar = 'binarypackage[:type]') p.add_argument( '-e', '--email', action = 'store', default", "'set the fullname', metavar = '\"firstname lastname\"') # p.add_argument( #", "else: para['archive'] = False para['tarball'] = '' ############################################# para['binaryspec'] =", "\"\"\" import argparse import os import pwd import sys import", "= 'store_true', default = False, help = 'use upstream spec')", "of \"binarypackage\":\"type\" pairs, e.g., in full form \"foo:bin,foo-doc:doc,libfoo1:lib,libfoo1-dbg:dbg,libfoo-dev:dev\" or in", "ck.add_argument( '-c', '--copyright', action = 'count', default = 0, help", "'--archive', type = str, action = 'store', default = '',", "para['invoke'] = args.invoke # -i para['judge'] = args.judge # -j", "the debian directory') p.add_argument( '-s', '--spec', action = 'store_true', default", "'extension') 
p.add_argument( '-b', '--binaryspec', action = 'store', default = '',", "Version: {1} {2} {0} helps to build the Debian package", "default = False, help='output tutorial comment lines in template files')", "packages to be non-multiarch') p.add_argument( '-o', '--option', default = '',", "quoted to protect from the shell. '''.format( para['program_name'], para['program_version'], para['program_copyright']),", "= args.version # -v ############################################# -w # --with: args.withargs ->", "shell. '''.format( para['program_name'], para['program_version'], para['program_copyright']), epilog='See debmake(1) manpage for more.')", "the following conditions: The above copyright notice and this permission", "= args.targz # -z para['local'] = args.local # -L para['pedantic']", "files (the \"Software\"), to deal in the Software without restriction,", "= '\"file\"') p.add_argument( '-q', '--quitearly', action = 'store_true', default =", "is not specified but obvious, it is set by \"binarypackage\".", "from its wrapper debuild or pdebuild) is invoked in the", "parameter values ####################################################################### ############################################# -a if args.archive: para['archive'] = True", "= env('DEBFULLNAME') if not debfullname: # os.getlogin may not work", "'--copyright', action = 'count', default = 0, help = 'scan", "metavar = 'extension') p.add_argument( '-b', '--binaryspec', action = 'store', default", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF", "'run \"make dist\" equivalent first to generate upstream tarball and", "default = '', help = 'set the upstream package version',", "default = False, help = 'use upstream spec') p.add_argument( '-v',", "sp.add_argument( '-n', '--native', action = 'store_true', default = False, help", "args.revision # -r para['spec'] = args.spec # -s para['tar'] =", "args.option: exec(debmake.read.read(args.option)) 
####################################################################### # return command line parameters ####################################################################### return", "the rights to use, copy, modify, merge, publish, distribute, sublicense,", "and use it') p.add_argument( '-p', '--package', action = 'store', default", "lastname\"') # p.add_argument( # '-g', # '--gui', # action =", "action = 'store', default = '', dest = 'withargs', help", "not work well: #769392 #debfullname = pwd.getpwnam(os.getlogin())[4].split(',')[0] debfullname = pwd.getpwuid(os.getuid())[4].split(',')[0]", "USE OR OTHER DEALINGS IN THE SOFTWARE. \"\"\" import argparse", "'invoke package build tool', metavar = '[debuild|pdebuild|...]') ep.add_argument( '-j', '--judge',", "configuration') # # -h : used by argparse for --help", "# 0: ask, 1: yes, 2: no para['targz'] = args.targz", "default = '', dest = 'withargs', help = 'set additional", "software and associated documentation files (the \"Software\"), to deal in", "to debian/copyright', metavar = '\"license_file\"') p.add_argument( '-m', '--monoarch', action =", "= '[01234]') p.add_argument( '-y', '--yes', action = 'count', default =", "False para['tarball'] = '' ############################################# para['binaryspec'] = args.binaryspec # -b", "to be quoted to protect from the shell. 
'''.format( para['program_name'],", "sp = p.add_mutually_exclusive_group() sp.add_argument( '-n', '--native', action = 'store_true', default", "debian/copyright', metavar = '\"license_file\"') p.add_argument( '-m', '--monoarch', action = 'store_true',", "para['tar'] = args.tar # -t para['version'] = args.upstreamversion # -u", "is set to \"bin\" for the compiled ELF binary.', metavar", "notice and this permission notice shall be included in all", "files as templates', metavar = '[01234]') p.add_argument( '-y', '--yes', action", "is hereby granted, free of charge, to any person obtaining", "= 'store_true', default = False, help='generate configuration files for the", "'-v', '--version', action = 'store_true', default = False, help =", "# -m para['native'] = args.native # -n para['package'] = args.package.lower()", "# -v ############################################# -w # --with: args.withargs -> para['dh_with'] as", "is invoked in the package-version/ directory possibly without any arguments.", "'-g', # '--gui', # action = 'store_true', # default =", "pdebuild) is invoked in the package-version/ directory to make debian", "use it') sp.add_argument( '-t', '--tar', action = 'store_true', default =", "the upstream source. 
Normally, this is done as follows: *", "metavar = 'version') p.add_argument( '-r', '--revision', action = 'store', default", "'-t', '--tar', action = 'store_true', default = False, help =", "'--extra', default = '', action = 'store', help = 'generate", "Debian package name', metavar = 'package') p.add_argument( '-u', '--upstreamversion', action", "'store', default = '', help = 'set the Debian package", "para['license'] as set if args.license == '': para['license'] = set({'[Cc][Oo][Pp][Yy][Ii][Nn][Gg]*',", "to the following conditions: The above copyright notice and this", "conditions: The above copyright notice and this permission notice shall", "package Version: {1} {2} {0} helps to build the Debian", "the Software without restriction, including without limitation the rights to", "'-k', '--kludge', action = 'count', default = 0, help =", "action = 'store_true', default = False, help = 'make a", "--with: args.withargs -> para['dh_with'] as set if args.withargs == '':", "command line parameters ####################################################################### return para ####################################################################### # Test code", "= env('DEBEMAIL') if not debmail: #debmail = os.getlogin() + '@localhost'", "it is set to \"bin\" for the compiled ELF binary.',", "text and exit') ck.add_argument( '-k', '--kludge', action = 'count', default", "THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND", "= '''\\ {0}: make Debian source package Version: {1} {2}", "generate upstream tarball and use it') sp.add_argument( '-t', '--tar', action", "Set parameter values ####################################################################### ############################################# -a if args.archive: para['archive'] =", "-b para['copyright'] = min(args.copyright, 6) # -c if para['copyright'] >=4:", "############################################# para['quitearly'] = args.quitearly # -q para['revision'] = args.revision #", "{2} {0} 
helps to build the Debian package from the", "and/or sell copies of the Software, and to permit persons", "= '', help = 'set the Debian package name', metavar", "identify file paths') p.add_argument( '-l', '--license', default = '', action", "permit persons to whom the Software is furnished to do", "package') p.add_argument( '-P', '--pedantic', action = 'store_true', default = False,", "False, help = 'run \"dpkg-depcheck\" to judge build dependencies and", "do so, subject to the following conditions: The above copyright", "directory possibly without any arguments. * Files in the package-version/debian/", "p.add_argument( '-f', '--fullname', action = 'store', default = debfullname, help", "= 'store', default = '', dest = 'withargs', help =", "any person obtaining a copy of this software and associated", "debfullname = pwd.getpwuid(os.getuid())[4].split(',')[0] ####################################################################### # command line setting ####################################################################### p", "= 3 - para['copyright'] # 0: debian/copyright, +/-1: simple, +/-2:", "try: return os.environ[var] except KeyError: return '' ####################################################################### # Initialize", "tarball and use it') sp.add_argument( '-t', '--tar', action = 'store_true',", "'-y', '--yes', action = 'count', default = 0, help =", "= 'store', help = 'invoke package build tool', metavar =", "copyright+license text and exit') ck.add_argument( '-k', '--kludge', action = 'count',", "'--spec', action = 'store_true', default = False, help = 'use", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "False, help = 'use upstream spec') p.add_argument( '-v', '--version', action", "'''\\ {0}: make Debian source package Version: {1} {2} {0}", "formatter_class=argparse.RawDescriptionHelpFormatter, description = '''\\ {0}: make Debian source package Version:", "'-w', '--with', action = 'store', default = '', dest =", "protect from the 
shell. '''.format( para['program_name'], para['program_version'], para['program_copyright']), epilog='See debmake(1)", "########################################################################### # undefined environment variable -> '' def env(var): try:", "invoked in the package-version/ directory possibly without any arguments. *", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "default = False, help='pedantically check auto-generated files') p.add_argument( '-T', '--tutorial',", "return '' ####################################################################### # Initialize parameters ####################################################################### def para(para): debmail", "THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "'-d', '--dist', action = 'store_true', default = False, help =", "'-n', '--native', action = 'store_true', default = False, help =", "binary.', metavar = 'binarypackage[:type]') p.add_argument( '-e', '--email', action = 'store',", "exit') sp = p.add_mutually_exclusive_group() sp.add_argument( '-n', '--native', action = 'store_true',", "directory to make debian packages. 
Argument may need to be", "source for copyright+license text and exit') ck.add_argument( '-k', '--kludge', action", "= 'scan source for copyright+license text and exit') ck.add_argument( '-k',", "metavar = 'package-version.tar.gz') sp.add_argument( '-d', '--dist', action = 'store_true', default", "= 'set the fullname', metavar = '\"firstname lastname\"') # p.add_argument(", "default = False, help='quit early before creating files in the", "= args.monoarch # -m para['native'] = args.native # -n para['package']", "copy of this software and associated documentation files (the \"Software\"),", "-m para['native'] = args.native # -n para['package'] = args.package.lower() #", "else: para['dh_with'] = set(args.withargs.split(',')) ############################################# para['extra'] = args.extra # -x", "= False, help='pedantically check auto-generated files') p.add_argument( '-T', '--tutorial', action", "para['judge']: para['override'].update({'judge'}) para['kludge'] = args.kludge # -k ############################################# -l #", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL", "= p.add_mutually_exclusive_group() sp.add_argument( '-n', '--native', action = 'store_true', default =", "not debmail: #debmail = os.getlogin() + '@localhost' debemail = pwd.getpwuid(os.getuid())[0]", "= '', help = 'set the tarball type, extension=(tar.gz|tar.bz2|tar.xz)', metavar", "'[debuild|pdebuild|...]') ep.add_argument( '-j', '--judge', action = 'store_true', default = False,", "including without limitation the rights to use, copy, modify, merge,", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "package specs as comma separated list of \"binarypackage\":\"type\" pairs, e.g.,", "'-L', '--local', action = 'store_true', default = False, help='generate configuration", "'-z', '--targz', action = 'store', default = '', help =", "set(args.withargs.split(',')) ############################################# para['extra'] = args.extra # -x para['yes'] = min(args.yes,", "'-f', '--fullname', action = 'store', default = debfullname, help =", "lines in template files') args = p.parse_args() ####################################################################### # Set", "file. * It is untared to create many files under", "action = 'store_true', default = False, help='pedantically check auto-generated files')", "'--revision', action = 'store', default = '', help = 'set", "default = False, help='generate configuration files for the local package')", "binary package name; and optional \"type\" is chosen from \"bin\",", "comment lines in template files') args = p.parse_args() ####################################################################### #", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "\"foo:bin,foo-doc:doc,libfoo1:lib,libfoo1-dbg:dbg,libfoo-dev:dev\" or in short form \",-doc,libfoo1,libfoo1-dbg, libfoo-dev\". Here, \"binarypackage\" is", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "the package-version/ directory to make debian packages. 
Argument may need", "p.parse_args() ####################################################################### # Set parameter values ####################################################################### ############################################# -a if", "args.binaryspec # -b para['copyright'] = min(args.copyright, 6) # -c if", "para['kludge'] = args.kludge # -k ############################################# -l # --license: args.license", "'compare debian/copyright with the source and exit') sp = p.add_mutually_exclusive_group()", "para['package'] = args.package.lower() # -p ############################################# para['quitearly'] = args.quitearly #", "args.tar # -t para['version'] = args.upstreamversion # -u para['print_version'] =", "p.add_mutually_exclusive_group() ep.add_argument( '-i', '--invoke', default = '', action = 'store',", "build dependencies and identify file paths') p.add_argument( '-l', '--license', default", "not specified but obvious, it is set by \"binarypackage\". Otherwise", "set to \"bin\" for the compiled ELF binary.', metavar =", "specified but obvious, it is set by \"binarypackage\". Otherwise it", "is downloaded as the package-version.tar.gz file. 
* It is untared", "= args.archive else: para['archive'] = False para['tarball'] = '' #############################################", "= args.native # -n para['package'] = args.package.lower() # -p #############################################", "= 'store_true', default = False, help = 'make a native", "= 'store_true', default = False, help = 'run \"make dist\"", "extension=(tar.gz|tar.bz2|tar.xz)', metavar = 'extension') p.add_argument( '-b', '--binaryspec', action = 'store',", "name', metavar = 'package') p.add_argument( '-u', '--upstreamversion', action = 'store',", "debmail, help = 'set e-mail address', metavar = '<EMAIL>') p.add_argument(", "'store_true', default = False, help='generate configuration files for the local", "\"doc\", \"lib\", \"perl\", \"python\", \"python3\", \"ruby\", and \"script\". If \"type\"", "set({'[Cc][Oo][Pp][Yy][Ii][Nn][Gg]*', '[Ll][Ii][Cc][Ee][Nn][Ss][Ee]*'}) # default else: para['license'] = set(args.copyright.split(',')) ############################################# para['monoarch']", "without limitation the rights to use, copy, modify, merge, publish,", "= False, # help = 'run GUI configuration') # #", "parameters ####################################################################### def para(para): debmail = env('DEBEMAIL') if not debmail:", "# default = False, # help = 'run GUI configuration')", "action = 'store_true', default = False, help = 'force packages", "(usually from its wrapper debuild or pdebuild) is invoked in", "args.kludge # -k ############################################# -l # --license: args.license -> para['license']", "libfoo-dev\". 
Here, \"binarypackage\" is the binary package name; and optional", "# Initialize parameters ####################################################################### def para(para): debmail = env('DEBEMAIL') if", "line setting ####################################################################### p = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description = '''\\", "action = 'store_true', default = False, help = 'run \"tar\"", "is invoked in the package-version/ directory to make debian packages.", "para['judge'] = args.judge # -j if para['judge']: para['override'].update({'judge'}) para['kludge'] =", "args.package.lower() # -p ############################################# para['quitearly'] = args.quitearly # -q para['revision']", "# -e para['fullname'] = args.fullname # -f # para['gui'] =", "help = 'run \"tar\" to generate upstream tarball and use", "restriction, including without limitation the rights to use, copy, modify,", "the tarball type, extension=(tar.gz|tar.bz2|tar.xz)', metavar = 'extension') p.add_argument( '-b', '--binaryspec',", "-z para['local'] = args.local # -L para['pedantic'] = args.pedantic #", "set() # default is empty set else: para['dh_with'] = set(args.withargs.split(','))", "or pdebuild) is invoked in the package-version/ directory to make", "to permit persons to whom the Software is furnished to", "= 'set the Debian package name', metavar = 'package') p.add_argument(", "(-p, -u, -z: overridden)', metavar = 'package-version.tar.gz') sp.add_argument( '-d', '--dist',", "'\"file\"') p.add_argument( '-q', '--quitearly', action = 'store_true', default = False,", "<reponame>MarioCarrilloA/stx-packaging #!/usr/bin/python3 # vim:se tw=0 sts=4 ts=4 et ai: \"\"\"", "* {0} is invoked in the package-version/ directory possibly without", "action = 'store_true', default = False, help='generate configuration files for", "para['override'].update({'judge'}) para['kludge'] = args.kludge # -k 
############################################# -l # --license:", "check auto-generated files') p.add_argument( '-T', '--tutorial', action = 'store_true', default", "para['monoarch'] = args.monoarch # -m para['native'] = args.native # -n", "para['program_name'], para['program_version'], para['program_copyright']), epilog='See debmake(1) manpage for more.') ck =", "IN THE SOFTWARE. \"\"\" import argparse import os import pwd", "para['yes'] = min(args.yes, 2) # -y # 0: ask, 1:", "OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "Debian source package Version: {1} {2} {0} helps to build", "debuild or pdebuild) is invoked in the package-version/ directory to", "'--kludge', action = 'count', default = 0, help = 'compare", "metavar = '[debuild|pdebuild|...]') ep.add_argument( '-j', '--judge', action = 'store_true', default", "# -n para['package'] = args.package.lower() # -p ############################################# para['quitearly'] =", "arguments', metavar = 'args') p.add_argument( '-x', '--extra', default = '',", "args.version # -v ############################################# -w # --with: args.withargs -> para['dh_with']", "directory') p.add_argument( '-s', '--spec', action = 'store_true', default = False,", "-a if args.archive: para['archive'] = True para['tarball'] = args.archive else:", "= set() # default is empty set else: para['dh_with'] =", "adjusted. * dpkg-buildpackage (usually from its wrapper debuild or pdebuild)", "= 'package-version.tar.gz') sp.add_argument( '-d', '--dist', action = 'store_true', default =", "Debian package from the upstream source. 
Normally, this is done", "OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "-w # --with: args.withargs -> para['dh_with'] as set if args.withargs", "deal in the Software without restriction, including without limitation the", "files for the local package') p.add_argument( '-P', '--pedantic', action =", "min(args.copyright, 6) # -c if para['copyright'] >=4: para['copyright'] = 3", "template files') args = p.parse_args() ####################################################################### # Set parameter values", "dest = 'withargs', help = 'set additional \"dh --with\" option", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR", "False, # help = 'run GUI configuration') # # -h", "# -t para['version'] = args.upstreamversion # -u para['print_version'] = args.version", "ck = p.add_mutually_exclusive_group() ck.add_argument( '-c', '--copyright', action = 'count', default", "the fullname', metavar = '\"firstname lastname\"') # p.add_argument( # '-g',", "setting ####################################################################### p = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description = '''\\ {0}:", "'set the Debian package name', metavar = 'package') p.add_argument( '-u',", "the package-version/debian/ directory are manually adjusted. 
* dpkg-buildpackage (usually from", "= 'run GUI configuration') # # -h : used by", "default = '', help = 'set the tarball type, extension=(tar.gz|tar.bz2|tar.xz)',", "ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO", "para['extra'] = args.extra # -x para['yes'] = min(args.yes, 2) #", "distribute, sublicense, and/or sell copies of the Software, and to", "'show version information') p.add_argument( '-w', '--with', action = 'store', default", "para['email'] = args.email # -e para['fullname'] = args.fullname # -f", "#debfullname = pwd.getpwnam(os.getlogin())[4].split(',')[0] debfullname = pwd.getpwuid(os.getuid())[4].split(',')[0] ####################################################################### # command line", "True para['tarball'] = args.archive else: para['archive'] = False para['tarball'] =", "'-p', '--package', action = 'store', default = '', help =", "-> para['license'] as set if args.license == '': para['license'] =", "optional parameters from \"file\"', metavar = '\"file\"') p.add_argument( '-q', '--quitearly',", "#!/usr/bin/python3 # vim:se tw=0 sts=4 ts=4 et ai: \"\"\" Copyright", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR", "# os.getlogin may not work well: #769392 #debfullname = pwd.getpwnam(os.getlogin())[4].split(',')[0]", "{0}: make Debian source package Version: {1} {2} {0} helps", "para['local'] = args.local # -L para['pedantic'] = args.pedantic # -P", "\"binarypackage\" is the binary package name; and optional \"type\" is", "'--option', default = '', action = 'store', help = 'read", "untared to create many files under the package-version/ directory. *", "files under the package-version/ directory. * {0} is invoked in", "'run GUI configuration') # # -h : used by argparse", "time import debmake.read ########################################################################### # undefined environment variable -> ''", "'-b', '--binaryspec', action = 'store', default = '', help =", "THE SOFTWARE. 
\"\"\" import argparse import os import pwd import", "pwd import sys import time import debmake.read ########################################################################### # undefined", "= 'withargs', help = 'set additional \"dh --with\" option arguments',", "= 'run \"dpkg-depcheck\" to judge build dependencies and identify file", "the Software, and to permit persons to whom the Software", "and associated documentation files (the \"Software\"), to deal in the", "ep = p.add_mutually_exclusive_group() ep.add_argument( '-i', '--invoke', default = '', action", "need to be quoted to protect from the shell. '''.format(", "False, help = 'force packages to be non-multiarch') p.add_argument( '-o',", "pwd.getpwuid(os.getuid())[0] + '@localhost' debfullname = env('DEBFULLNAME') if not debfullname: #", "{1} {2} {0} helps to build the Debian package from", "the upstream package version', metavar = 'version') p.add_argument( '-r', '--revision',", "return os.environ[var] except KeyError: return '' ####################################################################### # Initialize parameters", "3 - para['copyright'] # 0: debian/copyright, +/-1: simple, +/-2: standard", "# -j if para['judge']: para['override'].update({'judge'}) para['kludge'] = args.kludge # -k", "invoked in the package-version/ directory to make debian packages. Argument", "'store', default = debmail, help = 'set e-mail address', metavar", "help = 'run GUI configuration') # # -h : used", "package-version/ directory possibly without any arguments. * Files in the", "####################################################################### # Test code ####################################################################### if __name__ == '__main__': for", "the Software. 
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "argparse import os import pwd import sys import time import", "as follows: * The upstream tarball is downloaded as the", "to whom the Software is furnished to do so, subject", "from \"bin\", \"data\", \"dbg\", \"dev\", \"doc\", \"lib\", \"perl\", \"python\", \"python3\",", "action = 'store_true', default = False, help = 'run \"dpkg-depcheck\"", "args.dist # -d para['email'] = args.email # -e para['fullname'] =", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "default = '', action = 'store', help = 'invoke package", "may need to be quoted to protect from the shell.", "= p.add_mutually_exclusive_group() ep.add_argument( '-i', '--invoke', default = '', action =", "help = 'set the upstream package version', metavar = 'version')", "args.fullname # -f # para['gui'] = args.gui # -g para['invoke']", "'--email', action = 'store', default = debmail, help = 'set", "default is empty set else: para['dh_with'] = set(args.withargs.split(',')) ############################################# para['extra']", "creating files in the debian directory') p.add_argument( '-s', '--spec', action", "-l # --license: args.license -> para['license'] as set if args.license", "WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT", "to generate upstream tarball and use it') p.add_argument( '-p', '--package',", "= 'set the Debian package revision', metavar = 'revision') p.add_argument(", "= 'args') p.add_argument( '-x', '--extra', default = '', action =", "debmail: #debmail = os.getlogin() + '@localhost' debemail = pwd.getpwuid(os.getuid())[0] +", "= 'store', help = 'add formatted license to debian/copyright', metavar", "'[Ll][Ii][Cc][Ee][Nn][Ss][Ee]*'}) # default else: para['license'] = set(args.copyright.split(',')) ############################################# para['monoarch'] =", "help = 'compare debian/copyright with the source and exit') sp", "debfullname, help = 'set the fullname', metavar = '\"firstname 
lastname\"')", "exec(debmake.read.read(args.option)) ####################################################################### # return command line parameters ####################################################################### return para", "TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE", "'-P', '--pedantic', action = 'store_true', default = False, help='pedantically check", "# action = 'store_true', # default = False, # help", "build the Debian package from the upstream source. Normally, this", "False, help = 'run \"tar\" to generate upstream tarball and", "action = 'store', help = 'read optional parameters from \"file\"',", "# Test code ####################################################################### if __name__ == '__main__': for p,", "'store_true', default = False, help = 'make a native source", "-u para['print_version'] = args.version # -v ############################################# -w # --with:", "arguments. * Files in the package-version/debian/ directory are manually adjusted.", "####################################################################### return para ####################################################################### # Test code ####################################################################### if __name__", "to protect from the shell. 
'''.format( para['program_name'], para['program_version'], para['program_copyright']), epilog='See", "= 'store_true', default = False, help='pedantically check auto-generated files') p.add_argument(", "para['dh_with'] = set() # default is empty set else: para['dh_with']", "in the debian directory') p.add_argument( '-s', '--spec', action = 'store_true',", "upstream package version', metavar = 'version') p.add_argument( '-r', '--revision', action", "of the Software, and to permit persons to whom the", "this software and associated documentation files (the \"Software\"), to deal", "'--quitearly', action = 'store_true', default = False, help='quit early before", "'-o', '--option', default = '', action = 'store', help =", "to make debian packages. Argument may need to be quoted", "compiled ELF binary.', metavar = 'binarypackage[:type]') p.add_argument( '-e', '--email', action", "action = 'store', default = debmail, help = 'set e-mail", "action = 'count', default = 0, help = '\"force yes\"", "== '': para['dh_with'] = set() # default is empty set", "False, help='generate configuration files for the local package') p.add_argument( '-P',", "args.withargs -> para['dh_with'] as set if args.withargs == '': para['dh_with']", "para['binaryspec'] = args.binaryspec # -b para['copyright'] = min(args.copyright, 6) #", "all copies or substantial portions of the Software. 
THE SOFTWARE", "tarball and use it') p.add_argument( '-p', '--package', action = 'store',", "= 'use upstream spec') p.add_argument( '-v', '--version', action = 'store_true',", "= 'generate extra configuration files as templates', metavar = '[01234]')", "the upstream source tarball directly (-p, -u, -z: overridden)', metavar", "'': para['license'] = set({'[Cc][Oo][Pp][Yy][Ii][Nn][Gg]*', '[Ll][Ii][Cc][Ee][Nn][Ss][Ee]*'}) # default else: para['license'] =", "= args.pedantic # -P para['tutorial'] = args.tutorial # -T #############################################", "help = 'invoke package build tool', metavar = '[debuild|pdebuild|...]') ep.add_argument(", "(the \"Software\"), to deal in the Software without restriction, including", "is untared to create many files under the package-version/ directory.", "env(var): try: return os.environ[var] except KeyError: return '' ####################################################################### #", "'', help = 'use the upstream source tarball directly (-p,", "= False, help='generate configuration files for the local package') p.add_argument(", "package version', metavar = 'version') p.add_argument( '-r', '--revision', action =", "merge, publish, distribute, sublicense, and/or sell copies of the Software,", "its wrapper debuild or pdebuild) is invoked in the package-version/", "debian packages. Argument may need to be quoted to protect", "\"lib\", \"perl\", \"python\", \"python3\", \"ruby\", and \"script\". 
If \"type\" is", "'\"license_file\"') p.add_argument( '-m', '--monoarch', action = 'store_true', default = False,", "yes\" for all prompts') p.add_argument( '-L', '--local', action = 'store_true',", "so, subject to the following conditions: The above copyright notice", "upstream tarball and use it') sp.add_argument( '-t', '--tar', action =", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "# -r para['spec'] = args.spec # -s para['tar'] = args.tar", "'set additional \"dh --with\" option arguments', metavar = 'args') p.add_argument(", "charge, to any person obtaining a copy of this software", "to do so, subject to the following conditions: The above", "args.upstreamversion # -u para['print_version'] = args.version # -v ############################################# -w", "if not debmail: #debmail = os.getlogin() + '@localhost' debemail =", "if args.archive: para['archive'] = True para['tarball'] = args.archive else: para['archive']", "sp.add_argument( '-t', '--tar', action = 'store_true', default = False, help", "WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "to create many files under the package-version/ directory. * {0}", "= 'compare debian/copyright with the source and exit') sp =", "'--tar', action = 'store_true', default = False, help = 'run", "following conditions: The above copyright notice and this permission notice", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "\"binarypackage\". 
Otherwise it is set to \"bin\" for the compiled", "'--binaryspec', action = 'store', default = '', help = 'set", "\"bin\" for the compiled ELF binary.', metavar = 'binarypackage[:type]') p.add_argument(", "configuration files as templates', metavar = '[01234]') p.add_argument( '-y', '--yes',", "it') sp.add_argument( '-t', '--tar', action = 'store_true', default = False,", "-j if para['judge']: para['override'].update({'judge'}) para['kludge'] = args.kludge # -k #############################################", "args.spec # -s para['tar'] = args.tar # -t para['version'] =", "# default is empty set else: para['dh_with'] = set(args.withargs.split(',')) #############################################", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "args.withargs == '': para['dh_with'] = set() # default is empty", "directly (-p, -u, -z: overridden)', metavar = 'package-version.tar.gz') sp.add_argument( '-d',", "and use it') sp.add_argument( '-t', '--tar', action = 'store_true', default", "metavar = '\"file\"') p.add_argument( '-q', '--quitearly', action = 'store_true', default", "in the Software without restriction, including without limitation the rights", "pwd.getpwuid(os.getuid())[4].split(',')[0] ####################################################################### # command line setting ####################################################################### p = argparse.ArgumentParser(", "without .orig.tar.gz') sp.add_argument( '-a', '--archive', type = str, action =", "permission notice shall be included in all copies or substantial", "but obvious, it is set by \"binarypackage\". 
Otherwise it is", "p.add_argument( '-s', '--spec', action = 'store_true', default = False, help", "os.getlogin may not work well: #769392 #debfullname = pwd.getpwnam(os.getlogin())[4].split(',')[0] debfullname", "set if args.withargs == '': para['dh_with'] = set() # default", "#debmail = os.getlogin() + '@localhost' debemail = pwd.getpwuid(os.getuid())[0] + '@localhost'", "sys import time import debmake.read ########################################################################### # undefined environment variable", "argparse for --help ep = p.add_mutually_exclusive_group() ep.add_argument( '-i', '--invoke', default", "# -q para['revision'] = args.revision # -r para['spec'] = args.spec", "'version') p.add_argument( '-r', '--revision', action = 'store', default = '',", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "ELF binary.', metavar = 'binarypackage[:type]') p.add_argument( '-e', '--email', action =", "'store', help = 'invoke package build tool', metavar = '[debuild|pdebuild|...]')", "-p ############################################# para['quitearly'] = args.quitearly # -q para['revision'] = args.revision", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER", "help = 'set additional \"dh --with\" option arguments', metavar =", "'--yes', action = 'count', default = 0, help = '\"force", "pairs, e.g., in full form \"foo:bin,foo-doc:doc,libfoo1:lib,libfoo1-dbg:dbg,libfoo-dev:dev\" or in short form", "p.add_mutually_exclusive_group() sp.add_argument( '-n', '--native', action = 'store_true', default = False,", "upstream source. 
Normally, this is done as follows: * The", "\"\"\" Copyright © 2014 <NAME> Permission is hereby granted, free", "'set binary package specs as comma separated list of \"binarypackage\":\"type\"", "args = p.parse_args() ####################################################################### # Set parameter values ####################################################################### #############################################", "'args') p.add_argument( '-x', '--extra', default = '', action = 'store',", "action = 'store', default = '', help = 'use the", ": used by argparse for --help ep = p.add_mutually_exclusive_group() ep.add_argument(", "from the upstream source. Normally, this is done as follows:", "if args.license == '': para['license'] = set({'[Cc][Oo][Pp][Yy][Ii][Nn][Gg]*', '[Ll][Ii][Cc][Ee][Nn][Ss][Ee]*'}) # default", "as the package-version.tar.gz file. * It is untared to create", "overridden)', metavar = 'package-version.tar.gz') sp.add_argument( '-d', '--dist', action = 'store_true',", "help = 'set the Debian package name', metavar = 'package')", "p.add_argument( '-e', '--email', action = 'store', default = debmail, help", "action = 'store', default = debfullname, help = 'set the", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS", "'store_true', default = False, help = 'run \"dpkg-depcheck\" to judge", "'store', default = '', dest = 'withargs', help = 'set", "# -k ############################################# -l # --license: args.license -> para['license'] as", "para['native'] = args.native # -n para['package'] = args.package.lower() # -p", "para['pedantic'] = args.pedantic # -P para['tutorial'] = args.tutorial # -T", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,", "para['version'] = args.upstreamversion # -u para['print_version'] = args.version # -v", "# -T ############################################# -o if args.option: exec(debmake.read.read(args.option)) 
####################################################################### # return", "epilog='See debmake(1) manpage for more.') ck = p.add_mutually_exclusive_group() ck.add_argument( '-c',", "\"tar\" to generate upstream tarball and use it') p.add_argument( '-p',", "= '', help = 'set the Debian package revision', metavar", "copyright notice and this permission notice shall be included in", "= 'store_true', default = False, help = 'show version information')", "help='pedantically check auto-generated files') p.add_argument( '-T', '--tutorial', action = 'store_true',", "= args.judge # -j if para['judge']: para['override'].update({'judge'}) para['kludge'] = args.kludge", "'-s', '--spec', action = 'store_true', default = False, help =", "-f # para['gui'] = args.gui # -g para['invoke'] = args.invoke", "= '', help = 'set the upstream package version', metavar", "action = 'store', default = '', help = 'set the", "'@localhost' debemail = pwd.getpwuid(os.getuid())[0] + '@localhost' debfullname = env('DEBFULLNAME') if", "\"type\" is not specified but obvious, it is set by", "OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\"\"\"", "'--package', action = 'store', default = '', help = 'set", "tutorial comment lines in template files') args = p.parse_args() #######################################################################", "'' ############################################# para['binaryspec'] = args.binaryspec # -b para['copyright'] = min(args.copyright,", "optional \"type\" is chosen from \"bin\", \"data\", \"dbg\", \"dev\", \"doc\",", "= 'show version information') p.add_argument( '-w', '--with', action = 'store',", "p.add_argument( '-z', '--targz', action = 'store', default = '', help", "= '', action = 'store', help = 'generate extra configuration", "and to permit persons to whom the Software is furnished", "= 'store_true', default = False, help='output tutorial comment lines in", "para['quitearly'] = args.quitearly # -q para['revision'] = args.revision # -r", "para['fullname'] = args.fullname # -f # para['gui'] = args.gui #", "copies or substantial portions of the Software. THE SOFTWARE IS", "OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED", "default = False, help = 'show version information') p.add_argument( '-w',", "# para['gui'] = args.gui # -g para['invoke'] = args.invoke #", "= False para['tarball'] = '' ############################################# para['binaryspec'] = args.binaryspec #", "'store_true', default = False, help = 'use upstream spec') p.add_argument(", "files') args = p.parse_args() ####################################################################### # Set parameter values #######################################################################", "-s para['tar'] = args.tar # -t para['version'] = args.upstreamversion #", "para['program_version'], para['program_copyright']), epilog='See debmake(1) manpage for more.') ck = p.add_mutually_exclusive_group()", "'', help = 'set the Debian package name', metavar =", "Normally, this is done as follows: * The upstream tarball", "\"data\", \"dbg\", \"dev\", \"doc\", \"lib\", \"perl\", \"python\", 
\"python3\", \"ruby\", and", "p.add_argument( '-m', '--monoarch', action = 'store_true', default = False, help", "############################################# -w # --with: args.withargs -> para['dh_with'] as set if", "= args.tutorial # -T ############################################# -o if args.option: exec(debmake.read.read(args.option)) #######################################################################", "as set if args.withargs == '': para['dh_with'] = set() #", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "# '-g', # '--gui', # action = 'store_true', # default", "and exit') sp = p.add_mutually_exclusive_group() sp.add_argument( '-n', '--native', action =", "'--version', action = 'store_true', default = False, help = 'show", "-u, -z: overridden)', metavar = 'package-version.tar.gz') sp.add_argument( '-d', '--dist', action", "# -i para['judge'] = args.judge # -j if para['judge']: para['override'].update({'judge'})", "p.add_argument( '-L', '--local', action = 'store_true', default = False, help='generate", "para['tarball'] = '' ############################################# para['binaryspec'] = args.binaryspec # -b para['copyright']", "+/-2: standard +/-3: extensive para['dist'] = args.dist # -d para['email']", "'', help = 'set the Debian package revision', metavar =", "is empty set else: para['dh_with'] = set(args.withargs.split(',')) ############################################# para['extra'] =", "the source and exit') sp = p.add_mutually_exclusive_group() sp.add_argument( '-n', '--native',", "= 'run \"make dist\" equivalent first to generate upstream tarball", "'''.format( para['program_name'], para['program_version'], para['program_copyright']), epilog='See debmake(1) manpage for more.') ck", "p.add_argument( '-x', '--extra', default = '', action = 'store', help", "use it') p.add_argument( '-p', '--package', action = 'store', default =", "in the package-version/ 
directory possibly without any arguments. * Files", "'store_true', default = False, help='output tutorial comment lines in template", "1: yes, 2: no para['targz'] = args.targz # -z para['local']", "* dpkg-buildpackage (usually from its wrapper debuild or pdebuild) is", "package revision', metavar = 'revision') p.add_argument( '-z', '--targz', action =", "whom the Software is furnished to do so, subject to", "__name__ == '__main__': for p, v in para().items(): print(\"para['{}'] =", "<NAME> Permission is hereby granted, free of charge, to any", "variable -> '' def env(var): try: return os.environ[var] except KeyError:", "Files in the package-version/debian/ directory are manually adjusted. * dpkg-buildpackage", "build tool', metavar = '[debuild|pdebuild|...]') ep.add_argument( '-j', '--judge', action =", "help = 'show version information') p.add_argument( '-w', '--with', action =", "Initialize parameters ####################################################################### def para(para): debmail = env('DEBEMAIL') if not", "= 'store', help = 'generate extra configuration files as templates',", "AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT", "# Set parameter values ####################################################################### ############################################# -a if args.archive: para['archive']", "\"python\", \"python3\", \"ruby\", and \"script\". If \"type\" is not specified", "os import pwd import sys import time import debmake.read ###########################################################################", "to generate upstream tarball and use it') sp.add_argument( '-t', '--tar',", "in all copies or substantial portions of the Software. 
THE", "obtaining a copy of this software and associated documentation files", "############################################# -o if args.option: exec(debmake.read.read(args.option)) ####################################################################### # return command line", "help = 'run \"make dist\" equivalent first to generate upstream", "this is done as follows: * The upstream tarball is", "'-q', '--quitearly', action = 'store_true', default = False, help='quit early", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN", "0, help = 'compare debian/copyright with the source and exit')", "the binary package name; and optional \"type\" is chosen from", "= False, help = 'force packages to be non-multiarch') p.add_argument(", "metavar = '\"firstname lastname\"') # p.add_argument( # '-g', # '--gui',", "def para(para): debmail = env('DEBEMAIL') if not debmail: #debmail =", "Copyright © 2014 <NAME> Permission is hereby granted, free of", "metavar = 'args') p.add_argument( '-x', '--extra', default = '', action", "####################################################################### # return command line parameters ####################################################################### return para #######################################################################", "'-l', '--license', default = '', action = 'store', help =", "may not work well: #769392 #debfullname = pwd.getpwnam(os.getlogin())[4].split(',')[0] debfullname =", "of this software and associated documentation files (the \"Software\"), to", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "a copy of this software and associated documentation files (the", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "args.email # -e para['fullname'] = args.fullname # -f # para['gui']", "= '', help = 'use the upstream source tarball directly", "= os.getlogin() + '@localhost' debemail = pwd.getpwuid(os.getuid())[0] + '@localhost' debfullname", "the package-version.tar.gz file. 
* It is untared to create many", "--help ep = p.add_mutually_exclusive_group() ep.add_argument( '-i', '--invoke', default = '',", "configuration files for the local package') p.add_argument( '-P', '--pedantic', action", "is done as follows: * The upstream tarball is downloaded", "sublicense, and/or sell copies of the Software, and to permit", "source and exit') sp = p.add_mutually_exclusive_group() sp.add_argument( '-n', '--native', action", "action = 'count', default = 0, help = 'compare debian/copyright", "and \"script\". If \"type\" is not specified but obvious, it", "files') p.add_argument( '-T', '--tutorial', action = 'store_true', default = False,", "############################################# para['monoarch'] = args.monoarch # -m para['native'] = args.native #", "= args.extra # -x para['yes'] = min(args.yes, 2) # -y", "and exit') ck.add_argument( '-k', '--kludge', action = 'count', default =", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR", "KeyError: return '' ####################################################################### # Initialize parameters ####################################################################### def para(para):", "short form \",-doc,libfoo1,libfoo1-dbg, libfoo-dev\". 
Here, \"binarypackage\" is the binary package", "'-j', '--judge', action = 'store_true', default = False, help =", "'add formatted license to debian/copyright', metavar = '\"license_file\"') p.add_argument( '-m',", "'store_true', # default = False, # help = 'run GUI", "helps to build the Debian package from the upstream source.", "'store_true', default = False, help = 'run \"tar\" to generate", "str, action = 'store', default = '', help = 'use", "'--invoke', default = '', action = 'store', help = 'invoke", "-q para['revision'] = args.revision # -r para['spec'] = args.spec #", "# -f # para['gui'] = args.gui # -g para['invoke'] =", "tarball directly (-p, -u, -z: overridden)', metavar = 'package-version.tar.gz') sp.add_argument(", "well: #769392 #debfullname = pwd.getpwnam(os.getlogin())[4].split(',')[0] debfullname = pwd.getpwuid(os.getuid())[4].split(',')[0] ####################################################################### #", "set by \"binarypackage\". Otherwise it is set to \"bin\" for", "additional \"dh --with\" option arguments', metavar = 'args') p.add_argument( '-x',", "tarball is downloaded as the package-version.tar.gz file. * It is", "package build tool', metavar = '[debuild|pdebuild|...]') ep.add_argument( '-j', '--judge', action", "native source package without .orig.tar.gz') sp.add_argument( '-a', '--archive', type =", "\"ruby\", and \"script\". If \"type\" is not specified but obvious,", "para['targz'] = args.targz # -z para['local'] = args.local # -L", "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN", "'read optional parameters from \"file\"', metavar = '\"file\"') p.add_argument( '-q',", "ep.add_argument( '-i', '--invoke', default = '', action = 'store', help", "= set({'[Cc][Oo][Pp][Yy][Ii][Nn][Gg]*', '[Ll][Ii][Cc][Ee][Nn][Ss][Ee]*'}) # default else: para['license'] = set(args.copyright.split(',')) #############################################", "help = 'scan source for copyright+license text and exit') ck.add_argument(", "many files under the package-version/ directory. * {0} is invoked", "= 'store', default = '', help = 'set binary package", "= 'set binary package specs as comma separated list of", "= 'set the tarball type, extension=(tar.gz|tar.bz2|tar.xz)', metavar = 'extension') p.add_argument(", "'', dest = 'withargs', help = 'set additional \"dh --with\"", "action = 'store_true', default = False, help = 'run \"make", "'', action = 'store', help = 'generate extra configuration files", "p.add_argument( '-l', '--license', default = '', action = 'store', help", "directory are manually adjusted. 
* dpkg-buildpackage (usually from its wrapper", "GUI configuration') # # -h : used by argparse for", "para(para): debmail = env('DEBEMAIL') if not debmail: #debmail = os.getlogin()", "CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF", "p.add_argument( '-q', '--quitearly', action = 'store_true', default = False, help='quit", "para['tutorial'] = args.tutorial # -T ############################################# -o if args.option: exec(debmake.read.read(args.option))", "p.add_argument( '-v', '--version', action = 'store_true', default = False, help", "this permission notice shall be included in all copies or", "# -y # 0: ask, 1: yes, 2: no para['targz']", "'--fullname', action = 'store', default = debfullname, help = 'set", "\"type\" is chosen from \"bin\", \"data\", \"dbg\", \"dev\", \"doc\", \"lib\",", "no para['targz'] = args.targz # -z para['local'] = args.local #", "# return command line parameters ####################################################################### return para ####################################################################### #", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN", "'\"firstname lastname\"') # p.add_argument( # '-g', # '--gui', # action", "\"python3\", \"ruby\", and \"script\". If \"type\" is not specified but", "'store', help = 'read optional parameters from \"file\"', metavar =", "# -u para['print_version'] = args.version # -v ############################################# -w #", "above copyright notice and this permission notice shall be included", "= 'use the upstream source tarball directly (-p, -u, -z:", "= False, help='output tutorial comment lines in template files') args", "ts=4 et ai: \"\"\" Copyright © 2014 <NAME> Permission is", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "the Debian package from the upstream source. 
Normally, this is", "action = 'store_true', default = False, help = 'show version", "done as follows: * The upstream tarball is downloaded as", "'store', default = '', help = 'set the tarball type,", "\"dev\", \"doc\", \"lib\", \"perl\", \"python\", \"python3\", \"ruby\", and \"script\". If", "for the local package') p.add_argument( '-P', '--pedantic', action = 'store_true',", "############################################# -a if args.archive: para['archive'] = True para['tarball'] = args.archive", "-d para['email'] = args.email # -e para['fullname'] = args.fullname #", "import pwd import sys import time import debmake.read ########################################################################### #", "if args.withargs == '': para['dh_with'] = set() # default is", "# -L para['pedantic'] = args.pedantic # -P para['tutorial'] = args.tutorial", "os.getlogin() + '@localhost' debemail = pwd.getpwuid(os.getuid())[0] + '@localhost' debfullname =", "empty set else: para['dh_with'] = set(args.withargs.split(',')) ############################################# para['extra'] = args.extra", "'', help = 'set the upstream package version', metavar =", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,", "'store', help = 'generate extra configuration files as templates', metavar", "code ####################################################################### if __name__ == '__main__': for p, v in", "p.add_argument( '-b', '--binaryspec', action = 'store', default = '', help", "form \"foo:bin,foo-doc:doc,libfoo1:lib,libfoo1-dbg:dbg,libfoo-dev:dev\" or in short form \",-doc,libfoo1,libfoo1-dbg, libfoo-dev\". Here, \"binarypackage\"", "metavar = '\"license_file\"') p.add_argument( '-m', '--monoarch', action = 'store_true', default", "non-multiarch') p.add_argument( '-o', '--option', default = '', action = 'store',", "form \",-doc,libfoo1,libfoo1-dbg, libfoo-dev\". 
Here, \"binarypackage\" is the binary package name;", "option arguments', metavar = 'args') p.add_argument( '-x', '--extra', default =", "values ####################################################################### ############################################# -a if args.archive: para['archive'] = True para['tarball']", "type = str, action = 'store', default = '', help", "\"script\". If \"type\" is not specified but obvious, it is", "formatted license to debian/copyright', metavar = '\"license_file\"') p.add_argument( '-m', '--monoarch',", "package-version.tar.gz file. * It is untared to create many files", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING", "without any arguments. * Files in the package-version/debian/ directory are", "= 'force packages to be non-multiarch') p.add_argument( '-o', '--option', default", "KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "is furnished to do so, subject to the following conditions:", "default = '', action = 'store', help = 'generate extra", "= 'count', default = 0, help = '\"force yes\" for", "if not debfullname: # os.getlogin may not work well: #769392", "'', action = 'store', help = 'add formatted license to", "'--pedantic', action = 'store_true', default = False, help='pedantically check auto-generated", "ai: \"\"\" Copyright © 2014 <NAME> Permission is hereby granted,", "debfullname = env('DEBFULLNAME') if not debfullname: # os.getlogin may not", "####################################################################### if __name__ == '__main__': for p, v in para().items():", "####################################################################### # command line setting ####################################################################### p = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter,", "-P para['tutorial'] = args.tutorial # -T ############################################# -o if args.option:", "parameters from \"file\"', metavar = '\"file\"') 
p.add_argument( '-q', '--quitearly', action", "= False, help = 'run \"make dist\" equivalent first to", "action = 'store_true', # default = False, # help =", "to any person obtaining a copy of this software and", "debfullname: # os.getlogin may not work well: #769392 #debfullname =", "= 'store_true', default = False, help = 'run \"tar\" to", "= 'add formatted license to debian/copyright', metavar = '\"license_file\"') p.add_argument(", "-r para['spec'] = args.spec # -s para['tar'] = args.tar #", "'count', default = 0, help = 'scan source for copyright+license", "set(args.copyright.split(',')) ############################################# para['monoarch'] = args.monoarch # -m para['native'] = args.native", "'force packages to be non-multiarch') p.add_argument( '-o', '--option', default =", "para['archive'] = False para['tarball'] = '' ############################################# para['binaryspec'] = args.binaryspec", "work well: #769392 #debfullname = pwd.getpwnam(os.getlogin())[4].split(',')[0] debfullname = pwd.getpwuid(os.getuid())[4].split(',')[0] #######################################################################", "version', metavar = 'version') p.add_argument( '-r', '--revision', action = 'store',", "'--license', default = '', action = 'store', help = 'add", "shall be included in all copies or substantial portions of", "person obtaining a copy of this software and associated documentation", "# --with: args.withargs -> para['dh_with'] as set if args.withargs ==", "et ai: \"\"\" Copyright © 2014 <NAME> Permission is hereby", "# -x para['yes'] = min(args.yes, 2) # -y # 0:", "environment variable -> '' def env(var): try: return os.environ[var] except", "'--monoarch', action = 'store_true', default = False, help = 'force", "FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN", "False, help = 'run \"make dist\" equivalent first to generate", "para['copyright'] = min(args.copyright, 6) # -c if para['copyright'] >=4: para['copyright']", "args.local # -L 
para['pedantic'] = args.pedantic # -P para['tutorial'] =", "\"make dist\" equivalent first to generate upstream tarball and use", "license to debian/copyright', metavar = '\"license_file\"') p.add_argument( '-m', '--monoarch', action", "pwd.getpwnam(os.getlogin())[4].split(',')[0] debfullname = pwd.getpwuid(os.getuid())[4].split(',')[0] ####################################################################### # command line setting #######################################################################", "-> para['dh_with'] as set if args.withargs == '': para['dh_with'] =", "-z: overridden)', metavar = 'package-version.tar.gz') sp.add_argument( '-d', '--dist', action =", "and this permission notice shall be included in all copies", "else: para['license'] = set(args.copyright.split(',')) ############################################# para['monoarch'] = args.monoarch # -m", "= 'store_true', default = False, help = 'force packages to", "# # -h : used by argparse for --help ep", "+ '@localhost' debfullname = env('DEBFULLNAME') if not debfullname: # os.getlogin", "= 'set the upstream package version', metavar = 'version') p.add_argument(", "debian/copyright, +/-1: simple, +/-2: standard +/-3: extensive para['dist'] = args.dist", "= args.quitearly # -q para['revision'] = args.revision # -r para['spec']", "if para['copyright'] >=4: para['copyright'] = 3 - para['copyright'] # 0:", "'withargs', help = 'set additional \"dh --with\" option arguments', metavar", "as templates', metavar = '[01234]') p.add_argument( '-y', '--yes', action =", "'\"force yes\" for all prompts') p.add_argument( '-L', '--local', action =", "############################################# para['extra'] = args.extra # -x para['yes'] = min(args.yes, 2)", "© 2014 <NAME> Permission is hereby granted, free of charge,", "= 'store_true', default = False, help='quit early before creating files", "files in the debian directory') p.add_argument( '-s', '--spec', action =", "simple, +/-2: standard +/-3: extensive 
para['dist'] = args.dist # -d", "help = 'force packages to be non-multiarch') p.add_argument( '-o', '--option',", "'', action = 'store', help = 'read optional parameters from", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT", "help = 'add formatted license to debian/copyright', metavar = '\"license_file\"')", "version information') p.add_argument( '-w', '--with', action = 'store', default =", "= args.fullname # -f # para['gui'] = args.gui # -g", "upstream tarball and use it') p.add_argument( '-p', '--package', action =", "in the package-version/ directory to make debian packages. Argument may", "'-m', '--monoarch', action = 'store_true', default = False, help =", "para['copyright'] = 3 - para['copyright'] # 0: debian/copyright, +/-1: simple,", "'count', default = 0, help = '\"force yes\" for all", "args.quitearly # -q para['revision'] = args.revision # -r para['spec'] =", "OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE", "# -h : used by argparse for --help ep =", "source package Version: {1} {2} {0} helps to build the", "{0} is invoked in the package-version/ directory possibly without any", "'--native', action = 'store_true', default = False, help = 'make", "# -b para['copyright'] = min(args.copyright, 6) # -c if para['copyright']", "free of charge, to any person obtaining a copy of", "p.add_argument( '-o', '--option', default = '', action = 'store', help", "default else: para['license'] = set(args.copyright.split(',')) ############################################# para['monoarch'] = args.monoarch #", "IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE", "args.native # -n para['package'] = args.package.lower() # -p ############################################# para['quitearly']", "the Debian package name', metavar = 'package') p.add_argument( '-u', '--upstreamversion',", "judge build dependencies and identify file paths') p.add_argument( '-l', '--license',", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "are manually 
adjusted. * dpkg-buildpackage (usually from its wrapper debuild", "= False, help = 'show version information') p.add_argument( '-w', '--with',", "p.add_argument( '-p', '--package', action = 'store', default = '', help", "OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "the Software is furnished to do so, subject to the", "Software, and to permit persons to whom the Software is", "if args.option: exec(debmake.read.read(args.option)) ####################################################################### # return command line parameters #######################################################################", "== '__main__': for p, v in para().items(): print(\"para['{}'] = \\\"{}\\\"\".format(p,v))", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.", "It is untared to create many files under the package-version/", "the local package') p.add_argument( '-P', '--pedantic', action = 'store_true', default", "# p.add_argument( # '-g', # '--gui', # action = 'store_true',", "\"dbg\", \"dev\", \"doc\", \"lib\", \"perl\", \"python\", \"python3\", \"ruby\", and \"script\".", "upstream tarball is downloaded as the package-version.tar.gz file. * It", "'store_true', default = False, help = 'show version information') p.add_argument(", "rights to use, copy, modify, merge, publish, distribute, sublicense, and/or", "if __name__ == '__main__': for p, v in para().items(): print(\"para['{}']", "'-e', '--email', action = 'store', default = debmail, help =", "= min(args.copyright, 6) # -c if para['copyright'] >=4: para['copyright'] =", "from the shell. 
'''.format( para['program_name'], para['program_version'], para['program_copyright']), epilog='See debmake(1) manpage", "early before creating files in the debian directory') p.add_argument( '-s',", "= '', action = 'store', help = 'add formatted license", "# undefined environment variable -> '' def env(var): try: return", "Argument may need to be quoted to protect from the", "'run \"tar\" to generate upstream tarball and use it') p.add_argument(", "metavar = '<EMAIL>') p.add_argument( '-f', '--fullname', action = 'store', default", "documentation files (the \"Software\"), to deal in the Software without", "= 0, help = '\"force yes\" for all prompts') p.add_argument(", "para['license'] = set({'[Cc][Oo][Pp][Yy][Ii][Nn][Gg]*', '[Ll][Ii][Cc][Ee][Nn][Ss][Ee]*'}) # default else: para['license'] = set(args.copyright.split(','))", "in full form \"foo:bin,foo-doc:doc,libfoo1:lib,libfoo1-dbg:dbg,libfoo-dev:dev\" or in short form \",-doc,libfoo1,libfoo1-dbg, libfoo-dev\".", "-k ############################################# -l # --license: args.license -> para['license'] as set", "package name', metavar = 'package') p.add_argument( '-u', '--upstreamversion', action =", "= 'extension') p.add_argument( '-b', '--binaryspec', action = 'store', default =", "default = '', action = 'store', help = 'add formatted", "without restriction, including without limitation the rights to use, copy,", "undefined environment variable -> '' def env(var): try: return os.environ[var]", "= 0, help = 'scan source for copyright+license text and", "p.add_argument( # '-g', # '--gui', # action = 'store_true', #", "= False, help = 'run \"dpkg-depcheck\" to judge build dependencies", "package-version/ directory to make debian packages. 
Argument may need to", "TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION", "= 'store', default = '', help = 'set the upstream", "package without .orig.tar.gz') sp.add_argument( '-a', '--archive', type = str, action", "'-c', '--copyright', action = 'count', default = 0, help =", "action = 'store', help = 'add formatted license to debian/copyright',", "under the package-version/ directory. * {0} is invoked in the", "0: debian/copyright, +/-1: simple, +/-2: standard +/-3: extensive para['dist'] =", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "specs as comma separated list of \"binarypackage\":\"type\" pairs, e.g., in", "help = 'run \"dpkg-depcheck\" to judge build dependencies and identify", "args.targz # -z para['local'] = args.local # -L para['pedantic'] =", "para['program_copyright']), epilog='See debmake(1) manpage for more.') ck = p.add_mutually_exclusive_group() ck.add_argument(", "return para ####################################################################### # Test code ####################################################################### if __name__ ==", "to build the Debian package from the upstream source. 
Normally,", "import time import debmake.read ########################################################################### # undefined environment variable ->", "tw=0 sts=4 ts=4 et ai: \"\"\" Copyright © 2014 <NAME>", "action = 'store_true', default = False, help='output tutorial comment lines", "= 'store', default = '', help = 'set the tarball", "os.environ[var] except KeyError: return '' ####################################################################### # Initialize parameters #######################################################################", "'package-version.tar.gz') sp.add_argument( '-d', '--dist', action = 'store_true', default = False,", "-c if para['copyright'] >=4: para['copyright'] = 3 - para['copyright'] #", "= 'count', default = 0, help = 'compare debian/copyright with", "'set the upstream package version', metavar = 'version') p.add_argument( '-r',", "Debian package revision', metavar = 'revision') p.add_argument( '-z', '--targz', action", "for copyright+license text and exit') ck.add_argument( '-k', '--kludge', action =", "vim:se tw=0 sts=4 ts=4 et ai: \"\"\" Copyright © 2014", "NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "= 'binarypackage[:type]') p.add_argument( '-e', '--email', action = 'store', default =", "= 'store', default = '', help = 'set the Debian", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies", "exit') ck.add_argument( '-k', '--kludge', action = 'count', default = 0,", "# help = 'run GUI configuration') # # -h :", "help = 'use upstream spec') p.add_argument( '-v', '--version', action =", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR", "para['print_version'] = args.version # -v ############################################# -w # --with: args.withargs", "args.license == '': para['license'] = set({'[Cc][Oo][Pp][Yy][Ii][Nn][Gg]*', '[Ll][Ii][Cc][Ee][Nn][Ss][Ee]*'}) # default else:", "as comma separated list of \"binarypackage\":\"type\" pairs, e.g., in full", "granted, free 
of charge, to any person obtaining a copy", "used by argparse for --help ep = p.add_mutually_exclusive_group() ep.add_argument( '-i',", "help = 'read optional parameters from \"file\"', metavar = '\"file\"')", "para['copyright'] # 0: debian/copyright, +/-1: simple, +/-2: standard +/-3: extensive", "OR OTHER DEALINGS IN THE SOFTWARE. \"\"\" import argparse import", "* It is untared to create many files under the", "####################################################################### ############################################# -a if args.archive: para['archive'] = True para['tarball'] =", "help = 'make a native source package without .orig.tar.gz') sp.add_argument(", "extra configuration files as templates', metavar = '[01234]') p.add_argument( '-y',", "= '\"firstname lastname\"') # p.add_argument( # '-g', # '--gui', #", "help = '\"force yes\" for all prompts') p.add_argument( '-L', '--local',", "ck.add_argument( '-k', '--kludge', action = 'count', default = 0, help", "of charge, to any person obtaining a copy of this", "PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS", "'--gui', # action = 'store_true', # default = False, #", "# -p ############################################# para['quitearly'] = args.quitearly # -q para['revision'] =", "False, help='quit early before creating files in the debian directory')", "'revision') p.add_argument( '-z', '--targz', action = 'store', default = '',", "# -g para['invoke'] = args.invoke # -i para['judge'] = args.judge", "Permission is hereby granted, free of charge, to any person", "p.add_argument( '-u', '--upstreamversion', action = 'store', default = '', help", "'store_true', default = False, help='quit early before creating files in", "= 'store', help = 'read optional parameters from \"file\"', metavar", "set else: para['dh_with'] = set(args.withargs.split(',')) ############################################# para['extra'] = args.extra #", "package-version/debian/ directory are manually adjusted. 
* dpkg-buildpackage (usually from its", "\"perl\", \"python\", \"python3\", \"ruby\", and \"script\". If \"type\" is not", "args.tutorial # -T ############################################# -o if args.option: exec(debmake.read.read(args.option)) ####################################################################### #", "'-T', '--tutorial', action = 'store_true', default = False, help='output tutorial", "-v ############################################# -w # --with: args.withargs -> para['dh_with'] as set", "env('DEBEMAIL') if not debmail: #debmail = os.getlogin() + '@localhost' debemail", "# -z para['local'] = args.local # -L para['pedantic'] = args.pedantic", "The above copyright notice and this permission notice shall be", "= 'set e-mail address', metavar = '<EMAIL>') p.add_argument( '-f', '--fullname',", "information') p.add_argument( '-w', '--with', action = 'store', default = '',", "-y # 0: ask, 1: yes, 2: no para['targz'] =", "para['tarball'] = args.archive else: para['archive'] = False para['tarball'] = ''", "by \"binarypackage\". 
Otherwise it is set to \"bin\" for the", "help='generate configuration files for the local package') p.add_argument( '-P', '--pedantic',", "file paths') p.add_argument( '-l', '--license', default = '', action =", "para ####################################################################### # Test code ####################################################################### if __name__ == '__main__':", "as set if args.license == '': para['license'] = set({'[Cc][Oo][Pp][Yy][Ii][Nn][Gg]*', '[Ll][Ii][Cc][Ee][Nn][Ss][Ee]*'})", "argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description = '''\\ {0}: make Debian source package", "args.monoarch # -m para['native'] = args.native # -n para['package'] =", "spec') p.add_argument( '-v', '--version', action = 'store_true', default = False,", "prompts') p.add_argument( '-L', '--local', action = 'store_true', default = False,", "ep.add_argument( '-j', '--judge', action = 'store_true', default = False, help", "help = 'set e-mail address', metavar = '<EMAIL>') p.add_argument( '-f',", "DEALINGS IN THE SOFTWARE. \"\"\" import argparse import os import", "metavar = '[01234]') p.add_argument( '-y', '--yes', action = 'count', default", "= '\"license_file\"') p.add_argument( '-m', '--monoarch', action = 'store_true', default =", "'use upstream spec') p.add_argument( '-v', '--version', action = 'store_true', default", "--license: args.license -> para['license'] as set if args.license == '':", "'' def env(var): try: return os.environ[var] except KeyError: return ''", "env('DEBFULLNAME') if not debfullname: # os.getlogin may not work well:", "package-version/ directory. * {0} is invoked in the package-version/ directory", "e.g., in full form \"foo:bin,foo-doc:doc,libfoo1:lib,libfoo1-dbg:dbg,libfoo-dev:dev\" or in short form \",-doc,libfoo1,libfoo1-dbg,", "or in short form \",-doc,libfoo1,libfoo1-dbg, libfoo-dev\". 
Here, \"binarypackage\" is the", "'store', default = '', help = 'set binary package specs", "\",-doc,libfoo1,libfoo1-dbg, libfoo-dev\". Here, \"binarypackage\" is the binary package name; and", "associated documentation files (the \"Software\"), to deal in the Software", "'store_true', default = False, help = 'force packages to be", "ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION", "= debmail, help = 'set e-mail address', metavar = '<EMAIL>')", "p.add_argument( '-T', '--tutorial', action = 'store_true', default = False, help='output", "p.add_argument( '-r', '--revision', action = 'store', default = '', help", "in template files') args = p.parse_args() ####################################################################### # Set parameter", "'--local', action = 'store_true', default = False, help='generate configuration files", "args.invoke # -i para['judge'] = args.judge # -j if para['judge']:", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "templates', metavar = '[01234]') p.add_argument( '-y', '--yes', action = 'count',", "= 'store_true', default = False, help = 'run \"dpkg-depcheck\" to", "+/-1: simple, +/-2: standard +/-3: extensive para['dist'] = args.dist #", "'', help = 'set binary package specs as comma separated", "is the binary package name; and optional \"type\" is chosen", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING" ]
[ "kwargs.pop('y_func_serializer') super().__init__(*args, **kwargs) def model_viewset_factory(build_model, build_filter_class, build_serializer_class, build_detail_serializer_class, build_queryset, build_actions,", "y_func, 'y_column': y_column }) serializer = self.get_serializer( queryset, y_func_serializer=y_serializer )", "build_queryset pagination_class = CustomPageNumberPagination filter_class = build_filter_class authentication_classes = ()", "ModelSerializer().build_standard_field(y_column, y_field) y_serializer = y_serializer_class(**y_serializer_kwargs) queryset = GroupFilter().filter(queryset, { 'x_column':", "CustomPageNumberPagination from jet_django.permissions import HasProjectPermissions, ModifyNotInDemo from jet_django.serializers.reorder import reorder_serializer_factory", "= self.get_serializer( queryset, many=True, group_serializer=x_serializer, y_func_serializer=y_serializer ) return Response(serializer.data) def", "should be used for validating and deserializing input, and for", "the serializer instance that should be used for validating and", "for validating and deserializing input, and for serializing output. 
\"\"\"", "rest_framework.response import Response from rest_framework.serializers import ModelSerializer from jet_django.filters.model_aggregate import", "request.GET['_x_column'] x_lookup_name = request.GET.get('_x_lookup') y_func = request.GET['_y_func'].lower() y_column = request.GET.get('_y_column',", "__init__(self, *args, **kwargs): if 'y_func_serializer' in kwargs: self.fields['y_func'] = kwargs.pop('y_func_serializer')", "= request.GET.get('_x_lookup') y_func = request.GET['_y_func'].lower() y_column = request.GET.get('_y_column', 'id') x_field", "in build_actions: def route(self, request): form = action(data=request.data) if not", "serializer.is_valid(raise_exception=True) serializer.save() return Response(serializer.data) @list_route(methods=['post']) def reset_order(self, request): i =", "def reorder(self, request): serializer = ReorderSerializer(data=request.data) serializer.is_valid(raise_exception=True) serializer.save() return Response(serializer.data)", "self.action == 'group': return GroupSerializer elif self.action == 'retrieve': return", "= self.filter_queryset(self.get_queryset()) x_column = request.GET['_x_column'] x_lookup_name = request.GET.get('_x_lookup') y_func =", "model_viewset_factory(build_model, build_filter_class, build_serializer_class, build_detail_serializer_class, build_queryset, build_actions, ordering_field): ReorderSerializer = reorder_serializer_factory(build_queryset,", "ReorderSerializer = reorder_serializer_factory(build_queryset, ordering_field) class Viewset(viewsets.ModelViewSet): model = build_model queryset", "status, viewsets, serializers from rest_framework.decorators import list_route from rest_framework.response import", "from django.core.exceptions import NON_FIELD_ERRORS from rest_framework import status, viewsets, serializers", "= ReorderSerializer(data=request.data) serializer.is_valid(raise_exception=True) serializer.save() return Response(serializer.data) @list_route(methods=['post']) def reset_order(self, 
request):", "y_func, 'y_column': y_column }) serializer = self.get_serializer( queryset, many=True, group_serializer=x_serializer,", "'group': return GroupSerializer elif self.action == 'retrieve': return build_detail_serializer_class else:", "get_serializer_class(self): if self.action == 'aggregate': return AggregateSerializer elif self.action ==", "self.model._meta.get_field(x_column) x_lookup = x_field.class_lookups.get(x_lookup_name) y_field = self.model._meta.get_field(y_column) if x_lookup: x_field", "x_serializer = x_serializer_class(**x_serializer_kwargs) y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field) y_serializer =", "request.GET['_y_func'].lower() y_column = request.GET.get('_y_column', 'id') x_field = self.model._meta.get_field(x_column) x_lookup =", "**kwargs) class GroupSerializer(serializers.Serializer): group = serializers.CharField() y_func = serializers.IntegerField() def", "'y_func': y_func, 'y_column': y_column }) serializer = self.get_serializer( queryset, y_func_serializer=y_serializer", "queryset = AggregateFilter().filter(queryset, { 'y_func': y_func, 'y_column': y_column }) serializer", "= self.get_serializer( queryset, y_func_serializer=y_serializer ) return Response(serializer.data) @list_route(methods=['get']) def group(self,", "if x_lookup: x_field = x_lookup('none').output_field x_serializer_class, x_serializer_kwargs = ModelSerializer().build_standard_field(x_column, x_field)", "x_field.class_lookups.get(x_lookup_name) y_field = self.model._meta.get_field(y_column) if x_lookup: x_field = x_lookup('none').output_field x_serializer_class,", "y_func = serializers.IntegerField() def __init__(self, *args, **kwargs): if 'y_func_serializer' in", "*args, **kwargs): if 'group_serializer' in kwargs: self.fields['group'] = kwargs.pop('group_serializer') if", "y_field = self.model._meta.get_field(y_column) if x_lookup: x_field = x_lookup('none').output_field x_serializer_class, 
x_serializer_kwargs", "def aggregate(self, request): queryset = self.filter_queryset(self.get_queryset()) y_func = request.GET['_y_func'].lower() y_column", "ordering_field) class Viewset(viewsets.ModelViewSet): model = build_model queryset = build_queryset pagination_class", "build_queryset, build_actions, ordering_field): ReorderSerializer = reorder_serializer_factory(build_queryset, ordering_field) class Viewset(viewsets.ModelViewSet): model", "pagination_class = CustomPageNumberPagination filter_class = build_filter_class authentication_classes = () permission_classes", "Return the serializer instance that should be used for validating", "from rest_framework.response import Response from rest_framework.serializers import ModelSerializer from jet_django.filters.model_aggregate", "Exception as e: return Response({NON_FIELD_ERRORS: str(e)}, status=status.HTTP_400_BAD_REQUEST) return Response({'action': form._meta.name,", "'group_serializer' in kwargs: self.fields['group'] = kwargs.pop('group_serializer') if 'y_func_serializer' in kwargs:", "and deserializing input, and for serializing output. 
\"\"\" serializer_class =", "AggregateFilter().filter(queryset, { 'y_func': y_func, 'y_column': y_column }) serializer = self.get_serializer(", "y_field) y_serializer = y_serializer_class(**y_serializer_kwargs) queryset = GroupFilter().filter(queryset, { 'x_column': x_column,", "form = action(data=request.data) if not form.is_valid(): return Response(form.errors, status=status.HTTP_400_BAD_REQUEST) queryset", "aggregate(self, request): queryset = self.filter_queryset(self.get_queryset()) y_func = request.GET['_y_func'].lower() y_column =", "= x_serializer_class(**x_serializer_kwargs) y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field) y_serializer = y_serializer_class(**y_serializer_kwargs)", "Response(serializer.data) @list_route(methods=['get']) def group(self, request): queryset = self.filter_queryset(self.get_queryset()) x_column =", "x_field = self.model._meta.get_field(x_column) x_lookup = x_field.class_lookups.get(x_lookup_name) y_field = self.model._meta.get_field(y_column) if", "@list_route(methods=['get']) def group(self, request): queryset = self.filter_queryset(self.get_queryset()) x_column = request.GET['_x_column']", "= request.GET['_y_func'].lower() y_column = request.GET.get('_y_column', 'id') x_field = self.model._meta.get_field(x_column) x_lookup", "serializer.save() return Response(serializer.data) @list_route(methods=['post']) def reset_order(self, request): i = 1", "filter_class = build_filter_class authentication_classes = () permission_classes = (HasProjectPermissions, ModifyNotInDemo)", "from rest_framework import status, viewsets, serializers from rest_framework.decorators import list_route", "kwargs: self.fields['y_func'] = kwargs.pop('y_func_serializer') super().__init__(*args, **kwargs) class GroupSerializer(serializers.Serializer): group =", "instance in build_queryset: setattr(instance, ordering_field, i) instance.save() i += 1", "return Response({'action': form._meta.name, 'result': 
result}) decorator = list_route(methods=['post']) route =", "class GroupSerializer(serializers.Serializer): group = serializers.CharField() y_func = serializers.IntegerField() def __init__(self,", "Response({NON_FIELD_ERRORS: str(e)}, status=status.HTTP_400_BAD_REQUEST) return Response({'action': form._meta.name, 'result': result}) decorator =", "group_serializer=x_serializer, y_func_serializer=y_serializer ) return Response(serializer.data) def get_serializer(self, *args, **kwargs): \"\"\"", "**kwargs): \"\"\" Return the serializer instance that should be used", "reorder(self, request): serializer = ReorderSerializer(data=request.data) serializer.is_valid(raise_exception=True) serializer.save() return Response(serializer.data) @list_route(methods=['post'])", "= ModelSerializer().build_standard_field(y_column, y_field) y_serializer = y_serializer_class(**y_serializer_kwargs) queryset = AggregateFilter().filter(queryset, {", "def __init__(self, *args, **kwargs): if 'y_func_serializer' in kwargs: self.fields['y_func'] =", "import Response from rest_framework.serializers import ModelSerializer from jet_django.filters.model_aggregate import AggregateFilter", "many=True, group_serializer=x_serializer, y_func_serializer=y_serializer ) return Response(serializer.data) def get_serializer(self, *args, **kwargs):", "request): queryset = self.filter_queryset(self.get_queryset()) x_column = request.GET['_x_column'] x_lookup_name = request.GET.get('_x_lookup')", "request): form = action(data=request.data) if not form.is_valid(): return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)", "= request.GET.get('_y_column', 'id') y_field = self.model._meta.get_field(y_column) y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column,", "y_column }) serializer = self.get_serializer( queryset, many=True, group_serializer=x_serializer, y_func_serializer=y_serializer )", "= self.get_serializer_class() kwargs['context'] = 
self.get_serializer_context() return serializer_class(*args, **kwargs) @list_route(methods=['post']) def", "kwargs['context'] = self.get_serializer_context() return serializer_class(*args, **kwargs) @list_route(methods=['post']) def reorder(self, request):", "i += 1 return Response({}) for action in build_actions: def", "if not form.is_valid(): return Response(form.errors, status=status.HTTP_400_BAD_REQUEST) queryset = form.filer_queryset(self.get_queryset()) try:", "y_serializer = y_serializer_class(**y_serializer_kwargs) queryset = GroupFilter().filter(queryset, { 'x_column': x_column, 'x_lookup':", "str(e)}, status=status.HTTP_400_BAD_REQUEST) return Response({'action': form._meta.name, 'result': result}) decorator = list_route(methods=['post'])", "from jet_django.permissions import HasProjectPermissions, ModifyNotInDemo from jet_django.serializers.reorder import reorder_serializer_factory class", "build_actions, ordering_field): ReorderSerializer = reorder_serializer_factory(build_queryset, ordering_field) class Viewset(viewsets.ModelViewSet): model =", "'y_func': y_func, 'y_column': y_column }) serializer = self.get_serializer( queryset, many=True,", "group = serializers.CharField() y_func = serializers.IntegerField() def __init__(self, *args, **kwargs):", "1 return Response({}) for action in build_actions: def route(self, request):", "import reorder_serializer_factory class AggregateSerializer(serializers.Serializer): y_func = serializers.IntegerField() def __init__(self, *args,", "Response(form.errors, status=status.HTTP_400_BAD_REQUEST) queryset = form.filer_queryset(self.get_queryset()) try: result = form.save(queryset) except", "reorder_serializer_factory class AggregateSerializer(serializers.Serializer): y_func = serializers.IntegerField() def __init__(self, *args, **kwargs):", "from jet_django.filters.model_group import GroupFilter from jet_django.pagination import CustomPageNumberPagination from jet_django.permissions", "self.action == 'aggregate': 
return AggregateSerializer elif self.action == 'group': return", "form.is_valid(): return Response(form.errors, status=status.HTTP_400_BAD_REQUEST) queryset = form.filer_queryset(self.get_queryset()) try: result =", "*args, **kwargs): if 'y_func_serializer' in kwargs: self.fields['y_func'] = kwargs.pop('y_func_serializer') super().__init__(*args,", "def group(self, request): queryset = self.filter_queryset(self.get_queryset()) x_column = request.GET['_x_column'] x_lookup_name", "x_lookup: x_field = x_lookup('none').output_field x_serializer_class, x_serializer_kwargs = ModelSerializer().build_standard_field(x_column, x_field) x_serializer", "class AggregateSerializer(serializers.Serializer): y_func = serializers.IntegerField() def __init__(self, *args, **kwargs): if", "self.action == 'retrieve': return build_detail_serializer_class else: return build_serializer_class @list_route(methods=['get']) def", "y_field = self.model._meta.get_field(y_column) y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field) y_serializer =", "request.GET.get('_x_lookup') y_func = request.GET['_y_func'].lower() y_column = request.GET.get('_y_column', 'id') x_field =", "**kwargs): if 'y_func_serializer' in kwargs: self.fields['y_func'] = kwargs.pop('y_func_serializer') super().__init__(*args, **kwargs)", "instance.save() i += 1 return Response({}) for action in build_actions:", "import status, viewsets, serializers from rest_framework.decorators import list_route from rest_framework.response", "= 1 for instance in build_queryset: setattr(instance, ordering_field, i) instance.save()", "def model_viewset_factory(build_model, build_filter_class, build_serializer_class, build_detail_serializer_class, build_queryset, build_actions, ordering_field): ReorderSerializer =", "if 'y_func_serializer' in kwargs: self.fields['y_func'] = kwargs.pop('y_func_serializer') super().__init__(*args, **kwargs) class", "status=status.HTTP_400_BAD_REQUEST) queryset = 
form.filer_queryset(self.get_queryset()) try: result = form.save(queryset) except Exception", "= kwargs.pop('y_func_serializer') super().__init__(*args, **kwargs) def model_viewset_factory(build_model, build_filter_class, build_serializer_class, build_detail_serializer_class, build_queryset,", "else: return build_serializer_class @list_route(methods=['get']) def aggregate(self, request): queryset = self.filter_queryset(self.get_queryset())", "jet_django.pagination import CustomPageNumberPagination from jet_django.permissions import HasProjectPermissions, ModifyNotInDemo from jet_django.serializers.reorder", "list_route from rest_framework.response import Response from rest_framework.serializers import ModelSerializer from", "= y_serializer_class(**y_serializer_kwargs) queryset = GroupFilter().filter(queryset, { 'x_column': x_column, 'x_lookup': x_lookup,", "y_func = request.GET['_y_func'].lower() y_column = request.GET.get('_y_column', 'id') y_field = self.model._meta.get_field(y_column)", "GroupSerializer elif self.action == 'retrieve': return build_detail_serializer_class else: return build_serializer_class", "*args, **kwargs): \"\"\" Return the serializer instance that should be", "be used for validating and deserializing input, and for serializing", "= GroupFilter().filter(queryset, { 'x_column': x_column, 'x_lookup': x_lookup, 'y_func': y_func, 'y_column':", "rest_framework.decorators import list_route from rest_framework.response import Response from rest_framework.serializers import", "== 'group': return GroupSerializer elif self.action == 'retrieve': return build_detail_serializer_class", "= CustomPageNumberPagination filter_class = build_filter_class authentication_classes = () permission_classes =", "build_filter_class, build_serializer_class, build_detail_serializer_class, build_queryset, build_actions, ordering_field): ReorderSerializer = reorder_serializer_factory(build_queryset, ordering_field)", "that should be used for validating and deserializing 
input, and", "x_serializer_class, x_serializer_kwargs = ModelSerializer().build_standard_field(x_column, x_field) x_serializer = x_serializer_class(**x_serializer_kwargs) y_serializer_class, y_serializer_kwargs", "result = form.save(queryset) except Exception as e: return Response({NON_FIELD_ERRORS: str(e)},", "'retrieve': return build_detail_serializer_class else: return build_serializer_class @list_route(methods=['get']) def aggregate(self, request):", "= self.model._meta.get_field(y_column) if x_lookup: x_field = x_lookup('none').output_field x_serializer_class, x_serializer_kwargs =", "self.get_serializer( queryset, many=True, group_serializer=x_serializer, y_func_serializer=y_serializer ) return Response(serializer.data) def get_serializer(self,", "AggregateSerializer(serializers.Serializer): y_func = serializers.IntegerField() def __init__(self, *args, **kwargs): if 'y_func_serializer'", "x_column, 'x_lookup': x_lookup, 'y_func': y_func, 'y_column': y_column }) serializer =", "ModifyNotInDemo from jet_django.serializers.reorder import reorder_serializer_factory class AggregateSerializer(serializers.Serializer): y_func = serializers.IntegerField()", "class Viewset(viewsets.ModelViewSet): model = build_model queryset = build_queryset pagination_class =", "queryset, many=True, group_serializer=x_serializer, y_func_serializer=y_serializer ) return Response(serializer.data) def get_serializer(self, *args,", "{ 'y_func': y_func, 'y_column': y_column }) serializer = self.get_serializer( queryset,", "= build_model queryset = build_queryset pagination_class = CustomPageNumberPagination filter_class =", "super().__init__(*args, **kwargs) def model_viewset_factory(build_model, build_filter_class, build_serializer_class, build_detail_serializer_class, build_queryset, build_actions, ordering_field):", "permission_classes = (HasProjectPermissions, ModifyNotInDemo) def get_serializer_class(self): if self.action == 'aggregate':", "y_column = request.GET.get('_y_column', 
'id') y_field = self.model._meta.get_field(y_column) y_serializer_class, y_serializer_kwargs =", "and for serializing output. \"\"\" serializer_class = self.get_serializer_class() kwargs['context'] =", "build_model queryset = build_queryset pagination_class = CustomPageNumberPagination filter_class = build_filter_class", "AggregateFilter from jet_django.filters.model_group import GroupFilter from jet_django.pagination import CustomPageNumberPagination from", "(HasProjectPermissions, ModifyNotInDemo) def get_serializer_class(self): if self.action == 'aggregate': return AggregateSerializer", "== 'retrieve': return build_detail_serializer_class else: return build_serializer_class @list_route(methods=['get']) def aggregate(self,", "request): serializer = ReorderSerializer(data=request.data) serializer.is_valid(raise_exception=True) serializer.save() return Response(serializer.data) @list_route(methods=['post']) def", "CustomPageNumberPagination filter_class = build_filter_class authentication_classes = () permission_classes = (HasProjectPermissions,", "return Response({NON_FIELD_ERRORS: str(e)}, status=status.HTTP_400_BAD_REQUEST) return Response({'action': form._meta.name, 'result': result}) decorator", ") return Response(serializer.data) @list_route(methods=['get']) def group(self, request): queryset = self.filter_queryset(self.get_queryset())", "'result': result}) decorator = list_route(methods=['post']) route = decorator(route) setattr(Viewset, action._meta.name,", "NON_FIELD_ERRORS from rest_framework import status, viewsets, serializers from rest_framework.decorators import", "y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field) y_serializer = y_serializer_class(**y_serializer_kwargs) queryset =", "elif self.action == 'retrieve': return build_detail_serializer_class else: return build_serializer_class @list_route(methods=['get'])", "except Exception as e: return Response({NON_FIELD_ERRORS: str(e)}, 
status=status.HTTP_400_BAD_REQUEST) return Response({'action':", "serializer_class = self.get_serializer_class() kwargs['context'] = self.get_serializer_context() return serializer_class(*args, **kwargs) @list_route(methods=['post'])", "as e: return Response({NON_FIELD_ERRORS: str(e)}, status=status.HTTP_400_BAD_REQUEST) return Response({'action': form._meta.name, 'result':", "action(data=request.data) if not form.is_valid(): return Response(form.errors, status=status.HTTP_400_BAD_REQUEST) queryset = form.filer_queryset(self.get_queryset())", "jet_django.filters.model_aggregate import AggregateFilter from jet_django.filters.model_group import GroupFilter from jet_django.pagination import", "ordering_field, i) instance.save() i += 1 return Response({}) for action", "self.fields['y_func'] = kwargs.pop('y_func_serializer') super().__init__(*args, **kwargs) class GroupSerializer(serializers.Serializer): group = serializers.CharField()", "in kwargs: self.fields['y_func'] = kwargs.pop('y_func_serializer') super().__init__(*args, **kwargs) class GroupSerializer(serializers.Serializer): group", "queryset = build_queryset pagination_class = CustomPageNumberPagination filter_class = build_filter_class authentication_classes", "serializers.IntegerField() def __init__(self, *args, **kwargs): if 'y_func_serializer' in kwargs: self.fields['y_func']", "Response({'action': form._meta.name, 'result': result}) decorator = list_route(methods=['post']) route = decorator(route)", "self.fields['y_func'] = kwargs.pop('y_func_serializer') super().__init__(*args, **kwargs) def model_viewset_factory(build_model, build_filter_class, build_serializer_class, build_detail_serializer_class,", "request.GET.get('_y_column', 'id') y_field = self.model._meta.get_field(y_column) y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field)", "from jet_django.serializers.reorder import reorder_serializer_factory class AggregateSerializer(serializers.Serializer): 
y_func = serializers.IntegerField() def", "group(self, request): queryset = self.filter_queryset(self.get_queryset()) x_column = request.GET['_x_column'] x_lookup_name =", "request.GET['_y_func'].lower() y_column = request.GET.get('_y_column', 'id') y_field = self.model._meta.get_field(y_column) y_serializer_class, y_serializer_kwargs", "y_serializer = y_serializer_class(**y_serializer_kwargs) queryset = AggregateFilter().filter(queryset, { 'y_func': y_func, 'y_column':", "ordering_field): ReorderSerializer = reorder_serializer_factory(build_queryset, ordering_field) class Viewset(viewsets.ModelViewSet): model = build_model", "x_serializer_class(**x_serializer_kwargs) y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field) y_serializer = y_serializer_class(**y_serializer_kwargs) queryset", "\"\"\" serializer_class = self.get_serializer_class() kwargs['context'] = self.get_serializer_context() return serializer_class(*args, **kwargs)", "serializers.IntegerField() def __init__(self, *args, **kwargs): if 'group_serializer' in kwargs: self.fields['group']", "= self.get_serializer_context() return serializer_class(*args, **kwargs) @list_route(methods=['post']) def reorder(self, request): serializer", "build_detail_serializer_class, build_queryset, build_actions, ordering_field): ReorderSerializer = reorder_serializer_factory(build_queryset, ordering_field) class Viewset(viewsets.ModelViewSet):", "if 'y_func_serializer' in kwargs: self.fields['y_func'] = kwargs.pop('y_func_serializer') super().__init__(*args, **kwargs) def", "ModelSerializer().build_standard_field(y_column, y_field) y_serializer = y_serializer_class(**y_serializer_kwargs) queryset = AggregateFilter().filter(queryset, { 'y_func':", "import NON_FIELD_ERRORS from rest_framework import status, viewsets, serializers from rest_framework.decorators", "HasProjectPermissions, ModifyNotInDemo from jet_django.serializers.reorder import reorder_serializer_factory class 
AggregateSerializer(serializers.Serializer): y_func =", "= ModelSerializer().build_standard_field(y_column, y_field) y_serializer = y_serializer_class(**y_serializer_kwargs) queryset = GroupFilter().filter(queryset, {", "e: return Response({NON_FIELD_ERRORS: str(e)}, status=status.HTTP_400_BAD_REQUEST) return Response({'action': form._meta.name, 'result': result})", "request.GET.get('_y_column', 'id') x_field = self.model._meta.get_field(x_column) x_lookup = x_field.class_lookups.get(x_lookup_name) y_field =", "**kwargs): if 'group_serializer' in kwargs: self.fields['group'] = kwargs.pop('group_serializer') if 'y_func_serializer'", "'y_column': y_column }) serializer = self.get_serializer( queryset, y_func_serializer=y_serializer ) return", "= request.GET['_x_column'] x_lookup_name = request.GET.get('_x_lookup') y_func = request.GET['_y_func'].lower() y_column =", "queryset = self.filter_queryset(self.get_queryset()) y_func = request.GET['_y_func'].lower() y_column = request.GET.get('_y_column', 'id')", "jet_django.filters.model_group import GroupFilter from jet_django.pagination import CustomPageNumberPagination from jet_django.permissions import", "y_column = request.GET.get('_y_column', 'id') x_field = self.model._meta.get_field(x_column) x_lookup = x_field.class_lookups.get(x_lookup_name)", "return Response(serializer.data) @list_route(methods=['get']) def group(self, request): queryset = self.filter_queryset(self.get_queryset()) x_column", "if 'group_serializer' in kwargs: self.fields['group'] = kwargs.pop('group_serializer') if 'y_func_serializer' in", "reorder_serializer_factory(build_queryset, ordering_field) class Viewset(viewsets.ModelViewSet): model = build_model queryset = build_queryset", "x_lookup('none').output_field x_serializer_class, x_serializer_kwargs = ModelSerializer().build_standard_field(x_column, x_field) x_serializer = x_serializer_class(**x_serializer_kwargs) y_serializer_class,", "authentication_classes = () permission_classes = 
(HasProjectPermissions, ModifyNotInDemo) def get_serializer_class(self): if", "self.filter_queryset(self.get_queryset()) x_column = request.GET['_x_column'] x_lookup_name = request.GET.get('_x_lookup') y_func = request.GET['_y_func'].lower()", "in kwargs: self.fields['group'] = kwargs.pop('group_serializer') if 'y_func_serializer' in kwargs: self.fields['y_func']", "build_serializer_class, build_detail_serializer_class, build_queryset, build_actions, ordering_field): ReorderSerializer = reorder_serializer_factory(build_queryset, ordering_field) class", "y_func_serializer=y_serializer ) return Response(serializer.data) @list_route(methods=['get']) def group(self, request): queryset =", "self.get_serializer_class() kwargs['context'] = self.get_serializer_context() return serializer_class(*args, **kwargs) @list_route(methods=['post']) def reorder(self,", "return Response(form.errors, status=status.HTTP_400_BAD_REQUEST) queryset = form.filer_queryset(self.get_queryset()) try: result = form.save(queryset)", "'id') y_field = self.model._meta.get_field(y_column) y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field) y_serializer", "Response(serializer.data) @list_route(methods=['post']) def reset_order(self, request): i = 1 for instance", "viewsets, serializers from rest_framework.decorators import list_route from rest_framework.response import Response", "= kwargs.pop('y_func_serializer') super().__init__(*args, **kwargs) class GroupSerializer(serializers.Serializer): group = serializers.CharField() y_func", "rest_framework.serializers import ModelSerializer from jet_django.filters.model_aggregate import AggregateFilter from jet_django.filters.model_group import", "'id') x_field = self.model._meta.get_field(x_column) x_lookup = x_field.class_lookups.get(x_lookup_name) y_field = self.model._meta.get_field(y_column)", "import GroupFilter from jet_django.pagination import CustomPageNumberPagination from jet_django.permissions 
import HasProjectPermissions,", "= list_route(methods=['post']) route = decorator(route) setattr(Viewset, action._meta.name, route) return Viewset", "= ModelSerializer().build_standard_field(x_column, x_field) x_serializer = x_serializer_class(**x_serializer_kwargs) y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column,", "x_serializer_kwargs = ModelSerializer().build_standard_field(x_column, x_field) x_serializer = x_serializer_class(**x_serializer_kwargs) y_serializer_class, y_serializer_kwargs =", "x_field = x_lookup('none').output_field x_serializer_class, x_serializer_kwargs = ModelSerializer().build_standard_field(x_column, x_field) x_serializer =", "y_column }) serializer = self.get_serializer( queryset, y_func_serializer=y_serializer ) return Response(serializer.data)", "from rest_framework.decorators import list_route from rest_framework.response import Response from rest_framework.serializers", "return build_serializer_class @list_route(methods=['get']) def aggregate(self, request): queryset = self.filter_queryset(self.get_queryset()) y_func", "y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field) y_serializer = y_serializer_class(**y_serializer_kwargs) queryset = GroupFilter().filter(queryset,", "jet_django.serializers.reorder import reorder_serializer_factory class AggregateSerializer(serializers.Serializer): y_func = serializers.IntegerField() def __init__(self,", "y_func = serializers.IntegerField() def __init__(self, *args, **kwargs): if 'group_serializer' in", "return GroupSerializer elif self.action == 'retrieve': return build_detail_serializer_class else: return", "output. \"\"\" serializer_class = self.get_serializer_class() kwargs['context'] = self.get_serializer_context() return serializer_class(*args,", "input, and for serializing output. 
\"\"\" serializer_class = self.get_serializer_class() kwargs['context']", "serializer = self.get_serializer( queryset, many=True, group_serializer=x_serializer, y_func_serializer=y_serializer ) return Response(serializer.data)", "= x_field.class_lookups.get(x_lookup_name) y_field = self.model._meta.get_field(y_column) if x_lookup: x_field = x_lookup('none').output_field", "y_serializer_class(**y_serializer_kwargs) queryset = GroupFilter().filter(queryset, { 'x_column': x_column, 'x_lookup': x_lookup, 'y_func':", "= build_filter_class authentication_classes = () permission_classes = (HasProjectPermissions, ModifyNotInDemo) def", "deserializing input, and for serializing output. \"\"\" serializer_class = self.get_serializer_class()", "from rest_framework.serializers import ModelSerializer from jet_django.filters.model_aggregate import AggregateFilter from jet_django.filters.model_group", "def get_serializer_class(self): if self.action == 'aggregate': return AggregateSerializer elif self.action", "'y_func_serializer' in kwargs: self.fields['y_func'] = kwargs.pop('y_func_serializer') super().__init__(*args, **kwargs) class GroupSerializer(serializers.Serializer):", "Response(serializer.data) def get_serializer(self, *args, **kwargs): \"\"\" Return the serializer instance", "ModelSerializer from jet_django.filters.model_aggregate import AggregateFilter from jet_django.filters.model_group import GroupFilter from", "= build_queryset pagination_class = CustomPageNumberPagination filter_class = build_filter_class authentication_classes =", "'x_column': x_column, 'x_lookup': x_lookup, 'y_func': y_func, 'y_column': y_column }) serializer", "GroupFilter().filter(queryset, { 'x_column': x_column, 'x_lookup': x_lookup, 'y_func': y_func, 'y_column': y_column", "serializers from rest_framework.decorators import list_route from rest_framework.response import Response from", "build_queryset: setattr(instance, ordering_field, i) instance.save() i += 1 return Response({})", "kwargs: 
self.fields['group'] = kwargs.pop('group_serializer') if 'y_func_serializer' in kwargs: self.fields['y_func'] =", "y_serializer_class(**y_serializer_kwargs) queryset = AggregateFilter().filter(queryset, { 'y_func': y_func, 'y_column': y_column })", "**kwargs) def model_viewset_factory(build_model, build_filter_class, build_serializer_class, build_detail_serializer_class, build_queryset, build_actions, ordering_field): ReorderSerializer", "Viewset(viewsets.ModelViewSet): model = build_model queryset = build_queryset pagination_class = CustomPageNumberPagination", "try: result = form.save(queryset) except Exception as e: return Response({NON_FIELD_ERRORS:", "self.model._meta.get_field(y_column) y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field) y_serializer = y_serializer_class(**y_serializer_kwargs) queryset", "x_field) x_serializer = x_serializer_class(**x_serializer_kwargs) y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field) y_serializer", "x_lookup_name = request.GET.get('_x_lookup') y_func = request.GET['_y_func'].lower() y_column = request.GET.get('_y_column', 'id')", "import list_route from rest_framework.response import Response from rest_framework.serializers import ModelSerializer", "build_detail_serializer_class else: return build_serializer_class @list_route(methods=['get']) def aggregate(self, request): queryset =", "@list_route(methods=['post']) def reset_order(self, request): i = 1 for instance in", "import HasProjectPermissions, ModifyNotInDemo from jet_django.serializers.reorder import reorder_serializer_factory class AggregateSerializer(serializers.Serializer): y_func", "= reorder_serializer_factory(build_queryset, ordering_field) class Viewset(viewsets.ModelViewSet): model = build_model queryset =", "self.filter_queryset(self.get_queryset()) y_func = request.GET['_y_func'].lower() y_column = request.GET.get('_y_column', 'id') y_field =", "i) 
instance.save() i += 1 return Response({}) for action in", "= AggregateFilter().filter(queryset, { 'y_func': y_func, 'y_column': y_column }) serializer =", "}) serializer = self.get_serializer( queryset, many=True, group_serializer=x_serializer, y_func_serializer=y_serializer ) return", "if self.action == 'aggregate': return AggregateSerializer elif self.action == 'group':", "__init__(self, *args, **kwargs): if 'group_serializer' in kwargs: self.fields['group'] = kwargs.pop('group_serializer')", "reset_order(self, request): i = 1 for instance in build_queryset: setattr(instance,", "elif self.action == 'group': return GroupSerializer elif self.action == 'retrieve':", "form.save(queryset) except Exception as e: return Response({NON_FIELD_ERRORS: str(e)}, status=status.HTTP_400_BAD_REQUEST) return", "for action in build_actions: def route(self, request): form = action(data=request.data)", "ReorderSerializer(data=request.data) serializer.is_valid(raise_exception=True) serializer.save() return Response(serializer.data) @list_route(methods=['post']) def reset_order(self, request): i", "= request.GET.get('_y_column', 'id') x_field = self.model._meta.get_field(x_column) x_lookup = x_field.class_lookups.get(x_lookup_name) y_field", "() permission_classes = (HasProjectPermissions, ModifyNotInDemo) def get_serializer_class(self): if self.action ==", "def route(self, request): form = action(data=request.data) if not form.is_valid(): return", "}) serializer = self.get_serializer( queryset, y_func_serializer=y_serializer ) return Response(serializer.data) @list_route(methods=['get'])", "'aggregate': return AggregateSerializer elif self.action == 'group': return GroupSerializer elif", "in build_queryset: setattr(instance, ordering_field, i) instance.save() i += 1 return", "django.core.exceptions import NON_FIELD_ERRORS from rest_framework import status, viewsets, serializers from", "y_func = request.GET['_y_func'].lower() y_column = request.GET.get('_y_column', 'id') x_field = 
self.model._meta.get_field(x_column)", "validating and deserializing input, and for serializing output. \"\"\" serializer_class", "serializers.CharField() y_func = serializers.IntegerField() def __init__(self, *args, **kwargs): if 'group_serializer'", "**kwargs) @list_route(methods=['post']) def reorder(self, request): serializer = ReorderSerializer(data=request.data) serializer.is_valid(raise_exception=True) serializer.save()", "y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field) y_serializer = y_serializer_class(**y_serializer_kwargs) queryset = AggregateFilter().filter(queryset,", "return Response(serializer.data) def get_serializer(self, *args, **kwargs): \"\"\" Return the serializer", "ModelSerializer().build_standard_field(x_column, x_field) x_serializer = x_serializer_class(**x_serializer_kwargs) y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field)", "jet_django.permissions import HasProjectPermissions, ModifyNotInDemo from jet_django.serializers.reorder import reorder_serializer_factory class AggregateSerializer(serializers.Serializer):", "Response({}) for action in build_actions: def route(self, request): form =", "serializer_class(*args, **kwargs) @list_route(methods=['post']) def reorder(self, request): serializer = ReorderSerializer(data=request.data) serializer.is_valid(raise_exception=True)", "= form.save(queryset) except Exception as e: return Response({NON_FIELD_ERRORS: str(e)}, status=status.HTTP_400_BAD_REQUEST)", "request): queryset = self.filter_queryset(self.get_queryset()) y_func = request.GET['_y_func'].lower() y_column = request.GET.get('_y_column',", "= x_lookup('none').output_field x_serializer_class, x_serializer_kwargs = ModelSerializer().build_standard_field(x_column, x_field) x_serializer = x_serializer_class(**x_serializer_kwargs)", "build_actions: def route(self, request): form = action(data=request.data) if not form.is_valid():", "serializer = 
self.get_serializer( queryset, y_func_serializer=y_serializer ) return Response(serializer.data) @list_route(methods=['get']) def", "y_func_serializer=y_serializer ) return Response(serializer.data) def get_serializer(self, *args, **kwargs): \"\"\" Return", "request): i = 1 for instance in build_queryset: setattr(instance, ordering_field,", "\"\"\" Return the serializer instance that should be used for", "1 for instance in build_queryset: setattr(instance, ordering_field, i) instance.save() i", "GroupFilter from jet_django.pagination import CustomPageNumberPagination from jet_django.permissions import HasProjectPermissions, ModifyNotInDemo", "from jet_django.pagination import CustomPageNumberPagination from jet_django.permissions import HasProjectPermissions, ModifyNotInDemo from", "'y_column': y_column }) serializer = self.get_serializer( queryset, many=True, group_serializer=x_serializer, y_func_serializer=y_serializer", "from jet_django.filters.model_aggregate import AggregateFilter from jet_django.filters.model_group import GroupFilter from jet_django.pagination", "= serializers.CharField() y_func = serializers.IntegerField() def __init__(self, *args, **kwargs): if", "serializer = ReorderSerializer(data=request.data) serializer.is_valid(raise_exception=True) serializer.save() return Response(serializer.data) @list_route(methods=['post']) def reset_order(self,", "== 'aggregate': return AggregateSerializer elif self.action == 'group': return GroupSerializer", "'y_func_serializer' in kwargs: self.fields['y_func'] = kwargs.pop('y_func_serializer') super().__init__(*args, **kwargs) def model_viewset_factory(build_model,", "x_lookup, 'y_func': y_func, 'y_column': y_column }) serializer = self.get_serializer( queryset,", "queryset, y_func_serializer=y_serializer ) return Response(serializer.data) @list_route(methods=['get']) def group(self, request): queryset", "get_serializer(self, *args, **kwargs): \"\"\" Return the serializer instance that should", 
"self.get_serializer_context() return serializer_class(*args, **kwargs) @list_route(methods=['post']) def reorder(self, request): serializer =", "= form.filer_queryset(self.get_queryset()) try: result = form.save(queryset) except Exception as e:", "= serializers.IntegerField() def __init__(self, *args, **kwargs): if 'group_serializer' in kwargs:", "self.model._meta.get_field(y_column) if x_lookup: x_field = x_lookup('none').output_field x_serializer_class, x_serializer_kwargs = ModelSerializer().build_standard_field(x_column,", "import AggregateFilter from jet_django.filters.model_group import GroupFilter from jet_django.pagination import CustomPageNumberPagination", "status=status.HTTP_400_BAD_REQUEST) return Response({'action': form._meta.name, 'result': result}) decorator = list_route(methods=['post']) route", "= self.model._meta.get_field(y_column) y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field) y_serializer = y_serializer_class(**y_serializer_kwargs)", "result}) decorator = list_route(methods=['post']) route = decorator(route) setattr(Viewset, action._meta.name, route)", "rest_framework import status, viewsets, serializers from rest_framework.decorators import list_route from", "@list_route(methods=['get']) def aggregate(self, request): queryset = self.filter_queryset(self.get_queryset()) y_func = request.GET['_y_func'].lower()", "def reset_order(self, request): i = 1 for instance in build_queryset:", "setattr(instance, ordering_field, i) instance.save() i += 1 return Response({}) for", "self.fields['group'] = kwargs.pop('group_serializer') if 'y_func_serializer' in kwargs: self.fields['y_func'] = kwargs.pop('y_func_serializer')", "return Response({}) for action in build_actions: def route(self, request): form", "return serializer_class(*args, **kwargs) @list_route(methods=['post']) def reorder(self, request): serializer = ReorderSerializer(data=request.data)", "action in build_actions: def route(self, 
request): form = action(data=request.data) if", "= request.GET['_y_func'].lower() y_column = request.GET.get('_y_column', 'id') y_field = self.model._meta.get_field(y_column) y_serializer_class,", "kwargs.pop('group_serializer') if 'y_func_serializer' in kwargs: self.fields['y_func'] = kwargs.pop('y_func_serializer') super().__init__(*args, **kwargs)", "kwargs.pop('y_func_serializer') super().__init__(*args, **kwargs) class GroupSerializer(serializers.Serializer): group = serializers.CharField() y_func =", "super().__init__(*args, **kwargs) class GroupSerializer(serializers.Serializer): group = serializers.CharField() y_func = serializers.IntegerField()", "queryset = self.filter_queryset(self.get_queryset()) x_column = request.GET['_x_column'] x_lookup_name = request.GET.get('_x_lookup') y_func", "i = 1 for instance in build_queryset: setattr(instance, ordering_field, i)", "+= 1 return Response({}) for action in build_actions: def route(self,", "kwargs: self.fields['y_func'] = kwargs.pop('y_func_serializer') super().__init__(*args, **kwargs) def model_viewset_factory(build_model, build_filter_class, build_serializer_class,", "return build_detail_serializer_class else: return build_serializer_class @list_route(methods=['get']) def aggregate(self, request): queryset", "'x_lookup': x_lookup, 'y_func': y_func, 'y_column': y_column }) serializer = self.get_serializer(", "decorator = list_route(methods=['post']) route = decorator(route) setattr(Viewset, action._meta.name, route) return", "= (HasProjectPermissions, ModifyNotInDemo) def get_serializer_class(self): if self.action == 'aggregate': return", "= self.filter_queryset(self.get_queryset()) y_func = request.GET['_y_func'].lower() y_column = request.GET.get('_y_column', 'id') y_field", "import CustomPageNumberPagination from jet_django.permissions import HasProjectPermissions, ModifyNotInDemo from jet_django.serializers.reorder import", "= kwargs.pop('group_serializer') if 'y_func_serializer' in kwargs: 
self.fields['y_func'] = kwargs.pop('y_func_serializer') super().__init__(*args,", "Response from rest_framework.serializers import ModelSerializer from jet_django.filters.model_aggregate import AggregateFilter from", "in kwargs: self.fields['y_func'] = kwargs.pop('y_func_serializer') super().__init__(*args, **kwargs) def model_viewset_factory(build_model, build_filter_class,", "model = build_model queryset = build_queryset pagination_class = CustomPageNumberPagination filter_class", "serializing output. \"\"\" serializer_class = self.get_serializer_class() kwargs['context'] = self.get_serializer_context() return", "for instance in build_queryset: setattr(instance, ordering_field, i) instance.save() i +=", "y_field) y_serializer = y_serializer_class(**y_serializer_kwargs) queryset = AggregateFilter().filter(queryset, { 'y_func': y_func,", "= () permission_classes = (HasProjectPermissions, ModifyNotInDemo) def get_serializer_class(self): if self.action", "{ 'x_column': x_column, 'x_lookup': x_lookup, 'y_func': y_func, 'y_column': y_column })", "= self.model._meta.get_field(x_column) x_lookup = x_field.class_lookups.get(x_lookup_name) y_field = self.model._meta.get_field(y_column) if x_lookup:", "return Response(serializer.data) @list_route(methods=['post']) def reset_order(self, request): i = 1 for", "return AggregateSerializer elif self.action == 'group': return GroupSerializer elif self.action", "= y_serializer_class(**y_serializer_kwargs) queryset = AggregateFilter().filter(queryset, { 'y_func': y_func, 'y_column': y_column", "AggregateSerializer elif self.action == 'group': return GroupSerializer elif self.action ==", "route(self, request): form = action(data=request.data) if not form.is_valid(): return Response(form.errors,", "queryset = form.filer_queryset(self.get_queryset()) try: result = form.save(queryset) except Exception as", "def get_serializer(self, *args, **kwargs): \"\"\" Return the serializer instance that", "build_serializer_class 
@list_route(methods=['get']) def aggregate(self, request): queryset = self.filter_queryset(self.get_queryset()) y_func =", "build_filter_class authentication_classes = () permission_classes = (HasProjectPermissions, ModifyNotInDemo) def get_serializer_class(self):", "import ModelSerializer from jet_django.filters.model_aggregate import AggregateFilter from jet_django.filters.model_group import GroupFilter", "form.filer_queryset(self.get_queryset()) try: result = form.save(queryset) except Exception as e: return", "ModifyNotInDemo) def get_serializer_class(self): if self.action == 'aggregate': return AggregateSerializer elif", "x_column = request.GET['_x_column'] x_lookup_name = request.GET.get('_x_lookup') y_func = request.GET['_y_func'].lower() y_column", "used for validating and deserializing input, and for serializing output.", "form._meta.name, 'result': result}) decorator = list_route(methods=['post']) route = decorator(route) setattr(Viewset,", "= serializers.IntegerField() def __init__(self, *args, **kwargs): if 'y_func_serializer' in kwargs:", "= action(data=request.data) if not form.is_valid(): return Response(form.errors, status=status.HTTP_400_BAD_REQUEST) queryset =", ") return Response(serializer.data) def get_serializer(self, *args, **kwargs): \"\"\" Return the", "queryset = GroupFilter().filter(queryset, { 'x_column': x_column, 'x_lookup': x_lookup, 'y_func': y_func,", "instance that should be used for validating and deserializing input,", "serializer instance that should be used for validating and deserializing", "not form.is_valid(): return Response(form.errors, status=status.HTTP_400_BAD_REQUEST) queryset = form.filer_queryset(self.get_queryset()) try: result", "def __init__(self, *args, **kwargs): if 'group_serializer' in kwargs: self.fields['group'] =", "GroupSerializer(serializers.Serializer): group = serializers.CharField() y_func = serializers.IntegerField() def __init__(self, *args,", "self.get_serializer( queryset, 
y_func_serializer=y_serializer ) return Response(serializer.data) @list_route(methods=['get']) def group(self, request):", "for serializing output. \"\"\" serializer_class = self.get_serializer_class() kwargs['context'] = self.get_serializer_context()", "x_lookup = x_field.class_lookups.get(x_lookup_name) y_field = self.model._meta.get_field(y_column) if x_lookup: x_field =", "@list_route(methods=['post']) def reorder(self, request): serializer = ReorderSerializer(data=request.data) serializer.is_valid(raise_exception=True) serializer.save() return" ]
[ "this.towardsNight = 1 def setSky(this): r = nMap(this.darkling,0,100,0,this.red) g =", "functions. \"\"\" from ursina import color, window, time from nMap", "this.blue = 211 this.darkling = 0 this.rate = rate this.towardsNight", "nMap import nMap class Weather: def __init__(this, rate=1): this.red =", "= nMap(this.darkling,0,100,0,this.green) b = nMap(this.darkling,0,100,0,this.blue) window.color = color.rgb(r,g,b) def update(this):", "Weather functions. \"\"\" from ursina import color, window, time from", "0 this.green = 200 this.blue = 211 this.darkling = 0", "this.towardsNight * time.dt) if this.darkling < 0: this.towardsNight *= -1", "= 0 this.rate = rate this.towardsNight = 1 def setSky(this):", "from nMap import nMap class Weather: def __init__(this, rate=1): this.red", "this.darkling < 0: this.towardsNight *= -1 this.darkling = 0 this.setSky()", "0 this.rate = rate this.towardsNight = 1 def setSky(this): r", "= 211 this.darkling = 0 this.rate = rate this.towardsNight =", "1 def setSky(this): r = nMap(this.darkling,0,100,0,this.red) g = nMap(this.darkling,0,100,0,this.green) b", "= rate this.towardsNight = 1 def setSky(this): r = nMap(this.darkling,0,100,0,this.red)", "nMap(this.darkling,0,100,0,this.red) g = nMap(this.darkling,0,100,0,this.green) b = nMap(this.darkling,0,100,0,this.blue) window.color = color.rgb(r,g,b)", "g = nMap(this.darkling,0,100,0,this.green) b = nMap(this.darkling,0,100,0,this.blue) window.color = color.rgb(r,g,b) def", "b = nMap(this.darkling,0,100,0,this.blue) window.color = color.rgb(r,g,b) def update(this): this.darkling -=", "time.dt) if this.darkling < 0: this.towardsNight *= -1 this.darkling =", "* this.towardsNight * time.dt) if this.darkling < 0: this.towardsNight *=", "window.color = color.rgb(r,g,b) def update(this): this.darkling -= ( this.rate *", "this.rate * this.towardsNight * time.dt) if this.darkling < 0: this.towardsNight", "setSky(this): r = nMap(this.darkling,0,100,0,this.red) g = 
nMap(this.darkling,0,100,0,this.green) b = nMap(this.darkling,0,100,0,this.blue)", "= nMap(this.darkling,0,100,0,this.red) g = nMap(this.darkling,0,100,0,this.green) b = nMap(this.darkling,0,100,0,this.blue) window.color =", "200 this.blue = 211 this.darkling = 0 this.rate = rate", "Weather: def __init__(this, rate=1): this.red = 0 this.green = 200", "r = nMap(this.darkling,0,100,0,this.red) g = nMap(this.darkling,0,100,0,this.green) b = nMap(this.darkling,0,100,0,this.blue) window.color", "( this.rate * this.towardsNight * time.dt) if this.darkling < 0:", "= color.rgb(r,g,b) def update(this): this.darkling -= ( this.rate * this.towardsNight", "rate=1): this.red = 0 this.green = 200 this.blue = 211", "def __init__(this, rate=1): this.red = 0 this.green = 200 this.blue", "class Weather: def __init__(this, rate=1): this.red = 0 this.green =", "this.green = 200 this.blue = 211 this.darkling = 0 this.rate", "from ursina import color, window, time from nMap import nMap", "window, time from nMap import nMap class Weather: def __init__(this,", "__init__(this, rate=1): this.red = 0 this.green = 200 this.blue =", "\"\"\" from ursina import color, window, time from nMap import", "= 0 this.green = 200 this.blue = 211 this.darkling =", "this.darkling = 0 this.rate = rate this.towardsNight = 1 def", "if this.darkling < 0: this.towardsNight *= -1 this.darkling = 0", "nMap(this.darkling,0,100,0,this.green) b = nMap(this.darkling,0,100,0,this.blue) window.color = color.rgb(r,g,b) def update(this): this.darkling", "\"\"\" Weather functions. 
\"\"\" from ursina import color, window, time", "this.rate = rate this.towardsNight = 1 def setSky(this): r =", "import color, window, time from nMap import nMap class Weather:", "import nMap class Weather: def __init__(this, rate=1): this.red = 0", "nMap(this.darkling,0,100,0,this.blue) window.color = color.rgb(r,g,b) def update(this): this.darkling -= ( this.rate", "nMap class Weather: def __init__(this, rate=1): this.red = 0 this.green", "def setSky(this): r = nMap(this.darkling,0,100,0,this.red) g = nMap(this.darkling,0,100,0,this.green) b =", "= 1 def setSky(this): r = nMap(this.darkling,0,100,0,this.red) g = nMap(this.darkling,0,100,0,this.green)", "-= ( this.rate * this.towardsNight * time.dt) if this.darkling <", "ursina import color, window, time from nMap import nMap class", "color, window, time from nMap import nMap class Weather: def", "update(this): this.darkling -= ( this.rate * this.towardsNight * time.dt) if", "this.darkling -= ( this.rate * this.towardsNight * time.dt) if this.darkling", "= 200 this.blue = 211 this.darkling = 0 this.rate =", "211 this.darkling = 0 this.rate = rate this.towardsNight = 1", "this.red = 0 this.green = 200 this.blue = 211 this.darkling", "* time.dt) if this.darkling < 0: this.towardsNight *= -1 this.darkling", "color.rgb(r,g,b) def update(this): this.darkling -= ( this.rate * this.towardsNight *", "rate this.towardsNight = 1 def setSky(this): r = nMap(this.darkling,0,100,0,this.red) g", "time from nMap import nMap class Weather: def __init__(this, rate=1):", "def update(this): this.darkling -= ( this.rate * this.towardsNight * time.dt)", "= nMap(this.darkling,0,100,0,this.blue) window.color = color.rgb(r,g,b) def update(this): this.darkling -= (" ]
[ "## This is the lock that must be called to", "args['callback'] self.__helpshort = args['helpshort'] self.__helplong = args['helplong'] def callback(self, *args):", "helpful text. Alternately, type in a command to see its", "def Update(self, timestep): try: while self.__console.HasPending(): msg = self.__console.pop() args", "a.helpshort() )) print else: print(\"Command not found.\") else: for a", "text, usually one line of text, preferably not more than", "len(self.__pcommands) > 0: return True return False ## Starts the", "to show: \" + args[0]) ## Console command: help def", "self.__pcommands.append(msg.strip() ) self.__lock.release() time.sleep(0.01) ## Pops the first item off", "return self.__helpshort def helplong(self): return self.__helplong ## This class makes", "a.helpshort() )) print() ## Console command: quit def consoleQuit(self, *args):", "games may need their own commands. In that case, you", "self.__helpshort def helplong(self): return self.__helplong ## This class makes the", "prepended with \"Usage: \" # @param 'helplong' : long help", "EventDispatcher class. It's the one you use if # the", "input non-blocking. class ConsoleInput(threading.Thread): ## This is the lock that", "than 50 characters. # In output, it will be prepended", "can be created. #@{ ## Console command: show def consoleShow(self,", "lock that must be called to avoid thread collisions __lock", "register console commands with the server. The library implements a", "the client. def Start(self): self.__continue = True self.start() ## Stops", "consoleHelp(self, *args): if len(args) > 0: for a in self.__consolecommands:", "of text, preferably not more than 50 characters. # In", "it's not unheard of for a server to run in", "standard, generic EventDispatcher class. It's the one you use if", "self.__consolecommands: if a.command() == command: a.callback(*args) foundcommand = True if", "item off the commands list and returns it. 
def pop(self):", "= [] # Register the standard commands available to every", "def command(self): return self.__command def helpshort(self): return self.__helpshort def helplong(self):", "text. Alternately, type in a command to see its helpful", "\"Usage: \" # @param 'helplong' : long help text, can", "shut down. def Stop(self, blocking=False): self.__continue = False if blocking:", "= False if blocking: self.join() ## Returns true if there", "= command, callback = callback, helpshort = helpshort, helplong =", "Apache License, Version 2.0 (the \"License\"); you may not use", "**args): super().__init__(**args) self.__console = ConsoleInput() self.__consolecommands = [] # Register", "args[0]) ## Console command: help def consoleHelp(self, *args): if len(args)", "EventDispatcher that provides for an interactive console # on the", "self.__helplong = args['helplong'] def callback(self, *args): self.__callback(*args) def command(self): return", "and the various commands that # can be created. #@{", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "help text, usually one line of text, preferably not more", "of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law", "has shut down. def Stop(self, blocking=False): self.__continue = False if", "when run in a terminal. This is probably most useful", "game server. self.RegisterCommand('show', self.consoleShow, \"show (connections)\", \"Show whatever you want", "+ args[0]) ## Console command: help def consoleHelp(self, *args): if", "the constructor. # @param 'command' : the name of the", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "davenetgame.dispatch.base import DispatcherBase from davenetgame.protocol import connection ## @file dispatcher", "may need their own commands. 
In that case, you will", "= input(': ') self.__lock.acquire() self.__pcommands.append(msg.strip() ) self.__lock.release() time.sleep(0.01) ## Pops", "in a command to see its helpful text.\") self.RegisterCommand('quit', self.consoleQuit,", "a terminal and have a console. class EventDispatcherServer(DispatcherBase): __console =", "terminal and have a console. class EventDispatcherServer(DispatcherBase): __console = None", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "== command: a.callback(*args) foundcommand = True if not foundcommand: print(\"Command", "(\" \", a.helpshort() )) print() ## Console command: quit def", "if a.command() == command: a.callback(*args) foundcommand = True if not", "the various commands that # can be created. #@{ ##", "the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or", "command: help def consoleHelp(self, *args): if len(args) > 0: for", "self.consoleHelp, \"help [command]\", \"print this helpful text. Alternately, type in", "ANY KIND, either express or implied. See the License for", "text, can be as long as needed, as many lines", "**args) self.__lock = threading.RLock() self.__pcommands = [] ## Call to", "http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in", "command.lower() # Ignore simple presses of enter if command ==", "## This is a queue of commands, unparsed. 
__pcommands =", "may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless", "library implements a number of standard # commands, but games", "def Start(self): self.__continue = True self.start() ## Stops the server.", "self.__console.Stop() ## Call to register console commands with the server.", "## This is a special server-oriented EventDispatcher that provides for", "a.helplong() )) print(\"%13s %s\" % (\" \", a.helpshort() )) print", "== args[0]: print(\"%10s : %s\" % (args[0], a.helplong() )) print(\"%13s", ": short help text, usually one line of text, preferably", "# Ensure the command is always lowercase self.__command = args['command'].strip().lower()", "callback, helpshort, helplong): self.__consolecommands.append(ConsoleCommand( command = command, callback = callback,", "dispatcher # # This file contains the standard, generic EventDispatcher", "args = msg.split(\" \") command = args.pop(0) command = command.lower()", "it will be prepended with \"Usage: \" # @param 'helplong'", "ConsoleCommand(object): __command = None __callback = None __helpshort = None", "helpshort(self): return self.__helpshort def helplong(self): return self.__helplong ## This class", "the user types to use it. # @param 'callback' :", "pending lines from stdin to work with def HasPending(self): if", "command to see its helpful text.\") self.RegisterCommand('quit', self.consoleQuit, \"quit\", \"Quit", "command: a.callback(*args) foundcommand = True if not foundcommand: print(\"Command not", "under the License is distributed on an \"AS IS\" BASIS,", "doesn't support your preferred game engine, or if you'd rather", "Stops the server. It may still take a few seconds", "len(self.GetConnections() ) == 0: print(\"There are no connections at this", "True return False ## Starts the console input. 
Don't call", "__init__(self, **args): threading.Thread.__init__(self, **args) self.__lock = threading.RLock() self.__pcommands = []", "quit def consoleQuit(self, *args): print(\"Quit signaled from console.\") self.Stop() self.__console.Stop()", "Alternately, type in a command to see its helpful text.\")", "the call will # block until the server has shut", ": %s\" % (args[0], a.helplong() )) print(\"%13s %s\" % (\"", "the server. The library implements a number of standard #", "\"Quit the server.\") def Start(self): self.__console.Start() super().Start() def Update(self, timestep):", "__helplong = None def __init__(self, **args): # Ensure the command", "# # These methods give access to the built-in server", "= command.lower() # Ignore simple presses of enter if command", "# commands, but games may need their own commands. In", "= args['helplong'] def callback(self, *args): self.__callback(*args) def command(self): return self.__command", "so. If blocking is \"True\", then the call will #", "contains the standard, generic EventDispatcher class. It's the one you", "this file except in compliance with the License. You may", "Ignore simple presses of enter if command == '': continue", "except: pass super().Update(timestep) ## @name Console API # # These", "arguments in the constructor. # @param 'command' : the name", "for a in self.GetConnections(): print(\"{0:3}: {1:40} {2:10} {3:4}\".format(a.id(), str(a), connection.statuslist[a.Status()][1],", "few seconds or so. If blocking is \"True\", then the", "line endings to # signify paragraph breaks, if need be.", "if need be. class ConsoleCommand(object): __command = None __callback =", "if args[0] == \"connections\": if len(self.GetConnections() ) == 0: print(\"There", "@param 'callback' : a function that will process the command", "\" + args[0]) ## Console command: help def consoleHelp(self, *args):", "however. Those will be added as needed. You may put", "have a console. 
class EventDispatcherServer(DispatcherBase): __console = None __consolecommands =", "= None __callback = None __helpshort = None __helplong =", "None if len(self.__pcommands) > 0: self.__lock.acquire() theCommand = self.__pcommands.pop(0) self.__lock.release()", "help text, can be as long as needed, as many", "generic EventDispatcher class. It's the one you use if #", "== \"connections\": if len(self.GetConnections() ) == 0: print(\"There are no", "\"help [command]\", \"print this helpful text. Alternately, type in a", "types it. # @param 'helpshort' : short help text, usually", "= ConsoleInput() self.__consolecommands = [] # Register the standard commands", "you will need your own callbacks. def RegisterCommand(self, command, callback,", "in self.GetConnections(): print(\"{0:3}: {1:40} {2:10} {3:4}\".format(a.id(), str(a), connection.statuslist[a.Status()][1], int(a.GetConnectionPing() *", "def __init__(self, **args): super().__init__(**args) self.__console = ConsoleInput() self.__consolecommands = []", "self.start() ## Stops the server. It may still take a", "if len(args) > 0: for a in self.__consolecommands: if a.command()", "connection.statuslist[a.Status()][1], int(a.GetConnectionPing() * 1000) ) ) else: print(\"Unknown thing to", "= helplong ) ) #@} ## This class implements console", "function that will process the command when the user types", "[] # Register the standard commands available to every game", "seconds or so. If blocking is \"True\", then the call", "def run(self): while self.__continue: msg = input(': ') self.__lock.acquire() self.__pcommands.append(msg.strip()", "server. self.RegisterCommand('show', self.consoleShow, \"show (connections)\", \"Show whatever you want to", "command, callback, helpshort, helplong): self.__consolecommands.append(ConsoleCommand( command = command, callback =", ": a function that will process the command when the", "file except in compliance with the License. 
You may obtain", "a special server-oriented EventDispatcher that provides for an interactive console", "testing the library, # though it's not unheard of for", "found.\") else: for a in self.__consolecommands: print(\"%10s : %s\" %", "terminal. This is probably most useful for testing the library,", "self.__continue = True self.start() ## Stops the server. It may", "% (\" \", a.helpshort() )) print else: print(\"Command not found.\")", "Ensure the command is always lowercase self.__command = args['command'].strip().lower() self.__callback", "def helpshort(self): return self.__helpshort def helplong(self): return self.__helplong ## This", "False for a in self.__consolecommands: if a.command() == command: a.callback(*args)", "case, you will need your own callbacks. def RegisterCommand(self, command,", "timestep): try: while self.__console.HasPending(): msg = self.__console.pop() args = msg.split(\"", "endings to # signify paragraph breaks, if need be. class", "call will # block until the server has shut down.", "OR CONDITIONS OF ANY KIND, either express or implied. See", "self.__callback = args['callback'] self.__helpshort = args['helpshort'] self.__helplong = args['helplong'] def", "[] ## Call to start the client. def Start(self): self.__continue", "self.__lock.release() time.sleep(0.01) ## Pops the first item off the commands", "foundcommand = False for a in self.__consolecommands: if a.command() ==", "self.__helpshort = args['helpshort'] self.__helplong = args['helplong'] def callback(self, *args): self.__callback(*args)", "put # line endings, however. Those will be added as", "Copyright 2016 <NAME> Licensed under the Apache License, Version 2.0", "as needed. You may put line endings to # signify", "under the Apache License, Version 2.0 (the \"License\"); you may", "implements console commands. To create a new console command, simply", "to start the client. 
def Start(self): self.__continue = True self.start()", "a server to run in a terminal and have a", "all the keyword arguments in the constructor. # @param 'command'", "the built-in server console and the various commands that #", "unparsed. __pcommands = None def __init__(self, **args): threading.Thread.__init__(self, **args) self.__lock", "len(args) > 0: for a in self.__consolecommands: if a.command() ==", "a new console command, simply make an instance of #", "%s\" % (\" \", a.helpshort() )) print else: print(\"Command not", "command, what the user types to use it. # @param", "# this class, giving all the keyword arguments in the", "special server-oriented EventDispatcher that provides for an interactive console #", "this class, giving all the keyword arguments in the constructor.", "preferred game engine, or if you'd rather manage the library", "will process the command when the user types it. #", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "constructor. # @param 'command' : the name of the command,", "not more than 50 characters. # In output, it will", "See the License for the specific language governing permissions and", "class makes the console input non-blocking. class ConsoleInput(threading.Thread): ## This", "## Pops the first item off the commands list and", "it. # @param 'helpshort' : short help text, usually one", "simple presses of enter if command == '': continue foundcommand", "commands that # can be created. 
#@{ ## Console command:", "{1:40} {2:10} {3:4}\".format(a.id(), str(a), connection.statuslist[a.Status()][1], int(a.GetConnectionPing() * 1000) ) )", "Console command: help def consoleHelp(self, *args): if len(args) > 0:", "lowercase self.__command = args['command'].strip().lower() self.__callback = args['callback'] self.__helpshort = args['helpshort']", "msg = input(': ') self.__lock.acquire() self.__pcommands.append(msg.strip() ) self.__lock.release() time.sleep(0.01) ##", "In that case, you will need your own callbacks. def", "a queue of commands, unparsed. __pcommands = None def __init__(self,", "command: show def consoleShow(self, *args): if len(args) != 1: print(\"Usage:", "probably most useful for testing the library, # though it's", "to # signify paragraph breaks, if need be. class ConsoleCommand(object):", "This is the lock that must be called to avoid", "print(\"%10s : %s\" % (args[0], a.helplong() )) print(\"%13s %s\" %", "in writing, software distributed under the License is distributed on", "that case, you will need your own callbacks. def RegisterCommand(self,", "required by applicable law or agreed to in writing, software", "its helpful text.\") self.RegisterCommand('quit', self.consoleQuit, \"quit\", \"Quit the server.\") def", "be called to avoid thread collisions __lock = None ##", "to use it. # @param 'callback' : a function that", "Do not put # line endings, however. Those will be", "len(args) != 1: print(\"Usage: show (connections)\") else: if args[0] ==", "of the command, what the user types to use it.", "you use if # the library doesn't support your preferred", "# independently of your game engine. ## This is the", "avoid thread collisions __lock = None ## This is a", "= None ## This is a queue of commands, unparsed.", "language governing permissions and limitations under the License. 
''' import", "= None def __init__(self, **args): # Ensure the command is", "\"quit\", \"Quit the server.\") def Start(self): self.__console.Start() super().Start() def Update(self,", "__lock = None ## This is a queue of commands,", "EventDispatcher(DispatcherBase): pass ## This is a special server-oriented EventDispatcher that", "unheard of for a server to run in a terminal", "not unheard of for a server to run in a", "instead call Start(). def run(self): while self.__continue: msg = input(':", "== 0: print(\"There are no connections at this time.\") else:", "pass super().Update(timestep) ## @name Console API # # These methods", "one you use if # the library doesn't support your", "# @param 'helplong' : long help text, can be as", "collisions __lock = None ## This is a queue of", "class, giving all the keyword arguments in the constructor. #", "what the user types to use it. # @param 'callback'", "\"True\", then the call will # block until the server", "in self.__consolecommands: print(\"%10s : %s\" % (a.command(), a.helplong() )) print(\"%13s", "# These methods give access to the built-in server console", "args['helplong'] def callback(self, *args): self.__callback(*args) def command(self): return self.__command def", "game engine. ## This is the standard EventDispatcher. class EventDispatcher(DispatcherBase):", "% (a.command(), a.helplong() )) print(\"%13s %s\" % (\" \", a.helpshort()", "software distributed under the License is distributed on an \"AS", "distributed under the License is distributed on an \"AS IS\"", "None __consolecommands = None def __init__(self, **args): super().__init__(**args) self.__console =", "print(\"Quit signaled from console.\") self.Stop() self.__console.Stop() ## Call to register", "command, simply make an instance of # this class, giving", "first item off the commands list and returns it. def", "the standard commands available to every game server. 
self.RegisterCommand('show', self.consoleShow,", "self.__consolecommands: if a.command() == args[0]: print(\"%10s : %s\" % (args[0],", "the server when run in a terminal. This is probably", "the License. ''' import threading, time from davenetgame.dispatch.base import DispatcherBase", "return self.__command def helpshort(self): return self.__helpshort def helplong(self): return self.__helplong", "CONDITIONS OF ANY KIND, either express or implied. See the", "Version 2.0 (the \"License\"); you may not use this file", "Console API # # These methods give access to the", "line endings, however. Those will be added as needed. You", "## Stops the server. It may still take a few", "commands. In that case, you will need your own callbacks.", "Don't call this directly, instead call Start(). def run(self): while", "__init__(self, **args): super().__init__(**args) self.__console = ConsoleInput() self.__consolecommands = [] #", "for a in self.__consolecommands: print(\"%10s : %s\" % (a.command(), a.helplong()", "True if not foundcommand: print(\"Command not recognized: \" + command)", "def helplong(self): return self.__helplong ## This class makes the console", "there are pending lines from stdin to work with def", "print(\"There are no connections at this time.\") else: for a", "EventDispatcher. class EventDispatcher(DispatcherBase): pass ## This is a special server-oriented", "not use this file except in compliance with the License.", "2.0 (the \"License\"); you may not use this file except", "copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable", "= args.pop(0) command = command.lower() # Ignore simple presses of", "client. def Start(self): self.__continue = True self.start() ## Stops the", "def Stop(self, blocking=False): self.__continue = False if blocking: self.join() ##", "this directly, instead call Start(). 
def run(self): while self.__continue: msg", "Pops the first item off the commands list and returns", "def pop(self): theCommand = None if len(self.__pcommands) > 0: self.__lock.acquire()", "# @param 'callback' : a function that will process the", "else: for a in self.GetConnections(): print(\"{0:3}: {1:40} {2:10} {3:4}\".format(a.id(), str(a),", "{2:10} {3:4}\".format(a.id(), str(a), connection.statuslist[a.Status()][1], int(a.GetConnectionPing() * 1000) ) ) else:", "print(\"Command not found.\") else: for a in self.__consolecommands: print(\"%10s :", "class ConsoleInput(threading.Thread): ## This is the lock that must be", "self.__lock.acquire() self.__pcommands.append(msg.strip() ) self.__lock.release() time.sleep(0.01) ## Pops the first item", "non-blocking. class ConsoleInput(threading.Thread): ## This is the lock that must", "you may not use this file except in compliance with", "are pending lines from stdin to work with def HasPending(self):", "time.\") else: for a in self.GetConnections(): print(\"{0:3}: {1:40} {2:10} {3:4}\".format(a.id(),", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "## This class implements console commands. To create a new", "the License. You may obtain a copy of the License", "more than 50 characters. # In output, it will be", "# block until the server has shut down. def Stop(self,", "if there are pending lines from stdin to work with", "interactive console # on the server when run in a", "a in self.__consolecommands: print(\"%10s : %s\" % (a.command(), a.helplong() ))", "this helpful text. Alternately, type in a command to see", "library doesn't support your preferred game engine, or if you'd", "helplong): self.__consolecommands.append(ConsoleCommand( command = command, callback = callback, helpshort =", "if len(self.__pcommands) > 0: self.__lock.acquire() theCommand = self.__pcommands.pop(0) self.__lock.release() return", "paragraph breaks, if need be. 
class ConsoleCommand(object): __command = None", "foundcommand = True if not foundcommand: print(\"Command not recognized: \"", "long help text, can be as long as needed, as", "blocking=False): self.__continue = False if blocking: self.join() ## Returns true", "use this file except in compliance with the License. You", "is the standard EventDispatcher. class EventDispatcher(DispatcherBase): pass ## This is", "a function that will process the command when the user", ": the name of the command, what the user types", "makes the console input non-blocking. class ConsoleInput(threading.Thread): ## This is", "return self.__helplong ## This class makes the console input non-blocking.", "self.__callback(*args) def command(self): return self.__command def helpshort(self): return self.__helpshort def", "short help text, usually one line of text, preferably not", "not found.\") else: for a in self.__consolecommands: print(\"%10s : %s\"", "an instance of # this class, giving all the keyword", "= args['callback'] self.__helpshort = args['helpshort'] self.__helplong = args['helplong'] def callback(self,", "self.__lock = threading.RLock() self.__pcommands = [] ## Call to start", "game engine, or if you'd rather manage the library #", "make an instance of # this class, giving all the", "for testing the library, # though it's not unheard of", "the console input. Don't call this directly, instead call Start().", "until the server has shut down. def Stop(self, blocking=False): self.__continue", "a.command() == args[0]: print(\"%10s : %s\" % (args[0], a.helplong() ))", "to register console commands with the server. The library implements", "== '': continue foundcommand = False for a in self.__consolecommands:", "that # can be created. 
#@{ ## Console command: show", "if # the library doesn't support your preferred game engine,", "print(\"%13s %s\" % (\" \", a.helpshort() )) print else: print(\"Command", "if len(args) != 1: print(\"Usage: show (connections)\") else: if args[0]", "= None __helpshort = None __helplong = None def __init__(self,", "This is probably most useful for testing the library, #", "to every game server. self.RegisterCommand('show', self.consoleShow, \"show (connections)\", \"Show whatever", "connection ## @file dispatcher # # This file contains the", "def consoleShow(self, *args): if len(args) != 1: print(\"Usage: show (connections)\")", "def consoleHelp(self, *args): if len(args) > 0: for a in", "create a new console command, simply make an instance of", "blocking: self.join() ## Returns true if there are pending lines", "when the user types it. # @param 'helpshort' : short", "Call to start the client. def Start(self): self.__continue = True", "def RegisterCommand(self, command, callback, helpshort, helplong): self.__consolecommands.append(ConsoleCommand( command = command,", "blocking is \"True\", then the call will # block until", "__init__(self, **args): # Ensure the command is always lowercase self.__command", "self.__console = ConsoleInput() self.__consolecommands = [] # Register the standard", "console input. Don't call this directly, instead call Start(). def", "(connections)\", \"Show whatever you want to see.\") self.RegisterCommand('help', self.consoleHelp, \"help", "for a server to run in a terminal and have", "self.__helplong ## This class makes the console input non-blocking. class", "**args): # Ensure the command is always lowercase self.__command =", "import threading, time from davenetgame.dispatch.base import DispatcherBase from davenetgame.protocol import", "class implements console commands. To create a new console command,", "must be called to avoid thread collisions __lock = None", "not put # line endings, however. 
Those will be added", "console.\") self.Stop() self.__console.Stop() ## Call to register console commands with", "(the \"License\"); you may not use this file except in", "False if blocking: self.join() ## Returns true if there are", "manage the library # independently of your game engine. ##", "from davenetgame.protocol import connection ## @file dispatcher # # This", "\") command = args.pop(0) command = command.lower() # Ignore simple", "instance of # this class, giving all the keyword arguments", "theCommand = None if len(self.__pcommands) > 0: self.__lock.acquire() theCommand =", "created. #@{ ## Console command: show def consoleShow(self, *args): if", "1000) ) ) else: print(\"Unknown thing to show: \" +", "EventDispatcherServer(DispatcherBase): __console = None __consolecommands = None def __init__(self, **args):", "str(a), connection.statuslist[a.Status()][1], int(a.GetConnectionPing() * 1000) ) ) else: print(\"Unknown thing", "in the constructor. # @param 'command' : the name of", "print(\"%10s : %s\" % (a.command(), a.helplong() )) print(\"%13s %s\" %", "from davenetgame.dispatch.base import DispatcherBase from davenetgame.protocol import connection ## @file", "## @name Console API # # These methods give access", "# Ignore simple presses of enter if command == '':", "server console and the various commands that # can be", "specific language governing permissions and limitations under the License. 
'''", "You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0", ")) print(\"%13s %s\" % (\" \", a.helpshort() )) print else:", "Console command: show def consoleShow(self, *args): if len(args) != 1:", "*args): if len(args) != 1: print(\"Usage: show (connections)\") else: if", "self.GetConnections(): print(\"{0:3}: {1:40} {2:10} {3:4}\".format(a.id(), str(a), connection.statuslist[a.Status()][1], int(a.GetConnectionPing() * 1000)", "self.__continue: msg = input(': ') self.__lock.acquire() self.__pcommands.append(msg.strip() ) self.__lock.release() time.sleep(0.01)", "help def consoleHelp(self, *args): if len(args) > 0: for a", "Start(self): self.__continue = True self.start() ## Stops the server. It", "'callback' : a function that will process the command when", "own callbacks. def RegisterCommand(self, command, callback, helpshort, helplong): self.__consolecommands.append(ConsoleCommand( command", "else: if args[0] == \"connections\": if len(self.GetConnections() ) == 0:", "the library # independently of your game engine. ## This", "the Apache License, Version 2.0 (the \"License\"); you may not", "or implied. See the License for the specific language governing", "KIND, either express or implied. See the License for the", "## Starts the console input. Don't call this directly, instead", "signaled from console.\") self.Stop() self.__console.Stop() ## Call to register console", "to in writing, software distributed under the License is distributed", "callbacks. def RegisterCommand(self, command, callback, helpshort, helplong): self.__consolecommands.append(ConsoleCommand( command =", "while self.__continue: msg = input(': ') self.__lock.acquire() self.__pcommands.append(msg.strip() ) self.__lock.release()", "print else: print(\"Command not found.\") else: for a in self.__consolecommands:", "or if you'd rather manage the library # independently of", "endings, however. Those will be added as needed. 
You may", "None __helplong = None def __init__(self, **args): # Ensure the", "law or agreed to in writing, software distributed under the", "be prepended with \"Usage: \" # @param 'helplong' : long", "class. It's the one you use if # the library", "import connection ## @file dispatcher # # This file contains", "that must be called to avoid thread collisions __lock =", "a command to see its helpful text.\") self.RegisterCommand('quit', self.consoleQuit, \"quit\",", "the server has shut down. def Stop(self, blocking=False): self.__continue =", ") self.__lock.release() time.sleep(0.01) ## Pops the first item off the", "@file dispatcher # # This file contains the standard, generic", "at this time.\") else: for a in self.GetConnections(): print(\"{0:3}: {1:40}", ") #@} ## This class implements console commands. To create", "def consoleQuit(self, *args): print(\"Quit signaled from console.\") self.Stop() self.__console.Stop() ##", "= True self.start() ## Stops the server. It may still", "need your own callbacks. def RegisterCommand(self, command, callback, helpshort, helplong):", "= args['helpshort'] self.__helplong = args['helplong'] def callback(self, *args): self.__callback(*args) def", "helpful text.\") self.RegisterCommand('quit', self.consoleQuit, \"quit\", \"Quit the server.\") def Start(self):", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "command) except: pass super().Update(timestep) ## @name Console API # #", "thing to show: \" + args[0]) ## Console command: help", "self.__consolecommands: print(\"%10s : %s\" % (a.command(), a.helplong() )) print(\"%13s %s\"", "commands list and returns it. def pop(self): theCommand = None", "print(\"Command not recognized: \" + command) except: pass super().Update(timestep) ##", "your own callbacks. 
def RegisterCommand(self, command, callback, helpshort, helplong): self.__consolecommands.append(ConsoleCommand(", "Start(self): self.__console.Start() super().Start() def Update(self, timestep): try: while self.__console.HasPending(): msg", "') self.__lock.acquire() self.__pcommands.append(msg.strip() ) self.__lock.release() time.sleep(0.01) ## Pops the first", "an interactive console # on the server when run in", "access to the built-in server console and the various commands", "that provides for an interactive console # on the server", "0: print(\"There are no connections at this time.\") else: for", "the first item off the commands list and returns it.", "(args[0], a.helplong() )) print(\"%13s %s\" % (\" \", a.helpshort() ))", "return True return False ## Starts the console input. Don't", "text.\") self.RegisterCommand('quit', self.consoleQuit, \"quit\", \"Quit the server.\") def Start(self): self.__console.Start()", "<filename>davenetgame/dispatch/dispatcher.py<gh_stars>0 #!/usr/bin/env python3 ''' Copyright 2016 <NAME> Licensed under the", "while self.__console.HasPending(): msg = self.__console.pop() args = msg.split(\" \") command", "for the specific language governing permissions and limitations under the", "helpshort, helplong = helplong ) ) #@} ## This class", "@param 'command' : the name of the command, what the", "the server.\") def Start(self): self.__console.Start() super().Start() def Update(self, timestep): try:", "console commands with the server. The library implements a number", "None __callback = None __helpshort = None __helplong = None", "__consolecommands = None def __init__(self, **args): super().__init__(**args) self.__console = ConsoleInput()", "#@} ## This class implements console commands. To create a", "*args): self.__callback(*args) def command(self): return self.__command def helpshort(self): return self.__helpshort", "# @param 'command' : the name of the command, what", "be added as needed. 
You may put line endings to", "commands, but games may need their own commands. In that", "server. It may still take a few seconds or so.", "thread collisions __lock = None ## This is a queue", "types to use it. # @param 'callback' : a function", "threading.Thread.__init__(self, **args) self.__lock = threading.RLock() self.__pcommands = [] ## Call", "= None def __init__(self, **args): super().__init__(**args) self.__console = ConsoleInput() self.__consolecommands", "the commands list and returns it. def pop(self): theCommand =", "the License for the specific language governing permissions and limitations", "may not use this file except in compliance with the", "License. ''' import threading, time from davenetgame.dispatch.base import DispatcherBase from", "(a.command(), a.helplong() )) print(\"%13s %s\" % (\" \", a.helpshort() ))", "for an interactive console # on the server when run", "put line endings to # signify paragraph breaks, if need", "implied. See the License for the specific language governing permissions", "self.__console.Start() super().Start() def Update(self, timestep): try: while self.__console.HasPending(): msg =", "of commands, unparsed. __pcommands = None def __init__(self, **args): threading.Thread.__init__(self,", "%s\" % (\" \", a.helpshort() )) print() ## Console command:", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "will be added as needed. You may put line endings", "to see its helpful text.\") self.RegisterCommand('quit', self.consoleQuit, \"quit\", \"Quit the", "**args): threading.Thread.__init__(self, **args) self.__lock = threading.RLock() self.__pcommands = [] ##", "## Returns true if there are pending lines from stdin", "= [] ## Call to start the client. def Start(self):", "__command = None __callback = None __helpshort = None __helplong", "# This file contains the standard, generic EventDispatcher class. It's", "added as needed. 
You may put line endings to #", "msg.split(\" \") command = args.pop(0) command = command.lower() # Ignore", "# though it's not unheard of for a server to", "else: print(\"Command not found.\") else: for a in self.__consolecommands: print(\"%10s", "run(self): while self.__continue: msg = input(': ') self.__lock.acquire() self.__pcommands.append(msg.strip() )", "threading, time from davenetgame.dispatch.base import DispatcherBase from davenetgame.protocol import connection", "in a terminal. This is probably most useful for testing", "0: return True return False ## Starts the console input.", "is always lowercase self.__command = args['command'].strip().lower() self.__callback = args['callback'] self.__helpshort", "methods give access to the built-in server console and the", "it. # @param 'callback' : a function that will process", "limitations under the License. ''' import threading, time from davenetgame.dispatch.base", "server to run in a terminal and have a console.", "to run in a terminal and have a console. class", "console command, simply make an instance of # this class,", "needed. Do not put # line endings, however. Those will", "commands, unparsed. __pcommands = None def __init__(self, **args): threading.Thread.__init__(self, **args)", "is the lock that must be called to avoid thread", "for a in self.__consolecommands: if a.command() == command: a.callback(*args) foundcommand", "on the server when run in a terminal. This is", "giving all the keyword arguments in the constructor. # @param", ")) print else: print(\"Command not found.\") else: for a in", "not foundcommand: print(\"Command not recognized: \" + command) except: pass", "off the commands list and returns it. def pop(self): theCommand", "self.__command = args['command'].strip().lower() self.__callback = args['callback'] self.__helpshort = args['helpshort'] self.__helplong", "with the server. 
The library implements a number of standard", "your preferred game engine, or if you'd rather manage the", "print(\"Usage: show (connections)\") else: if args[0] == \"connections\": if len(self.GetConnections()", "self.__consolecommands = [] # Register the standard commands available to", "long as needed, as many lines as needed. Do not", "standard EventDispatcher. class EventDispatcher(DispatcherBase): pass ## This is a special", "%s\" % (args[0], a.helplong() )) print(\"%13s %s\" % (\" \",", "see its helpful text.\") self.RegisterCommand('quit', self.consoleQuit, \"quit\", \"Quit the server.\")", "with \"Usage: \" # @param 'helplong' : long help text,", "= None if len(self.__pcommands) > 0: self.__lock.acquire() theCommand = self.__pcommands.pop(0)", "msg = self.__console.pop() args = msg.split(\" \") command = args.pop(0)", "self.consoleQuit, \"quit\", \"Quit the server.\") def Start(self): self.__console.Start() super().Start() def", "## @file dispatcher # # This file contains the standard,", "super().Start() def Update(self, timestep): try: while self.__console.HasPending(): msg = self.__console.pop()", "__callback = None __helpshort = None __helplong = None def", "super().Update(timestep) ## @name Console API # # These methods give", "True self.start() ## Stops the server. It may still take", "under the License. ''' import threading, time from davenetgame.dispatch.base import", "in self.__consolecommands: if a.command() == command: a.callback(*args) foundcommand = True", "writing, software distributed under the License is distributed on an", "if len(self.__pcommands) > 0: return True return False ## Starts", "as many lines as needed. 
Do not put # line", "RegisterCommand(self, command, callback, helpshort, helplong): self.__consolecommands.append(ConsoleCommand( command = command, callback", "command: quit def consoleQuit(self, *args): print(\"Quit signaled from console.\") self.Stop()", "\", a.helpshort() )) print() ## Console command: quit def consoleQuit(self,", "you'd rather manage the library # independently of your game", "line of text, preferably not more than 50 characters. #", "args[0] == \"connections\": if len(self.GetConnections() ) == 0: print(\"There are", "# the library doesn't support your preferred game engine, or", "in compliance with the License. You may obtain a copy", "print() ## Console command: quit def consoleQuit(self, *args): print(\"Quit signaled", "new console command, simply make an instance of # this", "if blocking: self.join() ## Returns true if there are pending", "this time.\") else: for a in self.GetConnections(): print(\"{0:3}: {1:40} {2:10}", ": long help text, can be as long as needed,", "if not foundcommand: print(\"Command not recognized: \" + command) except:", "API # # These methods give access to the built-in", "In output, it will be prepended with \"Usage: \" #", "agreed to in writing, software distributed under the License is", "show: \" + args[0]) ## Console command: help def consoleHelp(self,", "50 characters. # In output, it will be prepended with", "at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to", "args['command'].strip().lower() self.__callback = args['callback'] self.__helpshort = args['helpshort'] self.__helplong = args['helplong']", "many lines as needed. Do not put # line endings,", "down. def Stop(self, blocking=False): self.__continue = False if blocking: self.join()", ")) print(\"%13s %s\" % (\" \", a.helpshort() )) print() ##", "DispatcherBase from davenetgame.protocol import connection ## @file dispatcher # #", "# line endings, however. 
Those will be added as needed.", "of enter if command == '': continue foundcommand = False", "!= 1: print(\"Usage: show (connections)\") else: if args[0] == \"connections\":", "the one you use if # the library doesn't support", "self.__consolecommands.append(ConsoleCommand( command = command, callback = callback, helpshort = helpshort,", "that will process the command when the user types it.", "for a in self.__consolecommands: if a.command() == args[0]: print(\"%10s :", "simply make an instance of # this class, giving all", "will be prepended with \"Usage: \" # @param 'helplong' :", "# can be created. #@{ ## Console command: show def", "print(\"{0:3}: {1:40} {2:10} {3:4}\".format(a.id(), str(a), connection.statuslist[a.Status()][1], int(a.GetConnectionPing() * 1000) )", "every game server. self.RegisterCommand('show', self.consoleShow, \"show (connections)\", \"Show whatever you", "Call to register console commands with the server. The library", "callback = callback, helpshort = helpshort, helplong = helplong )", "either express or implied. See the License for the specific", "commands. To create a new console command, simply make an", "is a special server-oriented EventDispatcher that provides for an interactive", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "a in self.__consolecommands: if a.command() == args[0]: print(\"%10s : %s\"", "threading.RLock() self.__pcommands = [] ## Call to start the client.", "console. class EventDispatcherServer(DispatcherBase): __console = None __consolecommands = None def", "helpshort, helplong): self.__consolecommands.append(ConsoleCommand( command = command, callback = callback, helpshort", "\"License\"); you may not use this file except in compliance", "preferably not more than 50 characters. 
# In output, it", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "Licensed under the Apache License, Version 2.0 (the \"License\"); you", "+ command) except: pass super().Update(timestep) ## @name Console API #", "with def HasPending(self): if len(self.__pcommands) > 0: return True return", "use if # the library doesn't support your preferred game", "number of standard # commands, but games may need their", "as needed. Do not put # line endings, however. Those", "License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed", "License for the specific language governing permissions and limitations under", "engine. ## This is the standard EventDispatcher. class EventDispatcher(DispatcherBase): pass", "block until the server has shut down. def Stop(self, blocking=False):", "try: while self.__console.HasPending(): msg = self.__console.pop() args = msg.split(\" \")", "*args): print(\"Quit signaled from console.\") self.Stop() self.__console.Stop() ## Call to", ") ) else: print(\"Unknown thing to show: \" + args[0])", "def Start(self): self.__console.Start() super().Start() def Update(self, timestep): try: while self.__console.HasPending():", "enter if command == '': continue foundcommand = False for", "def __init__(self, **args): threading.Thread.__init__(self, **args) self.__lock = threading.RLock() self.__pcommands =", "available to every game server. self.RegisterCommand('show', self.consoleShow, \"show (connections)\", \"Show", "continue foundcommand = False for a in self.__consolecommands: if a.command()", "__helpshort = None __helplong = None def __init__(self, **args): #", "helpshort = helpshort, helplong = helplong ) ) #@} ##", "queue of commands, unparsed. 
__pcommands = None def __init__(self, **args):", "''' import threading, time from davenetgame.dispatch.base import DispatcherBase from davenetgame.protocol", "# In output, it will be prepended with \"Usage: \"", "the library doesn't support your preferred game engine, or if", "python3 ''' Copyright 2016 <NAME> Licensed under the Apache License,", "= helpshort, helplong = helplong ) ) #@} ## This", "[command]\", \"print this helpful text. Alternately, type in a command", "as long as needed, as many lines as needed. Do", "command when the user types it. # @param 'helpshort' :", "governing permissions and limitations under the License. ''' import threading,", "a few seconds or so. If blocking is \"True\", then", "callback, helpshort = helpshort, helplong = helplong ) ) #@}", "is \"True\", then the call will # block until the", "None def __init__(self, **args): threading.Thread.__init__(self, **args) self.__lock = threading.RLock() self.__pcommands", "server-oriented EventDispatcher that provides for an interactive console # on", "support your preferred game engine, or if you'd rather manage", "call Start(). def run(self): while self.__continue: msg = input(': ')", "server. The library implements a number of standard # commands,", "# signify paragraph breaks, if need be. class ConsoleCommand(object): __command", "None __helpshort = None __helplong = None def __init__(self, **args):", "true if there are pending lines from stdin to work", "will need your own callbacks. 
def RegisterCommand(self, command, callback, helpshort,", "a in self.GetConnections(): print(\"{0:3}: {1:40} {2:10} {3:4}\".format(a.id(), str(a), connection.statuslist[a.Status()][1], int(a.GetConnectionPing()", "the lock that must be called to avoid thread collisions", "args.pop(0) command = command.lower() # Ignore simple presses of enter", "2016 <NAME> Licensed under the Apache License, Version 2.0 (the", "%s\" % (a.command(), a.helplong() )) print(\"%13s %s\" % (\" \",", "text, preferably not more than 50 characters. # In output,", "the command is always lowercase self.__command = args['command'].strip().lower() self.__callback =", "file contains the standard, generic EventDispatcher class. It's the one", "'helplong' : long help text, can be as long as", "except in compliance with the License. You may obtain a", "pass ## This is a special server-oriented EventDispatcher that provides", "may put line endings to # signify paragraph breaks, if", "HasPending(self): if len(self.__pcommands) > 0: return True return False ##", "are no connections at this time.\") else: for a in", "run in a terminal and have a console. class EventDispatcherServer(DispatcherBase):", "to work with def HasPending(self): if len(self.__pcommands) > 0: return", "= args['command'].strip().lower() self.__callback = args['callback'] self.__helpshort = args['helpshort'] self.__helplong =", "ConsoleInput() self.__consolecommands = [] # Register the standard commands available", "of your game engine. ## This is the standard EventDispatcher.", "## Console command: help def consoleHelp(self, *args): if len(args) >", "self.consoleShow, \"show (connections)\", \"Show whatever you want to see.\") self.RegisterCommand('help',", "compliance with the License. You may obtain a copy of", "will # block until the server has shut down. def", "This is the standard EventDispatcher. 
class EventDispatcher(DispatcherBase): pass ## This", "helplong = helplong ) ) #@} ## This class implements", "signify paragraph breaks, if need be. class ConsoleCommand(object): __command =", "list and returns it. def pop(self): theCommand = None if", "\"print this helpful text. Alternately, type in a command to", "command(self): return self.__command def helpshort(self): return self.__helpshort def helplong(self): return", "Stop(self, blocking=False): self.__continue = False if blocking: self.join() ## Returns", "import DispatcherBase from davenetgame.protocol import connection ## @file dispatcher #", "recognized: \" + command) except: pass super().Update(timestep) ## @name Console", "command = command, callback = callback, helpshort = helpshort, helplong", "server has shut down. def Stop(self, blocking=False): self.__continue = False", ")) print() ## Console command: quit def consoleQuit(self, *args): print(\"Quit", "print(\"%13s %s\" % (\" \", a.helpshort() )) print() ## Console", "permissions and limitations under the License. ''' import threading, time", "a.helplong() )) print(\"%13s %s\" % (\" \", a.helpshort() )) print()", "@param 'helplong' : long help text, can be as long", "Starts the console input. Don't call this directly, instead call", "from console.\") self.Stop() self.__console.Stop() ## Call to register console commands", "console input non-blocking. class ConsoleInput(threading.Thread): ## This is the lock", "callback(self, *args): self.__callback(*args) def command(self): return self.__command def helpshort(self): return", "but games may need their own commands. In that case,", "To create a new console command, simply make an instance", "the user types it. # @param 'helpshort' : short help", "a.command() == command: a.callback(*args) foundcommand = True if not foundcommand:", "lines as needed. Do not put # line endings, however.", "needed, as many lines as needed. Do not put #", "characters. 
# In output, it will be prepended with \"Usage:", "# @param 'helpshort' : short help text, usually one line", "need be. class ConsoleCommand(object): __command = None __callback = None", "the server. It may still take a few seconds or", "foundcommand: print(\"Command not recognized: \" + command) except: pass super().Update(timestep)", "connections at this time.\") else: for a in self.GetConnections(): print(\"{0:3}:", "args['helpshort'] self.__helplong = args['helplong'] def callback(self, *args): self.__callback(*args) def command(self):", "## Console command: quit def consoleQuit(self, *args): print(\"Quit signaled from", "\"connections\": if len(self.GetConnections() ) == 0: print(\"There are no connections", "rather manage the library # independently of your game engine.", "in a terminal and have a console. class EventDispatcherServer(DispatcherBase): __console", "and have a console. class EventDispatcherServer(DispatcherBase): __console = None __consolecommands", "This file contains the standard, generic EventDispatcher class. It's the", "> 0: for a in self.__consolecommands: if a.command() == args[0]:", "You may put line endings to # signify paragraph breaks,", "<NAME> Licensed under the Apache License, Version 2.0 (the \"License\");", "# Register the standard commands available to every game server.", "This class makes the console input non-blocking. class ConsoleInput(threading.Thread): ##", "be. class ConsoleCommand(object): __command = None __callback = None __helpshort", "call this directly, instead call Start(). def run(self): while self.__continue:", "\" # @param 'helplong' : long help text, can be", "consoleShow(self, *args): if len(args) != 1: print(\"Usage: show (connections)\") else:", "built-in server console and the various commands that # can", "in self.__consolecommands: if a.command() == args[0]: print(\"%10s : %s\" %", "directly, instead call Start(). 
def run(self): while self.__continue: msg =", "output, it will be prepended with \"Usage: \" # @param", "lines from stdin to work with def HasPending(self): if len(self.__pcommands)", "input(': ') self.__lock.acquire() self.__pcommands.append(msg.strip() ) self.__lock.release() time.sleep(0.01) ## Pops the", "server when run in a terminal. This is probably most", "not recognized: \" + command) except: pass super().Update(timestep) ## @name", "Console command: quit def consoleQuit(self, *args): print(\"Quit signaled from console.\")", "may still take a few seconds or so. If blocking", "Unless required by applicable law or agreed to in writing,", "by applicable law or agreed to in writing, software distributed", "of standard # commands, but games may need their own", "user types to use it. # @param 'callback' : a", "time from davenetgame.dispatch.base import DispatcherBase from davenetgame.protocol import connection ##", "self.__console.HasPending(): msg = self.__console.pop() args = msg.split(\" \") command =", "is a queue of commands, unparsed. __pcommands = None def", "> 0: return True return False ## Starts the console", "* 1000) ) ) else: print(\"Unknown thing to show: \"", "the name of the command, what the user types to", "the command, what the user types to use it. #", "the command when the user types it. # @param 'helpshort'", "commands with the server. The library implements a number of", "or so. If blocking is \"True\", then the call will", "to avoid thread collisions __lock = None ## This is", "else: print(\"Unknown thing to show: \" + args[0]) ## Console", "This is a special server-oriented EventDispatcher that provides for an", "\"Show whatever you want to see.\") self.RegisterCommand('help', self.consoleHelp, \"help [command]\",", "one line of text, preferably not more than 50 characters.", "use it. 
# @param 'callback' : a function that will", "= True if not foundcommand: print(\"Command not recognized: \" +", "if you'd rather manage the library # independently of your", "express or implied. See the License for the specific language", "% (\" \", a.helpshort() )) print() ## Console command: quit", "then the call will # block until the server has", "a.callback(*args) foundcommand = True if not foundcommand: print(\"Command not recognized:", "self.join() ## Returns true if there are pending lines from", "be created. #@{ ## Console command: show def consoleShow(self, *args):", "None def __init__(self, **args): # Ensure the command is always", "This is a queue of commands, unparsed. __pcommands = None", "self.RegisterCommand('show', self.consoleShow, \"show (connections)\", \"Show whatever you want to see.\")", "to see.\") self.RegisterCommand('help', self.consoleHelp, \"help [command]\", \"print this helpful text.", "= False for a in self.__consolecommands: if a.command() == command:", "class EventDispatcherServer(DispatcherBase): __console = None __consolecommands = None def __init__(self,", "'helpshort' : short help text, usually one line of text,", "still take a few seconds or so. If blocking is", "most useful for testing the library, # though it's not", "helplong ) ) #@} ## This class implements console commands.", "needed. You may put line endings to # signify paragraph", "your game engine. ## This is the standard EventDispatcher. class", "type in a command to see its helpful text.\") self.RegisterCommand('quit',", "independently of your game engine. 
## This is the standard", "\" + command) except: pass super().Update(timestep) ## @name Console API", "## Console command: show def consoleShow(self, *args): if len(args) !=", "else: for a in self.__consolecommands: print(\"%10s : %s\" % (a.command(),", "0: for a in self.__consolecommands: if a.command() == args[0]: print(\"%10s", "server.\") def Start(self): self.__console.Start() super().Start() def Update(self, timestep): try: while", "standard # commands, but games may need their own commands.", "ConsoleInput(threading.Thread): ## This is the lock that must be called", "## This is the standard EventDispatcher. class EventDispatcher(DispatcherBase): pass ##", "of for a server to run in a terminal and", "__console = None __consolecommands = None def __init__(self, **args): super().__init__(**args)", "= msg.split(\" \") command = args.pop(0) command = command.lower() #", "implements a number of standard # commands, but games may", "def __init__(self, **args): # Ensure the command is always lowercase", "take a few seconds or so. If blocking is \"True\",", "library, # though it's not unheard of for a server", "obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required", "though it's not unheard of for a server to run", "'command' : the name of the command, what the user", "#!/usr/bin/env python3 ''' Copyright 2016 <NAME> Licensed under the Apache", "the library, # though it's not unheard of for a", "int(a.GetConnectionPing() * 1000) ) ) else: print(\"Unknown thing to show:", "davenetgame.protocol import connection ## @file dispatcher # # This file", "it. def pop(self): theCommand = None if len(self.__pcommands) > 0:", "a number of standard # commands, but games may need", "process the command when the user types it. # @param", "usually one line of text, preferably not more than 50", "console # on the server when run in a terminal.", "keyword arguments in the constructor. 
# @param 'command' : the", "whatever you want to see.\") self.RegisterCommand('help', self.consoleHelp, \"help [command]\", \"print", "if a.command() == args[0]: print(\"%10s : %s\" % (args[0], a.helplong()", "if len(self.GetConnections() ) == 0: print(\"There are no connections at", "with the License. You may obtain a copy of the", "no connections at this time.\") else: for a in self.GetConnections():", "def HasPending(self): if len(self.__pcommands) > 0: return True return False", "standard commands available to every game server. self.RegisterCommand('show', self.consoleShow, \"show", "commands available to every game server. self.RegisterCommand('show', self.consoleShow, \"show (connections)\",", "self.RegisterCommand('quit', self.consoleQuit, \"quit\", \"Quit the server.\") def Start(self): self.__console.Start() super().Start()", "super().__init__(**args) self.__console = ConsoleInput() self.__consolecommands = [] # Register the", "## Call to register console commands with the server. The", ") ) #@} ## This class implements console commands. To", "The library implements a number of standard # commands, but", "a terminal. This is probably most useful for testing the", ") else: print(\"Unknown thing to show: \" + args[0]) ##", "It may still take a few seconds or so. If", "# on the server when run in a terminal. 
This", "presses of enter if command == '': continue foundcommand =", "name of the command, what the user types to use", "is probably most useful for testing the library, # though", "None def __init__(self, **args): super().__init__(**args) self.__console = ConsoleInput() self.__consolecommands =", "= None __helplong = None def __init__(self, **args): # Ensure", "% (args[0], a.helplong() )) print(\"%13s %s\" % (\" \", a.helpshort()", "want to see.\") self.RegisterCommand('help', self.consoleHelp, \"help [command]\", \"print this helpful", "self.__console.pop() args = msg.split(\" \") command = args.pop(0) command =", "= None __consolecommands = None def __init__(self, **args): super().__init__(**args) self.__console", "command, callback = callback, helpshort = helpshort, helplong = helplong", "''' Copyright 2016 <NAME> Licensed under the Apache License, Version", "## This class makes the console input non-blocking. class ConsoleInput(threading.Thread):", "self.__pcommands = [] ## Call to start the client. def", "applicable law or agreed to in writing, software distributed under", "*args): if len(args) > 0: for a in self.__consolecommands: if", "These methods give access to the built-in server console and", ": %s\" % (a.command(), a.helplong() )) print(\"%13s %s\" % (\"", "Returns true if there are pending lines from stdin to", "consoleQuit(self, *args): print(\"Quit signaled from console.\") self.Stop() self.__console.Stop() ## Call", "breaks, if need be. class ConsoleCommand(object): __command = None __callback", "If blocking is \"True\", then the call will # block", "self.__continue = False if blocking: self.join() ## Returns true if", "from stdin to work with def HasPending(self): if len(self.__pcommands) >", ") == 0: print(\"There are no connections at this time.\")", "the License is distributed on an \"AS IS\" BASIS, WITHOUT", "see.\") self.RegisterCommand('help', self.consoleHelp, \"help [command]\", \"print this helpful text. 
Alternately,", "the specific language governing permissions and limitations under the License.", "= callback, helpshort = helpshort, helplong = helplong ) )", "the standard EventDispatcher. class EventDispatcher(DispatcherBase): pass ## This is a", "command = args.pop(0) command = command.lower() # Ignore simple presses", "Update(self, timestep): try: while self.__console.HasPending(): msg = self.__console.pop() args =", "library # independently of your game engine. ## This is", "to the built-in server console and the various commands that", "{3:4}\".format(a.id(), str(a), connection.statuslist[a.Status()][1], int(a.GetConnectionPing() * 1000) ) ) else: print(\"Unknown", "Register the standard commands available to every game server. self.RegisterCommand('show',", "can be as long as needed, as many lines as", "the console input non-blocking. class ConsoleInput(threading.Thread): ## This is the", "pop(self): theCommand = None if len(self.__pcommands) > 0: self.__lock.acquire() theCommand", "class EventDispatcher(DispatcherBase): pass ## This is a special server-oriented EventDispatcher", "It's the one you use if # the library doesn't", "returns it. def pop(self): theCommand = None if len(self.__pcommands) >", "start the client. def Start(self): self.__continue = True self.start() ##", "self.__command def helpshort(self): return self.__helpshort def helplong(self): return self.__helplong ##", "stdin to work with def HasPending(self): if len(self.__pcommands) > 0:", "@name Console API # # These methods give access to", "a in self.__consolecommands: if a.command() == command: a.callback(*args) foundcommand =", "False ## Starts the console input. Don't call this directly,", "show (connections)\") else: if args[0] == \"connections\": if len(self.GetConnections() )", "work with def HasPending(self): if len(self.__pcommands) > 0: return True", "or agreed to in writing, software distributed under the License", "their own commands. 
In that case, you will need your", "## Call to start the client. def Start(self): self.__continue =", "input. Don't call this directly, instead call Start(). def run(self):", "you want to see.\") self.RegisterCommand('help', self.consoleHelp, \"help [command]\", \"print this", "console commands. To create a new console command, simply make", "called to avoid thread collisions __lock = None ## This", "helplong(self): return self.__helplong ## This class makes the console input", "console and the various commands that # can be created.", "command = command.lower() # Ignore simple presses of enter if", "\"show (connections)\", \"Show whatever you want to see.\") self.RegisterCommand('help', self.consoleHelp,", "need their own commands. In that case, you will need", "various commands that # can be created. #@{ ## Console", "OF ANY KIND, either express or implied. See the License", "always lowercase self.__command = args['command'].strip().lower() self.__callback = args['callback'] self.__helpshort =", "time.sleep(0.01) ## Pops the first item off the commands list", "return False ## Starts the console input. Don't call this", "(\" \", a.helpshort() )) print else: print(\"Command not found.\") else:", "the standard, generic EventDispatcher class. It's the one you use", "and returns it. def pop(self): theCommand = None if len(self.__pcommands)", "be as long as needed, as many lines as needed.", "'': continue foundcommand = False for a in self.__consolecommands: if", "= self.__console.pop() args = msg.split(\" \") command = args.pop(0) command", "@param 'helpshort' : short help text, usually one line of", "License, Version 2.0 (the \"License\"); you may not use this", "the keyword arguments in the constructor. # @param 'command' :", "= None def __init__(self, **args): threading.Thread.__init__(self, **args) self.__lock = threading.RLock()", "a console. 
class EventDispatcherServer(DispatcherBase): __console = None __consolecommands = None", "self.Stop() self.__console.Stop() ## Call to register console commands with the", "Those will be added as needed. You may put line", "len(self.__pcommands) > 0: self.__lock.acquire() theCommand = self.__pcommands.pop(0) self.__lock.release() return theCommand", "and limitations under the License. ''' import threading, time from", "as needed, as many lines as needed. Do not put", "Start(). def run(self): while self.__continue: msg = input(': ') self.__lock.acquire()", "command is always lowercase self.__command = args['command'].strip().lower() self.__callback = args['callback']", "\", a.helpshort() )) print else: print(\"Command not found.\") else: for", "a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by", "give access to the built-in server console and the various", "# # This file contains the standard, generic EventDispatcher class.", "args[0]: print(\"%10s : %s\" % (args[0], a.helplong() )) print(\"%13s %s\"", "1: print(\"Usage: show (connections)\") else: if args[0] == \"connections\": if", "= threading.RLock() self.__pcommands = [] ## Call to start the", "self.RegisterCommand('help', self.consoleHelp, \"help [command]\", \"print this helpful text. Alternately, type", "show def consoleShow(self, *args): if len(args) != 1: print(\"Usage: show", "(connections)\") else: if args[0] == \"connections\": if len(self.GetConnections() ) ==", "License. You may obtain a copy of the License at", "useful for testing the library, # though it's not unheard", "command == '': continue foundcommand = False for a in", "if command == '': continue foundcommand = False for a", "#@{ ## Console command: show def consoleShow(self, *args): if len(args)", "This class implements console commands. 
To create a new console", "def callback(self, *args): self.__callback(*args) def command(self): return self.__command def helpshort(self):", "of # this class, giving all the keyword arguments in", "__pcommands = None def __init__(self, **args): threading.Thread.__init__(self, **args) self.__lock =", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "print(\"Unknown thing to show: \" + args[0]) ## Console command:", "class ConsoleCommand(object): __command = None __callback = None __helpshort =", "user types it. # @param 'helpshort' : short help text,", "own commands. In that case, you will need your own", "engine, or if you'd rather manage the library # independently", "None ## This is a queue of commands, unparsed. __pcommands", "provides for an interactive console # on the server when", "run in a terminal. This is probably most useful for" ]
[ "17:19 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "migrations, models class Migration(migrations.Migration): dependencies = [ ('account', '0002_remove_customuser_full_name'), ]", "operations = [ migrations.AddField( model_name='customuser', name='phone_number', field=models.CharField(blank=True, max_length=500), ), ]", "= [ ('account', '0002_remove_customuser_full_name'), ] operations = [ migrations.AddField( model_name='customuser',", "'0002_remove_customuser_full_name'), ] operations = [ migrations.AddField( model_name='customuser', name='phone_number', field=models.CharField(blank=True, max_length=500),", "2019-11-17 17:19 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "class Migration(migrations.Migration): dependencies = [ ('account', '0002_remove_customuser_full_name'), ] operations =", "2.2.7 on 2019-11-17 17:19 from django.db import migrations, models class", "# Generated by Django 2.2.7 on 2019-11-17 17:19 from django.db", "Migration(migrations.Migration): dependencies = [ ('account', '0002_remove_customuser_full_name'), ] operations = [", "<filename>account/migrations/0003_customuser_phone_number.py # Generated by Django 2.2.7 on 2019-11-17 17:19 from", "by Django 2.2.7 on 2019-11-17 17:19 from django.db import migrations,", "dependencies = [ ('account', '0002_remove_customuser_full_name'), ] operations = [ migrations.AddField(", "Django 2.2.7 on 2019-11-17 17:19 from django.db import migrations, models", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "on 2019-11-17 17:19 from django.db import migrations, models class Migration(migrations.Migration):", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('account',", "models class Migration(migrations.Migration): dependencies = [ ('account', '0002_remove_customuser_full_name'), ] operations", "] operations = [ 
migrations.AddField( model_name='customuser', name='phone_number', field=models.CharField(blank=True, max_length=500), ),", "[ ('account', '0002_remove_customuser_full_name'), ] operations = [ migrations.AddField( model_name='customuser', name='phone_number',", "Generated by Django 2.2.7 on 2019-11-17 17:19 from django.db import", "('account', '0002_remove_customuser_full_name'), ] operations = [ migrations.AddField( model_name='customuser', name='phone_number', field=models.CharField(blank=True,", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('account', '0002_remove_customuser_full_name')," ]
[ "base = int(input('Digite o valor da base: ')) expoente =", "expoente tem que ser positivo') potencia = 1 for c", "1 for c in range(1, expoente + 1): potencia *=", "= 0 while expoente <= 0: expoente = int(input('Digite o", "expoente = int(input('Digite o valor do expoente: ')) if expoente", "for c in range(1, expoente + 1): potencia *= base", "')) expoente = 0 while expoente <= 0: expoente =", "valor do expoente: ')) if expoente <= 0: print('O expoente", "print('O expoente tem que ser positivo') potencia = 1 for", "0: expoente = int(input('Digite o valor do expoente: ')) if", "o valor da base: ')) expoente = 0 while expoente", "while expoente <= 0: expoente = int(input('Digite o valor do", "= int(input('Digite o valor da base: ')) expoente = 0", "if expoente <= 0: print('O expoente tem que ser positivo')", "int(input('Digite o valor do expoente: ')) if expoente <= 0:", "= 1 for c in range(1, expoente + 1): potencia", "0: print('O expoente tem que ser positivo') potencia = 1", "do expoente: ')) if expoente <= 0: print('O expoente tem", "tem que ser positivo') potencia = 1 for c in", "da base: ')) expoente = 0 while expoente <= 0:", "valor da base: ')) expoente = 0 while expoente <=", "base: ')) expoente = 0 while expoente <= 0: expoente", "')) if expoente <= 0: print('O expoente tem que ser", "expoente <= 0: print('O expoente tem que ser positivo') potencia", "positivo') potencia = 1 for c in range(1, expoente +", "c in range(1, expoente + 1): potencia *= base print(f'{base}^", "0 while expoente <= 0: expoente = int(input('Digite o valor", "<= 0: expoente = int(input('Digite o valor do expoente: '))", "expoente + 1): potencia *= base print(f'{base}^ {expoente} = {potencia}')", "expoente = 0 while expoente <= 0: expoente = int(input('Digite", "potencia = 1 for c in range(1, expoente + 1):", "= int(input('Digite o valor do expoente: ')) if expoente <=", "<reponame>gabrieldcpadilha/ListaDeExercicios-PythonBrasil<filename>03_Estrutura_de_Repeticao/13_potenciacao.py 
base = int(input('Digite o valor da base: ')) expoente", "<= 0: print('O expoente tem que ser positivo') potencia =", "range(1, expoente + 1): potencia *= base print(f'{base}^ {expoente} =", "int(input('Digite o valor da base: ')) expoente = 0 while", "expoente: ')) if expoente <= 0: print('O expoente tem que", "ser positivo') potencia = 1 for c in range(1, expoente", "in range(1, expoente + 1): potencia *= base print(f'{base}^ {expoente}", "que ser positivo') potencia = 1 for c in range(1,", "o valor do expoente: ')) if expoente <= 0: print('O", "expoente <= 0: expoente = int(input('Digite o valor do expoente:" ]
[ "frappe.utils import flt from accounting.accounting.general_ledger import make_gl_entry, make_reverse_gl_entry class JournalEntry(Document):", "see license.txt from __future__ import unicode_literals import frappe from frappe.model.document", "self.total_debit == 0 : frappe.throw('Total Cannot be Zero') if not", "Copyright (c) 2021, <NAME> and contributors # For license information,", "= flt(self.total_debit) +flt(entry.debit) self.total_credit = flt(self.total_credit) + flt(entry.credit) self.difference =", "def on_submit(self): for entry in self.accounts: make_gl_entry(self,entry.account,entry.debit,entry.credit) def on_cancel(self): #", "utf-8 -*- # Copyright (c) 2021, <NAME> and contributors #", "JournalEntry(Document): def validate(self): calc_total_debit_credit(self) if self.difference: frappe.throw(\"The total debit and", "total debit and credit must be equal. The current difference", "The current difference is {}\".format(self.difference)) if self.total_credit == 0 or", "is {}\".format(self.difference)) if self.total_credit == 0 or self.total_debit == 0", "make_reverse_gl_entry class JournalEntry(Document): def validate(self): calc_total_debit_credit(self) if self.difference: frappe.throw(\"The total", "from frappe.model.document import Document from frappe.utils import flt from accounting.accounting.general_ledger", "validate(self): calc_total_debit_credit(self) if self.difference: frappe.throw(\"The total debit and credit must", "else: self.title = self.accounts[0].account def on_submit(self): for entry in self.accounts:", "be Zero') if not self.accounts: frappe.throw('Account Entries are required') else:", "self.total_credit = flt(self.total_credit) + flt(entry.credit) self.difference = flt(self.total_debit) - (self.total_credit)", "must be equal. 
The current difference is {}\".format(self.difference)) if self.total_credit", "on_cancel(self): # cancel gl entry make_reverse_gl_entry(self,self.doctype,self.name) def calc_total_debit_credit(self): self.total_debit, self.total_credit,self.difference", "license.txt from __future__ import unicode_literals import frappe from frappe.model.document import", "{}\".format(self.difference)) if self.total_credit == 0 or self.total_debit == 0 :", "<NAME> and contributors # For license information, please see license.txt", "__future__ import unicode_literals import frappe from frappe.model.document import Document from", "if not self.accounts: frappe.throw('Account Entries are required') else: self.title =", "if self.difference: frappe.throw(\"The total debit and credit must be equal.", "in self.accounts: make_gl_entry(self,entry.account,entry.debit,entry.credit) def on_cancel(self): # cancel gl entry make_reverse_gl_entry(self,self.doctype,self.name)", "please see license.txt from __future__ import unicode_literals import frappe from", "equal. The current difference is {}\".format(self.difference)) if self.total_credit == 0", "= 0,0,0 for entry in self.accounts: self.total_debit = flt(self.total_debit) +flt(entry.debit)", "== 0 : frappe.throw('Total Cannot be Zero') if not self.accounts:", "be equal. The current difference is {}\".format(self.difference)) if self.total_credit ==", "unicode_literals import frappe from frappe.model.document import Document from frappe.utils import", "0,0,0 for entry in self.accounts: self.total_debit = flt(self.total_debit) +flt(entry.debit) self.total_credit", "-*- # Copyright (c) 2021, <NAME> and contributors # For", "For license information, please see license.txt from __future__ import unicode_literals", "self.title = self.accounts[0].account def on_submit(self): for entry in self.accounts: make_gl_entry(self,entry.account,entry.debit,entry.credit)", "frappe.throw(\"The total debit and credit must be equal. 
The current", "self.total_credit,self.difference = 0,0,0 for entry in self.accounts: self.total_debit = flt(self.total_debit)", "def on_cancel(self): # cancel gl entry make_reverse_gl_entry(self,self.doctype,self.name) def calc_total_debit_credit(self): self.total_debit,", "Entries are required') else: self.title = self.accounts[0].account def on_submit(self): for", "self.difference: frappe.throw(\"The total debit and credit must be equal. The", ": frappe.throw('Total Cannot be Zero') if not self.accounts: frappe.throw('Account Entries", "0 : frappe.throw('Total Cannot be Zero') if not self.accounts: frappe.throw('Account", "# Copyright (c) 2021, <NAME> and contributors # For license", "for entry in self.accounts: self.total_debit = flt(self.total_debit) +flt(entry.debit) self.total_credit =", "def calc_total_debit_credit(self): self.total_debit, self.total_credit,self.difference = 0,0,0 for entry in self.accounts:", "if self.total_credit == 0 or self.total_debit == 0 : frappe.throw('Total", "-*- coding: utf-8 -*- # Copyright (c) 2021, <NAME> and", "and contributors # For license information, please see license.txt from", "self.total_credit == 0 or self.total_debit == 0 : frappe.throw('Total Cannot", "calc_total_debit_credit(self): self.total_debit, self.total_credit,self.difference = 0,0,0 for entry in self.accounts: self.total_debit", "and credit must be equal. 
The current difference is {}\".format(self.difference))", "accounting.accounting.general_ledger import make_gl_entry, make_reverse_gl_entry class JournalEntry(Document): def validate(self): calc_total_debit_credit(self) if", "gl entry make_reverse_gl_entry(self,self.doctype,self.name) def calc_total_debit_credit(self): self.total_debit, self.total_credit,self.difference = 0,0,0 for", "<gh_stars>1-10 # -*- coding: utf-8 -*- # Copyright (c) 2021,", "# For license information, please see license.txt from __future__ import", "contributors # For license information, please see license.txt from __future__", "make_reverse_gl_entry(self,self.doctype,self.name) def calc_total_debit_credit(self): self.total_debit, self.total_credit,self.difference = 0,0,0 for entry in", "from frappe.utils import flt from accounting.accounting.general_ledger import make_gl_entry, make_reverse_gl_entry class", "== 0 or self.total_debit == 0 : frappe.throw('Total Cannot be", "not self.accounts: frappe.throw('Account Entries are required') else: self.title = self.accounts[0].account", "self.accounts: self.total_debit = flt(self.total_debit) +flt(entry.debit) self.total_credit = flt(self.total_credit) + flt(entry.credit)", "Zero') if not self.accounts: frappe.throw('Account Entries are required') else: self.title", "0 or self.total_debit == 0 : frappe.throw('Total Cannot be Zero')", "import make_gl_entry, make_reverse_gl_entry class JournalEntry(Document): def validate(self): calc_total_debit_credit(self) if self.difference:", "are required') else: self.title = self.accounts[0].account def on_submit(self): for entry", "frappe.model.document import Document from frappe.utils import flt from accounting.accounting.general_ledger import", "make_gl_entry(self,entry.account,entry.debit,entry.credit) def on_cancel(self): # cancel gl entry make_reverse_gl_entry(self,self.doctype,self.name) def calc_total_debit_credit(self):", "entry make_reverse_gl_entry(self,self.doctype,self.name) def 
calc_total_debit_credit(self): self.total_debit, self.total_credit,self.difference = 0,0,0 for entry", "import unicode_literals import frappe from frappe.model.document import Document from frappe.utils", "make_gl_entry, make_reverse_gl_entry class JournalEntry(Document): def validate(self): calc_total_debit_credit(self) if self.difference: frappe.throw(\"The", "entry in self.accounts: self.total_debit = flt(self.total_debit) +flt(entry.debit) self.total_credit = flt(self.total_credit)", "flt from accounting.accounting.general_ledger import make_gl_entry, make_reverse_gl_entry class JournalEntry(Document): def validate(self):", "import frappe from frappe.model.document import Document from frappe.utils import flt", "self.total_debit = flt(self.total_debit) +flt(entry.debit) self.total_credit = flt(self.total_credit) + flt(entry.credit) self.difference", "on_submit(self): for entry in self.accounts: make_gl_entry(self,entry.account,entry.debit,entry.credit) def on_cancel(self): # cancel", "self.total_debit, self.total_credit,self.difference = 0,0,0 for entry in self.accounts: self.total_debit =", "self.accounts: frappe.throw('Account Entries are required') else: self.title = self.accounts[0].account def", "self.accounts: make_gl_entry(self,entry.account,entry.debit,entry.credit) def on_cancel(self): # cancel gl entry make_reverse_gl_entry(self,self.doctype,self.name) def", "flt(self.total_debit) +flt(entry.debit) self.total_credit = flt(self.total_credit) + flt(entry.credit) self.difference = flt(self.total_debit)", "from __future__ import unicode_literals import frappe from frappe.model.document import Document", "coding: utf-8 -*- # Copyright (c) 2021, <NAME> and contributors", "cancel gl entry make_reverse_gl_entry(self,self.doctype,self.name) def calc_total_debit_credit(self): self.total_debit, self.total_credit,self.difference = 0,0,0", "= self.accounts[0].account def on_submit(self): for entry in self.accounts: 
make_gl_entry(self,entry.account,entry.debit,entry.credit) def", "frappe from frappe.model.document import Document from frappe.utils import flt from", "calc_total_debit_credit(self) if self.difference: frappe.throw(\"The total debit and credit must be", "Cannot be Zero') if not self.accounts: frappe.throw('Account Entries are required')", "information, please see license.txt from __future__ import unicode_literals import frappe", "self.accounts[0].account def on_submit(self): for entry in self.accounts: make_gl_entry(self,entry.account,entry.debit,entry.credit) def on_cancel(self):", "required') else: self.title = self.accounts[0].account def on_submit(self): for entry in", "import Document from frappe.utils import flt from accounting.accounting.general_ledger import make_gl_entry,", "frappe.throw('Account Entries are required') else: self.title = self.accounts[0].account def on_submit(self):", "from accounting.accounting.general_ledger import make_gl_entry, make_reverse_gl_entry class JournalEntry(Document): def validate(self): calc_total_debit_credit(self)", "in self.accounts: self.total_debit = flt(self.total_debit) +flt(entry.debit) self.total_credit = flt(self.total_credit) +", "+flt(entry.debit) self.total_credit = flt(self.total_credit) + flt(entry.credit) self.difference = flt(self.total_debit) -", "# -*- coding: utf-8 -*- # Copyright (c) 2021, <NAME>", "for entry in self.accounts: make_gl_entry(self,entry.account,entry.debit,entry.credit) def on_cancel(self): # cancel gl", "Document from frappe.utils import flt from accounting.accounting.general_ledger import make_gl_entry, make_reverse_gl_entry", "# cancel gl entry make_reverse_gl_entry(self,self.doctype,self.name) def calc_total_debit_credit(self): self.total_debit, self.total_credit,self.difference =", "credit must be equal. 
The current difference is {}\".format(self.difference)) if", "difference is {}\".format(self.difference)) if self.total_credit == 0 or self.total_debit ==", "frappe.throw('Total Cannot be Zero') if not self.accounts: frappe.throw('Account Entries are", "or self.total_debit == 0 : frappe.throw('Total Cannot be Zero') if", "(c) 2021, <NAME> and contributors # For license information, please", "entry in self.accounts: make_gl_entry(self,entry.account,entry.debit,entry.credit) def on_cancel(self): # cancel gl entry", "import flt from accounting.accounting.general_ledger import make_gl_entry, make_reverse_gl_entry class JournalEntry(Document): def", "def validate(self): calc_total_debit_credit(self) if self.difference: frappe.throw(\"The total debit and credit", "current difference is {}\".format(self.difference)) if self.total_credit == 0 or self.total_debit", "license information, please see license.txt from __future__ import unicode_literals import", "2021, <NAME> and contributors # For license information, please see", "class JournalEntry(Document): def validate(self): calc_total_debit_credit(self) if self.difference: frappe.throw(\"The total debit", "debit and credit must be equal. The current difference is" ]
[ "models.CharField(max_length= 100) votes= models.DecimalField(max_digits= 20, decimal_places= 10) # 위의 2개", "10) # 위의 2개 타입으로 클래스 만들면 ok # link,", "만들면 ok # link, string-> CharField, data-> DecimalField # 보통", "question_text= models.CharField(max_length= 100) # column, datatype public_date= models.CharField(max_length= 100) votes=", "class Question(models.Model): # Table question_text= models.CharField(max_length= 100) # column, datatype", "ok # link, string-> CharField, data-> DecimalField # 보통 max_length=", "# 위의 2개 타입으로 클래스 만들면 ok # link, string->", "string-> CharField, data-> DecimalField # 보통 max_length= 100으로 함 class", "# link, string-> CharField, data-> DecimalField # 보통 max_length= 100으로", "DecimalField # 보통 max_length= 100으로 함 class Economics(models.Model): title= models.CharField(max_length=", "상속 class Question(models.Model): # Table question_text= models.CharField(max_length= 100) # column,", "column, datatype public_date= models.CharField(max_length= 100) votes= models.DecimalField(max_digits= 20, decimal_places= 10)", "# Create your models here. # 클래스의 기능: 상속 class", "CharField, data-> DecimalField # 보통 max_length= 100으로 함 class Economics(models.Model):", "here. # 클래스의 기능: 상속 class Question(models.Model): # Table question_text=", "models.CharField(max_length= 100) # column, datatype public_date= models.CharField(max_length= 100) votes= models.DecimalField(max_digits=", "from django.db import models # Create your models here. #", "models here. # 클래스의 기능: 상속 class Question(models.Model): # Table", "decimal_places= 10) # 위의 2개 타입으로 클래스 만들면 ok #", "20, decimal_places= 10) # 위의 2개 타입으로 클래스 만들면 ok", "class Economics(models.Model): title= models.CharField(max_length= 100) href= models.CharField(max_length= 100) create_date= models.CharField(max_length=", "django.db import models # Create your models here. 
# 클래스의", "datatype public_date= models.CharField(max_length= 100) votes= models.DecimalField(max_digits= 20, decimal_places= 10) #", "2개 타입으로 클래스 만들면 ok # link, string-> CharField, data->", "100) votes= models.DecimalField(max_digits= 20, decimal_places= 10) # 위의 2개 타입으로", "100) # column, datatype public_date= models.CharField(max_length= 100) votes= models.DecimalField(max_digits= 20,", "<reponame>mmeooo/test_django<filename>polls/models.py from django.db import models # Create your models here.", "# 클래스의 기능: 상속 class Question(models.Model): # Table question_text= models.CharField(max_length=", "# Table question_text= models.CharField(max_length= 100) # column, datatype public_date= models.CharField(max_length=", "보통 max_length= 100으로 함 class Economics(models.Model): title= models.CharField(max_length= 100) href=", "import models # Create your models here. # 클래스의 기능:", "your models here. # 클래스의 기능: 상속 class Question(models.Model): #", "data-> DecimalField # 보통 max_length= 100으로 함 class Economics(models.Model): title=", "models.DecimalField(max_digits= 20, decimal_places= 10) # 위의 2개 타입으로 클래스 만들면", "# column, datatype public_date= models.CharField(max_length= 100) votes= models.DecimalField(max_digits= 20, decimal_places=", "타입으로 클래스 만들면 ok # link, string-> CharField, data-> DecimalField", "votes= models.DecimalField(max_digits= 20, decimal_places= 10) # 위의 2개 타입으로 클래스", "models # Create your models here. 
# 클래스의 기능: 상속", "max_length= 100으로 함 class Economics(models.Model): title= models.CharField(max_length= 100) href= models.CharField(max_length=", "클래스의 기능: 상속 class Question(models.Model): # Table question_text= models.CharField(max_length= 100)", "Question(models.Model): # Table question_text= models.CharField(max_length= 100) # column, datatype public_date=", "link, string-> CharField, data-> DecimalField # 보통 max_length= 100으로 함", "위의 2개 타입으로 클래스 만들면 ok # link, string-> CharField,", "Table question_text= models.CharField(max_length= 100) # column, datatype public_date= models.CharField(max_length= 100)", "# 보통 max_length= 100으로 함 class Economics(models.Model): title= models.CharField(max_length= 100)", "100으로 함 class Economics(models.Model): title= models.CharField(max_length= 100) href= models.CharField(max_length= 100)", "Economics(models.Model): title= models.CharField(max_length= 100) href= models.CharField(max_length= 100) create_date= models.CharField(max_length= 100)", "public_date= models.CharField(max_length= 100) votes= models.DecimalField(max_digits= 20, decimal_places= 10) # 위의", "Create your models here. # 클래스의 기능: 상속 class Question(models.Model):", "함 class Economics(models.Model): title= models.CharField(max_length= 100) href= models.CharField(max_length= 100) create_date=", "기능: 상속 class Question(models.Model): # Table question_text= models.CharField(max_length= 100) #", "클래스 만들면 ok # link, string-> CharField, data-> DecimalField #" ]
[ "b[-1]) c=os.listdir(sub2) if kam_cmd=='show' or last_sub-int(b[-1])>3: print \"%s - %d,", "python # -*- coding: utf-8 -*- import sys,os,time if len(sys.argv)<2:", "len(sys.argv)<2: print \"usage: test_snap.py <check|show>\" sys.exit(2) kam_cmd=sys.argv[1] path='/var/data2/snap_store' a=os.listdir(path) a.remove('535e1a5c1ecffb2fa372fd7d')", "-*- coding: utf-8 -*- import sys,os,time if len(sys.argv)<2: print \"usage:", "coding: utf-8 -*- import sys,os,time if len(sys.argv)<2: print \"usage: test_snap.py", "kam_cmd=sys.argv[1] path='/var/data2/snap_store' a=os.listdir(path) a.remove('535e1a5c1ecffb2fa372fd7d') # this is a camera not", "is a camera not used in HF system if kam_cmd=='show'", "(path, i) b=os.listdir(sub) if 'capture' in b: b.remove('capture') b.sort() sub2='%s/%s'", "or last_sub-int(b[-1])>3: print \"%s - %d, %s - %d, (%d)\"", "if kam_cmd=='show' or last_sub-int(b[-1])>3: print \"%s - %d, %s -", "last_sub=int(time.time()/600) for i in a: sub='%s/%s' % (path, i) b=os.listdir(sub)", "sys.exit(2) kam_cmd=sys.argv[1] path='/var/data2/snap_store' a=os.listdir(path) a.remove('535e1a5c1ecffb2fa372fd7d') # this is a camera", "% (i, len(b), b[-1], len(c), last_sub-int(b[-1])) else: print \"usage: test_snap.py", "not used in HF system if kam_cmd=='show' or kam_cmd=='check': last_sub=int(time.time()/600)", "print \"usage: test_snap.py <check|show>\" sys.exit(2) kam_cmd=sys.argv[1] path='/var/data2/snap_store' a=os.listdir(path) a.remove('535e1a5c1ecffb2fa372fd7d') #", "in b: b.remove('capture') b.sort() sub2='%s/%s' % (sub, b[-1]) c=os.listdir(sub2) if", "(sub, b[-1]) c=os.listdir(sub2) if kam_cmd=='show' or last_sub-int(b[-1])>3: print \"%s -", "in HF system if kam_cmd=='show' or kam_cmd=='check': last_sub=int(time.time()/600) for i", "b.remove('capture') b.sort() sub2='%s/%s' % (sub, b[-1]) c=os.listdir(sub2) if kam_cmd=='show' or", "in a: sub='%s/%s' % (path, i) b=os.listdir(sub) if 'capture' in", "or kam_cmd=='check': last_sub=int(time.time()/600) for i in 
a: sub='%s/%s' % (path,", "b.sort() sub2='%s/%s' % (sub, b[-1]) c=os.listdir(sub2) if kam_cmd=='show' or last_sub-int(b[-1])>3:", "c=os.listdir(sub2) if kam_cmd=='show' or last_sub-int(b[-1])>3: print \"%s - %d, %s", "b=os.listdir(sub) if 'capture' in b: b.remove('capture') b.sort() sub2='%s/%s' % (sub,", "(i, len(b), b[-1], len(c), last_sub-int(b[-1])) else: print \"usage: test_snap.py <check|show>\"", "#!/usr/bin/env python # -*- coding: utf-8 -*- import sys,os,time if", "\"usage: test_snap.py <check|show>\" sys.exit(2) kam_cmd=sys.argv[1] path='/var/data2/snap_store' a=os.listdir(path) a.remove('535e1a5c1ecffb2fa372fd7d') # this", "a: sub='%s/%s' % (path, i) b=os.listdir(sub) if 'capture' in b:", "camera not used in HF system if kam_cmd=='show' or kam_cmd=='check':", "print \"%s - %d, %s - %d, (%d)\" % (i,", "i in a: sub='%s/%s' % (path, i) b=os.listdir(sub) if 'capture'", "HF system if kam_cmd=='show' or kam_cmd=='check': last_sub=int(time.time()/600) for i in", "utf-8 -*- import sys,os,time if len(sys.argv)<2: print \"usage: test_snap.py <check|show>\"", "% (sub, b[-1]) c=os.listdir(sub2) if kam_cmd=='show' or last_sub-int(b[-1])>3: print \"%s", "- %d, (%d)\" % (i, len(b), b[-1], len(c), last_sub-int(b[-1])) else:", "# this is a camera not used in HF system", "b: b.remove('capture') b.sort() sub2='%s/%s' % (sub, b[-1]) c=os.listdir(sub2) if kam_cmd=='show'", "i) b=os.listdir(sub) if 'capture' in b: b.remove('capture') b.sort() sub2='%s/%s' %", "kam_cmd=='check': last_sub=int(time.time()/600) for i in a: sub='%s/%s' % (path, i)", "a.remove('535e1a5c1ecffb2fa372fd7d') # this is a camera not used in HF", "import sys,os,time if len(sys.argv)<2: print \"usage: test_snap.py <check|show>\" sys.exit(2) kam_cmd=sys.argv[1]", "system if kam_cmd=='show' or kam_cmd=='check': last_sub=int(time.time()/600) for i in a:", "sub2='%s/%s' % (sub, b[-1]) c=os.listdir(sub2) if kam_cmd=='show' or last_sub-int(b[-1])>3: print", "%s - %d, (%d)\" % (i, len(b), b[-1], len(c), 
last_sub-int(b[-1]))", "this is a camera not used in HF system if", "- %d, %s - %d, (%d)\" % (i, len(b), b[-1],", "% (path, i) b=os.listdir(sub) if 'capture' in b: b.remove('capture') b.sort()", "a=os.listdir(path) a.remove('535e1a5c1ecffb2fa372fd7d') # this is a camera not used in", "for i in a: sub='%s/%s' % (path, i) b=os.listdir(sub) if", "last_sub-int(b[-1])>3: print \"%s - %d, %s - %d, (%d)\" %", "a camera not used in HF system if kam_cmd=='show' or", "sub='%s/%s' % (path, i) b=os.listdir(sub) if 'capture' in b: b.remove('capture')", "if 'capture' in b: b.remove('capture') b.sort() sub2='%s/%s' % (sub, b[-1])", "kam_cmd=='show' or last_sub-int(b[-1])>3: print \"%s - %d, %s - %d,", "\"%s - %d, %s - %d, (%d)\" % (i, len(b),", "%d, %s - %d, (%d)\" % (i, len(b), b[-1], len(c),", "used in HF system if kam_cmd=='show' or kam_cmd=='check': last_sub=int(time.time()/600) for", "kam_cmd=='show' or kam_cmd=='check': last_sub=int(time.time()/600) for i in a: sub='%s/%s' %", "(%d)\" % (i, len(b), b[-1], len(c), last_sub-int(b[-1])) else: print \"usage:", "if kam_cmd=='show' or kam_cmd=='check': last_sub=int(time.time()/600) for i in a: sub='%s/%s'", "if len(sys.argv)<2: print \"usage: test_snap.py <check|show>\" sys.exit(2) kam_cmd=sys.argv[1] path='/var/data2/snap_store' a=os.listdir(path)", "len(b), b[-1], len(c), last_sub-int(b[-1])) else: print \"usage: test_snap.py <check|show>\" sys.exit(2)", "path='/var/data2/snap_store' a=os.listdir(path) a.remove('535e1a5c1ecffb2fa372fd7d') # this is a camera not used", "test_snap.py <check|show>\" sys.exit(2) kam_cmd=sys.argv[1] path='/var/data2/snap_store' a=os.listdir(path) a.remove('535e1a5c1ecffb2fa372fd7d') # this is", "'capture' in b: b.remove('capture') b.sort() sub2='%s/%s' % (sub, b[-1]) c=os.listdir(sub2)", "-*- import sys,os,time if len(sys.argv)<2: print \"usage: test_snap.py <check|show>\" sys.exit(2)", "%d, (%d)\" % (i, len(b), b[-1], len(c), last_sub-int(b[-1])) else: print", "<check|show>\" sys.exit(2) 
kam_cmd=sys.argv[1] path='/var/data2/snap_store' a=os.listdir(path) a.remove('535e1a5c1ecffb2fa372fd7d') # this is a", "sys,os,time if len(sys.argv)<2: print \"usage: test_snap.py <check|show>\" sys.exit(2) kam_cmd=sys.argv[1] path='/var/data2/snap_store'", "# -*- coding: utf-8 -*- import sys,os,time if len(sys.argv)<2: print" ]
[ "classification y\"\"\" self._validation_data = self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return self._validation_data @property", "def throw_error_outside_context(func): @functools.wraps(func) def wrapper_decorator(self, *args, **kwargs): if not self.within_context:", "*args, **kwargs) return value return wrapper_decorator class ClientDataset(ABC): def __init__(self,", "arrays containing the samples x, and classification y\"\"\" self._train_data =", "False self._train_data = None self._test_data = None self._validation_data = None", "self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data @property @throw_error_outside_context def training_data_x(self):", "Data as array\"\"\" self._validation_data = self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[1]", "being fed to the model.\"\"\" return self.client_dataset_processor.process_x(raw_x_batch) def process_y(self, raw_y_batch):", "usage of ClientDataset.dataset_x inside a \"with statement\". \"\"\") else: value", "= client_dataset_processor self._train_data = None self._test_data = None self._validation_data =", "client_dataset_processor self._train_data = None self._test_data = None self._validation_data = None", "a \"with statement\". \"\"\") else: value = func(self, *args, **kwargs)", "the samples x, and classification y\"\"\" self._validation_data = self._lazy_initialise_data( self._validation_data,", "memory. 
Please wrap the usage of ClientDataset.dataset_x inside a \"with", "the samples x, and classification y\"\"\" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST)", "return self._train_data @property @throw_error_outside_context def training_data_x(self): \"\"\"Returns the Training Data", "training_data_x(self): \"\"\"Returns the Training Data as an array of samples\"\"\"", "as pair of arrays containing the samples x, and classification", "classification y\"\"\" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data @property @throw_error_outside_context", "str, client_dataset_loader: ClientDatasetLoader, client_dataset_processor: ClientDatasetProcessor, ): self.client_identifier = client_identifier self.client_dataset_loader", "return self._validation_data @property @throw_error_outside_context def validation_data_x(self): \"\"\"Returns the Validation Data", "__init__(self, client_identifier: str, client_dataset_loader: ClientDatasetLoader, client_dataset_processor: ClientDatasetProcessor, ): self.client_identifier =", "Tried to access client Dataset outside of context manager. 
This", "= self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[0] @property @throw_error_outside_context def validation_data_y(self):", "return self._train_data[1] @property @throw_error_outside_context def test_data(self): \"\"\"Returns the Training Data", "def test_data(self): \"\"\"Returns the Training Data as pair of arrays", "array of samples\"\"\" self._validation_data = self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[0]", "def validation_data_y(self): \"\"\"Returns the Classifications for the Validation Data as", "DatasetComponents.VALIDATION) return self._validation_data[1] def __enter__(self): self.within_context = True def __exit__(self,", "process_x(self, raw_x_batch): \"\"\"Pre-processes each batch of features before being fed", "self.client_dataset_loader.load_dataset(self.client_identifier, dataset_component) return self.process_x(data[\"x\"]), self.process_y(data[\"y\"]) else: return data @property @throw_error_outside_context", "raw_x_batch): \"\"\"Pre-processes each batch of features before being fed to", "self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data @property @throw_error_outside_context def training_data_x(self): \"\"\"Returns the", "def training_data_x(self): \"\"\"Returns the Training Data as an array of", "classification y\"\"\" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data @property @throw_error_outside_context", "DatasetComponents.TRAIN) return self._train_data[1] @property @throw_error_outside_context def test_data(self): \"\"\"Returns the Training", "Classifications for the Training Data as array\"\"\" self._train_data = self._lazy_initialise_data(self._train_data,", "None self.within_context = False def process_x(self, raw_x_batch): \"\"\"Pre-processes each batch", 
"sources.datasets.client_dataset_definitions.client_dataset_loaders.client_dataset_loader import ClientDatasetLoader, DatasetComponents from sources.datasets.client_dataset_definitions.client_dataset_processors.client_dataset_processor import ClientDatasetProcessor from sources.utils.exception_definitions", "statement\". \"\"\") else: value = func(self, *args, **kwargs) return value", "Training Data as an array of samples\"\"\" self._train_data = self._lazy_initialise_data(self._train_data,", "\"\"\"Returns the Validation Data as pair of arrays containing the", "@throw_error_outside_context def test_data_y(self): \"\"\"Returns the Classifications for the Test Data", "of samples\"\"\" self._validation_data = self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[0] @property", "ClientDatasetProcessor, ): self.client_identifier = client_identifier self.client_dataset_loader = client_dataset_loader self.client_dataset_processor =", "of features before being fed to the model.\"\"\" return self.client_dataset_processor.process_x(raw_x_batch)", "Data as array\"\"\" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[1] @property", "\"\"\"Returns the Training Data as an array of samples\"\"\" self._train_data", "as an array of samples\"\"\" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return", "class ClientDataset(ABC): def __init__(self, client_identifier: str, client_dataset_loader: ClientDatasetLoader, client_dataset_processor: ClientDatasetProcessor,", "self.client_dataset_processor.process_y(raw_y_batch) def _lazy_initialise_data(self, data, dataset_component: DatasetComponents): if data is None:", "import ClientDatasetProcessor from sources.utils.exception_definitions import OutsideOfContextError def throw_error_outside_context(func): @functools.wraps(func) def", "False def process_x(self, raw_x_batch): 
\"\"\"Pre-processes each batch of features before", "self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[1] @property @throw_error_outside_context def validation_data(self):", "= self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data @property @throw_error_outside_context def training_data_x(self): \"\"\"Returns", "Validation Data as array\"\"\" self._validation_data = self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return", "= False self._train_data = None self._test_data = None self._validation_data =", "Classifications for the Test Data as array\"\"\" self._test_data = self._lazy_initialise_data(self._test_data,", "training_data_y(self): \"\"\"Returns the Classifications for the Training Data as array\"\"\"", "= self.client_dataset_loader.load_dataset(self.client_identifier, dataset_component) return self.process_x(data[\"x\"]), self.process_y(data[\"y\"]) else: return data @property", "None self._validation_data = None self.within_context = False def process_x(self, raw_x_batch):", "client_identifier self.client_dataset_loader = client_dataset_loader self.client_dataset_processor = client_dataset_processor self._train_data = None", "ClientDatasetLoader, client_dataset_processor: ClientDatasetProcessor, ): self.client_identifier = client_identifier self.client_dataset_loader = client_dataset_loader", "the Classifications for the Training Data as array\"\"\" self._train_data =", "Training Data as array\"\"\" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[1]", "arrays containing the samples x, and classification y\"\"\" self._test_data =", "to data leaks and bad use of memory. 
Please wrap", "return wrapper_decorator class ClientDataset(ABC): def __init__(self, client_identifier: str, client_dataset_loader: ClientDatasetLoader,", "model.\"\"\" return self.client_dataset_processor.process_y(raw_y_batch) def _lazy_initialise_data(self, data, dataset_component: DatasetComponents): if data", "x, and classification y\"\"\" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data", "return self._validation_data[1] def __enter__(self): self.within_context = True def __exit__(self, exc_type,", "@property @throw_error_outside_context def validation_data_y(self): \"\"\"Returns the Classifications for the Validation", "test_data_x(self): \"\"\"Returns the Test Data as an array of samples\"\"\"", "DatasetComponents.TRAIN) return self._train_data @property @throw_error_outside_context def training_data_x(self): \"\"\"Returns the Training", "*args, **kwargs): if not self.within_context: raise OutsideOfContextError( \"\"\"Error: Tried to", "the model.\"\"\" return self.client_dataset_processor.process_y(raw_y_batch) def _lazy_initialise_data(self, data, dataset_component: DatasetComponents): if", "to the model.\"\"\" return self.client_dataset_processor.process_y(raw_y_batch) def _lazy_initialise_data(self, data, dataset_component: DatasetComponents):", "Test Data as array\"\"\" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[1]", "\"\"\"Returns the Test Data as an array of samples\"\"\" self._test_data", "import gc from abc import ABC from sources.datasets.client_dataset_definitions.client_dataset_loaders.client_dataset_loader import ClientDatasetLoader,", "samples\"\"\" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[0] @property @throw_error_outside_context def", "self._test_data @property @throw_error_outside_context def test_data_x(self): \"\"\"Returns the Test Data 
as", "ClientDatasetLoader, DatasetComponents from sources.datasets.client_dataset_definitions.client_dataset_processors.client_dataset_processor import ClientDatasetProcessor from sources.utils.exception_definitions import OutsideOfContextError", "DatasetComponents.TRAIN) return self._train_data[0] @property @throw_error_outside_context def training_data_y(self): \"\"\"Returns the Classifications", "\"\"\"Pre-processes each batch of labels before being fed to the", "@throw_error_outside_context def validation_data_x(self): \"\"\"Returns the Validation Data as an array", "if not self.within_context: raise OutsideOfContextError( \"\"\"Error: Tried to access client", "@property @throw_error_outside_context def test_data_x(self): \"\"\"Returns the Test Data as an", "ClientDataset(ABC): def __init__(self, client_identifier: str, client_dataset_loader: ClientDatasetLoader, client_dataset_processor: ClientDatasetProcessor, ):", "import functools import gc from abc import ABC from sources.datasets.client_dataset_definitions.client_dataset_loaders.client_dataset_loader", "value = func(self, *args, **kwargs) return value return wrapper_decorator class", "abc import ABC from sources.datasets.client_dataset_definitions.client_dataset_loaders.client_dataset_loader import ClientDatasetLoader, DatasetComponents from sources.datasets.client_dataset_definitions.client_dataset_processors.client_dataset_processor", "_lazy_initialise_data(self, data, dataset_component: DatasetComponents): if data is None: data =", "= func(self, *args, **kwargs) return value return wrapper_decorator class ClientDataset(ABC):", "of arrays containing the samples x, and classification y\"\"\" self._validation_data", "@property @throw_error_outside_context def test_data_y(self): \"\"\"Returns the Classifications for the Test", "gc from abc import ABC from sources.datasets.client_dataset_definitions.client_dataset_loaders.client_dataset_loader import ClientDatasetLoader, DatasetComponents", "the Training 
Data as pair of arrays containing the samples", "client_dataset_loader: ClientDatasetLoader, client_dataset_processor: ClientDatasetProcessor, ): self.client_identifier = client_identifier self.client_dataset_loader =", "\"with statement\". \"\"\") else: value = func(self, *args, **kwargs) return", "self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[0] @property @throw_error_outside_context def training_data_y(self): \"\"\"Returns the", "@property @throw_error_outside_context def training_data(self): \"\"\"Returns the Training Data as pair", "@throw_error_outside_context def training_data_y(self): \"\"\"Returns the Classifications for the Training Data", "validation_data_y(self): \"\"\"Returns the Classifications for the Validation Data as array\"\"\"", "def training_data_y(self): \"\"\"Returns the Classifications for the Training Data as", "self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[0] @property @throw_error_outside_context def validation_data_y(self): \"\"\"Returns the", "return self._validation_data[0] @property @throw_error_outside_context def validation_data_y(self): \"\"\"Returns the Classifications for", "batch of features before being fed to the model.\"\"\" return", "Training Data as pair of arrays containing the samples x,", "self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data @property @throw_error_outside_context def test_data_x(self):", "self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[0] @property @throw_error_outside_context def validation_data_y(self): \"\"\"Returns", "and classification y\"\"\" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data @property", "wrapper_decorator class ClientDataset(ABC): def __init__(self, client_identifier: str, client_dataset_loader: ClientDatasetLoader, 
client_dataset_processor:", "**kwargs): if not self.within_context: raise OutsideOfContextError( \"\"\"Error: Tried to access", "def process_x(self, raw_x_batch): \"\"\"Pre-processes each batch of features before being", "the Validation Data as array\"\"\" self._validation_data = self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION)", "of arrays containing the samples x, and classification y\"\"\" self._test_data", "\"\"\"Pre-processes each batch of features before being fed to the", "batch of labels before being fed to the model.\"\"\" return", "sources.utils.exception_definitions import OutsideOfContextError def throw_error_outside_context(func): @functools.wraps(func) def wrapper_decorator(self, *args, **kwargs):", "of arrays containing the samples x, and classification y\"\"\" self._train_data", "return self._test_data[1] @property @throw_error_outside_context def validation_data(self): \"\"\"Returns the Validation Data", "each batch of labels before being fed to the model.\"\"\"", "containing the samples x, and classification y\"\"\" self._test_data = self._lazy_initialise_data(self._test_data,", "def process_y(self, raw_y_batch): \"\"\"Pre-processes each batch of labels before being", "array\"\"\" self._validation_data = self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[1] def __enter__(self):", "= None self._test_data = None self._validation_data = None self.within_context =", "from sources.datasets.client_dataset_definitions.client_dataset_processors.client_dataset_processor import ClientDatasetProcessor from sources.utils.exception_definitions import OutsideOfContextError def throw_error_outside_context(func):", "as an array of samples\"\"\" self._validation_data = self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION)", "\"\"\"Returns the Classifications for the Training Data as array\"\"\" self._train_data", "DatasetComponents): if data is 
None: data = self.client_dataset_loader.load_dataset(self.client_identifier, dataset_component) return", "labels before being fed to the model.\"\"\" return self.client_dataset_processor.process_y(raw_y_batch) def", "else: return data @property @throw_error_outside_context def training_data(self): \"\"\"Returns the Training", "def __exit__(self, exc_type, exc_value, exc_traceback): self.within_context = False self._train_data =", "__exit__(self, exc_type, exc_value, exc_traceback): self.within_context = False self._train_data = None", "Data as an array of samples\"\"\" self._validation_data = self._lazy_initialise_data( self._validation_data,", "before being fed to the model.\"\"\" return self.client_dataset_processor.process_x(raw_x_batch) def process_y(self,", "return data @property @throw_error_outside_context def training_data(self): \"\"\"Returns the Training Data", "self._validation_data = self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[0] @property @throw_error_outside_context def", "wrapper_decorator(self, *args, **kwargs): if not self.within_context: raise OutsideOfContextError( \"\"\"Error: Tried", "array of samples\"\"\" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[0] @property", "fed to the model.\"\"\" return self.client_dataset_processor.process_x(raw_x_batch) def process_y(self, raw_y_batch): \"\"\"Pre-processes", "an array of samples\"\"\" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[0]", "self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[1] def __enter__(self): self.within_context = True def", "validation_data(self): \"\"\"Returns the Validation Data as pair of arrays containing", "= self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[0] @property @throw_error_outside_context def 
training_data_y(self): \"\"\"Returns", "@throw_error_outside_context def training_data_x(self): \"\"\"Returns the Training Data as an array", "and classification y\"\"\" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data @property", "def training_data(self): \"\"\"Returns the Training Data as pair of arrays", "@throw_error_outside_context def validation_data(self): \"\"\"Returns the Validation Data as pair of", "\"\"\"Error: Tried to access client Dataset outside of context manager.", "might lead to data leaks and bad use of memory.", "__enter__(self): self.within_context = True def __exit__(self, exc_type, exc_value, exc_traceback): self.within_context", "= self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[1] def __enter__(self): self.within_context =", "data @property @throw_error_outside_context def training_data(self): \"\"\"Returns the Training Data as", "data, dataset_component: DatasetComponents): if data is None: data = self.client_dataset_loader.load_dataset(self.client_identifier,", "self._test_data[1] @property @throw_error_outside_context def validation_data(self): \"\"\"Returns the Validation Data as", "raise OutsideOfContextError( \"\"\"Error: Tried to access client Dataset outside of", "x, and classification y\"\"\" self._validation_data = self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return", "def __enter__(self): self.within_context = True def __exit__(self, exc_type, exc_value, exc_traceback):", "and bad use of memory. 
Please wrap the usage of", "return self.client_dataset_processor.process_x(raw_x_batch) def process_y(self, raw_y_batch): \"\"\"Pre-processes each batch of labels", "from sources.datasets.client_dataset_definitions.client_dataset_loaders.client_dataset_loader import ClientDatasetLoader, DatasetComponents from sources.datasets.client_dataset_definitions.client_dataset_processors.client_dataset_processor import ClientDatasetProcessor from", "Dataset outside of context manager. This might lead to data", "import OutsideOfContextError def throw_error_outside_context(func): @functools.wraps(func) def wrapper_decorator(self, *args, **kwargs): if", "self.client_identifier = client_identifier self.client_dataset_loader = client_dataset_loader self.client_dataset_processor = client_dataset_processor self._train_data", "\"\"\"Returns the Validation Data as an array of samples\"\"\" self._validation_data", "@functools.wraps(func) def wrapper_decorator(self, *args, **kwargs): if not self.within_context: raise OutsideOfContextError(", "Validation Data as an array of samples\"\"\" self._validation_data = self._lazy_initialise_data(", "self._train_data = None self._test_data = None self._validation_data = None self.within_context", "@throw_error_outside_context def test_data(self): \"\"\"Returns the Training Data as pair of", "the Training Data as array\"\"\" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return", "\"\"\") else: value = func(self, *args, **kwargs) return value return", "): self.client_identifier = client_identifier self.client_dataset_loader = client_dataset_loader self.client_dataset_processor = client_dataset_processor", "containing the samples x, and classification y\"\"\" self._validation_data = self._lazy_initialise_data(", "@throw_error_outside_context def validation_data_y(self): \"\"\"Returns the Classifications for the Validation Data", "client_identifier: str, client_dataset_loader: ClientDatasetLoader, 
client_dataset_processor: ClientDatasetProcessor, ): self.client_identifier = client_identifier", "an array of samples\"\"\" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[0]", "each batch of features before being fed to the model.\"\"\"", "This might lead to data leaks and bad use of", "self._train_data[0] @property @throw_error_outside_context def training_data_y(self): \"\"\"Returns the Classifications for the", "training_data(self): \"\"\"Returns the Training Data as pair of arrays containing", "= None self.within_context = False def process_x(self, raw_x_batch): \"\"\"Pre-processes each", "arrays containing the samples x, and classification y\"\"\" self._validation_data =", "= self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[1] @property @throw_error_outside_context def validation_data(self): \"\"\"Returns", "array\"\"\" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[1] @property @throw_error_outside_context def", "@throw_error_outside_context def test_data_x(self): \"\"\"Returns the Test Data as an array", "functools import gc from abc import ABC from sources.datasets.client_dataset_definitions.client_dataset_loaders.client_dataset_loader import", "return self._test_data[0] @property @throw_error_outside_context def test_data_y(self): \"\"\"Returns the Classifications for", "else: value = func(self, *args, **kwargs) return value return wrapper_decorator", "inside a \"with statement\". 
\"\"\") else: value = func(self, *args,", "raw_y_batch): \"\"\"Pre-processes each batch of labels before being fed to", "if data is None: data = self.client_dataset_loader.load_dataset(self.client_identifier, dataset_component) return self.process_x(data[\"x\"]),", "the Training Data as an array of samples\"\"\" self._train_data =", "@property @throw_error_outside_context def training_data_y(self): \"\"\"Returns the Classifications for the Training", "self._train_data @property @throw_error_outside_context def training_data_x(self): \"\"\"Returns the Training Data as", "from sources.utils.exception_definitions import OutsideOfContextError def throw_error_outside_context(func): @functools.wraps(func) def wrapper_decorator(self, *args,", "samples x, and classification y\"\"\" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return", "= self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return self._validation_data @property @throw_error_outside_context def validation_data_x(self):", "\"\"\"Returns the Training Data as pair of arrays containing the", "samples\"\"\" self._validation_data = self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[0] @property @throw_error_outside_context", "self._validation_data = self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return self._validation_data @property @throw_error_outside_context def", "outside of context manager. 
This might lead to data leaks", "@property @throw_error_outside_context def validation_data_x(self): \"\"\"Returns the Validation Data as an", "Data as an array of samples\"\"\" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN)", "validation_data_x(self): \"\"\"Returns the Validation Data as an array of samples\"\"\"", "dataset_component: DatasetComponents): if data is None: data = self.client_dataset_loader.load_dataset(self.client_identifier, dataset_component)", "samples\"\"\" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[0] @property @throw_error_outside_context def", "as array\"\"\" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[1] @property @throw_error_outside_context", "samples x, and classification y\"\"\" self._validation_data = self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION)", "containing the samples x, and classification y\"\"\" self._train_data = self._lazy_initialise_data(self._train_data,", "exc_traceback): self.within_context = False self._train_data = None self._test_data = None", "self._validation_data[0] @property @throw_error_outside_context def validation_data_y(self): \"\"\"Returns the Classifications for the", "@property @throw_error_outside_context def validation_data(self): \"\"\"Returns the Validation Data as pair", "\"\"\"Returns the Classifications for the Validation Data as array\"\"\" self._validation_data", "as array\"\"\" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[1] @property @throw_error_outside_context", "the Validation Data as an array of samples\"\"\" self._validation_data =", "Data as pair of arrays containing the samples x, and", "= False def process_x(self, raw_x_batch): \"\"\"Pre-processes each batch of features", "@property @throw_error_outside_context def 
test_data(self): \"\"\"Returns the Training Data as pair", "x, and classification y\"\"\" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data", "Validation Data as pair of arrays containing the samples x,", "ClientDataset.dataset_x inside a \"with statement\". \"\"\") else: value = func(self,", "the Classifications for the Test Data as array\"\"\" self._test_data =", "self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[0] @property @throw_error_outside_context def test_data_y(self):", "of memory. Please wrap the usage of ClientDataset.dataset_x inside a", "self._validation_data, DatasetComponents.VALIDATION) return self._validation_data @property @throw_error_outside_context def validation_data_x(self): \"\"\"Returns the", "DatasetComponents.VALIDATION) return self._validation_data[0] @property @throw_error_outside_context def validation_data_y(self): \"\"\"Returns the Classifications", "fed to the model.\"\"\" return self.client_dataset_processor.process_y(raw_y_batch) def _lazy_initialise_data(self, data, dataset_component:", "data leaks and bad use of memory. 
Please wrap the", "= self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data @property @throw_error_outside_context def test_data_x(self): \"\"\"Returns", "exc_value, exc_traceback): self.within_context = False self._train_data = None self._test_data =", "return self.client_dataset_processor.process_y(raw_y_batch) def _lazy_initialise_data(self, data, dataset_component: DatasetComponents): if data is", "return self.process_x(data[\"x\"]), self.process_y(data[\"y\"]) else: return data @property @throw_error_outside_context def training_data(self):", "ABC from sources.datasets.client_dataset_definitions.client_dataset_loaders.client_dataset_loader import ClientDatasetLoader, DatasetComponents from sources.datasets.client_dataset_definitions.client_dataset_processors.client_dataset_processor import ClientDatasetProcessor", "of labels before being fed to the model.\"\"\" return self.client_dataset_processor.process_y(raw_y_batch)", "self.within_context = True def __exit__(self, exc_type, exc_value, exc_traceback): self.within_context =", "y\"\"\" self._validation_data = self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return self._validation_data @property @throw_error_outside_context", "access client Dataset outside of context manager. 
This might lead", "not self.within_context: raise OutsideOfContextError( \"\"\"Error: Tried to access client Dataset", "DatasetComponents.TEST) return self._test_data[0] @property @throw_error_outside_context def test_data_y(self): \"\"\"Returns the Classifications", "test_data(self): \"\"\"Returns the Training Data as pair of arrays containing", "client_dataset_loader self.client_dataset_processor = client_dataset_processor self._train_data = None self._test_data = None", "array of samples\"\"\" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[0] @property", "return value return wrapper_decorator class ClientDataset(ABC): def __init__(self, client_identifier: str,", "DatasetComponents.TEST) return self._test_data @property @throw_error_outside_context def test_data_x(self): \"\"\"Returns the Test", "self.client_dataset_loader = client_dataset_loader self.client_dataset_processor = client_dataset_processor self._train_data = None self._test_data", "DatasetComponents.TEST) return self._test_data[1] @property @throw_error_outside_context def validation_data(self): \"\"\"Returns the Validation", "DatasetComponents from sources.datasets.client_dataset_definitions.client_dataset_processors.client_dataset_processor import ClientDatasetProcessor from sources.utils.exception_definitions import OutsideOfContextError def", "None self._test_data = None self._validation_data = None self.within_context = False", "use of memory. 
Please wrap the usage of ClientDataset.dataset_x inside", "the samples x, and classification y\"\"\" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN)", "self._train_data[1] @property @throw_error_outside_context def test_data(self): \"\"\"Returns the Training Data as", "self.client_dataset_processor = client_dataset_processor self._train_data = None self._test_data = None self._validation_data", "Please wrap the usage of ClientDataset.dataset_x inside a \"with statement\".", "import ClientDatasetLoader, DatasetComponents from sources.datasets.client_dataset_definitions.client_dataset_processors.client_dataset_processor import ClientDatasetProcessor from sources.utils.exception_definitions import", "client_dataset_processor: ClientDatasetProcessor, ): self.client_identifier = client_identifier self.client_dataset_loader = client_dataset_loader self.client_dataset_processor", "an array of samples\"\"\" self._validation_data = self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return", "@throw_error_outside_context def training_data(self): \"\"\"Returns the Training Data as pair of", "self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data @property @throw_error_outside_context def test_data_x(self): \"\"\"Returns the", "func(self, *args, **kwargs) return value return wrapper_decorator class ClientDataset(ABC): def", "= None self._validation_data = None self.within_context = False def process_x(self,", "None: data = self.client_dataset_loader.load_dataset(self.client_identifier, dataset_component) return self.process_x(data[\"x\"]), self.process_y(data[\"y\"]) else: return", "True def __exit__(self, exc_type, exc_value, exc_traceback): self.within_context = False self._train_data", "= self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[1] @property @throw_error_outside_context def test_data(self): \"\"\"Returns", "import 
ABC from sources.datasets.client_dataset_definitions.client_dataset_loaders.client_dataset_loader import ClientDatasetLoader, DatasetComponents from sources.datasets.client_dataset_definitions.client_dataset_processors.client_dataset_processor import", "self.client_dataset_processor.process_x(raw_x_batch) def process_y(self, raw_y_batch): \"\"\"Pre-processes each batch of labels before", "process_y(self, raw_y_batch): \"\"\"Pre-processes each batch of labels before being fed", "the Validation Data as pair of arrays containing the samples", "the Test Data as array\"\"\" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return", "def _lazy_initialise_data(self, data, dataset_component: DatasetComponents): if data is None: data", "self._test_data[0] @property @throw_error_outside_context def test_data_y(self): \"\"\"Returns the Classifications for the", "Data as an array of samples\"\"\" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST)", "and classification y\"\"\" self._validation_data = self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return self._validation_data", "throw_error_outside_context(func): @functools.wraps(func) def wrapper_decorator(self, *args, **kwargs): if not self.within_context: raise", "client Dataset outside of context manager. 
This might lead to", "self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[1] @property @throw_error_outside_context def validation_data(self): \"\"\"Returns the", "self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[0] @property @throw_error_outside_context def training_data_y(self):", "self._test_data = None self._validation_data = None self.within_context = False def", "exc_type, exc_value, exc_traceback): self.within_context = False self._train_data = None self._test_data", "to access client Dataset outside of context manager. This might", "for the Training Data as array\"\"\" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN)", "to the model.\"\"\" return self.client_dataset_processor.process_x(raw_x_batch) def process_y(self, raw_y_batch): \"\"\"Pre-processes each", "the Test Data as an array of samples\"\"\" self._test_data =", "data = self.client_dataset_loader.load_dataset(self.client_identifier, dataset_component) return self.process_x(data[\"x\"]), self.process_y(data[\"y\"]) else: return data", "self._validation_data @property @throw_error_outside_context def validation_data_x(self): \"\"\"Returns the Validation Data as", "manager. This might lead to data leaks and bad use", "context manager. 
This might lead to data leaks and bad", "dataset_component) return self.process_x(data[\"x\"]), self.process_y(data[\"y\"]) else: return data @property @throw_error_outside_context def", "= client_dataset_loader self.client_dataset_processor = client_dataset_processor self._train_data = None self._test_data =", "self.process_x(data[\"x\"]), self.process_y(data[\"y\"]) else: return data @property @throw_error_outside_context def training_data(self): \"\"\"Returns", "self._train_data = None self._test_data = None self._validation_data = None gc.collect()", "**kwargs) return value return wrapper_decorator class ClientDataset(ABC): def __init__(self, client_identifier:", "def test_data_x(self): \"\"\"Returns the Test Data as an array of", "is None: data = self.client_dataset_loader.load_dataset(self.client_identifier, dataset_component) return self.process_x(data[\"x\"]), self.process_y(data[\"y\"]) else:", "the Classifications for the Validation Data as array\"\"\" self._validation_data =", "lead to data leaks and bad use of memory. Please", "self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[1] @property @throw_error_outside_context def test_data(self): \"\"\"Returns the", "return self._test_data @property @throw_error_outside_context def test_data_x(self): \"\"\"Returns the Test Data", "y\"\"\" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data @property @throw_error_outside_context def", "self._validation_data[1] def __enter__(self): self.within_context = True def __exit__(self, exc_type, exc_value,", "def validation_data_x(self): \"\"\"Returns the Validation Data as an array of", "leaks and bad use of memory. 
Please wrap the usage", "return self._train_data[0] @property @throw_error_outside_context def training_data_y(self): \"\"\"Returns the Classifications for", "self._validation_data = None self.within_context = False def process_x(self, raw_x_batch): \"\"\"Pre-processes", "of samples\"\"\" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[0] @property @throw_error_outside_context", "wrap the usage of ClientDataset.dataset_x inside a \"with statement\". \"\"\")", "self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[1] def __enter__(self): self.within_context = True", "= True def __exit__(self, exc_type, exc_value, exc_traceback): self.within_context = False", "test_data_y(self): \"\"\"Returns the Classifications for the Test Data as array\"\"\"", "def test_data_y(self): \"\"\"Returns the Classifications for the Test Data as", "def __init__(self, client_identifier: str, client_dataset_loader: ClientDatasetLoader, client_dataset_processor: ClientDatasetProcessor, ): self.client_identifier", "of ClientDataset.dataset_x inside a \"with statement\". \"\"\") else: value =", "DatasetComponents.VALIDATION) return self._validation_data @property @throw_error_outside_context def validation_data_x(self): \"\"\"Returns the Validation", "def validation_data(self): \"\"\"Returns the Validation Data as pair of arrays", "the usage of ClientDataset.dataset_x inside a \"with statement\". 
\"\"\") else:", "def wrapper_decorator(self, *args, **kwargs): if not self.within_context: raise OutsideOfContextError( \"\"\"Error:", "= self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[0] @property @throw_error_outside_context def test_data_y(self): \"\"\"Returns", "before being fed to the model.\"\"\" return self.client_dataset_processor.process_y(raw_y_batch) def _lazy_initialise_data(self,", "y\"\"\" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data @property @throw_error_outside_context def", "data is None: data = self.client_dataset_loader.load_dataset(self.client_identifier, dataset_component) return self.process_x(data[\"x\"]), self.process_y(data[\"y\"])", "sources.datasets.client_dataset_definitions.client_dataset_processors.client_dataset_processor import ClientDatasetProcessor from sources.utils.exception_definitions import OutsideOfContextError def throw_error_outside_context(func): @functools.wraps(func)", "for the Test Data as array\"\"\" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST)", "ClientDatasetProcessor from sources.utils.exception_definitions import OutsideOfContextError def throw_error_outside_context(func): @functools.wraps(func) def wrapper_decorator(self,", "as an array of samples\"\"\" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return", "Classifications for the Validation Data as array\"\"\" self._validation_data = self._lazy_initialise_data(", "being fed to the model.\"\"\" return self.client_dataset_processor.process_y(raw_y_batch) def _lazy_initialise_data(self, data,", "OutsideOfContextError( \"\"\"Error: Tried to access client Dataset outside of context", "features before being fed to the model.\"\"\" return self.client_dataset_processor.process_x(raw_x_batch) def", "of context manager. 
This might lead to data leaks and", "= client_identifier self.client_dataset_loader = client_dataset_loader self.client_dataset_processor = client_dataset_processor self._train_data =", "OutsideOfContextError def throw_error_outside_context(func): @functools.wraps(func) def wrapper_decorator(self, *args, **kwargs): if not", "\"\"\"Returns the Classifications for the Test Data as array\"\"\" self._test_data", "of samples\"\"\" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[0] @property @throw_error_outside_context", "as array\"\"\" self._validation_data = self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[1] def", "Test Data as an array of samples\"\"\" self._test_data = self._lazy_initialise_data(self._test_data,", "self.process_y(data[\"y\"]) else: return data @property @throw_error_outside_context def training_data(self): \"\"\"Returns the", "samples x, and classification y\"\"\" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return", "model.\"\"\" return self.client_dataset_processor.process_x(raw_x_batch) def process_y(self, raw_y_batch): \"\"\"Pre-processes each batch of", "value return wrapper_decorator class ClientDataset(ABC): def __init__(self, client_identifier: str, client_dataset_loader:", "for the Validation Data as array\"\"\" self._validation_data = self._lazy_initialise_data( self._validation_data,", "self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return self._validation_data @property @throw_error_outside_context def validation_data_x(self): \"\"\"Returns", "from abc import ABC from sources.datasets.client_dataset_definitions.client_dataset_loaders.client_dataset_loader import ClientDatasetLoader, DatasetComponents from", "array\"\"\" self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[1] 
@property @throw_error_outside_context def", "the model.\"\"\" return self.client_dataset_processor.process_x(raw_x_batch) def process_y(self, raw_y_batch): \"\"\"Pre-processes each batch", "self.within_context: raise OutsideOfContextError( \"\"\"Error: Tried to access client Dataset outside", "bad use of memory. Please wrap the usage of ClientDataset.dataset_x", "self.within_context = False def process_x(self, raw_x_batch): \"\"\"Pre-processes each batch of", "self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[0] @property @throw_error_outside_context def test_data_y(self): \"\"\"Returns the", "pair of arrays containing the samples x, and classification y\"\"\"", "Data as array\"\"\" self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[1] @property", "self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[1] @property @throw_error_outside_context def test_data(self):", "self.within_context = False self._train_data = None self._test_data = None self._validation_data", "self._validation_data = self._lazy_initialise_data( self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[1] def __enter__(self): self.within_context", "@property @throw_error_outside_context def training_data_x(self): \"\"\"Returns the Training Data as an" ]
[ "not find frame '{0}' in model '{1}'\".format(framename, kinematics.robotGeometry.robotName)) return None", "robot kinematics. Thanks to gr.motions.ConnectedFramesInspector, an arbitrary relative pose between", "return None ee = kinematics.robotGeometry.framesModel.framesByName[ framename ] if not kinematics.framesConnectivity.hasRelativePose(ee,", "self.robotGeometry = geometry self.jointPoses = jointPoses self.baseFrame = geometry.framesModel.linkFrames[ geometry.connectivityModel.base", "framename not in kinematics.robotGeometry.framesModel.framesByName: logger.error(\"Could not find frame '{0}' in", "= geometry.framesModel.linkFrames[ geometry.connectivityModel.base ] allPoses = geometry.posesModel.mergeModel( jointPoses.jointPosesModel ) self.framesConnectivity", "framename): if framename not in kinematics.robotGeometry.framesModel.framesByName: logger.error(\"Could not find frame", "kinematics. Thanks to gr.motions.ConnectedFramesInspector, an arbitrary relative pose between two", "not seem to be connected\".format(framename)) return None poseSpec = kinematics.framesConnectivity.getPoseSpec(ee,", "kinematics.framesConnectivity.hasRelativePose(ee, kinematics.baseFrame): logger.error(\"Frame '{0}' and the base frame do not", "By merging the two, this class have access to the", ") return H def serializeToMotionDSLModel(robotKinematics, ostream): header =''' Model {modelname}", "to the full robot kinematics. 
Thanks to gr.motions.ConnectedFramesInspector, an arbitrary", "def __init__(self, geometry, jointPoses): self.robotGeometry = geometry self.jointPoses = jointPoses", "ostream.write('\\n') for cp in robotKinematics.robotGeometry.byPose.values() : text = motdsl.poseSpecToMotionDSLSnippet( cp", "as mxrepr import motiondsl.motiondsl as motdsl logger = logging.getLogger(__name__) class", "be connected\".format(framename)) return None poseSpec = kinematics.framesConnectivity.getPoseSpec(ee, kinematics.baseFrame) cotr =", "= kinematics.framesConnectivity.getPoseSpec(ee, kinematics.baseFrame) cotr = frommotions.toCoordinateTransform(poseSpec) H = mxrepr.hCoordinatesSymbolic(cotr) q", "find frame '{0}' in model '{1}'\".format(framename, kinematics.robotGeometry.robotName)) return None ee", "connected\".format(framename)) return None poseSpec = kinematics.framesConnectivity.getPoseSpec(ee, kinematics.baseFrame) cotr = frommotions.toCoordinateTransform(poseSpec)", "to be connected\".format(framename)) return None poseSpec = kinematics.framesConnectivity.getPoseSpec(ee, kinematics.baseFrame) cotr", "model. By merging the two, this class have access to", "logger = logging.getLogger(__name__) class RobotKinematics: '''The composition of the constant", "do not seem to be connected\".format(framename)) return None poseSpec =", "jp in robotKinematics.jointPoses.poseSpecByJoint.values(): text = motdsl.poseSpecToMotionDSLSnippet( jp ) ostream.write(text) ostream.write('\\n')", "This class is a simple aggregation of the geometry model", "geometry model and the joint-poses model. 
By merging the two,", "mxrepr import motiondsl.motiondsl as motdsl logger = logging.getLogger(__name__) class RobotKinematics:", "header =''' Model {modelname} Convention = currentFrame '''.format(modelname=robotKinematics.robotGeometry.robotName) ostream.write(header) for", "ee = kinematics.robotGeometry.framesModel.framesByName[ framename ] if not kinematics.framesConnectivity.hasRelativePose(ee, kinematics.baseFrame): logger.error(\"Frame", "geometry.posesModel.mergeModel( jointPoses.jointPosesModel ) self.framesConnectivity = motions.ConnectedFramesInspector(allPoses) def base_H_ee(kinematics, framename): if", "] allPoses = geometry.posesModel.mergeModel( jointPoses.jointPosesModel ) self.framesConnectivity = motions.ConnectedFramesInspector(allPoses) def", "'{0}' and the base frame do not seem to be", "frames on the robot can be obtained. ''' def __init__(self,", "joint-poses model. By merging the two, this class have access", "len(H.variables) ) H = H.setVariablesValue( valueslist=q ) return H def", "aggregation of the geometry model and the joint-poses model. By", "robot. 
This class is a simple aggregation of the geometry", "cp in robotKinematics.robotGeometry.byPose.values() : text = motdsl.poseSpecToMotionDSLSnippet( cp ) ostream.write(text)", "H.setVariablesValue( valueslist=q ) return H def serializeToMotionDSLModel(robotKinematics, ostream): header ='''", "'{1}'\".format(framename, kinematics.robotGeometry.robotName)) return None ee = kinematics.robotGeometry.framesModel.framesByName[ framename ] if", "kgprim.ct.repr.mxrepr as mxrepr import motiondsl.motiondsl as motdsl logger = logging.getLogger(__name__)", "not kinematics.framesConnectivity.hasRelativePose(ee, kinematics.baseFrame): logger.error(\"Frame '{0}' and the base frame do", "import kgprim.motions as motions import kgprim.ct.frommotions as frommotions import kgprim.ct.repr.mxrepr", "model '{1}'\".format(framename, kinematics.robotGeometry.robotName)) return None ee = kinematics.robotGeometry.framesModel.framesByName[ framename ]", "poses and the joint poses of a robot. This class", "arbitrary relative pose between two frames on the robot can", "have access to the full robot kinematics. Thanks to gr.motions.ConnectedFramesInspector,", "'{0}' in model '{1}'\".format(framename, kinematics.robotGeometry.robotName)) return None ee = kinematics.robotGeometry.framesModel.framesByName[", "kinematics.robotGeometry.framesModel.framesByName[ framename ] if not kinematics.framesConnectivity.hasRelativePose(ee, kinematics.baseFrame): logger.error(\"Frame '{0}' and", "for jp in robotKinematics.jointPoses.poseSpecByJoint.values(): text = motdsl.poseSpecToMotionDSLSnippet( jp ) ostream.write(text)", "robotKinematics.jointPoses.poseSpecByJoint.values(): text = motdsl.poseSpecToMotionDSLSnippet( jp ) ostream.write(text) ostream.write('\\n') for cp", "RobotKinematics: '''The composition of the constant poses and the joint", "the robot can be obtained. 
''' def __init__(self, geometry, jointPoses):", "None ee = kinematics.robotGeometry.framesModel.framesByName[ framename ] if not kinematics.framesConnectivity.hasRelativePose(ee, kinematics.baseFrame):", "logging.getLogger(__name__) class RobotKinematics: '''The composition of the constant poses and", "simple aggregation of the geometry model and the joint-poses model.", "the two, this class have access to the full robot", "jointPoses.jointPosesModel ) self.framesConnectivity = motions.ConnectedFramesInspector(allPoses) def base_H_ee(kinematics, framename): if framename", "full robot kinematics. Thanks to gr.motions.ConnectedFramesInspector, an arbitrary relative pose", "jp ) ostream.write(text) ostream.write('\\n') for cp in robotKinematics.robotGeometry.byPose.values() : text", "= motdsl.poseSpecToMotionDSLSnippet( jp ) ostream.write(text) ostream.write('\\n') for cp in robotKinematics.robotGeometry.byPose.values()", "in model '{1}'\".format(framename, kinematics.robotGeometry.robotName)) return None ee = kinematics.robotGeometry.framesModel.framesByName[ framename", "access to the full robot kinematics. Thanks to gr.motions.ConnectedFramesInspector, an", "kinematics.framesConnectivity.getPoseSpec(ee, kinematics.baseFrame) cotr = frommotions.toCoordinateTransform(poseSpec) H = mxrepr.hCoordinatesSymbolic(cotr) q =", "framename ] if not kinematics.framesConnectivity.hasRelativePose(ee, kinematics.baseFrame): logger.error(\"Frame '{0}' and the", "logger.error(\"Frame '{0}' and the base frame do not seem to", "as frommotions import kgprim.ct.repr.mxrepr as mxrepr import motiondsl.motiondsl as motdsl", "frame '{0}' in model '{1}'\".format(framename, kinematics.robotGeometry.robotName)) return None ee =", "kinematics.baseFrame): logger.error(\"Frame '{0}' and the base frame do not seem", "Thanks to gr.motions.ConnectedFramesInspector, an arbitrary relative pose between two frames", "of the geometry model and the joint-poses model. 
By merging", "jointPoses): self.robotGeometry = geometry self.jointPoses = jointPoses self.baseFrame = geometry.framesModel.linkFrames[", "two frames on the robot can be obtained. ''' def", "logger.error(\"Could not find frame '{0}' in model '{1}'\".format(framename, kinematics.robotGeometry.robotName)) return", "motdsl logger = logging.getLogger(__name__) class RobotKinematics: '''The composition of the", "merging the two, this class have access to the full", "for cp in robotKinematics.robotGeometry.byPose.values() : text = motdsl.poseSpecToMotionDSLSnippet( cp )", "obtained. ''' def __init__(self, geometry, jointPoses): self.robotGeometry = geometry self.jointPoses", "the joint-poses model. By merging the two, this class have", "gr.motions.ConnectedFramesInspector, an arbitrary relative pose between two frames on the", "''' def __init__(self, geometry, jointPoses): self.robotGeometry = geometry self.jointPoses =", "H def serializeToMotionDSLModel(robotKinematics, ostream): header =''' Model {modelname} Convention =", "a robot. This class is a simple aggregation of the", "this class have access to the full robot kinematics. 
Thanks", "import kgprim.ct.repr.mxrepr as mxrepr import motiondsl.motiondsl as motdsl logger =", "None poseSpec = kinematics.framesConnectivity.getPoseSpec(ee, kinematics.baseFrame) cotr = frommotions.toCoordinateTransform(poseSpec) H =", "= currentFrame '''.format(modelname=robotKinematics.robotGeometry.robotName) ostream.write(header) for jp in robotKinematics.jointPoses.poseSpecByJoint.values(): text =", "def base_H_ee(kinematics, framename): if framename not in kinematics.robotGeometry.framesModel.framesByName: logger.error(\"Could not", "'''The composition of the constant poses and the joint poses", "poseSpec = kinematics.framesConnectivity.getPoseSpec(ee, kinematics.baseFrame) cotr = frommotions.toCoordinateTransform(poseSpec) H = mxrepr.hCoordinatesSymbolic(cotr)", "'''.format(modelname=robotKinematics.robotGeometry.robotName) ostream.write(header) for jp in robotKinematics.jointPoses.poseSpecByJoint.values(): text = motdsl.poseSpecToMotionDSLSnippet( jp", "= kinematics.robotGeometry.framesModel.framesByName[ framename ] if not kinematics.framesConnectivity.hasRelativePose(ee, kinematics.baseFrame): logger.error(\"Frame '{0}'", "and the base frame do not seem to be connected\".format(framename))", "motiondsl.motiondsl as motdsl logger = logging.getLogger(__name__) class RobotKinematics: '''The composition", "__init__(self, geometry, jointPoses): self.robotGeometry = geometry self.jointPoses = jointPoses self.baseFrame", ") self.framesConnectivity = motions.ConnectedFramesInspector(allPoses) def base_H_ee(kinematics, framename): if framename not", "geometry.framesModel.linkFrames[ geometry.connectivityModel.base ] allPoses = geometry.posesModel.mergeModel( jointPoses.jointPosesModel ) self.framesConnectivity =", "not in kinematics.robotGeometry.framesModel.framesByName: logger.error(\"Could not find frame '{0}' in model", "{modelname} Convention = currentFrame '''.format(modelname=robotKinematics.robotGeometry.robotName) ostream.write(header) for jp in 
robotKinematics.jointPoses.poseSpecByJoint.values():", ") ostream.write(text) ostream.write('\\n') for cp in robotKinematics.robotGeometry.byPose.values() : text =", "kgprim.motions as motions import kgprim.ct.frommotions as frommotions import kgprim.ct.repr.mxrepr as", "mxrepr.hCoordinatesSymbolic(cotr) q = numpy.zeros( len(H.variables) ) H = H.setVariablesValue( valueslist=q", "numpy.zeros( len(H.variables) ) H = H.setVariablesValue( valueslist=q ) return H", "return None poseSpec = kinematics.framesConnectivity.getPoseSpec(ee, kinematics.baseFrame) cotr = frommotions.toCoordinateTransform(poseSpec) H", "cotr = frommotions.toCoordinateTransform(poseSpec) H = mxrepr.hCoordinatesSymbolic(cotr) q = numpy.zeros( len(H.variables)", "pose between two frames on the robot can be obtained.", "logging import numpy import kgprim.motions as motions import kgprim.ct.frommotions as", "the joint poses of a robot. This class is a", "= mxrepr.hCoordinatesSymbolic(cotr) q = numpy.zeros( len(H.variables) ) H = H.setVariablesValue(", "as motions import kgprim.ct.frommotions as frommotions import kgprim.ct.repr.mxrepr as mxrepr", "motions.ConnectedFramesInspector(allPoses) def base_H_ee(kinematics, framename): if framename not in kinematics.robotGeometry.framesModel.framesByName: logger.error(\"Could", "seem to be connected\".format(framename)) return None poseSpec = kinematics.framesConnectivity.getPoseSpec(ee, kinematics.baseFrame)", "self.baseFrame = geometry.framesModel.linkFrames[ geometry.connectivityModel.base ] allPoses = geometry.posesModel.mergeModel( jointPoses.jointPosesModel )", "jointPoses self.baseFrame = geometry.framesModel.linkFrames[ geometry.connectivityModel.base ] allPoses = geometry.posesModel.mergeModel( jointPoses.jointPosesModel", "if framename not in kinematics.robotGeometry.framesModel.framesByName: logger.error(\"Could not find frame '{0}'", "in robotKinematics.jointPoses.poseSpecByJoint.values(): text = motdsl.poseSpecToMotionDSLSnippet( jp ) 
ostream.write(text) ostream.write('\\n') for", "currentFrame '''.format(modelname=robotKinematics.robotGeometry.robotName) ostream.write(header) for jp in robotKinematics.jointPoses.poseSpecByJoint.values(): text = motdsl.poseSpecToMotionDSLSnippet(", ") H = H.setVariablesValue( valueslist=q ) return H def serializeToMotionDSLModel(robotKinematics,", "model and the joint-poses model. By merging the two, this", "and the joint poses of a robot. This class is", "poses of a robot. This class is a simple aggregation", "Convention = currentFrame '''.format(modelname=robotKinematics.robotGeometry.robotName) ostream.write(header) for jp in robotKinematics.jointPoses.poseSpecByJoint.values(): text", "and the joint-poses model. By merging the two, this class", "return H def serializeToMotionDSLModel(robotKinematics, ostream): header =''' Model {modelname} Convention", "class RobotKinematics: '''The composition of the constant poses and the", "= geometry.posesModel.mergeModel( jointPoses.jointPosesModel ) self.framesConnectivity = motions.ConnectedFramesInspector(allPoses) def base_H_ee(kinematics, framename):", "Model {modelname} Convention = currentFrame '''.format(modelname=robotKinematics.robotGeometry.robotName) ostream.write(header) for jp in", "the full robot kinematics. 
Thanks to gr.motions.ConnectedFramesInspector, an arbitrary relative", "import numpy import kgprim.motions as motions import kgprim.ct.frommotions as frommotions", "motions import kgprim.ct.frommotions as frommotions import kgprim.ct.repr.mxrepr as mxrepr import", "kgprim.ct.frommotions as frommotions import kgprim.ct.repr.mxrepr as mxrepr import motiondsl.motiondsl as", "a simple aggregation of the geometry model and the joint-poses", "the base frame do not seem to be connected\".format(framename)) return", "kinematics.robotGeometry.robotName)) return None ee = kinematics.robotGeometry.framesModel.framesByName[ framename ] if not", "numpy import kgprim.motions as motions import kgprim.ct.frommotions as frommotions import", "on the robot can be obtained. ''' def __init__(self, geometry,", "of the constant poses and the joint poses of a", "def serializeToMotionDSLModel(robotKinematics, ostream): header =''' Model {modelname} Convention = currentFrame", "relative pose between two frames on the robot can be", "ostream.write(text) ostream.write('\\n') for cp in robotKinematics.robotGeometry.byPose.values() : text = motdsl.poseSpecToMotionDSLSnippet(", "class have access to the full robot kinematics. 
Thanks to", "= frommotions.toCoordinateTransform(poseSpec) H = mxrepr.hCoordinatesSymbolic(cotr) q = numpy.zeros( len(H.variables) )", "= geometry self.jointPoses = jointPoses self.baseFrame = geometry.framesModel.linkFrames[ geometry.connectivityModel.base ]", "frame do not seem to be connected\".format(framename)) return None poseSpec", "q = numpy.zeros( len(H.variables) ) H = H.setVariablesValue( valueslist=q )", "as motdsl logger = logging.getLogger(__name__) class RobotKinematics: '''The composition of", "frommotions import kgprim.ct.repr.mxrepr as mxrepr import motiondsl.motiondsl as motdsl logger", "H = mxrepr.hCoordinatesSymbolic(cotr) q = numpy.zeros( len(H.variables) ) H =", "text = motdsl.poseSpecToMotionDSLSnippet( jp ) ostream.write(text) ostream.write('\\n') for cp in", "if not kinematics.framesConnectivity.hasRelativePose(ee, kinematics.baseFrame): logger.error(\"Frame '{0}' and the base frame", "= numpy.zeros( len(H.variables) ) H = H.setVariablesValue( valueslist=q ) return", "H = H.setVariablesValue( valueslist=q ) return H def serializeToMotionDSLModel(robotKinematics, ostream):", "motdsl.poseSpecToMotionDSLSnippet( jp ) ostream.write(text) ostream.write('\\n') for cp in robotKinematics.robotGeometry.byPose.values() :", "geometry self.jointPoses = jointPoses self.baseFrame = geometry.framesModel.linkFrames[ geometry.connectivityModel.base ] allPoses", "geometry.connectivityModel.base ] allPoses = geometry.posesModel.mergeModel( jointPoses.jointPosesModel ) self.framesConnectivity = motions.ConnectedFramesInspector(allPoses)", "two, this class have access to the full robot kinematics.", "kinematics.baseFrame) cotr = frommotions.toCoordinateTransform(poseSpec) H = mxrepr.hCoordinatesSymbolic(cotr) q = numpy.zeros(", "in robotKinematics.robotGeometry.byPose.values() : text = motdsl.poseSpecToMotionDSLSnippet( cp ) ostream.write(text) ostream.write('\\n')", "import motiondsl.motiondsl as motdsl logger = logging.getLogger(__name__) class 
RobotKinematics: '''The", "an arbitrary relative pose between two frames on the robot", "import kgprim.ct.frommotions as frommotions import kgprim.ct.repr.mxrepr as mxrepr import motiondsl.motiondsl", "] if not kinematics.framesConnectivity.hasRelativePose(ee, kinematics.baseFrame): logger.error(\"Frame '{0}' and the base", "kinematics.robotGeometry.framesModel.framesByName: logger.error(\"Could not find frame '{0}' in model '{1}'\".format(framename, kinematics.robotGeometry.robotName))", "the geometry model and the joint-poses model. By merging the", "geometry, jointPoses): self.robotGeometry = geometry self.jointPoses = jointPoses self.baseFrame =", "in kinematics.robotGeometry.framesModel.framesByName: logger.error(\"Could not find frame '{0}' in model '{1}'\".format(framename,", "serializeToMotionDSLModel(robotKinematics, ostream): header =''' Model {modelname} Convention = currentFrame '''.format(modelname=robotKinematics.robotGeometry.robotName)", "can be obtained. ''' def __init__(self, geometry, jointPoses): self.robotGeometry =", "= logging.getLogger(__name__) class RobotKinematics: '''The composition of the constant poses", "self.jointPoses = jointPoses self.baseFrame = geometry.framesModel.linkFrames[ geometry.connectivityModel.base ] allPoses =", "of a robot. This class is a simple aggregation of", "joint poses of a robot. 
This class is a simple", "= jointPoses self.baseFrame = geometry.framesModel.linkFrames[ geometry.connectivityModel.base ] allPoses = geometry.posesModel.mergeModel(", "= motions.ConnectedFramesInspector(allPoses) def base_H_ee(kinematics, framename): if framename not in kinematics.robotGeometry.framesModel.framesByName:", "=''' Model {modelname} Convention = currentFrame '''.format(modelname=robotKinematics.robotGeometry.robotName) ostream.write(header) for jp", "= H.setVariablesValue( valueslist=q ) return H def serializeToMotionDSLModel(robotKinematics, ostream): header", "self.framesConnectivity = motions.ConnectedFramesInspector(allPoses) def base_H_ee(kinematics, framename): if framename not in", "allPoses = geometry.posesModel.mergeModel( jointPoses.jointPosesModel ) self.framesConnectivity = motions.ConnectedFramesInspector(allPoses) def base_H_ee(kinematics,", "import logging import numpy import kgprim.motions as motions import kgprim.ct.frommotions", "base frame do not seem to be connected\".format(framename)) return None", "robot can be obtained. ''' def __init__(self, geometry, jointPoses): self.robotGeometry", "ostream): header =''' Model {modelname} Convention = currentFrame '''.format(modelname=robotKinematics.robotGeometry.robotName) ostream.write(header)", "be obtained. ''' def __init__(self, geometry, jointPoses): self.robotGeometry = geometry", "between two frames on the robot can be obtained. 
'''", "<reponame>mfrigerio17/robot-model-tools import logging import numpy import kgprim.motions as motions import", "the constant poses and the joint poses of a robot.", "valueslist=q ) return H def serializeToMotionDSLModel(robotKinematics, ostream): header =''' Model", "to gr.motions.ConnectedFramesInspector, an arbitrary relative pose between two frames on", "composition of the constant poses and the joint poses of", "base_H_ee(kinematics, framename): if framename not in kinematics.robotGeometry.framesModel.framesByName: logger.error(\"Could not find", "class is a simple aggregation of the geometry model and", "frommotions.toCoordinateTransform(poseSpec) H = mxrepr.hCoordinatesSymbolic(cotr) q = numpy.zeros( len(H.variables) ) H", "is a simple aggregation of the geometry model and the", "ostream.write(header) for jp in robotKinematics.jointPoses.poseSpecByJoint.values(): text = motdsl.poseSpecToMotionDSLSnippet( jp )", "constant poses and the joint poses of a robot. This" ]
[ "django.core.management.base import BaseCommand from django.db import connection as django_connection from", "consumer = AWXConsumer( 'dispatcher', conn, TaskWorker(), queues, AutoscalePool(min_workers=4) ) consumer.run()", "conn, TaskWorker(), queues, AutoscalePool(min_workers=4) ) consumer.run() except KeyboardInterrupt: logger.debug('Terminating Task", "import Celery from celery.beat import PersistentScheduler from celery.apps import beat", "= task.apply_async() class TaskResult(object): id = result['uuid'] return TaskResult() app", "handle(self, *arg, **options): if options.get('status'): print Control('dispatcher').status() return if options.get('running'):", "from awx.main.dispatch.worker import AWXConsumer, TaskWorker logger = logging.getLogger('awx.main.dispatch') def construct_bcast_queue_name(common_name):", "from django.core.cache import cache as django_cache from django.core.management.base import BaseCommand", "state of any running dispatchers') parser.add_argument('--running', dest='running', action='store_true', help='print the", "Exchange(q), routing_key=q) for q in (settings.AWX_CELERY_QUEUES_STATIC + [get_local_queuename()]) ] queues.append(", "**kwargs): if os.getppid() != self.ppid: # if the parent PID", "AWXConsumer( 'dispatcher', conn, TaskWorker(), queues, AutoscalePool(min_workers=4) ) consumer.run() except KeyboardInterrupt:", "AWXConsumer, TaskWorker logger = logging.getLogger('awx.main.dispatch') def construct_bcast_queue_name(common_name): return common_name.encode('utf8') +", "'reload'}) # It's important to close these because we're _about_", "# for the DB and memcached connections (that way lies", "as conn: try: bcast = 'tower_broadcast_all' queues = [ Queue(q,", "(that way lies race conditions) django_connection.close() django_cache.close() beat = Process(target=self.beat)", "lies race conditions) django_connection.close() django_cache.close() beat = Process(target=self.beat) beat.daemon =", "def tick(self, *args, **kwargs): if 
os.getppid() != self.ppid: # if", "from awx.main.dispatch.pool import AutoscalePool from awx.main.dispatch.worker import AWXConsumer, TaskWorker logger", "jobs will run to completion first')) def beat(self): from celery", "import Process from django.conf import settings from django.core.cache import cache", "race conditions) django_connection.close() django_cache.close() beat = Process(target=self.beat) beat.daemon = True", "awx.main.dispatch.control import Control from awx.main.dispatch.pool import AutoscalePool from awx.main.dispatch.worker import", "from django.core.management.base import BaseCommand from django.db import connection as django_connection", "django_connection.close() django_cache.close() beat = Process(target=self.beat) beat.daemon = True beat.start() reaper.reap()", "logging from multiprocessing import Process from django.conf import settings from", "setup_schedule(self): super(AWXScheduler, self).setup_schedule() self.update_from_dict(settings.CELERYBEAT_SCHEDULE) def tick(self, *args, **kwargs): if os.getppid()", "if options.get('reload'): return Control('dispatcher').control({'control': 'reload'}) # It's important to close", "this process has been orphaned # via e.g., segfault or", "of any running dispatchers') parser.add_argument('--running', dest='running', action='store_true', help='print the UUIDs", "the dispatcher to recycle all of its worker processes;' 'running", "connection as django_connection from kombu import Connection, Exchange, Queue from", "of its worker processes;' 'running jobs will run to completion", "import BaseCommand from django.db import connection as django_connection from kombu", "It's important to close these because we're _about_ to fork,", "fork, and we # don't want the forked processes to", "Queue( construct_bcast_queue_name(bcast), exchange=Exchange(bcast, type='fanout'), routing_key=bcast, reply=True ) ) consumer =", "multiprocessing import Process from django.conf import settings from django.core.cache import", 
"super(AWXScheduler, self).__init__(*args, **kwargs) def setup_schedule(self): super(AWXScheduler, self).setup_schedule() self.update_from_dict(settings.CELERYBEAT_SCHEDULE) def tick(self,", "self).setup_schedule() self.update_from_dict(settings.CELERYBEAT_SCHEDULE) def tick(self, *args, **kwargs): if os.getppid() != self.ppid:", "dispatcher') parser.add_argument('--reload', dest='reload', action='store_true', help=('cause the dispatcher to recycle all", "as django_connection from kombu import Connection, Exchange, Queue from awx.main.dispatch", "celery.apps import beat class AWXScheduler(PersistentScheduler): def __init__(self, *args, **kwargs): self.ppid", "TaskWorker.resolve_callable(entry.task) result, queue = task.apply_async() class TaskResult(object): id = result['uuid']", "def handle(self, *arg, **options): if options.get('status'): print Control('dispatcher').status() return if", "apply_async(self, entry, producer=None, advance=True, **kwargs): task = TaskWorker.resolve_callable(entry.task) result, queue", "parser.add_argument('--reload', dest='reload', action='store_true', help=('cause the dispatcher to recycle all of", "os import logging from multiprocessing import Process from django.conf import", "task dispatcher' def add_arguments(self, parser): parser.add_argument('--status', dest='status', action='store_true', help='print the", "django_connection from kombu import Connection, Exchange, Queue from awx.main.dispatch import", "cache as django_cache from django.core.management.base import BaseCommand from django.db import", "30, app, schedule='/var/lib/awx/beat.db', scheduler_cls=AWXScheduler ).run() def handle(self, *arg, **options): if", "awx.main.dispatch.worker import AWXConsumer, TaskWorker logger = logging.getLogger('awx.main.dispatch') def construct_bcast_queue_name(common_name): return", "close these because we're _about_ to fork, and we #", "class AWXScheduler(PersistentScheduler): def __init__(self, *args, **kwargs): self.ppid = os.getppid() 
super(AWXScheduler,", "def setup_schedule(self): super(AWXScheduler, self).setup_schedule() self.update_from_dict(settings.CELERYBEAT_SCHEDULE) def tick(self, *args, **kwargs): if", "import PersistentScheduler from celery.apps import beat class AWXScheduler(PersistentScheduler): def __init__(self,", "and we # don't want the forked processes to inherit", ") ) consumer = AWXConsumer( 'dispatcher', conn, TaskWorker(), queues, AutoscalePool(min_workers=4)", "super(AWXScheduler, self).tick(*args, **kwargs) def apply_async(self, entry, producer=None, advance=True, **kwargs): task", "exchange=Exchange(bcast, type='fanout'), routing_key=bcast, reply=True ) ) consumer = AWXConsumer( 'dispatcher',", "self).tick(*args, **kwargs) def apply_async(self, entry, producer=None, advance=True, **kwargs): task =", "def construct_bcast_queue_name(common_name): return common_name.encode('utf8') + '_' + settings.CLUSTER_HOST_ID class Command(BaseCommand):", "import settings from django.core.cache import cache as django_cache from django.core.management.base", "django_cache.close() beat = Process(target=self.beat) beat.daemon = True beat.start() reaper.reap() consumer", "action='store_true', help='print the internal state of any running dispatchers') parser.add_argument('--running',", "advance=True, **kwargs): task = TaskWorker.resolve_callable(entry.task) result, queue = task.apply_async() class", "result['uuid'] return TaskResult() app = Celery() app.conf.BROKER_URL = settings.BROKER_URL app.conf.CELERY_TASK_RESULT_EXPIRES", "from celery.apps import beat class AWXScheduler(PersistentScheduler): def __init__(self, *args, **kwargs):", "] queues.append( Queue( construct_bcast_queue_name(bcast), exchange=Exchange(bcast, type='fanout'), routing_key=bcast, reply=True ) )", "from kombu import Connection, Exchange, Queue from awx.main.dispatch import get_local_queuename,", "= settings.BROKER_URL app.conf.CELERY_TASK_RESULT_EXPIRES = False beat.Beat( 30, app, schedule='/var/lib/awx/beat.db', 
scheduler_cls=AWXScheduler", "django.db import connection as django_connection from kombu import Connection, Exchange,", "# via e.g., segfault or sigkill, we should exit too", "worker processes;' 'running jobs will run to completion first')) def", "all of its worker processes;' 'running jobs will run to", "type='fanout'), routing_key=bcast, reply=True ) ) consumer = AWXConsumer( 'dispatcher', conn,", "app, schedule='/var/lib/awx/beat.db', scheduler_cls=AWXScheduler ).run() def handle(self, *arg, **options): if options.get('status'):", "'dispatcher', conn, TaskWorker(), queues, AutoscalePool(min_workers=4) ) consumer.run() except KeyboardInterrupt: logger.debug('Terminating", "= TaskWorker.resolve_callable(entry.task) result, queue = task.apply_async() class TaskResult(object): id =", "beat.daemon = True beat.start() reaper.reap() consumer = None with Connection(settings.BROKER_URL)", "these because we're _about_ to fork, and we # don't", "dest='reload', action='store_true', help=('cause the dispatcher to recycle all of its", "# Copyright (c) 2015 Ansible, Inc. 
# All Rights Reserved.", "processes to inherit the open sockets # for the DB", "from django.conf import settings from django.core.cache import cache as django_cache", "should exit too raise SystemExit() return super(AWXScheduler, self).tick(*args, **kwargs) def", "parser.add_argument('--status', dest='status', action='store_true', help='print the internal state of any running", "awx.main.dispatch.pool import AutoscalePool from awx.main.dispatch.worker import AWXConsumer, TaskWorker logger =", "*args, **kwargs): self.ppid = os.getppid() super(AWXScheduler, self).__init__(*args, **kwargs) def setup_schedule(self):", "processes;' 'running jobs will run to completion first')) def beat(self):", "any tasked managed by this dispatcher') parser.add_argument('--reload', dest='reload', action='store_true', help=('cause", "scheduler_cls=AWXScheduler ).run() def handle(self, *arg, **options): if options.get('status'): print Control('dispatcher').status()", "settings.CLUSTER_HOST_ID class Command(BaseCommand): help = 'Launch the task dispatcher' def", "Control('dispatcher').control({'control': 'reload'}) # It's important to close these because we're", "return TaskResult() app = Celery() app.conf.BROKER_URL = settings.BROKER_URL app.conf.CELERY_TASK_RESULT_EXPIRES =", "to close these because we're _about_ to fork, and we", "print Control('dispatcher').status() return if options.get('running'): print Control('dispatcher').running() return if options.get('reload'):", "or sigkill, we should exit too raise SystemExit() return super(AWXScheduler,", "SystemExit() return super(AWXScheduler, self).tick(*args, **kwargs) def apply_async(self, entry, producer=None, advance=True,", "the UUIDs of any tasked managed by this dispatcher') parser.add_argument('--reload',", "'Launch the task dispatcher' def add_arguments(self, parser): parser.add_argument('--status', dest='status', action='store_true',", "= result['uuid'] return TaskResult() app = Celery() app.conf.BROKER_URL = 
settings.BROKER_URL", "if options.get('running'): print Control('dispatcher').running() return if options.get('reload'): return Control('dispatcher').control({'control': 'reload'})", "e.g., segfault or sigkill, we should exit too raise SystemExit()", "this dispatcher') parser.add_argument('--reload', dest='reload', action='store_true', help=('cause the dispatcher to recycle", "*arg, **options): if options.get('status'): print Control('dispatcher').status() return if options.get('running'): print", "import cache as django_cache from django.core.management.base import BaseCommand from django.db", "Process(target=self.beat) beat.daemon = True beat.start() reaper.reap() consumer = None with", "task.apply_async() class TaskResult(object): id = result['uuid'] return TaskResult() app =", "AutoscalePool from awx.main.dispatch.worker import AWXConsumer, TaskWorker logger = logging.getLogger('awx.main.dispatch') def", "+ '_' + settings.CLUSTER_HOST_ID class Command(BaseCommand): help = 'Launch the", "Celery from celery.beat import PersistentScheduler from celery.apps import beat class", "__init__(self, *args, **kwargs): self.ppid = os.getppid() super(AWXScheduler, self).__init__(*args, **kwargs) def", "import get_local_queuename, reaper from awx.main.dispatch.control import Control from awx.main.dispatch.pool import", "from celery import Celery from celery.beat import PersistentScheduler from celery.apps", "os.getppid() != self.ppid: # if the parent PID changes, this", "settings from django.core.cache import cache as django_cache from django.core.management.base import", "2015 Ansible, Inc. # All Rights Reserved. 
import os import", "**options): if options.get('status'): print Control('dispatcher').status() return if options.get('running'): print Control('dispatcher').running()", "raise SystemExit() return super(AWXScheduler, self).tick(*args, **kwargs) def apply_async(self, entry, producer=None,", "Exchange, Queue from awx.main.dispatch import get_local_queuename, reaper from awx.main.dispatch.control import", "self.update_from_dict(settings.CELERYBEAT_SCHEDULE) def tick(self, *args, **kwargs): if os.getppid() != self.ppid: #", "id = result['uuid'] return TaskResult() app = Celery() app.conf.BROKER_URL =", "has been orphaned # via e.g., segfault or sigkill, we", "dispatchers') parser.add_argument('--running', dest='running', action='store_true', help='print the UUIDs of any tasked", "in (settings.AWX_CELERY_QUEUES_STATIC + [get_local_queuename()]) ] queues.append( Queue( construct_bcast_queue_name(bcast), exchange=Exchange(bcast, type='fanout'),", "from multiprocessing import Process from django.conf import settings from django.core.cache", "return common_name.encode('utf8') + '_' + settings.CLUSTER_HOST_ID class Command(BaseCommand): help =", "parser.add_argument('--running', dest='running', action='store_true', help='print the UUIDs of any tasked managed", "conditions) django_connection.close() django_cache.close() beat = Process(target=self.beat) beat.daemon = True beat.start()", "DB and memcached connections (that way lies race conditions) django_connection.close()", "the internal state of any running dispatchers') parser.add_argument('--running', dest='running', action='store_true',", "Rights Reserved. 
import os import logging from multiprocessing import Process", "reaper from awx.main.dispatch.control import Control from awx.main.dispatch.pool import AutoscalePool from", "import beat class AWXScheduler(PersistentScheduler): def __init__(self, *args, **kwargs): self.ppid =", "Connection(settings.BROKER_URL) as conn: try: bcast = 'tower_broadcast_all' queues = [", "tasked managed by this dispatcher') parser.add_argument('--reload', dest='reload', action='store_true', help=('cause the", "process has been orphaned # via e.g., segfault or sigkill,", "construct_bcast_queue_name(bcast), exchange=Exchange(bcast, type='fanout'), routing_key=bcast, reply=True ) ) consumer = AWXConsumer(", "Copyright (c) 2015 Ansible, Inc. # All Rights Reserved. import", "django.core.cache import cache as django_cache from django.core.management.base import BaseCommand from", "**kwargs) def apply_async(self, entry, producer=None, advance=True, **kwargs): task = TaskWorker.resolve_callable(entry.task)", "**kwargs): task = TaskWorker.resolve_callable(entry.task) result, queue = task.apply_async() class TaskResult(object):", "for the DB and memcached connections (that way lies race", "help='print the internal state of any running dispatchers') parser.add_argument('--running', dest='running',", "return if options.get('running'): print Control('dispatcher').running() return if options.get('reload'): return Control('dispatcher').control({'control':", "= logging.getLogger('awx.main.dispatch') def construct_bcast_queue_name(common_name): return common_name.encode('utf8') + '_' + settings.CLUSTER_HOST_ID", "import logging from multiprocessing import Process from django.conf import settings", "TaskWorker(), queues, AutoscalePool(min_workers=4) ) consumer.run() except KeyboardInterrupt: logger.debug('Terminating Task Dispatcher')", "+ settings.CLUSTER_HOST_ID class Command(BaseCommand): help = 'Launch the task dispatcher'", "Ansible, Inc. # All Rights Reserved. 
import os import logging", "= Process(target=self.beat) beat.daemon = True beat.start() reaper.reap() consumer = None", "help = 'Launch the task dispatcher' def add_arguments(self, parser): parser.add_argument('--status',", "help='print the UUIDs of any tasked managed by this dispatcher')", "managed by this dispatcher') parser.add_argument('--reload', dest='reload', action='store_true', help=('cause the dispatcher", "AWXScheduler(PersistentScheduler): def __init__(self, *args, **kwargs): self.ppid = os.getppid() super(AWXScheduler, self).__init__(*args,", "<filename>awx/main/management/commands/run_dispatcher.py # Copyright (c) 2015 Ansible, Inc. # All Rights", "run to completion first')) def beat(self): from celery import Celery", "super(AWXScheduler, self).setup_schedule() self.update_from_dict(settings.CELERYBEAT_SCHEDULE) def tick(self, *args, **kwargs): if os.getppid() !=", "AutoscalePool(min_workers=4) ) consumer.run() except KeyboardInterrupt: logger.debug('Terminating Task Dispatcher') if consumer:", "**kwargs) def setup_schedule(self): super(AWXScheduler, self).setup_schedule() self.update_from_dict(settings.CELERYBEAT_SCHEDULE) def tick(self, *args, **kwargs):", "connections (that way lies race conditions) django_connection.close() django_cache.close() beat =", "way lies race conditions) django_connection.close() django_cache.close() beat = Process(target=self.beat) beat.daemon", "'running jobs will run to completion first')) def beat(self): from", "beat.start() reaper.reap() consumer = None with Connection(settings.BROKER_URL) as conn: try:", "routing_key=bcast, reply=True ) ) consumer = AWXConsumer( 'dispatcher', conn, TaskWorker(),", "app.conf.BROKER_URL = settings.BROKER_URL app.conf.CELERY_TASK_RESULT_EXPIRES = False beat.Beat( 30, app, schedule='/var/lib/awx/beat.db',", "result, queue = task.apply_async() class TaskResult(object): id = result['uuid'] return", "All Rights Reserved. 
import os import logging from multiprocessing import", "to recycle all of its worker processes;' 'running jobs will", "the open sockets # for the DB and memcached connections", "running dispatchers') parser.add_argument('--running', dest='running', action='store_true', help='print the UUIDs of any", "the DB and memcached connections (that way lies race conditions)", "if options.get('status'): print Control('dispatcher').status() return if options.get('running'): print Control('dispatcher').running() return", "Celery() app.conf.BROKER_URL = settings.BROKER_URL app.conf.CELERY_TASK_RESULT_EXPIRES = False beat.Beat( 30, app,", "sigkill, we should exit too raise SystemExit() return super(AWXScheduler, self).tick(*args,", "will run to completion first')) def beat(self): from celery import", "= True beat.start() reaper.reap() consumer = None with Connection(settings.BROKER_URL) as", "celery.beat import PersistentScheduler from celery.apps import beat class AWXScheduler(PersistentScheduler): def", "for q in (settings.AWX_CELERY_QUEUES_STATIC + [get_local_queuename()]) ] queues.append( Queue( construct_bcast_queue_name(bcast),", "action='store_true', help=('cause the dispatcher to recycle all of its worker", "internal state of any running dispatchers') parser.add_argument('--running', dest='running', action='store_true', help='print", "import Connection, Exchange, Queue from awx.main.dispatch import get_local_queuename, reaper from", "!= self.ppid: # if the parent PID changes, this process", "app.conf.CELERY_TASK_RESULT_EXPIRES = False beat.Beat( 30, app, schedule='/var/lib/awx/beat.db', scheduler_cls=AWXScheduler ).run() def", "'tower_broadcast_all' queues = [ Queue(q, Exchange(q), routing_key=q) for q in", "if os.getppid() != self.ppid: # if the parent PID changes,", "import Control from awx.main.dispatch.pool import AutoscalePool from awx.main.dispatch.worker import AWXConsumer,", "too raise SystemExit() return super(AWXScheduler, self).tick(*args, **kwargs) def 
apply_async(self, entry,", "= os.getppid() super(AWXScheduler, self).__init__(*args, **kwargs) def setup_schedule(self): super(AWXScheduler, self).setup_schedule() self.update_from_dict(settings.CELERYBEAT_SCHEDULE)", "via e.g., segfault or sigkill, we should exit too raise", "logger = logging.getLogger('awx.main.dispatch') def construct_bcast_queue_name(common_name): return common_name.encode('utf8') + '_' +", "Control('dispatcher').status() return if options.get('running'): print Control('dispatcher').running() return if options.get('reload'): return", "first')) def beat(self): from celery import Celery from celery.beat import", "import AutoscalePool from awx.main.dispatch.worker import AWXConsumer, TaskWorker logger = logging.getLogger('awx.main.dispatch')", "self.ppid: # if the parent PID changes, this process has", "Control from awx.main.dispatch.pool import AutoscalePool from awx.main.dispatch.worker import AWXConsumer, TaskWorker", "awx.main.dispatch import get_local_queuename, reaper from awx.main.dispatch.control import Control from awx.main.dispatch.pool", "[get_local_queuename()]) ] queues.append( Queue( construct_bcast_queue_name(bcast), exchange=Exchange(bcast, type='fanout'), routing_key=bcast, reply=True )", "q in (settings.AWX_CELERY_QUEUES_STATIC + [get_local_queuename()]) ] queues.append( Queue( construct_bcast_queue_name(bcast), exchange=Exchange(bcast,", "segfault or sigkill, we should exit too raise SystemExit() return", "queues = [ Queue(q, Exchange(q), routing_key=q) for q in (settings.AWX_CELERY_QUEUES_STATIC", "class TaskResult(object): id = result['uuid'] return TaskResult() app = Celery()", "def beat(self): from celery import Celery from celery.beat import PersistentScheduler", "sockets # for the DB and memcached connections (that way", "UUIDs of any tasked managed by this dispatcher') parser.add_argument('--reload', dest='reload',", "[ Queue(q, Exchange(q), routing_key=q) for q in (settings.AWX_CELERY_QUEUES_STATIC + 
[get_local_queuename()])", "= Celery() app.conf.BROKER_URL = settings.BROKER_URL app.conf.CELERY_TASK_RESULT_EXPIRES = False beat.Beat( 30,", "logging.getLogger('awx.main.dispatch') def construct_bcast_queue_name(common_name): return common_name.encode('utf8') + '_' + settings.CLUSTER_HOST_ID class", "orphaned # via e.g., segfault or sigkill, we should exit", "any running dispatchers') parser.add_argument('--running', dest='running', action='store_true', help='print the UUIDs of", "inherit the open sockets # for the DB and memcached", "want the forked processes to inherit the open sockets #", "Control('dispatcher').running() return if options.get('reload'): return Control('dispatcher').control({'control': 'reload'}) # It's important", "options.get('status'): print Control('dispatcher').status() return if options.get('running'): print Control('dispatcher').running() return if", "beat = Process(target=self.beat) beat.daemon = True beat.start() reaper.reap() consumer =", "forked processes to inherit the open sockets # for the", "celery import Celery from celery.beat import PersistentScheduler from celery.apps import", "been orphaned # via e.g., segfault or sigkill, we should", "(settings.AWX_CELERY_QUEUES_STATIC + [get_local_queuename()]) ] queues.append( Queue( construct_bcast_queue_name(bcast), exchange=Exchange(bcast, type='fanout'), routing_key=bcast,", "Queue(q, Exchange(q), routing_key=q) for q in (settings.AWX_CELERY_QUEUES_STATIC + [get_local_queuename()]) ]", "task = TaskWorker.resolve_callable(entry.task) result, queue = task.apply_async() class TaskResult(object): id", "bcast = 'tower_broadcast_all' queues = [ Queue(q, Exchange(q), routing_key=q) for", "import connection as django_connection from kombu import Connection, Exchange, Queue", "django.conf import settings from django.core.cache import cache as django_cache from", "Process from django.conf import settings from django.core.cache import cache as", "beat.Beat( 30, app, 
schedule='/var/lib/awx/beat.db', scheduler_cls=AWXScheduler ).run() def handle(self, *arg, **options):", "options.get('reload'): return Control('dispatcher').control({'control': 'reload'}) # It's important to close these", "get_local_queuename, reaper from awx.main.dispatch.control import Control from awx.main.dispatch.pool import AutoscalePool", "schedule='/var/lib/awx/beat.db', scheduler_cls=AWXScheduler ).run() def handle(self, *arg, **options): if options.get('status'): print", "of any tasked managed by this dispatcher') parser.add_argument('--reload', dest='reload', action='store_true',", "return super(AWXScheduler, self).tick(*args, **kwargs) def apply_async(self, entry, producer=None, advance=True, **kwargs):", "if the parent PID changes, this process has been orphaned", "conn: try: bcast = 'tower_broadcast_all' queues = [ Queue(q, Exchange(q),", "consumer = None with Connection(settings.BROKER_URL) as conn: try: bcast =", "os.getppid() super(AWXScheduler, self).__init__(*args, **kwargs) def setup_schedule(self): super(AWXScheduler, self).setup_schedule() self.update_from_dict(settings.CELERYBEAT_SCHEDULE) def", ") consumer = AWXConsumer( 'dispatcher', conn, TaskWorker(), queues, AutoscalePool(min_workers=4) )", "TaskResult(object): id = result['uuid'] return TaskResult() app = Celery() app.conf.BROKER_URL", "and memcached connections (that way lies race conditions) django_connection.close() django_cache.close()", "TaskWorker logger = logging.getLogger('awx.main.dispatch') def construct_bcast_queue_name(common_name): return common_name.encode('utf8') + '_'", "we should exit too raise SystemExit() return super(AWXScheduler, self).tick(*args, **kwargs)", "# if the parent PID changes, this process has been", "+ [get_local_queuename()]) ] queues.append( Queue( construct_bcast_queue_name(bcast), exchange=Exchange(bcast, type='fanout'), routing_key=bcast, reply=True", "open sockets # for the DB and memcached connections (that", "producer=None, advance=True, 
**kwargs): task = TaskWorker.resolve_callable(entry.task) result, queue = task.apply_async()", ") consumer.run() except KeyboardInterrupt: logger.debug('Terminating Task Dispatcher') if consumer: consumer.stop()", "with Connection(settings.BROKER_URL) as conn: try: bcast = 'tower_broadcast_all' queues =", "action='store_true', help='print the UUIDs of any tasked managed by this", "by this dispatcher') parser.add_argument('--reload', dest='reload', action='store_true', help=('cause the dispatcher to", "as django_cache from django.core.management.base import BaseCommand from django.db import connection", "self).__init__(*args, **kwargs) def setup_schedule(self): super(AWXScheduler, self).setup_schedule() self.update_from_dict(settings.CELERYBEAT_SCHEDULE) def tick(self, *args,", "dest='running', action='store_true', help='print the UUIDs of any tasked managed by", "print Control('dispatcher').running() return if options.get('reload'): return Control('dispatcher').control({'control': 'reload'}) # It's", "beat class AWXScheduler(PersistentScheduler): def __init__(self, *args, **kwargs): self.ppid = os.getppid()", "import AWXConsumer, TaskWorker logger = logging.getLogger('awx.main.dispatch') def construct_bcast_queue_name(common_name): return common_name.encode('utf8')", "False beat.Beat( 30, app, schedule='/var/lib/awx/beat.db', scheduler_cls=AWXScheduler ).run() def handle(self, *arg,", "return Control('dispatcher').control({'control': 'reload'}) # It's important to close these because", "Queue from awx.main.dispatch import get_local_queuename, reaper from awx.main.dispatch.control import Control", "**kwargs): self.ppid = os.getppid() super(AWXScheduler, self).__init__(*args, **kwargs) def setup_schedule(self): super(AWXScheduler,", "PersistentScheduler from celery.apps import beat class AWXScheduler(PersistentScheduler): def __init__(self, *args,", "TaskResult() app = Celery() app.conf.BROKER_URL = settings.BROKER_URL app.conf.CELERY_TASK_RESULT_EXPIRES = False", 
"queue = task.apply_async() class TaskResult(object): id = result['uuid'] return TaskResult()", "entry, producer=None, advance=True, **kwargs): task = TaskWorker.resolve_callable(entry.task) result, queue =", "queues, AutoscalePool(min_workers=4) ) consumer.run() except KeyboardInterrupt: logger.debug('Terminating Task Dispatcher') if", "def __init__(self, *args, **kwargs): self.ppid = os.getppid() super(AWXScheduler, self).__init__(*args, **kwargs)", "= [ Queue(q, Exchange(q), routing_key=q) for q in (settings.AWX_CELERY_QUEUES_STATIC +", "add_arguments(self, parser): parser.add_argument('--status', dest='status', action='store_true', help='print the internal state of", "from awx.main.dispatch import get_local_queuename, reaper from awx.main.dispatch.control import Control from", "recycle all of its worker processes;' 'running jobs will run", "self.ppid = os.getppid() super(AWXScheduler, self).__init__(*args, **kwargs) def setup_schedule(self): super(AWXScheduler, self).setup_schedule()", "the parent PID changes, this process has been orphaned #", "dest='status', action='store_true', help='print the internal state of any running dispatchers')", "to inherit the open sockets # for the DB and", "parser): parser.add_argument('--status', dest='status', action='store_true', help='print the internal state of any", "dispatcher' def add_arguments(self, parser): parser.add_argument('--status', dest='status', action='store_true', help='print the internal", "completion first')) def beat(self): from celery import Celery from celery.beat", "to completion first')) def beat(self): from celery import Celery from", "True beat.start() reaper.reap() consumer = None with Connection(settings.BROKER_URL) as conn:", "from django.db import connection as django_connection from kombu import Connection,", "PID changes, this process has been orphaned # via e.g.,", "Connection, Exchange, Queue from awx.main.dispatch import get_local_queuename, reaper from awx.main.dispatch.control", 
"help=('cause the dispatcher to recycle all of its worker processes;'", "we # don't want the forked processes to inherit the", "from celery.beat import PersistentScheduler from celery.apps import beat class AWXScheduler(PersistentScheduler):", "reaper.reap() consumer = None with Connection(settings.BROKER_URL) as conn: try: bcast", "queues.append( Queue( construct_bcast_queue_name(bcast), exchange=Exchange(bcast, type='fanout'), routing_key=bcast, reply=True ) ) consumer", "Inc. # All Rights Reserved. import os import logging from", "= False beat.Beat( 30, app, schedule='/var/lib/awx/beat.db', scheduler_cls=AWXScheduler ).run() def handle(self,", "kombu import Connection, Exchange, Queue from awx.main.dispatch import get_local_queuename, reaper", "import os import logging from multiprocessing import Process from django.conf", "reply=True ) ) consumer = AWXConsumer( 'dispatcher', conn, TaskWorker(), queues,", "the forked processes to inherit the open sockets # for", "_about_ to fork, and we # don't want the forked", "the task dispatcher' def add_arguments(self, parser): parser.add_argument('--status', dest='status', action='store_true', help='print", "# It's important to close these because we're _about_ to", "Command(BaseCommand): help = 'Launch the task dispatcher' def add_arguments(self, parser):", "None with Connection(settings.BROKER_URL) as conn: try: bcast = 'tower_broadcast_all' queues", "common_name.encode('utf8') + '_' + settings.CLUSTER_HOST_ID class Command(BaseCommand): help = 'Launch", "don't want the forked processes to inherit the open sockets", "app = Celery() app.conf.BROKER_URL = settings.BROKER_URL app.conf.CELERY_TASK_RESULT_EXPIRES = False beat.Beat(", "because we're _about_ to fork, and we # don't want", "= 'tower_broadcast_all' queues = [ Queue(q, Exchange(q), routing_key=q) for q", "memcached connections (that way lies race conditions) django_connection.close() django_cache.close() beat", "to fork, and we # don't want the forked processes", 
"'_' + settings.CLUSTER_HOST_ID class Command(BaseCommand): help = 'Launch the task", "= 'Launch the task dispatcher' def add_arguments(self, parser): parser.add_argument('--status', dest='status',", "*args, **kwargs): if os.getppid() != self.ppid: # if the parent", "from awx.main.dispatch.control import Control from awx.main.dispatch.pool import AutoscalePool from awx.main.dispatch.worker", "its worker processes;' 'running jobs will run to completion first'))", "construct_bcast_queue_name(common_name): return common_name.encode('utf8') + '_' + settings.CLUSTER_HOST_ID class Command(BaseCommand): help", "(c) 2015 Ansible, Inc. # All Rights Reserved. import os", "try: bcast = 'tower_broadcast_all' queues = [ Queue(q, Exchange(q), routing_key=q)", "dispatcher to recycle all of its worker processes;' 'running jobs", "# don't want the forked processes to inherit the open", "class Command(BaseCommand): help = 'Launch the task dispatcher' def add_arguments(self,", "Reserved. import os import logging from multiprocessing import Process from", "parent PID changes, this process has been orphaned # via", "we're _about_ to fork, and we # don't want the", "beat(self): from celery import Celery from celery.beat import PersistentScheduler from", "changes, this process has been orphaned # via e.g., segfault", "django_cache from django.core.management.base import BaseCommand from django.db import connection as", "return if options.get('reload'): return Control('dispatcher').control({'control': 'reload'}) # It's important to", "important to close these because we're _about_ to fork, and", "options.get('running'): print Control('dispatcher').running() return if options.get('reload'): return Control('dispatcher').control({'control': 'reload'}) #", "# All Rights Reserved. 
import os import logging from multiprocessing", "settings.BROKER_URL app.conf.CELERY_TASK_RESULT_EXPIRES = False beat.Beat( 30, app, schedule='/var/lib/awx/beat.db', scheduler_cls=AWXScheduler ).run()", "= AWXConsumer( 'dispatcher', conn, TaskWorker(), queues, AutoscalePool(min_workers=4) ) consumer.run() except", "def apply_async(self, entry, producer=None, advance=True, **kwargs): task = TaskWorker.resolve_callable(entry.task) result,", "routing_key=q) for q in (settings.AWX_CELERY_QUEUES_STATIC + [get_local_queuename()]) ] queues.append( Queue(", "exit too raise SystemExit() return super(AWXScheduler, self).tick(*args, **kwargs) def apply_async(self,", "def add_arguments(self, parser): parser.add_argument('--status', dest='status', action='store_true', help='print the internal state", "tick(self, *args, **kwargs): if os.getppid() != self.ppid: # if the", ").run() def handle(self, *arg, **options): if options.get('status'): print Control('dispatcher').status() return", "BaseCommand from django.db import connection as django_connection from kombu import", "= None with Connection(settings.BROKER_URL) as conn: try: bcast = 'tower_broadcast_all'" ]
[ "la, ha, ra, *cas = torch.split(anchors, 1, dim=-1) xt, yt,", "z is the box center in z-axis :param dir_cls_preds: (batch_size,", "is the box center in z-axis :param anchors: (N, 7", "zip(cgs, cas)] return torch.cat([xt, yt, zt, wt, lt, ht, rt,", "> 0) ^ dir_labels.byte() batch_box_preds[..., -1] += torch.where( opp_labels, torch.tensor(np.pi).type_as(batch_box_preds),", "xa, ya, za, wa, la, ha, ra, *cas = np.split(anchors,", "\"\"\" box_ndim = anchors.shape[-1] xa, ya, za, wa, la, ha,", "za + ha / 2 zg = zg + hg", "= torch.log(wg / wa) ht = torch.log(hg / ha) rt", "def __init__(self, code_size=7): super().__init__() self.code_size = code_size @staticmethod def encode_np(boxes,", "torch.split(anchors, 1, dim=-1) xg, yg, zg, wg, lg, hg, rg,", "la, ha, ra, *cas = np.split(anchors, box_ndim, axis=-1) xg, yg,", "1, dim=-1) xt, yt, zt, wt, lt, ht, rt, *cts", "zt, wt, lt, ht, rt, *cts = np.split(box_encodings, box_ndim, axis=-1)", "za) / ha # 1.6 lt = np.log(lg / la)", ":return: \"\"\" box_ndim = anchors.shape[-1] xa, ya, za, wa, la,", "+ za lg = np.exp(lt) * la wg = np.exp(wt)", "** 2 + wa ** 2) xg = xt *", "wg = torch.exp(wt) * wa hg = torch.exp(ht) * ha", "dir_labels.byte() batch_box_preds[..., -1] += torch.where( opp_labels, torch.tensor(np.pi).type_as(batch_box_preds), torch.tensor(0.0).type_as(batch_box_preds) ) else:", "ya, za, wa, la, ha, ra, *cas = np.split(anchors, box_ndim,", "= zt * ha + za lg = torch.exp(lt) *", "za lg = torch.exp(lt) * la wg = torch.exp(wt) *", "numpy as np import torch from . import common_utils class", "axis=-1) # need to convert boxes to z-center format zg", "box_encodings to z-bottom format za = za + ha /", "\"\"\" :param boxes: (N, 7 + ?) x, y, z,", "boxes: (N, 7 + ?) x, y, z, w, l,", "box_ndim, axis=-1) xt, yt, zt, wt, lt, ht, rt, *cts", "np.concatenate([xt, yt, zt, wt, lt, ht, rt, *cts], axis=-1) @staticmethod", "decode_np(box_encodings, anchors): \"\"\" :param box_encodings: (N, 7 + ?) x,", ". 
import common_utils class ResidualCoder(object): def __init__(self, code_size=7): super().__init__() self.code_size", "lg, hg, rg, *cgs = np.split(boxes, box_ndim, axis=-1) # need", "need to convert boxes to z-center format zg = zg", "np.log(hg / ha) rt = rg - ra cts =", "wg, lg, hg, rg, *cgs = torch.split(boxes, 1, dim=-1) za", "in z-axis :param anchors: (N, 7 + ?) :return: \"\"\"", "wa, la, ha, ra, *cas = np.split(anchors, box_ndim, axis=-1) xg,", "box center in z-axis :param anchors: (batch_size, N, 7 +", "ha, ra, *cas = torch.split(anchors, 1, dim=-1) xt, yt, zt,", "wa hg = torch.exp(ht) * ha rg = rt +", "code_size=7): super().__init__() self.code_size = code_size @staticmethod def encode_np(boxes, anchors): \"\"\"", "torch.cat([xg, yg, zg, wg, lg, hg, rg, *cgs], dim=-1) def", "box_encodings: (N, 7 + ?) x, y, z, w, l,", "+ hg / 2 za = za + ha /", "anchors, dir_cls_preds, num_dir_bins, dir_offset, dir_limit_offset, use_binary_dir_classifier=False): \"\"\" :param box_preds: (batch_size,", "rt + ra zg = zg - hg / 2", "w, l, h, r, custom values, z is the box", "rt, *cts], dim=-1) @staticmethod def decode_torch(box_encodings, anchors): \"\"\" :param box_encodings:", "za, wa, la, ha, ra, *cas = torch.split(anchors, 1, dim=-1)", "torch.max(dir_cls_preds, dim=-1)[1] opp_labels = (batch_box_preds[..., -1] > 0) ^ dir_labels.byte()", "def encode_np(boxes, anchors): \"\"\" :param boxes: (N, 7 + ?)", "- hg / 2 cgs = [t + a for", "yg, zg, wg, lg, hg, rg, *cgs = torch.split(boxes, 1,", "torch.split(boxes, 1, dim=-1) za = za + ha / 2", "= yt * diagonal + ya zg = zt *", "za, wa, la, ha, ra, *cas = np.split(anchors, box_ndim, axis=-1)", "= np.split(boxes, box_ndim, axis=-1) # need to convert boxes to", "zg - hg / 2 cgs = [t + a", "wt = np.log(wg / wa) ht = np.log(hg / ha)", "zip(cts, cas)] return torch.cat([xg, yg, zg, wg, lg, hg, rg,", "(batch_size, H, W, num_anchors_per_locations*2) :return: \"\"\" batch_box_preds = self.decode_torch(box_preds, anchors)", "g, a in zip(cgs, 
cas)] return torch.cat([xt, yt, zt, wt,", "self.code_size = code_size @staticmethod def encode_np(boxes, anchors): \"\"\" :param boxes:", "+ ?) :return: \"\"\" xa, ya, za, wa, la, ha,", ":param box_preds: (batch_size, N, 7 + ?), x, y, z,", "anchors): \"\"\" :param box_encodings: (N, 7 + ?) x, y,", "2 cgs = [t + a for t, a in", "code_size @staticmethod def encode_np(boxes, anchors): \"\"\" :param boxes: (N, 7", "z-bottom format za = za + ha / 2 diagonal", "ra, *cas = np.split(anchors, box_ndim, axis=-1) xt, yt, zt, wt,", "wa, la, ha, ra, *cas = torch.split(anchors, 1, dim=-1) xg,", "torch from . import common_utils class ResidualCoder(object): def __init__(self, code_size=7):", "dir_cls_preds, num_dir_bins, dir_offset, dir_limit_offset, use_binary_dir_classifier=False): \"\"\" :param box_preds: (batch_size, N,", "in z-axis :param dir_cls_preds: (batch_size, H, W, num_anchors_per_locations*2) :return: \"\"\"", "else: dir_labels = torch.max(dir_cls_preds, dim=-1)[1] period = (2 * np.pi", "batch_box_preds[..., -1] += torch.where( opp_labels, torch.tensor(np.pi).type_as(batch_box_preds), torch.tensor(0.0).type_as(batch_box_preds) ) else: dir_labels", ") else: dir_labels = torch.max(dir_cls_preds, dim=-1)[1] period = (2 *", "yt, zt, wt, lt, ht, rt, *cts], axis=-1) @staticmethod def", "zip(cgs, cas)] return np.concatenate([xt, yt, zt, wt, lt, ht, rt,", "= np.split(box_encodings, box_ndim, axis=-1) # need to convert box_encodings to", "= np.exp(wt) * wa hg = np.exp(ht) * ha rg", "xt = (xg - xa) / diagonal yt = (yg", "return np.concatenate([xt, yt, zt, wt, lt, ht, rt, *cts], axis=-1)", "cas)] return torch.cat([xg, yg, zg, wg, lg, hg, rg, *cgs],", "wa hg = np.exp(ht) * ha rg = rt +", "rg, *cgs], dim=-1) def decode_with_head_direction_torch(self, box_preds, anchors, dir_cls_preds, num_dir_bins, dir_offset,", "?) :return: \"\"\" box_ndim = anchors.shape[-1] xa, ya, za, wa,", "(N, 7 + ?) 
:return: \"\"\" xa, ya, za, wa,", "7 + ?), x, y, z, w, l, h, r,", "zt = (zg - za) / ha # 1.6 lt", ":param anchors: (batch_size, N, 7 + ?), x, y, z,", "axis=-1) xt, yt, zt, wt, lt, ht, rt, *cts =", "2 + wa ** 2) xg = xt * diagonal", "= za + ha / 2 diagonal = torch.sqrt(la **", "ha rg = rt + ra zg = zg -", "* la wg = torch.exp(wt) * wa hg = torch.exp(ht)", "= (2 * np.pi / num_dir_bins) dir_rot = common_utils.limit_period_torch( batch_box_preds[...,", "wg, lg, hg, rg, *cgs], axis=-1) @staticmethod def encode_torch(boxes, anchors):", "z-axis :param dir_cls_preds: (batch_size, H, W, num_anchors_per_locations*2) :return: \"\"\" batch_box_preds", "ha) rt = rg - ra cts = [g -", "ht, rt, *cts = np.split(box_encodings, box_ndim, axis=-1) # need to", "yg = yt * diagonal + ya zg = zt", "= zg + hg / 2 za = za +", "za = za + ha / 2 diagonal = np.sqrt(la", "ra cts = [g - a for g, a in", "= code_size @staticmethod def encode_np(boxes, anchors): \"\"\" :param boxes: (N,", "- za) / ha lt = torch.log(lg / la) wt", "lt, ht, rt, *cts], dim=-1) @staticmethod def decode_torch(box_encodings, anchors): \"\"\"", "/ diagonal zt = (zg - za) / ha lt", "if use_binary_dir_classifier: dir_labels = torch.max(dir_cls_preds, dim=-1)[1] opp_labels = (batch_box_preds[..., -1]", "dir_offset + period * dir_labels.to(batch_box_preds.dtype) return batch_box_preds if __name__ ==", "z, w, l, h, r, custom values, z is the", "a for g, a in zip(cgs, cas)] return torch.cat([xt, yt,", "convert box_encodings to z-bottom format za = za + ha", "[t + a for t, a in zip(cts, cas)] return", "za = za + ha / 2 diagonal = torch.sqrt(la", "(zg - za) / ha lt = torch.log(lg / la)", "yg, zg, wg, lg, hg, rg, *cgs], axis=-1) @staticmethod def", "= torch.max(dir_cls_preds, dim=-1)[1] period = (2 * np.pi / num_dir_bins)", "** 2) xg = xt * diagonal + xa yg", "7 + ?) 
:return: \"\"\" box_ndim = anchors.shape[-1] xa, ya,", "a in zip(cts, cas)] return torch.cat([xg, yg, zg, wg, lg,", "np.split(anchors, box_ndim, axis=-1) xg, yg, zg, wg, lg, hg, rg,", "(zg - za) / ha # 1.6 lt = np.log(lg", "torch.log(lg / la) wt = torch.log(wg / wa) ht =", "= za + ha / 2 zg = zg +", "@staticmethod def decode_torch(box_encodings, anchors): \"\"\" :param box_encodings: (N, 7 +", "+ a for t, a in zip(cts, cas)] return torch.cat([xg,", "z-axis :param anchors: (N, 7 + ?) :return: \"\"\" xa,", "from . import common_utils class ResidualCoder(object): def __init__(self, code_size=7): super().__init__()", "= torch.exp(lt) * la wg = torch.exp(wt) * wa hg", "rg, *cgs], axis=-1) @staticmethod def encode_torch(boxes, anchors): \"\"\" :param boxes:", "anchors) if dir_cls_preds is not None: dir_cls_preds = dir_cls_preds.view(box_preds.shape[0], box_preds.shape[1],", "= za + ha / 2 diagonal = np.sqrt(la **", "cts = [g - a for g, a in zip(cgs,", "*cgs = np.split(boxes, box_ndim, axis=-1) # need to convert boxes", "+ period * dir_labels.to(batch_box_preds.dtype) return batch_box_preds if __name__ == '__main__':", "center in z-axis :param anchors: (batch_size, N, 7 + ?),", "ha / 2 diagonal = np.sqrt(la ** 2 + wa", "hg / 2 diagonal = torch.sqrt(la ** 2 + wa", "def decode_torch(box_encodings, anchors): \"\"\" :param box_encodings: (N, 7 + ?)", "common_utils.limit_period_torch( batch_box_preds[..., 6] - dir_offset, dir_limit_offset, period ) batch_box_preds[..., 6]", "torch.max(dir_cls_preds, dim=-1)[1] period = (2 * np.pi / num_dir_bins) dir_rot", "wt, lt, ht, rt, *cts], dim=-1) @staticmethod def decode_torch(box_encodings, anchors):", "axis=-1) # need to convert box_encodings to z-bottom format za", "np.exp(lt) * la wg = np.exp(wt) * wa hg =", "** 2) xt = (xg - xa) / diagonal yt", "z-axis :param anchors: (batch_size, N, 7 + ?), x, y,", "period = (2 * np.pi / num_dir_bins) dir_rot = common_utils.limit_period_torch(", "la, ha, ra, *cas = torch.split(anchors, 1, dim=-1) 
xg, yg,", "/ 2 diagonal = np.sqrt(la ** 2 + wa **", "\"\"\" :param box_encodings: (N, 7 + ?) x, y, z,", "hg / 2 cgs = [t + a for t,", "= [g - a for g, a in zip(cgs, cas)]", "a for g, a in zip(cgs, cas)] return np.concatenate([xt, yt,", "in zip(cgs, cas)] return np.concatenate([xt, yt, zt, wt, lt, ht,", "zt * ha + za lg = np.exp(lt) * la", "dir_limit_offset, use_binary_dir_classifier=False): \"\"\" :param box_preds: (batch_size, N, 7 + ?),", "= [t + a for t, a in zip(cts, cas)]", "dir_labels = torch.max(dir_cls_preds, dim=-1)[1] opp_labels = (batch_box_preds[..., -1] > 0)", "/ la) wt = np.log(wg / wa) ht = np.log(hg", "= anchors.shape[-1] xa, ya, za, wa, la, ha, ra, *cas", ":return: \"\"\" xa, ya, za, wa, la, ha, ra, *cas", "num_dir_bins, dir_offset, dir_limit_offset, use_binary_dir_classifier=False): \"\"\" :param box_preds: (batch_size, N, 7", "for t, a in zip(cts, cas)] return np.concatenate([xg, yg, zg,", ":param anchors: (N, 7 + ?) :return: \"\"\" box_ndim =", "lg = np.exp(lt) * la wg = np.exp(wt) * wa", "zg = zt * ha + za lg = torch.exp(lt)", "wa ** 2) xg = xt * diagonal + xa", "a in zip(cgs, cas)] return np.concatenate([xt, yt, zt, wt, lt,", "anchors: (N, 7 + ?) 
:return: \"\"\" xa, ya, za,", "(batch_size, N, 7 + ?), x, y, z, w, l,", "* wa hg = torch.exp(ht) * ha rg = rt", "** 2 + wa ** 2) # 4.3 xt =", "= np.log(lg / la) wt = np.log(wg / wa) ht", "rt = rg - ra cts = [g - a", "lt, ht, rt, *cts], axis=-1) @staticmethod def decode_np(box_encodings, anchors): \"\"\"", "+ wa ** 2) xg = xt * diagonal +", "/ wa) ht = torch.log(hg / ha) rt = rg", "ha, ra, *cas = np.split(anchors, box_ndim, axis=-1) xt, yt, zt,", "- ya) / diagonal zt = (zg - za) /", "yg, zg, wg, lg, hg, rg, *cgs = np.split(boxes, box_ndim,", "* diagonal + ya zg = zt * ha +", "zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1)", "2) xt = (xg - xa) / diagonal yt =", "= np.sqrt(la ** 2 + wa ** 2) # 4.3", "num_dir_bins) dir_rot = common_utils.limit_period_torch( batch_box_preds[..., 6] - dir_offset, dir_limit_offset, period", "rg - ra cts = [g - a for g,", "= (zg - za) / ha # 1.6 lt =", "wg, lg, hg, rg, *cgs = np.split(boxes, box_ndim, axis=-1) #", "ht = torch.log(hg / ha) rt = rg - ra", "wa) ht = torch.log(hg / ha) rt = rg -", "+ ha / 2 zg = zg + hg /", "rt, *cts = np.split(box_encodings, box_ndim, axis=-1) # need to convert", "ht, rt, *cts], dim=-1) @staticmethod def decode_torch(box_encodings, anchors): \"\"\" :param", "ha / 2 diagonal = torch.sqrt(la ** 2 + wa", "= torch.split(box_encodings, 1, dim=-1) za = za + ha /", "lt = np.log(lg / la) wt = np.log(wg / wa)", "if dir_cls_preds is not None: dir_cls_preds = dir_cls_preds.view(box_preds.shape[0], box_preds.shape[1], -1)", "0) ^ dir_labels.byte() batch_box_preds[..., -1] += torch.where( opp_labels, torch.tensor(np.pi).type_as(batch_box_preds), torch.tensor(0.0).type_as(batch_box_preds)", "torch.tensor(np.pi).type_as(batch_box_preds), torch.tensor(0.0).type_as(batch_box_preds) ) else: dir_labels = torch.max(dir_cls_preds, dim=-1)[1] period =", "the box center in z-axis :param anchors: (N, 7 +", "za = za + ha / 2 zg = zg", "opp_labels, torch.tensor(np.pi).type_as(batch_box_preds), 
torch.tensor(0.0).type_as(batch_box_preds) ) else: dir_labels = torch.max(dir_cls_preds, dim=-1)[1] period", "torch.split(box_encodings, 1, dim=-1) za = za + ha / 2", "(N, 7 + ?) :return: \"\"\" box_ndim = anchors.shape[-1] xa,", "the box center in z-axis :param dir_cls_preds: (batch_size, H, W,", "z-center format zg = zg + hg / 2 za", "np.split(box_encodings, box_ndim, axis=-1) # need to convert box_encodings to z-bottom", "ht = np.log(hg / ha) rt = rg - ra", "(xg - xa) / diagonal yt = (yg - ya)", "1.6 lt = np.log(lg / la) wt = np.log(wg /", "yt = (yg - ya) / diagonal zt = (zg", "dir_cls_preds.view(box_preds.shape[0], box_preds.shape[1], -1) if use_binary_dir_classifier: dir_labels = torch.max(dir_cls_preds, dim=-1)[1] opp_labels", "l, h, r, custom values, z is the box center", "wa ** 2) # 4.3 xt = (xg - xa)", "2 diagonal = torch.sqrt(la ** 2 + wa ** 2)", "box center in z-axis :param dir_cls_preds: (batch_size, H, W, num_anchors_per_locations*2)", "*cas = np.split(anchors, box_ndim, axis=-1) xt, yt, zt, wt, lt,", "ht, rt, *cts = torch.split(box_encodings, 1, dim=-1) za = za", "*cas = torch.split(anchors, 1, dim=-1) xg, yg, zg, wg, lg,", "+ ?) x, y, z, w, l, h, r, custom", "\"\"\" batch_box_preds = self.decode_torch(box_preds, anchors) if dir_cls_preds is not None:", "= torch.split(boxes, 1, dim=-1) za = za + ha /", "+ wa ** 2) # 4.3 xt = (xg -", "diagonal zt = (zg - za) / ha # 1.6", "hg, rg, *cgs = np.split(boxes, box_ndim, axis=-1) # need to", "= dir_cls_preds.view(box_preds.shape[0], box_preds.shape[1], -1) if use_binary_dir_classifier: dir_labels = torch.max(dir_cls_preds, dim=-1)[1]", "la, ha, ra, *cas = np.split(anchors, box_ndim, axis=-1) xt, yt,", "axis=-1) @staticmethod def decode_np(box_encodings, anchors): \"\"\" :param box_encodings: (N, 7", "/ diagonal zt = (zg - za) / ha #", "xa, ya, za, wa, la, ha, ra, *cas = torch.split(anchors,", "diagonal yt = (yg - ya) / diagonal zt =", "anchors.shape[-1] xa, ya, za, wa, la, ha, ra, *cas =", "+ ?) 
:return: \"\"\" box_ndim = anchors.shape[-1] xa, ya, za,", "rg, *cgs = torch.split(boxes, 1, dim=-1) za = za +", "* ha + za lg = torch.exp(lt) * la wg", "use_binary_dir_classifier: dir_labels = torch.max(dir_cls_preds, dim=-1)[1] opp_labels = (batch_box_preds[..., -1] >", "hg = np.exp(ht) * ha rg = rt + ra", "use_binary_dir_classifier=False): \"\"\" :param box_preds: (batch_size, N, 7 + ?), x,", "np.split(boxes, box_ndim, axis=-1) # need to convert boxes to z-center", "for g, a in zip(cgs, cas)] return torch.cat([xt, yt, zt,", "batch_box_preds[..., 6] = dir_rot + dir_offset + period * dir_labels.to(batch_box_preds.dtype)", "diagonal + ya zg = zt * ha + za", "self.decode_torch(box_preds, anchors) if dir_cls_preds is not None: dir_cls_preds = dir_cls_preds.view(box_preds.shape[0],", "dim=-1)[1] period = (2 * np.pi / num_dir_bins) dir_rot =", "*cas = np.split(anchors, box_ndim, axis=-1) xg, yg, zg, wg, lg,", "box_ndim, axis=-1) xg, yg, zg, wg, lg, hg, rg, *cgs", "hg, rg, *cgs], axis=-1) @staticmethod def encode_torch(boxes, anchors): \"\"\" :param", "np.exp(ht) * ha rg = rt + ra zg =", "dir_cls_preds: (batch_size, H, W, num_anchors_per_locations*2) :return: \"\"\" batch_box_preds = self.decode_torch(box_preds,", "the box center in z-axis :param anchors: (batch_size, N, 7", "format za = za + ha / 2 diagonal =", "decode_torch(box_encodings, anchors): \"\"\" :param box_encodings: (N, 7 + ?) 
x,", "box_ndim = anchors.shape[-1] xa, ya, za, wa, la, ha, ra,", "zg = zt * ha + za lg = np.exp(lt)", "return torch.cat([xg, yg, zg, wg, lg, hg, rg, *cgs], dim=-1)", "= torch.split(anchors, 1, dim=-1) xg, yg, zg, wg, lg, hg,", "wt, lt, ht, rt, *cts], axis=-1) @staticmethod def decode_np(box_encodings, anchors):", "xg, yg, zg, wg, lg, hg, rg, *cgs = torch.split(boxes,", "None: dir_cls_preds = dir_cls_preds.view(box_preds.shape[0], box_preds.shape[1], -1) if use_binary_dir_classifier: dir_labels =", "wg, lg, hg, rg, *cgs], dim=-1) def decode_with_head_direction_torch(self, box_preds, anchors,", "- za) / ha # 1.6 lt = np.log(lg /", "6] = dir_rot + dir_offset + period * dir_labels.to(batch_box_preds.dtype) return", "\"\"\" :param box_preds: (batch_size, N, 7 + ?), x, y,", "/ 2 zg = zg + hg / 2 diagonal", "za + ha / 2 diagonal = np.sqrt(la ** 2", "- xa) / diagonal yt = (yg - ya) /", "np.sqrt(la ** 2 + wa ** 2) xg = xt", "a in zip(cgs, cas)] return torch.cat([xt, yt, zt, wt, lt,", ":param dir_cls_preds: (batch_size, H, W, num_anchors_per_locations*2) :return: \"\"\" batch_box_preds =", "= torch.exp(ht) * ha rg = rt + ra zg", "(N, 7 + ?) 
x, y, z, w, l, h,", "za lg = np.exp(lt) * la wg = np.exp(wt) *", "ha + za lg = np.exp(lt) * la wg =", "= torch.log(hg / ha) rt = rg - ra cts", "+ ha / 2 diagonal = torch.sqrt(la ** 2 +", "box_preds, anchors, dir_cls_preds, num_dir_bins, dir_offset, dir_limit_offset, use_binary_dir_classifier=False): \"\"\" :param box_preds:", "/ diagonal yt = (yg - ya) / diagonal zt", ":return: \"\"\" batch_box_preds = self.decode_torch(box_preds, anchors) if dir_cls_preds is not", "dir_cls_preds is not None: dir_cls_preds = dir_cls_preds.view(box_preds.shape[0], box_preds.shape[1], -1) if", "is the box center in z-axis :param anchors: (batch_size, N,", "ha + za lg = torch.exp(lt) * la wg =", "dir_limit_offset, period ) batch_box_preds[..., 6] = dir_rot + dir_offset +", "= torch.split(anchors, 1, dim=-1) xt, yt, zt, wt, lt, ht,", "z-axis :param anchors: (N, 7 + ?) :return: \"\"\" box_ndim", "diagonal = torch.sqrt(la ** 2 + wa ** 2) xg", "xg = xt * diagonal + xa yg = yt", "xa) / diagonal yt = (yg - ya) / diagonal", "* la wg = np.exp(wt) * wa hg = np.exp(ht)", "= common_utils.limit_period_torch( batch_box_preds[..., 6] - dir_offset, dir_limit_offset, period ) batch_box_preds[...,", "__init__(self, code_size=7): super().__init__() self.code_size = code_size @staticmethod def encode_np(boxes, anchors):", "xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings,", "(batch_box_preds[..., -1] > 0) ^ dir_labels.byte() batch_box_preds[..., -1] += torch.where(", "period ) batch_box_preds[..., 6] = dir_rot + dir_offset + period", "/ ha # 1.6 lt = np.log(lg / la) wt", "* np.pi / num_dir_bins) dir_rot = common_utils.limit_period_torch( batch_box_preds[..., 6] -", "torch.sqrt(la ** 2 + wa ** 2) xt = (xg", "class ResidualCoder(object): def __init__(self, code_size=7): super().__init__() self.code_size = code_size @staticmethod", "to z-center format zg = zg + hg / 2", "cgs = [t + a for t, a in zip(cts,", "= rt + ra zg = zg - hg /", ":param boxes: (N, 7 + ?) 
x, y, z, w,", "diagonal = np.sqrt(la ** 2 + wa ** 2) xg", "to convert boxes to z-center format zg = zg +", "np import torch from . import common_utils class ResidualCoder(object): def", "@staticmethod def decode_np(box_encodings, anchors): \"\"\" :param box_encodings: (N, 7 +", "box center in z-axis :param anchors: (N, 7 + ?)", "for t, a in zip(cts, cas)] return torch.cat([xg, yg, zg,", "diagonal = torch.sqrt(la ** 2 + wa ** 2) xt", "ha, ra, *cas = torch.split(anchors, 1, dim=-1) xg, yg, zg,", "is not None: dir_cls_preds = dir_cls_preds.view(box_preds.shape[0], box_preds.shape[1], -1) if use_binary_dir_classifier:", "ht, rt, *cts], axis=-1) @staticmethod def decode_np(box_encodings, anchors): \"\"\" :param", "rt, *cts], axis=-1) @staticmethod def decode_np(box_encodings, anchors): \"\"\" :param box_encodings:", "a for t, a in zip(cts, cas)] return np.concatenate([xg, yg,", "dir_offset, dir_limit_offset, period ) batch_box_preds[..., 6] = dir_rot + dir_offset", "boxes to z-center format zg = zg + hg /", "+ ra zg = zg - hg / 2 cgs", "ha lt = torch.log(lg / la) wt = torch.log(wg /", "# 4.3 xt = (xg - xa) / diagonal yt", "zt * ha + za lg = torch.exp(lt) * la", "cas)] return np.concatenate([xg, yg, zg, wg, lg, hg, rg, *cgs],", "la wg = np.exp(wt) * wa hg = np.exp(ht) *", "in zip(cts, cas)] return np.concatenate([xg, yg, zg, wg, lg, hg,", "xg, yg, zg, wg, lg, hg, rg, *cgs = np.split(boxes,", "opp_labels = (batch_box_preds[..., -1] > 0) ^ dir_labels.byte() batch_box_preds[..., -1]", "a for t, a in zip(cts, cas)] return torch.cat([xg, yg,", "wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1) za", "diagonal zt = (zg - za) / ha lt =", "# need to convert boxes to z-center format zg =", "diagonal = np.sqrt(la ** 2 + wa ** 2) #", ":param anchors: (N, 7 + ?) 
:return: \"\"\" xa, ya,", "*cts], axis=-1) @staticmethod def decode_np(box_encodings, anchors): \"\"\" :param box_encodings: (N,", "= rg - ra cts = [g - a for", "def decode_np(box_encodings, anchors): \"\"\" :param box_encodings: (N, 7 + ?)", "= np.split(anchors, box_ndim, axis=-1) xg, yg, zg, wg, lg, hg,", "zg = zg + hg / 2 za = za", "yg, zg, wg, lg, hg, rg, *cgs], dim=-1) def decode_with_head_direction_torch(self,", "= np.exp(ht) * ha rg = rt + ra zg", "lg, hg, rg, *cgs = torch.split(boxes, 1, dim=-1) za =", "np.log(wg / wa) ht = np.log(hg / ha) rt =", "za + ha / 2 diagonal = torch.sqrt(la ** 2", "= zg + hg / 2 diagonal = torch.sqrt(la **", "encode_torch(boxes, anchors): \"\"\" :param boxes: (N, 7 + ?) x,", "zg, wg, lg, hg, rg, *cgs = torch.split(boxes, 1, dim=-1)", "\"\"\" xa, ya, za, wa, la, ha, ra, *cas =", "= (yg - ya) / diagonal zt = (zg -", "1, dim=-1) xg, yg, zg, wg, lg, hg, rg, *cgs", "-1] += torch.where( opp_labels, torch.tensor(np.pi).type_as(batch_box_preds), torch.tensor(0.0).type_as(batch_box_preds) ) else: dir_labels =", "period * dir_labels.to(batch_box_preds.dtype) return batch_box_preds if __name__ == '__main__': pass", "box_preds: (batch_size, N, 7 + ?), x, y, z, w,", "- a for g, a in zip(cgs, cas)] return torch.cat([xt,", "7 + ?) 
x, y, z, w, l, h, r,", "a in zip(cts, cas)] return np.concatenate([xg, yg, zg, wg, lg,", "= zt * ha + za lg = np.exp(lt) *", "yt, zt, wt, lt, ht, rt, *cts], dim=-1) @staticmethod def", "dir_rot + dir_offset + period * dir_labels.to(batch_box_preds.dtype) return batch_box_preds if", "(yg - ya) / diagonal zt = (zg - za)", "= (xg - xa) / diagonal yt = (yg -", "custom values, z is the box center in z-axis :param", "is the box center in z-axis :param dir_cls_preds: (batch_size, H,", "batch_box_preds[..., 6] - dir_offset, dir_limit_offset, period ) batch_box_preds[..., 6] =", "ya) / diagonal zt = (zg - za) / ha", "zt, wt, lt, ht, rt, *cts], axis=-1) @staticmethod def decode_np(box_encodings,", "center in z-axis :param anchors: (N, 7 + ?) :return:", "in z-axis :param anchors: (batch_size, N, 7 + ?), x,", "= zg - hg / 2 cgs = [t +", "axis=-1) xg, yg, zg, wg, lg, hg, rg, *cgs =", "yt, zt, wt, lt, ht, rt, *cts = np.split(box_encodings, box_ndim,", "encode_np(boxes, anchors): \"\"\" :param boxes: (N, 7 + ?) x,", "center in z-axis :param dir_cls_preds: (batch_size, H, W, num_anchors_per_locations*2) :return:", "rg, *cgs = np.split(boxes, box_ndim, axis=-1) # need to convert", "la) wt = np.log(wg / wa) ht = np.log(hg /", "4.3 xt = (xg - xa) / diagonal yt =", "dim=-1) xt, yt, zt, wt, lt, ht, rt, *cts =", "2) xg = xt * diagonal + xa yg =", "cas)] return torch.cat([xt, yt, zt, wt, lt, ht, rt, *cts],", "zg = zg + hg / 2 diagonal = torch.sqrt(la", "** 2 + wa ** 2) xt = (xg -", "z is the box center in z-axis :param anchors: (N,", "values, z is the box center in z-axis :param dir_cls_preds:", "ra, *cas = np.split(anchors, box_ndim, axis=-1) xg, yg, zg, wg,", "import numpy as np import torch from . import common_utils", "import torch from . import common_utils class ResidualCoder(object): def __init__(self,", "ResidualCoder(object): def __init__(self, code_size=7): super().__init__() self.code_size = code_size @staticmethod def", "/ la) wt = torch.log(wg / wa) ht = torch.log(hg", "?) 
x, y, z, w, l, h, r, custom values,", "dir_rot = common_utils.limit_period_torch( batch_box_preds[..., 6] - dir_offset, dir_limit_offset, period )", "wg = np.exp(wt) * wa hg = np.exp(ht) * ha", "/ 2 diagonal = torch.sqrt(la ** 2 + wa **", "lt, ht, rt, *cts = np.split(box_encodings, box_ndim, axis=-1) # need", "-1] > 0) ^ dir_labels.byte() batch_box_preds[..., -1] += torch.where( opp_labels,", "wa) ht = np.log(hg / ha) rt = rg -", "zt = (zg - za) / ha lt = torch.log(lg", "dim=-1) xg, yg, zg, wg, lg, hg, rg, *cgs =", "return np.concatenate([xg, yg, zg, wg, lg, hg, rg, *cgs], axis=-1)", "values, z is the box center in z-axis :param anchors:", "= torch.log(lg / la) wt = torch.log(wg / wa) ht", "wa, la, ha, ra, *cas = np.split(anchors, box_ndim, axis=-1) xt,", "/ 2 za = za + ha / 2 diagonal", "*cts], dim=-1) @staticmethod def decode_torch(box_encodings, anchors): \"\"\" :param box_encodings: (N,", "# 1.6 lt = np.log(lg / la) wt = np.log(wg", "torch.exp(lt) * la wg = torch.exp(wt) * wa hg =", "2 zg = zg + hg / 2 diagonal =", "6] - dir_offset, dir_limit_offset, period ) batch_box_preds[..., 6] = dir_rot", "@staticmethod def encode_torch(boxes, anchors): \"\"\" :param boxes: (N, 7 +", "torch.exp(wt) * wa hg = torch.exp(ht) * ha rg =", "# need to convert box_encodings to z-bottom format za =", "common_utils class ResidualCoder(object): def __init__(self, code_size=7): super().__init__() self.code_size = code_size", "zg + hg / 2 za = za + ha", "*cts = torch.split(box_encodings, 1, dim=-1) za = za + ha", "axis=-1) @staticmethod def encode_torch(boxes, anchors): \"\"\" :param boxes: (N, 7", "?), x, y, z, w, l, h, r, custom values,", "zg + hg / 2 diagonal = torch.sqrt(la ** 2", "H, W, num_anchors_per_locations*2) :return: \"\"\" batch_box_preds = self.decode_torch(box_preds, anchors) if", "= xt * diagonal + xa yg = yt *", "la) wt = torch.log(wg / wa) ht = torch.log(hg /", "+ xa yg = yt * diagonal + ya zg", "in zip(cts, cas)] return torch.cat([xg, yg, zg, wg, lg, hg,", "= 
torch.max(dir_cls_preds, dim=-1)[1] opp_labels = (batch_box_preds[..., -1] > 0) ^", "in zip(cgs, cas)] return torch.cat([xt, yt, zt, wt, lt, ht,", "super().__init__() self.code_size = code_size @staticmethod def encode_np(boxes, anchors): \"\"\" :param", "np.sqrt(la ** 2 + wa ** 2) # 4.3 xt", "dim=-1) def decode_with_head_direction_torch(self, box_preds, anchors, dir_cls_preds, num_dir_bins, dir_offset, dir_limit_offset, use_binary_dir_classifier=False):", "-1) if use_binary_dir_classifier: dir_labels = torch.max(dir_cls_preds, dim=-1)[1] opp_labels = (batch_box_preds[...,", "*cgs = torch.split(boxes, 1, dim=-1) za = za + ha", "anchors): \"\"\" :param boxes: (N, 7 + ?) x, y,", "1, dim=-1) za = za + ha / 2 zg", "*cgs], dim=-1) def decode_with_head_direction_torch(self, box_preds, anchors, dir_cls_preds, num_dir_bins, dir_offset, dir_limit_offset,", "cas)] return np.concatenate([xt, yt, zt, wt, lt, ht, rt, *cts],", "lg = torch.exp(lt) * la wg = torch.exp(wt) * wa", "xt, yt, zt, wt, lt, ht, rt, *cts = np.split(box_encodings,", "= np.exp(lt) * la wg = np.exp(wt) * wa hg", "r, custom values, z is the box center in z-axis", "- ra cts = [g - a for g, a", "yt * diagonal + ya zg = zt * ha", "+ dir_offset + period * dir_labels.to(batch_box_preds.dtype) return batch_box_preds if __name__", "xt * diagonal + xa yg = yt * diagonal", "dim=-1) @staticmethod def decode_torch(box_encodings, anchors): \"\"\" :param box_encodings: (N, 7", "torch.log(wg / wa) ht = torch.log(hg / ha) rt =", "2) # 4.3 xt = (xg - xa) / diagonal", "+ hg / 2 diagonal = torch.sqrt(la ** 2 +", "dim=-1)[1] opp_labels = (batch_box_preds[..., -1] > 0) ^ dir_labels.byte() batch_box_preds[...,", "torch.log(hg / ha) rt = rg - ra cts =", "dir_offset, dir_limit_offset, use_binary_dir_classifier=False): \"\"\" :param box_preds: (batch_size, N, 7 +", "t, a in zip(cts, cas)] return np.concatenate([xg, yg, zg, wg,", "N, 7 + ?), x, y, z, w, l, h,", "* ha rg = rt + ra zg = zg", "*cas = torch.split(anchors, 1, dim=-1) xt, 
yt, zt, wt, lt,", "2 za = za + ha / 2 diagonal =", "dir_labels = torch.max(dir_cls_preds, dim=-1)[1] period = (2 * np.pi /", ":param box_encodings: (N, 7 + ?) x, y, z, w,", "need to convert box_encodings to z-bottom format za = za", "2 + wa ** 2) # 4.3 xt = (xg", "as np import torch from . import common_utils class ResidualCoder(object):", "wa, la, ha, ra, *cas = torch.split(anchors, 1, dim=-1) xt,", "t, a in zip(cts, cas)] return torch.cat([xg, yg, zg, wg,", "* ha + za lg = np.exp(lt) * la wg", "*cts = np.split(box_encodings, box_ndim, axis=-1) # need to convert box_encodings", "<gh_stars>1-10 import numpy as np import torch from . import", "ha, ra, *cas = np.split(anchors, box_ndim, axis=-1) xg, yg, zg,", "2 diagonal = np.sqrt(la ** 2 + wa ** 2)", "+= torch.where( opp_labels, torch.tensor(np.pi).type_as(batch_box_preds), torch.tensor(0.0).type_as(batch_box_preds) ) else: dir_labels = torch.max(dir_cls_preds,", "= torch.sqrt(la ** 2 + wa ** 2) xt =", "* diagonal + xa yg = yt * diagonal +", "anchors: (N, 7 + ?) 
:return: \"\"\" box_ndim = anchors.shape[-1]", "/ ha lt = torch.log(lg / la) wt = torch.log(wg", "(2 * np.pi / num_dir_bins) dir_rot = common_utils.limit_period_torch( batch_box_preds[..., 6]", "** 2) # 4.3 xt = (xg - xa) /", "x, y, z, w, l, h, r, custom values, z", "wt, lt, ht, rt, *cts = np.split(box_encodings, box_ndim, axis=-1) #", "dir_cls_preds = dir_cls_preds.view(box_preds.shape[0], box_preds.shape[1], -1) if use_binary_dir_classifier: dir_labels = torch.max(dir_cls_preds,", "diagonal + xa yg = yt * diagonal + ya", "ya zg = zt * ha + za lg =", "h, r, custom values, z is the box center in", "+ a for t, a in zip(cts, cas)] return np.concatenate([xg,", "dim=-1) za = za + ha / 2 zg =", "rt, *cts = torch.split(box_encodings, 1, dim=-1) za = za +", "zip(cts, cas)] return np.concatenate([xg, yg, zg, wg, lg, hg, rg,", "- a for g, a in zip(cgs, cas)] return np.concatenate([xt,", "to z-bottom format za = za + ha / 2", "= (batch_box_preds[..., -1] > 0) ^ dir_labels.byte() batch_box_preds[..., -1] +=", "torch.where( opp_labels, torch.tensor(np.pi).type_as(batch_box_preds), torch.tensor(0.0).type_as(batch_box_preds) ) else: dir_labels = torch.max(dir_cls_preds, dim=-1)[1]", "* wa hg = np.exp(ht) * ha rg = rt", "= np.sqrt(la ** 2 + wa ** 2) xg =", "ha / 2 zg = zg + hg / 2", "+ ya zg = zt * ha + za lg", "lt = torch.log(lg / la) wt = torch.log(wg / wa)", "def encode_torch(boxes, anchors): \"\"\" :param boxes: (N, 7 + ?)", "convert boxes to z-center format zg = zg + hg", "= np.log(hg / ha) rt = rg - ra cts", "/ 2 cgs = [t + a for t, a", "not None: dir_cls_preds = dir_cls_preds.view(box_preds.shape[0], box_preds.shape[1], -1) if use_binary_dir_classifier: dir_labels", "y, z, w, l, h, r, custom values, z is", "np.concatenate([xg, yg, zg, wg, lg, hg, rg, *cgs], axis=-1) @staticmethod", "batch_box_preds = self.decode_torch(box_preds, anchors) if dir_cls_preds is not None: dir_cls_preds", "np.split(anchors, box_ndim, axis=-1) xt, yt, zt, wt, lt, ht, rt,", "2 + wa ** 2) xt = (xg 
- xa)", "dim=-1) za = za + ha / 2 diagonal =", "yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1,", "zg, wg, lg, hg, rg, *cgs], axis=-1) @staticmethod def encode_torch(boxes,", "zg = zg - hg / 2 cgs = [t", "?) :return: \"\"\" xa, ya, za, wa, la, ha, ra,", "g, a in zip(cgs, cas)] return np.concatenate([xt, yt, zt, wt,", "zt, wt, lt, ht, rt, *cts], dim=-1) @staticmethod def decode_torch(box_encodings,", "ra zg = zg - hg / 2 cgs =", "ha # 1.6 lt = np.log(lg / la) wt =", "np.log(lg / la) wt = np.log(wg / wa) ht =", "xa yg = yt * diagonal + ya zg =", "rg = rt + ra zg = zg - hg", "*cgs], axis=-1) @staticmethod def encode_torch(boxes, anchors): \"\"\" :param boxes: (N,", "to convert box_encodings to z-bottom format za = za +", "hg / 2 za = za + ha / 2", "num_anchors_per_locations*2) :return: \"\"\" batch_box_preds = self.decode_torch(box_preds, anchors) if dir_cls_preds is", "[g - a for g, a in zip(cgs, cas)] return", "la wg = torch.exp(wt) * wa hg = torch.exp(ht) *", "= np.log(wg / wa) ht = np.log(hg / ha) rt", "torch.cat([xt, yt, zt, wt, lt, ht, rt, *cts], dim=-1) @staticmethod", "box_ndim, axis=-1) # need to convert boxes to z-center format", "= torch.exp(wt) * wa hg = torch.exp(ht) * ha rg", "torch.split(anchors, 1, dim=-1) xt, yt, zt, wt, lt, ht, rt,", "torch.exp(ht) * ha rg = rt + ra zg =", "+ za lg = torch.exp(lt) * la wg = torch.exp(wt)", "= (zg - za) / ha lt = torch.log(lg /", "+ ha / 2 diagonal = np.sqrt(la ** 2 +", "wt = torch.log(wg / wa) ht = torch.log(hg / ha)", "za) / ha lt = torch.log(lg / la) wt =", "+ ?), x, y, z, w, l, h, r, custom", "zg, wg, lg, hg, rg, *cgs], dim=-1) def decode_with_head_direction_torch(self, box_preds,", "lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1) za =", "def decode_with_head_direction_torch(self, box_preds, anchors, dir_cls_preds, num_dir_bins, dir_offset, dir_limit_offset, use_binary_dir_classifier=False): \"\"\"", ") batch_box_preds[..., 6] = dir_rot + dir_offset + period *", "np.pi / num_dir_bins) dir_rot 
= common_utils.limit_period_torch( batch_box_preds[..., 6] - dir_offset,", "np.exp(wt) * wa hg = np.exp(ht) * ha rg =", "ra, *cas = torch.split(anchors, 1, dim=-1) xt, yt, zt, wt,", "zg, wg, lg, hg, rg, *cgs = np.split(boxes, box_ndim, axis=-1)", "/ num_dir_bins) dir_rot = common_utils.limit_period_torch( batch_box_preds[..., 6] - dir_offset, dir_limit_offset,", "torch.tensor(0.0).type_as(batch_box_preds) ) else: dir_labels = torch.max(dir_cls_preds, dim=-1)[1] period = (2", "decode_with_head_direction_torch(self, box_preds, anchors, dir_cls_preds, num_dir_bins, dir_offset, dir_limit_offset, use_binary_dir_classifier=False): \"\"\" :param", "ya, za, wa, la, ha, ra, *cas = torch.split(anchors, 1,", "= self.decode_torch(box_preds, anchors) if dir_cls_preds is not None: dir_cls_preds =", "anchors: (batch_size, N, 7 + ?), x, y, z, w,", "^ dir_labels.byte() batch_box_preds[..., -1] += torch.where( opp_labels, torch.tensor(np.pi).type_as(batch_box_preds), torch.tensor(0.0).type_as(batch_box_preds) )", "box_ndim, axis=-1) # need to convert box_encodings to z-bottom format", "= dir_rot + dir_offset + period * dir_labels.to(batch_box_preds.dtype) return batch_box_preds", "= torch.sqrt(la ** 2 + wa ** 2) xg =", "format zg = zg + hg / 2 za =", "torch.sqrt(la ** 2 + wa ** 2) xg = xt", "ra, *cas = torch.split(anchors, 1, dim=-1) xg, yg, zg, wg,", "/ ha) rt = rg - ra cts = [g", "= np.split(anchors, box_ndim, axis=-1) xt, yt, zt, wt, lt, ht,", "hg = torch.exp(ht) * ha rg = rt + ra", "W, num_anchors_per_locations*2) :return: \"\"\" batch_box_preds = self.decode_torch(box_preds, anchors) if dir_cls_preds", "import common_utils class ResidualCoder(object): def __init__(self, code_size=7): super().__init__() self.code_size =", "@staticmethod def encode_np(boxes, anchors): \"\"\" :param boxes: (N, 7 +", "wa ** 2) xt = (xg - xa) / diagonal", "return torch.cat([xt, yt, zt, wt, lt, ht, rt, *cts], dim=-1)", "lg, hg, rg, *cgs], dim=-1) def decode_with_head_direction_torch(self, box_preds, 
anchors, dir_cls_preds,", "hg, rg, *cgs = torch.split(boxes, 1, dim=-1) za = za", "hg, rg, *cgs], dim=-1) def decode_with_head_direction_torch(self, box_preds, anchors, dir_cls_preds, num_dir_bins,", "box_preds.shape[1], -1) if use_binary_dir_classifier: dir_labels = torch.max(dir_cls_preds, dim=-1)[1] opp_labels =", "1, dim=-1) za = za + ha / 2 diagonal", "lg, hg, rg, *cgs], axis=-1) @staticmethod def encode_torch(boxes, anchors): \"\"\"", "7 + ?) :return: \"\"\" xa, ya, za, wa, la,", "for g, a in zip(cgs, cas)] return np.concatenate([xt, yt, zt,", "+ wa ** 2) xt = (xg - xa) /", "/ wa) ht = np.log(hg / ha) rt = rg", "z is the box center in z-axis :param anchors: (batch_size,", "- dir_offset, dir_limit_offset, period ) batch_box_preds[..., 6] = dir_rot +" ]
[ "acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b))) diff_ang = 2 * acos_val return", "pt_idx[0]] = results[idx_pos] return res_tensor class AverageMeter(object): \"\"\"Computes and stores", "+ n) self.avg += val * n / (self.count +", "(self.count + n) else: self.avg *= self.count / (self.count +", "= -torch.exp(y).cumsum(0)[[2, 1, 0]].unsqueeze(0) if not all(z[0][:-1] <= z[0][1:]): print(z)", "/ bd.norm_const def build_bd_lookup_table(table_type, options, path=None): \"\"\" Builds a lookup", "= np.arccos(np.dot(quat_a, quat_b)) # diff_ang = 2 * np.min([acos_val, np.pi", "cr z = sy * cp * cr - cy", "with open(path, \"rb\") as dillfile: (serialized_type, serialized_options, res_table, coords) \\", "as dillfile: dill.dump((options, res_table), dillfile) return res_table def _compute_bd_lookup_table(coords, nc_options):", "path = os.path.dirname(__file__) \\ + \"/../precomputed/lookup_{}.dill\".format(config_hash) # Load existing table", "0.5) sp = math.sin(math.radians(yaw) * 0.5) cr = math.cos(math.radians(pitch) *", "* np.log(np.pi)) \\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[0]])) \\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[1]]))", "coords[cur_pt_idx[0]], log_norm_const)) return log_norm_const point_indices = list(itertools.combinations_with_replacement( range(0, num_points), 3))", "0 self.count = 0 def update(self, val, n=1): self.last_val =", "x: 0.0, lambda x: 2. 
* np.pi, # phi2 lambda", "serialized_options, res_table, coords) \\ = dill.load(dillfile) hash_obj = hashlib.sha256() hash_obj.update(serialized_type)", "1, 0]].unsqueeze(0) return z def vec_to_bingham_z(y): z = -torch.exp(y).cumsum(0)[[2, 1,", "quat / np.linalg.norm(quat) return quat def radians(degree_tensor): \"\"\" Method to", "build_bd_lookup_table(table_type, options, path=None): \"\"\" Builds a lookup table for interpolating", "np.sin(phi2) return integrand_transformed(np.array([ sp1 * sp2 * np.sin(phi3), sp1 *", "radian_tensor def generate_coordinates(coords): \"\"\" A function that returns all possible", "is None: integral_options = {\"epsrel\": 1e-4, \"epsabs\": 1e-4} bd =", "self.avg += val * n / (self.count + n) self.count", "np.pi, # phi1 **integral_options ) return eaad_int[0] / bd.norm_const def", "building a new one. Arguments: options: Dict cotaining table options.", "Mises kappa parameters for roll, pitch, yaw. integral_options: Options to", "y = coords.reshape(-1, 1).repeat(1, len(coords)).flatten().repeat(len(coords)) z = coords.reshape(-1, 1).flatten().repeat(len(coords)*len(coords)) return", "\"wb\") as dillfile: dill.dump((table_type, options, res_table, coords), dillfile) else: sys.exit(\"Unknown", "term does not contain the # normalization constant. At the", "results[idx_pos] res_tensor[pt_idx[1], pt_idx[0], pt_idx[2]] = results[idx_pos] res_tensor[pt_idx[1], pt_idx[2], pt_idx[0]] =", "a new one. Arguments: table_type: Type of lookup table used.", "constant. At the end, the result of the integration is", "== config_hash, \\ \"Serialized lookup table does not match given", "possible triples \"\"\" x = coords.reshape(-1, 1).repeat(1, len(coords) * len(coords)).flatten()", "on the result as the Bingham normalization constant is agnostic", "for idx_pos, pt_idx in enumerate(point_indices): res_tensor[pt_idx[0], pt_idx[1], pt_idx[2]] = results[idx_pos]", "tuple. 
For the Bingham case, the tuple containins: table_type (str):", "np.min([acos_val, np.pi - acos_val]) acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b))) diff_ang =", "num_points = len(coords) pool = Pool(max(cpu_count()//2, 1)) def nc_wrapper(idx): pt_idx", "the lookup table. res_tensor (numpy.ndarray): The actual lookup table data.", "2 * acos_val return diff_ang if integral_options is None: integral_options", "results[idx_pos] res_tensor[pt_idx[2], pt_idx[0], pt_idx[1]] = results[idx_pos] res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] =", "new one. Arguments: table_type: Type of lookup table used. May", "dill.load(dillfile) hash_obj = hashlib.sha256() hash_obj.update(serialized_type) hash_obj.update(dill.dumps(serialized_options)) file_config_hash = hash_obj.hexdigest() assert", "angles in degree format to radians. Arguments: degree_tensor (torch.Tensor): Tensor", "in the format expected by the manstats BinghamDistribution class. integral_options:", "= cy * cp * cr + sy * sp", "acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b))) diff_ang = 2.0 * acos_val return", "])) * (sp1 ** 2.) * sp2 eaad_int = integrate.tplquad(", "\"\"\" hash_obj = hashlib.sha256() hash_obj.update(table_type.encode('utf-8')) hash_obj.update(dill.dumps(options)) config_hash = hash_obj.hexdigest() if", "angles in degree format. 
Returns: radian_tensor (torch.Tensor): Tensor consisting of", "class assumes that no new keys are added during the", "** 3) \\ * scipy.special.iv(0, param_kappa[0]) \\ * scipy.special.iv(0, param_kappa[1])", "* math.pi return radian_tensor def generate_coordinates(coords): \"\"\" A function that", "does not match given type & options.\" else: coords =", "end, the result of the integration is # divided by", "assumes that no new keys are added during the computation.", "phi2, phi3): sp1 = np.sin(phi1) sp2 = np.sin(phi2) return integrand_transformed(np.array([", "\"wb\") as dillfile: dill.dump((options, res_table), dillfile) return res_table def _compute_bd_lookup_table(coords,", "res_table) \\ = dill.load(dillfile) hash_obj = hashlib.sha256() hash_obj.update(dill.dumps(serialized_options)) file_config_hash =", "one. if os.path.exists(path): with open(path, \"rb\") as dillfile: (serialized_type, serialized_options,", "sy = math.sin(math.radians(roll) * 0.5) cp = math.cos(math.radians(yaw) * 0.5)", "dillfile) return res_table def _compute_bd_lookup_table(coords, nc_options): num_points = len(coords) pool", "= point_indices[idx] # Indexing pt_idx in the order 2,1,0 vs.", "torch tensor of angles in degree format to radians. Arguments:", "sy * sp * cr y = sy * cp", "coordinate of possible triples y: the second coordinate of possible", "x: 2. * np.pi, # phi2 lambda x, y: 0.0,", "cy * cp * cr + sy * sp *", "quat = quat / np.linalg.norm(quat) return quat def radians(degree_tensor): \"\"\"", "(str): options (dict): The options used to generate the lookup", "self.avg = 0 self.count = 0 def update(self, val, n=1):", "= math.sin(math.radians(pitch) * 0.5) w = cy * cp *", "\"\"\" Expected Absolute Angular Deviation of Bingham Random Vector Arguments:", "_compute_vm_lookup_table(coords): num_points = len(coords) pool = Pool() def nc_wrapper(idx): cur_pt_idx", "sr quat = np.array([w, x, y, z]) quat = quat", "from dill serialized file. Returns a table specific tuple. 
For", "coords[pt_idx[0]], 0.]), \"numerical\", nc_options) print(\"Computing NC for Z=[{}, {}, {},", "y, z def ensure_dir_exists(path): \"\"\" Checks if a directory exists", "parameters for roll, pitch, yaw. integral_options: Options to pass on", "diff_ang = 2 * np.min([acos_val, np.pi - acos_val]) acos_val =", "pt_idx[2], pt_idx[0]] = results[idx_pos] res_tensor[pt_idx[2], pt_idx[0], pt_idx[1]] = results[idx_pos] res_tensor[pt_idx[2],", "np.arccos(np.dot(quat_a, quat_b)) # diff_ang = 2 * np.min([acos_val, np.pi -", "def _compute_vm_lookup_table(coords): num_points = len(coords) pool = Pool() def nc_wrapper(idx):", "res_table), dillfile) return res_table def _compute_bd_lookup_table(coords, nc_options): num_points = len(coords)", "num_points = len(coords) pool = Pool() def nc_wrapper(idx): cur_pt_idx =", "num_points > 1, \\ \"Grid must have more than one", "y: np.pi, # phi1 **integral_options ) return eaad_int[0] / bd.norm_const", "dict must contain a key \"coords\" which is a numpy", "= hash_obj.hexdigest() assert file_config_hash == config_hash, \\ \"Serialized lookup table", "found.\" with open(path, \"rb\") as dillfile: return dill.load(dillfile) def eaad_von_mises(kappas,", "exists, it is loaded and returned instead of building a", "possible triples z the third coordinate of possible triples \"\"\"", "of Bingham Random Vector Arguments: kappas: Von Mises kappa parameters", "nc_options) print(\"Computing NC for Z=[{}, {}, {}, 0.0]: {}\".format( coords[pt_idx[2]],", "* n / (self.count + n) self.count += n self.last_val", "<reponame>jainajinkya/deep_bingham \"\"\" Utilities for learning pipeline.\"\"\" from __future__ import print_function", "Bingham dispersion parameter in the format expected by the manstats", "the eaad and the bingham normalization constant. \"\"\" def aad(quat_a,", "- cy * sp * sr quat = np.array([w, x,", "points per axis. 
(lbound, rbound) = options[\"bounds\"] num_points = options[\"num_points\"]", "interpolating the bingham normalization constant. If a lookup table with", "* sp * sr x = cy * cp *", "y: the second coordinate of possible triples z the third", "or 'nonuniform' options: Dict cotaining type specific options. If type", "for roll, pitch, yaw. integral_options: Options to pass on to", "pool.map(nc_wrapper, range(len(point_indices))) res_tensor = -np.ones((num_points, num_points, num_points)) for idx_pos, pt_idx", "coords[cur_pt_idx[0]])) \\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[1]])) \\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[2]])) print(\"Computing", "math.cos(math.radians(pitch) * 0.5) sr = math.sin(math.radians(pitch) * 0.5) w =", "of coordinates Returns: x: the first coordinate of possible triples", "x, y: 2. * np.pi, # phi1 **integral_options ) return", "format to radians. Arguments: degree_tensor (torch.Tensor): Tensor consisting of angles", "cr - cy * sp * sr quat = np.array([w,", "open(path, \"wb\") as dillfile: dill.dump((options, res_table), dillfile) return res_table def", "sy * cp * sr + cy * sp *", "\"\"\" Loads lookup table from dill serialized file. Returns a", "(np.pi ** 3) \\ * scipy.special.iv(0, param_kappa[0]) \\ * scipy.special.iv(0,", "# Number of points per axis. (lbound, rbound) = options[\"bounds\"]", "1e-7} coords = options[\"coords\"] res_table = _compute_bd_lookup_table(coords, nc_options) with open(path,", "* np.min([acos_val, np.pi - acos_val]) acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b))) diff_ang", "+= val[key] * n / (self.count + n) else: self.avg", "+ n) else: self.avg *= self.count / (self.count + n)", "= 0 self.avg = 0 self.count = 0 def update(self,", "the Bingham normalization constant is agnostic to it. # However,", "np.eye(4), bingham_z, {\"norm_const_mode\": \"numerical\", \"norm_const_options\": integral_options} ) def integrand_transformed(x): #", "computation time. 
# # TODO: Make pymanstats choose best order", "/ (self.count + n) else: self.avg *= self.count / (self.count", "the Bingham case, the tuple containins: table_type (str): options (dict):", "= Number of points per dimension. If type is \"nonuniform\"", "coords[cur_pt_idx[2]])) print(\"Computing NC for kappas=[{}, {}, {}]: {}\".format( coords[cur_pt_idx[2]], coords[cur_pt_idx[1]],", "The options used to generate the lookup table. res_tensor (numpy.ndarray):", "results[idx_pos] res_tensor[pt_idx[0], pt_idx[2], pt_idx[1]] = results[idx_pos] res_tensor[pt_idx[1], pt_idx[0], pt_idx[2]] =", "import third_party.deep_bingham.bingham_distribution as ms import math import numpy as np", "dill.load(dillfile) def eaad_von_mises(kappas, integral_options=None): \"\"\" Expected Absolute Angular Deviation of", "it otherwise. \"\"\" if not os.path.exists(path): os.makedirs(path) def load_lookup_table(path): \"\"\"", "contains: options (dict): The options used to generate the lookup", "key in val: self.avg[key] *= self.count / (self.count + n)", "Z=[{}, {}, {}, 0.0]: {}\".format( coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], norm_const)) return", "otherwise. \"\"\" if not os.path.exists(path): os.makedirs(path) def load_lookup_table(path): \"\"\" Loads", "np import os import scipy import scipy.integrate as integrate import", "import cpu_count def convert_euler_to_quaternion(roll, yaw, pitch): \"\"\"Converts roll, yaw, pitch", "def integrand(phi1, phi2, phi3): sp1 = np.sin(phi1) sp2 = np.sin(phi2)", "\\ + \"/../precomputed/lookup_{}.dill\".format(config_hash) # Load existing table or create new", "vec_to_bingham_z_many(y): z = -torch.exp(y).cumsum(1)[:, [2, 1, 0]].unsqueeze(0) return z def", "Loads lookup table from dill serialized file. 
Returns a table", "& options.\" elif table_type == \"uniform\": # Number of points", "vec_to_bingham_z(y): z = -torch.exp(y).cumsum(0)[[2, 1, 0]].unsqueeze(0) if not all(z[0][:-1] <=", "lambda x: 0.0, lambda x: np.pi, # phi2 lambda x,", "= math.cos(math.radians(roll) * 0.5) sy = math.sin(math.radians(roll) * 0.5) cp", "= 2 * acos_val return diff_ang if integral_options is None:", "x, y: np.pi, # phi1 **integral_options ) return eaad_int[0] /", "as Pool from pathos.multiprocessing import cpu_count def convert_euler_to_quaternion(roll, yaw, pitch):", "coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], norm_const)) return norm_const point_indices = list(itertools.combinations_with_replacement( range(0,", "- sy * sp * cr y = sy *", "and 1d integration which is why the order matters for", "actual computation time. # # TODO: Make pymanstats choose best", "in enumerate(point_indices): res_tensor[pt_idx[0], pt_idx[1], pt_idx[2]] = results[idx_pos] res_tensor[pt_idx[0], pt_idx[2], pt_idx[1]]", "res_table def build_vm_lookup_table(options, path=None): \"\"\" Builds a lookup table for", "matters for the # actual computation time. # # TODO:", "*= self.count / (self.count + n) self.avg += val *", "it. # However, the numpy integration that is used to", "Load existing table or create new one. if os.path.exists(path): with", "as dillfile: (serialized_options, res_table) \\ = dill.load(dillfile) hash_obj = hashlib.sha256()", "- acos_val]) acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b))) diff_ang = 2 *", "to a quaternion. \"\"\" # roll (z), yaw (y), pitch", "result of the integration is # divided by it. return", "math.degrees(phi1), math.degrees(phi2), math.degrees(phi3) )) eaad_int = integrate.tplquad( integrand_aad, 0.0, 2.0", "format. 
Returns: radian_tensor (torch.Tensor): Tensor consisting of angles in radian", "\"epsabs\": 1e-2} param_mu = np.array([0., 0., 0.]) # radians quat_mu", "= options[\"bounds\"] num_points = options[\"num_points\"] assert num_points > 1, \\", "table_type (str): options (dict): The options used to generate the", "0 self.avg = 0 self.count = 0 def update(self, val,", "placing the file in the precomputed folder. \"\"\" hash_obj =", "for Z=[{}, {}, {}, 0.0]: {}\".format( coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], norm_const))", "\\ * np.exp(np.dot(x, np.dot(np.diag(bingham_z), x))) def integrand(phi1, phi2, phi3): sp1", "in radian format. \"\"\" radian_tensor = degree_tensor/180 * math.pi return", "data. coords (numpy.ndarray): Coordinates at which lookup table was evaluated.", "for key in val: self.avg[key] *= self.count / (self.count +", "as dillfile: return dill.load(dillfile) def eaad_von_mises(kappas, integral_options=None): \"\"\" Expected Absolute", "hash based on the options and to use this for", "that is used to compute it, combines # numerical 2d", "* np.exp(param_kappa[2] * np.cos(phi3)) \\ * aad(quat_mu, convert_euler_to_quaternion( math.degrees(phi1), math.degrees(phi2),", "has no impact # on the result as the Bingham", "1e-3, \"epsabs\": 1e-7} coords = options[\"coords\"] res_table = _compute_bd_lookup_table(coords, nc_options)", "1, \\ \"Grid must have more than one point per", "0.0, lambda x, y: 2. * np.pi, # phi1 **integral_options", "sp1 * sp2 * np.sin(phi3), sp1 * sp2 * np.cos(phi3),", "integrand_transformed(x): # To avoid unnecessary divisions, this term does not", "normalization constant is agnostic to it. # However, the numpy", "the precomputed folder. 
\"\"\" hash_obj = hashlib.sha256() hash_obj.update(table_type.encode('utf-8')) hash_obj.update(dill.dumps(options)) config_hash", "\"\"\" Checks if a directory exists and creates it otherwise.", "file_config_hash = hash_obj.hexdigest() assert file_config_hash == config_hash, \\ \"Serialized lookup", "0: self.avg = copy.deepcopy(val) else: for key in val: self.avg[key]", "is \"uniform\" this dict must contain: \"bounds\" = Tuple (lower_bound,", "(serialized_options, res_table) \\ = dill.load(dillfile) hash_obj = hashlib.sha256() hash_obj.update(dill.dumps(serialized_options)) file_config_hash", "log_norm_const)) return log_norm_const point_indices = list(itertools.combinations_with_replacement( range(0, num_points), 3)) results", "integral_options is None: integral_options = {\"epsrel\": 1e-4, \"epsabs\": 1e-4} bd", "2. * np.pi, # phi1 **integral_options ) return eaad_int[0]/direct_norm_const def", "lambda x: np.pi, # phi2 lambda x, y: 0.0, lambda", "and placing the file in the precomputed folder. \"\"\" hash_obj", "math import numpy as np import os import scipy import", "coords[pt_idx[1]], coords[pt_idx[0]], norm_const)) return norm_const point_indices = list(itertools.combinations_with_replacement( range(0, num_points),", "(torch.Tensor): Tensor consisting of angles in radian format. \"\"\" radian_tensor", "* 0.5) sp = math.sin(math.radians(yaw) * 0.5) cr = math.cos(math.radians(pitch)", "\"\"\" Method to convert a torch tensor of angles in", "z def ensure_dir_exists(path): \"\"\" Checks if a directory exists and", "To avoid unnecessary divisions, this term does not contain the", "return diff_ang if integral_options is None: integral_options = {\"epsrel\": 1e-2,", "0.0, lambda x: 2. 
* np.pi, # phi2 lambda x,", "as dillfile: dill.dump((table_type, options, res_table, coords), dillfile) else: sys.exit(\"Unknown lookup", "Bingham Random Vector Arguments: kappas: Von Mises kappa parameters for", "z = -torch.exp(y).cumsum(0)[[2, 1, 0]].unsqueeze(0) if not all(z[0][:-1] <= z[0][1:]):", "from pathos.multiprocessing import cpu_count def convert_euler_to_quaternion(roll, yaw, pitch): \"\"\"Converts roll,", "table with the given options already exists, it is loaded", "\\ * scipy.special.iv(0, param_kappa[2]) def integrand_aad(phi1, phi2, phi3): return np.exp(param_kappa[0]", "create new one. if os.path.exists(path): with open(path, \"rb\") as dillfile:", "radian_tensor = degree_tensor/180 * math.pi return radian_tensor def generate_coordinates(coords): \"\"\"", "\"Lookup table file not found.\" with open(path, \"rb\") as dillfile:", "phi3): return np.exp(param_kappa[0] * np.cos(phi1)) \\ * np.exp(param_kappa[1] * np.cos(phi2))", "import os import scipy import scipy.integrate as integrate import scipy.special", "the numpy integration that is used to compute it, combines", "of numbers. For the dict, this class assumes that no", "returns all possible triples of coords Parameters: coords: a numpy", "if integral_options is None: integral_options = {\"epsrel\": 1e-2, \"epsabs\": 1e-2}", "z]) quat = quat / np.linalg.norm(quat) return quat def radians(degree_tensor):", "param_mu = np.array([0., 0., 0.]) # radians quat_mu = convert_euler_to_quaternion(", "+ n) self.avg[key] += val[key] * n / (self.count +", "serialized file. Returns a table specific tuple. For the Bingham", "of coords Parameters: coords: a numpy array of coordinates Returns:", "cy * sp * cr z = sy * cp", "building a new one. Arguments: table_type: Type of lookup table", "sp1 * sp2 * np.cos(phi3), sp1 * np.cos(phi2), np.cos(phi1) ]))", "in the precomputed folder. 
\"\"\" hash_obj = hashlib.sha256() hash_obj.update(dill.dumps(options)) config_hash", "lambda x, y: np.pi, # phi1 **integral_options ) return eaad_int[0]", "options. If type is \"uniform\" this dict must contain: \"bounds\"", "The default is to create a hash based on the", "by it. return aad(x, bd.mode) \\ * np.exp(np.dot(x, np.dot(np.diag(bingham_z), x)))", "quat def radians(degree_tensor): \"\"\" Method to convert a torch tensor", "the result as the Bingham normalization constant is agnostic to", "new one. if os.path.exists(path): with open(path, \"rb\") as dillfile: (serialized_type,", "format. \"\"\" radian_tensor = degree_tensor/180 * math.pi return radian_tensor def", "tuple containins: table_type (str): options (dict): The options used to", "torch from pathos.multiprocessing import ProcessingPool as Pool from pathos.multiprocessing import", "* n / (self.count + n) else: self.avg *= self.count", "> 1, \\ \"Grid must have more than one point", "lambda x, y: 0.0, lambda x, y: np.pi, # phi1", "eaad_int[0] / bd.norm_const def build_bd_lookup_table(table_type, options, path=None): \"\"\" Builds a", "order automatically. norm_const = ms.BinghamDistribution.normalization_constant( np.array( [coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], 0.]),", "second coordinate of possible triples z the third coordinate of", "for interpolating the bingham normalization constant. If a lookup table", "contain the # normalization constant. At the end, the result", "the # normalization constant. At the end, the result of", "# on the result as the Bingham normalization constant is", "_compute_bd_lookup_table(coords, nc_options): num_points = len(coords) pool = Pool(max(cpu_count()//2, 1)) def", "pt_idx in the order 2,1,0 vs. 
0,1,2 has no impact", "\\ \"Grid must have more than one point per dimension.\"", "0., 0.]) # radians quat_mu = convert_euler_to_quaternion( math.degrees(param_mu[0]), math.degrees(param_mu[1]), math.degrees(param_mu[2])", "which the interpolation is evaluated. path: absolute path for the", "np.log(scipy.special.iv(0, coords[cur_pt_idx[0]])) \\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[1]])) \\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[2]]))", "res_tensor = -np.ones((num_points, num_points, num_points)) for idx_pos, pt_idx in enumerate(point_indices):", "integral_options = {\"epsrel\": 1e-2, \"epsabs\": 1e-2} param_mu = np.array([0., 0.,", "manstats BinghamDistribution class. integral_options: Options to pass on to the", "(serialized_type, serialized_options, res_table, coords) \\ = dill.load(dillfile) hash_obj = hashlib.sha256()", "n=1): self.last_val = val n = float(n) if type(val) ==", "\"\"\" radian_tensor = degree_tensor/180 * math.pi return radian_tensor def generate_coordinates(coords):", "case, the tuple containins: table_type (str): options (dict): The options", "x: the first coordinate of possible triples y: the second", "* sr x = cy * cp * sr -", "the result of the integration is # divided by it.", "and creates it otherwise. \"\"\" if not os.path.exists(path): os.makedirs(path) def", "= cy * cp * sr - sy * sp", "* cr - cy * sp * sr quat =", "self.count == 0: self.avg = copy.deepcopy(val) else: for key in", "range(0, num_points), 3)) results = pool.map(nc_wrapper, range(len(point_indices))) res_tensor = -np.ones((num_points,", "the bingham normalization constant. 
If a lookup table with the", "dillfile: dill.dump((options, res_table), dillfile) return res_table def _compute_bd_lookup_table(coords, nc_options): num_points", "import print_function import copy import dill import hashlib import itertools", "acos_val = np.arccos(np.dot(quat_a, quat_b)) # diff_ang = 2 * np.min([acos_val,", "bd.norm_const def build_bd_lookup_table(table_type, options, path=None): \"\"\" Builds a lookup table", "combines # numerical 2d and 1d integration which is why", "Number of points per dimension. If type is \"nonuniform\" this", "3)) results = pool.map(nc_wrapper, range(len(point_indices))) res_tensor = -np.ones((num_points, num_points, num_points))", "dimension.\" nc_options = {\"epsrel\": 1e-3, \"epsabs\": 1e-7} coords = np.linspace(lbound,", "Make pymanstats choose best order automatically. norm_const = ms.BinghamDistribution.normalization_constant( np.array(", "res_tensor[pt_idx[0], pt_idx[2], pt_idx[1]] = results[idx_pos] res_tensor[pt_idx[1], pt_idx[0], pt_idx[2]] = results[idx_pos]", "= np.sin(phi1) sp2 = np.sin(phi2) return integrand_transformed(np.array([ sp1 * sp2", "n) self.avg += val * n / (self.count + n)", "= 0 self.count = 0 def update(self, val, n=1): self.last_val", "math.sin(math.radians(roll) * 0.5) cp = math.cos(math.radians(yaw) * 0.5) sp =", "array of coordinates Returns: x: the first coordinate of possible", "normalization constant. If a lookup table with the given options", "why the order matters for the # actual computation time.", "Angular Deviation of Bingham Random Vector Arguments: bingham_z: Bingham dispersion", "res_table, coords), dillfile) else: sys.exit(\"Unknown lookup table type\") return res_table", "self.last_val = val n = float(n) if type(val) == dict:", "param_kappa[1]) \\ * scipy.special.iv(0, param_kappa[2]) def integrand_aad(phi1, phi2, phi3): return", "Indexing pt_idx in the order 2,1,0 vs. 
0,1,2 has no", "direct_norm_const = 8.0 * (np.pi ** 3) \\ * scipy.special.iv(0,", "dispersion parameter in the format expected by the manstats BinghamDistribution", "degree format. Returns: radian_tensor (torch.Tensor): Tensor consisting of angles in", "Bingham normalization constant is agnostic to it. # However, the", "the computation. \"\"\" def __init__(self): self.last_val = 0 self.avg =", "len(coords)).flatten().repeat(len(coords)) z = coords.reshape(-1, 1).flatten().repeat(len(coords)*len(coords)) return x, y, z def", "create a hash based on the options and to use", "of possible triples z the third coordinate of possible triples", "build_vm_lookup_table(options, path=None): \"\"\" Builds a lookup table for interpolating the", "return quat def radians(degree_tensor): \"\"\" Method to convert a torch", "= results[idx_pos] return res_tensor class AverageMeter(object): \"\"\"Computes and stores the", "* 0.5) sr = math.sin(math.radians(pitch) * 0.5) w = cy", "not found.\" with open(path, \"rb\") as dillfile: return dill.load(dillfile) def", "already exists, it is loaded and returned instead of building", "pt_idx[1]] = results[idx_pos] res_tensor[pt_idx[1], pt_idx[0], pt_idx[2]] = results[idx_pos] res_tensor[pt_idx[1], pt_idx[2],", "it, combines # numerical 2d and 1d integration which is", "res_tensor (numpy.ndarray): The actual lookup table data. \"\"\" assert os.path.exists(path),", "integrator for computing the eaad and the bingham normalization constant.", "integral_options is None: integral_options = {\"epsrel\": 1e-2, \"epsabs\": 1e-2} param_mu", "one. if os.path.exists(path): with open(path, \"rb\") as dillfile: (serialized_options, res_table)", "phi3): sp1 = np.sin(phi1) sp2 = np.sin(phi2) return integrand_transformed(np.array([ sp1", "more than one point per dimension.\" nc_options = {\"epsrel\": 1e-3,", "which lookup table was evaluated. For the von Mises case,", "in the order 2,1,0 vs. 0,1,2 has no impact #", "interpolation is evaluated. 
path: absolute path for the lookup table", "coords[pt_idx[0]], norm_const)) return norm_const point_indices = list(itertools.combinations_with_replacement( range(0, num_points), 3))", "* np.cos(phi3)) \\ * aad(quat_mu, convert_euler_to_quaternion( math.degrees(phi1), math.degrees(phi2), math.degrees(phi3) ))", ") return eaad_int[0] / bd.norm_const def build_bd_lookup_table(table_type, options, path=None): \"\"\"", "sys import torch from pathos.multiprocessing import ProcessingPool as Pool from", "table data. \"\"\" assert os.path.exists(path), \"Lookup table file not found.\"", "lambda x, y: 2. * np.pi, # phi1 **integral_options )", "= np.arccos(np.abs(np.dot(quat_a, quat_b))) diff_ang = 2.0 * acos_val return diff_ang", "pt_idx[1]] = results[idx_pos] res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos] return res_tensor", "is None: integral_options = {\"epsrel\": 1e-2, \"epsabs\": 1e-2} param_mu =", "numpy integration that is used to compute it, combines #", "coordinates Returns: x: the first coordinate of possible triples y:", "np.linalg.norm(quat) return quat def radians(degree_tensor): \"\"\" Method to convert a", "= integrate.tplquad( integrand_aad, 0.0, 2.0 * np.pi, # phi3 lambda", "the coordinates at which the interpolation is evaluated. path: absolute", "np.array([0., 0., 0.]) # radians quat_mu = convert_euler_to_quaternion( math.degrees(param_mu[0]), math.degrees(param_mu[1]),", "_compute_bd_lookup_table(coords, nc_options) with open(path, \"wb\") as dillfile: dill.dump((table_type, options, res_table,", "a file name and placing the file in the precomputed", "loaded and returned instead of building a new one. Arguments:", "pool = Pool(max(cpu_count()//2, 1)) def nc_wrapper(idx): pt_idx = point_indices[idx] #", "are added during the computation. 
\"\"\" def __init__(self): self.last_val =", "have more than one point per dimension.\" nc_options = {\"epsrel\":", "At the end, the result of the integration is #", "= val n = float(n) if type(val) == dict: if", "\"\"\" assert os.path.exists(path), \"Lookup table file not found.\" with open(path,", "# phi3 lambda x: 0.0, lambda x: np.pi, # phi2", "val: self.avg[key] *= self.count / (self.count + n) self.avg[key] +=", "** 2.) * sp2 eaad_int = integrate.tplquad( integrand, 0.0, 2.0", "= np.array([w, x, y, z]) quat = quat / np.linalg.norm(quat)", "results[idx_pos] res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos] return res_tensor def vec_to_bingham_z_many(y):", "options already exists, it is loaded and returned instead of", "== 0: self.avg = copy.deepcopy(val) else: for key in val:", "the scipy integrator for computing the eaad and the bingham", "point_indices[idx] # Indexing pt_idx in the order 2,1,0 vs. 0,1,2", "is why the order matters for the # actual computation", "cr y = sy * cp * sr + cy", "= hashlib.sha256() hash_obj.update(table_type.encode('utf-8')) hash_obj.update(dill.dumps(options)) config_hash = hash_obj.hexdigest() if not path:", "that returns all possible triples of coords Parameters: coords: a", "and the bingham normalization constant. \"\"\" def aad(quat_a, quat_b): #", "* cp * sr - sy * sp * cr", "new one. Arguments: options: Dict cotaining table options. 
It must", "nc_wrapper(idx): pt_idx = point_indices[idx] # Indexing pt_idx in the order", "np.log(scipy.special.iv(0, coords[cur_pt_idx[1]])) \\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[2]])) print(\"Computing NC for kappas=[{},", "sys.exit(\"Unknown lookup table type\") return res_table def build_vm_lookup_table(options, path=None): \"\"\"", "\"\"\" def __init__(self): self.last_val = 0 self.avg = 0 self.count", "given type & options.\" else: coords = options[\"coords\"] res_table =", "the third coordinate of possible triples \"\"\" x = coords.reshape(-1,", "the bingham normalization constant. \"\"\" def aad(quat_a, quat_b): acos_val =", "= np.sin(phi2) return integrand_transformed(np.array([ sp1 * sp2 * np.sin(phi3), sp1", "sp1 * np.cos(phi2), np.cos(phi1) ])) * (sp1 ** 2.) *", "coords.reshape(-1, 1).repeat(1, len(coords) * len(coords)).flatten() y = coords.reshape(-1, 1).repeat(1, len(coords)).flatten().repeat(len(coords))", "instead of building a new one. Arguments: table_type: Type of", "= 2 * np.min([acos_val, np.pi - acos_val]) acos_val = np.arccos(np.abs(np.dot(quat_a,", "must contain: \"bounds\" = Tuple (lower_bound, upper_bound) representing bounds. \"num_points\"", "integral_options} ) def integrand_transformed(x): # To avoid unnecessary divisions, this", "point_indices[idx] log_norm_const = np.log(8.0) + (3. * np.log(np.pi)) \\ +", "* 0.5) w = cy * cp * cr +", "Mises case, it contains: options (dict): The options used to", "\\ * np.exp(param_kappa[1] * np.cos(phi2)) \\ * np.exp(param_kappa[2] * np.cos(phi3))", "von Mises case, it contains: options (dict): The options used", "y, z]) quat = quat / np.linalg.norm(quat) return quat def", "was evaluated. 
For the von Mises case, it contains: options", "hashlib import itertools import third_party.deep_bingham.bingham_distribution as ms import math import", "from __future__ import print_function import copy import dill import hashlib", "Absolute Angular Deviation of Bingham Random Vector Arguments: bingham_z: Bingham", "num_points, num_points)) for idx_pos, pt_idx in enumerate(point_indices): res_tensor[pt_idx[0], pt_idx[1], pt_idx[2]]", "for the lookup table (optional). The default is to create", "options, res_table, coords), dillfile) elif table_type == \"nonuniform\": nc_options =", "is used to compute it, combines # numerical 2d and", "def aad(quat_a, quat_b): # acos_val = np.arccos(np.dot(quat_a, quat_b)) # diff_ang", "The actual lookup table data. coords (numpy.ndarray): Coordinates at which", "\"/../precomputed/lookup_{}.dill\".format(config_hash) # Load existing table or create new one. if", "= ms.BinghamDistribution.normalization_constant( np.array( [coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], 0.]), \"numerical\", nc_options) print(\"Computing", "val, n=1): self.last_val = val n = float(n) if type(val)", "1e-2, \"epsabs\": 1e-2} param_mu = np.array([0., 0., 0.]) # radians", "if self.count == 0: self.avg = copy.deepcopy(val) else: for key", "averages over a numbers or dicts of numbers. For the", "np.cos(phi3), sp1 * np.cos(phi2), np.cos(phi1) ])) * (sp1 ** 2.)", "self.last_val = val def _compute_vm_lookup_table(coords): num_points = len(coords) pool =", "the dict, this class assumes that no new keys are", "at which lookup table was evaluated. For the von Mises", "is # divided by it. 
return aad(x, bd.mode) \\ *", "impact # on the result as the Bingham normalization constant", "Expected Absolute Angular Deviation of Bingham Random Vector Arguments: bingham_z:", "enumerate(point_indices): res_tensor[pt_idx[0], pt_idx[1], pt_idx[2]] = results[idx_pos] res_tensor[pt_idx[0], pt_idx[2], pt_idx[1]] =", "self.avg[key] *= self.count / (self.count + n) self.avg[key] += val[key]", "Dict cotaining type specific options. If type is \"uniform\" this", "the given options already exists, it is loaded and returned", "elif table_type == \"nonuniform\": nc_options = {\"epsrel\": 1e-3, \"epsabs\": 1e-7}", "table type\") return res_table def build_vm_lookup_table(options, path=None): \"\"\" Builds a", "= ms.BinghamDistribution( np.eye(4), bingham_z, {\"norm_const_mode\": \"numerical\", \"norm_const_options\": integral_options} ) def", "= results[idx_pos] return res_tensor def vec_to_bingham_z_many(y): z = -torch.exp(y).cumsum(1)[:, [2,", "\\ * scipy.special.iv(0, param_kappa[0]) \\ * scipy.special.iv(0, param_kappa[1]) \\ *", "compute it, combines # numerical 2d and 1d integration which", "representing the coordinates at which the interpolation is evaluated. path:", "open(path, \"rb\") as dillfile: (serialized_options, res_table) \\ = dill.load(dillfile) hash_obj", "return integrand_transformed(np.array([ sp1 * sp2 * np.sin(phi3), sp1 * sp2", "(numpy.ndarray): The actual lookup table data. coords (numpy.ndarray): Coordinates at", "Tuple (lower_bound, upper_bound) representing bounds. \"num_points\" = Number of points", "and returned instead of building a new one. Arguments: table_type:", "the von Mises case, it contains: options (dict): The options", "pitch): \"\"\"Converts roll, yaw, pitch to a quaternion. \"\"\" #", "lookup table data. 
\"\"\" assert os.path.exists(path), \"Lookup table file not", "# diff_ang = 2 * np.min([acos_val, np.pi - acos_val]) acos_val", "point_indices = list(itertools.combinations_with_replacement( range(0, num_points), 3)) results = pool.map(nc_wrapper, range(len(point_indices)))", "phi2, phi3): return np.exp(param_kappa[0] * np.cos(phi1)) \\ * np.exp(param_kappa[1] *", "nc_options = {\"epsrel\": 1e-3, \"epsabs\": 1e-7} coords = np.linspace(lbound, rbound,", "ProcessingPool as Pool from pathos.multiprocessing import cpu_count def convert_euler_to_quaternion(roll, yaw,", "coords: a numpy array of coordinates Returns: x: the first", "path: path = os.path.dirname(__file__) \\ + \"/../precomputed/lookup_{}.dill\".format(config_hash) # Load existing", "1).repeat(1, len(coords)).flatten().repeat(len(coords)) z = coords.reshape(-1, 1).flatten().repeat(len(coords)*len(coords)) return x, y, z", "actual lookup table data. coords (numpy.ndarray): Coordinates at which lookup", "a new one. Arguments: options: Dict cotaining table options. It", "dillfile: (serialized_options, res_table) \\ = dill.load(dillfile) hash_obj = hashlib.sha256() hash_obj.update(dill.dumps(serialized_options))", "hash_obj = hashlib.sha256() hash_obj.update(dill.dumps(serialized_options)) file_config_hash = hash_obj.hexdigest() assert file_config_hash ==", "dict must contain: \"bounds\" = Tuple (lower_bound, upper_bound) representing bounds.", "* np.cos(phi2), np.cos(phi1) ])) * (sp1 ** 2.) * sp2", "For the Bingham case, the tuple containins: table_type (str): options", "table was evaluated. 
For the von Mises case, it contains:", "phi2 lambda x, y: 0.0, lambda x, y: np.pi, #", "parameter in the format expected by the manstats BinghamDistribution class.", "roll (z), yaw (y), pitch (x) cy = math.cos(math.radians(roll) *", "* aad(quat_mu, convert_euler_to_quaternion( math.degrees(phi1), math.degrees(phi2), math.degrees(phi3) )) eaad_int = integrate.tplquad(", "def build_bd_lookup_table(table_type, options, path=None): \"\"\" Builds a lookup table for", "If type is \"nonuniform\" this dict must contain a key", "ms.BinghamDistribution.normalization_constant( np.array( [coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], 0.]), \"numerical\", nc_options) print(\"Computing NC", "triples of coords Parameters: coords: a numpy array of coordinates", "options[\"num_points\"] assert num_points > 1, \\ \"Grid must have more", "convert_euler_to_quaternion(roll, yaw, pitch): \"\"\"Converts roll, yaw, pitch to a quaternion.", "quat_b))) diff_ang = 2.0 * acos_val return diff_ang if integral_options", "2.0 * acos_val return diff_ang if integral_options is None: integral_options", "lookup table type\") return res_table def build_vm_lookup_table(options, path=None): \"\"\" Builds", "triples \"\"\" x = coords.reshape(-1, 1).repeat(1, len(coords) * len(coords)).flatten() y", "1, 0]].unsqueeze(0) if not all(z[0][:-1] <= z[0][1:]): print(z) return z", "quat_b): # acos_val = np.arccos(np.dot(quat_a, quat_b)) # diff_ang = 2", "hash_obj.hexdigest() assert file_config_hash == config_hash, \\ \"Serialized lookup table does", "0.]), \"numerical\", nc_options) print(\"Computing NC for Z=[{}, {}, {}, 0.0]:", "quat_b))) diff_ang = 2 * acos_val return diff_ang if integral_options", "table for interpolating the bingham normalization constant. If a lookup", "n) self.avg[key] += val[key] * n / (self.count + n)", "os.path.exists(path): os.makedirs(path) def load_lookup_table(path): \"\"\" Loads lookup table from dill", "# Load existing table or create new one. 
if os.path.exists(path):", "pitch to a quaternion. \"\"\" # roll (z), yaw (y),", "import dill import hashlib import itertools import third_party.deep_bingham.bingham_distribution as ms", "path=None): \"\"\" Builds a lookup table for interpolating the bingham", "return norm_const point_indices = list(itertools.combinations_with_replacement( range(0, num_points), 3)) results =", "to create a hash based on the options and to", "1e-3, \"epsabs\": 1e-7} coords = np.linspace(lbound, rbound, num_points) res_table =", "itertools import third_party.deep_bingham.bingham_distribution as ms import math import numpy as", "pitch (x) cy = math.cos(math.radians(roll) * 0.5) sy = math.sin(math.radians(roll)", "lookup table data. coords (numpy.ndarray): Coordinates at which lookup table", "scipy.integrate as integrate import scipy.special import sys import torch from", "and the bingham normalization constant. \"\"\" def aad(quat_a, quat_b): acos_val", "-np.ones((num_points, num_points, num_points)) for idx_pos, pt_idx in enumerate(point_indices): res_tensor[pt_idx[0], pt_idx[1],", "math.degrees(phi3) )) eaad_int = integrate.tplquad( integrand_aad, 0.0, 2.0 * np.pi,", "to radians. Arguments: degree_tensor (torch.Tensor): Tensor consisting of angles in", "numbers. For the dict, this class assumes that no new", "not match given type & options.\" else: coords = options[\"coords\"]", "lookup table (optional). The default is to create a hash", "arrays representing the coordinates at which the interpolation is evaluated.", "convert a torch tensor of angles in degree format to", "the order matters for the # actual computation time. 
#", "= dill.load(dillfile) hash_obj = hashlib.sha256() hash_obj.update(dill.dumps(serialized_options)) file_config_hash = hash_obj.hexdigest() assert", "hash_obj.update(serialized_type) hash_obj.update(dill.dumps(serialized_options)) file_config_hash = hash_obj.hexdigest() assert file_config_hash == config_hash, \\", "this for constructing a file name and placing the file", "is agnostic to it. # However, the numpy integration that", "(lower_bound, upper_bound) representing bounds. \"num_points\" = Number of points per", "\"epsabs\": 1e-7} coords = options[\"coords\"] res_table = _compute_bd_lookup_table(coords, nc_options) with", "hashlib.sha256() hash_obj.update(table_type.encode('utf-8')) hash_obj.update(dill.dumps(options)) config_hash = hash_obj.hexdigest() if not path: path", "open(path, \"rb\") as dillfile: return dill.load(dillfile) def eaad_von_mises(kappas, integral_options=None): \"\"\"", "If a lookup table with the given options already exists,", "sp2 * np.sin(phi3), sp1 * sp2 * np.cos(phi3), sp1 *", "list(itertools.combinations_with_replacement( range(0, num_points), 3)) results = pool.map(nc_wrapper, range(len(point_indices))) res_tensor =", "import scipy.special import sys import torch from pathos.multiprocessing import ProcessingPool", "bingham_z, {\"norm_const_mode\": \"numerical\", \"norm_const_options\": integral_options} ) def integrand_transformed(x): # To", "aad(quat_a, quat_b): acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b))) diff_ang = 2.0 *", "Von Mises kappa parameters for roll, pitch, yaw. 
integral_options: Options", "file not found.\" with open(path, \"rb\") as dillfile: return dill.load(dillfile)", "1e-7} coords = np.linspace(lbound, rbound, num_points) res_table = _compute_bd_lookup_table(coords, nc_options)", "= quat / np.linalg.norm(quat) return quat def radians(degree_tensor): \"\"\" Method", "/ (self.count + n) self.avg[key] += val[key] * n /", "import hashlib import itertools import third_party.deep_bingham.bingham_distribution as ms import math", "= val def _compute_vm_lookup_table(coords): num_points = len(coords) pool = Pool()", "0.0, lambda x: np.pi, # phi2 lambda x, y: 0.0,", "= {\"epsrel\": 1e-3, \"epsabs\": 1e-7} coords = np.linspace(lbound, rbound, num_points)", "if os.path.exists(path): with open(path, \"rb\") as dillfile: (serialized_options, res_table) \\", "pt_idx[1], pt_idx[0]] = results[idx_pos] return res_tensor def vec_to_bingham_z_many(y): z =", "== \"nonuniform\": nc_options = {\"epsrel\": 1e-3, \"epsabs\": 1e-7} coords =", "of Bingham Random Vector Arguments: bingham_z: Bingham dispersion parameter in", "+= val * n / (self.count + n) self.count +=", "A function that returns all possible triples of coords Parameters:", "# Indexing pt_idx in the order 2,1,0 vs. 0,1,2 has", "* sp * cr z = sy * cp *", "= _compute_bd_lookup_table(coords, nc_options) with open(path, \"wb\") as dillfile: dill.dump((table_type, options,", "def update(self, val, n=1): self.last_val = val n = float(n)", "in val: self.avg[key] *= self.count / (self.count + n) self.avg[key]", "expected by the manstats BinghamDistribution class. 
integral_options: Options to pass", "given options already exists, it is loaded and returned instead", "the end, the result of the integration is # divided", "= np.array([0., 0., 0.]) # radians quat_mu = convert_euler_to_quaternion( math.degrees(param_mu[0]),", "coords), dillfile) elif table_type == \"nonuniform\": nc_options = {\"epsrel\": 1e-3,", "& options.\" else: coords = options[\"coords\"] res_table = _compute_vm_lookup_table(coords) with", "\\ * scipy.special.iv(0, param_kappa[1]) \\ * scipy.special.iv(0, param_kappa[2]) def integrand_aad(phi1,", "def integrand_aad(phi1, phi2, phi3): return np.exp(param_kappa[0] * np.cos(phi1)) \\ *", "* (sp1 ** 2.) * sp2 eaad_int = integrate.tplquad( integrand,", "evaluated. path: absolute path for the lookup table (optional). The", "# numerical 2d and 1d integration which is why the", "existing table or create new one. if os.path.exists(path): with open(path,", "of lookup table used. May be 'uniform' or 'nonuniform' options:", "pt_idx[0], pt_idx[2]] = results[idx_pos] res_tensor[pt_idx[1], pt_idx[2], pt_idx[0]] = results[idx_pos] res_tensor[pt_idx[2],", "integrand_transformed(np.array([ sp1 * sp2 * np.sin(phi3), sp1 * sp2 *", "8.0 * (np.pi ** 3) \\ * scipy.special.iv(0, param_kappa[0]) \\", "a directory exists and creates it otherwise. \"\"\" if not", "import torch from pathos.multiprocessing import ProcessingPool as Pool from pathos.multiprocessing", "as dillfile: (serialized_type, serialized_options, res_table, coords) \\ = dill.load(dillfile) hash_obj", "def vec_to_bingham_z(y): z = -torch.exp(y).cumsum(0)[[2, 1, 0]].unsqueeze(0) if not all(z[0][:-1]", "of points per dimension. 
If type is \"nonuniform\" this dict", "cp * sr + cy * sp * cr z", "else: sys.exit(\"Unknown lookup table type\") return res_table def build_vm_lookup_table(options, path=None):", "results[idx_pos] res_tensor[pt_idx[1], pt_idx[2], pt_idx[0]] = results[idx_pos] res_tensor[pt_idx[2], pt_idx[0], pt_idx[1]] =", "pt_idx = point_indices[idx] # Indexing pt_idx in the order 2,1,0", "match given type & options.\" elif table_type == \"uniform\": #", "z = sy * cp * cr - cy *", "(z), yaw (y), pitch (x) cy = math.cos(math.radians(roll) * 0.5)", "0]].unsqueeze(0) return z def vec_to_bingham_z(y): z = -torch.exp(y).cumsum(0)[[2, 1, 0]].unsqueeze(0)", "the bingham normalization constant. \"\"\" def aad(quat_a, quat_b): # acos_val", "\\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[1]])) \\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[2]])) print(\"Computing NC", "quat = np.array([w, x, y, z]) quat = quat /", "= {\"epsrel\": 1e-4, \"epsabs\": 1e-4} bd = ms.BinghamDistribution( np.eye(4), bingham_z,", "= hashlib.sha256() hash_obj.update(dill.dumps(serialized_options)) file_config_hash = hash_obj.hexdigest() assert file_config_hash == config_hash,", "import math import numpy as np import os import scipy", "type\") return res_table def build_vm_lookup_table(options, path=None): \"\"\" Builds a lookup", "table_type == \"uniform\": # Number of points per axis. (lbound,", "table. res_tensor (numpy.ndarray): The actual lookup table data. coords (numpy.ndarray):", "* sp2 * np.sin(phi3), sp1 * sp2 * np.cos(phi3), sp1", "this dict must contain a key \"coords\" which is a", "yaw (y), pitch (x) cy = math.cos(math.radians(roll) * 0.5) sy", "options: Dict cotaining table options. 
It must contain a key", "= coords.reshape(-1, 1).repeat(1, len(coords) * len(coords)).flatten() y = coords.reshape(-1, 1).repeat(1,", "self.avg = copy.deepcopy(val) else: for key in val: self.avg[key] *=", "= integrate.tplquad( integrand, 0.0, 2.0 * np.pi, # phi3 lambda", "integrate.tplquad( integrand_aad, 0.0, 2.0 * np.pi, # phi3 lambda x:", "Random Vector Arguments: bingham_z: Bingham dispersion parameter in the format", "evaluated. For the von Mises case, it contains: options (dict):", "path: absolute path for the lookup table (optional). The default", "given type & options.\" elif table_type == \"uniform\": # Number", "os.path.dirname(__file__) \\ + \"/../precomputed/lookup_{}.dill\".format(config_hash) # Load existing table or create", "nc_wrapper(idx): cur_pt_idx = point_indices[idx] log_norm_const = np.log(8.0) + (3. *", "/ (self.count + n) self.count += n self.last_val = val", "normalization constant. At the end, the result of the integration", "phi1 **integral_options ) return eaad_int[0] / bd.norm_const def build_bd_lookup_table(table_type, options,", "\"nonuniform\" this dict must contain a key \"coords\" which is", "sr - sy * sp * cr y = sy", "diff_ang = 2.0 * acos_val return diff_ang if integral_options is", "kappas=[{}, {}, {}]: {}\".format( coords[cur_pt_idx[2]], coords[cur_pt_idx[1]], coords[cur_pt_idx[0]], log_norm_const)) return log_norm_const", "Builds a lookup table for interpolating the bingham normalization constant.", "os.path.exists(path): with open(path, \"rb\") as dillfile: (serialized_type, serialized_options, res_table, coords)", "upper_bound) representing bounds. \"num_points\" = Number of points per dimension.", "vs. 0,1,2 has no impact # on the result as", "as integrate import scipy.special import sys import torch from pathos.multiprocessing", "+ np.log(scipy.special.iv(0, coords[cur_pt_idx[1]])) \\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[2]])) print(\"Computing NC for", "class. 
integral_options: Options to pass on to the scipy integrator", "2.) * sp2 eaad_int = integrate.tplquad( integrand, 0.0, 2.0 *", "\"coords\" which is a numpy arrays representing the coordinates at", "with open(path, \"wb\") as dillfile: dill.dump((table_type, options, res_table, coords), dillfile)", "table (optional). The default is to create a hash based", "cy = math.cos(math.radians(roll) * 0.5) sy = math.sin(math.radians(roll) * 0.5)", "for computing the eaad and the bingham normalization constant. \"\"\"", "{\"epsrel\": 1e-3, \"epsabs\": 1e-7} coords = options[\"coords\"] res_table = _compute_bd_lookup_table(coords,", "a key \"coords\" which is a numpy arrays representing the", "options[\"coords\"] res_table = _compute_bd_lookup_table(coords, nc_options) with open(path, \"wb\") as dillfile:", "degree format to radians. Arguments: degree_tensor (torch.Tensor): Tensor consisting of", "len(coords) * len(coords)).flatten() y = coords.reshape(-1, 1).repeat(1, len(coords)).flatten().repeat(len(coords)) z =", "np.array( [coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], 0.]), \"numerical\", nc_options) print(\"Computing NC for", "constant. If a lookup table with the given options already", "normalization constant. \"\"\" def aad(quat_a, quat_b): # acos_val = np.arccos(np.dot(quat_a,", "\"\"\" Builds a lookup table for interpolating the bingham normalization", "*= self.count / (self.count + n) self.avg[key] += val[key] *", "contain a key \"coords\" which is a numpy arrays representing", "def convert_euler_to_quaternion(roll, yaw, pitch): \"\"\"Converts roll, yaw, pitch to a", "scipy import scipy.integrate as integrate import scipy.special import sys import", "np.pi, # phi3 lambda x: 0.0, lambda x: np.pi, #", "coords[pt_idx[1]], coords[pt_idx[0]], 0.]), \"numerical\", nc_options) print(\"Computing NC for Z=[{}, {},", "keys are added during the computation. 
\"\"\" def __init__(self): self.last_val", "2 * np.min([acos_val, np.pi - acos_val]) acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b)))", "np.pi, # phi3 lambda x: 0.0, lambda x: 2. *", "new keys are added during the computation. \"\"\" def __init__(self):", "range(len(point_indices))) res_tensor = -np.ones((num_points, num_points, num_points)) for idx_pos, pt_idx in", ")) eaad_int = integrate.tplquad( integrand_aad, 0.0, 2.0 * np.pi, #", "log_norm_const point_indices = list(itertools.combinations_with_replacement( range(0, num_points), 3)) results = pool.map(nc_wrapper,", "no new keys are added during the computation. \"\"\" def", "pt_idx[2]] = results[idx_pos] res_tensor[pt_idx[1], pt_idx[2], pt_idx[0]] = results[idx_pos] res_tensor[pt_idx[2], pt_idx[0],", "* sr quat = np.array([w, x, y, z]) quat =", "hash_obj = hashlib.sha256() hash_obj.update(dill.dumps(options)) config_hash = hash_obj.hexdigest() if not path:", "(y), pitch (x) cy = math.cos(math.radians(roll) * 0.5) sy =", "kappas direct_norm_const = 8.0 * (np.pi ** 3) \\ *", "lookup table for interpolating the bingham normalization constant. If a", "= len(coords) pool = Pool() def nc_wrapper(idx): cur_pt_idx = point_indices[idx]", "avoid unnecessary divisions, this term does not contain the #", "= -torch.exp(y).cumsum(1)[:, [2, 1, 0]].unsqueeze(0) return z def vec_to_bingham_z(y): z", "phi1 **integral_options ) return eaad_int[0]/direct_norm_const def eaad_bingham(bingham_z, integral_options=None): \"\"\" Expected", "lookup table. res_tensor (numpy.ndarray): The actual lookup table data. coords", "= pool.map(nc_wrapper, range(len(point_indices))) res_tensor = -np.ones((num_points, num_points, num_points)) for idx_pos,", "= {\"epsrel\": 1e-3, \"epsabs\": 1e-7} coords = options[\"coords\"] res_table =", "BinghamDistribution class. 
integral_options: Options to pass on to the scipy", "/ np.linalg.norm(quat) return quat def radians(degree_tensor): \"\"\" Method to convert", "{}\".format( coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], norm_const)) return norm_const point_indices = list(itertools.combinations_with_replacement(", "dill.dump((options, res_table), dillfile) return res_table def _compute_bd_lookup_table(coords, nc_options): num_points =", "np.log(scipy.special.iv(0, coords[cur_pt_idx[2]])) print(\"Computing NC for kappas=[{}, {}, {}]: {}\".format( coords[cur_pt_idx[2]],", "Angular Deviation of Bingham Random Vector Arguments: kappas: Von Mises", "a lookup table with the given options already exists, it", "np.log(8.0) + (3. * np.log(np.pi)) \\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[0]])) \\", "= math.cos(math.radians(pitch) * 0.5) sr = math.sin(math.radians(pitch) * 0.5) w", "than one point per dimension.\" nc_options = {\"epsrel\": 1e-3, \"epsabs\":", "quat_mu = convert_euler_to_quaternion( math.degrees(param_mu[0]), math.degrees(param_mu[1]), math.degrees(param_mu[2]) ) param_kappa = kappas", "numerical 2d and 1d integration which is why the order", "= _compute_vm_lookup_table(coords) with open(path, \"wb\") as dillfile: dill.dump((options, res_table), dillfile)", "diff_ang if integral_options is None: integral_options = {\"epsrel\": 1e-2, \"epsabs\":", "= Pool(max(cpu_count()//2, 1)) def nc_wrapper(idx): pt_idx = point_indices[idx] # Indexing", "np.exp(np.dot(x, np.dot(np.diag(bingham_z), x))) def integrand(phi1, phi2, phi3): sp1 = np.sin(phi1)", "lookup table from dill serialized file. 
Returns a table specific", "num_points)) for idx_pos, pt_idx in enumerate(point_indices): res_tensor[pt_idx[0], pt_idx[1], pt_idx[2]] =", "coords.reshape(-1, 1).repeat(1, len(coords)).flatten().repeat(len(coords)) z = coords.reshape(-1, 1).flatten().repeat(len(coords)*len(coords)) return x, y,", "aad(quat_a, quat_b): # acos_val = np.arccos(np.dot(quat_a, quat_b)) # diff_ang =", "= options[\"coords\"] res_table = _compute_bd_lookup_table(coords, nc_options) with open(path, \"wb\") as", "len(coords)).flatten() y = coords.reshape(-1, 1).repeat(1, len(coords)).flatten().repeat(len(coords)) z = coords.reshape(-1, 1).flatten().repeat(len(coords)*len(coords))", "# phi1 **integral_options ) return eaad_int[0] / bd.norm_const def build_bd_lookup_table(table_type,", "= np.linspace(lbound, rbound, num_points) res_table = _compute_bd_lookup_table(coords, nc_options) with open(path,", "possible triples y: the second coordinate of possible triples z", "the lookup table (optional). The default is to create a", "with open(path, \"rb\") as dillfile: return dill.load(dillfile) def eaad_von_mises(kappas, integral_options=None):", "nc_options) with open(path, \"wb\") as dillfile: dill.dump((table_type, options, res_table, coords),", "used to generate the lookup table. res_tensor (numpy.ndarray): The actual", "Returns a table specific tuple. 
For the Bingham case, the", "__init__(self): self.last_val = 0 self.avg = 0 self.count = 0", ") def integrand_transformed(x): # To avoid unnecessary divisions, this term", "Deviation of Bingham Random Vector Arguments: bingham_z: Bingham dispersion parameter", "return log_norm_const point_indices = list(itertools.combinations_with_replacement( range(0, num_points), 3)) results =", "dillfile: dill.dump((table_type, options, res_table, coords), dillfile) else: sys.exit(\"Unknown lookup table", "sp2 = np.sin(phi2) return integrand_transformed(np.array([ sp1 * sp2 * np.sin(phi3),", "res_table def _compute_bd_lookup_table(coords, nc_options): num_points = len(coords) pool = Pool(max(cpu_count()//2,", "def aad(quat_a, quat_b): acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b))) diff_ang = 2.0", "def load_lookup_table(path): \"\"\" Loads lookup table from dill serialized file.", "hash_obj = hashlib.sha256() hash_obj.update(serialized_type) hash_obj.update(dill.dumps(serialized_options)) file_config_hash = hash_obj.hexdigest() assert file_config_hash", "math.sin(math.radians(yaw) * 0.5) cr = math.cos(math.radians(pitch) * 0.5) sr =", "return np.exp(param_kappa[0] * np.cos(phi1)) \\ * np.exp(param_kappa[1] * np.cos(phi2)) \\", "one point per dimension.\" nc_options = {\"epsrel\": 1e-3, \"epsabs\": 1e-7}", "cy * sp * sr quat = np.array([w, x, y,", "= len(coords) pool = Pool(max(cpu_count()//2, 1)) def nc_wrapper(idx): pt_idx =", "[2, 1, 0]].unsqueeze(0) return z def vec_to_bingham_z(y): z = -torch.exp(y).cumsum(0)[[2,", "For the von Mises case, it contains: options (dict): The", "rbound) = options[\"bounds\"] num_points = options[\"num_points\"] assert num_points > 1,", "pathos.multiprocessing import cpu_count def convert_euler_to_quaternion(roll, yaw, pitch): \"\"\"Converts roll, yaw,", "x = coords.reshape(-1, 1).repeat(1, len(coords) * len(coords)).flatten() y = coords.reshape(-1,", "at which the interpolation is evaluated. 
path: absolute path for", "pass on to the scipy integrator for computing the eaad", "open(path, \"wb\") as dillfile: dill.dump((table_type, options, res_table, coords), dillfile) else:", "return z def vec_to_bingham_z(y): z = -torch.exp(y).cumsum(0)[[2, 1, 0]].unsqueeze(0) if", "lookup table used. May be 'uniform' or 'nonuniform' options: Dict", "to use this for constructing a file name and placing", "res_tensor def vec_to_bingham_z_many(y): z = -torch.exp(y).cumsum(1)[:, [2, 1, 0]].unsqueeze(0) return", "= 8.0 * (np.pi ** 3) \\ * scipy.special.iv(0, param_kappa[0])", "cr = math.cos(math.radians(pitch) * 0.5) sr = math.sin(math.radians(pitch) * 0.5)", "is to create a hash based on the options and", "\"\"\" Utilities for learning pipeline.\"\"\" from __future__ import print_function import", "num_points) res_table = _compute_bd_lookup_table(coords, nc_options) with open(path, \"wb\") as dillfile:", "if os.path.exists(path): with open(path, \"rb\") as dillfile: (serialized_type, serialized_options, res_table,", "bd.mode) \\ * np.exp(np.dot(x, np.dot(np.diag(bingham_z), x))) def integrand(phi1, phi2, phi3):", "triples y: the second coordinate of possible triples z the", "Returns: radian_tensor (torch.Tensor): Tensor consisting of angles in radian format.", "x = cy * cp * sr - sy *", "bingham normalization constant. If a lookup table with the given", "np.exp(param_kappa[2] * np.cos(phi3)) \\ * aad(quat_mu, convert_euler_to_quaternion( math.degrees(phi1), math.degrees(phi2), math.degrees(phi3)", "Options to pass on to the scipy integrator for computing", "pt_idx[2], pt_idx[1]] = results[idx_pos] res_tensor[pt_idx[1], pt_idx[0], pt_idx[2]] = results[idx_pos] res_tensor[pt_idx[1],", "dict: if self.count == 0: self.avg = copy.deepcopy(val) else: for", "options: Dict cotaining type specific options. 
If type is \"uniform\"", "= results[idx_pos] res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos] return res_tensor class", "the format expected by the manstats BinghamDistribution class. integral_options: Options", "not os.path.exists(path): os.makedirs(path) def load_lookup_table(path): \"\"\" Loads lookup table from", "agnostic to it. # However, the numpy integration that is", "options.\" else: coords = options[\"coords\"] res_table = _compute_vm_lookup_table(coords) with open(path,", "Number of points per axis. (lbound, rbound) = options[\"bounds\"] num_points", "triples z the third coordinate of possible triples \"\"\" x", "to it. # However, the numpy integration that is used", "sp * cr z = sy * cp * cr", "np.exp(param_kappa[1] * np.cos(phi2)) \\ * np.exp(param_kappa[2] * np.cos(phi3)) \\ *", "# # TODO: Make pymanstats choose best order automatically. norm_const", "return res_tensor def vec_to_bingham_z_many(y): z = -torch.exp(y).cumsum(1)[:, [2, 1, 0]].unsqueeze(0)", "coords = options[\"coords\"] res_table = _compute_vm_lookup_table(coords) with open(path, \"wb\") as", "lookup table was evaluated. For the von Mises case, it", "z the third coordinate of possible triples \"\"\" x =", "(numpy.ndarray): The actual lookup table data. \"\"\" assert os.path.exists(path), \"Lookup", "the averages over a numbers or dicts of numbers. For", "= float(n) if type(val) == dict: if self.count == 0:", "specific tuple. For the Bingham case, the tuple containins: table_type", "\\ * aad(quat_mu, convert_euler_to_quaternion( math.degrees(phi1), math.degrees(phi2), math.degrees(phi3) )) eaad_int =", "order 2,1,0 vs. 0,1,2 has no impact # on the", "{}, {}, 0.0]: {}\".format( coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], norm_const)) return norm_const", "Tensor consisting of angles in radian format. 
\"\"\" radian_tensor =", "= np.arccos(np.abs(np.dot(quat_a, quat_b))) diff_ang = 2 * acos_val return diff_ang", "rbound, num_points) res_table = _compute_bd_lookup_table(coords, nc_options) with open(path, \"wb\") as", "that no new keys are added during the computation. \"\"\"", "scipy.special.iv(0, param_kappa[0]) \\ * scipy.special.iv(0, param_kappa[1]) \\ * scipy.special.iv(0, param_kappa[2])", "use this for constructing a file name and placing the", "constant is agnostic to it. # However, the numpy integration", "yaw, pitch to a quaternion. \"\"\" # roll (z), yaw", "and to use this for constructing a file name and", "+ np.log(scipy.special.iv(0, coords[cur_pt_idx[0]])) \\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[1]])) \\ + np.log(scipy.special.iv(0,", "Vector Arguments: kappas: Von Mises kappa parameters for roll, pitch,", "file. Returns a table specific tuple. For the Bingham case,", "of building a new one. Arguments: options: Dict cotaining table", "\"Serialized lookup table does not match given type & options.\"", "val * n / (self.count + n) self.count += n", "# acos_val = np.arccos(np.dot(quat_a, quat_b)) # diff_ang = 2 *", "a hash based on the options and to use this", "eaad_int[0]/direct_norm_const def eaad_bingham(bingham_z, integral_options=None): \"\"\" Expected Absolute Angular Deviation of", "* np.sin(phi3), sp1 * sp2 * np.cos(phi3), sp1 * np.cos(phi2),", "integrand(phi1, phi2, phi3): sp1 = np.sin(phi1) sp2 = np.sin(phi2) return", "first coordinate of possible triples y: the second coordinate of", "yaw. integral_options: Options to pass on to the scipy integrator", "file_config_hash == config_hash, \\ \"Serialized lookup table does not match", "# To avoid unnecessary divisions, this term does not contain", "**integral_options ) return eaad_int[0] / bd.norm_const def build_bd_lookup_table(table_type, options, path=None):", "\"\"\"Converts roll, yaw, pitch to a quaternion. 
\"\"\" # roll", "unnecessary divisions, this term does not contain the # normalization", "precomputed folder. \"\"\" hash_obj = hashlib.sha256() hash_obj.update(dill.dumps(options)) config_hash = hash_obj.hexdigest()", "table file not found.\" with open(path, \"rb\") as dillfile: return", "for the # actual computation time. # # TODO: Make", "= 2.0 * acos_val return diff_ang if integral_options is None:", "file name and placing the file in the precomputed folder.", "of possible triples \"\"\" x = coords.reshape(-1, 1).repeat(1, len(coords) *", "= math.cos(math.radians(yaw) * 0.5) sp = math.sin(math.radians(yaw) * 0.5) cr", "\"epsabs\": 1e-7} coords = np.linspace(lbound, rbound, num_points) res_table = _compute_bd_lookup_table(coords,", "it contains: options (dict): The options used to generate the", "{\"epsrel\": 1e-4, \"epsabs\": 1e-4} bd = ms.BinghamDistribution( np.eye(4), bingham_z, {\"norm_const_mode\":", "convert_euler_to_quaternion( math.degrees(param_mu[0]), math.degrees(param_mu[1]), math.degrees(param_mu[2]) ) param_kappa = kappas direct_norm_const =", "of building a new one. Arguments: table_type: Type of lookup", "x, y, z def ensure_dir_exists(path): \"\"\" Checks if a directory", "representing bounds. \"num_points\" = Number of points per dimension. If", "Parameters: coords: a numpy array of coordinates Returns: x: the", "pt_idx[1], pt_idx[2]] = results[idx_pos] res_tensor[pt_idx[0], pt_idx[2], pt_idx[1]] = results[idx_pos] res_tensor[pt_idx[1],", "* sp2 * np.cos(phi3), sp1 * np.cos(phi2), np.cos(phi1) ])) *", "choose best order automatically. norm_const = ms.BinghamDistribution.normalization_constant( np.array( [coords[pt_idx[2]], coords[pt_idx[1]],", "integrand_aad(phi1, phi2, phi3): return np.exp(param_kappa[0] * np.cos(phi1)) \\ * np.exp(param_kappa[1]", "table_type == \"nonuniform\": nc_options = {\"epsrel\": 1e-3, \"epsabs\": 1e-7} coords", "integrate.tplquad( integrand, 0.0, 2.0 * np.pi, # phi3 lambda x:", "folder. 
\"\"\" hash_obj = hashlib.sha256() hash_obj.update(table_type.encode('utf-8')) hash_obj.update(dill.dumps(options)) config_hash = hash_obj.hexdigest()", "table options. It must contain a key \"coords\" which is", "generate_coordinates(coords): \"\"\" A function that returns all possible triples of", "based on the options and to use this for constructing", "sr = math.sin(math.radians(pitch) * 0.5) w = cy * cp", "* cp * cr - cy * sp * sr", "acos_val return diff_ang if integral_options is None: integral_options = {\"epsrel\":", "* sp2 eaad_int = integrate.tplquad( integrand, 0.0, 2.0 * np.pi,", "# phi1 **integral_options ) return eaad_int[0]/direct_norm_const def eaad_bingham(bingham_z, integral_options=None): \"\"\"", "np.cos(phi3)) \\ * aad(quat_mu, convert_euler_to_quaternion( math.degrees(phi1), math.degrees(phi2), math.degrees(phi3) )) eaad_int", "\"\"\" def aad(quat_a, quat_b): # acos_val = np.arccos(np.dot(quat_a, quat_b)) #", "pt_idx[0]] = results[idx_pos] return res_tensor def vec_to_bingham_z_many(y): z = -torch.exp(y).cumsum(1)[:,", "generate the lookup table. res_tensor (numpy.ndarray): The actual lookup table", "(self.count + n) self.avg += val * n / (self.count", "pipeline.\"\"\" from __future__ import print_function import copy import dill import", "param_kappa[2]) def integrand_aad(phi1, phi2, phi3): return np.exp(param_kappa[0] * np.cos(phi1)) \\", "coordinate of possible triples \"\"\" x = coords.reshape(-1, 1).repeat(1, len(coords)", "to convert a torch tensor of angles in degree format", "hashlib.sha256() hash_obj.update(serialized_type) hash_obj.update(dill.dumps(serialized_options)) file_config_hash = hash_obj.hexdigest() assert file_config_hash == config_hash,", "radians. 
Arguments: degree_tensor (torch.Tensor): Tensor consisting of angles in degree", "eaad_bingham(bingham_z, integral_options=None): \"\"\" Expected Absolute Angular Deviation of Bingham Random", "1d integration which is why the order matters for the", "sp1 = np.sin(phi1) sp2 = np.sin(phi2) return integrand_transformed(np.array([ sp1 *", "lambda x: 2. * np.pi, # phi2 lambda x, y:", "be 'uniform' or 'nonuniform' options: Dict cotaining type specific options.", "0.0, 2.0 * np.pi, # phi3 lambda x: 0.0, lambda", "sy * cp * cr - cy * sp *", "norm_const point_indices = list(itertools.combinations_with_replacement( range(0, num_points), 3)) results = pool.map(nc_wrapper,", "\\ = dill.load(dillfile) hash_obj = hashlib.sha256() hash_obj.update(dill.dumps(serialized_options)) file_config_hash = hash_obj.hexdigest()", "axis. (lbound, rbound) = options[\"bounds\"] num_points = options[\"num_points\"] assert num_points", "z def vec_to_bingham_z(y): z = -torch.exp(y).cumsum(0)[[2, 1, 0]].unsqueeze(0) if not", "for kappas=[{}, {}, {}]: {}\".format( coords[cur_pt_idx[2]], coords[cur_pt_idx[1]], coords[cur_pt_idx[0]], log_norm_const)) return", "a lookup table for interpolating the bingham normalization constant. If", "-torch.exp(y).cumsum(1)[:, [2, 1, 0]].unsqueeze(0) return z def vec_to_bingham_z(y): z =", "a quaternion. \"\"\" # roll (z), yaw (y), pitch (x)", "* cr z = sy * cp * cr -", "x: 0.0, lambda x: np.pi, # phi2 lambda x, y:", "1e-4, \"epsabs\": 1e-4} bd = ms.BinghamDistribution( np.eye(4), bingham_z, {\"norm_const_mode\": \"numerical\",", "= hashlib.sha256() hash_obj.update(dill.dumps(options)) config_hash = hash_obj.hexdigest() if not path: path", "(torch.Tensor): Tensor consisting of angles in degree format. Returns: radian_tensor", "res_tensor[pt_idx[1], pt_idx[2], pt_idx[0]] = results[idx_pos] res_tensor[pt_idx[2], pt_idx[0], pt_idx[1]] = results[idx_pos]", "None: integral_options = {\"epsrel\": 1e-2, \"epsabs\": 1e-2} param_mu = np.array([0.,", "dicts of numbers. 
For the dict, this class assumes that", "with the given options already exists, it is loaded and", "coords[cur_pt_idx[1]])) \\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[2]])) print(\"Computing NC for kappas=[{}, {},", "The actual lookup table data. \"\"\" assert os.path.exists(path), \"Lookup table", "* cp * cr + sy * sp * sr", "\"\"\" # roll (z), yaw (y), pitch (x) cy =", "= {\"epsrel\": 1e-2, \"epsabs\": 1e-2} param_mu = np.array([0., 0., 0.])", "dillfile) else: sys.exit(\"Unknown lookup table type\") return res_table def build_vm_lookup_table(options,", "{}, 0.0]: {}\".format( coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], norm_const)) return norm_const point_indices", "* np.exp(param_kappa[1] * np.cos(phi2)) \\ * np.exp(param_kappa[2] * np.cos(phi3)) \\", "(dict): The options used to generate the lookup table. res_tensor", "if type(val) == dict: if self.count == 0: self.avg =", "a numpy arrays representing the coordinates at which the interpolation", "return dill.load(dillfile) def eaad_von_mises(kappas, integral_options=None): \"\"\" Expected Absolute Angular Deviation", "sp = math.sin(math.radians(yaw) * 0.5) cr = math.cos(math.radians(pitch) * 0.5)", "\"\"\"Computes and stores the averages over a numbers or dicts", "cp * cr + sy * sp * sr x", "or create new one. if os.path.exists(path): with open(path, \"rb\") as", "np.sin(phi1) sp2 = np.sin(phi2) return integrand_transformed(np.array([ sp1 * sp2 *", "the precomputed folder. \"\"\" hash_obj = hashlib.sha256() hash_obj.update(dill.dumps(options)) config_hash =", "computation. \"\"\" def __init__(self): self.last_val = 0 self.avg = 0", "a torch tensor of angles in degree format to radians.", "+ n) self.count += n self.last_val = val def _compute_vm_lookup_table(coords):", "of the integration is # divided by it. 
return aad(x,", "diff_ang = 2 * acos_val return diff_ang if integral_options is", "eaad_von_mises(kappas, integral_options=None): \"\"\" Expected Absolute Angular Deviation of Bingham Random", "coords[cur_pt_idx[2]], coords[cur_pt_idx[1]], coords[cur_pt_idx[0]], log_norm_const)) return log_norm_const point_indices = list(itertools.combinations_with_replacement( range(0,", "table does not match given type & options.\" elif table_type", "\"epsabs\": 1e-4} bd = ms.BinghamDistribution( np.eye(4), bingham_z, {\"norm_const_mode\": \"numerical\", \"norm_const_options\":", "table from dill serialized file. Returns a table specific tuple.", "aad(quat_mu, convert_euler_to_quaternion( math.degrees(phi1), math.degrees(phi2), math.degrees(phi3) )) eaad_int = integrate.tplquad( integrand_aad,", "{\"norm_const_mode\": \"numerical\", \"norm_const_options\": integral_options} ) def integrand_transformed(x): # To avoid", "over a numbers or dicts of numbers. For the dict,", "from pathos.multiprocessing import ProcessingPool as Pool from pathos.multiprocessing import cpu_count", "hash_obj.update(dill.dumps(options)) config_hash = hash_obj.hexdigest() if not path: path = os.path.dirname(__file__)", "np.array([w, x, y, z]) quat = quat / np.linalg.norm(quat) return", "assert num_points > 1, \\ \"Grid must have more than", "+ np.log(scipy.special.iv(0, coords[cur_pt_idx[2]])) print(\"Computing NC for kappas=[{}, {}, {}]: {}\".format(", "* cp * sr + cy * sp * cr", "2d and 1d integration which is why the order matters", "res_tensor class AverageMeter(object): \"\"\"Computes and stores the averages over a", "config_hash = hash_obj.hexdigest() if not path: path = os.path.dirname(__file__) \\", "degree_tensor (torch.Tensor): Tensor consisting of angles in degree format. Returns:", "= 0 def update(self, val, n=1): self.last_val = val n", "{\"epsrel\": 1e-2, \"epsabs\": 1e-2} param_mu = np.array([0., 0., 0.]) #", "Coordinates at which lookup table was evaluated. 
For the von", "\"rb\") as dillfile: (serialized_type, serialized_options, res_table, coords) \\ = dill.load(dillfile)", "* np.pi, # phi2 lambda x, y: 0.0, lambda x,", "tensor of angles in degree format to radians. Arguments: degree_tensor", "folder. \"\"\" hash_obj = hashlib.sha256() hash_obj.update(dill.dumps(options)) config_hash = hash_obj.hexdigest() if", "NC for kappas=[{}, {}, {}]: {}\".format( coords[cur_pt_idx[2]], coords[cur_pt_idx[1]], coords[cur_pt_idx[0]], log_norm_const))", "def _compute_bd_lookup_table(coords, nc_options): num_points = len(coords) pool = Pool(max(cpu_count()//2, 1))", "kappa parameters for roll, pitch, yaw. integral_options: Options to pass", "os.makedirs(path) def load_lookup_table(path): \"\"\" Loads lookup table from dill serialized", "2,1,0 vs. 0,1,2 has no impact # on the result", "normalization constant. \"\"\" def aad(quat_a, quat_b): acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b)))", "it. return aad(x, bd.mode) \\ * np.exp(np.dot(x, np.dot(np.diag(bingham_z), x))) def", "quaternion. 
\"\"\" # roll (z), yaw (y), pitch (x) cy", "to pass on to the scipy integrator for computing the", "def vec_to_bingham_z_many(y): z = -torch.exp(y).cumsum(1)[:, [2, 1, 0]].unsqueeze(0) return z", "options.\" elif table_type == \"uniform\": # Number of points per", "* sp * sr quat = np.array([w, x, y, z])", "Bingham Random Vector Arguments: bingham_z: Bingham dispersion parameter in the", "= hash_obj.hexdigest() if not path: path = os.path.dirname(__file__) \\ +", "pathos.multiprocessing import ProcessingPool as Pool from pathos.multiprocessing import cpu_count def", "val n = float(n) if type(val) == dict: if self.count", "* scipy.special.iv(0, param_kappa[0]) \\ * scipy.special.iv(0, param_kappa[1]) \\ * scipy.special.iv(0,", "\"rb\") as dillfile: (serialized_options, res_table) \\ = dill.load(dillfile) hash_obj =", "_compute_vm_lookup_table(coords) with open(path, \"wb\") as dillfile: dill.dump((options, res_table), dillfile) return", "0,1,2 has no impact # on the result as the", "on to the scipy integrator for computing the eaad and", "sy * sp * sr x = cy * cp", "coords (numpy.ndarray): Coordinates at which lookup table was evaluated. For", "May be 'uniform' or 'nonuniform' options: Dict cotaining type specific", "'nonuniform' options: Dict cotaining type specific options. If type is", "If type is \"uniform\" this dict must contain: \"bounds\" =", "Tensor consisting of angles in degree format. Returns: radian_tensor (torch.Tensor):", "pt_idx[1], pt_idx[0]] = results[idx_pos] return res_tensor class AverageMeter(object): \"\"\"Computes and", "def radians(degree_tensor): \"\"\" Method to convert a torch tensor of", "options used to generate the lookup table. 
res_tensor (numpy.ndarray): The", "* np.pi, # phi1 **integral_options ) return eaad_int[0]/direct_norm_const def eaad_bingham(bingham_z,", "= sy * cp * cr - cy * sp", "NC for Z=[{}, {}, {}, 0.0]: {}\".format( coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]],", "cp * cr - cy * sp * sr quat", "np.linspace(lbound, rbound, num_points) res_table = _compute_bd_lookup_table(coords, nc_options) with open(path, \"wb\")", "* acos_val return diff_ang if integral_options is None: integral_options =", "returned instead of building a new one. Arguments: table_type: Type", "\"numerical\", nc_options) print(\"Computing NC for Z=[{}, {}, {}, 0.0]: {}\".format(", "num_points), 3)) results = pool.map(nc_wrapper, range(len(point_indices))) res_tensor = -np.ones((num_points, num_points,", "of points per axis. (lbound, rbound) = options[\"bounds\"] num_points =", "Pool() def nc_wrapper(idx): cur_pt_idx = point_indices[idx] log_norm_const = np.log(8.0) +", "Vector Arguments: bingham_z: Bingham dispersion parameter in the format expected", "{}\".format( coords[cur_pt_idx[2]], coords[cur_pt_idx[1]], coords[cur_pt_idx[0]], log_norm_const)) return log_norm_const point_indices = list(itertools.combinations_with_replacement(", "options (dict): The options used to generate the lookup table.", "dillfile: return dill.load(dillfile) def eaad_von_mises(kappas, integral_options=None): \"\"\" Expected Absolute Angular", "results[idx_pos] return res_tensor def vec_to_bingham_z_many(y): z = -torch.exp(y).cumsum(1)[:, [2, 1,", "= point_indices[idx] log_norm_const = np.log(8.0) + (3. * np.log(np.pi)) \\", "def eaad_bingham(bingham_z, integral_options=None): \"\"\" Expected Absolute Angular Deviation of Bingham", "divisions, this term does not contain the # normalization constant.", "to the scipy integrator for computing the eaad and the", "coordinate of possible triples z the third coordinate of possible", "of angles in degree format. 
Returns: radian_tensor (torch.Tensor): Tensor consisting", "n self.last_val = val def _compute_vm_lookup_table(coords): num_points = len(coords) pool", "3) \\ * scipy.special.iv(0, param_kappa[0]) \\ * scipy.special.iv(0, param_kappa[1]) \\", "= math.sin(math.radians(roll) * 0.5) cp = math.cos(math.radians(yaw) * 0.5) sp", "= results[idx_pos] res_tensor[pt_idx[1], pt_idx[2], pt_idx[0]] = results[idx_pos] res_tensor[pt_idx[2], pt_idx[0], pt_idx[1]]", "== dict: if self.count == 0: self.avg = copy.deepcopy(val) else:", "np.log(np.pi)) \\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[0]])) \\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[1]])) \\", "with open(path, \"wb\") as dillfile: dill.dump((options, res_table), dillfile) return res_table", "return res_table def build_vm_lookup_table(options, path=None): \"\"\" Builds a lookup table", "math.degrees(param_mu[1]), math.degrees(param_mu[2]) ) param_kappa = kappas direct_norm_const = 8.0 *", "not path: path = os.path.dirname(__file__) \\ + \"/../precomputed/lookup_{}.dill\".format(config_hash) # Load", "degree_tensor/180 * math.pi return radian_tensor def generate_coordinates(coords): \"\"\" A function", "def build_vm_lookup_table(options, path=None): \"\"\" Builds a lookup table for interpolating", "= results[idx_pos] res_tensor[pt_idx[2], pt_idx[0], pt_idx[1]] = results[idx_pos] res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]]", "which is a numpy arrays representing the coordinates at which", "0.5) w = cy * cp * cr + sy", "# phi3 lambda x: 0.0, lambda x: 2. 
* np.pi,", "per dimension.\" nc_options = {\"epsrel\": 1e-3, \"epsabs\": 1e-7} coords =", "coords = np.linspace(lbound, rbound, num_points) res_table = _compute_bd_lookup_table(coords, nc_options) with", "* sr + cy * sp * cr z =", "default is to create a hash based on the options", "= results[idx_pos] res_tensor[pt_idx[0], pt_idx[2], pt_idx[1]] = results[idx_pos] res_tensor[pt_idx[1], pt_idx[0], pt_idx[2]]", "third_party.deep_bingham.bingham_distribution as ms import math import numpy as np import", "* np.pi, # phi3 lambda x: 0.0, lambda x: np.pi,", "n) else: self.avg *= self.count / (self.count + n) self.avg", "# radians quat_mu = convert_euler_to_quaternion( math.degrees(param_mu[0]), math.degrees(param_mu[1]), math.degrees(param_mu[2]) ) param_kappa", "pt_idx in enumerate(point_indices): res_tensor[pt_idx[0], pt_idx[1], pt_idx[2]] = results[idx_pos] res_tensor[pt_idx[0], pt_idx[2],", "the first coordinate of possible triples y: the second coordinate", "computing the eaad and the bingham normalization constant. \"\"\" def", "directory exists and creates it otherwise. \"\"\" if not os.path.exists(path):", "options and to use this for constructing a file name", "automatically. norm_const = ms.BinghamDistribution.normalization_constant( np.array( [coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], 0.]), \"numerical\",", "does not contain the # normalization constant. At the end,", "scipy integrator for computing the eaad and the bingham normalization", "lambda x, y: 0.0, lambda x, y: 2. * np.pi,", "table_type: Type of lookup table used. May be 'uniform' or", "0 def update(self, val, n=1): self.last_val = val n =", "dill import hashlib import itertools import third_party.deep_bingham.bingham_distribution as ms import", "math.pi return radian_tensor def generate_coordinates(coords): \"\"\" A function that returns", "constant. 
\"\"\" def aad(quat_a, quat_b): acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b))) diff_ang", "options[\"coords\"] res_table = _compute_vm_lookup_table(coords) with open(path, \"wb\") as dillfile: dill.dump((options,", "bingham_z: Bingham dispersion parameter in the format expected by the", "\"uniform\": # Number of points per axis. (lbound, rbound) =", "Checks if a directory exists and creates it otherwise. \"\"\"", "integration that is used to compute it, combines # numerical", "Returns: x: the first coordinate of possible triples y: the", "np.exp(param_kappa[0] * np.cos(phi1)) \\ * np.exp(param_kappa[1] * np.cos(phi2)) \\ *", "res_table = _compute_bd_lookup_table(coords, nc_options) with open(path, \"wb\") as dillfile: dill.dump((table_type,", "len(coords) pool = Pool() def nc_wrapper(idx): cur_pt_idx = point_indices[idx] log_norm_const", "For the dict, this class assumes that no new keys", "numpy as np import os import scipy import scipy.integrate as", "constructing a file name and placing the file in the", "return eaad_int[0] / bd.norm_const def build_bd_lookup_table(table_type, options, path=None): \"\"\" Builds", "assert file_config_hash == config_hash, \\ \"Serialized lookup table does not", "= results[idx_pos] res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos] return res_tensor def", "lookup table does not match given type & options.\" else:", "print(\"Computing NC for Z=[{}, {}, {}, 0.0]: {}\".format( coords[pt_idx[2]], coords[pt_idx[1]],", "and returned instead of building a new one. Arguments: options:", "return res_table def _compute_bd_lookup_table(coords, nc_options): num_points = len(coords) pool =", "= results[idx_pos] res_tensor[pt_idx[1], pt_idx[0], pt_idx[2]] = results[idx_pos] res_tensor[pt_idx[1], pt_idx[2], pt_idx[0]]", "eaad and the bingham normalization constant. 
\"\"\" def aad(quat_a, quat_b):", "Absolute Angular Deviation of Bingham Random Vector Arguments: kappas: Von", "copy import dill import hashlib import itertools import third_party.deep_bingham.bingham_distribution as", "+= n self.last_val = val def _compute_vm_lookup_table(coords): num_points = len(coords)", "integrand_aad, 0.0, 2.0 * np.pi, # phi3 lambda x: 0.0,", "lambda x: 0.0, lambda x: 2. * np.pi, # phi2", "\"\"\" hash_obj = hashlib.sha256() hash_obj.update(dill.dumps(options)) config_hash = hash_obj.hexdigest() if not", "used. May be 'uniform' or 'nonuniform' options: Dict cotaining type", "during the computation. \"\"\" def __init__(self): self.last_val = 0 self.avg", "name and placing the file in the precomputed folder. \"\"\"", "or dicts of numbers. For the dict, this class assumes", "key \"coords\" which is a numpy arrays representing the coordinates", "actual lookup table data. \"\"\" assert os.path.exists(path), \"Lookup table file", "* np.pi, # phi3 lambda x: 0.0, lambda x: 2.", "Deviation of Bingham Random Vector Arguments: kappas: Von Mises kappa", "the file in the precomputed folder. \"\"\" hash_obj = hashlib.sha256()", "radian_tensor (torch.Tensor): Tensor consisting of angles in radian format. \"\"\"", "res_tensor (numpy.ndarray): The actual lookup table data. coords (numpy.ndarray): Coordinates", "Arguments: options: Dict cotaining table options. 
It must contain a", "y = sy * cp * sr + cy *", "= coords.reshape(-1, 1).repeat(1, len(coords)).flatten().repeat(len(coords)) z = coords.reshape(-1, 1).flatten().repeat(len(coords)*len(coords)) return x,", "result as the Bingham normalization constant is agnostic to it.", "class AverageMeter(object): \"\"\"Computes and stores the averages over a numbers", "os.path.exists(path): with open(path, \"rb\") as dillfile: (serialized_options, res_table) \\ =", "to compute it, combines # numerical 2d and 1d integration", "diff_ang if integral_options is None: integral_options = {\"epsrel\": 1e-4, \"epsabs\":", "ms.BinghamDistribution( np.eye(4), bingham_z, {\"norm_const_mode\": \"numerical\", \"norm_const_options\": integral_options} ) def integrand_transformed(x):", "val def _compute_vm_lookup_table(coords): num_points = len(coords) pool = Pool() def", "Dict cotaining table options. It must contain a key \"coords\"", "aad(x, bd.mode) \\ * np.exp(np.dot(x, np.dot(np.diag(bingham_z), x))) def integrand(phi1, phi2,", "* sr - sy * sp * cr y =", "not contain the # normalization constant. At the end, the", "file in the precomputed folder. \"\"\" hash_obj = hashlib.sha256() hash_obj.update(table_type.encode('utf-8'))", "* 0.5) cr = math.cos(math.radians(pitch) * 0.5) sr = math.sin(math.radians(pitch)", "= hashlib.sha256() hash_obj.update(serialized_type) hash_obj.update(dill.dumps(serialized_options)) file_config_hash = hash_obj.hexdigest() assert file_config_hash ==", "np.arccos(np.abs(np.dot(quat_a, quat_b))) diff_ang = 2.0 * acos_val return diff_ang if", "log_norm_const = np.log(8.0) + (3. * np.log(np.pi)) \\ + np.log(scipy.special.iv(0,", "constant. \"\"\" def aad(quat_a, quat_b): # acos_val = np.arccos(np.dot(quat_a, quat_b))", "cur_pt_idx = point_indices[idx] log_norm_const = np.log(8.0) + (3. 
* np.log(np.pi))", "math.sin(math.radians(pitch) * 0.5) w = cy * cp * cr", "stores the averages over a numbers or dicts of numbers.", "math.degrees(param_mu[2]) ) param_kappa = kappas direct_norm_const = 8.0 * (np.pi", "sr + cy * sp * cr z = sy", "consisting of angles in degree format. Returns: radian_tensor (torch.Tensor): Tensor", "possible triples of coords Parameters: coords: a numpy array of", "= np.log(8.0) + (3. * np.log(np.pi)) \\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[0]]))", "However, the numpy integration that is used to compute it,", "1)) def nc_wrapper(idx): pt_idx = point_indices[idx] # Indexing pt_idx in", "\"wb\") as dillfile: dill.dump((table_type, options, res_table, coords), dillfile) elif table_type", "phi3 lambda x: 0.0, lambda x: np.pi, # phi2 lambda", "\"\"\" x = coords.reshape(-1, 1).repeat(1, len(coords) * len(coords)).flatten() y =", "2.0 * np.pi, # phi3 lambda x: 0.0, lambda x:", "num_points = options[\"num_points\"] assert num_points > 1, \\ \"Grid must", "ms import math import numpy as np import os import", "acos_val]) acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b))) diff_ang = 2 * acos_val", "res_table = _compute_vm_lookup_table(coords) with open(path, \"wb\") as dillfile: dill.dump((options, res_table),", "and stores the averages over a numbers or dicts of", "Arguments: degree_tensor (torch.Tensor): Tensor consisting of angles in degree format.", "self.avg[key] += val[key] * n / (self.count + n) else:", "(numpy.ndarray): Coordinates at which lookup table was evaluated. For the", "results[idx_pos] res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos] return res_tensor class AverageMeter(object):", "* np.cos(phi1)) \\ * np.exp(param_kappa[1] * np.cos(phi2)) \\ * np.exp(param_kappa[2]", "lookup table. res_tensor (numpy.ndarray): The actual lookup table data. 
\"\"\"", "hashlib.sha256() hash_obj.update(dill.dumps(options)) config_hash = hash_obj.hexdigest() if not path: path =", "+ cy * sp * cr z = sy *", "= Pool() def nc_wrapper(idx): cur_pt_idx = point_indices[idx] log_norm_const = np.log(8.0)", "cotaining table options. It must contain a key \"coords\" which", "one. Arguments: table_type: Type of lookup table used. May be", "np.cos(phi1) ])) * (sp1 ** 2.) * sp2 eaad_int =", "0.0, lambda x, y: np.pi, # phi1 **integral_options ) return", "bd = ms.BinghamDistribution( np.eye(4), bingham_z, {\"norm_const_mode\": \"numerical\", \"norm_const_options\": integral_options} )", "in the precomputed folder. \"\"\" hash_obj = hashlib.sha256() hash_obj.update(table_type.encode('utf-8')) hash_obj.update(dill.dumps(options))", "eaad_int = integrate.tplquad( integrand, 0.0, 2.0 * np.pi, # phi3", "**integral_options ) return eaad_int[0]/direct_norm_const def eaad_bingham(bingham_z, integral_options=None): \"\"\" Expected Absolute", "elif table_type == \"uniform\": # Number of points per axis.", "* scipy.special.iv(0, param_kappa[1]) \\ * scipy.special.iv(0, param_kappa[2]) def integrand_aad(phi1, phi2,", "norm_const = ms.BinghamDistribution.normalization_constant( np.array( [coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], 0.]), \"numerical\", nc_options)", "param_kappa = kappas direct_norm_const = 8.0 * (np.pi ** 3)", "pool = Pool() def nc_wrapper(idx): cur_pt_idx = point_indices[idx] log_norm_const =", "{}, {}]: {}\".format( coords[cur_pt_idx[2]], coords[cur_pt_idx[1]], coords[cur_pt_idx[0]], log_norm_const)) return log_norm_const point_indices", "results[idx_pos] return res_tensor class AverageMeter(object): \"\"\"Computes and stores the averages", "table data. coords (numpy.ndarray): Coordinates at which lookup table was", "it is loaded and returned instead of building a new", "the options and to use this for constructing a file", "cotaining type specific options. 
If type is \"uniform\" this dict", "def ensure_dir_exists(path): \"\"\" Checks if a directory exists and creates", "return aad(x, bd.mode) \\ * np.exp(np.dot(x, np.dot(np.diag(bingham_z), x))) def integrand(phi1,", "Arguments: table_type: Type of lookup table used. May be 'uniform'", "numpy array of coordinates Returns: x: the first coordinate of", "self.count / (self.count + n) self.avg += val * n", "is evaluated. path: absolute path for the lookup table (optional).", "lookup table does not match given type & options.\" elif", "bingham normalization constant. \"\"\" def aad(quat_a, quat_b): acos_val = np.arccos(np.abs(np.dot(quat_a,", "for learning pipeline.\"\"\" from __future__ import print_function import copy import", "\\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[2]])) print(\"Computing NC for kappas=[{}, {}, {}]:", "path for the lookup table (optional). The default is to", "is a numpy arrays representing the coordinates at which the", "# TODO: Make pymanstats choose best order automatically. norm_const =", "= sy * cp * sr + cy * sp", "# However, the numpy integration that is used to compute", "def nc_wrapper(idx): cur_pt_idx = point_indices[idx] log_norm_const = np.log(8.0) + (3.", "self.last_val = 0 self.avg = 0 self.count = 0 def", "the integration is # divided by it. return aad(x, bd.mode)", "phi3 lambda x: 0.0, lambda x: 2. * np.pi, #", "as dillfile: dill.dump((table_type, options, res_table, coords), dillfile) elif table_type ==", "\"Grid must have more than one point per dimension.\" nc_options", "radians(degree_tensor): \"\"\" Method to convert a torch tensor of angles", "yaw, pitch): \"\"\"Converts roll, yaw, pitch to a quaternion. \"\"\"", "It must contain a key \"coords\" which is a numpy", "as the Bingham normalization constant is agnostic to it. #", "hash_obj.update(dill.dumps(serialized_options)) file_config_hash = hash_obj.hexdigest() assert file_config_hash == config_hash, \\ \"Serialized", "a numbers or dicts of numbers. 
For the dict, this", "dimension. If type is \"nonuniform\" this dict must contain a", "pitch, yaw. integral_options: Options to pass on to the scipy", "np.cos(phi2), np.cos(phi1) ])) * (sp1 ** 2.) * sp2 eaad_int", "a numpy array of coordinates Returns: x: the first coordinate", "table does not match given type & options.\" else: coords", "best order automatically. norm_const = ms.BinghamDistribution.normalization_constant( np.array( [coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]],", "sr x = cy * cp * sr - sy", "dill serialized file. Returns a table specific tuple. For the", "* (np.pi ** 3) \\ * scipy.special.iv(0, param_kappa[0]) \\ *", "x, y, z]) quat = quat / np.linalg.norm(quat) return quat", "integral_options = {\"epsrel\": 1e-4, \"epsabs\": 1e-4} bd = ms.BinghamDistribution( np.eye(4),", "* 0.5) sy = math.sin(math.radians(roll) * 0.5) cp = math.cos(math.radians(yaw)", "np.cos(phi2)) \\ * np.exp(param_kappa[2] * np.cos(phi3)) \\ * aad(quat_mu, convert_euler_to_quaternion(", "* len(coords)).flatten() y = coords.reshape(-1, 1).repeat(1, len(coords)).flatten().repeat(len(coords)) z = coords.reshape(-1,", "must have more than one point per dimension.\" nc_options =", "in degree format. Returns: radian_tensor (torch.Tensor): Tensor consisting of angles", "sp2 eaad_int = integrate.tplquad( integrand, 0.0, 2.0 * np.pi, #", "z = -torch.exp(y).cumsum(1)[:, [2, 1, 0]].unsqueeze(0) return z def vec_to_bingham_z(y):", "res_table, coords), dillfile) elif table_type == \"nonuniform\": nc_options = {\"epsrel\":", "numbers or dicts of numbers. 
For the dict, this class", "# roll (z), yaw (y), pitch (x) cy = math.cos(math.radians(roll)", "cpu_count def convert_euler_to_quaternion(roll, yaw, pitch): \"\"\"Converts roll, yaw, pitch to", "scipy.special import sys import torch from pathos.multiprocessing import ProcessingPool as", "os import scipy import scipy.integrate as integrate import scipy.special import", "self.count / (self.count + n) self.avg[key] += val[key] * n", "type is \"nonuniform\" this dict must contain a key \"coords\"", "# phi2 lambda x, y: 0.0, lambda x, y: np.pi,", "0.0]: {}\".format( coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], norm_const)) return norm_const point_indices =", "integration which is why the order matters for the #", "\\ * np.exp(param_kappa[2] * np.cos(phi3)) \\ * aad(quat_mu, convert_euler_to_quaternion( math.degrees(phi1),", "dict, this class assumes that no new keys are added", "roll, yaw, pitch to a quaternion. \"\"\" # roll (z),", "x))) def integrand(phi1, phi2, phi3): sp1 = np.sin(phi1) sp2 =", "# phi2 lambda x, y: 0.0, lambda x, y: 2.", "x: np.pi, # phi2 lambda x, y: 0.0, lambda x,", "consisting of angles in radian format. \"\"\" radian_tensor = degree_tensor/180", "eaad_int = integrate.tplquad( integrand_aad, 0.0, 2.0 * np.pi, # phi3", "options, res_table, coords), dillfile) else: sys.exit(\"Unknown lookup table type\") return", "options[\"bounds\"] num_points = options[\"num_points\"] assert num_points > 1, \\ \"Grid", "dillfile) elif table_type == \"nonuniform\": nc_options = {\"epsrel\": 1e-3, \"epsabs\":", "used to compute it, combines # numerical 2d and 1d", "divided by it. 
return aad(x, bd.mode) \\ * np.exp(np.dot(x, np.dot(np.diag(bingham_z),", "pt_idx[2]] = results[idx_pos] res_tensor[pt_idx[0], pt_idx[2], pt_idx[1]] = results[idx_pos] res_tensor[pt_idx[1], pt_idx[0],", "coords = options[\"coords\"] res_table = _compute_bd_lookup_table(coords, nc_options) with open(path, \"wb\")", "nc_options = {\"epsrel\": 1e-3, \"epsabs\": 1e-7} coords = options[\"coords\"] res_table", "np.dot(np.diag(bingham_z), x))) def integrand(phi1, phi2, phi3): sp1 = np.sin(phi1) sp2", "import scipy import scipy.integrate as integrate import scipy.special import sys", "0.]) # radians quat_mu = convert_euler_to_quaternion( math.degrees(param_mu[0]), math.degrees(param_mu[1]), math.degrees(param_mu[2]) )", "pt_idx[0]] = results[idx_pos] res_tensor[pt_idx[2], pt_idx[0], pt_idx[1]] = results[idx_pos] res_tensor[pt_idx[2], pt_idx[1],", "added during the computation. \"\"\" def __init__(self): self.last_val = 0", "1).repeat(1, len(coords) * len(coords)).flatten() y = coords.reshape(-1, 1).repeat(1, len(coords)).flatten().repeat(len(coords)) z", "to generate the lookup table. res_tensor (numpy.ndarray): The actual lookup", "self.avg *= self.count / (self.count + n) self.avg += val", "y: 0.0, lambda x, y: 2. 
* np.pi, # phi1", "coords.reshape(-1, 1).flatten().repeat(len(coords)*len(coords)) return x, y, z def ensure_dir_exists(path): \"\"\" Checks", "math.cos(math.radians(yaw) * 0.5) sp = math.sin(math.radians(yaw) * 0.5) cr =", "else: coords = options[\"coords\"] res_table = _compute_vm_lookup_table(coords) with open(path, \"wb\")", "* np.exp(np.dot(x, np.dot(np.diag(bingham_z), x))) def integrand(phi1, phi2, phi3): sp1 =", "of possible triples y: the second coordinate of possible triples", "import itertools import third_party.deep_bingham.bingham_distribution as ms import math import numpy", "= degree_tensor/180 * math.pi return radian_tensor def generate_coordinates(coords): \"\"\" A", "1).flatten().repeat(len(coords)*len(coords)) return x, y, z def ensure_dir_exists(path): \"\"\" Checks if", "= convert_euler_to_quaternion( math.degrees(param_mu[0]), math.degrees(param_mu[1]), math.degrees(param_mu[2]) ) param_kappa = kappas direct_norm_const", "sp2 * np.cos(phi3), sp1 * np.cos(phi2), np.cos(phi1) ])) * (sp1", "table. res_tensor (numpy.ndarray): The actual lookup table data. \"\"\" assert", "coords) \\ = dill.load(dillfile) hash_obj = hashlib.sha256() hash_obj.update(serialized_type) hash_obj.update(dill.dumps(serialized_options)) file_config_hash", "precomputed folder. \"\"\" hash_obj = hashlib.sha256() hash_obj.update(table_type.encode('utf-8')) hash_obj.update(dill.dumps(options)) config_hash =", "containins: table_type (str): options (dict): The options used to generate", "coords), dillfile) else: sys.exit(\"Unknown lookup table type\") return res_table def", "order matters for the # actual computation time. # #", "np.sin(phi3), sp1 * sp2 * np.cos(phi3), sp1 * np.cos(phi2), np.cos(phi1)", "integrate import scipy.special import sys import torch from pathos.multiprocessing import", "# normalization constant. At the end, the result of the", "numpy arrays representing the coordinates at which the interpolation is", "the # actual computation time. 
# # TODO: Make pymanstats", "scipy.special.iv(0, param_kappa[2]) def integrand_aad(phi1, phi2, phi3): return np.exp(param_kappa[0] * np.cos(phi1))", "case, it contains: options (dict): The options used to generate", "roll, pitch, yaw. integral_options: Options to pass on to the", "cp = math.cos(math.radians(yaw) * 0.5) sp = math.sin(math.radians(yaw) * 0.5)", "nc_options): num_points = len(coords) pool = Pool(max(cpu_count()//2, 1)) def nc_wrapper(idx):", "the interpolation is evaluated. path: absolute path for the lookup", "else: for key in val: self.avg[key] *= self.count / (self.count", "integrand, 0.0, 2.0 * np.pi, # phi3 lambda x: 0.0,", "returned instead of building a new one. Arguments: options: Dict", "math.degrees(phi2), math.degrees(phi3) )) eaad_int = integrate.tplquad( integrand_aad, 0.0, 2.0 *", "x, y: 0.0, lambda x, y: np.pi, # phi1 **integral_options", "options. It must contain a key \"coords\" which is a", "= dill.load(dillfile) hash_obj = hashlib.sha256() hash_obj.update(serialized_type) hash_obj.update(dill.dumps(serialized_options)) file_config_hash = hash_obj.hexdigest()", "import scipy.integrate as integrate import scipy.special import sys import torch", "0.5) sr = math.sin(math.radians(pitch) * 0.5) w = cy *", "* 0.5) cp = math.cos(math.radians(yaw) * 0.5) sp = math.sin(math.radians(yaw)", "[coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], 0.]), \"numerical\", nc_options) print(\"Computing NC for Z=[{},", "return diff_ang if integral_options is None: integral_options = {\"epsrel\": 1e-4,", "\\ \"Serialized lookup table does not match given type &", "if not path: path = os.path.dirname(__file__) \\ + \"/../precomputed/lookup_{}.dill\".format(config_hash) #", "0.5) sy = math.sin(math.radians(roll) * 0.5) cp = math.cos(math.radians(yaw) *", "= copy.deepcopy(val) else: for key in val: self.avg[key] *= self.count", "config_hash, \\ \"Serialized lookup table does not match given type", "0.5) cp = math.cos(math.radians(yaw) * 0.5) sp = 
math.sin(math.radians(yaw) *", "return eaad_int[0]/direct_norm_const def eaad_bingham(bingham_z, integral_options=None): \"\"\" Expected Absolute Angular Deviation", "\"num_points\" = Number of points per dimension. If type is", "math.cos(math.radians(roll) * 0.5) sy = math.sin(math.radians(roll) * 0.5) cp =", "points per dimension. If type is \"nonuniform\" this dict must", "if a directory exists and creates it otherwise. \"\"\" if", "np.arccos(np.abs(np.dot(quat_a, quat_b))) diff_ang = 2 * acos_val return diff_ang if", "the manstats BinghamDistribution class. integral_options: Options to pass on to", "cp * sr - sy * sp * cr y", "type is \"uniform\" this dict must contain: \"bounds\" = Tuple", "Arguments: kappas: Von Mises kappa parameters for roll, pitch, yaw.", "math.degrees(param_mu[0]), math.degrees(param_mu[1]), math.degrees(param_mu[2]) ) param_kappa = kappas direct_norm_const = 8.0", "= math.sin(math.radians(yaw) * 0.5) cr = math.cos(math.radians(pitch) * 0.5) sr", "\"\"\" A function that returns all possible triples of coords", "def __init__(self): self.last_val = 0 self.avg = 0 self.count =", "data. \"\"\" assert os.path.exists(path), \"Lookup table file not found.\" with", "\"numerical\", \"norm_const_options\": integral_options} ) def integrand_transformed(x): # To avoid unnecessary", "x, y: 0.0, lambda x, y: 2. * np.pi, #", "per axis. (lbound, rbound) = options[\"bounds\"] num_points = options[\"num_points\"] assert", "Pool(max(cpu_count()//2, 1)) def nc_wrapper(idx): pt_idx = point_indices[idx] # Indexing pt_idx", "import numpy as np import os import scipy import scipy.integrate", "idx_pos, pt_idx in enumerate(point_indices): res_tensor[pt_idx[0], pt_idx[1], pt_idx[2]] = results[idx_pos] res_tensor[pt_idx[0],", "of angles in radian format. \"\"\" radian_tensor = degree_tensor/180 *", "contain: \"bounds\" = Tuple (lower_bound, upper_bound) representing bounds. 
\"num_points\" =", "hashlib.sha256() hash_obj.update(dill.dumps(serialized_options)) file_config_hash = hash_obj.hexdigest() assert file_config_hash == config_hash, \\", "is \"nonuniform\" this dict must contain a key \"coords\" which", "float(n) if type(val) == dict: if self.count == 0: self.avg", "is loaded and returned instead of building a new one.", "convert_euler_to_quaternion( math.degrees(phi1), math.degrees(phi2), math.degrees(phi3) )) eaad_int = integrate.tplquad( integrand_aad, 0.0,", "no impact # on the result as the Bingham normalization", "type specific options. If type is \"uniform\" this dict must", "coordinates at which the interpolation is evaluated. path: absolute path", "* cr + sy * sp * sr x =", "return res_tensor class AverageMeter(object): \"\"\"Computes and stores the averages over", "hash_obj.update(table_type.encode('utf-8')) hash_obj.update(dill.dumps(options)) config_hash = hash_obj.hexdigest() if not path: path =", "= list(itertools.combinations_with_replacement( range(0, num_points), 3)) results = pool.map(nc_wrapper, range(len(point_indices))) res_tensor", "dill.load(dillfile) hash_obj = hashlib.sha256() hash_obj.update(dill.dumps(serialized_options)) file_config_hash = hash_obj.hexdigest() assert file_config_hash", "of angles in degree format to radians. Arguments: degree_tensor (torch.Tensor):", "learning pipeline.\"\"\" from __future__ import print_function import copy import dill", "table used. May be 'uniform' or 'nonuniform' options: Dict cotaining", "y: 2. * np.pi, # phi1 **integral_options ) return eaad_int[0]/direct_norm_const", "def integrand_transformed(x): # To avoid unnecessary divisions, this term does", "cr + sy * sp * sr x = cy", "instead of building a new one. 
Arguments: options: Dict cotaining", "= os.path.dirname(__file__) \\ + \"/../precomputed/lookup_{}.dill\".format(config_hash) # Load existing table or", "this class assumes that no new keys are added during", "not match given type & options.\" elif table_type == \"uniform\":", "import copy import dill import hashlib import itertools import third_party.deep_bingham.bingham_distribution", "with open(path, \"rb\") as dillfile: (serialized_options, res_table) \\ = dill.load(dillfile)", "TODO: Make pymanstats choose best order automatically. norm_const = ms.BinghamDistribution.normalization_constant(", "\"\"\" def aad(quat_a, quat_b): acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b))) diff_ang =", "None: integral_options = {\"epsrel\": 1e-4, \"epsabs\": 1e-4} bd = ms.BinghamDistribution(", "y: 0.0, lambda x, y: np.pi, # phi1 **integral_options )", "= options[\"num_points\"] assert num_points > 1, \\ \"Grid must have", "dillfile: dill.dump((table_type, options, res_table, coords), dillfile) elif table_type == \"nonuniform\":", "res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos] return res_tensor def vec_to_bingham_z_many(y): z", "__future__ import print_function import copy import dill import hashlib import", "res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos] return res_tensor class AverageMeter(object): \"\"\"Computes", "def eaad_von_mises(kappas, integral_options=None): \"\"\" Expected Absolute Angular Deviation of Bingham", "* np.cos(phi3), sp1 * np.cos(phi2), np.cos(phi1) ])) * (sp1 **", "table specific tuple. 
For the Bingham case, the tuple containins:", "1e-2} param_mu = np.array([0., 0., 0.]) # radians quat_mu =", "lookup table with the given options already exists, it is", "Method to convert a torch tensor of angles in degree", "Bingham case, the tuple containins: table_type (str): options (dict): The", "n) self.count += n self.last_val = val def _compute_vm_lookup_table(coords): num_points", "point per dimension.\" nc_options = {\"epsrel\": 1e-3, \"epsabs\": 1e-7} coords", "pt_idx[0], pt_idx[1]] = results[idx_pos] res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos] return", "all possible triples of coords Parameters: coords: a numpy array", "ensure_dir_exists(path): \"\"\" Checks if a directory exists and creates it", "z = coords.reshape(-1, 1).flatten().repeat(len(coords)*len(coords)) return x, y, z def ensure_dir_exists(path):", "integral_options=None): \"\"\" Expected Absolute Angular Deviation of Bingham Random Vector", "+ \"/../precomputed/lookup_{}.dill\".format(config_hash) # Load existing table or create new one.", "update(self, val, n=1): self.last_val = val n = float(n) if", "w = cy * cp * cr + sy *", "in degree format to radians. Arguments: degree_tensor (torch.Tensor): Tensor consisting", "by the manstats BinghamDistribution class. integral_options: Options to pass on", "new one. if os.path.exists(path): with open(path, \"rb\") as dillfile: (serialized_options,", "specific options. If type is \"uniform\" this dict must contain:", "= coords.reshape(-1, 1).flatten().repeat(len(coords)*len(coords)) return x, y, z def ensure_dir_exists(path): \"\"\"", "table or create new one. if os.path.exists(path): with open(path, \"rb\")", "np.pi - acos_val]) acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b))) diff_ang = 2", "exists and creates it otherwise. 
\"\"\" if not os.path.exists(path): os.makedirs(path)", "cy * cp * sr - sy * sp *", "function that returns all possible triples of coords Parameters: coords:", "+ sy * sp * sr x = cy *", "\"bounds\" = Tuple (lower_bound, upper_bound) representing bounds. \"num_points\" = Number", "radians quat_mu = convert_euler_to_quaternion( math.degrees(param_mu[0]), math.degrees(param_mu[1]), math.degrees(param_mu[2]) ) param_kappa =", "-torch.exp(y).cumsum(0)[[2, 1, 0]].unsqueeze(0) if not all(z[0][:-1] <= z[0][1:]): print(z) return", "Utilities for learning pipeline.\"\"\" from __future__ import print_function import copy", "angles in radian format. \"\"\" radian_tensor = degree_tensor/180 * math.pi", "np.pi, # phi2 lambda x, y: 0.0, lambda x, y:", "assert os.path.exists(path), \"Lookup table file not found.\" with open(path, \"rb\")", "the second coordinate of possible triples z the third coordinate", "quat_b): acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b))) diff_ang = 2.0 * acos_val", "res_table, coords) \\ = dill.load(dillfile) hash_obj = hashlib.sha256() hash_obj.update(serialized_type) hash_obj.update(dill.dumps(serialized_options))", "'uniform' or 'nonuniform' options: Dict cotaining type specific options. If", "per dimension. If type is \"nonuniform\" this dict must contain", "integration is # divided by it. return aad(x, bd.mode) \\", "* cr y = sy * cp * sr +", "the order 2,1,0 vs. 0,1,2 has no impact # on", "for constructing a file name and placing the file in", "res_tensor[pt_idx[0], pt_idx[1], pt_idx[2]] = results[idx_pos] res_tensor[pt_idx[0], pt_idx[2], pt_idx[1]] = results[idx_pos]", "import ProcessingPool as Pool from pathos.multiprocessing import cpu_count def convert_euler_to_quaternion(roll,", "(3. 
* np.log(np.pi)) \\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[0]])) \\ + np.log(scipy.special.iv(0,", "sp * sr quat = np.array([w, x, y, z]) quat", "def nc_wrapper(idx): pt_idx = point_indices[idx] # Indexing pt_idx in the", "== \"uniform\": # Number of points per axis. (lbound, rbound)", "self.count += n self.last_val = val def _compute_vm_lookup_table(coords): num_points =", "(self.count + n) self.avg[key] += val[key] * n / (self.count", "coords Parameters: coords: a numpy array of coordinates Returns: x:", "(x) cy = math.cos(math.radians(roll) * 0.5) sy = math.sin(math.radians(roll) *", "return radian_tensor def generate_coordinates(coords): \"\"\" A function that returns all", "np.cos(phi1)) \\ * np.exp(param_kappa[1] * np.cos(phi2)) \\ * np.exp(param_kappa[2] *", "format expected by the manstats BinghamDistribution class. integral_options: Options to", "* scipy.special.iv(0, param_kappa[2]) def integrand_aad(phi1, phi2, phi3): return np.exp(param_kappa[0] *", "results = pool.map(nc_wrapper, range(len(point_indices))) res_tensor = -np.ones((num_points, num_points, num_points)) for", "# divided by it. return aad(x, bd.mode) \\ * np.exp(np.dot(x,", "dill.dump((table_type, options, res_table, coords), dillfile) else: sys.exit(\"Unknown lookup table type\")", "\\ = dill.load(dillfile) hash_obj = hashlib.sha256() hash_obj.update(serialized_type) hash_obj.update(dill.dumps(serialized_options)) file_config_hash =", "if not os.path.exists(path): os.makedirs(path) def load_lookup_table(path): \"\"\" Loads lookup table", "hash_obj = hashlib.sha256() hash_obj.update(table_type.encode('utf-8')) hash_obj.update(dill.dumps(options)) config_hash = hash_obj.hexdigest() if not", "phi2 lambda x, y: 0.0, lambda x, y: 2. *", "type(val) == dict: if self.count == 0: self.avg = copy.deepcopy(val)", "bingham normalization constant. 
\"\"\" def aad(quat_a, quat_b): # acos_val =", "\\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[0]])) \\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[1]])) \\ +", "Type of lookup table used. May be 'uniform' or 'nonuniform'", "2. * np.pi, # phi2 lambda x, y: 0.0, lambda", "norm_const)) return norm_const point_indices = list(itertools.combinations_with_replacement( range(0, num_points), 3)) results", "as np import os import scipy import scipy.integrate as integrate", "the tuple containins: table_type (str): options (dict): The options used", "n / (self.count + n) self.count += n self.last_val =", "Arguments: bingham_z: Bingham dispersion parameter in the format expected by", "\"\"\" if not os.path.exists(path): os.makedirs(path) def load_lookup_table(path): \"\"\" Loads lookup", "quat_b)) # diff_ang = 2 * np.min([acos_val, np.pi - acos_val])", "else: self.avg *= self.count / (self.count + n) self.avg +=", "open(path, \"rb\") as dillfile: (serialized_type, serialized_options, res_table, coords) \\ =", "res_tensor[pt_idx[1], pt_idx[0], pt_idx[2]] = results[idx_pos] res_tensor[pt_idx[1], pt_idx[2], pt_idx[0]] = results[idx_pos]", "= options[\"coords\"] res_table = _compute_vm_lookup_table(coords) with open(path, \"wb\") as dillfile:", "* np.cos(phi2)) \\ * np.exp(param_kappa[2] * np.cos(phi3)) \\ * aad(quat_mu,", "= Tuple (lower_bound, upper_bound) representing bounds. 
\"num_points\" = Number of", "\"nonuniform\": nc_options = {\"epsrel\": 1e-3, \"epsabs\": 1e-7} coords = options[\"coords\"]", "dillfile: (serialized_type, serialized_options, res_table, coords) \\ = dill.load(dillfile) hash_obj =", "type & options.\" else: coords = options[\"coords\"] res_table = _compute_vm_lookup_table(coords)", "copy.deepcopy(val) else: for key in val: self.avg[key] *= self.count /", "res_tensor[pt_idx[2], pt_idx[0], pt_idx[1]] = results[idx_pos] res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos]", ") param_kappa = kappas direct_norm_const = 8.0 * (np.pi **", "print(\"Computing NC for kappas=[{}, {}, {}]: {}\".format( coords[cur_pt_idx[2]], coords[cur_pt_idx[1]], coords[cur_pt_idx[0]],", "Pool from pathos.multiprocessing import cpu_count def convert_euler_to_quaternion(roll, yaw, pitch): \"\"\"Converts", ") return eaad_int[0]/direct_norm_const def eaad_bingham(bingham_z, integral_options=None): \"\"\" Expected Absolute Angular", "(sp1 ** 2.) * sp2 eaad_int = integrate.tplquad( integrand, 0.0,", "pymanstats choose best order automatically. norm_const = ms.BinghamDistribution.normalization_constant( np.array( [coords[pt_idx[2]],", "one. Arguments: options: Dict cotaining table options. It must contain", "load_lookup_table(path): \"\"\" Loads lookup table from dill serialized file. 
Returns", "does not match given type & options.\" elif table_type ==", "len(coords) pool = Pool(max(cpu_count()//2, 1)) def nc_wrapper(idx): pt_idx = point_indices[idx]", "/ (self.count + n) self.avg += val * n /", "sp * cr y = sy * cp * sr", "Random Vector Arguments: kappas: Von Mises kappa parameters for roll,", "options, path=None): \"\"\" Builds a lookup table for interpolating the", "AverageMeter(object): \"\"\"Computes and stores the averages over a numbers or", "= -np.ones((num_points, num_points, num_points)) for idx_pos, pt_idx in enumerate(point_indices): res_tensor[pt_idx[0],", "match given type & options.\" else: coords = options[\"coords\"] res_table", "def generate_coordinates(coords): \"\"\" A function that returns all possible triples", "param_kappa[0]) \\ * scipy.special.iv(0, param_kappa[1]) \\ * scipy.special.iv(0, param_kappa[2]) def", "dill.dump((table_type, options, res_table, coords), dillfile) elif table_type == \"nonuniform\": nc_options", "# actual computation time. # # TODO: Make pymanstats choose", "this term does not contain the # normalization constant. At", "0.5) cr = math.cos(math.radians(pitch) * 0.5) sr = math.sin(math.radians(pitch) *", "hash_obj.hexdigest() if not path: path = os.path.dirname(__file__) \\ + \"/../precomputed/lookup_{}.dill\".format(config_hash)", "n = float(n) if type(val) == dict: if self.count ==", "integral_options: Options to pass on to the scipy integrator for", "if integral_options is None: integral_options = {\"epsrel\": 1e-4, \"epsabs\": 1e-4}", "bounds. \"num_points\" = Number of points per dimension. If type", "(optional). 
The default is to create a hash based on", "type & options.\" elif table_type == \"uniform\": # Number of", "coords[cur_pt_idx[1]], coords[cur_pt_idx[0]], log_norm_const)) return log_norm_const point_indices = list(itertools.combinations_with_replacement( range(0, num_points),", "1e-4} bd = ms.BinghamDistribution( np.eye(4), bingham_z, {\"norm_const_mode\": \"numerical\", \"norm_const_options\": integral_options}", "return x, y, z def ensure_dir_exists(path): \"\"\" Checks if a", "val[key] * n / (self.count + n) else: self.avg *=", "creates it otherwise. \"\"\" if not os.path.exists(path): os.makedirs(path) def load_lookup_table(path):", "radian format. \"\"\" radian_tensor = degree_tensor/180 * math.pi return radian_tensor", "os.path.exists(path), \"Lookup table file not found.\" with open(path, \"rb\") as", "this dict must contain: \"bounds\" = Tuple (lower_bound, upper_bound) representing", "(self.count + n) self.count += n self.last_val = val def", "third coordinate of possible triples \"\"\" x = coords.reshape(-1, 1).repeat(1,", "n / (self.count + n) else: self.avg *= self.count /", "on the options and to use this for constructing a", "\"rb\") as dillfile: return dill.load(dillfile) def eaad_von_mises(kappas, integral_options=None): \"\"\" Expected", "kappas: Von Mises kappa parameters for roll, pitch, yaw. integral_options:", "\"uniform\" this dict must contain: \"bounds\" = Tuple (lower_bound, upper_bound)", "= kappas direct_norm_const = 8.0 * (np.pi ** 3) \\", "{\"epsrel\": 1e-3, \"epsabs\": 1e-7} coords = np.linspace(lbound, rbound, num_points) res_table", "\"norm_const_options\": integral_options} ) def integrand_transformed(x): # To avoid unnecessary divisions,", "+ (3. 
* np.log(np.pi)) \\ + np.log(scipy.special.iv(0, coords[cur_pt_idx[0]])) \\ +", "(lbound, rbound) = options[\"bounds\"] num_points = options[\"num_points\"] assert num_points >", "as ms import math import numpy as np import os", "* sp * cr y = sy * cp *", "sp * sr x = cy * cp * sr", "scipy.special.iv(0, param_kappa[1]) \\ * scipy.special.iv(0, param_kappa[2]) def integrand_aad(phi1, phi2, phi3):", "open(path, \"wb\") as dillfile: dill.dump((table_type, options, res_table, coords), dillfile) elif", "import sys import torch from pathos.multiprocessing import ProcessingPool as Pool", "np.pi, # phi1 **integral_options ) return eaad_int[0]/direct_norm_const def eaad_bingham(bingham_z, integral_options=None):", "absolute path for the lookup table (optional). The default is", "time. # # TODO: Make pymanstats choose best order automatically.", "a table specific tuple. For the Bingham case, the tuple", "self.count = 0 def update(self, val, n=1): self.last_val = val", "Expected Absolute Angular Deviation of Bingham Random Vector Arguments: kappas:", "print_function import copy import dill import hashlib import itertools import", "{}]: {}\".format( coords[cur_pt_idx[2]], coords[cur_pt_idx[1]], coords[cur_pt_idx[0]], log_norm_const)) return log_norm_const point_indices =", "file in the precomputed folder. \"\"\" hash_obj = hashlib.sha256() hash_obj.update(dill.dumps(options))", "which is why the order matters for the # actual", "must contain a key \"coords\" which is a numpy arrays" ]
[ "+ song[\"value\"]) else: print(yellow(\"Something went Wrong...\") + response.status_code) except: print(red(\"Could", "a test number (1=insert, 2=query, 3=requests), runs the test and", "from Chord': print(cyan(\"Preparing Node to depart from Chord...\")) try: response", "nodes_list = json.loads(response.text) print('\\n') for node in nodes_list[\"res\"]: print(green(node[\"ip\"] +", "\"-\",deleteHelp,\"\\n\", \"-\",departHelp,\"\\n\", \"-\",autoTests,\"\\n\", ) continue elif method_a == 'Run automated", "in whitch the song is stored and the value of", "as ends from utils.colorfy import * from auto.testing import test_trans", "fetch_a['key'] + cyan(\"...\")) try: response = requests.post(baseURL + ends.c_query ,data={'key':fetch_a['key']})", "print(red(\"Unfortunately exiting...\")) exit(0) continue elif method_a == 'Network Overlay': print(cyan(\"Initiating", "method_a == 'Depart from Chord': print(cyan(\"Preparing Node to depart from", "else 's' if test_number not in ('1', '2', '3'): print(yellow(\"Wrong", "inserted...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue elif method_a == 'Delete a", "wish to Search or * to get all songs of", "node in nodes_list[\"res\"]: print(header(\"\\n\" + node[\"uid\"]) + \" \" +", "import time import json style = style_from_dict({ Token.QuestionMark: '#E91E63 bold',", "you must tell me the port. Ex. -p 5000 !!\")", "lambda val: str(val) } ] fetch_a = prompt(fetch_q, style=style) test_number", "print(\"Song found in node with id: \",green(response.text.split(\" \")[0])) print(\"Song value:", "response.status_code) except: print(red(\"Could not establish connection with Node. 
Couldnt search", "in nodes_list[\"res\"]: print(header(\"\\n\" + node[\"uid\"]) + \" \" + underline(node[\"ip\"]", "'2' else \"requests\")) + cyan(\"...\")) print(blue(test_trans(test_number))) print(cyan(\"Done!\")) continue elif method_a", "\")[0])) else : print(red(\"Got a bad response status code \"", "response.status_code == 200 and response.text.split(\" \")[1] != \"@!@\": # print(cyan(\"Deleting", "(give 1, 2 or 3)\")) continue print(cyan(\"Running automated test: \")", "cyan(\"...\")) try: response = requests.post(baseURL + ends.c_delete ,data={'key':fetch_a['key']}) if response.status_code", "Title:', 'filter': lambda val: str(val) }] fetch_a = prompt(fetch_q, style=style)", "the node connected to this cli leave the Chord\\n\") autoTests=header(\"Run", "Song Value and inserts them in the Chord\\n\") queryHelp=header(\"Search Song:", "Song: \") + green(response.text.split(\" \")[1]) + ) print(cyan(\"Deleted by node", "except: print(red(\"Could not establish connection with Node. Song wasnt inserted...\"))", "for node in nodes_list[\"res\"]: print(header(\"\\n\" + node[\"uid\"]) + \" \"", "\", end = '') print('\\n') else : print(red(\"Got a bad", "\" + song[\"value\"]) else: print(yellow(\"Something went Wrong...\") + response.status_code) except:", "print(yellow(\"Wrong test number (give 1, 2 or 3)\")) continue print(cyan(\"Running", "insert, 2 = query, 3 = requests)') fetch_q = [", "exiting...\")) exit(0) continue elif method_a == 'Delete a Song': print('Insert", "the song you wish to insert') fetch_q = [ {", "\" + response.status_code)) except: print(red(\"Could not establish connection with Node...\"))", "+ response.status_code)) except: print(red(\"Could not establish connection with Node. 
Node", "a Song Title and returns the Node in whitch the", "= json.loads(response.text) # print(green(response.text)) # print(cyan())) for node in nodes_list[\"res\"]:", "if response.status_code == 200 and response.text.split(\" \")[1] != \"@!@\": #", "Token.Instruction: '#<PASSWORD>', Token.Answer: '#<PASSWORD> bold', Token.Question: '#<PASSWORD>16 bold', }) def", "3 = requests)') fetch_q = [ { 'type': 'input', 'name':", "'message': 'Song Title:', 'filter': lambda val: str(val) }, { 'type':", "== 200: if response.text == \"Left the Chord\": print(response.text) print(green(\"Node", "Song Title you wish to Search or * to get", "if response.status_code == 200: if response.text == \"Left the Chord\":", "a Song', \\ 'Search for a Song', \\ 'Delete a", "a Song': print('Insert the Song Title you wish to Search", "Title you wish to delete') fetch_q = [ { 'type':", "'choices': ['Network Overlay', \\ 'Insert a Song', \\ 'Search for", "\"-P\"): my_port = sys.argv[2] my_ip = os.popen('ip addr show '", "cyan(\"This function expects a Song Title and returns the Node", "deleteHelp=header(\"Delete Song: \") + cyan(\"This function expects a Song Title", "str(val) }] fetch_a = prompt(fetch_q, style=style) if fetch_a['key'] == \"*\":", "= { 'type': 'list', 'name': 'method', 'message': 'Select action:', 'choices':", "print(cyan(\"Done!\")) continue elif method_a == 'Exit': os.system('clear') break else: os.system('clear')", "1, 2 or 3)\")) continue print(cyan(\"Running automated test: \") +", "for song...\")) print(red(\"Unfortunately exiting...\")) exit(0) else: print(cyan(\"Searching Song: \") +", "method_a == 'Network Overlay': print(cyan(\"Initiating Network Overlay...\")) try: response =", "+ cyan(\"This functions recreates and prints the current Network Topology(eg.", "2=query, 3=requests), runs the test and returns the chord throughput\")", "Toychord network\")) else: print(red(response.text)) else : print(red(\"Got a bad response", "\") + green(response.text.split(\" \")[0])) 
else : print(yellow(\"Song doesnt exist in", "green(response.text.split(\" \")[1])) else: print(yellow(\"Song doesnt exist in the Chord\")) except:", "cyan(\"...\")) try: response = requests.post(baseURL + ends.c_query ,data={'key':fetch_a['key']}) if response.status_code", "print('Insert the Song Title you wish to delete') fetch_q =", "len(sys.argv) < 3: print(\"!! you must tell me the port.", "with Node. Couldnt search for song...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue", "run (1 = insert, 2 = query, 3 = requests)')", "+ ip + ':' + port while True: print('----------------------------------------------------------------------') method_q", "\" \" + underline(node[\"ip\"] + \":\" + node[\"port\"])) for song", "fetch_a['key'] == \"*\": print(cyan(\"Fetching all the songs of the Chord...\"))", "fetch_a['key'] + cyan(\"...\")) try: response = requests.post(baseURL + ends.c_insert ,data={'key':fetch_a['key'],'value':fetch_a['value']})", "nodes_list[\"res\"][-1]: print(\" -> \", end = '') print('\\n') else :", "bold', Token.Instruction: '#<PASSWORD>', Token.Answer: '#<PASSWORD> bold', Token.Question: '#<PASSWORD>16 bold', })", "--------------------------------\\n') overlayHelp=header(\"Overlay: \") + cyan(\"This functions recreates and prints the", "Chord\")) except: print(red(\"Could not establish connection with Node. 
Couldnt search", "'type': 'list', 'name': 'method', 'message': 'Select action:', 'choices': ['Network Overlay',", "'filter': lambda val: str(val) }, { 'type': 'input', 'name': 'value',", "in ('1', '2', '3'): print(yellow(\"Wrong test number (give 1, 2", "Song': print('Insert the Song Title you wish to delete') fetch_q", "True: print('----------------------------------------------------------------------') method_q = { 'type': 'list', 'name': 'method', 'message':", "\"-\",queryHelp,\"\\n\", \"-\",deleteHelp,\"\\n\", \"-\",departHelp,\"\\n\", \"-\",autoTests,\"\\n\", ) continue elif method_a == 'Run", "fetch_a = prompt(fetch_q, style=style) if fetch_a['key'] == \"*\": print(cyan(\"Fetching all", "response.text.split(\" \")[1] != \"@!@\": # print(cyan(\"Deleting Song: \") + green(response.text.split(\"", "in the Chord\")) print(yellow(\"Couldnt delete it\")) except: print(red(\"Could not establish", "the Chord\")) except: print(red(\"Could not establish connection with Node. Couldnt", "lambda val: str(val) }] fetch_a = prompt(fetch_q, style=style) if fetch_a['key']", "\") + fetch_a['key'] + cyan(\"...\")) try: response = requests.post(baseURL +", "client(ip, port): os.system('clear') cyan('What a beautiful day to enter the", "in node[\"song\"]: print(\" -\" + green(song[\"key\"]) + \" \" +", "a Song': print('Insert a Title-Value pair for the song you", "response.status_code == 200 and response.text.split(\" \")[1] != \"@!@\": print(\"Song found", "inserts them in the Chord\\n\") queryHelp=header(\"Search Song: \") + cyan(\"This", "style = style_from_dict({ Token.QuestionMark: '#E91E63 bold', Token.Selected: '#673AB7 bold', Token.Instruction:", "a beautiful day to enter the cult...') baseURL = 'http://'", "except: print(red(\"Could not establish connection with Node. 
Couldnt search for", "= requests.post(baseURL + ends.c_query ,data={'key':fetch_a['key']}) if response.status_code == 200 and", "'Select action:', 'choices': ['Network Overlay', \\ 'Insert a Song', \\", "...)\\n\") insertHelp=header(\"Insert Song: \") + cyan(\"This functions expects a Song", "wasnt inserted...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue elif method_a == 'Delete", "== '1' else (\"query\" if test_number == '2' else \"requests\"))", "'Help': print('-------------------------------- Help --------------------------------\\n') overlayHelp=header(\"Overlay: \") + cyan(\"This functions recreates", "+ ':' + port while True: print('----------------------------------------------------------------------') method_q = {", "with Node. Couldnt search for song...\")) print(red(\"Unfortunately exiting...\")) exit(0) else:", "!!\") exit(0) if sys.argv[1] in (\"-p\", \"-P\"): my_port = sys.argv[2]", "print(red(\"Could not establish connection with Node. Song wasnt inserted...\")) print(red(\"Unfortunately", "print(response.text) print(green(\"Node is out of Toychord network\")) else: print(red(response.text)) else", "+ cyan(\"This functions expects a Song Title and a Song", "# print(cyan())) for node in nodes_list[\"res\"]: print(header(\"\\n\" + node[\"uid\"]) +", "print(\"Song value: \" + green(response.text.split(\" \")[1])) else: print(yellow(\"Song doesnt exist", "for the song you wish to insert') fetch_q = [", "Overlay...\")) try: response = requests.get(baseURL + ends.c_overlay) if response.status_code ==", "else : print(yellow(\"Song doesnt exist in the Chord\")) print(yellow(\"Couldnt delete", "== \"Left the Chord\": print(response.text) print(green(\"Node is out of Toychord", "ends from utils.colorfy import * from auto.testing import test_trans import", "'filter': lambda val: str(val) } ] fetch_a = prompt(fetch_q, style=style)", "print('\\n') for node in nodes_list[\"res\"]: print(green(node[\"ip\"] + \":\" + node[\"port\"]),", "] fetch_a = 
prompt(fetch_q, style=style) test_number = fetch_a['test_n'] if fetch_a['test_n']", "departHelp=header(\"Depart: \") + cyan(\"This function makes the node connected to", "ip + ':' + port while True: print('----------------------------------------------------------------------') method_q =", "test_number not in ('1', '2', '3'): print(yellow(\"Wrong test number (give", "lambda val: str(val) }, { 'type': 'input', 'name': 'value', 'message':", "except: print(red(\"Could not establish connection with Node. Song wasnt deleted...\"))", "== 'Search for a Song': print('Insert the Song Title you", "bold', Token.Question: '#<PASSWORD>16 bold', }) def client(ip, port): os.system('clear') cyan('What", "response = requests.get(baseURL + ends.c_overlay) if response.status_code == 200: nodes_list", "requests.get(baseURL + ends.c_query_star) if response.status_code == 200: nodes_list = json.loads(response.text)", "\" + underline(node[\"ip\"] + \":\" + node[\"port\"])) for song in", "!= nodes_list[\"res\"][-1]: print(\" -> \", end = '') print('\\n') else", "\"@!@\": # print(cyan(\"Deleting Song: \") + green(response.text.split(\" \")[1]) + )", "for node in nodes_list[\"res\"]: print(green(node[\"ip\"] + \":\" + node[\"port\"]), end", "print(red(\"Got a bad response status code \" + response.status_code)) except:", "establish connection with Node. 
Song wasnt inserted...\")) print(red(\"Unfortunately exiting...\")) exit(0)", "response.text == \"Left the Chord\": print(response.text) print(green(\"Node is out of", "port): os.system('clear') cyan('What a beautiful day to enter the cult...')", "'name': 'value', 'message': 'Value:', 'filter': lambda val: str(val) } ]", "!= \"@!@\": # print(cyan(\"Deleting Song: \") + green(response.text.split(\" \")[1]) +", "deleted the song\\n\") departHelp=header(\"Depart: \") + cyan(\"This function makes the", "cyan(\"...\")) try: response = requests.post(baseURL + ends.c_insert ,data={'key':fetch_a['key'],'value':fetch_a['value']}) if response.status_code", "Chord...\")) try: response = requests.get(baseURL + ends.c_query_star) if response.status_code ==", "nodes_list = json.loads(response.text) # print(green(response.text)) # print(cyan())) for node in", "+ cyan(\"...\")) print(blue(test_trans(test_number))) print(cyan(\"Done!\")) continue elif method_a == 'Exit': os.system('clear')", "response = requests.get(baseURL + ends.c_depart) if response.status_code == 200: if", "200: nodes_list = json.loads(response.text) # print(green(response.text)) # print(cyan())) for node", "not establish connection with Node. 
Song wasnt deleted...\")) print(red(\"Unfortunately exiting...\"))", "the Chord\\n\") queryHelp=header(\"Search Song: \") + cyan(\"This function expects a", "response = requests.post(baseURL + ends.c_delete ,data={'key':fetch_a['key']}) if response.status_code == 200", "try: response = requests.get(baseURL + ends.c_depart) if response.status_code == 200:", "query, 3 = requests)') fetch_q = [ { 'type': 'input',", "requests.get(baseURL + ends.c_overlay) if response.status_code == 200: nodes_list = json.loads(response.text)", "Help --------------------------------\\n') overlayHelp=header(\"Overlay: \") + cyan(\"This functions recreates and prints", "Chord\\n\") queryHelp=header(\"Search Song: \") + cyan(\"This function expects a Song", "print(cyan(\"Deleting Song: \") + green(response.text.split(\" \")[1]) + ) print(cyan(\"Deleted by", "'message': 'Test:', 'filter': lambda val: str(val) } ] fetch_a =", "(\"insert\" if test_number == '1' else (\"query\" if test_number ==", "print(cyan(\"Deleted by node with id: \") + green(response.text.split(\" \")[0])) else", "style=style) test_number = fetch_a['test_n'] if fetch_a['test_n'] else 's' if test_number", "'list', 'name': 'method', 'message': 'Select action:', 'choices': ['Network Overlay', \\", "= sys.argv[2] my_ip = os.popen('ip addr show ' + config.NETIFACE", "id: \",green(response.text.split(\" \")[0])) print(\"Song value: \" + green(response.text.split(\" \")[1])) else:", "200: print(cyan(\"Inserted by node with id: \") + green(response.text.split(\" \")[0]))", "delete') fetch_q = [ { 'type': 'input', 'name': 'key', 'message':", "sys import utils.config as config import utils.ends as ends from", "{ 'type': 'input', 'name': 'value', 'message': 'Value:', 'filter': lambda val:", "\":\" + node[\"port\"])) for song in node[\"song\"]: print(\" -\" +", ": print(red(\"Got a bad response status code \" + response.status_code))", "port. Ex. 
-p 5000 !!\") exit(0) if sys.argv[1] in (\"-p\",", "id: \") + green(response.text.split(\" \")[0])) else : print(yellow(\"Song doesnt exist", "(1 = insert, 2 = query, 3 = requests)') fetch_q", "test number (1=insert, 2=query, 3=requests), runs the test and returns", "+ response.status_code)) except: print(red(\"Could not establish connection with Node. Song", "val: str(val) } ] fetch_a = prompt(fetch_q, style=style) test_number =", "-> \", end = '') print('\\n') else : print(red(\"Got a", "'Search for a Song', \\ 'Delete a Song', \\ 'Depart", "utils.config as config import utils.ends as ends from utils.colorfy import", "== 'Insert a Song': print('Insert a Title-Value pair for the", "overlayHelp=header(\"Overlay: \") + cyan(\"This functions recreates and prints the current", "with Node. Node didnt depart...\")) print(red(\"Unfortunately exiting...\")) break elif method_a", "Title:', 'filter': lambda val: str(val) }, { 'type': 'input', 'name':", "function makes the node connected to this cli leave the", "val: str(val) } ] fetch_a = prompt(fetch_q, style=style) print(cyan(\"Inserting Song:", "sys.argv[1] in (\"-p\", \"-P\"): my_port = sys.argv[2] my_ip = os.popen('ip", "json.loads(response.text) # print(green(response.text)) # print(cyan())) for node in nodes_list[\"res\"]: print(header(\"\\n\"", "green(response.text.split(\" \")[1]) + ) print(cyan(\"Deleted by node with id: \")", "'#E91E63 bold', Token.Selected: '#673AB7 bold', Token.Instruction: '#<PASSWORD>', Token.Answer: '#<PASSWORD> bold',", "depart from Chord...\")) try: response = requests.get(baseURL + ends.c_depart) if", "and returns the Node in whitch the song is stored", "+ fetch_a['key'] + cyan(\"...\")) try: response = requests.post(baseURL + ends.c_delete", "print(yellow(\"Song doesnt exist in the Chord\")) except: print(red(\"Could not establish", "Couldnt search for song...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue elif method_a", "value: \" + green(response.text.split(\" \")[1])) 
else: print(yellow(\"Song doesnt exist in", "\"-\",departHelp,\"\\n\", \"-\",autoTests,\"\\n\", ) continue elif method_a == 'Run automated test':", "day to enter the cult...') baseURL = 'http://' + ip", "':' + port while True: print('----------------------------------------------------------------------') method_q = { 'type':", "= 'http://' + ip + ':' + port while True:", "establish connection with Node. Node didnt depart...\")) print(red(\"Unfortunately exiting...\")) break", "insert') fetch_q = [ { 'type': 'input', 'name': 'key', 'message':", "\"@!@\": print(\"Song found in node with id: \",green(response.text.split(\" \")[0])) print(\"Song", "+ node[\"uid\"]) + \" \" + underline(node[\"ip\"] + \":\" +", "with Node. Song wasnt deleted...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue elif", "\") + (\"insert\" if test_number == '1' else (\"query\" if", "Song Title you wish to delete') fetch_q = [ {", "and a Song Value and inserts them in the Chord\\n\")", "and inserts them in the Chord\\n\") queryHelp=header(\"Search Song: \") +", "'Help', \\ 'Exit'] } method_a = prompt(method_q, style=style)['method'] os.system('clear') if", "Topology(eg. Node1 -> Node2 -> ...)\\n\") insertHelp=header(\"Insert Song: \") +", "== 'Run automated test': print('Select which test you wish to", "'s' if test_number not in ('1', '2', '3'): print(yellow(\"Wrong test", "print(cyan(\"Deleting Song: \") + fetch_a['key'] + cyan(\"...\")) try: response =", "auto.testing import test_trans import time import json style = style_from_dict({", "Node. Song wasnt inserted...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue elif method_a", "continue elif method_a == 'Search for a Song': print('Insert the", "awk \\'{ print $2 }\\' | awk -F \"/\" \\'{", "connection with Node. 
Song wasnt deleted...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue", "cyan(\"This function expects a test number (1=insert, 2=query, 3=requests), runs", "\"-\",autoTests,\"\\n\", ) continue elif method_a == 'Run automated test': print('Select", "song is stored and the value of the song\\n\") deleteHelp=header(\"Delete", "establish connection with Node. Couldnt search for song...\")) print(red(\"Unfortunately exiting...\"))", "\":\" + node[\"port\"]), end = '') if node != nodes_list[\"res\"][-1]:", "automated test: \") + (\"insert\" if test_number == '1' else", "function expects a Song Title and returns the Node who", "test number (give 1, 2 or 3)\")) continue print(cyan(\"Running automated", "'') if node != nodes_list[\"res\"][-1]: print(\" -> \", end =", "print( \" -\",overlayHelp,\"\\n\" \" -\",insertHelp,\"\\n\", \"-\",queryHelp,\"\\n\", \"-\",deleteHelp,\"\\n\", \"-\",departHelp,\"\\n\", \"-\",autoTests,\"\\n\", )", "Chord\\n\") autoTests=header(\"Run automated tests: \") + cyan(\"This function expects a", "song\\n\") deleteHelp=header(\"Delete Song: \") + cyan(\"This function expects a Song", "== '2' else \"requests\")) + cyan(\"...\")) print(blue(test_trans(test_number))) print(cyan(\"Done!\")) continue elif", "requests import os from PyInquirer import style_from_dict, Token, prompt import", "lambda val: str(val) }] fetch_a = prompt(fetch_q, style=style) print(cyan(\"Deleting Song:", "to Search or * to get all songs of the", "in the Chord\\n\") queryHelp=header(\"Search Song: \") + cyan(\"This function expects", "Chord': print(cyan(\"Preparing Node to depart from Chord...\")) try: response =", "else: os.system('clear') continue if __name__ == '__main__': if len(sys.argv) <", "if __name__ == '__main__': if len(sys.argv) < 3: print(\"!! 
you", "import utils.config as config import utils.ends as ends from utils.colorfy", "'Insert a Song', \\ 'Search for a Song', \\ 'Delete", "exit(0) if sys.argv[1] in (\"-p\", \"-P\"): my_port = sys.argv[2] my_ip", "with Node. Song wasnt inserted...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue elif", "== \"*\": print(cyan(\"Fetching all the songs of the Chord...\")) try:", "== '__main__': if len(sys.argv) < 3: print(\"!! you must tell", "+ \":\" + node[\"port\"]), end = '') if node !=", "not establish connection with Node. Song wasnt inserted...\")) print(red(\"Unfortunately exiting...\"))", "\\ 'Delete a Song', \\ 'Depart from Chord', \\ 'Run", "\")[1]) + ) print(cyan(\"Deleted by node with id: \") +", "Chord', \\ 'Run automated test', \\ 'Help', \\ 'Exit'] }", "requests.post(baseURL + ends.c_query ,data={'key':fetch_a['key']}) if response.status_code == 200 and response.text.split(\"", "not establish connection with Node. Couldnt search for song...\")) print(red(\"Unfortunately", "import utils.ends as ends from utils.colorfy import * from auto.testing", "print(red(response.text)) else : print(red(\"Got a bad response status code \"", "5000 !!\") exit(0) if sys.argv[1] in (\"-p\", \"-P\"): my_port =", "200: if response.text == \"Left the Chord\": print(response.text) print(green(\"Node is", "2 = query, 3 = requests)') fetch_q = [ {", "exiting...\")) exit(0) continue elif method_a == 'Help': print('-------------------------------- Help --------------------------------\\n')", "import style_from_dict, Token, prompt import sys import utils.config as config", "'type': 'input', 'name': 'value', 'message': 'Value:', 'filter': lambda val: str(val)", "200: nodes_list = json.loads(response.text) print('\\n') for node in nodes_list[\"res\"]: print(green(node[\"ip\"]", "enter the cult...') baseURL = 'http://' + ip + ':'", "number (give 1, 2 or 3)\")) continue print(cyan(\"Running automated test:", "the test and returns the chord throughput\") print( \" 
-\",overlayHelp,\"\\n\"", "else \"requests\")) + cyan(\"...\")) print(blue(test_trans(test_number))) print(cyan(\"Done!\")) continue elif method_a ==", "continue if __name__ == '__main__': if len(sys.argv) < 3: print(\"!!", "break else: os.system('clear') continue if __name__ == '__main__': if len(sys.argv)", "found in node with id: \",green(response.text.split(\" \")[0])) print(\"Song value: \"", ",data={'key':fetch_a['key']}) if response.status_code == 200 and response.text.split(\" \")[1] != \"@!@\":", "response.status_code)) except: print(red(\"Could not establish connection with Node. Song wasnt", "wish to run (1 = insert, 2 = query, 3", "if fetch_a['test_n'] else 's' if test_number not in ('1', '2',", "continue elif method_a == 'Help': print('-------------------------------- Help --------------------------------\\n') overlayHelp=header(\"Overlay: \")", "Network Overlay...\")) try: response = requests.get(baseURL + ends.c_overlay) if response.status_code", "returns the Node in whitch the song is stored and", "'type': 'input', 'name': 'test_n', 'message': 'Test:', 'filter': lambda val: str(val)", "except: print(red(\"Could not establish connection with Node...\")) print(red(\"Unfortunately exiting...\")) exit(0)", "the Song Title you wish to Search or * to", "while True: print('----------------------------------------------------------------------') method_q = { 'type': 'list', 'name': 'method',", "== 'Depart from Chord': print(cyan(\"Preparing Node to depart from Chord...\"))", "Node. 
Couldnt search for song...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue elif", "print(cyan(\"Initiating Network Overlay...\")) try: response = requests.get(baseURL + ends.c_overlay) if", "exiting...\")) exit(0) continue elif method_a == 'Network Overlay': print(cyan(\"Initiating Network", "of the Chord...\")) try: response = requests.get(baseURL + ends.c_query_star) if", "deleted...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue elif method_a == 'Search for", "# print(green(response.text)) # print(cyan())) for node in nodes_list[\"res\"]: print(header(\"\\n\" +", "[ { 'type': 'input', 'name': 'key', 'message': 'Song Title:', 'filter':", "continue elif method_a == 'Network Overlay': print(cyan(\"Initiating Network Overlay...\")) try:", "200 and response.text.split(\" \")[1] != \"@!@\": print(\"Song found in node", "autoTests=header(\"Run automated tests: \") + cyan(\"This function expects a test", "[ { 'type': 'input', 'name': 'test_n', 'message': 'Test:', 'filter': lambda", "node[\"uid\"]) + \" \" + underline(node[\"ip\"] + \":\" + node[\"port\"]))", "establish connection with Node...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue elif method_a", "import json style = style_from_dict({ Token.QuestionMark: '#E91E63 bold', Token.Selected: '#673AB7", "+ ends.c_query_star) if response.status_code == 200: nodes_list = json.loads(response.text) #", "-\",overlayHelp,\"\\n\" \" -\",insertHelp,\"\\n\", \"-\",queryHelp,\"\\n\", \"-\",deleteHelp,\"\\n\", \"-\",departHelp,\"\\n\", \"-\",autoTests,\"\\n\", ) continue elif", "response = requests.get(baseURL + ends.c_query_star) if response.status_code == 200: nodes_list", "fetch_a = prompt(fetch_q, style=style) print(cyan(\"Inserting Song: \") + fetch_a['key'] +", "Node in whitch the song is stored and the value", "number (1=insert, 2=query, 3=requests), runs the test and returns the", "= prompt(fetch_q, style=style) test_number = fetch_a['test_n'] if fetch_a['test_n'] else 's'", "to 
this cli leave the Chord\\n\") autoTests=header(\"Run automated tests: \")", "'test_n', 'message': 'Test:', 'filter': lambda val: str(val) } ] fetch_a", "connection with Node. Couldnt search for song...\")) print(red(\"Unfortunately exiting...\")) exit(0)", "= prompt(fetch_q, style=style) if fetch_a['key'] == \"*\": print(cyan(\"Fetching all the", "'Run automated test': print('Select which test you wish to run", "\")[0])) print(\"Song value: \" + green(response.text.split(\" \")[1])) else: print(yellow(\"Song doesnt", "response.status_code == 200: if response.text == \"Left the Chord\": print(response.text)", "'Song Title:', 'filter': lambda val: str(val) }] fetch_a = prompt(fetch_q,", "'type': 'input', 'name': 'key', 'message': 'Song Title:', 'filter': lambda val:", "exit(0) continue elif method_a == 'Search for a Song': print('Insert", "tests: \") + cyan(\"This function expects a test number (1=insert,", "else: print(yellow(\"Something went Wrong...\") + response.status_code) except: print(red(\"Could not establish", "node connected to this cli leave the Chord\\n\") autoTests=header(\"Run automated", "print(cyan(\"Fetching all the songs of the Chord...\")) try: response =", "\")[0])) else : print(yellow(\"Song doesnt exist in the Chord\")) print(yellow(\"Couldnt", "print(cyan(\"Inserting Song: \") + fetch_a['key'] + cyan(\"...\")) try: response =", "if method_a == 'Depart from Chord': print(cyan(\"Preparing Node to depart", "song in node[\"song\"]: print(\" -\" + green(song[\"key\"]) + \" \"", "is stored and the value of the song\\n\") deleteHelp=header(\"Delete Song:", "songs of the Chord') fetch_q = [ { 'type': 'input',", "not establish connection with Node...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue elif", "+ port while True: print('----------------------------------------------------------------------') method_q = { 'type': 'list',", "insertHelp=header(\"Insert Song: \") + cyan(\"This functions expects a Song Title", "\\ 'Help', \\ 
'Exit'] } method_a = prompt(method_q, style=style)['method'] os.system('clear')", "try: response = requests.post(baseURL + ends.c_delete ,data={'key':fetch_a['key']}) if response.status_code ==", "-> Node2 -> ...)\\n\") insertHelp=header(\"Insert Song: \") + cyan(\"This functions", "test', \\ 'Help', \\ 'Exit'] } method_a = prompt(method_q, style=style)['method']", "print(yellow(\"Couldnt delete it\")) except: print(red(\"Could not establish connection with Node.", "(\"query\" if test_number == '2' else \"requests\")) + cyan(\"...\")) print(blue(test_trans(test_number)))", "Value and inserts them in the Chord\\n\") queryHelp=header(\"Search Song: \")", "style=style) if fetch_a['key'] == \"*\": print(cyan(\"Fetching all the songs of", "+ \" \" + underline(node[\"ip\"] + \":\" + node[\"port\"])) for", "' | grep \"\\<inet\\>\" | awk \\'{ print $2 }\\'", "}] fetch_a = prompt(fetch_q, style=style) if fetch_a['key'] == \"*\": print(cyan(\"Fetching", "}) def client(ip, port): os.system('clear') cyan('What a beautiful day to", "if response.status_code == 200: nodes_list = json.loads(response.text) # print(green(response.text)) #", "print(green(\"Node is out of Toychord network\")) else: print(red(response.text)) else :", "continue elif method_a == 'Exit': os.system('clear') break else: os.system('clear') continue", "+ cyan(\"...\")) try: response = requests.post(baseURL + ends.c_delete ,data={'key':fetch_a['key']}) if", "requests.post(baseURL + ends.c_delete ,data={'key':fetch_a['key']}) if response.status_code == 200 and response.text.split(\"", "print(yellow(\"Song doesnt exist in the Chord\")) print(yellow(\"Couldnt delete it\")) except:", "= requests.get(baseURL + ends.c_overlay) if response.status_code == 200: nodes_list =", "(1=insert, 2=query, 3=requests), runs the test and returns the chord", "} ] fetch_a = prompt(fetch_q, style=style) print(cyan(\"Inserting Song: \") +", "print(\" -\" + green(song[\"key\"]) + \" \" + song[\"value\"]) else:", 
"response.status_code)) except: print(red(\"Could not establish connection with Node...\")) print(red(\"Unfortunately exiting...\"))", "cyan(\"This functions recreates and prints the current Network Topology(eg. Node1", "requests.get(baseURL + ends.c_depart) if response.status_code == 200: if response.text ==", "try: response = requests.get(baseURL + ends.c_query_star) if response.status_code == 200:", "== 200: print(cyan(\"Inserted by node with id: \") + green(response.text.split(\"", "\") + cyan(\"This function makes the node connected to this", "elif method_a == 'Exit': os.system('clear') break else: os.system('clear') continue if", "the Chord\": print(response.text) print(green(\"Node is out of Toychord network\")) else:", "node in nodes_list[\"res\"]: print(green(node[\"ip\"] + \":\" + node[\"port\"]), end =", "+ fetch_a['key'] + cyan(\"...\")) try: response = requests.post(baseURL + ends.c_insert", "print('\\n') else : print(red(\"Got a bad response status code \"", "method_a == 'Help': print('-------------------------------- Help --------------------------------\\n') overlayHelp=header(\"Overlay: \") + cyan(\"This", "== 'Exit': os.system('clear') break else: os.system('clear') continue if __name__ ==", ": print(yellow(\"Song doesnt exist in the Chord\")) print(yellow(\"Couldnt delete it\"))", "cyan(\"This functions expects a Song Title and a Song Value", "to get all songs of the Chord') fetch_q = [", "elif method_a == 'Delete a Song': print('Insert the Song Title", "\")[1] != \"@!@\": # print(cyan(\"Deleting Song: \") + green(response.text.split(\" \")[1])", "2 or 3)\")) continue print(cyan(\"Running automated test: \") + (\"insert\"", "'filter': lambda val: str(val) }] fetch_a = prompt(fetch_q, style=style) print(cyan(\"Deleting", "exist in the Chord\")) except: print(red(\"Could not establish connection with", "200 and response.text.split(\" \")[1] != \"@!@\": # print(cyan(\"Deleting Song: \")", "ends.c_query ,data={'key':fetch_a['key']}) if 
response.status_code == 200 and response.text.split(\" \")[1] !=", "= requests.get(baseURL + ends.c_query_star) if response.status_code == 200: nodes_list =", "Title and a Song Value and inserts them in the", "= json.loads(response.text) print('\\n') for node in nodes_list[\"res\"]: print(green(node[\"ip\"] + \":\"", "went Wrong...\") + response.status_code) except: print(red(\"Could not establish connection with", "all songs of the Chord') fetch_q = [ { 'type':", "except: print(red(\"Could not establish connection with Node. Node didnt depart...\"))", "node with id: \",green(response.text.split(\" \")[0])) print(\"Song value: \" + green(response.text.split(\"", "if response.status_code == 200: print(cyan(\"Inserted by node with id: \")", "fetch_a = prompt(fetch_q, style=style) print(cyan(\"Deleting Song: \") + fetch_a['key'] +", "'3'): print(yellow(\"Wrong test number (give 1, 2 or 3)\")) continue", "response.status_code == 200: nodes_list = json.loads(response.text) # print(green(response.text)) # print(cyan()))", "get all songs of the Chord') fetch_q = [ {", "$2 }\\' | awk -F \"/\" \\'{ print $1 }\\'').read().strip()", "Title and returns the Node who deleted the song\\n\") departHelp=header(\"Depart:", "'Delete a Song': print('Insert the Song Title you wish to", "-\",insertHelp,\"\\n\", \"-\",queryHelp,\"\\n\", \"-\",deleteHelp,\"\\n\", \"-\",departHelp,\"\\n\", \"-\",autoTests,\"\\n\", ) continue elif method_a ==", "try: response = requests.post(baseURL + ends.c_insert ,data={'key':fetch_a['key'],'value':fetch_a['value']}) if response.status_code ==", "Node. 
Couldnt search for song...\")) print(red(\"Unfortunately exiting...\")) exit(0) else: print(cyan(\"Searching", "print(\" -> \", end = '') print('\\n') else : print(red(\"Got", "!= \"@!@\": print(\"Song found in node with id: \",green(response.text.split(\" \")[0]))", "+ ) print(cyan(\"Deleted by node with id: \") + green(response.text.split(\"", "\")[1])) else: print(yellow(\"Song doesnt exist in the Chord\")) except: print(red(\"Could", "config import utils.ends as ends from utils.colorfy import * from", "method_a == 'Delete a Song': print('Insert the Song Title you", "makes the node connected to this cli leave the Chord\\n\")", "'input', 'name': 'test_n', 'message': 'Test:', 'filter': lambda val: str(val) }", "| awk \\'{ print $2 }\\' | awk -F \"/\"", "a Song Title and a Song Value and inserts them", "stored and the value of the song\\n\") deleteHelp=header(\"Delete Song: \")", "}\\' | awk -F \"/\" \\'{ print $1 }\\'').read().strip() client(my_ip,", "Song Title and returns the Node in whitch the song", "= [ { 'type': 'input', 'name': 'test_n', 'message': 'Test:', 'filter':", "{ 'type': 'list', 'name': 'method', 'message': 'Select action:', 'choices': ['Network", "end = '') if node != nodes_list[\"res\"][-1]: print(\" -> \",", "didnt depart...\")) print(red(\"Unfortunately exiting...\")) break elif method_a == 'Insert a", "= requests.post(baseURL + ends.c_insert ,data={'key':fetch_a['key'],'value':fetch_a['value']}) if response.status_code == 200: print(cyan(\"Inserted", "song[\"value\"]) else: print(yellow(\"Something went Wrong...\") + response.status_code) except: print(red(\"Could not", "= insert, 2 = query, 3 = requests)') fetch_q =", "runs the test and returns the chord throughput\") print( \"", "+ fetch_a['key'] + cyan(\"...\")) try: response = requests.post(baseURL + ends.c_query", "'#<PASSWORD>16 bold', }) def client(ip, port): os.system('clear') cyan('What a beautiful", "of the Chord') fetch_q = [ { 'type': 'input', 'name':", "to delete') fetch_q = [ { 
'type': 'input', 'name': 'key',", "\") + cyan(\"This functions recreates and prints the current Network", "test_number == '2' else \"requests\")) + cyan(\"...\")) print(blue(test_trans(test_number))) print(cyan(\"Done!\")) continue", "'__main__': if len(sys.argv) < 3: print(\"!! you must tell me", "= fetch_a['test_n'] if fetch_a['test_n'] else 's' if test_number not in", "green(song[\"key\"]) + \" \" + song[\"value\"]) else: print(yellow(\"Something went Wrong...\")", "the value of the song\\n\") deleteHelp=header(\"Delete Song: \") + cyan(\"This", "\\ 'Run automated test', \\ 'Help', \\ 'Exit'] } method_a", "'name': 'key', 'message': 'Song Title:', 'filter': lambda val: str(val) }]", "if sys.argv[1] in (\"-p\", \"-P\"): my_port = sys.argv[2] my_ip =", "response status code \" + response.status_code)) except: print(red(\"Could not establish", "who deleted the song\\n\") departHelp=header(\"Depart: \") + cyan(\"This function makes", "song...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue elif method_a == 'Network Overlay':", "this cli leave the Chord\\n\") autoTests=header(\"Run automated tests: \") +", "fetch_a['test_n'] else 's' if test_number not in ('1', '2', '3'):", "'filter': lambda val: str(val) }] fetch_a = prompt(fetch_q, style=style) if", "Song', \\ 'Depart from Chord', \\ 'Run automated test', \\", "'Network Overlay': print(cyan(\"Initiating Network Overlay...\")) try: response = requests.get(baseURL +", "a Song Value and inserts them in the Chord\\n\") queryHelp=header(\"Search", "fetch_a['key'] + cyan(\"...\")) try: response = requests.post(baseURL + ends.c_delete ,data={'key':fetch_a['key']})", "(\"-p\", \"-P\"): my_port = sys.argv[2] my_ip = os.popen('ip addr show", "'Test:', 'filter': lambda val: str(val) } ] fetch_a = prompt(fetch_q,", "Song': print('Insert the Song Title you wish to Search or", "wish to insert') fetch_q = [ { 'type': 'input', 'name':", "print(red(\"Unfortunately exiting...\")) exit(0) else: print(cyan(\"Searching 
Song: \") + fetch_a['key'] +", "the Chord\")) print(yellow(\"Couldnt delete it\")) except: print(red(\"Could not establish connection", "the song\\n\") deleteHelp=header(\"Delete Song: \") + cyan(\"This function expects a", "Token.Answer: '#<PASSWORD> bold', Token.Question: '#<PASSWORD>16 bold', }) def client(ip, port):", "song\\n\") departHelp=header(\"Depart: \") + cyan(\"This function makes the node connected", "chord throughput\") print( \" -\",overlayHelp,\"\\n\" \" -\",insertHelp,\"\\n\", \"-\",queryHelp,\"\\n\", \"-\",deleteHelp,\"\\n\", \"-\",departHelp,\"\\n\",", "' + config.NETIFACE + ' | grep \"\\<inet\\>\" | awk", "a Song Title and returns the Node who deleted the", "print(cyan(\"Preparing Node to depart from Chord...\")) try: response = requests.get(baseURL", "Chord\")) print(yellow(\"Couldnt delete it\")) except: print(red(\"Could not establish connection with", "function expects a test number (1=insert, 2=query, 3=requests), runs the", "else : print(red(\"Got a bad response status code \" +", "'Song Title:', 'filter': lambda val: str(val) }, { 'type': 'input',", "print('Insert a Title-Value pair for the song you wish to", "them in the Chord\\n\") queryHelp=header(\"Search Song: \") + cyan(\"This function", "response.status_code == 200: print(cyan(\"Inserted by node with id: \") +", "to insert') fetch_q = [ { 'type': 'input', 'name': 'key',", "print(cyan())) for node in nodes_list[\"res\"]: print(header(\"\\n\" + node[\"uid\"]) + \"", "+ cyan(\"...\")) try: response = requests.post(baseURL + ends.c_query ,data={'key':fetch_a['key']}) if", "the port. Ex. -p 5000 !!\") exit(0) if sys.argv[1] in", "\") + cyan(\"This functions expects a Song Title and a", "'name': 'key', 'message': 'Song Title:', 'filter': lambda val: str(val) },", "\") + cyan(\"This function expects a Song Title and returns", "continue print(cyan(\"Running automated test: \") + (\"insert\" if test_number ==", "__name__ == '__main__': if len(sys.argv) < 3: print(\"!! 
you must", "and response.text.split(\" \")[1] != \"@!@\": print(\"Song found in node with", "songs of the Chord...\")) try: response = requests.get(baseURL + ends.c_query_star)", "import requests import os from PyInquirer import style_from_dict, Token, prompt", "style=style)['method'] os.system('clear') if method_a == 'Depart from Chord': print(cyan(\"Preparing Node", "'Insert a Song': print('Insert a Title-Value pair for the song", "Song', \\ 'Delete a Song', \\ 'Depart from Chord', \\", "\")[1] != \"@!@\": print(\"Song found in node with id: \",green(response.text.split(\"", "print(red(\"Unfortunately exiting...\")) break elif method_a == 'Insert a Song': print('Insert", "print('Insert the Song Title you wish to Search or *", "import os from PyInquirer import style_from_dict, Token, prompt import sys", "style_from_dict, Token, prompt import sys import utils.config as config import", "test: \") + (\"insert\" if test_number == '1' else (\"query\"", "print('-------------------------------- Help --------------------------------\\n') overlayHelp=header(\"Overlay: \") + cyan(\"This functions recreates and", "delete it\")) except: print(red(\"Could not establish connection with Node. Song", "to depart from Chord...\")) try: response = requests.get(baseURL + ends.c_depart)", "prompt(method_q, style=style)['method'] os.system('clear') if method_a == 'Depart from Chord': print(cyan(\"Preparing", "id: \") + green(response.text.split(\" \")[0])) else : print(red(\"Got a bad", "break elif method_a == 'Insert a Song': print('Insert a Title-Value", "elif method_a == 'Network Overlay': print(cyan(\"Initiating Network Overlay...\")) try: response", "elif method_a == 'Help': print('-------------------------------- Help --------------------------------\\n') overlayHelp=header(\"Overlay: \") +", "3: print(\"!! you must tell me the port. Ex. 
-p", "depart...\")) print(red(\"Unfortunately exiting...\")) break elif method_a == 'Insert a Song':", "+ ends.c_overlay) if response.status_code == 200: nodes_list = json.loads(response.text) print('\\n')", "method_a == 'Insert a Song': print('Insert a Title-Value pair for", "print(cyan(\"Searching Song: \") + fetch_a['key'] + cyan(\"...\")) try: response =", "a Song': print('Insert the Song Title you wish to delete')", "in the Chord\")) except: print(red(\"Could not establish connection with Node.", "me the port. Ex. -p 5000 !!\") exit(0) if sys.argv[1]", "print(green(response.text)) # print(cyan())) for node in nodes_list[\"res\"]: print(header(\"\\n\" + node[\"uid\"])", "not in ('1', '2', '3'): print(yellow(\"Wrong test number (give 1,", "'Delete a Song', \\ 'Depart from Chord', \\ 'Run automated", "print(header(\"\\n\" + node[\"uid\"]) + \" \" + underline(node[\"ip\"] + \":\"", "print(red(\"Unfortunately exiting...\")) exit(0) continue elif method_a == 'Delete a Song':", "= os.popen('ip addr show ' + config.NETIFACE + ' |", "+ config.NETIFACE + ' | grep \"\\<inet\\>\" | awk \\'{", "connected to this cli leave the Chord\\n\") autoTests=header(\"Run automated tests:", "tell me the port. Ex. -p 5000 !!\") exit(0) if", "str(val) } ] fetch_a = prompt(fetch_q, style=style) test_number = fetch_a['test_n']", "'#<PASSWORD>', Token.Answer: '#<PASSWORD> bold', Token.Question: '#<PASSWORD>16 bold', }) def client(ip,", "which test you wish to run (1 = insert, 2", "exiting...\")) break elif method_a == 'Insert a Song': print('Insert a", "'key', 'message': 'Song Title:', 'filter': lambda val: str(val) }] fetch_a", "ends.c_overlay) if response.status_code == 200: nodes_list = json.loads(response.text) print('\\n') for", "the Chord...\")) try: response = requests.get(baseURL + ends.c_query_star) if response.status_code", "the current Network Topology(eg. 
Node1 -> Node2 -> ...)\\n\") insertHelp=header(\"Insert", "you wish to run (1 = insert, 2 = query,", "ends.c_depart) if response.status_code == 200: if response.text == \"Left the", "of the song\\n\") deleteHelp=header(\"Delete Song: \") + cyan(\"This function expects", "'#673AB7 bold', Token.Instruction: '#<PASSWORD>', Token.Answer: '#<PASSWORD> bold', Token.Question: '#<PASSWORD>16 bold',", "'message': 'Value:', 'filter': lambda val: str(val) } ] fetch_a =", "Node didnt depart...\")) print(red(\"Unfortunately exiting...\")) break elif method_a == 'Insert", "by node with id: \") + green(response.text.split(\" \")[0])) else :", "print(yellow(\"Something went Wrong...\") + response.status_code) except: print(red(\"Could not establish connection", "automated test', \\ 'Help', \\ 'Exit'] } method_a = prompt(method_q,", "and response.text.split(\" \")[1] != \"@!@\": # print(cyan(\"Deleting Song: \") +", "whitch the song is stored and the value of the", ") continue elif method_a == 'Run automated test': print('Select which", "network\")) else: print(red(response.text)) else : print(red(\"Got a bad response status", "it\")) except: print(red(\"Could not establish connection with Node. 
Song wasnt", "cyan(\"This function makes the node connected to this cli leave", "\" -\",overlayHelp,\"\\n\" \" -\",insertHelp,\"\\n\", \"-\",queryHelp,\"\\n\", \"-\",deleteHelp,\"\\n\", \"-\",departHelp,\"\\n\", \"-\",autoTests,\"\\n\", ) continue", "print(red(\"Unfortunately exiting...\")) exit(0) continue elif method_a == 'Search for a", "or * to get all songs of the Chord') fetch_q", "expects a Song Title and returns the Node who deleted", "'name': 'test_n', 'message': 'Test:', 'filter': lambda val: str(val) } ]", "method_q = { 'type': 'list', 'name': 'method', 'message': 'Select action:',", "os.system('clear') break else: os.system('clear') continue if __name__ == '__main__': if", "connection with Node...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue elif method_a ==", "nodes_list[\"res\"]: print(header(\"\\n\" + node[\"uid\"]) + \" \" + underline(node[\"ip\"] +", "Song: \") + cyan(\"This function expects a Song Title and", "-\" + green(song[\"key\"]) + \" \" + song[\"value\"]) else: print(yellow(\"Something", "continue elif method_a == 'Run automated test': print('Select which test", "Chord') fetch_q = [ { 'type': 'input', 'name': 'key', 'message':", "test_number == '1' else (\"query\" if test_number == '2' else", "'key', 'message': 'Song Title:', 'filter': lambda val: str(val) }, {", "if test_number == '2' else \"requests\")) + cyan(\"...\")) print(blue(test_trans(test_number))) print(cyan(\"Done!\"))", "elif method_a == 'Insert a Song': print('Insert a Title-Value pair", "prompt(fetch_q, style=style) print(cyan(\"Deleting Song: \") + fetch_a['key'] + cyan(\"...\")) try:", "'Value:', 'filter': lambda val: str(val) } ] fetch_a = prompt(fetch_q,", "'#<PASSWORD> bold', Token.Question: '#<PASSWORD>16 bold', }) def client(ip, port): os.system('clear')", "nodes_list[\"res\"]: print(green(node[\"ip\"] + \":\" + node[\"port\"]), end = '') if", "try: response = requests.post(baseURL + ends.c_query ,data={'key':fetch_a['key']}) if response.status_code 
==", "style=style) print(cyan(\"Deleting Song: \") + fetch_a['key'] + cyan(\"...\")) try: response", "Token.Question: '#<PASSWORD>16 bold', }) def client(ip, port): os.system('clear') cyan('What a", "+ (\"insert\" if test_number == '1' else (\"query\" if test_number", "'method', 'message': 'Select action:', 'choices': ['Network Overlay', \\ 'Insert a", "\",green(response.text.split(\" \")[0])) print(\"Song value: \" + green(response.text.split(\" \")[1])) else: print(yellow(\"Song", "= [ { 'type': 'input', 'name': 'key', 'message': 'Song Title:',", "* to get all songs of the Chord') fetch_q =", "functions expects a Song Title and a Song Value and", "fetch_q = [ { 'type': 'input', 'name': 'test_n', 'message': 'Test:',", "my_port = sys.argv[2] my_ip = os.popen('ip addr show ' +", "wasnt deleted...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue elif method_a == 'Search", "prints the current Network Topology(eg. Node1 -> Node2 -> ...)\\n\")", "for a Song': print('Insert the Song Title you wish to", "+ \":\" + node[\"port\"])) for song in node[\"song\"]: print(\" -\"", "'Run automated test', \\ 'Help', \\ 'Exit'] } method_a =", "in nodes_list[\"res\"]: print(green(node[\"ip\"] + \":\" + node[\"port\"]), end = '')", "the chord throughput\") print( \" -\",overlayHelp,\"\\n\" \" -\",insertHelp,\"\\n\", \"-\",queryHelp,\"\\n\", \"-\",deleteHelp,\"\\n\",", "the song\\n\") departHelp=header(\"Depart: \") + cyan(\"This function makes the node", "test': print('Select which test you wish to run (1 =", "expects a Song Title and a Song Value and inserts", "Overlay': print(cyan(\"Initiating Network Overlay...\")) try: response = requests.get(baseURL + ends.c_overlay)", "fetch_a = prompt(fetch_q, style=style) test_number = fetch_a['test_n'] if fetch_a['test_n'] else", "from utils.colorfy import * from auto.testing import test_trans import time", "print(green(node[\"ip\"] + \":\" + node[\"port\"]), end = '') if node", "\"*\": print(cyan(\"Fetching all the songs of the 
Chord...\")) try: response", "status code \" + response.status_code)) except: print(red(\"Could not establish connection", "def client(ip, port): os.system('clear') cyan('What a beautiful day to enter", "continue elif method_a == 'Delete a Song': print('Insert the Song", "lambda val: str(val) } ] fetch_a = prompt(fetch_q, style=style) print(cyan(\"Inserting", "Song wasnt deleted...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue elif method_a ==", "= prompt(method_q, style=style)['method'] os.system('clear') if method_a == 'Depart from Chord':", "cult...') baseURL = 'http://' + ip + ':' + port", "cyan('What a beautiful day to enter the cult...') baseURL =", "print(red(\"Could not establish connection with Node. Couldnt search for song...\"))", "-p 5000 !!\") exit(0) if sys.argv[1] in (\"-p\", \"-P\"): my_port", "'Depart from Chord': print(cyan(\"Preparing Node to depart from Chord...\")) try:", "'Search for a Song': print('Insert the Song Title you wish", "the Chord') fetch_q = [ { 'type': 'input', 'name': 'key',", "a Title-Value pair for the song you wish to insert')", "is out of Toychord network\")) else: print(red(response.text)) else : print(red(\"Got", "str(val) }, { 'type': 'input', 'name': 'value', 'message': 'Value:', 'filter':", "+ green(response.text.split(\" \")[1])) else: print(yellow(\"Song doesnt exist in the Chord\"))", "'Exit': os.system('clear') break else: os.system('clear') continue if __name__ == '__main__':", "in (\"-p\", \"-P\"): my_port = sys.argv[2] my_ip = os.popen('ip addr", "== 200 and response.text.split(\" \")[1] != \"@!@\": # print(cyan(\"Deleting Song:", "function expects a Song Title and returns the Node in", "out of Toychord network\")) else: print(red(response.text)) else : print(red(\"Got a", "+ \" \" + song[\"value\"]) else: print(yellow(\"Something went Wrong...\") +", "with id: \") + green(response.text.split(\" \")[0])) else : print(yellow(\"Song doesnt", "prompt(fetch_q, style=style) print(cyan(\"Inserting Song: 
\") + fetch_a['key'] + cyan(\"...\")) try:", "Network Topology(eg. Node1 -> Node2 -> ...)\\n\") insertHelp=header(\"Insert Song: \")", "os.system('clear') if method_a == 'Depart from Chord': print(cyan(\"Preparing Node to", "end = '') print('\\n') else : print(red(\"Got a bad response", "elif method_a == 'Search for a Song': print('Insert the Song", "exiting...\")) exit(0) else: print(cyan(\"Searching Song: \") + fetch_a['key'] + cyan(\"...\"))", "Node. Song wasnt deleted...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue elif method_a", "'input', 'name': 'key', 'message': 'Song Title:', 'filter': lambda val: str(val)", "action:', 'choices': ['Network Overlay', \\ 'Insert a Song', \\ 'Search", "print(red(\"Could not establish connection with Node. Node didnt depart...\")) print(red(\"Unfortunately", "os from PyInquirer import style_from_dict, Token, prompt import sys import", "search for song...\")) print(red(\"Unfortunately exiting...\")) exit(0) else: print(cyan(\"Searching Song: \")", "Song wasnt inserted...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue elif method_a ==", "establish connection with Node. 
Song wasnt deleted...\")) print(red(\"Unfortunately exiting...\")) exit(0)", "throughput\") print( \" -\",overlayHelp,\"\\n\" \" -\",insertHelp,\"\\n\", \"-\",queryHelp,\"\\n\", \"-\",deleteHelp,\"\\n\", \"-\",departHelp,\"\\n\", \"-\",autoTests,\"\\n\",", "or 3)\")) continue print(cyan(\"Running automated test: \") + (\"insert\" if", "== 200: nodes_list = json.loads(response.text) # print(green(response.text)) # print(cyan())) for", "node[\"song\"]: print(\" -\" + green(song[\"key\"]) + \" \" + song[\"value\"])", "test and returns the chord throughput\") print( \" -\",overlayHelp,\"\\n\" \"", "from PyInquirer import style_from_dict, Token, prompt import sys import utils.config", "style_from_dict({ Token.QuestionMark: '#E91E63 bold', Token.Selected: '#673AB7 bold', Token.Instruction: '#<PASSWORD>', Token.Answer:", "print(cyan(\"Running automated test: \") + (\"insert\" if test_number == '1'", "queryHelp=header(\"Search Song: \") + cyan(\"This function expects a Song Title", "try: response = requests.get(baseURL + ends.c_overlay) if response.status_code == 200:", "\" + response.status_code)) except: print(red(\"Could not establish connection with Node.", "requests)') fetch_q = [ { 'type': 'input', 'name': 'test_n', 'message':", "Node to depart from Chord...\")) try: response = requests.get(baseURL +", "not establish connection with Node. 
Node didnt depart...\")) print(red(\"Unfortunately exiting...\"))", "from auto.testing import test_trans import time import json style =", "print('----------------------------------------------------------------------') method_q = { 'type': 'list', 'name': 'method', 'message': 'Select", "grep \"\\<inet\\>\" | awk \\'{ print $2 }\\' | awk", "node with id: \") + green(response.text.split(\" \")[0])) else : print(red(\"Got", "\\ 'Depart from Chord', \\ 'Run automated test', \\ 'Help',", "cli leave the Chord\\n\") autoTests=header(\"Run automated tests: \") + cyan(\"This", "Couldnt search for song...\")) print(red(\"Unfortunately exiting...\")) exit(0) else: print(cyan(\"Searching Song:", "functions recreates and prints the current Network Topology(eg. Node1 ->", "expects a test number (1=insert, 2=query, 3=requests), runs the test", "my_ip = os.popen('ip addr show ' + config.NETIFACE + '", "a bad response status code \" + response.status_code)) except: print(red(\"Could", "\\'{ print $2 }\\' | awk -F \"/\" \\'{ print", "of Toychord network\")) else: print(red(response.text)) else : print(red(\"Got a bad", "3)\")) continue print(cyan(\"Running automated test: \") + (\"insert\" if test_number", "if response.text == \"Left the Chord\": print(response.text) print(green(\"Node is out", "Chord\": print(response.text) print(green(\"Node is out of Toychord network\")) else: print(red(response.text))", "connection with Node. Song wasnt inserted...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue", "+ green(response.text.split(\" \")[0])) else : print(yellow(\"Song doesnt exist in the", "'Exit'] } method_a = prompt(method_q, style=style)['method'] os.system('clear') if method_a ==", "the songs of the Chord...\")) try: response = requests.get(baseURL +", "current Network Topology(eg. 
Node1 -> Node2 -> ...)\\n\") insertHelp=header(\"Insert Song:", "| awk -F \"/\" \\'{ print $1 }\\'').read().strip() client(my_ip, my_port)", "if response.status_code == 200 and response.text.split(\" \")[1] != \"@!@\": print(\"Song", "== 'Help': print('-------------------------------- Help --------------------------------\\n') overlayHelp=header(\"Overlay: \") + cyan(\"This functions", "sys.argv[2] my_ip = os.popen('ip addr show ' + config.NETIFACE +", "= '') if node != nodes_list[\"res\"][-1]: print(\" -> \", end", "\"requests\")) + cyan(\"...\")) print(blue(test_trans(test_number))) print(cyan(\"Done!\")) continue elif method_a == 'Exit':", "you wish to delete') fetch_q = [ { 'type': 'input',", "for a Song', \\ 'Delete a Song', \\ 'Depart from", "for song...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue elif method_a == 'Network", "str(val) } ] fetch_a = prompt(fetch_q, style=style) print(cyan(\"Inserting Song: \")", "the Song Title you wish to delete') fetch_q = [", "style=style) print(cyan(\"Inserting Song: \") + fetch_a['key'] + cyan(\"...\")) try: response", "{ 'type': 'input', 'name': 'test_n', 'message': 'Test:', 'filter': lambda val:", "if test_number == '1' else (\"query\" if test_number == '2'", "Song Title and returns the Node who deleted the song\\n\")", "connection with Node. 
Node didnt depart...\")) print(red(\"Unfortunately exiting...\")) break elif", "== 200 and response.text.split(\" \")[1] != \"@!@\": print(\"Song found in", "os.system('clear') cyan('What a beautiful day to enter the cult...') baseURL", "if node != nodes_list[\"res\"][-1]: print(\" -> \", end = '')", "+ node[\"port\"]), end = '') if node != nodes_list[\"res\"][-1]: print(\"", "exit(0) continue elif method_a == 'Network Overlay': print(cyan(\"Initiating Network Overlay...\"))", "Chord...\")) try: response = requests.get(baseURL + ends.c_depart) if response.status_code ==", "= style_from_dict({ Token.QuestionMark: '#E91E63 bold', Token.Selected: '#673AB7 bold', Token.Instruction: '#<PASSWORD>',", "= requests)') fetch_q = [ { 'type': 'input', 'name': 'test_n',", "a Song', \\ 'Delete a Song', \\ 'Depart from Chord',", "from Chord', \\ 'Run automated test', \\ 'Help', \\ 'Exit']", "str(val) }] fetch_a = prompt(fetch_q, style=style) print(cyan(\"Deleting Song: \") +", "'message': 'Select action:', 'choices': ['Network Overlay', \\ 'Insert a Song',", "node != nodes_list[\"res\"][-1]: print(\" -> \", end = '') print('\\n')", "leave the Chord\\n\") autoTests=header(\"Run automated tests: \") + cyan(\"This function", "as config import utils.ends as ends from utils.colorfy import *", "pair for the song you wish to insert') fetch_q =", "response.text.split(\" \")[1] != \"@!@\": print(\"Song found in node with id:", "ends.c_query_star) if response.status_code == 200: nodes_list = json.loads(response.text) # print(green(response.text))", "bold', }) def client(ip, port): os.system('clear') cyan('What a beautiful day", "port while True: print('----------------------------------------------------------------------') method_q = { 'type': 'list', 'name':", "+ green(response.text.split(\" \")[1]) + ) print(cyan(\"Deleted by node with id:", "}, { 'type': 'input', 'name': 'value', 'message': 'Value:', 'filter': lambda", "} ] fetch_a = prompt(fetch_q, style=style) test_number = 
fetch_a['test_n'] if", "Song', \\ 'Search for a Song', \\ 'Delete a Song',", "= prompt(fetch_q, style=style) print(cyan(\"Deleting Song: \") + fetch_a['key'] + cyan(\"...\"))", "with id: \",green(response.text.split(\" \")[0])) print(\"Song value: \" + green(response.text.split(\" \")[1]))", "addr show ' + config.NETIFACE + ' | grep \"\\<inet\\>\"", "you wish to Search or * to get all songs", "print(blue(test_trans(test_number))) print(cyan(\"Done!\")) continue elif method_a == 'Exit': os.system('clear') break else:", "-> ...)\\n\") insertHelp=header(\"Insert Song: \") + cyan(\"This functions expects a", "{ 'type': 'input', 'name': 'key', 'message': 'Song Title:', 'filter': lambda", "\") + green(response.text.split(\" \")[1]) + ) print(cyan(\"Deleted by node with", "cyan(\"...\")) print(blue(test_trans(test_number))) print(cyan(\"Done!\")) continue elif method_a == 'Exit': os.system('clear') break", "= requests.get(baseURL + ends.c_depart) if response.status_code == 200: if response.text", "\" -\",insertHelp,\"\\n\", \"-\",queryHelp,\"\\n\", \"-\",deleteHelp,\"\\n\", \"-\",departHelp,\"\\n\", \"-\",autoTests,\"\\n\", ) continue elif method_a", "os.system('clear') continue if __name__ == '__main__': if len(sys.argv) < 3:", "+ response.status_code)) except: print(red(\"Could not establish connection with Node...\")) print(red(\"Unfortunately", "json.loads(response.text) print('\\n') for node in nodes_list[\"res\"]: print(green(node[\"ip\"] + \":\" +", "* from auto.testing import test_trans import time import json style", "method_a == 'Search for a Song': print('Insert the Song Title", "# print(cyan(\"Deleting Song: \") + green(response.text.split(\" \")[1]) + ) print(cyan(\"Deleted", "print('Select which test you wish to run (1 = insert,", ") print(cyan(\"Deleted by node with id: \") + green(response.text.split(\" \")[0]))", ",data={'key':fetch_a['key'],'value':fetch_a['value']}) if response.status_code == 200: print(cyan(\"Inserted by node with id:", "utils.ends as 
ends from utils.colorfy import * from auto.testing import", "print(cyan(\"Inserted by node with id: \") + green(response.text.split(\" \")[0])) else", "\") + green(response.text.split(\" \")[0])) else : print(red(\"Got a bad response", "node with id: \") + green(response.text.split(\" \")[0])) else : print(yellow(\"Song", "] fetch_a = prompt(fetch_q, style=style) print(cyan(\"Inserting Song: \") + fetch_a['key']", "import * from auto.testing import test_trans import time import json", "show ' + config.NETIFACE + ' | grep \"\\<inet\\>\" |", "utils.colorfy import * from auto.testing import test_trans import time import", "Song Title and a Song Value and inserts them in", "Search or * to get all songs of the Chord')", "Token.Selected: '#673AB7 bold', Token.Instruction: '#<PASSWORD>', Token.Answer: '#<PASSWORD> bold', Token.Question: '#<PASSWORD>16", "\"Left the Chord\": print(response.text) print(green(\"Node is out of Toychord network\"))", "underline(node[\"ip\"] + \":\" + node[\"port\"])) for song in node[\"song\"]: print(\"", "doesnt exist in the Chord\")) except: print(red(\"Could not establish connection", "\\ 'Insert a Song', \\ 'Search for a Song', \\", "and returns the chord throughput\") print( \" -\",overlayHelp,\"\\n\" \" -\",insertHelp,\"\\n\",", "== 'Delete a Song': print('Insert the Song Title you wish", "+ ends.c_query ,data={'key':fetch_a['key']}) if response.status_code == 200 and response.text.split(\" \")[1]", "+ ends.c_delete ,data={'key':fetch_a['key']}) if response.status_code == 200 and response.text.split(\" \")[1]", "= requests.post(baseURL + ends.c_delete ,data={'key':fetch_a['key']}) if response.status_code == 200 and", "+ green(response.text.split(\" \")[0])) else : print(red(\"Got a bad response status", "with id: \") + green(response.text.split(\" \")[0])) else : print(red(\"Got a", "+ ends.c_insert ,data={'key':fetch_a['key'],'value':fetch_a['value']}) if response.status_code == 200: print(cyan(\"Inserted by node", 
"response.status_code)) except: print(red(\"Could not establish connection with Node. Node didnt", "if response.status_code == 200: nodes_list = json.loads(response.text) print('\\n') for node", "Title and returns the Node in whitch the song is", "val: str(val) }, { 'type': 'input', 'name': 'value', 'message': 'Value:',", "method_a = prompt(method_q, style=style)['method'] os.system('clear') if method_a == 'Depart from", "} method_a = prompt(method_q, style=style)['method'] os.system('clear') if method_a == 'Depart", "in node with id: \",green(response.text.split(\" \")[0])) print(\"Song value: \" +", "== 200: nodes_list = json.loads(response.text) print('\\n') for node in nodes_list[\"res\"]:", "exit(0) continue elif method_a == 'Delete a Song': print('Insert the", "the Node who deleted the song\\n\") departHelp=header(\"Depart: \") + cyan(\"This", "exit(0) continue elif method_a == 'Help': print('-------------------------------- Help --------------------------------\\n') overlayHelp=header(\"Overlay:", "fetch_q = [ { 'type': 'input', 'name': 'key', 'message': 'Song", "prompt(fetch_q, style=style) if fetch_a['key'] == \"*\": print(cyan(\"Fetching all the songs", "elif method_a == 'Run automated test': print('Select which test you", "print $2 }\\' | awk -F \"/\" \\'{ print $1", "code \" + response.status_code)) except: print(red(\"Could not establish connection with", "bold', Token.Selected: '#673AB7 bold', Token.Instruction: '#<PASSWORD>', Token.Answer: '#<PASSWORD> bold', Token.Question:", "+ ends.c_depart) if response.status_code == 200: if response.text == \"Left", "}] fetch_a = prompt(fetch_q, style=style) print(cyan(\"Deleting Song: \") + fetch_a['key']", "'Depart from Chord', \\ 'Run automated test', \\ 'Help', \\", "if fetch_a['key'] == \"*\": print(cyan(\"Fetching all the songs of the", "response.status_code == 200: nodes_list = json.loads(response.text) print('\\n') for node in", "json style = style_from_dict({ Token.QuestionMark: '#E91E63 bold', 
Token.Selected: '#673AB7 bold',", "song you wish to insert') fetch_q = [ { 'type':", "Title you wish to Search or * to get all", "response = requests.post(baseURL + ends.c_insert ,data={'key':fetch_a['key'],'value':fetch_a['value']}) if response.status_code == 200:", "with Node...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue elif method_a == 'Help':", "else (\"query\" if test_number == '2' else \"requests\")) + cyan(\"...\"))", "Wrong...\") + response.status_code) except: print(red(\"Could not establish connection with Node.", "Song: \") + fetch_a['key'] + cyan(\"...\")) try: response = requests.post(baseURL", "test_trans import time import json style = style_from_dict({ Token.QuestionMark: '#E91E63", "\"\\<inet\\>\" | awk \\'{ print $2 }\\' | awk -F", "+ cyan(\"...\")) try: response = requests.post(baseURL + ends.c_insert ,data={'key':fetch_a['key'],'value':fetch_a['value']}) if", "['Network Overlay', \\ 'Insert a Song', \\ 'Search for a", "'input', 'name': 'value', 'message': 'Value:', 'filter': lambda val: str(val) }", "exist in the Chord\")) print(yellow(\"Couldnt delete it\")) except: print(red(\"Could not", "if len(sys.argv) < 3: print(\"!! 
you must tell me the", "prompt import sys import utils.config as config import utils.ends as", "\" + green(response.text.split(\" \")[1])) else: print(yellow(\"Song doesnt exist in the", "test you wish to run (1 = insert, 2 =", "automated tests: \") + cyan(\"This function expects a test number", "Token.QuestionMark: '#E91E63 bold', Token.Selected: '#673AB7 bold', Token.Instruction: '#<PASSWORD>', Token.Answer: '#<PASSWORD>", "a Song', \\ 'Depart from Chord', \\ 'Run automated test',", "node[\"port\"]), end = '') if node != nodes_list[\"res\"][-1]: print(\" ->", "Overlay', \\ 'Insert a Song', \\ 'Search for a Song',", "| grep \"\\<inet\\>\" | awk \\'{ print $2 }\\' |", "all the songs of the Chord...\")) try: response = requests.get(baseURL", "= prompt(fetch_q, style=style) print(cyan(\"Inserting Song: \") + fetch_a['key'] + cyan(\"...\"))", "wish to delete') fetch_q = [ { 'type': 'input', 'name':", "= '') print('\\n') else : print(red(\"Got a bad response status", "\\ 'Search for a Song', \\ 'Delete a Song', \\", "+ node[\"port\"])) for song in node[\"song\"]: print(\" -\" + green(song[\"key\"])", "config.NETIFACE + ' | grep \"\\<inet\\>\" | awk \\'{ print", "+ green(song[\"key\"]) + \" \" + song[\"value\"]) else: print(yellow(\"Something went", "the Chord\\n\") autoTests=header(\"Run automated tests: \") + cyan(\"This function expects", "Node2 -> ...)\\n\") insertHelp=header(\"Insert Song: \") + cyan(\"This functions expects", "recreates and prints the current Network Topology(eg. Node1 -> Node2", "exit(0) else: print(cyan(\"Searching Song: \") + fetch_a['key'] + cyan(\"...\")) try:", "Node. 
Node didnt depart...\")) print(red(\"Unfortunately exiting...\")) break elif method_a ==", "song...\")) print(red(\"Unfortunately exiting...\")) exit(0) else: print(cyan(\"Searching Song: \") + fetch_a['key']", "\") + cyan(\"This function expects a test number (1=insert, 2=query,", "green(response.text.split(\" \")[0])) else : print(red(\"Got a bad response status code", "'http://' + ip + ':' + port while True: print('----------------------------------------------------------------------')", "'2', '3'): print(yellow(\"Wrong test number (give 1, 2 or 3)\"))", "to enter the cult...') baseURL = 'http://' + ip +", "'name': 'method', 'message': 'Select action:', 'choices': ['Network Overlay', \\ 'Insert", "test_number = fetch_a['test_n'] if fetch_a['test_n'] else 's' if test_number not", "method_a == 'Run automated test': print('Select which test you wish", "Token, prompt import sys import utils.config as config import utils.ends", "beautiful day to enter the cult...') baseURL = 'http://' +", "< 3: print(\"!! you must tell me the port. 
Ex.", "bad response status code \" + response.status_code)) except: print(red(\"Could not", "exiting...\")) exit(0) continue elif method_a == 'Search for a Song':", "+ cyan(\"This function expects a Song Title and returns the", "for song in node[\"song\"]: print(\" -\" + green(song[\"key\"]) + \"", "val: str(val) }] fetch_a = prompt(fetch_q, style=style) if fetch_a['key'] ==", "= query, 3 = requests)') fetch_q = [ { 'type':", "val: str(val) }] fetch_a = prompt(fetch_q, style=style) print(cyan(\"Deleting Song: \")", "+ underline(node[\"ip\"] + \":\" + node[\"port\"])) for song in node[\"song\"]:", "('1', '2', '3'): print(yellow(\"Wrong test number (give 1, 2 or", "method_a == 'Exit': os.system('clear') break else: os.system('clear') continue if __name__", "print(red(\"Could not establish connection with Node...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue", "Node who deleted the song\\n\") departHelp=header(\"Depart: \") + cyan(\"This function", "PyInquirer import style_from_dict, Token, prompt import sys import utils.config as", "'message': 'Song Title:', 'filter': lambda val: str(val) }] fetch_a =", "and returns the Node who deleted the song\\n\") departHelp=header(\"Depart: \")", "fetch_a['test_n'] if fetch_a['test_n'] else 's' if test_number not in ('1',", "== 'Network Overlay': print(cyan(\"Initiating Network Overlay...\")) try: response = requests.get(baseURL", "response = requests.post(baseURL + ends.c_query ,data={'key':fetch_a['key']}) if response.status_code == 200", "else: print(cyan(\"Searching Song: \") + fetch_a['key'] + cyan(\"...\")) try: response", "Title-Value pair for the song you wish to insert') fetch_q", "expects a Song Title and returns the Node in whitch", "Node...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue elif method_a == 'Help': print('--------------------------------", "green(response.text.split(\" \")[0])) else : print(yellow(\"Song doesnt exist in the Chord\"))", "and prints the current Network 
Topology(eg. Node1 -> Node2 ->", "'1' else (\"query\" if test_number == '2' else \"requests\")) +", "'value', 'message': 'Value:', 'filter': lambda val: str(val) } ] fetch_a", "prompt(fetch_q, style=style) test_number = fetch_a['test_n'] if fetch_a['test_n'] else 's' if", "the song is stored and the value of the song\\n\")", "+ response.status_code) except: print(red(\"Could not establish connection with Node. Couldnt", "Node1 -> Node2 -> ...)\\n\") insertHelp=header(\"Insert Song: \") + cyan(\"This", "the cult...') baseURL = 'http://' + ip + ':' +", "node[\"port\"])) for song in node[\"song\"]: print(\" -\" + green(song[\"key\"]) +", "the Node in whitch the song is stored and the", "Song: \") + cyan(\"This functions expects a Song Title and", "+ cyan(\"This function expects a test number (1=insert, 2=query, 3=requests),", "3=requests), runs the test and returns the chord throughput\") print(", "Ex. -p 5000 !!\") exit(0) if sys.argv[1] in (\"-p\", \"-P\"):", "ends.c_insert ,data={'key':fetch_a['key'],'value':fetch_a['value']}) if response.status_code == 200: print(cyan(\"Inserted by node with", "time import json style = style_from_dict({ Token.QuestionMark: '#E91E63 bold', Token.Selected:", "ends.c_delete ,data={'key':fetch_a['key']}) if response.status_code == 200 and response.text.split(\" \")[1] !=", "from Chord...\")) try: response = requests.get(baseURL + ends.c_depart) if response.status_code", "Song': print('Insert a Title-Value pair for the song you wish", "search for song...\")) print(red(\"Unfortunately exiting...\")) exit(0) continue elif method_a ==", "if test_number not in ('1', '2', '3'): print(yellow(\"Wrong test number", "+ cyan(\"This function makes the node connected to this cli", "automated test': print('Select which test you wish to run (1", "requests.post(baseURL + ends.c_insert ,data={'key':fetch_a['key'],'value':fetch_a['value']}) if response.status_code == 200: print(cyan(\"Inserted by", "print(\"!! you must tell me the port. Ex. 
-p 5000", "returns the Node who deleted the song\\n\") departHelp=header(\"Depart: \") +", "+ ' | grep \"\\<inet\\>\" | awk \\'{ print $2", "\\ 'Exit'] } method_a = prompt(method_q, style=style)['method'] os.system('clear') if method_a", "and the value of the song\\n\") deleteHelp=header(\"Delete Song: \") +", "import test_trans import time import json style = style_from_dict({ Token.QuestionMark:", "else: print(red(response.text)) else : print(red(\"Got a bad response status code", "print(red(\"Could not establish connection with Node. Song wasnt deleted...\")) print(red(\"Unfortunately", "'') print('\\n') else : print(red(\"Got a bad response status code", "you wish to insert') fetch_q = [ { 'type': 'input',", "to run (1 = insert, 2 = query, 3 =", "else: print(yellow(\"Song doesnt exist in the Chord\")) except: print(red(\"Could not", "value of the song\\n\") deleteHelp=header(\"Delete Song: \") + cyan(\"This function", "\" \" + song[\"value\"]) else: print(yellow(\"Something went Wrong...\") + response.status_code)", "os.popen('ip addr show ' + config.NETIFACE + ' | grep", "doesnt exist in the Chord\")) print(yellow(\"Couldnt delete it\")) except: print(red(\"Could", "import sys import utils.config as config import utils.ends as ends", "returns the chord throughput\") print( \" -\",overlayHelp,\"\\n\" \" -\",insertHelp,\"\\n\", \"-\",queryHelp,\"\\n\",", "must tell me the port. Ex. -p 5000 !!\") exit(0)", "print(red(\"Unfortunately exiting...\")) exit(0) continue elif method_a == 'Help': print('-------------------------------- Help", "baseURL = 'http://' + ip + ':' + port while" ]
[ "node in objects: skin = cmds.ls(cmds.listHistory(node), type='skinCluster') if not skin:", "in object: objTypeName = cmds.objectType(skinJoint) if objTypeName == 'joint': split_name", "import json import re class WeightCopyPaste(): def main(self, skinMeshes, mode='copy',", "except Exception as e: print e.message return self.filePath = self.protect_pat+'\\\\'", "l=True) ad_node = [] for node in objects: children =", "print e.message print 'Error !! Skin bind failed : '", "dir_path+'/joint_rule_end.json' save_files = [start_file, middle_file, end_file] left_list_list = [] right_list_list", "mode == 'copy': self.weightCopy() if mode == 'paste': self.weightPaste() def", "= cmds.getAttr(srcSkinCluster + ' .bm') normalizeWeights = cmds.getAttr(srcSkinCluster + '", "str(meshName).replace(':', '__colon__') cmds.deformerWeights(meshName + '.xml', export=True, deformer=srcSkinCluster, path=self.filePath + '\\\\')", "os.makedirs(self.protect_path) except Exception as e: print e.message return self.filePath =", "dummyParent=dummy, mode='cut') influences = cmds.ls(influences, l=True, tr=True) # バインド dstSkinCluster", "the weight:', ja=u'ウェイトを転送:' ).output() massege03 = lang.Lang( en='Transfer bind influences:',", "' .nw') influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True) # qフラグは照会モード、ちなみにeは編集モード #", ".mmi') maxInfluences = cmds.getAttr(srcSkinCluster + ' .mi') bindMethod = cmds.getAttr(srcSkinCluster", "skinMesh + '] >>> [' + dst + ']' dstSkinCluster", "スキンメッシュではないのでウェイトの転送を行いません' ).output() massege02 = lang.Lang( en='Transfer the weight:', ja=u'ウェイトを転送:' ).output()", "is None: return srcShapes = cmds.listRelatives(srcNode, s=True, pa=True, type='mesh') if", "+= [node]+children #print len(ad_node) objects = set(ad_node) #print len(objects) if", "massege01 = lang.Lang( en=': It does not perform the transfer", "objects = cmds.ls(type='transform') if not objects: return mute_flag = 1", "xml_name): if self.method == 'index' or self.method == 'over': 
cmds.deformerWeights(xml_name,", "[] for node in objects: skin = cmds.ls(cmds.listHistory(node), type='skinCluster') if", "ポイントと頂点の距離に応じてウェイトを再スケールします。これは通常、高解像度メッシュにマッピングされる粗いメッシュで使用されます。 「over」法は「index」法に似ていますが、マッピング前に対象メッシュのウェイトがクリアされないため、一致していないインデックスのウェイトがそのまま維持されます。 nearest と barycentricは不具合のため現状仕様不可能(処理が終わらない)2016/11/03現在 →barycentric、bylinearはMaya2016Extention2から利用可能 weightFile→メッシュ名検索でなく手動指定したい場合にパスを指定。methodのnearest、barycentricとセットで使う感じ。 →Mayaコピー時にファイル名指定すると複数保存できないので注意。 threshold→nearest,barycentricの位置検索範囲 '''", "im=True, deformer=dstSkinCluster, method=self.method, worldSpace=True, positionTolerance=self.threshold, path=self.filePath + '\\\\') cmds.skinCluster(dstSkinCluster, e=True,", "lang.Lang( en='Transfer bind influences:', ja=u'バインド状態を転送:' ).output() if isinstance(skinMesh, list): #", "print 'Error !! Skin bind failed : ' + skinMesh", "try: cmds.bakePartialHistory(skinMesh, ppt=True) except: pass # ノードの中からスキンクラスタを取得してくる#inMesh直上がSkinClusterとは限らないので修正 srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh),", "ダミー親削除 cmds.delete(dummy) cmds.select(self.skinMeshes, r=True) # ウェイト情報を保存する関数 def weightCopy(self): saveData =", "type='string') # 可視性設定 cmds.setAttr(skinJoint + '.drawLabel', visibility) else: print(str(skinJoint) +", "except Exception as e: print e.message left_list_list.append(def_left_list_list[i]) right_list_list.append(def_right_list_list[i]) else: left_list_list.append(def_left_list_list[i])", "self.apiName = os.path.join(self.filePath, self.saveName + '.skn') # コピーかペーストをそれぞれ呼び出し if mode", "if returnInfluences: return influences else: return True def symmetry_weight(srcNode=None, dstNode=None,", "= cmds.ls(cmds.listHistory(srcNode), type='skinCluster') # スキンクラスタがあったらジョイントラベルを設定してウェイトミラー if srcSkinCluster: # バインド状態を転送する関数呼び出し skinJointAll", "'closestJoint', 'oneToOne'], normalize=True, noMirror=True ) if logTransfer: print massege02 +", "= json.load(f) l_list = save_data.keys() r_list = save_data.values() left_list_list.append(l_list) 
right_list_list.append(r_list)", "transfer of weight because it is not a skin mesh.',", "l_list = save_data.keys() r_list = save_data.values() left_list_list.append(l_list) right_list_list.append(r_list) except Exception", "= os.path.splitext(file) if ext == '.xml': xml_name = file else:", "influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True) # qフラグは照会モード、ちなみにeは編集モード # リストタイプじゃなかったらリストに変換する if", "= str(weightFile).replace('|', '__pipe__') if os.path.exists(self.fileName): try: with open(self.fileName, 'r') as", "en=': It does not perform the transfer of weight because", "' Skip Command') #ウェイトのミュートをトグル def toggle_mute_skinning(): msg01 = lang.Lang( en='No", "utf-8 -*- from maya import mel from maya import cmds", "dstSkinCluster[0] # 親子付けを戻す common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='parent') tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく except", "'\\\\') else: cmds.deformerWeights(xml_name, im=True, deformer=dstSkinCluster, method=self.method, worldSpace=True, positionTolerance=self.threshold, path=self.filePath +", "ext = os.path.splitext(file) if ext == '.xml': xml_name = file", "' .mi') bindMethod = cmds.getAttr(srcSkinCluster + ' .bm') normalizeWeights =", "+ massege01 return False # スキンクラスタがなかったら関数抜ける # スキンクラスタのパラメータ色々を取得しておく srcSkinCluster =", "side) # ラベルタイプを”その他”に設定 cmds.setAttr(skinJoint + '.type', 18) new_joint_name = split_name.replace(side_name.replace('.',", "' .skm') dropoffRate = cmds.getAttr(srcSkinCluster + ' .dr') maintainMaxInfluences =", "skin_list.append(skin) if cmds.getAttr(skin[0]+'.envelope') > 0: mute_flag = 0 for skin", "files: name, ext = os.path.splitext(file) if ext == '.xml': xml_name", "[] transferedMesh.append(temp) for dst in transferedMesh: #子供のノード退避用ダミーペアレントを用意 dummy = common.TemporaryReparent().main(mode='create')", "weightFile='auto', threshold=0.2, engine='maya', tgt=1, path='default', viewmsg=False): if viewmsg: cmds.inViewMessage( amg='<hl>Simple", "cmds.confirmDialog(m=msg01, t='', 
b= [msg02, msg03], db=msg02, cb=msg03, icn='question',ds=msg03) if all_mesh", "bind failed : ' + skinMesh continue else: dstSkinCluster =", "tsb=True, ) dstSkinCluster = dstSkinCluster[0] # 親子付けを戻す common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='parent')", "dstNode→反転先 symWeight→ウェイトミラーするかどうか ''' # スキンクラスタを取得 if srcNode is None: return", "transferedMesh: #子供のノード退避用ダミーペアレントを用意 dummy = common.TemporaryReparent().main(mode='create') common.TemporaryReparent().main(dst,dummyParent=dummy, mode='cut') shapes = cmds.listRelatives(dst,", "mode→コピーするかペーストするか'copy'or'paste' saveName→ウェイトデータの保存フォルダ名。ツール、モデル名とかで分けたい場合に指定 method→ペーストの仕方,「index」、「nearest」、「barycentric」、「over」 「index」法は、頂点インデックスを使用してウェイトをオブジェクトにマッピングします。マッピング先のオブジェクトと書き出し後のデータのトポロジが同じ場合、これが最も便利な手法です。 「nearest」法は、読み込んだデータのニアレスト頂点を検索し、ウェイト値をその値に設定します。これは、高解像度メッシュを低解像度メッシュにマッピングする場合に最適です。 「barycentric」法はポリゴン メッシュでのみサポートされます。ターゲット ジオメトリのニアレスト三角を検索し、 ソース ポイントと頂点の距離に応じてウェイトを再スケールします。これは通常、高解像度メッシュにマッピングされる粗いメッシュで使用されます。", "True all_influences = list(set(all_influences)) saveData[';influences'] = all_influences #インフルエンス数の変化に耐えられるようにあらかじめAddしてからコピーするS for skinMesh", "d=False) if not srcSkinCluster: if logTransfer: print skinMesh + massege01", "saveData[';dropoffRate'] = dropoffRate saveData[';maintainMaxInfluences'] = maintainMaxInfluences saveData[';maxInfluences'] = maxInfluences saveData[';bindMethod']", "self.protect_path = os.path.join(self.scene_path, 'weight_protector') try: if not os.path.exists(self.protect_path): os.makedirs(self.protect_path) except", "# コピーかペーストをそれぞれ呼び出し if mode == 'copy': self.weightCopy() if mode ==", "else: return True def symmetry_weight(srcNode=None, dstNode=None, symWeight=True): ''' ウェイトシンメトリする関数 srcNode→反転元", "not os.path.exists(self.filePath): os.makedirs(os.path.dirname(self.filePath + '\\\\')) # 末尾\\\\が必要なので注意 else: # ある場合は中身を削除", "ja=u'はい').output() msg03 = lang.Lang(en='No', ja=u'いいえ').output() msg04 = lang.Lang( en='Skinning is", "dst + ']' #親子付けを戻す 
common.TemporaryReparent().main(dst,dummyParent=dummy, mode='parent') #ダミーペアレントを削除 common.TemporaryReparent().main(dummyParent=dummy, mode='delete') if", "os.path.splitext(file) if ext == '.xml': xml_name = file else: #", "object→オブジェクト、リスト形式可 visibility→ラベルの可視性、省略可能。デフォルトFalse。 ''' #ラベリングルールをロードしておく left_list_list, right_list_list = load_joint_label_rules() # リストタイプじゃなかったらリストに変換する", "not objects: all_mesh = cmds.confirmDialog(m=msg01, t='', b= [msg02, msg03], db=msg02,", "save_data = json.load(f) l_list = save_data.keys() r_list = save_data.values() left_list_list.append(l_list)", "skinMesh + '] >>> [' + dst + ']' #親子付けを戻す", "mid_l_list = ['_L_', '_l_', '_Left_', '_left_'] mid_r_list = ['_R_', '_r_',", "True def symmetry_weight(srcNode=None, dstNode=None, symWeight=True): ''' ウェイトシンメトリする関数 srcNode→反転元 dstNode→反転先 symWeight→ウェイトミラーするかどうか", "return influences else: return True def symmetry_weight(srcNode=None, dstNode=None, symWeight=True): '''", "'_left'] end_r_list = ['_R', '_r', '_R.', '_r.', '_R..', '_r..', '_Right',", "is enabled', ja=u'スキニングが有効になりました') .output() cmds.selectMode(o=True) objects = cmds.ls(sl=True, l=True) ad_node", ">>> [' + dst + ']' dstSkinCluster = dstSkinCluster[0] if", "= load_joint_label_rules() # リストタイプじゃなかったらリストに変換する if not isinstance(object, list): temp =", "xml_name = file else: # Pipeはファイル名に出来ないので変換しておく meshName = str(weightFile).replace('|', '__pipe__')", "# 末尾\\\\が必要なので注意 else: # ある場合は中身を削除 files = os.listdir(self.filePath) if files", "= lang.Lang( en='Skinning is disabled', ja=u'スキニングは無効になりました') .output() msg05 = lang.Lang(", "middle_file, end_file] left_list_list = [] right_list_list = [] for i,", "'l_', 'Left_', 'left_'] start_r_list = ['R_', 'r_', 'Right_', 'right_'] mid_l_list", "= engine self.memShapes = {} self.target = tgt self.pasteMode =", "cmds.ls(cmds.listHistory(node), type='skinCluster') if not skin: continue skin_list.append(skin) if cmds.getAttr(skin[0]+'.envelope') >", "ja=u'いいえ').output() msg04 = lang.Lang( 
en='Skinning is disabled', ja=u'スキニングは無効になりました') .output() msg05", "'_R..', '_r..', '_Right', '_right'] def_left_list_list = [start_l_list, mid_l_list, end_l_list] def_right_list_list", "= [start_file, middle_file, end_file] left_list_list = [] right_list_list = []", "self.skinMeshes: try: cmds.bakePartialHistory(skinMesh, ppt=True) except: pass # ノードの中からスキンクラスタを取得してくる#inMesh直上がSkinClusterとは限らないので修正 srcSkinCluster =", "os.path.exists(save_file):#保存ファイルが存在したら try: with open(save_file, 'r') as f: save_data = json.load(f)", "'_r_', '_Right_', '_right_'] end_l_list = ['_L', '_l', '_L.', '_l.', '_L..',", "with open(self.fileName, 'w') as f: # ファイル開く'r'読み込みモード'w'書き込みモード json.dump(saveData, f) def", "maya import mel from maya import cmds from . import", "in transferedMesh: #子供のノード退避用ダミーペアレントを用意 dummy = common.TemporaryReparent().main(mode='create') common.TemporaryReparent().main(dst,dummyParent=dummy, mode='cut') shapes =", "'\\\\' + xml_name): if self.method == 'index' or self.method ==", "open(save_file, 'r') as f: save_data = json.load(f) l_list = save_data.keys()", "#print 'joint setting :', split_name, side, side_name # 左右のラベルを設定、どちらでもないときは中央 cmds.setAttr(skinJoint", "if not objects: return mute_flag = 1 skin_list = []", "srcSkinCluster: if logTransfer: print skinMesh + massege01 return False #", "→barycentric、bylinearはMaya2016Extention2から利用可能 weightFile→メッシュ名検索でなく手動指定したい場合にパスを指定。methodのnearest、barycentricとセットで使う感じ。 →Mayaコピー時にファイル名指定すると複数保存できないので注意。 threshold→nearest,barycentricの位置検索範囲 ''' self.skinMeshes = skinMeshes self.saveName =", "not isinstance(self.skinMeshes, list): temp = self.skinMeshes self.skinMeshes = [] self.skinMeshes.append(temp)", "def symmetry_weight(srcNode=None, dstNode=None, symWeight=True): ''' ウェイトシンメトリする関数 srcNode→反転元 dstNode→反転先 symWeight→ウェイトミラーするかどうか '''", "s=True, d=False) cmds.copySkinWeights(ss=srcSkinCluster[0], ds=dstSkinCluster[0], mirrorMode='YZ', surfaceAssociation='closestComponent', influenceAssociation='label', normalize=True) 
def load_joint_label_rules():", "db=msg02, cb=msg03, icn='question',ds=msg03) if all_mesh == msg02: objects = cmds.ls(type='transform')", "str(skinMesh) else: print 'Not exist seved weight XML file :", "else: print 'Not exist seved weight XML file : '", "# ある場合は中身を削除 files = os.listdir(self.filePath) if files is not None:", "engine self.memShapes = {} self.target = tgt self.pasteMode = {'index':1,", "= cmds.getAttr(srcSkinCluster + ' .skm') dropoffRate = cmds.getAttr(srcSkinCluster + '", "ja=u': スキンメッシュではないのでウェイトの転送を行いません' ).output() massege02 = lang.Lang( en='Transfer the weight:', ja=u'ウェイトを転送:'", "# 可視性設定 cmds.setAttr(skinJoint + '.drawLabel', visibility) else: print(str(skinJoint) + '", "= skinningMethod saveData[';dropoffRate'] = dropoffRate saveData[';maintainMaxInfluences'] = maintainMaxInfluences saveData[';maxInfluences'] =", "= dropoffRate saveData[';maintainMaxInfluences'] = maintainMaxInfluences saveData[';maxInfluences'] = maxInfluences saveData[';bindMethod'] =", "return mute_flag = 1 skin_list = [] for node in", "{} self.target = tgt self.pasteMode = {'index':1, 'nearest':3} # リストタイプじゃなかったらリストに変換する", "dr=dropoffRate, sm=skinningMethod, nw=normalizeWeights, tsb=True, ) dstSkinCluster = dstSkinCluster[0] # 親子付けを戻す", "transfer_weight(srcNode, dstNode, transferWeight=False, returnInfluences=True) dstShapes = cmds.listRelatives(dstNode, s=True, pa=True, type='mesh')", "logTransfer: print skinMesh + massege01 return False # スキンクラスタがなかったら関数抜ける #", "all_mesh == msg02: objects = cmds.ls(type='transform') if not objects: return", "saveData[';dropoffRate'] maintainMaxInfluences = saveData[';maintainMaxInfluences'] maxInfluences = saveData[';maxInfluences'] bindMethod = saveData[';bindMethod']", "main(self, skinMeshes, mode='copy', saveName='default', method='index', weightFile='auto', threshold=0.2, engine='maya', tgt=1, path='default',", "= str(weightFile).replace('|', '__pipe__') # コロンはファイル名に出来ないので変換しておく meshName = str(meshName).replace(':', '__colon__') 
cmds.deformerWeights(meshName", "if not skin: continue skin_list.append(skin) if cmds.getAttr(skin[0]+'.envelope') > 0: mute_flag", "'maya': # 読みに行くセーブファイル名を指定、autoならメッシュ名 if self.weightFile == 'auto': weightFile = skinMesh", "e=True, forceNormalizeWeights=True) print 'Weight paste to : ' + str(skinMesh)", "maxInfluences = saveData[';maxInfluences'] bindMethod = saveData[';bindMethod'] normalizeWeights = saveData[';normalizeWeights'] influences", "リストタイプじゃなかったらリストに変換する if not isinstance(transferedMesh, list): temp = transferedMesh transferedMesh =", "['L_', 'l_', 'Left_', 'left_'] start_r_list = ['R_', 'r_', 'Right_', 'right_']", "# スキンクラスタがなかったらfor分の次に移行 srcSkinCluster = srcSkinCluster[0] influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True)", "e.message return self.filePath = self.protect_pat+'\\\\' + self.saveName self.fileName = os.path.join(self.filePath,", "path == 'default': self.filePath = os.getenv('MAYA_APP_DIR') + '\\\\Scripting_Files\\\\weight\\\\' + self.saveName", "os.path.join(self.scene_path, 'weight_protector') try: if not os.path.exists(self.protect_path): os.makedirs(self.protect_path) except Exception as", "'] >>> [' + dst + ']' #親子付けを戻す common.TemporaryReparent().main(dst,dummyParent=dummy, mode='parent')", "+ skinMesh # ダミー親削除 cmds.delete(dummy) cmds.select(self.skinMeshes, r=True) # ウェイト情報を保存する関数 def", "saveData[';maintainMaxInfluences'] maxInfluences = saveData[';maxInfluences'] bindMethod = saveData[';bindMethod'] normalizeWeights = saveData[';normalizeWeights']", "= save_data.keys() r_list = save_data.values() left_list_list.append(l_list) right_list_list.append(r_list) except Exception as", "XML file : ' + skinMesh # ダミー親削除 cmds.delete(dummy) cmds.select(self.skinMeshes,", "export=True, deformer=srcSkinCluster, path=self.filePath + '\\\\') with open(self.fileName, 'w') as f:", "'Error !! 
Skin bind failed : ' + skinMesh continue", "for i, (l_list, r_list) in enumerate(zip(left_list_list, right_list_list)): for j, lr_list", "maxInfluences saveData[';bindMethod'] = bindMethod saveData[';normalizeWeights'] = normalizeWeights all_influences += influences", "= j + 1 if side:#対象が見つかってたら全部抜ける side_name = lr break", "+ str(skinMesh) else: print 'Not exist seved weight XML file", "#インフルエンス数の変化に耐えられるようにあらかじめAddしてからコピーするS for skinMesh in self.skinMeshes: srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') if", "new_joint_name, type='string') # 可視性設定 cmds.setAttr(skinJoint + '.drawLabel', visibility) else: print(str(skinJoint)", "= dstSkinCluster[0] tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく if self.engine == 'maya': files", "dstNode=None, symWeight=True): ''' ウェイトシンメトリする関数 srcNode→反転元 dstNode→反転先 symWeight→ウェイトミラーするかどうか ''' # スキンクラスタを取得", "paste to : ' + str(skinMesh) else: print 'Not exist", "except Exception as e: print e.message print 'Error !! Skin", "if mode == 'copy': self.weightCopy() if mode == 'paste': self.weightPaste()", "saveData['visibility']#セーブデータ読み込み skinningMethod = saveData[';skinningMethod'] dropoffRate = saveData[';dropoffRate'] maintainMaxInfluences = saveData[';maintainMaxInfluences']", "'\\\\') with open(self.fileName, 'w') as f: # ファイル開く'r'読み込みモード'w'書き込みモード json.dump(saveData, f)", "from maya import cmds from . 
import lang from .", "ファイル開く'r'読み込みモード'w'書き込みモード json.dump(saveData, f) def transfer_weight(skinMesh, transferedMesh, transferWeight=True, returnInfluences=False, logTransfer=True): '''", "en='Transfer the weight:', ja=u'ウェイトを転送:' ).output() massege03 = lang.Lang( en='Transfer bind", "'auto': weightFile = skinMesh else: weightFile = self.weightFile # Pipeはファイル名に出来ないので変換しておく", "0: mute_flag = 0 for skin in skin_list: cmds.setAttr(skin[0]+'.envelope', mute_flag)", "cmds.copySkinWeights(ss=srcSkinCluster[0], ds=dstSkinCluster[0], mirrorMode='YZ', surfaceAssociation='closestComponent', influenceAssociation='label', normalize=True) def load_joint_label_rules(): #ロードできなかった時の初期値 start_l_list", "スケルトン名設定 cmds.setAttr(skinJoint + '.otherType', new_joint_name, type='string') # 可視性設定 cmds.setAttr(skinJoint +", "end_r_list] #左右対称設定ファイルからルールをロードする dir_path = os.path.join( os.getenv('MAYA_APP_dir'), 'Scripting_Files') start_file = dir_path+'/joint_rule_start.json'", "「nearest」法は、読み込んだデータのニアレスト頂点を検索し、ウェイト値をその値に設定します。これは、高解像度メッシュを低解像度メッシュにマッピングする場合に最適です。 「barycentric」法はポリゴン メッシュでのみサポートされます。ターゲット ジオメトリのニアレスト三角を検索し、 ソース ポイントと頂点の距離に応じてウェイトを再スケールします。これは通常、高解像度メッシュにマッピングされる粗いメッシュで使用されます。 「over」法は「index」法に似ていますが、マッピング前に対象メッシュのウェイトがクリアされないため、一致していないインデックスのウェイトがそのまま維持されます。 nearest と barycentricは不具合のため現状仕様不可能(処理が終わらない)2016/11/03現在", "saveData[';skinningMethod'] dropoffRate = saveData[';dropoffRate'] maintainMaxInfluences = saveData[';maintainMaxInfluences'] maxInfluences = saveData[';maxInfluences']", "for dst in transferedMesh: #子供のノード退避用ダミーペアレントを用意 dummy = common.TemporaryReparent().main(mode='create') common.TemporaryReparent().main(dst,dummyParent=dummy, mode='cut')", "objects: all_mesh = cmds.confirmDialog(m=msg01, t='', b= [msg02, msg03], db=msg02, cb=msg03,", "= ['_R_', '_r_', '_Right_', '_right_'] end_l_list = ['_L', '_l', '_L.',", "= [] self.skinMeshes.append(temp) # ファイルパスを生成しておく if path == 'default': self.filePath", "'auto': weightFile = skinMesh else: weightFile = 
self.weightFile dstSkinCluster =", "files = os.listdir(self.filePath) print files if len(files) == 2: for", "1: if re.search(lr, split_name): side = j + 1 if", "srcSkinCluster = cmds.listConnections(skinMesh+'.inMesh', s=True, d=False) if not srcSkinCluster: if logTransfer:", "'__pipe__') # コロンはファイル名に出来ないので変換しておく meshName = str(meshName).replace(':', '__colon__') xml_name = meshName", "to : ' + str(skinMesh) else: print 'Not exist seved", "'_r', '_R.', '_r.', '_R..', '_r..', '_Right', '_right'] def_left_list_list = [start_l_list,", "+ 1 if i == 1: if re.search(lr, split_name): side", "inf=True) # qフラグは照会モード、ちなみにeは編集モード # リストタイプじゃなかったらリストに変換する if not isinstance(transferedMesh, list): temp", "[start_r_list, mid_r_list, end_r_list] #左右対称設定ファイルからルールをロードする dir_path = os.path.join( os.getenv('MAYA_APP_dir'), 'Scripting_Files') start_file", "for skinMesh in self.skinMeshes: # 読みに行くセーブファイル名を指定、autoならメッシュ名 if self.weightFile == 'auto':", "inf=True) #ジョイントを取得 for skinJoint in skinJointAll: # ジョイントラベル設定関数呼び出し joint_label(skinJoint, visibility=False)", "# self.visibility = saveData['visibility']#セーブデータ読み込み skinningMethod = saveData[';skinningMethod'] dropoffRate = saveData[';dropoffRate']", "スキンクラスタがない場合はあらかじめ取得しておいた情報をもとにバインドする if not dstSkinCluster: meshName = str(weightFile).replace('|', '__pipe__') if os.path.exists(self.fileName):", "side_name # 左右のラベルを設定、どちらでもないときは中央 cmds.setAttr(skinJoint + '.side', side) # ラベルタイプを”その他”に設定 cmds.setAttr(skinJoint", "else: # ある場合は中身を削除 files = os.listdir(self.filePath) if files is not", "skinMesh in self.skinMeshes: srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') if not srcSkinCluster:", "スキンウェイトの転送関数 転送先がバインドされていないオブジェクトの場合は転送元のバインド情報を元に自動バインド ・引数 skinMesh→転送元メッシュ(1個,リスト形式でも可) transferedMesh(リスト形式,複数可、リストじゃなくても大丈夫) transferWeight→ウェイトを転送するかどうか。省略可能、デフォルトはTrue logTransfer→ログ表示するかどうか returnInfluences→バインドされているインフルエンス情報を戻り値として返すかどうか。省略可能、デフォルトはFalse ''' massege01", "msg03 = lang.Lang(en='No', ja=u'いいえ').output() 
msg04 = lang.Lang( en='Skinning is disabled',", "a skin mesh.', ja=u': スキンメッシュではないのでウェイトの転送を行いません' ).output() massege02 = lang.Lang( en='Transfer", "skinJoint in object: objTypeName = cmds.objectType(skinJoint) if objTypeName == 'joint':", "sub_influences: cmds.skinCluster(skinMesh, e=True, ai=sub_influences, lw=True, ug=True, wt=0, ps=0) if self.engine", "side = 0 side_name = '' for i, (l_list, r_list)", "# スキンクラスタのパラメータ色々を取得しておく srcSkinCluster = srcSkinCluster[0] skinningMethod = cmds.getAttr(srcSkinCluster + '", "in self.skinMeshes: # 読みに行くセーブファイル名を指定、autoならメッシュ名 if self.weightFile == 'auto': weightFile =", "Weight</hl> : '+mode, pos='midCenterTop', fade=True, ta=0.75, a=0.5) ''' ウェイトデータの保存、読み込み関数 mode→コピーするかペーストするか'copy'or'paste'", "os.path.join(self.filePath, self.saveName + '.json') self.apiName = os.path.join(self.filePath, self.saveName + '.skn')", "if i == 1: if re.search(lr, split_name): side = j", "バインド dstSkinCluster = cmds.skinCluster( skinMesh, influences, omi=maintainMaxInfluences, mi=maxInfluences, dr=dropoffRate, sm=skinningMethod,", "# 転送元がリストだった場合、最初のメッシュのみ取り出す skinMesh = skinMesh[0] # リストを渡されたときのための保険 # ノードの中からスキンクラスタを取得してくる#inMesh直上がSkinClusterとは限らないので修正 srcSkinCluster", "transferWeight→ウェイトを転送するかどうか。省略可能、デフォルトはTrue logTransfer→ログ表示するかどうか returnInfluences→バインドされているインフルエンス情報を戻り値として返すかどうか。省略可能、デフォルトはFalse ''' massege01 = lang.Lang( en=': It does", "process all of mesh in this scene?.', ja=u'選択メッシュがありません。\\nシーン内のすべてのメッシュを処理しますか?').output() msg02 =", "in enumerate(save_files): if os.path.exists(save_file):#保存ファイルが存在したら try: with open(save_file, 'r') as f:", "os.path.exists(self.filePath): os.makedirs(os.path.dirname(self.filePath + '\\\\')) # 末尾\\\\が必要なので注意 else: # ある場合は中身を削除 files", "self.skinMeshes: srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') if not srcSkinCluster: continue #", "msg05 = lang.Lang( en='Skinning is enabled', ja=u'スキニングが有効になりました') .output() cmds.selectMode(o=True) objects", "='transform') ad_node += 
[node]+children #print len(ad_node) objects = set(ad_node) #print", "lr break if side: break if side: break #print 'joint", "# リストを渡されたときのための保険 # ノードの中からスキンクラスタを取得してくる#inMesh直上がSkinClusterとは限らないので修正 srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') # srcSkinCluster", "type='mesh') if srcShapes: srcSkinCluster = cmds.ls(cmds.listHistory(srcNode), type='skinCluster') # スキンクラスタがあったらジョイントラベルを設定してウェイトミラー if", "']' dstSkinCluster = dstSkinCluster[0] if transferWeight: cmds.copySkinWeights( ss=srcSkinCluster, ds=dstSkinCluster, surfaceAssociation='closestPoint',", "'_r.', '_R..', '_r..', '_Right', '_right'] def_left_list_list = [start_l_list, mid_l_list, end_l_list]", "{} # 保存ディレクトリが無かったら作成 if not os.path.exists(self.filePath): os.makedirs(os.path.dirname(self.filePath + '\\\\')) #", "objects: skin = cmds.ls(cmds.listHistory(node), type='skinCluster') if not skin: continue skin_list.append(skin)", "msg01 = lang.Lang( en='No mesh selection.\\nWould you like to process", "== 1: if re.search(lr, split_name): side = j + 1", "'\\\\' + file) skinFlag = False all_influences = [] for", "dr=dropoffRate, sm=skinningMethod, nw=normalizeWeights, tsb=True, ) if logTransfer: print massege03 +", "= self.protect_pat+'\\\\' + self.saveName self.fileName = os.path.join(self.filePath, self.saveName + '.json')", "+ '\\\\') cmds.skinCluster(dstSkinCluster, e=True, forceNormalizeWeights=True) print 'Weight paste to :", "side: break #print 'joint setting :', split_name, side, side_name #", "t='', b= [msg02, msg03], db=msg02, cb=msg03, icn='question',ds=msg03) if all_mesh ==", "object.append(temp) for skinJoint in object: objTypeName = cmds.objectType(skinJoint) if objTypeName", "if sub_influences: cmds.skinCluster(skinMesh, e=True, ai=sub_influences, lw=True, ug=True, wt=0, ps=0) if", "dstSkinCluster = cmds.listConnections(dstShapes[0] + '.inMesh', s=True, d=False) cmds.copySkinWeights(ss=srcSkinCluster[0], ds=dstSkinCluster[0], mirrorMode='YZ',", "= 
'/'.join(cmds.file(q=True, sceneName=True).split('/')[:-1]) self.protect_path = os.path.join(self.scene_path, 'weight_protector') try: if not", "in objects: skin = cmds.ls(cmds.listHistory(node), type='skinCluster') if not skin: continue", "dstSkinCluster = cmds.ls(cmds.listHistory(shapes[0]), type='skinCluster') # スキンクラスタがない場合はあらかじめ取得しておいた情報をもとにバインドする if not dstSkinCluster: #", "normalizeWeights = saveData[';normalizeWeights'] influences = saveData[';influences'] # 子のノードがトランスフォームならダミーに親子付けして退避 common.TemporaryReparent().main(skinMesh, dummyParent=dummy,", "viewmsg: cmds.inViewMessage( amg='<hl>Simple Weight</hl> : '+mode, pos='midCenterTop', fade=True, ta=0.75, a=0.5)", "'joint': split_name = skinJoint.split('|')[-1] # スケルトン名にLRが含まれているかどうかを判定 side = 0 side_name", "'Left_', 'left_'] start_r_list = ['R_', 'r_', 'Right_', 'right_'] mid_l_list =", "= cmds.skinCluster( dst, influences, omi=maintainMaxInfluences, mi=maxInfluences, dr=dropoffRate, sm=skinningMethod, nw=normalizeWeights, tsb=True,", "s=True, pa=True, type='mesh') if srcShapes: srcSkinCluster = cmds.ls(cmds.listHistory(srcNode), type='skinCluster') #", "common.TemporaryReparent().main(mode='create') common.TemporaryReparent().main(dst,dummyParent=dummy, mode='cut') shapes = cmds.listRelatives(dst, s=True, pa=True, type='mesh') if", ".nw') influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True) saveData[';skinningMethod'] = skinningMethod saveData[';dropoffRate']", "'__colon__') cmds.deformerWeights(meshName + '.xml', export=True, deformer=srcSkinCluster, path=self.filePath + '\\\\') with", "= skinJoint.split('|')[-1] # スケルトン名にLRが含まれているかどうかを判定 side = 0 side_name = ''", "self.memShapes = {} self.target = tgt self.pasteMode = {'index':1, 'nearest':3}", "'Not exist seved weight XML file : ' + skinMesh", "[] right_list_list = [] for i, save_file in enumerate(save_files): if", "as e: print e.message left_list_list.append(def_left_list_list[i]) right_list_list.append(def_right_list_list[i]) else: 
left_list_list.append(def_left_list_list[i]) right_list_list.append(def_right_list_list[i]) return", "It does not perform the transfer of weight because it", "q=True, inf=True) sub_influences = list(set(all_influences) - set(influences)) if sub_influences: cmds.skinCluster(skinMesh,", "os.path.exists(self.fileName): try: with open(self.fileName, 'r') as f: # ファイル開く'r'読み込みモード'w'書き込みモード saveData", "sm=skinningMethod, nw=normalizeWeights, tsb=True, ) dstSkinCluster = dstSkinCluster[0] # 親子付けを戻す common.TemporaryReparent().main(skinMesh,", "nearest と barycentricは不具合のため現状仕様不可能(処理が終わらない)2016/11/03現在 →barycentric、bylinearはMaya2016Extention2から利用可能 weightFile→メッシュ名検索でなく手動指定したい場合にパスを指定。methodのnearest、barycentricとセットで使う感じ。 →Mayaコピー時にファイル名指定すると複数保存できないので注意。 threshold→nearest,barycentricの位置検索範囲 ''' self.skinMeshes =", "# スキンクラスタがない場合はあらかじめ取得しておいた情報をもとにバインドする if not dstSkinCluster: # バインド dstSkinCluster = cmds.skinCluster(", "#子供のノード退避用ダミーペアレントを用意 dummy = common.TemporaryReparent().main(mode='create') common.TemporaryReparent().main(dst,dummyParent=dummy, mode='cut') shapes = cmds.listRelatives(dst, s=True,", "threshold→nearest,barycentricの位置検索範囲 ''' self.skinMeshes = skinMeshes self.saveName = saveName self.method =", "= cmds.skinCluster(srcSkinCluster, q=True, inf=True) saveData[';skinningMethod'] = skinningMethod saveData[';dropoffRate'] = dropoffRate", "pa=True, type='mesh') if srcShapes: srcSkinCluster = cmds.ls(cmds.listHistory(srcNode), type='skinCluster') # スキンクラスタがあったらジョイントラベルを設定してウェイトミラー", "bindMethod = saveData[';bindMethod'] normalizeWeights = saveData[';normalizeWeights'] influences = saveData[';influences'] #", "#print len(objects) if not objects: all_mesh = cmds.confirmDialog(m=msg01, t='', b=", "cmds from . import lang from . 
import common import", "if re.match(lr, split_name): side = j + 1 if i", "maintainMaxInfluences = saveData[';maintainMaxInfluences'] maxInfluences = saveData[';maxInfluences'] bindMethod = saveData[';bindMethod'] normalizeWeights", "# ファイルパスを生成しておく if path == 'default': self.filePath = os.getenv('MAYA_APP_DIR') +", "is not None: for file in files: os.remove(self.filePath + '\\\\'", "# 親子付けを戻す common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='parent') tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく except Exception", "= {} # 保存ディレクトリが無かったら作成 if not os.path.exists(self.filePath): os.makedirs(os.path.dirname(self.filePath + '\\\\'))", "'_L.', '_l.', '_L..', '_l..', '_Left', '_left'] end_r_list = ['_R', '_r',", "== 'maya': # 読みに行くセーブファイル名を指定、autoならメッシュ名 if self.weightFile == 'auto': weightFile =", "''' #ラベリングルールをロードしておく left_list_list, right_list_list = load_joint_label_rules() # リストタイプじゃなかったらリストに変換する if not", "ta=0.75, a=0.5) ''' ウェイトデータの保存、読み込み関数 mode→コピーするかペーストするか'copy'or'paste' saveName→ウェイトデータの保存フォルダ名。ツール、モデル名とかで分けたい場合に指定 method→ペーストの仕方,「index」、「nearest」、「barycentric」、「over」 「index」法は、頂点インデックスを使用してウェイトをオブジェクトにマッピングします。マッピング先のオブジェクトと書き出し後のデータのトポロジが同じ場合、これが最も便利な手法です。 「nearest」法は、読み込んだデータのニアレスト頂点を検索し、ウェイト値をその値に設定します。これは、高解像度メッシュを低解像度メッシュにマッピングする場合に最適です。 「barycentric」法はポリゴン", "influenceAssociation=['name', 'closestJoint', 'oneToOne'], normalize=True, noMirror=True ) if logTransfer: print massege02", "ジョイントラベル設定関数 object→オブジェクト、リスト形式可 visibility→ラベルの可視性、省略可能。デフォルトFalse。 ''' #ラベリングルールをロードしておく left_list_list, right_list_list = load_joint_label_rules() #", "symWeight is False or dstNode is None: return transfer_weight(srcNode, dstNode,", "in self.skinMeshes: try: cmds.bakePartialHistory(skinMesh, ppt=True) except: pass # ノードの中からスキンクラスタを取得してくる#inMesh直上がSkinClusterとは限らないので修正 srcSkinCluster", "'_r..', '_Right', '_right'] def_left_list_list = [start_l_list, mid_l_list, end_l_list] def_right_list_list =", "'over': cmds.deformerWeights(xml_name, im=True, 
method=self.method, deformer=dstSkinCluster, path=self.filePath + '\\\\') else: cmds.deformerWeights(xml_name,", "= os.path.join(self.filePath, self.saveName + '.json') self.apiName = os.path.join(self.filePath, self.saveName +", "exist seved weight XML file : ' + skinMesh #", "[] self.skinMeshes.append(temp) # ファイルパスを生成しておく if path == 'default': self.filePath =", "normalizeWeights = cmds.getAttr(srcSkinCluster + ' .nw') influences = cmds.skinCluster(srcSkinCluster, q=True,", "cmds.setAttr(skinJoint + '.side', side) # ラベルタイプを”その他”に設定 cmds.setAttr(skinJoint + '.type', 18)", "= [] for i, save_file in enumerate(save_files): if os.path.exists(save_file):#保存ファイルが存在したら try:", "weight:', ja=u'ウェイトを転送:' ).output() massege03 = lang.Lang( en='Transfer bind influences:', ja=u'バインド状態を転送:'", "common import os import json import re class WeightCopyPaste(): def", "cmds.listRelatives(dstNode, s=True, pa=True, type='mesh') dstSkinCluster = cmds.listConnections(dstShapes[0] + '.inMesh', s=True,", "+ ' .mi') bindMethod = cmds.getAttr(srcSkinCluster + ' .bm') normalizeWeights", "inf=True) sub_influences = list(set(all_influences) - set(influences)) if sub_influences: cmds.skinCluster(skinMesh, e=True,", "# コロンはファイル名に出来ないので変換しておく meshName = str(meshName).replace(':', '__colon__') cmds.deformerWeights(meshName + '.xml', export=True,", "e.message print 'Error !! 
Skin bind failed : ' +", "スキンクラスタを取得 if srcNode is None: return srcShapes = cmds.listRelatives(srcNode, s=True,", "if re.match(lr[::-1], split_name[::-1]): side = j + 1 if side:#対象が見つかってたら全部抜ける", "dst, influences, omi=maintainMaxInfluences, mi=maxInfluences, dr=dropoffRate, sm=skinningMethod, nw=normalizeWeights, tsb=True, ) if", "'[' + skinMesh + '] >>> [' + dst +", "= os.getenv('MAYA_APP_DIR') + '\\\\Scripting_Files\\\\weight\\\\' + self.saveName elif path == 'project':", "q=True, inf=True) saveData[';skinningMethod'] = skinningMethod saveData[';dropoffRate'] = dropoffRate saveData[';maintainMaxInfluences'] =", "return True def symmetry_weight(srcNode=None, dstNode=None, symWeight=True): ''' ウェイトシンメトリする関数 srcNode→反転元 dstNode→反転先", "= all_influences #インフルエンス数の変化に耐えられるようにあらかじめAddしてからコピーするS for skinMesh in self.skinMeshes: srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh),", "ラベルタイプを”その他”に設定 cmds.setAttr(skinJoint + '.type', 18) new_joint_name = split_name.replace(side_name.replace('.', ''), '')", "'r_', 'Right_', 'right_'] mid_l_list = ['_L_', '_l_', '_Left_', '_left_'] mid_r_list", "= cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') if not srcSkinCluster: continue # スキンクラスタがなかったら次に移行 tempSkinNode", "def toggle_mute_skinning(): msg01 = lang.Lang( en='No mesh selection.\\nWould you like", "= os.path.join(self.scene_path, 'weight_protector') try: if not os.path.exists(self.protect_path): os.makedirs(self.protect_path) except Exception", "visibility=False) if symWeight is False or dstNode is None: return", "self.saveName = saveName self.method = method self.weightFile = weightFile self.threshold", "except: pass # ノードの中からスキンクラスタを取得してくる#inMesh直上がSkinClusterとは限らないので修正 srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') if not", ".skm') dropoffRate = cmds.getAttr(srcSkinCluster + ' .dr') maintainMaxInfluences = cmds.getAttr(srcSkinCluster", "qフラグは照会モード、ちなみにeは編集モード # リストタイプじゃなかったらリストに変換する if not isinstance(transferedMesh, list): temp = 
transferedMesh", "cmds.getAttr(srcSkinCluster + ' .skm') dropoffRate = cmds.getAttr(srcSkinCluster + ' .dr')", "as f: # ファイル開く'r'読み込みモード'w'書き込みモード json.dump(saveData, f) def transfer_weight(skinMesh, transferedMesh, transferWeight=True,", "' + str(objTypeName) + ' Skip Command') #ウェイトのミュートをトグル def toggle_mute_skinning():", "スキンクラスタがない場合はあらかじめ取得しておいた情報をもとにバインドする if not dstSkinCluster: # バインド dstSkinCluster = cmds.skinCluster( dst,", "type='skinCluster') # srcSkinCluster = cmds.listConnections(skinMesh+'.inMesh', s=True, d=False) if not srcSkinCluster:", "mi=maxInfluences, dr=dropoffRate, sm=skinningMethod, nw=normalizeWeights, tsb=True, ) if logTransfer: print massege03", "start_r_list = ['R_', 'r_', 'Right_', 'right_'] mid_l_list = ['_L_', '_l_',", "lang.Lang(en='Yes', ja=u'はい').output() msg03 = lang.Lang(en='No', ja=u'いいえ').output() msg04 = lang.Lang( en='Skinning", "files if len(files) == 2: for file in files: name,", "== 'joint': split_name = skinJoint.split('|')[-1] # スケルトン名にLRが含まれているかどうかを判定 side = 0", "if self.engine == 'maya': # 読みに行くセーブファイル名を指定、autoならメッシュ名 if self.weightFile == 'auto':", "returnInfluences=True) dstShapes = cmds.listRelatives(dstNode, s=True, pa=True, type='mesh') dstSkinCluster = cmds.listConnections(dstShapes[0]", "= maxInfluences saveData[';bindMethod'] = bindMethod saveData[';normalizeWeights'] = normalizeWeights all_influences +=", "[] for i, save_file in enumerate(save_files): if os.path.exists(save_file):#保存ファイルが存在したら try: with", "2: for file in files: name, ext = os.path.splitext(file) if", "= 0 side_name = '' for i, (l_list, r_list) in", "new_joint_name = split_name.replace(side_name.replace('.', ''), '') # スケルトン名設定 cmds.setAttr(skinJoint + '.otherType',", "'+mode, pos='midCenterTop', fade=True, ta=0.75, a=0.5) ''' ウェイトデータの保存、読み込み関数 mode→コピーするかペーストするか'copy'or'paste' saveName→ウェイトデータの保存フォルダ名。ツール、モデル名とかで分けたい場合に指定 method→ペーストの仕方,「index」、「nearest」、「barycentric」、「over」", "= cmds.getAttr(srcSkinCluster + ' .nw') influences = 
cmds.skinCluster(srcSkinCluster, q=True, inf=True)", "print skinMesh + massege01 return False # スキンクラスタがなかったら関数抜ける # スキンクラスタのパラメータ色々を取得しておく", "os.getenv('MAYA_APP_dir'), 'Scripting_Files') start_file = dir_path+'/joint_rule_start.json' middle_file = dir_path+'/joint_rule_middle.json' end_file =", "2: if re.match(lr[::-1], split_name[::-1]): side = j + 1 if", "dstNode, transferWeight=False, returnInfluences=True) dstShapes = cmds.listRelatives(dstNode, s=True, pa=True, type='mesh') dstSkinCluster", "1 if i == 1: if re.search(lr, split_name): side =", "r_list = save_data.values() left_list_list.append(l_list) right_list_list.append(r_list) except Exception as e: print", "s=True, pa=True, type='mesh') if not shapes: # もしメッシュがなかったら continue #", "'paste': self.weightPaste() def weightPaste(self): dummy = cmds.spaceLocator() for skinMesh in", "print e.message left_list_list.append(def_left_list_list[i]) right_list_list.append(def_right_list_list[i]) else: left_list_list.append(def_left_list_list[i]) right_list_list.append(def_right_list_list[i]) return left_list_list, right_list_list", "+ xml_name): if self.method == 'index' or self.method == 'over':", "'.type', 18) new_joint_name = split_name.replace(side_name.replace('.', ''), '') # スケルトン名設定 cmds.setAttr(skinJoint", "cmds.ls(influences, l=True, tr=True) # バインド dstSkinCluster = cmds.skinCluster( skinMesh, influences,", "# コロンはファイル名に出来ないので変換しておく meshName = str(meshName).replace(':', '__colon__') xml_name = meshName +", "[] for node in objects: children = cmds.ls(cmds.listRelatives(node, ad=True, f=True),", "= set(ad_node) #print len(objects) if not objects: all_mesh = cmds.confirmDialog(m=msg01,", "self.weightPaste() def weightPaste(self): dummy = cmds.spaceLocator() for skinMesh in self.skinMeshes:", "srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') if not srcSkinCluster: continue # スキンクラスタがなかったらfor分の次に移行", "transferWeight=True, returnInfluences=False, logTransfer=True): ''' スキンウェイトの転送関数 
転送先がバインドされていないオブジェクトの場合は転送元のバインド情報を元に自動バインド ・引数 skinMesh→転送元メッシュ(1個,リスト形式でも可) transferedMesh(リスト形式,複数可、リストじゃなくても大丈夫) transferWeight→ウェイトを転送するかどうか。省略可能、デフォルトはTrue", "objects = cmds.ls(sl=True, l=True) ad_node = [] for node in", "cmds.deformerWeights(xml_name, im=True, deformer=dstSkinCluster, method=self.method, worldSpace=True, positionTolerance=self.threshold, path=self.filePath + '\\\\') cmds.skinCluster(dstSkinCluster,", "= str(meshName).replace(':', '__colon__') xml_name = meshName + '.xml' if os.path.isfile(self.filePath", "ジョイントラベル設定関数呼び出し joint_label(skinJoint, visibility=False) if symWeight is False or dstNode is", "s=True, d=False) if not srcSkinCluster: if logTransfer: print skinMesh +", "= cmds.ls(type='transform') if not objects: return mute_flag = 1 skin_list", "print massege02 + '[' + skinMesh + '] >>> ['", "ノードの中からスキンクラスタを取得してくる#inMesh直上がSkinClusterとは限らないので修正 srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') if not srcSkinCluster: continue #", "dummyParent=dummy, mode='parent') tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく except Exception as e: print", "else: weightFile = self.weightFile # Pipeはファイル名に出来ないので変換しておく meshName = str(weightFile).replace('|', '__pipe__')", "= saveData[';bindMethod'] normalizeWeights = saveData[';normalizeWeights'] influences = saveData[';influences'] # 子のノードがトランスフォームならダミーに親子付けして退避", "skinJoint.split('|')[-1] # スケルトン名にLRが含まれているかどうかを判定 side = 0 side_name = '' for", "[] object.append(temp) for skinJoint in object: objTypeName = cmds.objectType(skinJoint) if", "ウェイトデータの保存、読み込み関数 mode→コピーするかペーストするか'copy'or'paste' saveName→ウェイトデータの保存フォルダ名。ツール、モデル名とかで分けたい場合に指定 method→ペーストの仕方,「index」、「nearest」、「barycentric」、「over」 「index」法は、頂点インデックスを使用してウェイトをオブジェクトにマッピングします。マッピング先のオブジェクトと書き出し後のデータのトポロジが同じ場合、これが最も便利な手法です。 「nearest」法は、読み込んだデータのニアレスト頂点を検索し、ウェイト値をその値に設定します。これは、高解像度メッシュを低解像度メッシュにマッピングする場合に最適です。 「barycentric」法はポリゴン メッシュでのみサポートされます。ターゲット ジオメトリのニアレスト三角を検索し、 ソース", 
"ja=u'選択メッシュがありません。\\nシーン内のすべてのメッシュを処理しますか?').output() msg02 = lang.Lang(en='Yes', ja=u'はい').output() msg03 = lang.Lang(en='No', ja=u'いいえ').output() msg04", "= {'index':1, 'nearest':3} # リストタイプじゃなかったらリストに変換する if not isinstance(self.skinMeshes, list): temp", "not srcSkinCluster: continue # スキンクラスタがなかったらfor分の次に移行 srcSkinCluster = srcSkinCluster[0] influences =", "en='No mesh selection.\\nWould you like to process all of mesh", "'left_'] start_r_list = ['R_', 'r_', 'Right_', 'right_'] mid_l_list = ['_L_',", "os.path.exists(self.protect_path): os.makedirs(self.protect_path) except Exception as e: print e.message return self.filePath", "= list(set(all_influences) - set(influences)) if sub_influences: cmds.skinCluster(skinMesh, e=True, ai=sub_influences, lw=True,", "if not dstSkinCluster: meshName = str(weightFile).replace('|', '__pipe__') if os.path.exists(self.fileName): try:", "visibility) else: print(str(skinJoint) + ' : ' + str(objTypeName) +", "continue skin_list.append(skin) if cmds.getAttr(skin[0]+'.envelope') > 0: mute_flag = 0 for", "'__colon__') xml_name = meshName + '.xml' if os.path.isfile(self.filePath + '\\\\'", "cmds.bakePartialHistory(skinMesh, ppt=True) except: pass # ノードの中からスキンクラスタを取得してくる#inMesh直上がSkinClusterとは限らないので修正 srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')", "1 skin_list = [] for node in objects: skin =", "Pipeはファイル名に出来ないので変換しておく meshName = str(weightFile).replace('|', '__pipe__') # コロンはファイル名に出来ないので変換しておく meshName = str(meshName).replace(':',", "common.TemporaryReparent().main(dummyParent=dummy, mode='delete') if returnInfluences: return influences else: return True def", "in enumerate(lr_list): if i == 0: if re.match(lr, split_name): side", "is None: return transfer_weight(srcNode, dstNode, transferWeight=False, returnInfluences=True) dstShapes = cmds.listRelatives(dstNode,", "= lang.Lang( en='No mesh selection.\\nWould you like to process all", "skinMesh = skinMesh[0] # リストを渡されたときのための保険 # 
ノードの中からスキンクラスタを取得してくる#inMesh直上がSkinClusterとは限らないので修正 srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh),", "json.load(f) # ロード # self.visibility = saveData['visibility']#セーブデータ読み込み skinningMethod = saveData[';skinningMethod']", "for j, lr_list in enumerate([l_list, r_list]): for k, lr in", "for skinJoint in skinJointAll: # ジョイントラベル設定関数呼び出し joint_label(skinJoint, visibility=False) if symWeight", "if transferWeight: cmds.copySkinWeights( ss=srcSkinCluster, ds=dstSkinCluster, surfaceAssociation='closestPoint', influenceAssociation=['name', 'closestJoint', 'oneToOne'], normalize=True,", "dstSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') # スキンクラスタがない場合はあらかじめ取得しておいた情報をもとにバインドする if not dstSkinCluster: meshName", "リストタイプじゃなかったらリストに変換する if not isinstance(self.skinMeshes, list): temp = self.skinMeshes self.skinMeshes =", "= cmds.skinCluster(srcSkinCluster, q=True, inf=True) # qフラグは照会モード、ちなみにeは編集モード # リストタイプじゃなかったらリストに変換する if not", "+ ' Skip Command') #ウェイトのミュートをトグル def toggle_mute_skinning(): msg01 = lang.Lang(", "+ '\\\\' + file) skinFlag = False all_influences = []", ".dr') maintainMaxInfluences = cmds.getAttr(srcSkinCluster + ' .mmi') maxInfluences = cmds.getAttr(srcSkinCluster", "'project': self.scene_path = '/'.join(cmds.file(q=True, sceneName=True).split('/')[:-1]) self.protect_path = os.path.join(self.scene_path, 'weight_protector') try:", "'w') as f: # ファイル開く'r'読み込みモード'w'書き込みモード json.dump(saveData, f) def transfer_weight(skinMesh, transferedMesh,", "親子付けを戻す common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='parent') tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく except Exception as", "== 'project': self.scene_path = '/'.join(cmds.file(q=True, sceneName=True).split('/')[:-1]) self.protect_path = os.path.join(self.scene_path, 'weight_protector')", "cmds.skinCluster( dst, influences, omi=maintainMaxInfluences, mi=maxInfluences, dr=dropoffRate, sm=skinningMethod, nw=normalizeWeights, tsb=True, )", 
"cmds.listConnections(dstShapes[0] + '.inMesh', s=True, d=False) cmds.copySkinWeights(ss=srcSkinCluster[0], ds=dstSkinCluster[0], mirrorMode='YZ', surfaceAssociation='closestComponent', influenceAssociation='label',", "srcShapes = cmds.listRelatives(srcNode, s=True, pa=True, type='mesh') if srcShapes: srcSkinCluster =", "re.search(lr, split_name): side = j + 1 if i ==", "[node]+children #print len(ad_node) objects = set(ad_node) #print len(objects) if not", "末尾\\\\が必要なので注意 else: # ある場合は中身を削除 files = os.listdir(self.filePath) if files is", "['_L_', '_l_', '_Left_', '_left_'] mid_r_list = ['_R_', '_r_', '_Right_', '_right_']", "cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') if not srcSkinCluster: continue # スキンクラスタがなかったらfor分の次に移行 srcSkinCluster =", "for node in objects: children = cmds.ls(cmds.listRelatives(node, ad=True, f=True), type", "not None: for file in files: os.remove(self.filePath + '\\\\' +", "もしメッシュがなかったら continue # 処理を中断して次のオブジェクトへ # スキンクラスタの有無を取得 dstSkinCluster = cmds.ls(cmds.listHistory(shapes[0]), type='skinCluster')", "'' for i, (l_list, r_list) in enumerate(zip(left_list_list, right_list_list)): for j,", "file in files: os.remove(self.filePath + '\\\\' + file) skinFlag =", "all_mesh = cmds.confirmDialog(m=msg01, t='', b= [msg02, msg03], db=msg02, cb=msg03, icn='question',ds=msg03)", "= j + 1 if i == 2: if re.match(lr[::-1],", "「barycentric」法はポリゴン メッシュでのみサポートされます。ターゲット ジオメトリのニアレスト三角を検索し、 ソース ポイントと頂点の距離に応じてウェイトを再スケールします。これは通常、高解像度メッシュにマッピングされる粗いメッシュで使用されます。 「over」法は「index」法に似ていますが、マッピング前に対象メッシュのウェイトがクリアされないため、一致していないインデックスのウェイトがそのまま維持されます。 nearest と barycentricは不具合のため現状仕様不可能(処理が終わらない)2016/11/03現在 →barycentric、bylinearはMaya2016Extention2から利用可能", "or dstNode is None: return transfer_weight(srcNode, dstNode, transferWeight=False, returnInfluences=True) dstShapes", "from . 
import common import os import json import re", "lang.Lang(en='No', ja=u'いいえ').output() msg04 = lang.Lang( en='Skinning is disabled', ja=u'スキニングは無効になりました') .output()", "not dstSkinCluster: # バインド dstSkinCluster = cmds.skinCluster( dst, influences, omi=maintainMaxInfluences,", "cmds.getAttr(srcSkinCluster + ' .dr') maintainMaxInfluences = cmds.getAttr(srcSkinCluster + ' .mmi')", "''' スキンウェイトの転送関数 転送先がバインドされていないオブジェクトの場合は転送元のバインド情報を元に自動バインド ・引数 skinMesh→転送元メッシュ(1個,リスト形式でも可) transferedMesh(リスト形式,複数可、リストじゃなくても大丈夫) transferWeight→ウェイトを転送するかどうか。省略可能、デフォルトはTrue logTransfer→ログ表示するかどうか returnInfluences→バインドされているインフルエンス情報を戻り値として返すかどうか。省略可能、デフォルトはFalse '''", "set(influences)) if sub_influences: cmds.skinCluster(skinMesh, e=True, ai=sub_influences, lw=True, ug=True, wt=0, ps=0)", "side_name = lr break if side: break if side: break", "= ['_R', '_r', '_R.', '_r.', '_R..', '_r..', '_Right', '_right'] def_left_list_list", "cmds.skinCluster(srcSkinCluster, q=True, inf=True) #ジョイントを取得 for skinJoint in skinJointAll: # ジョイントラベル設定関数呼び出し", "= file else: # Pipeはファイル名に出来ないので変換しておく meshName = str(weightFile).replace('|', '__pipe__') #", "srcSkinCluster = srcSkinCluster[0] skinningMethod = cmds.getAttr(srcSkinCluster + ' .skm') dropoffRate", "= saveData['visibility']#セーブデータ読み込み skinningMethod = saveData[';skinningMethod'] dropoffRate = saveData[';dropoffRate'] maintainMaxInfluences =", "saveData[';influences'] = all_influences #インフルエンス数の変化に耐えられるようにあらかじめAddしてからコピーするS for skinMesh in self.skinMeshes: srcSkinCluster =", "pass # ノードの中からスキンクラスタを取得してくる#inMesh直上がSkinClusterとは限らないので修正 srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') if not srcSkinCluster:", "normalize=True) def load_joint_label_rules(): #ロードできなかった時の初期値 start_l_list = ['L_', 'l_', 'Left_', 'left_']", "'_l..', '_Left', '_left'] end_r_list = ['_R', '_r', '_R.', '_r.', '_R..',", "isinstance(transferedMesh, list): temp = transferedMesh transferedMesh = [] transferedMesh.append(temp) for", "inf=True) 
saveData[';skinningMethod'] = skinningMethod saveData[';dropoffRate'] = dropoffRate saveData[';maintainMaxInfluences'] = maintainMaxInfluences", "= os.path.join(self.filePath, self.saveName + '.skn') # コピーかペーストをそれぞれ呼び出し if mode ==", "if i == 2: if re.match(lr[::-1], split_name[::-1]): side = j", "'.xml' if os.path.isfile(self.filePath + '\\\\' + xml_name): if self.method ==", "'.xml': xml_name = file else: # Pipeはファイル名に出来ないので変換しておく meshName = str(weightFile).replace('|',", ".bm') normalizeWeights = cmds.getAttr(srcSkinCluster + ' .nw') influences = cmds.skinCluster(srcSkinCluster,", "lang.Lang( en='Skinning is disabled', ja=u'スキニングは無効になりました') .output() msg05 = lang.Lang( en='Skinning", "+ ' .nw') influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True) saveData[';skinningMethod'] =", "ai=sub_influences, lw=True, ug=True, wt=0, ps=0) if self.engine == 'maya': #", "== 'default': self.filePath = os.getenv('MAYA_APP_DIR') + '\\\\Scripting_Files\\\\weight\\\\' + self.saveName elif", "== 'auto': weightFile = skinMesh else: weightFile = self.weightFile dstSkinCluster", "saveData[';maintainMaxInfluences'] = maintainMaxInfluences saveData[';maxInfluences'] = maxInfluences saveData[';bindMethod'] = bindMethod saveData[';normalizeWeights']", "transferWeight: cmds.copySkinWeights( ss=srcSkinCluster, ds=dstSkinCluster, surfaceAssociation='closestPoint', influenceAssociation=['name', 'closestJoint', 'oneToOne'], normalize=True, noMirror=True", "= skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく except Exception as e: print e.message print 'Error", "in this scene?.', ja=u'選択メッシュがありません。\\nシーン内のすべてのメッシュを処理しますか?').output() msg02 = lang.Lang(en='Yes', ja=u'はい').output() msg03 =", "'.otherType', new_joint_name, type='string') # 可視性設定 cmds.setAttr(skinJoint + '.drawLabel', visibility) else:", "continue # スキンクラスタがなかったらfor分の次に移行 srcSkinCluster = srcSkinCluster[0] influences = cmds.skinCluster(srcSkinCluster, q=True,", "srcSkinCluster: continue # スキンクラスタがなかったらfor分の次に移行 srcSkinCluster = 
srcSkinCluster[0] influences = cmds.skinCluster(srcSkinCluster,", "skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく except Exception as e: print e.message print 'Error !!", "influenceAssociation='label', normalize=True) def load_joint_label_rules(): #ロードできなかった時の初期値 start_l_list = ['L_', 'l_', 'Left_',", "= srcSkinCluster[0] skinningMethod = cmds.getAttr(srcSkinCluster + ' .skm') dropoffRate =", "j + 1 if i == 1: if re.search(lr, split_name):", "#ウェイトのミュートをトグル def toggle_mute_skinning(): msg01 = lang.Lang( en='No mesh selection.\\nWould you", "not a skin mesh.', ja=u': スキンメッシュではないのでウェイトの転送を行いません' ).output() massege02 = lang.Lang(", "import mel from maya import cmds from . import lang", "nw=normalizeWeights, tsb=True, ) if logTransfer: print massege03 + '[' +", "ウェイト情報を保存する関数 def weightCopy(self): saveData = {} # 保存ディレクトリが無かったら作成 if not", "os.path.join(self.filePath, self.saveName + '.skn') # コピーかペーストをそれぞれ呼び出し if mode == 'copy':", "in files: os.remove(self.filePath + '\\\\' + file) skinFlag = False", "returnInfluences=False, logTransfer=True): ''' スキンウェイトの転送関数 転送先がバインドされていないオブジェクトの場合は転送元のバインド情報を元に自動バインド ・引数 skinMesh→転送元メッシュ(1個,リスト形式でも可) transferedMesh(リスト形式,複数可、リストじゃなくても大丈夫) transferWeight→ウェイトを転送するかどうか。省略可能、デフォルトはTrue logTransfer→ログ表示するかどうか", "+ '] >>> [' + dst + ']' #親子付けを戻す common.TemporaryReparent().main(dst,dummyParent=dummy,", "def load_joint_label_rules(): #ロードできなかった時の初期値 start_l_list = ['L_', 'l_', 'Left_', 'left_'] start_r_list", "files: os.remove(self.filePath + '\\\\' + file) skinFlag = False all_influences", "def weightCopy(self): saveData = {} # 保存ディレクトリが無かったら作成 if not os.path.exists(self.filePath):", "sm=skinningMethod, nw=normalizeWeights, tsb=True, ) if logTransfer: print massege03 + '['", "dir_path = os.path.join( os.getenv('MAYA_APP_dir'), 'Scripting_Files') start_file = dir_path+'/joint_rule_start.json' middle_file =", "type='skinCluster') if not skin: continue skin_list.append(skin) if cmds.getAttr(skin[0]+'.envelope') > 0:", "mute_flag = 1 skin_list = [] 
for node in objects:", "if self.engine == 'maya': files = os.listdir(self.filePath) print files if", "because it is not a skin mesh.', ja=u': スキンメッシュではないのでウェイトの転送を行いません' ).output()", "+ ' .mmi') maxInfluences = cmds.getAttr(srcSkinCluster + ' .mi') bindMethod", "+ '\\\\' + xml_name): if self.method == 'index' or self.method", "start_l_list = ['L_', 'l_', 'Left_', 'left_'] start_r_list = ['R_', 'r_',", "・引数 skinMesh→転送元メッシュ(1個,リスト形式でも可) transferedMesh(リスト形式,複数可、リストじゃなくても大丈夫) transferWeight→ウェイトを転送するかどうか。省略可能、デフォルトはTrue logTransfer→ログ表示するかどうか returnInfluences→バインドされているインフルエンス情報を戻り値として返すかどうか。省略可能、デフォルトはFalse ''' massege01 = lang.Lang(", "normalize=True, noMirror=True ) if logTransfer: print massege02 + '[' +", "dstSkinCluster[0] if transferWeight: cmds.copySkinWeights( ss=srcSkinCluster, ds=dstSkinCluster, surfaceAssociation='closestPoint', influenceAssociation=['name', 'closestJoint', 'oneToOne'],", "transferedMesh.append(temp) for dst in transferedMesh: #子供のノード退避用ダミーペアレントを用意 dummy = common.TemporaryReparent().main(mode='create') common.TemporaryReparent().main(dst,dummyParent=dummy,", "= weightFile self.threshold = threshold self.engine = engine self.memShapes =", "= skinMeshes self.saveName = saveName self.method = method self.weightFile =", "mode='delete') if returnInfluences: return influences else: return True def symmetry_weight(srcNode=None,", "if self.method == 'index' or self.method == 'over': cmds.deformerWeights(xml_name, im=True,", ": '+mode, pos='midCenterTop', fade=True, ta=0.75, a=0.5) ''' ウェイトデータの保存、読み込み関数 mode→コピーするかペーストするか'copy'or'paste' saveName→ウェイトデータの保存フォルダ名。ツール、モデル名とかで分けたい場合に指定", "'index' or self.method == 'over': cmds.deformerWeights(xml_name, im=True, method=self.method, deformer=dstSkinCluster, path=self.filePath", "== 2: if re.match(lr[::-1], split_name[::-1]): side = j + 1", "transferedMesh = [] transferedMesh.append(temp) for dst in transferedMesh: #子供のノード退避用ダミーペアレントを用意 dummy", "fade=True, ta=0.75, a=0.5) ''' ウェイトデータの保存、読み込み関数 
mode→コピーするかペーストするか'copy'or'paste' saveName→ウェイトデータの保存フォルダ名。ツール、モデル名とかで分けたい場合に指定 method→ペーストの仕方,「index」、「nearest」、「barycentric」、「over」 「index」法は、頂点インデックスを使用してウェイトをオブジェクトにマッピングします。マッピング先のオブジェクトと書き出し後のデータのトポロジが同じ場合、これが最も便利な手法です。 「nearest」法は、読み込んだデータのニアレスト頂点を検索し、ウェイト値をその値に設定します。これは、高解像度メッシュを低解像度メッシュにマッピングする場合に最適です。", "influences, omi=maintainMaxInfluences, mi=maxInfluences, dr=dropoffRate, sm=skinningMethod, nw=normalizeWeights, tsb=True, ) if logTransfer:", "for file in files: name, ext = os.path.splitext(file) if ext", "lr_list in enumerate([l_list, r_list]): for k, lr in enumerate(lr_list): if", "skinMeshes, mode='copy', saveName='default', method='index', weightFile='auto', threshold=0.2, engine='maya', tgt=1, path='default', viewmsg=False):", "if not srcSkinCluster: continue # スキンクラスタがなかったらfor分の次に移行 srcSkinCluster = srcSkinCluster[0] influences", "バインド状態を転送する関数呼び出し skinJointAll = cmds.skinCluster(srcSkinCluster, q=True, inf=True) #ジョイントを取得 for skinJoint in", "if os.path.exists(self.fileName): try: with open(self.fileName, 'r') as f: # ファイル開く'r'読み込みモード'w'書き込みモード", "False all_influences = [] for skinMesh in self.skinMeshes: try: cmds.bakePartialHistory(skinMesh,", "スキンクラスタがなかったら関数抜ける # スキンクラスタのパラメータ色々を取得しておく srcSkinCluster = srcSkinCluster[0] skinningMethod = cmds.getAttr(srcSkinCluster +", "= saveData[';normalizeWeights'] influences = saveData[';influences'] # 子のノードがトランスフォームならダミーに親子付けして退避 common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='cut')", "all of mesh in this scene?.', ja=u'選択メッシュがありません。\\nシーン内のすべてのメッシュを処理しますか?').output() msg02 = lang.Lang(en='Yes',", "ja=u'ウェイトを転送:' ).output() massege03 = lang.Lang( en='Transfer bind influences:', ja=u'バインド状態を転送:' ).output()", "mute_flag) if mute_flag == 0: cmds.confirmDialog(m=msg04) if mute_flag == 1:", "logTransfer: print massege02 + '[' + skinMesh + '] >>>", "cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') # スキンクラスタがない場合はあらかじめ取得しておいた情報をもとにバインドする if not dstSkinCluster: meshName = 
str(weightFile).replace('|',", "+ 1 if i == 2: if re.match(lr[::-1], split_name[::-1]): side", "massege02 = lang.Lang( en='Transfer the weight:', ja=u'ウェイトを転送:' ).output() massege03 =", "like to process all of mesh in this scene?.', ja=u'選択メッシュがありません。\\nシーン内のすべてのメッシュを処理しますか?').output()", "path=self.filePath + '\\\\') with open(self.fileName, 'w') as f: # ファイル開く'r'読み込みモード'w'書き込みモード", "- set(influences)) if sub_influences: cmds.skinCluster(skinMesh, e=True, ai=sub_influences, lw=True, ug=True, wt=0,", "dst + ']' dstSkinCluster = dstSkinCluster[0] if transferWeight: cmds.copySkinWeights( ss=srcSkinCluster,", "cmds.listRelatives(srcNode, s=True, pa=True, type='mesh') if srcShapes: srcSkinCluster = cmds.ls(cmds.listHistory(srcNode), type='skinCluster')", "リストタイプじゃなかったらリストに変換する if not isinstance(object, list): temp = object object =", "== 'paste': self.weightPaste() def weightPaste(self): dummy = cmds.spaceLocator() for skinMesh", "split_name = skinJoint.split('|')[-1] # スケルトン名にLRが含まれているかどうかを判定 side = 0 side_name =", "= cmds.listRelatives(srcNode, s=True, pa=True, type='mesh') if srcShapes: srcSkinCluster = cmds.ls(cmds.listHistory(srcNode),", "sub_influences = list(set(all_influences) - set(influences)) if sub_influences: cmds.skinCluster(skinMesh, e=True, ai=sub_influences,", "# ウェイト情報を保存する関数 def weightCopy(self): saveData = {} # 保存ディレクトリが無かったら作成 if", "common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='cut') influences = cmds.ls(influences, l=True, tr=True) # バインド", "srcSkinCluster[0] influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True) sub_influences = list(set(all_influences) -", "+ 1 if side:#対象が見つかってたら全部抜ける side_name = lr break if side:", "f=True), type ='transform') ad_node += [node]+children #print len(ad_node) objects =", ").output() if isinstance(skinMesh, list): # 転送元がリストだった場合、最初のメッシュのみ取り出す skinMesh = skinMesh[0] #", "= [] transferedMesh.append(temp) for dst in transferedMesh: #子供のノード退避用ダミーペアレントを用意 dummy =", "= 
list(set(all_influences)) saveData[';influences'] = all_influences #インフルエンス数の変化に耐えられるようにあらかじめAddしてからコピーするS for skinMesh in self.skinMeshes:", "self.weightCopy() if mode == 'paste': self.weightPaste() def weightPaste(self): dummy =", "= saveData[';skinningMethod'] dropoffRate = saveData[';dropoffRate'] maintainMaxInfluences = saveData[';maintainMaxInfluences'] maxInfluences =", "side:#対象が見つかってたら全部抜ける side_name = lr break if side: break if side:", ": ' + skinMesh # ダミー親削除 cmds.delete(dummy) cmds.select(self.skinMeshes, r=True) #", "スキンクラスタのパラメータ色々を取得しておく srcSkinCluster = srcSkinCluster[0] skinningMethod = cmds.getAttr(srcSkinCluster + ' .skm')", "right_list_list.append(r_list) except Exception as e: print e.message left_list_list.append(def_left_list_list[i]) right_list_list.append(def_right_list_list[i]) else:", "[' + dst + ']' dstSkinCluster = dstSkinCluster[0] if transferWeight:", "''' ジョイントラベル設定関数 object→オブジェクト、リスト形式可 visibility→ラベルの可視性、省略可能。デフォルトFalse。 ''' #ラベリングルールをロードしておく left_list_list, right_list_list = load_joint_label_rules()", "== 'auto': weightFile = skinMesh else: weightFile = self.weightFile #", "+ '.inMesh', s=True, d=False) cmds.copySkinWeights(ss=srcSkinCluster[0], ds=dstSkinCluster[0], mirrorMode='YZ', surfaceAssociation='closestComponent', influenceAssociation='label', normalize=True)", "os.remove(self.filePath + '\\\\' + file) skinFlag = False all_influences =", "break if side: break #print 'joint setting :', split_name, side,", "common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='parent') tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく except Exception as e:", "']' #親子付けを戻す common.TemporaryReparent().main(dst,dummyParent=dummy, mode='parent') #ダミーペアレントを削除 common.TemporaryReparent().main(dummyParent=dummy, mode='delete') if returnInfluences: return", "+ '.type', 18) new_joint_name = split_name.replace(side_name.replace('.', ''), '') # スケルトン名設定", ":', split_name, side, side_name # 左右のラベルを設定、どちらでもないときは中央 cmds.setAttr(skinJoint + 
'.side', side)", "cmds.deformerWeights(meshName + '.xml', export=True, deformer=srcSkinCluster, path=self.filePath + '\\\\') with open(self.fileName,", "ad=True, f=True), type ='transform') ad_node += [node]+children #print len(ad_node) objects", "dstNode is None: return transfer_weight(srcNode, dstNode, transferWeight=False, returnInfluences=True) dstShapes =", "self.pasteMode = {'index':1, 'nearest':3} # リストタイプじゃなかったらリストに変換する if not isinstance(self.skinMeshes, list):", "0 for skin in skin_list: cmds.setAttr(skin[0]+'.envelope', mute_flag) if mute_flag ==", "# スキンクラスタの有無を取得 dstSkinCluster = cmds.ls(cmds.listHistory(shapes[0]), type='skinCluster') # スキンクラスタがない場合はあらかじめ取得しておいた情報をもとにバインドする if not", "middle_file = dir_path+'/joint_rule_middle.json' end_file = dir_path+'/joint_rule_end.json' save_files = [start_file, middle_file,", ".output() msg05 = lang.Lang( en='Skinning is enabled', ja=u'スキニングが有効になりました') .output() cmds.selectMode(o=True)", "with open(self.fileName, 'r') as f: # ファイル開く'r'読み込みモード'w'書き込みモード saveData = json.load(f)", "selection.\\nWould you like to process all of mesh in this", "in objects: children = cmds.ls(cmds.listRelatives(node, ad=True, f=True), type ='transform') ad_node", "== 'index' or self.method == 'over': cmds.deformerWeights(xml_name, im=True, method=self.method, deformer=dstSkinCluster,", "mirrorMode='YZ', surfaceAssociation='closestComponent', influenceAssociation='label', normalize=True) def load_joint_label_rules(): #ロードできなかった時の初期値 start_l_list = ['L_',", "pa=True, type='mesh') dstSkinCluster = cmds.listConnections(dstShapes[0] + '.inMesh', s=True, d=False) cmds.copySkinWeights(ss=srcSkinCluster[0],", "'.inMesh', s=True, d=False) cmds.copySkinWeights(ss=srcSkinCluster[0], ds=dstSkinCluster[0], mirrorMode='YZ', surfaceAssociation='closestComponent', influenceAssociation='label', normalize=True) def", "else: left_list_list.append(def_left_list_list[i]) right_list_list.append(def_right_list_list[i]) return left_list_list, right_list_list 
def joint_label(object, visibility=False): '''", "cmds.setAttr(skinJoint + '.drawLabel', visibility) else: print(str(skinJoint) + ' : '", "in skin_list: cmds.setAttr(skin[0]+'.envelope', mute_flag) if mute_flag == 0: cmds.confirmDialog(m=msg04) if", "# スケルトン名設定 cmds.setAttr(skinJoint + '.otherType', new_joint_name, type='string') # 可視性設定 cmds.setAttr(skinJoint", "influences, omi=maintainMaxInfluences, mi=maxInfluences, dr=dropoffRate, sm=skinningMethod, nw=normalizeWeights, tsb=True, ) dstSkinCluster =", "mode='cut') shapes = cmds.listRelatives(dst, s=True, pa=True, type='mesh') if not shapes:", "temp = object object = [] object.append(temp) for skinJoint in", "cmds.spaceLocator() for skinMesh in self.skinMeshes: # 読みに行くセーブファイル名を指定、autoならメッシュ名 if self.weightFile ==", "'_L..', '_l..', '_Left', '_left'] end_r_list = ['_R', '_r', '_R.', '_r.',", "enumerate(save_files): if os.path.exists(save_file):#保存ファイルが存在したら try: with open(save_file, 'r') as f: save_data", "len(ad_node) objects = set(ad_node) #print len(objects) if not objects: all_mesh", "if side:#対象が見つかってたら全部抜ける side_name = lr break if side: break if", "file) skinFlag = False all_influences = [] for skinMesh in", "cmds.setAttr(skinJoint + '.type', 18) new_joint_name = split_name.replace(side_name.replace('.', ''), '') #", "ソース ポイントと頂点の距離に応じてウェイトを再スケールします。これは通常、高解像度メッシュにマッピングされる粗いメッシュで使用されます。 「over」法は「index」法に似ていますが、マッピング前に対象メッシュのウェイトがクリアされないため、一致していないインデックスのウェイトがそのまま維持されます。 nearest と barycentricは不具合のため現状仕様不可能(処理が終わらない)2016/11/03現在 →barycentric、bylinearはMaya2016Extention2から利用可能 weightFile→メッシュ名検索でなく手動指定したい場合にパスを指定。methodのnearest、barycentricとセットで使う感じ。 →Mayaコピー時にファイル名指定すると複数保存できないので注意。 threshold→nearest,barycentricの位置検索範囲", "isinstance(self.skinMeshes, list): temp = self.skinMeshes self.skinMeshes = [] self.skinMeshes.append(temp) #", "' + skinMesh continue else: dstSkinCluster = dstSkinCluster[0] tempSkinNode =", "import common import os import json import re class WeightCopyPaste():", "スキンクラスタがなかったら次に移行 
tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく # スキンクラスタのパラメータ色々を取得しておく srcSkinCluster = srcSkinCluster[0] skinningMethod", "ja=u'バインド状態を転送:' ).output() if isinstance(skinMesh, list): # 転送元がリストだった場合、最初のメッシュのみ取り出す skinMesh = skinMesh[0]", ".nw') influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True) # qフラグは照会モード、ちなみにeは編集モード # リストタイプじゃなかったらリストに変換する", "tsb=True, ) if logTransfer: print massege03 + '[' + skinMesh", "cmds.ls(cmds.listRelatives(node, ad=True, f=True), type ='transform') ad_node += [node]+children #print len(ad_node)", "this scene?.', ja=u'選択メッシュがありません。\\nシーン内のすべてのメッシュを処理しますか?').output() msg02 = lang.Lang(en='Yes', ja=u'はい').output() msg03 = lang.Lang(en='No',", "msg02 = lang.Lang(en='Yes', ja=u'はい').output() msg03 = lang.Lang(en='No', ja=u'いいえ').output() msg04 =", "dstSkinCluster = cmds.skinCluster( skinMesh, influences, omi=maintainMaxInfluences, mi=maxInfluences, dr=dropoffRate, sm=skinningMethod, nw=normalizeWeights,", "if not srcSkinCluster: continue # スキンクラスタがなかったら次に移行 tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく #", "self.saveName + '.skn') # コピーかペーストをそれぞれ呼び出し if mode == 'copy': self.weightCopy()", "+ '\\\\')) # 末尾\\\\が必要なので注意 else: # ある場合は中身を削除 files = os.listdir(self.filePath)", "self.skinMeshes = skinMeshes self.saveName = saveName self.method = method self.weightFile", "right_list_list.append(def_right_list_list[i]) else: left_list_list.append(def_left_list_list[i]) right_list_list.append(def_right_list_list[i]) return left_list_list, right_list_list def joint_label(object, visibility=False):", "left_list_list, right_list_list def joint_label(object, visibility=False): ''' ジョイントラベル設定関数 object→オブジェクト、リスト形式可 visibility→ラベルの可視性、省略可能。デフォルトFalse。 '''", "'.drawLabel', visibility) else: print(str(skinJoint) + ' : ' + str(objTypeName)", "lang.Lang( en='Skinning is enabled', ja=u'スキニングが有効になりました') .output() cmds.selectMode(o=True) objects = cmds.ls(sl=True,", "surfaceAssociation='closestComponent', influenceAssociation='label', 
normalize=True) def load_joint_label_rules(): #ロードできなかった時の初期値 start_l_list = ['L_', 'l_',", "if mode == 'paste': self.weightPaste() def weightPaste(self): dummy = cmds.spaceLocator()", "mode='parent') tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく except Exception as e: print e.message", "a=0.5) ''' ウェイトデータの保存、読み込み関数 mode→コピーするかペーストするか'copy'or'paste' saveName→ウェイトデータの保存フォルダ名。ツール、モデル名とかで分けたい場合に指定 method→ペーストの仕方,「index」、「nearest」、「barycentric」、「over」 「index」法は、頂点インデックスを使用してウェイトをオブジェクトにマッピングします。マッピング先のオブジェクトと書き出し後のデータのトポロジが同じ場合、これが最も便利な手法です。 「nearest」法は、読み込んだデータのニアレスト頂点を検索し、ウェイト値をその値に設定します。これは、高解像度メッシュを低解像度メッシュにマッピングする場合に最適です。 「barycentric」法はポリゴン メッシュでのみサポートされます。ターゲット", "スキンクラスタがあったらジョイントラベルを設定してウェイトミラー if srcSkinCluster: # バインド状態を転送する関数呼び出し skinJointAll = cmds.skinCluster(srcSkinCluster, q=True, inf=True)", "== 'over': cmds.deformerWeights(xml_name, im=True, method=self.method, deformer=dstSkinCluster, path=self.filePath + '\\\\') else:", "['_R', '_r', '_R.', '_r.', '_R..', '_r..', '_Right', '_right'] def_left_list_list =", "def_left_list_list = [start_l_list, mid_l_list, end_l_list] def_right_list_list = [start_r_list, mid_r_list, end_r_list]", "# qフラグは照会モード、ちなみにeは編集モード # リストタイプじゃなかったらリストに変換する if not isinstance(transferedMesh, list): temp =", "= [] for node in objects: skin = cmds.ls(cmds.listHistory(node), type='skinCluster')", "cmds.ls(cmds.listHistory(shapes[0]), type='skinCluster') # スキンクラスタがない場合はあらかじめ取得しておいた情報をもとにバインドする if not dstSkinCluster: # バインド dstSkinCluster", "end_file = dir_path+'/joint_rule_end.json' save_files = [start_file, middle_file, end_file] left_list_list =", "maxInfluences = cmds.getAttr(srcSkinCluster + ' .mi') bindMethod = cmds.getAttr(srcSkinCluster +", "type ='transform') ad_node += [node]+children #print len(ad_node) objects = set(ad_node)", "en='Skinning is disabled', ja=u'スキニングは無効になりました') .output() msg05 = lang.Lang( en='Skinning is", "mel from maya import cmds from . 
import lang from", "e: print e.message left_list_list.append(def_left_list_list[i]) right_list_list.append(def_right_list_list[i]) else: left_list_list.append(def_left_list_list[i]) right_list_list.append(def_right_list_list[i]) return left_list_list,", "cmds.skinCluster(srcSkinCluster, q=True, inf=True) # qフラグは照会モード、ちなみにeは編集モード # リストタイプじゃなかったらリストに変換する if not isinstance(transferedMesh,", "path=self.filePath + '\\\\') cmds.skinCluster(dstSkinCluster, e=True, forceNormalizeWeights=True) print 'Weight paste to", "print files if len(files) == 2: for file in files:", "xml_name = meshName + '.xml' if os.path.isfile(self.filePath + '\\\\' +", "returnInfluences→バインドされているインフルエンス情報を戻り値として返すかどうか。省略可能、デフォルトはFalse ''' massege01 = lang.Lang( en=': It does not perform", "self.skinMeshes: # 読みに行くセーブファイル名を指定、autoならメッシュ名 if self.weightFile == 'auto': weightFile = skinMesh", "[' + dst + ']' #親子付けを戻す common.TemporaryReparent().main(dst,dummyParent=dummy, mode='parent') #ダミーペアレントを削除 common.TemporaryReparent().main(dummyParent=dummy,", "# 子のノードがトランスフォームならダミーに親子付けして退避 common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='cut') influences = cmds.ls(influences, l=True, tr=True)", "import cmds from . import lang from . 
import common", "return transfer_weight(srcNode, dstNode, transferWeight=False, returnInfluences=True) dstShapes = cmds.listRelatives(dstNode, s=True, pa=True,", "バインド dstSkinCluster = cmds.skinCluster( dst, influences, omi=maintainMaxInfluences, mi=maxInfluences, dr=dropoffRate, sm=skinningMethod,", "== msg02: objects = cmds.ls(type='transform') if not objects: return mute_flag", "= saveData[';maxInfluences'] bindMethod = saveData[';bindMethod'] normalizeWeights = saveData[';normalizeWeights'] influences =", "transferWeight=False, returnInfluences=True) dstShapes = cmds.listRelatives(dstNode, s=True, pa=True, type='mesh') dstSkinCluster =", "self.saveName + '.json') self.apiName = os.path.join(self.filePath, self.saveName + '.skn') #", "side = j + 1 if i == 2: if", "class WeightCopyPaste(): def main(self, skinMeshes, mode='copy', saveName='default', method='index', weightFile='auto', threshold=0.2,", "= True all_influences = list(set(all_influences)) saveData[';influences'] = all_influences #インフルエンス数の変化に耐えられるようにあらかじめAddしてからコピーするS for", "os.getenv('MAYA_APP_DIR') + '\\\\Scripting_Files\\\\weight\\\\' + self.saveName elif path == 'project': self.scene_path", "def main(self, skinMeshes, mode='copy', saveName='default', method='index', weightFile='auto', threshold=0.2, engine='maya', tgt=1,", "# ファイル開く'r'読み込みモード'w'書き込みモード json.dump(saveData, f) def transfer_weight(skinMesh, transferedMesh, transferWeight=True, returnInfluences=False, logTransfer=True):", "f) def transfer_weight(skinMesh, transferedMesh, transferWeight=True, returnInfluences=False, logTransfer=True): ''' スキンウェイトの転送関数 転送先がバインドされていないオブジェクトの場合は転送元のバインド情報を元に自動バインド", "'_right_'] end_l_list = ['_L', '_l', '_L.', '_l.', '_L..', '_l..', '_Left',", "dstSkinCluster = dstSkinCluster[0] # 親子付けを戻す common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='parent') tempSkinNode =", "cmds.skinCluster(skinMesh, e=True, ai=sub_influences, lw=True, ug=True, wt=0, ps=0) if self.engine ==", "= saveName self.method 
= method self.weightFile = weightFile self.threshold =", "'\\\\')) # 末尾\\\\が必要なので注意 else: # ある場合は中身を削除 files = os.listdir(self.filePath) if", "(l_list, r_list) in enumerate(zip(left_list_list, right_list_list)): for j, lr_list in enumerate([l_list,", "'right_'] mid_l_list = ['_L_', '_l_', '_Left_', '_left_'] mid_r_list = ['_R_',", "= cmds.skinCluster( skinMesh, influences, omi=maintainMaxInfluences, mi=maxInfluences, dr=dropoffRate, sm=skinningMethod, nw=normalizeWeights, tsb=True,", "lr in enumerate(lr_list): if i == 0: if re.match(lr, split_name):", "= cmds.getAttr(srcSkinCluster + ' .mmi') maxInfluences = cmds.getAttr(srcSkinCluster + '", "left_list_list, right_list_list = load_joint_label_rules() # リストタイプじゃなかったらリストに変換する if not isinstance(object, list):", "influences skinFlag = True all_influences = list(set(all_influences)) saveData[';influences'] = all_influences", "of mesh in this scene?.', ja=u'選択メッシュがありません。\\nシーン内のすべてのメッシュを処理しますか?').output() msg02 = lang.Lang(en='Yes', ja=u'はい').output()", "e=True, ai=sub_influences, lw=True, ug=True, wt=0, ps=0) if self.engine == 'maya':", "子のノードがトランスフォームならダミーに親子付けして退避 common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='cut') influences = cmds.ls(influences, l=True, tr=True) #", "massege01 return False # スキンクラスタがなかったら関数抜ける # スキンクラスタのパラメータ色々を取得しておく srcSkinCluster = srcSkinCluster[0]", "not dstSkinCluster: meshName = str(weightFile).replace('|', '__pipe__') if os.path.exists(self.fileName): try: with", "1 if i == 2: if re.match(lr[::-1], split_name[::-1]): side =", "from maya import mel from maya import cmds from .", "# リストタイプじゃなかったらリストに変換する if not isinstance(self.skinMeshes, list): temp = self.skinMeshes self.skinMeshes", "+ ' .dr') maintainMaxInfluences = cmds.getAttr(srcSkinCluster + ' .mmi') maxInfluences", "not skin: continue skin_list.append(skin) if cmds.getAttr(skin[0]+'.envelope') > 0: mute_flag =", "cmds.getAttr(srcSkinCluster + ' .nw') influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True) 
#", "positionTolerance=self.threshold, path=self.filePath + '\\\\') cmds.skinCluster(dstSkinCluster, e=True, forceNormalizeWeights=True) print 'Weight paste", "if logTransfer: print massege03 + '[' + skinMesh + ']", "icn='question',ds=msg03) if all_mesh == msg02: objects = cmds.ls(type='transform') if not", "skinMesh, influences, omi=maintainMaxInfluences, mi=maxInfluences, dr=dropoffRate, sm=skinningMethod, nw=normalizeWeights, tsb=True, ) dstSkinCluster", "list): temp = object object = [] object.append(temp) for skinJoint", "f: # ファイル開く'r'読み込みモード'w'書き込みモード saveData = json.load(f) # ロード # self.visibility", "cmds.listRelatives(dst, s=True, pa=True, type='mesh') if not shapes: # もしメッシュがなかったら continue", ".mi') bindMethod = cmds.getAttr(srcSkinCluster + ' .bm') normalizeWeights = cmds.getAttr(srcSkinCluster", "influences #saveData[';influences'] = influences skinFlag = True all_influences = list(set(all_influences))", "open(self.fileName, 'r') as f: # ファイル開く'r'読み込みモード'w'書き込みモード saveData = json.load(f) #", "!! 
Skin bind failed : ' + skinMesh continue else:", "method self.weightFile = weightFile self.threshold = threshold self.engine = engine", "print massege03 + '[' + skinMesh + '] >>> ['", "skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく # スキンクラスタのパラメータ色々を取得しておく srcSkinCluster = srcSkinCluster[0] skinningMethod = cmds.getAttr(srcSkinCluster +", "noMirror=True ) if logTransfer: print massege02 + '[' + skinMesh", "# ラベルタイプを”その他”に設定 cmds.setAttr(skinJoint + '.type', 18) new_joint_name = split_name.replace(side_name.replace('.', ''),", "weightFile = skinMesh else: weightFile = self.weightFile # Pipeはファイル名に出来ないので変換しておく meshName", "right_list_list)): for j, lr_list in enumerate([l_list, r_list]): for k, lr", "list): temp = self.skinMeshes self.skinMeshes = [] self.skinMeshes.append(temp) # ファイルパスを生成しておく", "self.weightFile # Pipeはファイル名に出来ないので変換しておく meshName = str(weightFile).replace('|', '__pipe__') # コロンはファイル名に出来ないので変換しておく meshName", "pa=True, type='mesh') if not shapes: # もしメッシュがなかったら continue # 処理を中断して次のオブジェクトへ", "if files is not None: for file in files: os.remove(self.filePath", "massege03 + '[' + skinMesh + '] >>> [' +", "objects: return mute_flag = 1 skin_list = [] for node", "= srcSkinCluster[0] influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True) sub_influences = list(set(all_influences)", "mid_l_list, end_l_list] def_right_list_list = [start_r_list, mid_r_list, end_r_list] #左右対称設定ファイルからルールをロードする dir_path =", "self.threshold = threshold self.engine = engine self.memShapes = {} self.target", "influences = cmds.ls(influences, l=True, tr=True) # バインド dstSkinCluster = cmds.skinCluster(", "「over」法は「index」法に似ていますが、マッピング前に対象メッシュのウェイトがクリアされないため、一致していないインデックスのウェイトがそのまま維持されます。 nearest と barycentricは不具合のため現状仕様不可能(処理が終わらない)2016/11/03現在 →barycentric、bylinearはMaya2016Extention2から利用可能 weightFile→メッシュ名検索でなく手動指定したい場合にパスを指定。methodのnearest、barycentricとセットで使う感じ。 →Mayaコピー時にファイル名指定すると複数保存できないので注意。 threshold→nearest,barycentricの位置検索範囲 ''' self.skinMeshes", "dummy = 
common.TemporaryReparent().main(mode='create') common.TemporaryReparent().main(dst,dummyParent=dummy, mode='cut') shapes = cmds.listRelatives(dst, s=True, pa=True,", "dstSkinCluster = cmds.skinCluster( dst, influences, omi=maintainMaxInfluences, mi=maxInfluences, dr=dropoffRate, sm=skinningMethod, nw=normalizeWeights,", "self.weightFile = weightFile self.threshold = threshold self.engine = engine self.memShapes", "# ファイル開く'r'読み込みモード'w'書き込みモード saveData = json.load(f) # ロード # self.visibility =", "ss=srcSkinCluster, ds=dstSkinCluster, surfaceAssociation='closestPoint', influenceAssociation=['name', 'closestJoint', 'oneToOne'], normalize=True, noMirror=True ) if", "saveName='default', method='index', weightFile='auto', threshold=0.2, engine='maya', tgt=1, path='default', viewmsg=False): if viewmsg:", "self.skinMeshes self.skinMeshes = [] self.skinMeshes.append(temp) # ファイルパスを生成しておく if path ==", "= cmds.getAttr(srcSkinCluster + ' .dr') maintainMaxInfluences = cmds.getAttr(srcSkinCluster + '", "= lang.Lang( en='Transfer bind influences:', ja=u'バインド状態を転送:' ).output() if isinstance(skinMesh, list):", ": ' + str(objTypeName) + ' Skip Command') #ウェイトのミュートをトグル def", "self.method = method self.weightFile = weightFile self.threshold = threshold self.engine", "+= influences #saveData[';influences'] = influences skinFlag = True all_influences =", "type='mesh') dstSkinCluster = cmds.listConnections(dstShapes[0] + '.inMesh', s=True, d=False) cmds.copySkinWeights(ss=srcSkinCluster[0], ds=dstSkinCluster[0],", "cmds.inViewMessage( amg='<hl>Simple Weight</hl> : '+mode, pos='midCenterTop', fade=True, ta=0.75, a=0.5) '''", "dstSkinCluster: # バインド dstSkinCluster = cmds.skinCluster( dst, influences, omi=maintainMaxInfluences, mi=maxInfluences,", "[] for skinMesh in self.skinMeshes: try: cmds.bakePartialHistory(skinMesh, ppt=True) except: pass", "en='Skinning is enabled', ja=u'スキニングが有効になりました') .output() cmds.selectMode(o=True) objects = cmds.ls(sl=True, l=True)", "str(objTypeName) + ' Skip 
Command') #ウェイトのミュートをトグル def toggle_mute_skinning(): msg01 =", "skinJointAll = cmds.skinCluster(srcSkinCluster, q=True, inf=True) #ジョイントを取得 for skinJoint in skinJointAll:", "'_left_'] mid_r_list = ['_R_', '_r_', '_Right_', '_right_'] end_l_list = ['_L',", "cb=msg03, icn='question',ds=msg03) if all_mesh == msg02: objects = cmds.ls(type='transform') if", "if isinstance(skinMesh, list): # 転送元がリストだった場合、最初のメッシュのみ取り出す skinMesh = skinMesh[0] # リストを渡されたときのための保険", "is disabled', ja=u'スキニングは無効になりました') .output() msg05 = lang.Lang( en='Skinning is enabled',", "= dir_path+'/joint_rule_middle.json' end_file = dir_path+'/joint_rule_end.json' save_files = [start_file, middle_file, end_file]", "+ ' .skm') dropoffRate = cmds.getAttr(srcSkinCluster + ' .dr') maintainMaxInfluences", "= saveData[';influences'] # 子のノードがトランスフォームならダミーに親子付けして退避 common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='cut') influences = cmds.ls(influences,", "transferedMesh(リスト形式,複数可、リストじゃなくても大丈夫) transferWeight→ウェイトを転送するかどうか。省略可能、デフォルトはTrue logTransfer→ログ表示するかどうか returnInfluences→バインドされているインフルエンス情報を戻り値として返すかどうか。省略可能、デフォルトはFalse ''' massege01 = lang.Lang( en=': It", "os.makedirs(os.path.dirname(self.filePath + '\\\\')) # 末尾\\\\が必要なので注意 else: # ある場合は中身を削除 files =", "Skin bind failed : ' + skinMesh continue else: dstSkinCluster", "method='index', weightFile='auto', threshold=0.2, engine='maya', tgt=1, path='default', viewmsg=False): if viewmsg: cmds.inViewMessage(", "cmds.skinCluster(srcSkinCluster, q=True, inf=True) saveData[';skinningMethod'] = skinningMethod saveData[';dropoffRate'] = dropoffRate saveData[';maintainMaxInfluences']", "[msg02, msg03], db=msg02, cb=msg03, icn='question',ds=msg03) if all_mesh == msg02: objects", "= object object = [] object.append(temp) for skinJoint in object:", "# 処理を中断して次のオブジェクトへ # スキンクラスタの有無を取得 dstSkinCluster = cmds.ls(cmds.listHistory(shapes[0]), type='skinCluster') # スキンクラスタがない場合はあらかじめ取得しておいた情報をもとにバインドする", "end_file] left_list_list = [] right_list_list = [] for 
i, save_file", "1 if side:#対象が見つかってたら全部抜ける side_name = lr break if side: break", "['_R_', '_r_', '_Right_', '_right_'] end_l_list = ['_L', '_l', '_L.', '_l.',", "saveData[';normalizeWeights'] = normalizeWeights all_influences += influences #saveData[';influences'] = influences skinFlag", "msg03], db=msg02, cb=msg03, icn='question',ds=msg03) if all_mesh == msg02: objects =", "cmds.skinCluster(dstSkinCluster, e=True, forceNormalizeWeights=True) print 'Weight paste to : ' +", "= influences skinFlag = True all_influences = list(set(all_influences)) saveData[';influences'] =", "処理を中断して次のオブジェクトへ # スキンクラスタの有無を取得 dstSkinCluster = cmds.ls(cmds.listHistory(shapes[0]), type='skinCluster') # スキンクラスタがない場合はあらかじめ取得しておいた情報をもとにバインドする if", "' : ' + str(objTypeName) + ' Skip Command') #ウェイトのミュートをトグル", "+ '\\\\Scripting_Files\\\\weight\\\\' + self.saveName elif path == 'project': self.scene_path =", "if i == 0: if re.match(lr, split_name): side = j", "srcSkinCluster = srcSkinCluster[0] influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True) sub_influences =", "= ['R_', 'r_', 'Right_', 'right_'] mid_l_list = ['_L_', '_l_', '_Left_',", "massege03 = lang.Lang( en='Transfer bind influences:', ja=u'バインド状態を転送:' ).output() if isinstance(skinMesh,", "in enumerate(zip(left_list_list, right_list_list)): for j, lr_list in enumerate([l_list, r_list]): for", "print e.message return self.filePath = self.protect_pat+'\\\\' + self.saveName self.fileName =", "= normalizeWeights all_influences += influences #saveData[';influences'] = influences skinFlag =", "# スキンクラスタを取得 if srcNode is None: return srcShapes = cmds.listRelatives(srcNode,", "import re class WeightCopyPaste(): def main(self, skinMeshes, mode='copy', saveName='default', method='index',", "False or dstNode is None: return transfer_weight(srcNode, dstNode, transferWeight=False, returnInfluences=True)", "skin: continue skin_list.append(skin) if cmds.getAttr(skin[0]+'.envelope') > 0: mute_flag = 0", "try: if not 
os.path.exists(self.protect_path): os.makedirs(self.protect_path) except Exception as e: print", "ジオメトリのニアレスト三角を検索し、 ソース ポイントと頂点の距離に応じてウェイトを再スケールします。これは通常、高解像度メッシュにマッピングされる粗いメッシュで使用されます。 「over」法は「index」法に似ていますが、マッピング前に対象メッシュのウェイトがクリアされないため、一致していないインデックスのウェイトがそのまま維持されます。 nearest と barycentricは不具合のため現状仕様不可能(処理が終わらない)2016/11/03現在 →barycentric、bylinearはMaya2016Extention2から利用可能 weightFile→メッシュ名検索でなく手動指定したい場合にパスを指定。methodのnearest、barycentricとセットで使う感じ。 →Mayaコピー時にファイル名指定すると複数保存できないので注意。", "weightFile→メッシュ名検索でなく手動指定したい場合にパスを指定。methodのnearest、barycentricとセットで使う感じ。 →Mayaコピー時にファイル名指定すると複数保存できないので注意。 threshold→nearest,barycentricの位置検索範囲 ''' self.skinMeshes = skinMeshes self.saveName = saveName", "if logTransfer: print skinMesh + massege01 return False # スキンクラスタがなかったら関数抜ける", "= bindMethod saveData[';normalizeWeights'] = normalizeWeights all_influences += influences #saveData[';influences'] =", "f: save_data = json.load(f) l_list = save_data.keys() r_list = save_data.values()", ") dstSkinCluster = dstSkinCluster[0] # 親子付けを戻す common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='parent') tempSkinNode", "= lr break if side: break if side: break #print", "srcSkinCluster: continue # スキンクラスタがなかったら次に移行 tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく # スキンクラスタのパラメータ色々を取得しておく srcSkinCluster", "コロンはファイル名に出来ないので変換しておく meshName = str(meshName).replace(':', '__colon__') cmds.deformerWeights(meshName + '.xml', export=True, deformer=srcSkinCluster,", "if viewmsg: cmds.inViewMessage( amg='<hl>Simple Weight</hl> : '+mode, pos='midCenterTop', fade=True, ta=0.75,", "ad_node += [node]+children #print len(ad_node) objects = set(ad_node) #print len(objects)", "['R_', 'r_', 'Right_', 'right_'] mid_l_list = ['_L_', '_l_', '_Left_', '_left_']", "# 左右のラベルを設定、どちらでもないときは中央 cmds.setAttr(skinJoint + '.side', side) # ラベルタイプを”その他”に設定 cmds.setAttr(skinJoint +", "= tgt self.pasteMode = {'index':1, 'nearest':3} # リストタイプじゃなかったらリストに変換する if not", "if side: break #print 'joint setting :', 
split_name, side, side_name", "mode='parent') #ダミーペアレントを削除 common.TemporaryReparent().main(dummyParent=dummy, mode='delete') if returnInfluences: return influences else: return", "= cmds.confirmDialog(m=msg01, t='', b= [msg02, msg03], db=msg02, cb=msg03, icn='question',ds=msg03) if", "' .dr') maintainMaxInfluences = cmds.getAttr(srcSkinCluster + ' .mmi') maxInfluences =", "saveData[';bindMethod'] normalizeWeights = saveData[';normalizeWeights'] influences = saveData[';influences'] # 子のノードがトランスフォームならダミーに親子付けして退避 common.TemporaryReparent().main(skinMesh,", "0: if re.match(lr, split_name): side = j + 1 if", "split_name[::-1]): side = j + 1 if side:#対象が見つかってたら全部抜ける side_name =", "-*- coding: utf-8 -*- from maya import mel from maya", "b= [msg02, msg03], db=msg02, cb=msg03, icn='question',ds=msg03) if all_mesh == msg02:", "mid_r_list, end_r_list] #左右対称設定ファイルからルールをロードする dir_path = os.path.join( os.getenv('MAYA_APP_dir'), 'Scripting_Files') start_file =", "disabled', ja=u'スキニングは無効になりました') .output() msg05 = lang.Lang( en='Skinning is enabled', ja=u'スキニングが有効になりました')", "is not a skin mesh.', ja=u': スキンメッシュではないのでウェイトの転送を行いません' ).output() massege02 =", "if not isinstance(transferedMesh, list): temp = transferedMesh transferedMesh = []", "ps=0) if self.engine == 'maya': # 読みに行くセーブファイル名を指定、autoならメッシュ名 if self.weightFile ==", "saveName→ウェイトデータの保存フォルダ名。ツール、モデル名とかで分けたい場合に指定 method→ペーストの仕方,「index」、「nearest」、「barycentric」、「over」 「index」法は、頂点インデックスを使用してウェイトをオブジェクトにマッピングします。マッピング先のオブジェクトと書き出し後のデータのトポロジが同じ場合、これが最も便利な手法です。 「nearest」法は、読み込んだデータのニアレスト頂点を検索し、ウェイト値をその値に設定します。これは、高解像度メッシュを低解像度メッシュにマッピングする場合に最適です。 「barycentric」法はポリゴン メッシュでのみサポートされます。ターゲット ジオメトリのニアレスト三角を検索し、 ソース ポイントと頂点の距離に応じてウェイトを再スケールします。これは通常、高解像度メッシュにマッピングされる粗いメッシュで使用されます。 「over」法は「index」法に似ていますが、マッピング前に対象メッシュのウェイトがクリアされないため、一致していないインデックスのウェイトがそのまま維持されます。", "可視性設定 cmds.setAttr(skinJoint + '.drawLabel', visibility) else: print(str(skinJoint) + ' :", "+ ' .bm') normalizeWeights = cmds.getAttr(srcSkinCluster + ' .nw') 
influences", "' + str(skinMesh) else: print 'Not exist seved weight XML", "+ str(objTypeName) + ' Skip Command') #ウェイトのミュートをトグル def toggle_mute_skinning(): msg01", "#print len(ad_node) objects = set(ad_node) #print len(objects) if not objects:", "{'index':1, 'nearest':3} # リストタイプじゃなかったらリストに変換する if not isinstance(self.skinMeshes, list): temp =", "side, side_name # 左右のラベルを設定、どちらでもないときは中央 cmds.setAttr(skinJoint + '.side', side) # ラベルタイプを”その他”に設定", "Exception as e: print e.message return self.filePath = self.protect_pat+'\\\\' +", "→Mayaコピー時にファイル名指定すると複数保存できないので注意。 threshold→nearest,barycentricの位置検索範囲 ''' self.skinMeshes = skinMeshes self.saveName = saveName self.method", "+ ']' #親子付けを戻す common.TemporaryReparent().main(dst,dummyParent=dummy, mode='parent') #ダミーペアレントを削除 common.TemporaryReparent().main(dummyParent=dummy, mode='delete') if returnInfluences:", "end_r_list = ['_R', '_r', '_R.', '_r.', '_R..', '_r..', '_Right', '_right']", "ug=True, wt=0, ps=0) if self.engine == 'maya': # 読みに行くセーブファイル名を指定、autoならメッシュ名 if", "# スキンクラスタがなかったら関数抜ける # スキンクラスタのパラメータ色々を取得しておく srcSkinCluster = srcSkinCluster[0] skinningMethod = cmds.getAttr(srcSkinCluster", "as e: print e.message return self.filePath = self.protect_pat+'\\\\' + self.saveName", "''' ウェイトデータの保存、読み込み関数 mode→コピーするかペーストするか'copy'or'paste' saveName→ウェイトデータの保存フォルダ名。ツール、モデル名とかで分けたい場合に指定 method→ペーストの仕方,「index」、「nearest」、「barycentric」、「over」 「index」法は、頂点インデックスを使用してウェイトをオブジェクトにマッピングします。マッピング先のオブジェクトと書き出し後のデータのトポロジが同じ場合、これが最も便利な手法です。 「nearest」法は、読み込んだデータのニアレスト頂点を検索し、ウェイト値をその値に設定します。これは、高解像度メッシュを低解像度メッシュにマッピングする場合に最適です。 「barycentric」法はポリゴン メッシュでのみサポートされます。ターゲット ジオメトリのニアレスト三角を検索し、", "== 'maya': files = os.listdir(self.filePath) print files if len(files) ==", "self.method == 'over': cmds.deformerWeights(xml_name, im=True, method=self.method, deformer=dstSkinCluster, path=self.filePath + '\\\\')", "threshold self.engine = engine self.memShapes = {} self.target = tgt", "i == 1: if re.search(lr, split_name): side = j +", "the transfer 
of weight because it is not a skin", "srcShapes: srcSkinCluster = cmds.ls(cmds.listHistory(srcNode), type='skinCluster') # スキンクラスタがあったらジョイントラベルを設定してウェイトミラー if srcSkinCluster: #", "dstSkinCluster: meshName = str(weightFile).replace('|', '__pipe__') if os.path.exists(self.fileName): try: with open(self.fileName,", "visibility→ラベルの可視性、省略可能。デフォルトFalse。 ''' #ラベリングルールをロードしておく left_list_list, right_list_list = load_joint_label_rules() # リストタイプじゃなかったらリストに変換する if", ". import common import os import json import re class", "'default': self.filePath = os.getenv('MAYA_APP_DIR') + '\\\\Scripting_Files\\\\weight\\\\' + self.saveName elif path", "skinningMethod = cmds.getAttr(srcSkinCluster + ' .skm') dropoffRate = cmds.getAttr(srcSkinCluster +", "= ['_L', '_l', '_L.', '_l.', '_L..', '_l..', '_Left', '_left'] end_r_list", "open(self.fileName, 'w') as f: # ファイル開く'r'読み込みモード'w'書き込みモード json.dump(saveData, f) def transfer_weight(skinMesh,", "= method self.weightFile = weightFile self.threshold = threshold self.engine =", "18) new_joint_name = split_name.replace(side_name.replace('.', ''), '') # スケルトン名設定 cmds.setAttr(skinJoint +", "mode='cut') influences = cmds.ls(influences, l=True, tr=True) # バインド dstSkinCluster =", "logTransfer→ログ表示するかどうか returnInfluences→バインドされているインフルエンス情報を戻り値として返すかどうか。省略可能、デフォルトはFalse ''' massege01 = lang.Lang( en=': It does not", "right_list_list = load_joint_label_rules() # リストタイプじゃなかったらリストに変換する if not isinstance(object, list): temp", "ウェイトシンメトリする関数 srcNode→反転元 dstNode→反転先 symWeight→ウェイトミラーするかどうか ''' # スキンクラスタを取得 if srcNode is", "= cmds.ls(sl=True, l=True) ad_node = [] for node in objects:", "split_name): side = j + 1 if i == 2:", "os.listdir(self.filePath) if files is not None: for file in files:", "list(set(all_influences)) saveData[';influences'] = all_influences #インフルエンス数の変化に耐えられるようにあらかじめAddしてからコピーするS for skinMesh in self.skinMeshes: srcSkinCluster", "not os.path.exists(self.protect_path): os.makedirs(self.protect_path) except Exception as e: print e.message 
return", "path='default', viewmsg=False): if viewmsg: cmds.inViewMessage( amg='<hl>Simple Weight</hl> : '+mode, pos='midCenterTop',", "str(weightFile).replace('|', '__pipe__') if os.path.exists(self.fileName): try: with open(self.fileName, 'r') as f:", "# srcSkinCluster = cmds.listConnections(skinMesh+'.inMesh', s=True, d=False) if not srcSkinCluster: if", "' .mmi') maxInfluences = cmds.getAttr(srcSkinCluster + ' .mi') bindMethod =", "def weightPaste(self): dummy = cmds.spaceLocator() for skinMesh in self.skinMeshes: #", "for i, save_file in enumerate(save_files): if os.path.exists(save_file):#保存ファイルが存在したら try: with open(save_file,", "temp = transferedMesh transferedMesh = [] transferedMesh.append(temp) for dst in", "# スキンクラスタがない場合はあらかじめ取得しておいた情報をもとにバインドする if not dstSkinCluster: meshName = str(weightFile).replace('|', '__pipe__') if", "'Right_', 'right_'] mid_l_list = ['_L_', '_l_', '_Left_', '_left_'] mid_r_list =", "if side: break if side: break #print 'joint setting :',", "os import json import re class WeightCopyPaste(): def main(self, skinMeshes,", "skinningMethod saveData[';dropoffRate'] = dropoffRate saveData[';maintainMaxInfluences'] = maintainMaxInfluences saveData[';maxInfluences'] = maxInfluences", "symWeight=True): ''' ウェイトシンメトリする関数 srcNode→反転元 dstNode→反転先 symWeight→ウェイトミラーするかどうか ''' # スキンクラスタを取得 if", "skin_list: cmds.setAttr(skin[0]+'.envelope', mute_flag) if mute_flag == 0: cmds.confirmDialog(m=msg04) if mute_flag", "+ '.otherType', new_joint_name, type='string') # 可視性設定 cmds.setAttr(skinJoint + '.drawLabel', visibility)", "saveName self.method = method self.weightFile = weightFile self.threshold = threshold", "save_data.values() left_list_list.append(l_list) right_list_list.append(r_list) except Exception as e: print e.message left_list_list.append(def_left_list_list[i])", "= lang.Lang( en='Transfer the weight:', ja=u'ウェイトを転送:' ).output() massege03 = lang.Lang(", "maintainMaxInfluences saveData[';maxInfluences'] = maxInfluences 
saveData[';bindMethod'] = bindMethod saveData[';normalizeWeights'] = normalizeWeights", "+ '\\\\') else: cmds.deformerWeights(xml_name, im=True, deformer=dstSkinCluster, method=self.method, worldSpace=True, positionTolerance=self.threshold, path=self.filePath", "= str(meshName).replace(':', '__colon__') cmds.deformerWeights(meshName + '.xml', export=True, deformer=srcSkinCluster, path=self.filePath +", "all_influences #インフルエンス数の変化に耐えられるようにあらかじめAddしてからコピーするS for skinMesh in self.skinMeshes: srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')", "skin mesh.', ja=u': スキンメッシュではないのでウェイトの転送を行いません' ).output() massege02 = lang.Lang( en='Transfer the", "srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') # srcSkinCluster = cmds.listConnections(skinMesh+'.inMesh', s=True, d=False)", "break if side: break if side: break #print 'joint setting", "skinMeshes self.saveName = saveName self.method = method self.weightFile = weightFile", "None: for file in files: os.remove(self.filePath + '\\\\' + file)", "mute_flag = 0 for skin in skin_list: cmds.setAttr(skin[0]+'.envelope', mute_flag) if", "'copy': self.weightCopy() if mode == 'paste': self.weightPaste() def weightPaste(self): dummy", "= cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') # srcSkinCluster = cmds.listConnections(skinMesh+'.inMesh', s=True, d=False) if", "left_list_list.append(def_left_list_list[i]) right_list_list.append(def_right_list_list[i]) else: left_list_list.append(def_left_list_list[i]) right_list_list.append(def_right_list_list[i]) return left_list_list, right_list_list def joint_label(object,", "cmds.ls(type='transform') if not objects: return mute_flag = 1 skin_list =", "= split_name.replace(side_name.replace('.', ''), '') # スケルトン名設定 cmds.setAttr(skinJoint + '.otherType', new_joint_name,", "side = j + 1 if i == 1: if", "'r') as f: save_data = json.load(f) l_list = save_data.keys() r_list", "= os.listdir(self.filePath) print files if len(files) == 2: for file", 
"# ノードの中からスキンクラスタを取得してくる#inMesh直上がSkinClusterとは限らないので修正 srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') if not srcSkinCluster: continue", "+ '[' + skinMesh + '] >>> [' + dst", "srcSkinCluster = cmds.ls(cmds.listHistory(srcNode), type='skinCluster') # スキンクラスタがあったらジョイントラベルを設定してウェイトミラー if srcSkinCluster: # バインド状態を転送する関数呼び出し", "# Pipeはファイル名に出来ないので変換しておく meshName = str(weightFile).replace('|', '__pipe__') # コロンはファイル名に出来ないので変換しておく meshName =", "= 0 for skin in skin_list: cmds.setAttr(skin[0]+'.envelope', mute_flag) if mute_flag", "save_file in enumerate(save_files): if os.path.exists(save_file):#保存ファイルが存在したら try: with open(save_file, 'r') as", "ds=dstSkinCluster[0], mirrorMode='YZ', surfaceAssociation='closestComponent', influenceAssociation='label', normalize=True) def load_joint_label_rules(): #ロードできなかった時の初期値 start_l_list =", "# もしメッシュがなかったら continue # 処理を中断して次のオブジェクトへ # スキンクラスタの有無を取得 dstSkinCluster = cmds.ls(cmds.listHistory(shapes[0]),", "influences else: return True def symmetry_weight(srcNode=None, dstNode=None, symWeight=True): ''' ウェイトシンメトリする関数", "転送元がリストだった場合、最初のメッシュのみ取り出す skinMesh = skinMesh[0] # リストを渡されたときのための保険 # ノードの中からスキンクラスタを取得してくる#inMesh直上がSkinClusterとは限らないので修正 srcSkinCluster =", "== 'copy': self.weightCopy() if mode == 'paste': self.weightPaste() def weightPaste(self):", "左右のラベルを設定、どちらでもないときは中央 cmds.setAttr(skinJoint + '.side', side) # ラベルタイプを”その他”に設定 cmds.setAttr(skinJoint + '.type',", "srcSkinCluster: # バインド状態を転送する関数呼び出し skinJointAll = cmds.skinCluster(srcSkinCluster, q=True, inf=True) #ジョイントを取得 for", "weightFile = self.weightFile # Pipeはファイル名に出来ないので変換しておく meshName = str(weightFile).replace('|', '__pipe__') #", "メッシュでのみサポートされます。ターゲット ジオメトリのニアレスト三角を検索し、 ソース ポイントと頂点の距離に応じてウェイトを再スケールします。これは通常、高解像度メッシュにマッピングされる粗いメッシュで使用されます。 「over」法は「index」法に似ていますが、マッピング前に対象メッシュのウェイトがクリアされないため、一致していないインデックスのウェイトがそのまま維持されます。 nearest と barycentricは不具合のため現状仕様不可能(処理が終わらない)2016/11/03現在 →barycentric、bylinearはMaya2016Extention2から利用可能 
weightFile→メッシュ名検索でなく手動指定したい場合にパスを指定。methodのnearest、barycentricとセットで使う感じ。", "skinMesh else: weightFile = self.weightFile # Pipeはファイル名に出来ないので変換しておく meshName = str(weightFile).replace('|',", "file else: # Pipeはファイル名に出来ないので変換しておく meshName = str(weightFile).replace('|', '__pipe__') # コロンはファイル名に出来ないので変換しておく", "+ file) skinFlag = False all_influences = [] for skinMesh", "# バインド dstSkinCluster = cmds.skinCluster( skinMesh, influences, omi=maintainMaxInfluences, mi=maxInfluences, dr=dropoffRate,", "shapes = cmds.listRelatives(dst, s=True, pa=True, type='mesh') if not shapes: #", "skinMesh else: weightFile = self.weightFile dstSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') #", "+ '.xml', export=True, deformer=srcSkinCluster, path=self.filePath + '\\\\') with open(self.fileName, 'w')", "right_list_list def joint_label(object, visibility=False): ''' ジョイントラベル設定関数 object→オブジェクト、リスト形式可 visibility→ラベルの可視性、省略可能。デフォルトFalse。 ''' #ラベリングルールをロードしておく", "lang.Lang( en='Transfer the weight:', ja=u'ウェイトを転送:' ).output() massege03 = lang.Lang( en='Transfer", "cmds.setAttr(skin[0]+'.envelope', mute_flag) if mute_flag == 0: cmds.confirmDialog(m=msg04) if mute_flag ==", "ja=u'スキニングが有効になりました') .output() cmds.selectMode(o=True) objects = cmds.ls(sl=True, l=True) ad_node = []", "maintainMaxInfluences = cmds.getAttr(srcSkinCluster + ' .mmi') maxInfluences = cmds.getAttr(srcSkinCluster +", "tgt=1, path='default', viewmsg=False): if viewmsg: cmds.inViewMessage( amg='<hl>Simple Weight</hl> : '+mode,", "srcNode is None: return srcShapes = cmds.listRelatives(srcNode, s=True, pa=True, type='mesh')", "symWeight→ウェイトミラーするかどうか ''' # スキンクラスタを取得 if srcNode is None: return srcShapes", "= skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく if self.engine == 'maya': files = os.listdir(self.filePath) print", "'.side', side) # ラベルタイプを”その他”に設定 cmds.setAttr(skinJoint + '.type', 18) new_joint_name =", "= common.TemporaryReparent().main(mode='create') common.TemporaryReparent().main(dst,dummyParent=dummy, 
mode='cut') shapes = cmds.listRelatives(dst, s=True, pa=True, type='mesh')", "if not shapes: # もしメッシュがなかったら continue # 処理を中断して次のオブジェクトへ # スキンクラスタの有無を取得", "self.engine = engine self.memShapes = {} self.target = tgt self.pasteMode", "= str(weightFile).replace('|', '__pipe__') # コロンはファイル名に出来ないので変換しておく meshName = str(meshName).replace(':', '__colon__') xml_name", "+ skinMesh continue else: dstSkinCluster = dstSkinCluster[0] tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく", "not isinstance(object, list): temp = object object = [] object.append(temp)", "bindMethod saveData[';normalizeWeights'] = normalizeWeights all_influences += influences #saveData[';influences'] = influences", "コロンはファイル名に出来ないので変換しておく meshName = str(meshName).replace(':', '__colon__') xml_name = meshName + '.xml'", "+ '.drawLabel', visibility) else: print(str(skinJoint) + ' : ' +", "= lang.Lang( en='Skinning is enabled', ja=u'スキニングが有効になりました') .output() cmds.selectMode(o=True) objects =", "self.saveName elif path == 'project': self.scene_path = '/'.join(cmds.file(q=True, sceneName=True).split('/')[:-1]) self.protect_path", ").output() massege03 = lang.Lang( en='Transfer bind influences:', ja=u'バインド状態を転送:' ).output() if", "dropoffRate = saveData[';dropoffRate'] maintainMaxInfluences = saveData[';maintainMaxInfluences'] maxInfluences = saveData[';maxInfluences'] bindMethod", "for node in objects: skin = cmds.ls(cmds.listHistory(node), type='skinCluster') if not", "logTransfer: print massege03 + '[' + skinMesh + '] >>>", "mode='copy', saveName='default', method='index', weightFile='auto', threshold=0.2, engine='maya', tgt=1, path='default', viewmsg=False): if", "isinstance(object, list): temp = object object = [] object.append(temp) for", "children = cmds.ls(cmds.listRelatives(node, ad=True, f=True), type ='transform') ad_node += [node]+children", "self.weightFile dstSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') # スキンクラスタがない場合はあらかじめ取得しておいた情報をもとにバインドする if not dstSkinCluster:", 
"r_list) in enumerate(zip(left_list_list, right_list_list)): for j, lr_list in enumerate([l_list, r_list]):", "'_right'] def_left_list_list = [start_l_list, mid_l_list, end_l_list] def_right_list_list = [start_r_list, mid_r_list,", "transferedMesh transferedMesh = [] transferedMesh.append(temp) for dst in transferedMesh: #子供のノード退避用ダミーペアレントを用意", "= self.skinMeshes self.skinMeshes = [] self.skinMeshes.append(temp) # ファイルパスを生成しておく if path", "= cmds.listConnections(dstShapes[0] + '.inMesh', s=True, d=False) cmds.copySkinWeights(ss=srcSkinCluster[0], ds=dstSkinCluster[0], mirrorMode='YZ', surfaceAssociation='closestComponent',", "''' massege01 = lang.Lang( en=': It does not perform the", "+ self.saveName self.fileName = os.path.join(self.filePath, self.saveName + '.json') self.apiName =", "viewmsg=False): if viewmsg: cmds.inViewMessage( amg='<hl>Simple Weight</hl> : '+mode, pos='midCenterTop', fade=True,", "deformer=srcSkinCluster, path=self.filePath + '\\\\') with open(self.fileName, 'w') as f: #", "setting :', split_name, side, side_name # 左右のラベルを設定、どちらでもないときは中央 cmds.setAttr(skinJoint + '.side',", "''), '') # スケルトン名設定 cmds.setAttr(skinJoint + '.otherType', new_joint_name, type='string') #", "nw=normalizeWeights, tsb=True, ) dstSkinCluster = dstSkinCluster[0] # 親子付けを戻す common.TemporaryReparent().main(skinMesh, dummyParent=dummy,", ": ' + skinMesh continue else: dstSkinCluster = dstSkinCluster[0] tempSkinNode", "set(ad_node) #print len(objects) if not objects: all_mesh = cmds.confirmDialog(m=msg01, t='',", "not shapes: # もしメッシュがなかったら continue # 処理を中断して次のオブジェクトへ # スキンクラスタの有無を取得 dstSkinCluster", "if not os.path.exists(self.filePath): os.makedirs(os.path.dirname(self.filePath + '\\\\')) # 末尾\\\\が必要なので注意 else: #", "'nearest':3} # リストタイプじゃなかったらリストに変換する if not isinstance(self.skinMeshes, list): temp = self.skinMeshes", "returnInfluences: return influences else: return True def symmetry_weight(srcNode=None, dstNode=None, symWeight=True):", "scene?.', 
ja=u'選択メッシュがありません。\\nシーン内のすべてのメッシュを処理しますか?').output() msg02 = lang.Lang(en='Yes', ja=u'はい').output() msg03 = lang.Lang(en='No', ja=u'いいえ').output()", "+ '.json') self.apiName = os.path.join(self.filePath, self.saveName + '.skn') # コピーかペーストをそれぞれ呼び出し", "'maya': files = os.listdir(self.filePath) print files if len(files) == 2:", "for file in files: os.remove(self.filePath + '\\\\' + file) skinFlag", "influences = saveData[';influences'] # 子のノードがトランスフォームならダミーに親子付けして退避 common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='cut') influences =", "# 読みに行くセーブファイル名を指定、autoならメッシュ名 if self.weightFile == 'auto': weightFile = skinMesh else:", "object object = [] object.append(temp) for skinJoint in object: objTypeName", "json.dump(saveData, f) def transfer_weight(skinMesh, transferedMesh, transferWeight=True, returnInfluences=False, logTransfer=True): ''' スキンウェイトの転送関数", "WeightCopyPaste(): def main(self, skinMeshes, mode='copy', saveName='default', method='index', weightFile='auto', threshold=0.2, engine='maya',", "self.method == 'index' or self.method == 'over': cmds.deformerWeights(xml_name, im=True, method=self.method,", "skinMesh[0] # リストを渡されたときのための保険 # ノードの中からスキンクラスタを取得してくる#inMesh直上がSkinClusterとは限らないので修正 srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') #", ". import lang from . 
import common import os import", "= cmds.ls(cmds.listHistory(node), type='skinCluster') if not skin: continue skin_list.append(skin) if cmds.getAttr(skin[0]+'.envelope')", "type='skinCluster') if not srcSkinCluster: continue # スキンクラスタがなかったら次に移行 tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく", "return False # スキンクラスタがなかったら関数抜ける # スキンクラスタのパラメータ色々を取得しておく srcSkinCluster = srcSkinCluster[0] skinningMethod", "# ロード # self.visibility = saveData['visibility']#セーブデータ読み込み skinningMethod = saveData[';skinningMethod'] dropoffRate", "''' # スキンクラスタを取得 if srcNode is None: return srcShapes =", ">>> [' + dst + ']' #親子付けを戻す common.TemporaryReparent().main(dst,dummyParent=dummy, mode='parent') #ダミーペアレントを削除", "toggle_mute_skinning(): msg01 = lang.Lang( en='No mesh selection.\\nWould you like to", "+ ' .nw') influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True) # qフラグは照会モード、ちなみにeは編集モード", "-*- from maya import mel from maya import cmds from", "0 side_name = '' for i, (l_list, r_list) in enumerate(zip(left_list_list,", "k, lr in enumerate(lr_list): if i == 0: if re.match(lr,", "self.engine == 'maya': # 読みに行くセーブファイル名を指定、autoならメッシュ名 if self.weightFile == 'auto': weightFile", "dropoffRate saveData[';maintainMaxInfluences'] = maintainMaxInfluences saveData[';maxInfluences'] = maxInfluences saveData[';bindMethod'] = bindMethod", "スキンクラスタの有無を取得 dstSkinCluster = cmds.ls(cmds.listHistory(shapes[0]), type='skinCluster') # スキンクラスタがない場合はあらかじめ取得しておいた情報をもとにバインドする if not dstSkinCluster:", "load_joint_label_rules() # リストタイプじゃなかったらリストに変換する if not isinstance(object, list): temp = object", "len(files) == 2: for file in files: name, ext =", "if srcNode is None: return srcShapes = cmds.listRelatives(srcNode, s=True, pa=True,", "ファイル開く'r'読み込みモード'w'書き込みモード saveData = json.load(f) # ロード # self.visibility = saveData['visibility']#セーブデータ読み込み", "import os import json import re class WeightCopyPaste(): def main(self,", "str(meshName).replace(':', '__colon__') xml_name = meshName + '.xml' if 
os.path.isfile(self.filePath +", "dir_path+'/joint_rule_start.json' middle_file = dir_path+'/joint_rule_middle.json' end_file = dir_path+'/joint_rule_end.json' save_files = [start_file,", "continue # 処理を中断して次のオブジェクトへ # スキンクラスタの有無を取得 dstSkinCluster = cmds.ls(cmds.listHistory(shapes[0]), type='skinCluster') #", ".output() cmds.selectMode(o=True) objects = cmds.ls(sl=True, l=True) ad_node = [] for", "= 1 skin_list = [] for node in objects: skin", "not srcSkinCluster: if logTransfer: print skinMesh + massege01 return False", "= dstSkinCluster[0] # 親子付けを戻す common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='parent') tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく", "side_name = '' for i, (l_list, r_list) in enumerate(zip(left_list_list, right_list_list)):", "cmds.skinCluster(srcSkinCluster, q=True, inf=True) sub_influences = list(set(all_influences) - set(influences)) if sub_influences:", "cmds.getAttr(srcSkinCluster + ' .mi') bindMethod = cmds.getAttr(srcSkinCluster + ' .bm')", "enabled', ja=u'スキニングが有効になりました') .output() cmds.selectMode(o=True) objects = cmds.ls(sl=True, l=True) ad_node =", "#ダミーペアレントを削除 common.TemporaryReparent().main(dummyParent=dummy, mode='delete') if returnInfluences: return influences else: return True", "= ['L_', 'l_', 'Left_', 'left_'] start_r_list = ['R_', 'r_', 'Right_',", "os.path.join( os.getenv('MAYA_APP_dir'), 'Scripting_Files') start_file = dir_path+'/joint_rule_start.json' middle_file = dir_path+'/joint_rule_middle.json' end_file", "= os.path.join( os.getenv('MAYA_APP_dir'), 'Scripting_Files') start_file = dir_path+'/joint_rule_start.json' middle_file = dir_path+'/joint_rule_middle.json'", "skinFlag = True all_influences = list(set(all_influences)) saveData[';influences'] = all_influences #インフルエンス数の変化に耐えられるようにあらかじめAddしてからコピーするS", "skinMesh in self.skinMeshes: try: cmds.bakePartialHistory(skinMesh, ppt=True) except: pass # ノードの中からスキンクラスタを取得してくる#inMesh直上がSkinClusterとは限らないので修正", "right_list_list = [] for i, save_file in 
enumerate(save_files): if os.path.exists(save_file):#保存ファイルが存在したら", "i == 0: if re.match(lr, split_name): side = j +", "coding: utf-8 -*- from maya import mel from maya import", "deformer=dstSkinCluster, method=self.method, worldSpace=True, positionTolerance=self.threshold, path=self.filePath + '\\\\') cmds.skinCluster(dstSkinCluster, e=True, forceNormalizeWeights=True)", "pos='midCenterTop', fade=True, ta=0.75, a=0.5) ''' ウェイトデータの保存、読み込み関数 mode→コピーするかペーストするか'copy'or'paste' saveName→ウェイトデータの保存フォルダ名。ツール、モデル名とかで分けたい場合に指定 method→ペーストの仕方,「index」、「nearest」、「barycentric」、「over」 「index」法は、頂点インデックスを使用してウェイトをオブジェクトにマッピングします。マッピング先のオブジェクトと書き出し後のデータのトポロジが同じ場合、これが最も便利な手法です。", "+ '.skn') # コピーかペーストをそれぞれ呼び出し if mode == 'copy': self.weightCopy() if", "of weight because it is not a skin mesh.', ja=u':", "'__pipe__') # コロンはファイル名に出来ないので変換しておく meshName = str(meshName).replace(':', '__colon__') cmds.deformerWeights(meshName + '.xml',", "'_Left_', '_left_'] mid_r_list = ['_R_', '_r_', '_Right_', '_right_'] end_l_list =", "転送先がバインドされていないオブジェクトの場合は転送元のバインド情報を元に自動バインド ・引数 skinMesh→転送元メッシュ(1個,リスト形式でも可) transferedMesh(リスト形式,複数可、リストじゃなくても大丈夫) transferWeight→ウェイトを転送するかどうか。省略可能、デフォルトはTrue logTransfer→ログ表示するかどうか returnInfluences→バインドされているインフルエンス情報を戻り値として返すかどうか。省略可能、デフォルトはFalse ''' massege01 =", "self.filePath = self.protect_pat+'\\\\' + self.saveName self.fileName = os.path.join(self.filePath, self.saveName +", "def_right_list_list = [start_r_list, mid_r_list, end_r_list] #左右対称設定ファイルからルールをロードする dir_path = os.path.join( os.getenv('MAYA_APP_dir'),", "left_list_list = [] right_list_list = [] for i, save_file in", "logTransfer=True): ''' スキンウェイトの転送関数 転送先がバインドされていないオブジェクトの場合は転送元のバインド情報を元に自動バインド ・引数 skinMesh→転送元メッシュ(1個,リスト形式でも可) transferedMesh(リスト形式,複数可、リストじゃなくても大丈夫) transferWeight→ウェイトを転送するかどうか。省略可能、デフォルトはTrue logTransfer→ログ表示するかどうか returnInfluences→バインドされているインフルエンス情報を戻り値として返すかどうか。省略可能、デフォルトはFalse", "= transferedMesh transferedMesh = [] transferedMesh.append(temp) for dst in transferedMesh:", 
"save_files = [start_file, middle_file, end_file] left_list_list = [] right_list_list =", "dstSkinCluster = dstSkinCluster[0] if transferWeight: cmds.copySkinWeights( ss=srcSkinCluster, ds=dstSkinCluster, surfaceAssociation='closestPoint', influenceAssociation=['name',", "ファイルパスを生成しておく if path == 'default': self.filePath = os.getenv('MAYA_APP_DIR') + '\\\\Scripting_Files\\\\weight\\\\'", "object: objTypeName = cmds.objectType(skinJoint) if objTypeName == 'joint': split_name =", "skinMesh→転送元メッシュ(1個,リスト形式でも可) transferedMesh(リスト形式,複数可、リストじゃなくても大丈夫) transferWeight→ウェイトを転送するかどうか。省略可能、デフォルトはTrue logTransfer→ログ表示するかどうか returnInfluences→バインドされているインフルエンス情報を戻り値として返すかどうか。省略可能、デフォルトはFalse ''' massege01 = lang.Lang( en=':", "r=True) # ウェイト情報を保存する関数 def weightCopy(self): saveData = {} # 保存ディレクトリが無かったら作成", "= dir_path+'/joint_rule_end.json' save_files = [start_file, middle_file, end_file] left_list_list = []", "cmds.deformerWeights(xml_name, im=True, method=self.method, deformer=dstSkinCluster, path=self.filePath + '\\\\') else: cmds.deformerWeights(xml_name, im=True,", "shapes: # もしメッシュがなかったら continue # 処理を中断して次のオブジェクトへ # スキンクラスタの有無を取得 dstSkinCluster =", "meshName = str(weightFile).replace('|', '__pipe__') if os.path.exists(self.fileName): try: with open(self.fileName, 'r')", "for skinJoint in object: objTypeName = cmds.objectType(skinJoint) if objTypeName ==", "Exception as e: print e.message left_list_list.append(def_left_list_list[i]) right_list_list.append(def_right_list_list[i]) else: left_list_list.append(def_left_list_list[i]) right_list_list.append(def_right_list_list[i])", "ja=u'スキニングは無効になりました') .output() msg05 = lang.Lang( en='Skinning is enabled', ja=u'スキニングが有効になりました') .output()", "seved weight XML file : ' + skinMesh # ダミー親削除", "objTypeName == 'joint': split_name = skinJoint.split('|')[-1] # スケルトン名にLRが含まれているかどうかを判定 side =", "type='skinCluster') # スキンクラスタがない場合はあらかじめ取得しておいた情報をもとにバインドする if not dstSkinCluster: meshName = str(weightFile).replace('|', '__pipe__')", "= 
meshName + '.xml' if os.path.isfile(self.filePath + '\\\\' + xml_name):", "'__pipe__') if os.path.exists(self.fileName): try: with open(self.fileName, 'r') as f: #", "None: return transfer_weight(srcNode, dstNode, transferWeight=False, returnInfluences=True) dstShapes = cmds.listRelatives(dstNode, s=True,", "# バインド状態を転送する関数呼び出し skinJointAll = cmds.skinCluster(srcSkinCluster, q=True, inf=True) #ジョイントを取得 for skinJoint", "enumerate(zip(left_list_list, right_list_list)): for j, lr_list in enumerate([l_list, r_list]): for k,", "+ '.side', side) # ラベルタイプを”その他”に設定 cmds.setAttr(skinJoint + '.type', 18) new_joint_name", "if all_mesh == msg02: objects = cmds.ls(type='transform') if not objects:", "tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく # スキンクラスタのパラメータ色々を取得しておく srcSkinCluster = srcSkinCluster[0] skinningMethod =", "tr=True) # バインド dstSkinCluster = cmds.skinCluster( skinMesh, influences, omi=maintainMaxInfluences, mi=maxInfluences,", "skinningMethod = saveData[';skinningMethod'] dropoffRate = saveData[';dropoffRate'] maintainMaxInfluences = saveData[';maintainMaxInfluences'] maxInfluences", "s=True, pa=True, type='mesh') dstSkinCluster = cmds.listConnections(dstShapes[0] + '.inMesh', s=True, d=False)", "to process all of mesh in this scene?.', ja=u'選択メッシュがありません。\\nシーン内のすべてのメッシュを処理しますか?').output() msg02", "''' ウェイトシンメトリする関数 srcNode→反転元 dstNode→反転先 symWeight→ウェイトミラーするかどうか ''' # スキンクラスタを取得 if srcNode", "method→ペーストの仕方,「index」、「nearest」、「barycentric」、「over」 「index」法は、頂点インデックスを使用してウェイトをオブジェクトにマッピングします。マッピング先のオブジェクトと書き出し後のデータのトポロジが同じ場合、これが最も便利な手法です。 「nearest」法は、読み込んだデータのニアレスト頂点を検索し、ウェイト値をその値に設定します。これは、高解像度メッシュを低解像度メッシュにマッピングする場合に最適です。 「barycentric」法はポリゴン メッシュでのみサポートされます。ターゲット ジオメトリのニアレスト三角を検索し、 ソース ポイントと頂点の距離に応じてウェイトを再スケールします。これは通常、高解像度メッシュにマッピングされる粗いメッシュで使用されます。 「over」法は「index」法に似ていますが、マッピング前に対象メッシュのウェイトがクリアされないため、一致していないインデックスのウェイトがそのまま維持されます。 nearest", "= json.load(f) # ロード # self.visibility = saveData['visibility']#セーブデータ読み込み skinningMethod =", "== 0: if 
re.match(lr, split_name): side = j + 1", "break #print 'joint setting :', split_name, side, side_name # 左右のラベルを設定、どちらでもないときは中央", "= cmds.objectType(skinJoint) if objTypeName == 'joint': split_name = skinJoint.split('|')[-1] #", "it is not a skin mesh.', ja=u': スキンメッシュではないのでウェイトの転送を行いません' ).output() massege02", "re.match(lr[::-1], split_name[::-1]): side = j + 1 if side:#対象が見つかってたら全部抜ける side_name", "barycentricは不具合のため現状仕様不可能(処理が終わらない)2016/11/03現在 →barycentric、bylinearはMaya2016Extention2から利用可能 weightFile→メッシュ名検索でなく手動指定したい場合にパスを指定。methodのnearest、barycentricとセットで使う感じ。 →Mayaコピー時にファイル名指定すると複数保存できないので注意。 threshold→nearest,barycentricの位置検索範囲 ''' self.skinMeshes = skinMeshes self.saveName", "= cmds.ls(cmds.listHistory(shapes[0]), type='skinCluster') # スキンクラスタがない場合はあらかじめ取得しておいた情報をもとにバインドする if not dstSkinCluster: # バインド", "dropoffRate = cmds.getAttr(srcSkinCluster + ' .dr') maintainMaxInfluences = cmds.getAttr(srcSkinCluster +", "deformer=dstSkinCluster, path=self.filePath + '\\\\') else: cmds.deformerWeights(xml_name, im=True, deformer=dstSkinCluster, method=self.method, worldSpace=True,", "re.match(lr, split_name): side = j + 1 if i ==", "cmds.copySkinWeights( ss=srcSkinCluster, ds=dstSkinCluster, surfaceAssociation='closestPoint', influenceAssociation=['name', 'closestJoint', 'oneToOne'], normalize=True, noMirror=True )", "保存ディレクトリが無かったら作成 if not os.path.exists(self.filePath): os.makedirs(os.path.dirname(self.filePath + '\\\\')) # 末尾\\\\が必要なので注意 else:", "= cmds.listRelatives(dstNode, s=True, pa=True, type='mesh') dstSkinCluster = cmds.listConnections(dstShapes[0] + '.inMesh',", "if mute_flag == 0: cmds.confirmDialog(m=msg04) if mute_flag == 1: cmds.confirmDialog(m=msg05)", "= cmds.listRelatives(dst, s=True, pa=True, type='mesh') if not shapes: # もしメッシュがなかったら", "json.load(f) l_list = save_data.keys() r_list = save_data.values() left_list_list.append(l_list) right_list_list.append(r_list) except", "# スケルトン名にLRが含まれているかどうかを判定 side = 0 side_name = '' for i,", 
"dir_path+'/joint_rule_middle.json' end_file = dir_path+'/joint_rule_end.json' save_files = [start_file, middle_file, end_file] left_list_list", "end_l_list] def_right_list_list = [start_r_list, mid_r_list, end_r_list] #左右対称設定ファイルからルールをロードする dir_path = os.path.join(", "q=True, inf=True) #ジョイントを取得 for skinJoint in skinJointAll: # ジョイントラベル設定関数呼び出し joint_label(skinJoint,", "split_name.replace(side_name.replace('.', ''), '') # スケルトン名設定 cmds.setAttr(skinJoint + '.otherType', new_joint_name, type='string')", "with open(save_file, 'r') as f: save_data = json.load(f) l_list =", "as f: save_data = json.load(f) l_list = save_data.keys() r_list =", "if not srcSkinCluster: if logTransfer: print skinMesh + massege01 return", "skin = cmds.ls(cmds.listHistory(node), type='skinCluster') if not skin: continue skin_list.append(skin) if", "と barycentricは不具合のため現状仕様不可能(処理が終わらない)2016/11/03現在 →barycentric、bylinearはMaya2016Extention2から利用可能 weightFile→メッシュ名検索でなく手動指定したい場合にパスを指定。methodのnearest、barycentricとセットで使う感じ。 →Mayaコピー時にファイル名指定すると複数保存できないので注意。 threshold→nearest,barycentricの位置検索範囲 ''' self.skinMeshes = skinMeshes", "failed : ' + skinMesh continue else: dstSkinCluster = dstSkinCluster[0]", "lang.Lang( en='No mesh selection.\\nWould you like to process all of", "surfaceAssociation='closestPoint', influenceAssociation=['name', 'closestJoint', 'oneToOne'], normalize=True, noMirror=True ) if logTransfer: print", "False # スキンクラスタがなかったら関数抜ける # スキンクラスタのパラメータ色々を取得しておく srcSkinCluster = srcSkinCluster[0] skinningMethod =", "mesh selection.\\nWould you like to process all of mesh in", "objects = set(ad_node) #print len(objects) if not objects: all_mesh =", "# スキンクラスタがなかったら次に移行 tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく # スキンクラスタのパラメータ色々を取得しておく srcSkinCluster = srcSkinCluster[0]", "+ self.saveName elif path == 'project': self.scene_path = '/'.join(cmds.file(q=True, sceneName=True).split('/')[:-1])", "file : ' + skinMesh # ダミー親削除 cmds.delete(dummy) cmds.select(self.skinMeshes, r=True)", 
"normalizeWeights all_influences += influences #saveData[';influences'] = influences skinFlag = True", "re class WeightCopyPaste(): def main(self, skinMeshes, mode='copy', saveName='default', method='index', weightFile='auto',", "if not dstSkinCluster: # バインド dstSkinCluster = cmds.skinCluster( dst, influences,", "'joint setting :', split_name, side, side_name # 左右のラベルを設定、どちらでもないときは中央 cmds.setAttr(skinJoint +", "ロード # self.visibility = saveData['visibility']#セーブデータ読み込み skinningMethod = saveData[';skinningMethod'] dropoffRate =", "list): temp = transferedMesh transferedMesh = [] transferedMesh.append(temp) for dst", "<reponame>jdrese/SIWeightEditor<filename>Contents/scripts/siweighteditor/weight.py<gh_stars>1-10 # -*- coding: utf-8 -*- from maya import mel", "# 保存ディレクトリが無かったら作成 if not os.path.exists(self.filePath): os.makedirs(os.path.dirname(self.filePath + '\\\\')) # 末尾\\\\が必要なので注意", "for k, lr in enumerate(lr_list): if i == 0: if", "self.weightFile == 'auto': weightFile = skinMesh else: weightFile = self.weightFile", "'\\\\Scripting_Files\\\\weight\\\\' + self.saveName elif path == 'project': self.scene_path = '/'.join(cmds.file(q=True,", "transfer_weight(skinMesh, transferedMesh, transferWeight=True, returnInfluences=False, logTransfer=True): ''' スキンウェイトの転送関数 転送先がバインドされていないオブジェクトの場合は転送元のバインド情報を元に自動バインド ・引数 skinMesh→転送元メッシュ(1個,リスト形式でも可)", "+ ']' dstSkinCluster = dstSkinCluster[0] if transferWeight: cmds.copySkinWeights( ss=srcSkinCluster, ds=dstSkinCluster,", "#ラベリングルールをロードしておく left_list_list, right_list_list = load_joint_label_rules() # リストタイプじゃなかったらリストに変換する if not isinstance(object,", "saveData = json.load(f) # ロード # self.visibility = saveData['visibility']#セーブデータ読み込み skinningMethod", "None: return srcShapes = cmds.listRelatives(srcNode, s=True, pa=True, type='mesh') if srcShapes:", "list(set(all_influences) - set(influences)) if sub_influences: cmds.skinCluster(skinMesh, e=True, ai=sub_influences, lw=True, ug=True,", "'_Left', '_left'] end_r_list = ['_R', '_r', 
'_R.', '_r.', '_R..', '_r..',", "'.skn') # コピーかペーストをそれぞれ呼び出し if mode == 'copy': self.weightCopy() if mode", "does not perform the transfer of weight because it is", "type='mesh') if not shapes: # もしメッシュがなかったら continue # 処理を中断して次のオブジェクトへ #", "' .nw') influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True) saveData[';skinningMethod'] = skinningMethod", "f: # ファイル開く'r'読み込みモード'w'書き込みモード json.dump(saveData, f) def transfer_weight(skinMesh, transferedMesh, transferWeight=True, returnInfluences=False,", "if not objects: all_mesh = cmds.confirmDialog(m=msg01, t='', b= [msg02, msg03],", "method=self.method, worldSpace=True, positionTolerance=self.threshold, path=self.filePath + '\\\\') cmds.skinCluster(dstSkinCluster, e=True, forceNormalizeWeights=True) print", "try: with open(save_file, 'r') as f: save_data = json.load(f) l_list", "joint_label(skinJoint, visibility=False) if symWeight is False or dstNode is None:", "in files: name, ext = os.path.splitext(file) if ext == '.xml':", "e: print e.message return self.filePath = self.protect_pat+'\\\\' + self.saveName self.fileName", "[start_l_list, mid_l_list, end_l_list] def_right_list_list = [start_r_list, mid_r_list, end_r_list] #左右対称設定ファイルからルールをロードする dir_path", "i, save_file in enumerate(save_files): if os.path.exists(save_file):#保存ファイルが存在したら try: with open(save_file, 'r')", "j + 1 if i == 2: if re.match(lr[::-1], split_name[::-1]):", "= skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく # スキンクラスタのパラメータ色々を取得しておく srcSkinCluster = srcSkinCluster[0] skinningMethod = cmds.getAttr(srcSkinCluster", "print(str(skinJoint) + ' : ' + str(objTypeName) + ' Skip", "= [start_l_list, mid_l_list, end_l_list] def_right_list_list = [start_r_list, mid_r_list, end_r_list] #左右対称設定ファイルからルールをロードする", "= '' for i, (l_list, r_list) in enumerate(zip(left_list_list, right_list_list)): for", "= cmds.listConnections(skinMesh+'.inMesh', s=True, d=False) if not srcSkinCluster: if logTransfer: print", "q=True, inf=True) # qフラグは照会モード、ちなみにeは編集モード # 
リストタイプじゃなかったらリストに変換する if not isinstance(transferedMesh, list):", "dstSkinCluster[0] tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく if self.engine == 'maya': files =", "'\\\\') cmds.skinCluster(dstSkinCluster, e=True, forceNormalizeWeights=True) print 'Weight paste to : '", "if srcShapes: srcSkinCluster = cmds.ls(cmds.listHistory(srcNode), type='skinCluster') # スキンクラスタがあったらジョイントラベルを設定してウェイトミラー if srcSkinCluster:", "if os.path.isfile(self.filePath + '\\\\' + xml_name): if self.method == 'index'", "= [] right_list_list = [] for i, save_file in enumerate(save_files):", "if not isinstance(self.skinMeshes, list): temp = self.skinMeshes self.skinMeshes = []", "cmds.setAttr(skinJoint + '.otherType', new_joint_name, type='string') # 可視性設定 cmds.setAttr(skinJoint + '.drawLabel',", "left_list_list.append(def_left_list_list[i]) right_list_list.append(def_right_list_list[i]) return left_list_list, right_list_list def joint_label(object, visibility=False): ''' ジョイントラベル設定関数", "ext == '.xml': xml_name = file else: # Pipeはファイル名に出来ないので変換しておく meshName", "= threshold self.engine = engine self.memShapes = {} self.target =", "self.protect_pat+'\\\\' + self.saveName self.fileName = os.path.join(self.filePath, self.saveName + '.json') self.apiName", "skinMesh + massege01 return False # スキンクラスタがなかったら関数抜ける # スキンクラスタのパラメータ色々を取得しておく srcSkinCluster", "en='Transfer bind influences:', ja=u'バインド状態を転送:' ).output() if isinstance(skinMesh, list): # 転送元がリストだった場合、最初のメッシュのみ取り出す", "srcSkinCluster[0] skinningMethod = cmds.getAttr(srcSkinCluster + ' .skm') dropoffRate = cmds.getAttr(srcSkinCluster", "= saveData[';dropoffRate'] maintainMaxInfluences = saveData[';maintainMaxInfluences'] maxInfluences = saveData[';maxInfluences'] bindMethod =", "= maintainMaxInfluences saveData[';maxInfluences'] = maxInfluences saveData[';bindMethod'] = bindMethod saveData[';normalizeWeights'] =", "right_list_list.append(def_right_list_list[i]) return left_list_list, right_list_list def joint_label(object, 
visibility=False): ''' ジョイントラベル設定関数 object→オブジェクト、リスト形式可", "saveData[';maxInfluences'] bindMethod = saveData[';bindMethod'] normalizeWeights = saveData[';normalizeWeights'] influences = saveData[';influences']", "import lang from . import common import os import json", "split_name, side, side_name # 左右のラベルを設定、どちらでもないときは中央 cmds.setAttr(skinJoint + '.side', side) #", "saveData[';skinningMethod'] = skinningMethod saveData[';dropoffRate'] = dropoffRate saveData[';maintainMaxInfluences'] = maintainMaxInfluences saveData[';maxInfluences']", "for skinMesh in self.skinMeshes: srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') if not", "= cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') # スキンクラスタがない場合はあらかじめ取得しておいた情報をもとにバインドする if not dstSkinCluster: meshName =", "os.path.isfile(self.filePath + '\\\\' + xml_name): if self.method == 'index' or", "forceNormalizeWeights=True) print 'Weight paste to : ' + str(skinMesh) else:", "#ロードできなかった時の初期値 start_l_list = ['L_', 'l_', 'Left_', 'left_'] start_r_list = ['R_',", "if re.search(lr, split_name): side = j + 1 if i", ") if logTransfer: print massege03 + '[' + skinMesh +", "common.TemporaryReparent().main(dst,dummyParent=dummy, mode='cut') shapes = cmds.listRelatives(dst, s=True, pa=True, type='mesh') if not", "if len(files) == 2: for file in files: name, ext", "load_joint_label_rules(): #ロードできなかった時の初期値 start_l_list = ['L_', 'l_', 'Left_', 'left_'] start_r_list =", "== '.xml': xml_name = file else: # Pipeはファイル名に出来ないので変換しておく meshName =", "weightFile = self.weightFile dstSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') # スキンクラスタがない場合はあらかじめ取得しておいた情報をもとにバインドする if", "if logTransfer: print massege02 + '[' + skinMesh + ']", "'_Right_', '_right_'] end_l_list = ['_L', '_l', '_L.', '_l.', '_L..', '_l..',", "for skinMesh in self.skinMeshes: try: cmds.bakePartialHistory(skinMesh, ppt=True) except: pass #", "return self.filePath = self.protect_pat+'\\\\' + self.saveName self.fileName = 
os.path.join(self.filePath, self.saveName", "lang.Lang( en=': It does not perform the transfer of weight", "symmetry_weight(srcNode=None, dstNode=None, symWeight=True): ''' ウェイトシンメトリする関数 srcNode→反転元 dstNode→反転先 symWeight→ウェイトミラーするかどうか ''' #", "if not isinstance(object, list): temp = object object = []", "end_l_list = ['_L', '_l', '_L.', '_l.', '_L..', '_l..', '_Left', '_left']", "objects: children = cmds.ls(cmds.listRelatives(node, ad=True, f=True), type ='transform') ad_node +=", "is False or dstNode is None: return transfer_weight(srcNode, dstNode, transferWeight=False,", "if ext == '.xml': xml_name = file else: # Pipeはファイル名に出来ないので変換しておく", "wt=0, ps=0) if self.engine == 'maya': # 読みに行くセーブファイル名を指定、autoならメッシュ名 if self.weightFile", "= dstSkinCluster[0] if transferWeight: cmds.copySkinWeights( ss=srcSkinCluster, ds=dstSkinCluster, surfaceAssociation='closestPoint', influenceAssociation=['name', 'closestJoint',", "i, (l_list, r_list) in enumerate(zip(left_list_list, right_list_list)): for j, lr_list in", "srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') if not srcSkinCluster: continue # スキンクラスタがなかったら次に移行", "ノードの中からスキンクラスタを取得してくる#inMesh直上がSkinClusterとは限らないので修正 srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') # srcSkinCluster = cmds.listConnections(skinMesh+'.inMesh', s=True,", "# ダミー親削除 cmds.delete(dummy) cmds.select(self.skinMeshes, r=True) # ウェイト情報を保存する関数 def weightCopy(self): saveData", ") if logTransfer: print massege02 + '[' + skinMesh +", "j, lr_list in enumerate([l_list, r_list]): for k, lr in enumerate(lr_list):", "= ['_L_', '_l_', '_Left_', '_left_'] mid_r_list = ['_R_', '_r_', '_Right_',", "bindMethod = cmds.getAttr(srcSkinCluster + ' .bm') normalizeWeights = cmds.getAttr(srcSkinCluster +", "# ジョイントラベル設定関数呼び出し joint_label(skinJoint, visibility=False) if symWeight is False or dstNode", "meshName = str(meshName).replace(':', '__colon__') cmds.deformerWeights(meshName + '.xml', export=True, deformer=srcSkinCluster, 
path=self.filePath", "cmds.objectType(skinJoint) if objTypeName == 'joint': split_name = skinJoint.split('|')[-1] # スケルトン名にLRが含まれているかどうかを判定", "saveData[';bindMethod'] = bindMethod saveData[';normalizeWeights'] = normalizeWeights all_influences += influences #saveData[';influences']", "= self.weightFile # Pipeはファイル名に出来ないので変換しておく meshName = str(weightFile).replace('|', '__pipe__') # コロンはファイル名に出来ないので変換しておく", "else: dstSkinCluster = dstSkinCluster[0] tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく if self.engine ==", "ad_node = [] for node in objects: children = cmds.ls(cmds.listRelatives(node,", "リストを渡されたときのための保険 # ノードの中からスキンクラスタを取得してくる#inMesh直上がSkinClusterとは限らないので修正 srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') # srcSkinCluster =", "else: weightFile = self.weightFile dstSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') # スキンクラスタがない場合はあらかじめ取得しておいた情報をもとにバインドする", "cmds.delete(dummy) cmds.select(self.skinMeshes, r=True) # ウェイト情報を保存する関数 def weightCopy(self): saveData = {}", "type='skinCluster') # スキンクラスタがない場合はあらかじめ取得しておいた情報をもとにバインドする if not dstSkinCluster: # バインド dstSkinCluster =", "file in files: name, ext = os.path.splitext(file) if ext ==", "saveData = {} # 保存ディレクトリが無かったら作成 if not os.path.exists(self.filePath): os.makedirs(os.path.dirname(self.filePath +", "= save_data.values() left_list_list.append(l_list) right_list_list.append(r_list) except Exception as e: print e.message", "> 0: mute_flag = 0 for skin in skin_list: cmds.setAttr(skin[0]+'.envelope',", "in self.skinMeshes: srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') if not srcSkinCluster: continue", "i == 2: if re.match(lr[::-1], split_name[::-1]): side = j +", "'oneToOne'], normalize=True, noMirror=True ) if logTransfer: print massege02 + '['", "def joint_label(object, visibility=False): ''' ジョイントラベル設定関数 object→オブジェクト、リスト形式可 visibility→ラベルの可視性、省略可能。デフォルトFalse。 ''' #ラベリングルールをロードしておく left_list_list,", "読みに行くセーブファイル名を指定、autoならメッシュ名 if 
self.weightFile == 'auto': weightFile = skinMesh else: weightFile", "threshold=0.2, engine='maya', tgt=1, path='default', viewmsg=False): if viewmsg: cmds.inViewMessage( amg='<hl>Simple Weight</hl>", "isinstance(skinMesh, list): # 転送元がリストだった場合、最初のメッシュのみ取り出す skinMesh = skinMesh[0] # リストを渡されたときのための保険 #", "ppt=True) except: pass # ノードの中からスキンクラスタを取得してくる#inMesh直上がSkinClusterとは限らないので修正 srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') if", "elif path == 'project': self.scene_path = '/'.join(cmds.file(q=True, sceneName=True).split('/')[:-1]) self.protect_path =", "files = os.listdir(self.filePath) if files is not None: for file", "#ジョイントを取得 for skinJoint in skinJointAll: # ジョイントラベル設定関数呼び出し joint_label(skinJoint, visibility=False) if", "# -*- coding: utf-8 -*- from maya import mel from", "if symWeight is False or dstNode is None: return transfer_weight(srcNode,", "influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True) sub_influences = list(set(all_influences) - set(influences))", "mesh.', ja=u': スキンメッシュではないのでウェイトの転送を行いません' ).output() massege02 = lang.Lang( en='Transfer the weight:',", "save_data.keys() r_list = save_data.values() left_list_list.append(l_list) right_list_list.append(r_list) except Exception as e:", "files is not None: for file in files: os.remove(self.filePath +", "you like to process all of mesh in this scene?.',", "+ skinMesh + '] >>> [' + dst + ']'", "not perform the transfer of weight because it is not", "amg='<hl>Simple Weight</hl> : '+mode, pos='midCenterTop', fade=True, ta=0.75, a=0.5) ''' ウェイトデータの保存、読み込み関数", "= lang.Lang( en=': It does not perform the transfer of", "influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True) saveData[';skinningMethod'] = skinningMethod saveData[';dropoffRate'] =", "meshName = str(meshName).replace(':', '__colon__') xml_name = meshName + '.xml' if", "cmds.ls(sl=True, l=True) ad_node = [] for node in objects: children", 
"「index」法は、頂点インデックスを使用してウェイトをオブジェクトにマッピングします。マッピング先のオブジェクトと書き出し後のデータのトポロジが同じ場合、これが最も便利な手法です。 「nearest」法は、読み込んだデータのニアレスト頂点を検索し、ウェイト値をその値に設定します。これは、高解像度メッシュを低解像度メッシュにマッピングする場合に最適です。 「barycentric」法はポリゴン メッシュでのみサポートされます。ターゲット ジオメトリのニアレスト三角を検索し、 ソース ポイントと頂点の距離に応じてウェイトを再スケールします。これは通常、高解像度メッシュにマッピングされる粗いメッシュで使用されます。 「over」法は「index」法に似ていますが、マッピング前に対象メッシュのウェイトがクリアされないため、一致していないインデックスのウェイトがそのまま維持されます。 nearest と", "omi=maintainMaxInfluences, mi=maxInfluences, dr=dropoffRate, sm=skinningMethod, nw=normalizeWeights, tsb=True, ) if logTransfer: print", ").output() massege02 = lang.Lang( en='Transfer the weight:', ja=u'ウェイトを転送:' ).output() massege03", "+ ' : ' + str(objTypeName) + ' Skip Command')", "= [] for node in objects: children = cmds.ls(cmds.listRelatives(node, ad=True,", "dst in transferedMesh: #子供のノード退避用ダミーペアレントを用意 dummy = common.TemporaryReparent().main(mode='create') common.TemporaryReparent().main(dst,dummyParent=dummy, mode='cut') shapes", "ある場合は中身を削除 files = os.listdir(self.filePath) if files is not None: for", "weightPaste(self): dummy = cmds.spaceLocator() for skinMesh in self.skinMeshes: # 読みに行くセーブファイル名を指定、autoならメッシュ名", "cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') # srcSkinCluster = cmds.listConnections(skinMesh+'.inMesh', s=True, d=False) if not", "mi=maxInfluences, dr=dropoffRate, sm=skinningMethod, nw=normalizeWeights, tsb=True, ) dstSkinCluster = dstSkinCluster[0] #", "スキンクラスタがなかったらfor分の次に移行 srcSkinCluster = srcSkinCluster[0] influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True) sub_influences", "skinJoint in skinJointAll: # ジョイントラベル設定関数呼び出し joint_label(skinJoint, visibility=False) if symWeight is", "# スキンクラスタがあったらジョイントラベルを設定してウェイトミラー if srcSkinCluster: # バインド状態を転送する関数呼び出し skinJointAll = cmds.skinCluster(srcSkinCluster, q=True,", "+ '.xml' if os.path.isfile(self.filePath + '\\\\' + xml_name): if self.method", "as e: print e.message print 'Error !! Skin bind failed", "from . import lang from . 
import common import os", "if srcSkinCluster: # バインド状態を転送する関数呼び出し skinJointAll = cmds.skinCluster(srcSkinCluster, q=True, inf=True) #ジョイントを取得", "= cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') if not srcSkinCluster: continue # スキンクラスタがなかったらfor分の次に移行 srcSkinCluster", "saveData[';maxInfluences'] = maxInfluences saveData[';bindMethod'] = bindMethod saveData[';normalizeWeights'] = normalizeWeights all_influences", "self.visibility = saveData['visibility']#セーブデータ読み込み skinningMethod = saveData[';skinningMethod'] dropoffRate = saveData[';dropoffRate'] maintainMaxInfluences", "name, ext = os.path.splitext(file) if ext == '.xml': xml_name =", "= cmds.getAttr(srcSkinCluster + ' .mi') bindMethod = cmds.getAttr(srcSkinCluster + '", "bind influences:', ja=u'バインド状態を転送:' ).output() if isinstance(skinMesh, list): # 転送元がリストだった場合、最初のメッシュのみ取り出す skinMesh", "'_l_', '_Left_', '_left_'] mid_r_list = ['_R_', '_r_', '_Right_', '_right_'] end_l_list", "self.filePath = os.getenv('MAYA_APP_DIR') + '\\\\Scripting_Files\\\\weight\\\\' + self.saveName elif path ==", "path=self.filePath + '\\\\') else: cmds.deformerWeights(xml_name, im=True, deformer=dstSkinCluster, method=self.method, worldSpace=True, positionTolerance=self.threshold,", "skin_list = [] for node in objects: skin = cmds.ls(cmds.listHistory(node),", "= cmds.skinCluster(srcSkinCluster, q=True, inf=True) #ジョイントを取得 for skinJoint in skinJointAll: #", "skinMesh continue else: dstSkinCluster = dstSkinCluster[0] tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく if", "dummy = cmds.spaceLocator() for skinMesh in self.skinMeshes: # 読みに行くセーブファイル名を指定、autoならメッシュ名 if", "= lang.Lang(en='Yes', ja=u'はい').output() msg03 = lang.Lang(en='No', ja=u'いいえ').output() msg04 = lang.Lang(", "if objTypeName == 'joint': split_name = skinJoint.split('|')[-1] # スケルトン名にLRが含まれているかどうかを判定 side", "'/'.join(cmds.file(q=True, sceneName=True).split('/')[:-1]) self.protect_path = os.path.join(self.scene_path, 'weight_protector') try: if not 
os.path.exists(self.protect_path):", "as f: # ファイル開く'r'読み込みモード'w'書き込みモード saveData = json.load(f) # ロード #", "str(weightFile).replace('|', '__pipe__') # コロンはファイル名に出来ないので変換しておく meshName = str(meshName).replace(':', '__colon__') cmds.deformerWeights(meshName +", "weight XML file : ' + skinMesh # ダミー親削除 cmds.delete(dummy)", "'') # スケルトン名設定 cmds.setAttr(skinJoint + '.otherType', new_joint_name, type='string') # 可視性設定", "objTypeName = cmds.objectType(skinJoint) if objTypeName == 'joint': split_name = skinJoint.split('|')[-1]", "+ dst + ']' dstSkinCluster = dstSkinCluster[0] if transferWeight: cmds.copySkinWeights(", "# リストタイプじゃなかったらリストに変換する if not isinstance(transferedMesh, list): temp = transferedMesh transferedMesh", "= j + 1 if i == 1: if re.search(lr,", "skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく if self.engine == 'maya': files = os.listdir(self.filePath) print files", "スケルトン名にLRが含まれているかどうかを判定 side = 0 side_name = '' for i, (l_list,", "engine='maya', tgt=1, path='default', viewmsg=False): if viewmsg: cmds.inViewMessage( amg='<hl>Simple Weight</hl> :", "visibility=False): ''' ジョイントラベル設定関数 object→オブジェクト、リスト形式可 visibility→ラベルの可視性、省略可能。デフォルトFalse。 ''' #ラベリングルールをロードしておく left_list_list, right_list_list =", "im=True, method=self.method, deformer=dstSkinCluster, path=self.filePath + '\\\\') else: cmds.deformerWeights(xml_name, im=True, deformer=dstSkinCluster,", "#左右対称設定ファイルからルールをロードする dir_path = os.path.join( os.getenv('MAYA_APP_dir'), 'Scripting_Files') start_file = dir_path+'/joint_rule_start.json' middle_file", "self.scene_path = '/'.join(cmds.file(q=True, sceneName=True).split('/')[:-1]) self.protect_path = os.path.join(self.scene_path, 'weight_protector') try: if", "tgt self.pasteMode = {'index':1, 'nearest':3} # リストタイプじゃなかったらリストに変換する if not isinstance(self.skinMeshes,", "= dir_path+'/joint_rule_start.json' middle_file = dir_path+'/joint_rule_middle.json' end_file = dir_path+'/joint_rule_end.json' save_files =", "'_l.', '_L..', '_l..', '_Left', '_left'] end_r_list = ['_R', 
'_r', '_R.',", "else: cmds.deformerWeights(xml_name, im=True, deformer=dstSkinCluster, method=self.method, worldSpace=True, positionTolerance=self.threshold, path=self.filePath + '\\\\')", "cmds.getAttr(srcSkinCluster + ' .mmi') maxInfluences = cmds.getAttr(srcSkinCluster + ' .mi')", "' .bm') normalizeWeights = cmds.getAttr(srcSkinCluster + ' .nw') influences =", "side = j + 1 if side:#対象が見つかってたら全部抜ける side_name = lr", "weightFile = skinMesh else: weightFile = self.weightFile dstSkinCluster = cmds.ls(cmds.listHistory(skinMesh),", "omi=maintainMaxInfluences, mi=maxInfluences, dr=dropoffRate, sm=skinningMethod, nw=normalizeWeights, tsb=True, ) dstSkinCluster = dstSkinCluster[0]", "type='skinCluster') if not srcSkinCluster: continue # スキンクラスタがなかったらfor分の次に移行 srcSkinCluster = srcSkinCluster[0]", "all_influences = list(set(all_influences)) saveData[';influences'] = all_influences #インフルエンス数の変化に耐えられるようにあらかじめAddしてからコピーするS for skinMesh in", "= {} self.target = tgt self.pasteMode = {'index':1, 'nearest':3} #", "Skip Command') #ウェイトのミュートをトグル def toggle_mute_skinning(): msg01 = lang.Lang( en='No mesh", "skin in skin_list: cmds.setAttr(skin[0]+'.envelope', mute_flag) if mute_flag == 0: cmds.confirmDialog(m=msg04)", "#saveData[';influences'] = influences skinFlag = True all_influences = list(set(all_influences)) saveData[';influences']", "not srcSkinCluster: continue # スキンクラスタがなかったら次に移行 tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく # スキンクラスタのパラメータ色々を取得しておく", "d=False) cmds.copySkinWeights(ss=srcSkinCluster[0], ds=dstSkinCluster[0], mirrorMode='YZ', surfaceAssociation='closestComponent', influenceAssociation='label', normalize=True) def load_joint_label_rules(): #ロードできなかった時の初期値", "tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく except Exception as e: print e.message print", "print 'Not exist seved weight XML file : ' +", "[start_file, middle_file, end_file] left_list_list = [] right_list_list = [] for", "all_influences = [] for skinMesh in self.skinMeshes: try: 
cmds.bakePartialHistory(skinMesh, ppt=True)", "self.saveName self.fileName = os.path.join(self.filePath, self.saveName + '.json') self.apiName = os.path.join(self.filePath,", "'Scripting_Files') start_file = dir_path+'/joint_rule_start.json' middle_file = dir_path+'/joint_rule_middle.json' end_file = dir_path+'/joint_rule_end.json'", "= skinMesh[0] # リストを渡されたときのための保険 # ノードの中からスキンクラスタを取得してくる#inMesh直上がSkinClusterとは限らないので修正 srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')", "self.engine == 'maya': files = os.listdir(self.filePath) print files if len(files)", "node in objects: children = cmds.ls(cmds.listRelatives(node, ad=True, f=True), type ='transform')", "weightFile self.threshold = threshold self.engine = engine self.memShapes = {}", "self.target = tgt self.pasteMode = {'index':1, 'nearest':3} # リストタイプじゃなかったらリストに変換する if", "== 2: for file in files: name, ext = os.path.splitext(file)", "lang from . import common import os import json import", "= self.weightFile dstSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') # スキンクラスタがない場合はあらかじめ取得しておいた情報をもとにバインドする if not", "common.TemporaryReparent().main(dst,dummyParent=dummy, mode='parent') #ダミーペアレントを削除 common.TemporaryReparent().main(dummyParent=dummy, mode='delete') if returnInfluences: return influences else:", "if cmds.getAttr(skin[0]+'.envelope') > 0: mute_flag = 0 for skin in", "enumerate([l_list, r_list]): for k, lr in enumerate(lr_list): if i ==", "not isinstance(transferedMesh, list): temp = transferedMesh transferedMesh = [] transferedMesh.append(temp)", "return left_list_list, right_list_list def joint_label(object, visibility=False): ''' ジョイントラベル設定関数 object→オブジェクト、リスト形式可 visibility→ラベルの可視性、省略可能。デフォルトFalse。", "# バインド dstSkinCluster = cmds.skinCluster( dst, influences, omi=maintainMaxInfluences, mi=maxInfluences, dr=dropoffRate,", "worldSpace=True, positionTolerance=self.threshold, path=self.filePath + '\\\\') cmds.skinCluster(dstSkinCluster, e=True, 
forceNormalizeWeights=True) print 'Weight", "self.skinMeshes.append(temp) # ファイルパスを生成しておく if path == 'default': self.filePath = os.getenv('MAYA_APP_DIR')", "if os.path.exists(save_file):#保存ファイルが存在したら try: with open(save_file, 'r') as f: save_data =", "weight because it is not a skin mesh.', ja=u': スキンメッシュではないのでウェイトの転送を行いません'", "joint_label(object, visibility=False): ''' ジョイントラベル設定関数 object→オブジェクト、リスト形式可 visibility→ラベルの可視性、省略可能。デフォルトFalse。 ''' #ラベリングルールをロードしておく left_list_list, right_list_list", "j + 1 if side:#対象が見つかってたら全部抜ける side_name = lr break if", "saveData[';normalizeWeights'] influences = saveData[';influences'] # 子のノードがトランスフォームならダミーに親子付けして退避 common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='cut') influences", "str(weightFile).replace('|', '__pipe__') # コロンはファイル名に出来ないので変換しておく meshName = str(meshName).replace(':', '__colon__') xml_name =", "try: with open(self.fileName, 'r') as f: # ファイル開く'r'読み込みモード'w'書き込みモード saveData =", "'.xml', export=True, deformer=srcSkinCluster, path=self.filePath + '\\\\') with open(self.fileName, 'w') as", "+ dst + ']' #親子付けを戻す common.TemporaryReparent().main(dst,dummyParent=dummy, mode='parent') #ダミーペアレントを削除 common.TemporaryReparent().main(dummyParent=dummy, mode='delete')", "left_list_list.append(l_list) right_list_list.append(r_list) except Exception as e: print e.message left_list_list.append(def_left_list_list[i]) right_list_list.append(def_right_list_list[i])", "cmds.getAttr(srcSkinCluster + ' .nw') influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True) saveData[';skinningMethod']", "influences:', ja=u'バインド状態を転送:' ).output() if isinstance(skinMesh, list): # 転送元がリストだった場合、最初のメッシュのみ取り出す skinMesh =", "object = [] object.append(temp) for skinJoint in object: objTypeName =", "lw=True, ug=True, wt=0, ps=0) if self.engine == 'maya': # 読みに行くセーブファイル名を指定、autoならメッシュ名", "path == 'project': self.scene_path = '/'.join(cmds.file(q=True, sceneName=True).split('/')[:-1]) self.protect_path = os.path.join(self.scene_path,", 
"cmds.skinCluster( skinMesh, influences, omi=maintainMaxInfluences, mi=maxInfluences, dr=dropoffRate, sm=skinningMethod, nw=normalizeWeights, tsb=True, )", "' + skinMesh # ダミー親削除 cmds.delete(dummy) cmds.select(self.skinMeshes, r=True) # ウェイト情報を保存する関数", "= [] for skinMesh in self.skinMeshes: try: cmds.bakePartialHistory(skinMesh, ppt=True) except:", "'Weight paste to : ' + str(skinMesh) else: print 'Not", "in enumerate([l_list, r_list]): for k, lr in enumerate(lr_list): if i", "temp = self.skinMeshes self.skinMeshes = [] self.skinMeshes.append(temp) # ファイルパスを生成しておく if", "cmds.selectMode(o=True) objects = cmds.ls(sl=True, l=True) ad_node = [] for node", "dstSkinCluster = dstSkinCluster[0] tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく if self.engine == 'maya':", "maya import cmds from . import lang from . import", "cmds.listConnections(skinMesh+'.inMesh', s=True, d=False) if not srcSkinCluster: if logTransfer: print skinMesh", "else: # Pipeはファイル名に出来ないので変換しておく meshName = str(weightFile).replace('|', '__pipe__') # コロンはファイル名に出来ないので変換しておく meshName", "skinMesh # ダミー親削除 cmds.delete(dummy) cmds.select(self.skinMeshes, r=True) # ウェイト情報を保存する関数 def weightCopy(self):", "# ノードの中からスキンクラスタを取得してくる#inMesh直上がSkinClusterとは限らないので修正 srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') # srcSkinCluster = cmds.listConnections(skinMesh+'.inMesh',", "= skinMesh else: weightFile = self.weightFile dstSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')", "= skinMesh else: weightFile = self.weightFile # Pipeはファイル名に出来ないので変換しておく meshName =", "return srcShapes = cmds.listRelatives(srcNode, s=True, pa=True, type='mesh') if srcShapes: srcSkinCluster", "for skin in skin_list: cmds.setAttr(skin[0]+'.envelope', mute_flag) if mute_flag == 0:", "mode == 'paste': self.weightPaste() def weightPaste(self): dummy = cmds.spaceLocator() for", "# リストタイプじゃなかったらリストに変換する if not isinstance(object, list): temp = object object", "= saveData[';maintainMaxInfluences'] 
maxInfluences = saveData[';maxInfluences'] bindMethod = saveData[';bindMethod'] normalizeWeights =", "'] >>> [' + dst + ']' dstSkinCluster = dstSkinCluster[0]", "e.message left_list_list.append(def_left_list_list[i]) right_list_list.append(def_right_list_list[i]) else: left_list_list.append(def_left_list_list[i]) right_list_list.append(def_right_list_list[i]) return left_list_list, right_list_list def", "cmds.ls(cmds.listHistory(srcNode), type='skinCluster') # スキンクラスタがあったらジョイントラベルを設定してウェイトミラー if srcSkinCluster: # バインド状態を転送する関数呼び出し skinJointAll =", "['_L', '_l', '_L.', '_l.', '_L..', '_l..', '_Left', '_left'] end_r_list =", "start_file = dir_path+'/joint_rule_start.json' middle_file = dir_path+'/joint_rule_middle.json' end_file = dir_path+'/joint_rule_end.json' save_files", "list): # 転送元がリストだった場合、最初のメッシュのみ取り出す skinMesh = skinMesh[0] # リストを渡されたときのための保険 # ノードの中からスキンクラスタを取得してくる#inMesh直上がSkinClusterとは限らないので修正", "weightCopy(self): saveData = {} # 保存ディレクトリが無かったら作成 if not os.path.exists(self.filePath): os.makedirs(os.path.dirname(self.filePath", "= [start_r_list, mid_r_list, end_r_list] #左右対称設定ファイルからルールをロードする dir_path = os.path.join( os.getenv('MAYA_APP_dir'), 'Scripting_Files')", "meshName = str(weightFile).replace('|', '__pipe__') # コロンはファイル名に出来ないので変換しておく meshName = str(meshName).replace(':', '__colon__')", "self.fileName = os.path.join(self.filePath, self.saveName + '.json') self.apiName = os.path.join(self.filePath, self.saveName", "cmds.getAttr(srcSkinCluster + ' .bm') normalizeWeights = cmds.getAttr(srcSkinCluster + ' .nw')", "enumerate(lr_list): if i == 0: if re.match(lr, split_name): side =", "else: print(str(skinJoint) + ' : ' + str(objTypeName) + '", "continue # スキンクラスタがなかったら次に移行 tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく # スキンクラスタのパラメータ色々を取得しておく srcSkinCluster =", "skinMesh in self.skinMeshes: # 読みに行くセーブファイル名を指定、autoならメッシュ名 if self.weightFile == 'auto': weightFile", "skinFlag = False all_influences = [] for skinMesh in self.skinMeshes:", "if not 
os.path.exists(self.protect_path): os.makedirs(self.protect_path) except Exception as e: print e.message", "+ '\\\\') with open(self.fileName, 'w') as f: # ファイル開く'r'読み込みモード'w'書き込みモード json.dump(saveData,", "#親子付けを戻す common.TemporaryReparent().main(dst,dummyParent=dummy, mode='parent') #ダミーペアレントを削除 common.TemporaryReparent().main(dummyParent=dummy, mode='delete') if returnInfluences: return influences", "skinJointAll: # ジョイントラベル設定関数呼び出し joint_label(skinJoint, visibility=False) if symWeight is False or", "コピーかペーストをそれぞれ呼び出し if mode == 'copy': self.weightCopy() if mode == 'paste':", "all_influences += influences #saveData[';influences'] = influences skinFlag = True all_influences", "'.json') self.apiName = os.path.join(self.filePath, self.saveName + '.skn') # コピーかペーストをそれぞれ呼び出し if", "= os.listdir(self.filePath) if files is not None: for file in", "'_Right', '_right'] def_left_list_list = [start_l_list, mid_l_list, end_l_list] def_right_list_list = [start_r_list,", "cmds.select(self.skinMeshes, r=True) # ウェイト情報を保存する関数 def weightCopy(self): saveData = {} #", "type='skinCluster') # スキンクラスタがあったらジョイントラベルを設定してウェイトミラー if srcSkinCluster: # バインド状態を転送する関数呼び出し skinJointAll = cmds.skinCluster(srcSkinCluster,", "sceneName=True).split('/')[:-1]) self.protect_path = os.path.join(self.scene_path, 'weight_protector') try: if not os.path.exists(self.protect_path): os.makedirs(self.protect_path)", "saveData[';influences'] # 子のノードがトランスフォームならダミーに親子付けして退避 common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='cut') influences = cmds.ls(influences, l=True,", "self.skinMeshes = [] self.skinMeshes.append(temp) # ファイルパスを生成しておく if path == 'default':", "'_R.', '_r.', '_R..', '_r..', '_Right', '_right'] def_left_list_list = [start_l_list, mid_l_list,", "mesh in this scene?.', ja=u'選択メッシュがありません。\\nシーン内のすべてのメッシュを処理しますか?').output() msg02 = lang.Lang(en='Yes', ja=u'はい').output() msg03", "meshName + '.xml' if os.path.isfile(self.filePath + '\\\\' + xml_name): if", "'_l', '_L.', '_l.', '_L..', 
'_l..', '_Left', '_left'] end_r_list = ['_R',", "msg02: objects = cmds.ls(type='transform') if not objects: return mute_flag =", "Command') #ウェイトのミュートをトグル def toggle_mute_skinning(): msg01 = lang.Lang( en='No mesh selection.\\nWould", "= cmds.skinCluster(srcSkinCluster, q=True, inf=True) sub_influences = list(set(all_influences) - set(influences)) if", "perform the transfer of weight because it is not a", "+ '] >>> [' + dst + ']' dstSkinCluster =", "method=self.method, deformer=dstSkinCluster, path=self.filePath + '\\\\') else: cmds.deformerWeights(xml_name, im=True, deformer=dstSkinCluster, method=self.method,", "'weight_protector') try: if not os.path.exists(self.protect_path): os.makedirs(self.protect_path) except Exception as e:", "msg04 = lang.Lang( en='Skinning is disabled', ja=u'スキニングは無効になりました') .output() msg05 =", "= [] object.append(temp) for skinJoint in object: objTypeName = cmds.objectType(skinJoint)", "split_name): side = j + 1 if i == 1:", "print 'Weight paste to : ' + str(skinMesh) else: print", "os.listdir(self.filePath) print files if len(files) == 2: for file in", "e: print e.message print 'Error !! 
Skin bind failed :", "= lang.Lang(en='No', ja=u'いいえ').output() msg04 = lang.Lang( en='Skinning is disabled', ja=u'スキニングは無効になりました')", "continue else: dstSkinCluster = dstSkinCluster[0] tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく if self.engine", ": ' + str(skinMesh) else: print 'Not exist seved weight", "r_list]): for k, lr in enumerate(lr_list): if i == 0:", "l=True, tr=True) # バインド dstSkinCluster = cmds.skinCluster( skinMesh, influences, omi=maintainMaxInfluences,", "def transfer_weight(skinMesh, transferedMesh, transferWeight=True, returnInfluences=False, logTransfer=True): ''' スキンウェイトの転送関数 転送先がバインドされていないオブジェクトの場合は転送元のバインド情報を元に自動バインド ・引数", "''' self.skinMeshes = skinMeshes self.saveName = saveName self.method = method", "len(objects) if not objects: all_mesh = cmds.confirmDialog(m=msg01, t='', b= [msg02,", "side: break if side: break #print 'joint setting :', split_name,", "= False all_influences = [] for skinMesh in self.skinMeshes: try:", "or self.method == 'over': cmds.deformerWeights(xml_name, im=True, method=self.method, deformer=dstSkinCluster, path=self.filePath +", "json import re class WeightCopyPaste(): def main(self, skinMeshes, mode='copy', saveName='default',", "mid_r_list = ['_R_', '_r_', '_Right_', '_right_'] end_l_list = ['_L', '_l',", "if self.weightFile == 'auto': weightFile = skinMesh else: weightFile =", "'r') as f: # ファイル開く'r'読み込みモード'w'書き込みモード saveData = json.load(f) # ロード", "dstShapes = cmds.listRelatives(dstNode, s=True, pa=True, type='mesh') dstSkinCluster = cmds.listConnections(dstShapes[0] +", "in skinJointAll: # ジョイントラベル設定関数呼び出し joint_label(skinJoint, visibility=False) if symWeight is False", "= cmds.ls(cmds.listRelatives(node, ad=True, f=True), type ='transform') ad_node += [node]+children #print", "if path == 'default': self.filePath = os.getenv('MAYA_APP_DIR') + '\\\\Scripting_Files\\\\weight\\\\' +", "transferedMesh, transferWeight=True, returnInfluences=False, logTransfer=True): ''' スキンウェイトの転送関数 
転送先がバインドされていないオブジェクトの場合は転送元のバインド情報を元に自動バインド ・引数 skinMesh→転送元メッシュ(1個,リスト形式でも可) transferedMesh(リスト形式,複数可、リストじゃなくても大丈夫)", "= cmds.ls(influences, l=True, tr=True) # バインド dstSkinCluster = cmds.skinCluster( skinMesh,", "srcNode→反転元 dstNode→反転先 symWeight→ウェイトミラーするかどうか ''' # スキンクラスタを取得 if srcNode is None:", "cmds.ls(cmds.listHistory(skinMesh), type='skinCluster') if not srcSkinCluster: continue # スキンクラスタがなかったら次に移行 tempSkinNode =", "massege02 + '[' + skinMesh + '] >>> [' +", "not objects: return mute_flag = 1 skin_list = [] for", "ds=dstSkinCluster, surfaceAssociation='closestPoint', influenceAssociation=['name', 'closestJoint', 'oneToOne'], normalize=True, noMirror=True ) if logTransfer:", "cmds.getAttr(skin[0]+'.envelope') > 0: mute_flag = 0 for skin in skin_list:", "= cmds.spaceLocator() for skinMesh in self.skinMeshes: # 読みに行くセーブファイル名を指定、autoならメッシュ名 if self.weightFile", "Exception as e: print e.message print 'Error !! Skin bind", "tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく if self.engine == 'maya': files = os.listdir(self.filePath)" ]
[ "package where I make use of graphs to indicate relationships", "the other version of pyConText where each object has a", "the targets. This provides for much simpler code than what", "either express or implied. #See the License for the specific", "under the Apache License, Version 2.0 (the \"License\"); #you may", "what exists in the other version of pyConText where each", "of the pyConText package where I make use of graphs", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless", "or agreed to in writing, software #distributed under the License", "targets. This provides for much simpler code than what exists", "the pyConText package where I make use of graphs to", "os version = {} with open(os.path.join(os.path.dirname(__file__),\"version.py\")) as f0: exec(f0.read(), version)", "and modifiers identified in the text; edges of the graphs", "between the targets. This provides for much simpler code than", "structures as we could chain together items\"\"\" import os version", "by applicable law or agreed to in writing, software #distributed", "version of pyConText where each object has a dictionary of", "for the specific language governing permissions and #limitations under the", "a directional graph could ultimately simplify our itemData structures as", "kept in sync with each other. Also it is hoped", "Also it is hoped that the use of a directional", "License for the specific language governing permissions and #limitations under", "law or agreed to in writing, software #distributed under the", "in sync with each other. Also it is hoped that", "This provides for much simpler code than what exists in", "of pyConText where each object has a dictionary of __modifies", "in compliance with the License. #You may obtain a copy", "that must be kept in sync with each other. Also", "each other. 
Also it is hoped that the use of", "not use this file except in compliance with the License.", "exists in the other version of pyConText where each object", "other version of pyConText where each object has a dictionary", "sync with each other. Also it is hoped that the", "together items\"\"\" import os version = {} with open(os.path.join(os.path.dirname(__file__),\"version.py\")) as", "I make use of graphs to indicate relationships between targets", "itemData structures as we could chain together items\"\"\" import os", "<filename>pyConTextNLP/__init__.py<gh_stars>1-10 #Copyright 2010 <NAME> # #Licensed under the Apache License,", "hoped that the use of a directional graph could ultimately", "of a directional graph could ultimately simplify our itemData structures", "with the License. #You may obtain a copy of the", "# #Unless required by applicable law or agreed to in", "edges of the graphs are relationships between the targets. This", "of graphs to indicate relationships between targets and modifiers. Nodes", "OR CONDITIONS OF ANY KIND, either express or implied. #See", "License. \"\"\"This is an alternative implementation of the pyConText package", "items\"\"\" import os version = {} with open(os.path.join(os.path.dirname(__file__),\"version.py\")) as f0:", "2.0 (the \"License\"); #you may not use this file except", "pyConText where each object has a dictionary of __modifies and", "(the \"License\"); #you may not use this file except in", "ANY KIND, either express or implied. #See the License for", "or implied. #See the License for the specific language governing", "governing permissions and #limitations under the License. 
\"\"\"This is an", "the targets and modifiers identified in the text; edges of", "each object has a dictionary of __modifies and __modifiedby that", "alternative implementation of the pyConText package where I make use", "we could chain together items\"\"\" import os version = {}", "in the text; edges of the graphs are relationships between", "text; edges of the graphs are relationships between the targets.", "simplify our itemData structures as we could chain together items\"\"\"", "#You may obtain a copy of the License at #", "indicate relationships between targets and modifiers. Nodes of thegraphs are", "the License for the specific language governing permissions and #limitations", "modifiers. Nodes of thegraphs are the targets and modifiers identified", "our itemData structures as we could chain together items\"\"\" import", "\"\"\"This is an alternative implementation of the pyConText package where", "under the License is distributed on an \"AS IS\" BASIS,", "targets and modifiers identified in the text; edges of the", "much simpler code than what exists in the other version", "identified in the text; edges of the graphs are relationships", "could ultimately simplify our itemData structures as we could chain", "simpler code than what exists in the other version of", "as we could chain together items\"\"\" import os version =", "and modifiers. Nodes of thegraphs are the targets and modifiers", "KIND, either express or implied. 
#See the License for the", "where each object has a dictionary of __modifies and __modifiedby", "#See the License for the specific language governing permissions and", "that the use of a directional graph could ultimately simplify", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required", "in writing, software #distributed under the License is distributed on", "# http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed", "of thegraphs are the targets and modifiers identified in the", "could chain together items\"\"\" import os version = {} with", "distributed on an \"AS IS\" BASIS, #WITHOUT WARRANTIES OR CONDITIONS", "the Apache License, Version 2.0 (the \"License\"); #you may not", "language governing permissions and #limitations under the License. \"\"\"This is", "must be kept in sync with each other. Also it", "{} with open(os.path.join(os.path.dirname(__file__),\"version.py\")) as f0: exec(f0.read(), version) __version__ = version['__version__']", "and __modifiedby that must be kept in sync with each", "graphs to indicate relationships between targets and modifiers. Nodes of", "are the targets and modifiers identified in the text; edges", "is an alternative implementation of the pyConText package where I", "in the other version of pyConText where each object has", "except in compliance with the License. #You may obtain a", "writing, software #distributed under the License is distributed on an", "pyConText package where I make use of graphs to indicate", "file except in compliance with the License. 
#You may obtain", "it is hoped that the use of a directional graph", "thegraphs are the targets and modifiers identified in the text;", "to in writing, software #distributed under the License is distributed", "BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable", "#Unless required by applicable law or agreed to in writing,", "to indicate relationships between targets and modifiers. Nodes of thegraphs", "and #limitations under the License. \"\"\"This is an alternative implementation", "code than what exists in the other version of pyConText", "2010 <NAME> # #Licensed under the Apache License, Version 2.0", "use this file except in compliance with the License. #You", "express or implied. #See the License for the specific language", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "use of graphs to indicate relationships between targets and modifiers.", "implied. #See the License for the specific language governing permissions", "an \"AS IS\" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY", "provides for much simpler code than what exists in the", "__modifies and __modifiedby that must be kept in sync with", "# #Licensed under the Apache License, Version 2.0 (the \"License\");", "the License. \"\"\"This is an alternative implementation of the pyConText", "is hoped that the use of a directional graph could", "has a dictionary of __modifies and __modifiedby that must be", "License. #You may obtain a copy of the License at", "OF ANY KIND, either express or implied. #See the License", "#limitations under the License. \"\"\"This is an alternative implementation of", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by", "graphs are relationships between the targets. 
This provides for much", "specific language governing permissions and #limitations under the License. \"\"\"This", "<NAME> # #Licensed under the Apache License, Version 2.0 (the", "version = {} with open(os.path.join(os.path.dirname(__file__),\"version.py\")) as f0: exec(f0.read(), version) __version__", "Version 2.0 (the \"License\"); #you may not use this file", "is distributed on an \"AS IS\" BASIS, #WITHOUT WARRANTIES OR", "on an \"AS IS\" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF", "Nodes of thegraphs are the targets and modifiers identified in", "between targets and modifiers. Nodes of thegraphs are the targets", "for much simpler code than what exists in the other", "# # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or", "implementation of the pyConText package where I make use of", "#distributed under the License is distributed on an \"AS IS\"", "targets and modifiers. Nodes of thegraphs are the targets and", "relationships between the targets. This provides for much simpler code", "other. Also it is hoped that the use of a", "graph could ultimately simplify our itemData structures as we could", "the graphs are relationships between the targets. This provides for", "the License. #You may obtain a copy of the License", "be kept in sync with each other. Also it is", "use of a directional graph could ultimately simplify our itemData", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "applicable law or agreed to in writing, software #distributed under", "#Licensed under the Apache License, Version 2.0 (the \"License\"); #you", "may obtain a copy of the License at # #", "chain together items\"\"\" import os version = {} with open(os.path.join(os.path.dirname(__file__),\"version.py\"))", "this file except in compliance with the License. #You may", "compliance with the License. #You may obtain a copy of", "under the License. 
\"\"\"This is an alternative implementation of the", "than what exists in the other version of pyConText where", "\"AS IS\" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "a dictionary of __modifies and __modifiedby that must be kept", "are relationships between the targets. This provides for much simpler", "IS\" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "object has a dictionary of __modifies and __modifiedby that must", "License, Version 2.0 (the \"License\"); #you may not use this", "dictionary of __modifies and __modifiedby that must be kept in", "of __modifies and __modifiedby that must be kept in sync", "make use of graphs to indicate relationships between targets and", "agreed to in writing, software #distributed under the License is", "permissions and #limitations under the License. \"\"\"This is an alternative", "import os version = {} with open(os.path.join(os.path.dirname(__file__),\"version.py\")) as f0: exec(f0.read(),", "CONDITIONS OF ANY KIND, either express or implied. 
#See the", "modifiers identified in the text; edges of the graphs are", "#you may not use this file except in compliance with", "License is distributed on an \"AS IS\" BASIS, #WITHOUT WARRANTIES", "the specific language governing permissions and #limitations under the License.", "an alternative implementation of the pyConText package where I make", "__modifiedby that must be kept in sync with each other.", "may not use this file except in compliance with the", "\"License\"); #you may not use this file except in compliance", "required by applicable law or agreed to in writing, software", "the text; edges of the graphs are relationships between the", "http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to", "ultimately simplify our itemData structures as we could chain together", "#Copyright 2010 <NAME> # #Licensed under the Apache License, Version", "#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "where I make use of graphs to indicate relationships between", "Apache License, Version 2.0 (the \"License\"); #you may not use", "= {} with open(os.path.join(os.path.dirname(__file__),\"version.py\")) as f0: exec(f0.read(), version) __version__ =", "directional graph could ultimately simplify our itemData structures as we", "at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law", "relationships between targets and modifiers. Nodes of thegraphs are the", "the use of a directional graph could ultimately simplify our", "with each other. Also it is hoped that the use", "software #distributed under the License is distributed on an \"AS", "of the graphs are relationships between the targets. This provides", "the License is distributed on an \"AS IS\" BASIS, #WITHOUT" ]
[ "_cfg = {} for setup in cfg.keys(): _cfg[setup] = {}", "to include in the table. The type can be anything", "that the matching meta values are not # unique for", "from one and only one instrument ' 'configuration with a", "self.set_configurations(fill=setup) self.set_calibration_groups(default=True) self.set_combination_groups() def get_configuration(self, indx, cfg_keys=None): \"\"\" Return the", "unique configurations, but the frame types have not been set", "Set the default if requested and 'calib' doesn't exist yet", "at the heart of PypeItMetaData. Args: files (:obj:`str`, :obj:`list`): One", "+= 'Continuing, but the following frames may be empty or", "provides the configurations themselves. This is mostly a convenience function", "If None, this is set by :func:`unique_configurations`. force (:obj:`bool`, optional):", "# Get the list of frames of this type without", "'calibbit' not in self.keys(): msgs.error('Calibration groups are not set. First", "Ignore calibration groups in the provided list. Raises: PypeItError: Raised", "self.configs[cfg_iter[cfg_indx]] = {} msgs.info('All files assumed to be from a", "option. Args: flag_unknown (:obj:`bool`, optional): Instead of crashing out if", "'setup' or 'calibbit' columns haven't been defined. \"\"\" if 'setup'", "One or more 0-indexed rows in the table with the", "dictionary with the types designated by the user. The file", "np.argsort(mjd) subtbl = subtbl[isort] subtbl.write(ff, format='ascii.fixed_width') ff.write('##end\\n') ff.close() # TODO:", "# undefined ('None') configurations #setup = np.unique(self['setup'][in_group]).tolist() setup = np.unique(self['setup'][in_cbit]).tolist()", "single calibration group, if the 'calib' column does not exist", "msgs.warn('Ignoring {0} frames with configuration set to None.'.format( np.sum(ignore))) self.configs", "= self.construct_obstime(row) if obstime is None else obstime tiso =", "to that group. ngroups = 0 for i in range(len(self)):", "configuration. 
The metadata keywords in the dictionary should be the", "latter is provided. \"\"\" return self.frame_paths(self.find_frames(ftype, calib_ID=calib_ID)) def frame_paths(self, indx):", "setup = np.unique(self['setup'][in_cbit]).tolist() if 'None' in setup: setup.remove('None') # Make", "{0} configurations!'.format(len(cfg_iter))) self.configs[cfg_iter[cfg_indx]] = self.get_configuration(i, cfg_keys=cfg_keys) cfg_indx += 1 msgs.info('Found", "framebit, setup, calib, and calibbit. sort_col (:obj:`str`, optional): Name of", "rows (`numpy.ndarray`_, optional): A boolean vector selecting the rows of", "internal table so that it is unaltered output_tbl = self.table.copy()", "The list of unique setup names. A second returned object", "frames cannot be assigned to a configuration, the spectrograph defined", "msgs.warn('When setting the instrument configuration for {0} '.format(ftype) + 'frames,", "setup lines setup_lines = dict_to_lines({'Setup {0}'.format(setup): utils.yamlify(cfg[setup])}, level=1) # Get", "in _cfg.keys(): del _cfg['None'] return _cfg def unique_configurations(self, force=False, copy=False,", "the provided row. The master key is the combination of", "The configuration ID is the same as included in the", "strict=strict, usrdata=usrdata)) # Merge with user data, if present if", "open(ofile, 'w') for setup in cfgs.keys(): # Get the subtable", "the calibration group of each frame; see :attr:`calib_bitmask`. Args: global_frames", "If it can't it will just add the column anyway,", "key in the provided table. \"\"\" meta_data_model = meta.get_meta_data_model() #", "science frames are associated with one calibration group. TODO: Is", "of calibration groups.\"\"\" return None if self.calib_bitmask is None else", "= self.type_bitmask.flagged(type_bits, flag='standard') for b, f, ra, dec in zip(type_bits[indx],", "set of configuration keys # meaning that the instrument setup", "pypeit_setup. 
If the user edits back in a frame that", "to be used in the fits table> \"\"\" # Columns", "usrdata[key][nones] = None # Rest # Allow for str RA,", "d, cfg in _configs.items(): if row_match_config(self.table[i], cfg, self.spectrograph): self.table['setup'][i] =", "input configuration Args: row (astropy.table.Row): From fitstbl config (dict): Defines", "indx if return_index else setups def _get_cfgs(self, copy=False, rm_none=False): \"\"\"", "The metadata keywords in the dictionary should be the same", "# Select frames indx = self.type_bitmask.flagged(self['framebit'], ftype) if calib_ID is", "set_configurations.') if os.path.isfile(ofile) and not overwrite: msgs.error('{0} already exists. Use", "the table and specify any validation checks. par (:obj:`pypeit.par.pypeitpar.PypeItPar`): PypeIt", "sets all the configurations to the provided `setup` - assigns", "meaning that the instrument setup has only one configuration. if", "and darks) if global_frames is not None: if 'frametype' not", "self.type_bitmask.turn_off(b, flag='science' if foundstd else 'standard') # Find the files", "{k:self.table[k][indx] for k in _cfg_keys} def master_key(self, row, det=1): \"\"\"", "name of an ascii file to which to write the", "frame. obstime (:class:`astropy.time.Time`, optional): The MJD of the observation. If", "cfg_indx += 1 # Check if any of the other", "an output file. 
Returns: `astropy.table.Table`: The table object that would", "'manual'], [int, int, str]) # Initialize internal attributes self.configs =", "'usrdata argument of instantiation of PypeItMetaData.') usr_row = usrdata[idx] #", "output=None, rows=None, columns=None, sort_col=None, overwrite=False, header=None): \"\"\" Write the metadata", "'slitwid':slitwid, 'slitlen':slitlen}, 'binning': binning, # PypeIt orientation binning of a", "TypeError('Must provide an astropy.io.table.Table instance.') if 'filename' not in usrdata.keys():", "(backwards compatability) if key in ['ra', 'dec'] and not radec_done:", "None, any row of the specified frame type is included.", "specified frame type is included. index (:obj:`bool`, optional): Return an", "types to use in all calibration groups (e.g., ['bias', 'dark']).", "__getitem__(self, item): return self.table.__getitem__(item) def __setitem__(self, item, value): return self.table.__setitem__(item,", "correctly ordered srt = [np.where(f == self.table['filename'])[0][0] for f in", "given frame type. The frames must also match the science", "pypeit file) if 'calib' in self.keys() and 'calibbit' not in", "level of the metadata table, are directory, filename, frametype, framebit,", "row matches the input configuration \"\"\" # Loop on keys", "None] # Print status message msg = 'Time invalid for", "one instrument ' 'configuration with a valid letter identifier; i.e.,", "unique configurations.') uniq, indx = np.unique(self['setup'], return_index=True) ignore = uniq", "a calib file? def write_calib(self, ofile, overwrite=True, ignore=None): \"\"\" Write", "= np.asarray(data['mjd']) filenames = np.asarray(data['filename']) bad_files = filenames[mjd == None]", "the integers are unique self['calibbit'][i] = self.calib_bitmask.turn_on(self['calibbit'][i], grp) def _check_calib_groups(self):", "self.set_pypeit_cols(write_bkg_pairs=True) else: all_cols = list(self.keys()) tbl_cols = columns if isinstance(columns,", "and calibbit. 
sort_col (:obj:`str`, optional): Name of the column to", "ignore anything listed in # the ignore_frames indx = np.arange(len(self))", "Initialize internal attributes self.configs = None self.calib_bitmask = None #", "copy is True, this is done *after* :attr:`configs` is copied", "that have been ignored in the determination of the unique", "\"\"\" Write the metadata either to a file or to", "column. This is used to set the internal :attr:`configs`. If", "attribute is not None, this function simply returns :attr:`config` (cf.", "# TODO: Add in a call to clean_configurations? I didn't", "dict_to_lines from pypeit.par import PypeItPar from pypeit.par.util import make_pypeit_file from", "type of the `usrdata` column to the existing data type.", "keywords. Also raised when some frames cannot be assigned to", "cfg_keys return {k:self.table[k][indx] for k in _cfg_keys} def master_key(self, row,", "\"\"), self.spectrograph.camera, datetime.datetime.strftime(dtime, '%Y%m%dT'), tiso.value.split(\"T\")[1].replace(':','')) def get_setup(self, row, det=None, config_only=False):", "and not overwrite: raise FileExistsError(f'{ofile} already exists; set flag to", "to current directory. If the output directory does not exist,", "the PypeItMetaData object # will return an astropy.table.Table, not a", "or 'dec' not in self.keys(): msgs.warn('Cannot associate standard with science", "ofiles def write(self, output=None, rows=None, columns=None, sort_col=None, overwrite=False, header=None): \"\"\"", "get_configuration_names(self, ignore=None, return_index=False, configs=None): \"\"\" Get the list of the", "to use in the data reduction. 
\"\"\" def __init__(self, spectrograph,", "not isinstance(usrdata, table.Table): raise TypeError('Must provide an astropy.io.table.Table instance.') if", "for str_j in string.ascii_uppercase] cfg_iter = list(string.ascii_uppercase) + double_alphabet cfg_indx", "== setup if not np.any(indx): continue subtbl = self.table[output_cols][indx] #", "self.table[key].dtype # Deal with None's properly nones = usrdata[key] ==", "upper-case letters: A, B, C, etc. double_alphabet = [str_i +", "use cfg_keys = self.spectrograph.configuration_keys() # Configuration identifiers are iterations through", "'calib' doesn't exist yet if 'calib' not in self.keys() and", "optional): If all of 'comb_id' values are less than 0", "io import string from copy import deepcopy import datetime from", "only one instrument ' 'configuration with a valid letter identifier;", "to the screen. The method allows you to set the", "Initialize internals self.spectrograph = spectrograph self.par = par if not", "msgs.warn(msg) # And remove 'em self.table = self.table[good] def _set_calib_group_bits(self):", "must also be matched to the relevant science frame. Args:", ".. note:: This should only be run if all files", "user-provided dictionary does not match table length.') msgs.info('Using user-provided frame", "instrument. Args: output_path (:obj:`str`, optional): Root path for the output", "calibration group is the same as the calibration bit number,", "def set_combination_groups(self, assign_objects=True): \"\"\" Set combination groups. .. note:: :attr:`table`", "Returns: :obj:`list`: List of ``PypeIt`` files generated. \"\"\" # Set", "'calib' column is not present, set a single calibration group", "to 'None'. If copy is True, this is done *after*", "save to each file. The class is used to provide", "etc. double_alphabet = [str_i + str_j for str_i in string.ascii_uppercase", "a given calibration group. .. todo:: - This is for", "the returned objects. If ``'all'``, pass back all configurations. 
Otherwise,", "not set. First run get_frame_types.') if ftype == 'None': return", "= np.arange(self.spectrograph.ndet)+1 if det is None else [det] #for d", "in a call to clean_configurations? I didn't add it #", "unique setup names. A second returned object provides the indices", "t = table.Table([ftype_colm, fbits_colm]) if merge: self['frametype'] = t['frametype'] self['framebit']", "index=False): \"\"\" Find the rows with the associated frame type.", "the table for key in usrdata.keys(): self.table[key] = usrdata[key][srt] def", "include in the table. data (table-like, optional): The data to", "files show a different # configuration. for i in indx[1:]:", "and interface to the relevant fits file metadata used during", "metakey is either not set or a string assert metakey", "None, the table contents are printed to the screen. If", "Args: write_bkg_pairs (:obj:`bool`, optional): Add additional ``PypeIt`` columns for calib,", "the frames to this (first) configuration self.table['setup'][indx] = cfg_key continue", "Ignores other inputs. Raises: PypeItError: Raised if none of the", "np.where(indx)[0] if index else indx def find_frame_files(self, ftype, calib_ID=None): \"\"\"", "object. def __getitem__(self, item): return self.table.__getitem__(item) def __setitem__(self, item, value):", "b = self.type_bitmask.turn_off(b, flag='science' if foundstd else 'standard') # Find", "table data['directory'][idx], data['filename'][idx] = os.path.split(ifile) if not data['directory'][idx]: data['directory'][idx] =", "'none' if 'slitlen' not in self.keys() else self['slitlen'][row] binning =", "defined. \"\"\" if 'setup' not in self.keys(): msgs.error('Cannot write sorted", "or both? if 'ra' not in self.keys() or 'dec' not", "Queries whether a row from the fitstbl matches the input", "the # correct type... if int(str(ftype_colmA.dtype)[2:]) < 9: ftype_colm =", "this here? 
indx = self.type_bitmask.flagged(type_bits, flag='standard') for b, f, ra,", "# Include the user data in the table for key", "this group #in_group = self.find_calib_group(i) in_cbit = self['calibbit'] == cbit", "is_None = np.logical_not(indx) srt = np.append(np.where(is_None)[0], np.where(indx)[0][np.argsort(output_tbl[sort_col][indx].data)]) output_tbl = output_tbl[tbl_cols][srt]", "Consolidate with :func:`convert_time` ? Args: row (:obj:`int`): The 0-indexed row", "'missed by the automatic identification.') b = self.type_bitmask.turn_off(b, flag='standard') continue", "msgs.info('Using user-provided frame types.') for ifile,ftypes in user.items(): indx =", "on the string values of the 'calib' column. \"\"\" #", ":func:`unique_configurations` because the latter determines and provides the configurations themselves.", "only used when building the metadata from the fits files.", "provided list. write_bkg_pairs (:obj:`bool`, optional): Add additional ``PypeIt`` columns for", "indx = np.isin(self[key], cfg_limits[key]) if not np.all(indx): msgs.warn('Found frames with", "\"\"\" if isinstance(indx, (int,np.integer)): return os.path.join(self['directory'][indx], self['filename'][indx]) return [os.path.join(d,f) for", "standard with science frames without sky coordinates.') else: # TODO:", "columns, types): \"\"\" Impose a set of types on certain", "if files is None else self._build(files, strict=strict, usrdata=usrdata)) # Merge", "configurations are not set not_setup = self.table['setup'] == 'None' if", "the configurations matched to this provided string or list of", "not in self.keys(): self['manual'] = '' def write_sorted(self, ofile, overwrite=True,", "'pypeit': tbl_cols = self.set_pypeit_cols(write_bkg_pairs=True) else: all_cols = list(self.keys()) tbl_cols =", "metadata keywords. 
Also raised when some frames cannot be assigned", "the rows input if rows is not None and len(rows)", "np.ones(len(self), dtype=bool) for ftype in ignore_frames: use &= np.logical_not(self.find_frames(ftype)) indx", "of keys to use cfg_keys = self.spectrograph.configuration_keys() # Configuration identifiers", "*after* :attr:`configs` is copied to a new dictionary. Returns: :obj:`dict`:", "keywords set by config_indpendent_frames are not ' \\ 'correctly defined", "run after the ``'setup'`` column has been set, this simply", "as a string with comma-separated types. setup (:obj:`str`): If the", "file provides the unique instrument configurations (setups) and the association", "science frame. \"\"\" if 'comb_id' not in self.keys(): self['comb_id'] =", "for i in indx: self['calib'][i] = calibs # Set the", "provided if rows is not None: output_tbl = output_tbl[rows] #", "is used to set the internal :attr:`configs`. If this attribute", "A, B, C, etc. double_alphabet = [str_i + str_j for", "configuration. .. todo:: - Maintain a detailed description of the", "get_configuration(self, indx, cfg_keys=None): \"\"\" Return the configuration dictionary for a", "is returned. Otherwise, the string is interpreted as the name", "the # metakey is either not set or a string", "we need to specify 'all' here? Can't `configs is #", "the internal table so that it is unaltered output_tbl =", "bits have not (likely because the # data was read", "= _cfg # Iterate through the calibration bit names as", "calibration # group based on the configuration. This will change!", "the exiting table. Returns: :obj:`astropy.table.Table`: A Table with two columns,", "= None # Rest # Allow for str RA, DEC", "group bits. table (:class:`astropy.table.Table`): The table with the relevant metadata", "validation checks. par (:class:`pypeit.par.pypeitpar.PypeItPar`): PypeIt parameters used to set the", "we need a calib file? def write_calib(self, ofile, overwrite=True, ignore=None):", "is created. 
cfg_lines (:obj:`list`, optional): The list of configuration lines", "else self['decker'][row] slitwid = 'none' if 'slitwid' not in self.keys()", "None and 'all' not in _configs: use = np.isin(setups, _configs)", "constructed using :func:`construct_obstime`. Returns: str: The root name for file", "columns to include in the output file. Can be provided", "*should not* be called by any method outside of this", "os.makedirs(odir) # Create the output file name ofiles[j] = os.path.join(odir,", "Skip this group if ignore is not None and cbit", "the calibration groups associated with a specific frame. \"\"\" return", "been defined. \"\"\" if 'setup' not in self.keys(): msgs.error('Cannot write", "list of keys to use cfg_keys = self.spectrograph.configuration_keys() # Configuration", "with the relevant metadata for each fits file to use", "'setup' column does not exist, fill the configuration setup columns", "file exists. \"\"\" # Check the file can be written", "group(s), and detector. Raises: PypeItError: Raised if the 'setup' or", "not exist, this sets the combination groups to be either", "for QA now; but could probably use the pypeit file", "'%Y%m%dT'), tiso.value.split(\"T\")[1].replace(':','')) def get_setup(self, row, det=None, config_only=False): \"\"\" Construct the", "See above. Raises: TypeError: Raised if `usrdata` is not an", "matches the input configuration \"\"\" # Loop on keys in", "Raised if 'setup' column is not defined, or if `global_frames`", "for configuration keys is set by :func:`~pypeit.spectrographs.spectrograph.Spectrograph.valid_configuration_values`. \"\"\" cfg_limits =", "h in _header: f.write(f'# {h}\\n') f.write('\\n') f.write('\\n'.join(data_lines)) f.write('\\n') # Just", "strict=True): if data is None and files is None: #", "if 'None' in configs: configs.remove('None') # Ignore frames with undefined", "bad_files: msg += ' {0}\\n'.format(file) msgs.warn(msg) # Return return data", "a given frame. 
This is not the same as the", "step by setting `match_type=False`. Args: usrdata (:obj:`astropy.table.Table`): A user provided", "msgs.error('To account for ignored frames, types must have been defined;", "calib, and calibbit. sort_col (:obj:`str`, optional): Name of the column", "== len(self.configs) if unique: if cfg_indx == len(cfg_iter): msgs.error('Cannot assign", "setup_lines = dict_to_lines({'Setup {0}'.format(setup): utils.yamlify(cfg[setup])}, level=1) # Get the paths", "parse from pypeit.core import meta from pypeit.io import dict_to_lines from", "file. calib_bitmask (:class:`BitMask`): The bitmask used to keep track of", "setup. det (:obj:`int`, optional): The 1-indexed detector to include. If", "exists. Raises: PypeItError: Raised if 'setup' column is not defined,", "write the table contents. rows (`numpy.ndarray`_, optional): A boolean vector", "frames to meet the other checks in this call. #", "are printed to the screen. If ``'table'``, the table that", "def get_frame_types(self, flag_unknown=False, user=None, merge=True): \"\"\" Generate a table of", "invalid key, at least for now the DEIMOS image reader", "'setup' not in self.keys(): msgs.error('Must have defined \\'setup\\' column first;", "type of each file. The metadata is validated using checks", "if len(self.calib_bitmask.flagged_bits(self['calibbit'][i])) > 1: msgs.error('Science frames can only be assigned", "already exists, the configurations are **not** reset unless you call", "nested dictionary, one dictionary per configuration with the associated metadata", "be redetermined. 
Otherwise the configurations are only determined if :attr:`configs`", "frames in the table included in the selected calibration group.", "# Select the output rows if a vector was provided", "# Always write the table in ascii format with io.StringIO()", "(:obj:`astropy.table.Table`): A user provided set of data used to supplement", "\"\"\" if 'setup' not in self.keys(): msgs.error('Cannot provide instrument setup", "configuration, the calibration group, and the detector. The configuration ID", "the relevant follow-on code so that we # don't have", "indx = np.logical_not(self.type_bitmask.flagged(type_bits)) if np.any(indx): msgs.info(\"Couldn't identify the following files:\")", "self._get_cfgs(copy=copy, rm_none=rm_none) # Use the first file to set the", "the table to write. If None, all rows are written.", "the input configuration Args: row (astropy.table.Row): From fitstbl config (dict):", "present if usrdata is not None: self.merge(usrdata) # Impose types", "if any of the other files show a different #", "# Set output path if output_path is None: output_path =", "np.atleast_1d(configs) # TODO: Why do we need to specify 'all'", "Defines the configuration spectrograph (pypeit.spectrographs.spectrograph.Spectrograph): Used to grab the rtol", "the instantiation of PypeItMetaData.' ' The table will be empty!')", "sorting is not valid. FileExistsError: Raised if overwrite is False", "is the desired behavior since if there are # empty", "existing frame types are overwitten by the provided type. \"\"\"", "use for sorting the output. If None, the table is", "the user-defined frame types from the input dictionary if user", "file? def write_calib(self, ofile, overwrite=True, ignore=None): \"\"\" Write the calib", "based on the provided object - sets all the configurations", "want all the configurations? Or can we # make the", ":attr:`config` (cf. ``force``). .. warning:: Any frame types returned by", "generated. 
\"\"\" # Set output path if output_path is None:", "(dict): Defines the configuration spectrograph (pypeit.spectrographs.spectrograph.Spectrograph): Used to grab the", "of column names types (:obj:`list`): List of types \"\"\" for", "constructs the configuration dictionary using the unique configurations in that", "calibration groups in the provided list. Raises: PypeItError: Raised if", "will be empty msgs.warn('Both data and files are None in", "dict: A dictionary with the metadata values from the selected", "(this is here because the spectrograph # needs to be", "file ff.write('##########################################################\\n') ff.write('Setup {:s}\\n'.format(setup)) ff.write('\\n'.join(dict_to_lines(cfgs[setup], level=1)) + '\\n') ff.write('#---------------------------------------------------------\\n') mjd", "in usrdata.keys(): raise KeyError('The user-provided table must have \\'filename\\' column!')", "to match the data type of the `usrdata` column to", "have configurations that cannot be reduced by PypeIt' \\ '", "'comb_id' values are less than 0 (meaning they're unassigned), the", "listed standard, # then it is probably a standard star", "provided object - sets all the configurations to the provided", "or if the column to use for sorting is not", "for the writing routines. Args: ignore (:obj:`list`, optional): Ignore configurations", "TODO: This should be converted to an assert statement... raise", "if rows is not None and len(rows) != len(self.table): raise", "the items in the metadata table listed by the spectrograph", "internals self.spectrograph = spectrograph self.par = par if not isinstance(self.par,", "from the same configuration to the same # calibration group;", "frames with the correct ID name or start by #", "new dictionary. Returns: :obj:`dict`: A nested dictionary, one dictionary per", "self.find_frames('science') for i in range(len(self)): if not is_science[i]: continue if", "number(s). 
If a tuple, it must include detectors designated as", "(likely because the # data was read from a pypeit", "table, one should typically provide either the file list from", "that would have been written/printed if ``output == 'table'``. Otherwise,", "cfg_keys = self.spectrograph.configuration_keys() # Configuration identifiers are iterations through the", "match the existing number of table rows. merge (:obj:`bool`, optional):", "the set of objects (science or standard frames) to a", "frames) to a unique integer. If the 'comb_id' or 'bkg_id'", "self.keys(): msgs.error('Cannot provide instrument setup without \\'setup\\' column; ' 'run", "to include in the file. If None are provided, the", "sorted_files=data_lines, paths=paths) # Return return ofiles def write(self, output=None, rows=None,", "or 'framebit' in self.keys(): msgs.warn('Removing existing frametype and framebit columns.')", "warning and continue. Attributes: spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): The spectrograph used to", "config_indpendent_frames are not ' \\ 'correctly defined for {0}; values", "If the 'setup' column already exists, the configurations are **not**", "to write if configs is None or configs == 'all'", "else [det] #for d in _det: # setup[skey][str(d).zfill(2)] \\ #", "del self['calibbit'] # Groups have already been set if 'calib'", "= open(ofile, 'w') ff.write(yaml.dump(utils.yamlify(cfg))) ff.close() def write_pypeit(self, output_path=None, cfg_lines=None, write_bkg_pairs=False,", "was read from a pypeit file) if 'calib' in self.keys()", "None # Rest # Allow for str RA, DEC (backwards", "self['calib'][(self['setup'] == configs[i]) & (self['framebit'] > 0)] = str(i) #", "extraction Raises: PypeItError: Raised if the 'setup' isn't been defined.", "= self.type_bitmask.turn_on(type_bits[indx], flag=ftype) # Find the nearest standard star to", "unique self['calibbit'][i] = self.calib_bitmask.turn_on(self['calibbit'][i], grp) def _check_calib_groups(self): 
\"\"\" Check that", "= time.Time(_obstime, format='isot') dtime = datetime.datetime.strptime(tiso.value, '%Y-%m-%dT%H:%M:%S.%f') return '{0}-{1}_{2}_{3}{4}'.format(self['filename'][row].split('.fits')[0], self['target'][row].replace(\"", "to write. If None, all rows are written. Shape must", "not set in the table. \"\"\" if 'framebit' not in", "# TODO: Is there a reason why this is not", "the # upper-case letters: A, B, C, etc. double_alphabet =", "string assert metakey is None or isinstance(metakey, str), \\ 'CODING", "and files are None in the instantiation of PypeItMetaData.' '", "isort = np.argsort(mjd) subtbl = subtbl[isort] subtbl.write(ff, format='ascii.fixed_width') ff.write('##end\\n') ff.close()", "get_frame_types(self, flag_unknown=False, user=None, merge=True): \"\"\" Generate a table of frame", "bits self.calib_bitmask = BitMask(np.arange(ngroups)) self['calibbit'] = 0 # Set the", "is not present, set a single calibration group *for all", "of # pypeit_setup. If the user edits back in a", "is complicated by allowing some frame types to have no", "instantiated without any data. Args: spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): The spectrograph used", "will fault if :func:`fits.getheader` fails to read any of the", "str: The root name for file output. \"\"\" _obstime =", "if there were corrupt header cards mjd[mjd == None] =", "import dict_to_lines from pypeit.par import PypeItPar from pypeit.par.util import make_pypeit_file", "at the beginning of the list for col in ['framebit',", "unique instrument configurations (setups) and the association of each frame", "b = self.type_bitmask.turn_off(b, flag='standard') continue # If an object exists", "# Some frame types may have been ignored ignore_frames =", "the configuration setup columns with this single identifier. Ignores other", "stop # producing/using the *.calib file. 
_cfg = {} for", "the data from the fits headers or the data directly.", "types indx = np.logical_not(self.type_bitmask.flagged(type_bits)) if np.any(indx): msgs.info(\"Couldn't identify the following", "this into a DataContainer # Initially tried to subclass this", "in the file. If None are provided, the vanilla configuration", "False to instead report a warning and continue. Attributes: spectrograph", "to this (first) configuration self.table['setup'][indx] = cfg_key continue # Find", "check should be done elsewhere # Check if os.path.basename(ifile) !=", "all_cols for col in tbl_cols] if np.any(badcol): raise ValueError('The following", "there is a problem with the reading the header for", "frames may be empty or have corrupt headers:\\n' for file", "len(configs) # TODO: Science frames can only have one calibration", "only be run if all files are from a single", "in a frame that has an # invalid key, at", "type. If the index is provided, the frames must also", "by the provided type. \"\"\" if not append: self['framebit'][indx] =", "the method always returns None. Raises: ValueError: Raised if the", "ff.close() # TODO: Do we need a calib file? def", "def __setitem__(self, item, value): return self.table.__setitem__(item, value) def __len__(self): return", "and the frames associated with each configuration. The output data", "the 'calib' column. \"\"\" # Find the number groups by", "&= self.find_calib_group(calib_ID) # Return return np.where(indx)[0] if index else indx", "optional): The list of files to include in the table.", "for f in usrdata['filename']] # Convert types if possible existing_keys", "in _det: # setup[skey][str(d).zfill(2)] \\ # = {'binning': binning, 'det':", "PypeItError: Raised if none of the keywords in the provided", "types must have been defined; run ' 'get_frame_types.') # For", "in the user-provided data? 
It may be (see get_frame_types) and", "this assert to check that the # metakey is either", "subtable of frames taken in this configuration indx = self['setup']", "use = np.isin(setups, _configs) setups = setups[use] indx = indx[use]", "ftype) if calib_ID is not None: # Select frames in", "Set the calibration bits for i in range(len(self)): # Convert", "assert statement... raise ValueError('CODING ERROR: Found high-dimensional column.') #embed(header='372 of", "more frames. Args: indx (:obj:`int`, array-like): One or more 0-indexed", "decker, 'slitwid':slitwid, 'slitlen':slitlen}, 'binning': binning, # PypeIt orientation binning of", "None, constructed using :func:`construct_obstime`. Returns: str: The root name for", "the dictionary with the configuration, don't include the top-level designation", "configuration, determine if any of the frames with # the", "or overwrite metadata read from the file headers. The table", "(:obj:`int`): The 0-indexed row in the table to edit frame_type", "= self.par['scienceframe']['exprng'] if ftype == 'science' \\ else self.par['calibrations']['{0}frame'.format(ftype)]['exprng'] #", "# proved too difficult. class PypeItMetaData: \"\"\" Provides a table", "writing to a # file... return None def find_calib_group(self, grp):", "headers headarr = self.spectrograph.get_headarr(ifile, strict=strict) # Grab Meta for meta_key", "per configuration with the associated values of the metadata associated", "This function can be used to initialize columns that the", "the frames in this group #in_group = self.find_calib_group(i) in_cbit =", "(pypeit.spectrographs.spectrograph.Spectrograph): Used to grab the rtol value for float meta", "method is only called for a preconstructed # pypeit file,", "if 'framebit' not in self.keys(): msgs.error('Frame types are not set.", "any row of the specified frame type is included. 
Returns:", "file name and type are expected to be the key", "needs to have dtype=object, otherwise # any changes to the", "optional): Instead of crashing out if there are unidentified files,", "item, value): return self.table.__setitem__(item, value) def __len__(self): return self.table.__len__() def", "This is for backwards compatibility, but we should consider reformatting", "# Loop over the frame types for i, ftype in", "self['calibbit'] = 0 # Set the calibration bits for i", "it must include detectors designated as a viable mosaic for", "The 'calib' column has a string type to make sure", "in the table for key in usrdata.keys(): self.table[key] = usrdata[key][srt]", "- Consolidate with :func:`convert_time` ? Args: row (:obj:`int`): The 0-indexed", "msgs.error('{0} already exists. Use ovewrite=True to overwrite.'.format(ofile)) # Grab output", "the master key for the file in the provided row.", "else: match.append(False) else: # The np.all allows for arrays in", "d,f in zip(self['directory'][indx], self['filename'][indx])] def set_frame_types(self, type_bits, merge=True): \"\"\" Set", "columns if write_bkg_pairs: extras += ['calib', 'comb_id', 'bkg_id'] # manual", "not in all_cols for col in tbl_cols] if np.any(badcol): raise", "metadata table. See ' 'usrdata argument of instantiation of PypeItMetaData.')", "== self.spectrograph.idname(ftype) if useIDname \\ # else np.ones(len(self), dtype=bool) #", "strict=strict) # Grab Meta for meta_key in self.spectrograph.meta.keys(): value =", "ftype in enumerate(self.type_bitmask.keys()): # # Initialize: Flag frames with the", "= usrdata[key] == 'None' usrdata[key][nones] = None # Rest #", "{} self.configs[cfg_iter[cfg_indx]] = {} msgs.info('All files assumed to be from", "of numbers l = np.amax([ 0 if len(n) == 0", "the frames associated with each configuration. The output data table", "with the reading the header for any of the provided", "Integer bitmask with the frame types. 
The length must match", "initialize columns that the user might add \"\"\" if 'manual'", "for b, f, ra, dec in zip(type_bits[indx], self['filename'][indx], self['ra'][indx], self['dec'][indx]):", "indx = self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng) # Turn on the relevant", "spectrograph.meta[k]['rtol']: match.append(True) else: match.append(False) else: # The np.all allows for", "setup names; run set_configurations.') # Unique configurations setups, indx =", "from astropy.table.Table, but that # proved too difficult. class PypeItMetaData:", "keywords all have values that will yield good PypeIt reductions.", "columns in are written; if ``'pypeit'``, the columns are the", "frame types may have been ignored ignore_frames = self.spectrograph.config_independent_frames() if", "', '.join(tbl_cols[badcol]))) # Make sure the basic parameters are the", "the files without any types indx = np.logical_not(self.type_bitmask.flagged(type_bits)) if np.any(indx):", "\"\"\" # Configurations have already been set if 'setup' in", "# Convert to a list of numbers l = np.amax([", "the frame types and bits. Args: type_bits (numpy.ndarray): Integer bitmask", "instead of a boolean array. Returns: numpy.ndarray: A boolean array,", "associated metadata for each. Raises: PypeItError: Raised if there are", "user-provided table must have \\'filename\\' column!') # Make sure the", "meta_key, value)) data[meta_key].append(value) msgs.info('Added metadata for {0}'.format(os.path.split(ifile)[1])) # JFH Changed", "to :attr:`spectrograph`. Additional valid keywords, depending on the processing level", "(:obj:`list`, optional): Ignore configurations in the provided list. 
return_index (:obj:`bool`,", "{0} files.\\n'.format(len(bad_files)) msg += 'Continuing, but the following frames may", "are not set not_setup = self.table['setup'] == 'None' if not", "append: self['framebit'][indx] = 0 self['framebit'][indx] = self.type_bitmask.turn_on(self['framebit'][indx], flag=frame_type) self['frametype'][indx] =", "the default 'all'? if configs is not None and 'all'", "return self['framebit'] == 0 # Select frames indx = self.type_bitmask.flagged(self['framebit'],", "is initialized, this function determines the unique instrument configurations by", "merge(self, usrdata, match_type=True): \"\"\" Use the provided table to supplement", "= np.unique(self['setup'][in_cbit]).tolist() if 'None' in setup: setup.remove('None') # Make sure", "(:obj:`bool`, optional): Return an array of 0-indexed indices instead of", "single calibration group.') @property def n_calib_groups(self): \"\"\"Return the number of", "# file... return None def find_calib_group(self, grp): \"\"\" Find all", "= {k:[] for k in self.spectrograph.meta.keys()} data['directory'] = ['None']*len(_files) data['filename']", "setup in cfgs.keys(): # Get the subtable of frames taken", "frame type of each file. The metadata is validated using", "see :attr:`calib_bitmask`. Args: global_frames (:obj:`list`, optional): A list of strings", "difficult. class PypeItMetaData: \"\"\" Provides a table and interface to", "d,f in zip(self['directory'][ftype_in_group], self['filename'][ftype_in_group])] # Write it ff = open(ofile,", "!= 1: msgs.error('Each calibration group must be from one and", "or list of strings (e.g., ['A','C']). See :attr:`configs`. Raises: PypeItError:", "the beginning of each string. Ignored if ``output`` does not", "the calibration group is the same as the calibration bit", "if the 'setup' isn't defined and split is True. Returns:", "cfg = self.unique_configurations(copy=True, rm_none=True) # TODO: We should edit the", "the frames to return. 
Can be an array of indices", "edited *in place*. If the 'setup' column already exists, the", "= ras.astype(dtype) usrdata['dec'][~nones] = decs.astype(dtype) radec_done = True else: usrdata[key][~nones]", "Build the table for idx, ifile in enumerate(_files): # User", "dictionary. .. todo:: - This is for backwards compatibility, but", "is # assigned to that group. ngroups = 0 for", "in [None, 'all']: tbl_cols = list(self.keys()) elif columns == 'pypeit':", "setup.remove('None') # Make sure that each calibration group should only", "KeyError('The user-provided table must have \\'filename\\' column!') # Make sure", "If None or ``'all'``, all columns in are written; if", "continue if metakey is None: # No matching meta data", "else self['dispname'][row] dispangle = 'none' if 'dispangle' not in self.keys()", "\"\"\" # Check the file can be written (this is", "the list of setup identifiers ('A', 'B', etc.) and the", "uniq == 'None' if np.sum(ignore) > 0: msgs.warn('Ignoring {0} frames", "using the unique configurations in that column. This is used", "function can be used to initialize columns that the user", "``'all'``, all columns in are written; if ``'pypeit'``, the columns", "to ignore rm = np.logical_not(np.isin(setups, ignore)) setups = setups[rm] indx", "spectrograph class. Args: row (:obj:`int`): The 0-indexed row used to", "str]) # Initialize internal attributes self.configs = None self.calib_bitmask =", "'.format(ftype) + 'frames, configuration {0} does not have unique '.format(cfg_key)", "method always returns None. Raises: ValueError: Raised if the columns", "http://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table.convert_bytestring_to_unicode # # Or we can force type_names() in bitmask", "file line; ``# `` is added to the beginning of", "self.table['setup'] == cfg_key for ftype, metakey in ignore_frames.items(): # TODO:", "of one or more frames. 
\"\"\" if isinstance(indx, (int,np.integer)): return", "# group based on the configuration. This will change! #", "'w') for setup in cfgs.keys(): # Get the subtable of", "to the strings will be truncated at 4 characters. self.table['calib']", "not been defined yet. \"\"\" if self.configs is not None", "configurations. configs (:obj:`str`, :obj:`list`, optional): One or more strings used", "the pypeit file output. .. todo:: - This is for", "user *might* add .. note:: :attr:`table` is edited in place.", "ValueError('idname is not set in table; cannot use it for", "with the same name. ignore (:obj:`list`, optional): Ignore configurations in", "or 'science' or both? if 'ra' not in self.keys() or", "for the provided spectrograph. It is expected that this table", "The latter is not checked. If None, this is set", "it... Args: frametype (:obj:`dict`): A dictionary with the types designated", "this change? # http://docs.astropy.org/en/stable/table/access_table.html#bytestring-columns-in-python-3 # # See also: # #", "None else np.atleast_1d(configs) # TODO: Why do we need to", "function will try to match the data type of the", "# Rest # Allow for str RA, DEC (backwards compatability)", "fill=None): \"\"\" Assign each frame to a configuration (setup) and", "last # few columns ncol = len(tbl_cols) for col in", "in the dictionary should be the same as in the", "calibration group. .. todo:: - This is for backwards compatibility,", "= value.replace('#', '') msgs.warn('Removing troublesome # character from {0}. 
Returning", "np.unique(self['directory'][in_cfg]).tolist() # Get the data lines subtbl = self.table[output_cols][in_cfg] subtbl.sort(['frametype','filename'])", "metakey in ignore_frames.items(): # TODO: For now, use this assert", "Finding unique configurations.') uniq, indx = np.unique(self['setup'], return_index=True) ignore =", "Allow for str RA, DEC (backwards compatability) if key in", "in tbl_cols])[0][0] if indx != 0: tbl_cols.insert(0, tbl_cols.pop(indx)) # Make", "Return return data # TODO: In this implementation, slicing the", "if data is None and files is None: # Warn", "fill return _configs = self.unique_configurations() if configs is None else", "*might* add .. note:: :attr:`table` is edited in place. This", "A list of columns to include in the output file.", "need a calib file? def write_calib(self, ofile, overwrite=True, ignore=None): \"\"\"", "row): \"\"\" Find the calibration groups associated with a specific", "only called for a preconstructed # pypeit file, which should", "of each fits file. calib_bitmask (:class:`BitMask`): The bitmask used to", "to construct the key. det (:obj:`int`, :obj:`tuple`, optional): The 1-indexed", "detectors are included. config_only (:obj:`bool`, optional): Just return the dictionary", "self.keys() and not force: return if 'setup' not in self.keys()", "place. This function can be used to initialize the combination", "Key names *must* match configuration_keys() for spectrographs setup = {skey:", "No matching meta data defined, so just set all #", "table for key in usrdata.keys(): self.table[key] = usrdata[key][srt] def finalize_usr_build(self,", "specific to the provided spectrograph are used. configs (:obj:`dict`): A", "can't it will just add the column anyway, with the", "no association with an instrument configuration - This is primarily", "*must* match the number of files in :attr:`table`. For frames", "been defined. 
\"\"\" if 'setup' not in self.keys(): msgs.error('Cannot provide", "the data type of the `usrdata` column to the existing", "{0}.'.format(key)) good &= indx if np.all(good): # All values good,", "else or just removed. assert isinstance(cfg_limits[key], list), \\ 'CODING ERROR:", "self._build(files, strict=strict, usrdata=usrdata)) # Merge with user data, if present", "If set to the string 'None', this returns all frames", "science frame is # assigned to that group. ngroups =", "is instantiated without any data. Args: spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): The spectrograph", "zip(self['directory'][indx], self['filename'][indx])] def set_frame_types(self, type_bits, merge=True): \"\"\" Set and return", "The output data table is identical to the pypeit file", "self.keys(): msgs.info('Setup column already set. Finding unique configurations.') uniq, indx", "of frame types to ignore but the frame types have", "are written. Shape must match the number of the rows", "for sorting the output. If None, the table is printed", "'calib' column. \"\"\" # Find the number groups by searching", "list of numbers l = np.amax([ 0 if len(n) ==", "write sorted instrument configuration table without \\'setup\\' ' 'column; run", "method will fault! Args: force (:obj:`bool`, optional): Force the configurations", "configuration with the associated metadata for each. \"\"\" _cfg =", "group. Args: grp (:obj:`int`): The calibration group integer. Returns: numpy.ndarray:", "# See also: # # http://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table.convert_bytestring_to_unicode # # Or we", "meet the other checks in this call. # indx &=", "= parse.str2list(self['calib'][i], ngroups) if grp is None: # No group", "and continue. Attributes: spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): The spectrograph used to collect", "associate standard with science frames without sky coordinates.') else: #", "of the frame. 
Returns: astropy.time.Time: The MJD of the observation.", "dict: The pypeit setup dictionary with the default format. Raises:", "(:obj:`int`, optional): Index of the calibration group that it must", "self.keys() else self['dispname'][row] dispangle = 'none' if 'dispangle' not in", "of columns to include in the output file. Can be", "output. Args: row (:obj:`int`): The 0-indexed row of the frame.", "must be from one and only one instrument ' 'configuration", "list grp = parse.str2list(self['calib'][i], ngroups) if grp is None: #", "Write the pypeit files ofiles = [None]*len(cfg_keys) for j,setup in", "(:obj:`bool`, optional): Force the configurations to be reset. fill (:obj:`str`,", "np.zeros(len(self), dtype=self.type_bitmask.minimum_dtype()) # Use the user-defined frame types from the", "or a string assert metakey is None or isinstance(metakey, str),", "with this single identifier. \"\"\" self.get_frame_types(user=frametype) # TODO: Add in", "indx (:obj:`int`): The index of the table row to use", "a single calibration group.') @property def n_calib_groups(self): \"\"\"Return the number", "Otherwise the configurations are only determined if :attr:`configs` has not", "== ifile type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftypes.split(',')) return self.set_frame_types(type_bits, merge=merge) #", "include two columns called `comb_id` and `bkg_id` that identify object", "not radec_done: ras, decs = meta.convert_radec(usrdata['ra'][~nones].data, usrdata['dec'][~nones].data) usrdata['ra'][~nones] = ras.astype(dtype)", "= self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs, write_manual=write_manual) # Write the pypeit files ofiles =", "Can be provided as a list directly or as a", "to continue if 'frametype' not in self.keys(): msgs.error('To account for", "string to the group list grp = parse.str2list(self['calib'][i], ngroups) if", "PypeIt, configuring the control-flow and algorithmic parameters and listing the", "(:obj:`dict`, optional): A nested 
dictionary, one dictionary per configuration with", "number of calibration groups.\"\"\" return None if self.calib_bitmask is None", "their MJD. This is the desired behavior since if there", "= self.type_bitmask.turn_on(type_bits[indx], flag=ftypes.split(',')) return self.set_frame_types(type_bits, merge=merge) # Loop over the", "# comb, bkg columns if write_bkg_pairs: extras += ['calib', 'comb_id',", "get_frame_types.') if ftype == 'None': return self['framebit'] == 0 #", "a DataContainer # Initially tried to subclass this from astropy.table.Table,", "string without setup and calibbit; ' 'run set_configurations and set_calibration_groups.')", "Raised if there are list of frame types to ignore", "of meta for this configuration uniq_meta = np.unique(self.table[metakey][in_cfg].data) # Warn", "length must match the existing number of table rows. merge", "ff.write('\\n'.join(dict_to_lines(cfgs[setup], level=1)) + '\\n') ff.write('#---------------------------------------------------------\\n') mjd = subtbl['mjd'].copy() # Deal", "cannot be reduced by PypeIt' \\ ' and will be", "# Return return data # TODO: In this implementation, slicing", "run set_configurations and set_calibration_groups.') if os.path.isfile(ofile) and not overwrite: msgs.error('{0}", "if isinstance(columns, list) else columns.split(',') badcol = [col not in", "fits file. calib_bitmask (:class:`BitMask`): The bitmask used to keep track", "with the first occurence of these configurations. configs (:obj:`str`, :obj:`list`,", "a string.'.format( self.spectrograph.__class__.__name__) # Get the list of frames of", "else output if ofile is not None and os.path.isfile(ofile) and", "# Check against current maximum ngroups = max(l+1, ngroups) #", "match to the metadata table generated within PypeIt. **Note**: This", "continue. 
user (:obj:`dict`, optional): A dictionary with the types designated", "Configurations have already been set if 'setup' in self.keys() and", "if index=True, with the rows that contain the frames of", "if 'calib' in self.keys() and 'calibbit' not in self.keys() and", "that table will be empty msgs.warn('Both data and files are", "present output_cols = np.array(columns) return output_cols[np.isin(output_cols, self.keys())].tolist() def set_combination_groups(self, assign_objects=True):", "list of configuration lines to include in the file. If", "defined and split is True. Returns: :obj:`list`: List of ``PypeIt``", "fill the configuration setup columns with this single identifier. \"\"\"", "second returned object provides the indices of the first occurrence", "defined yet. \"\"\" # Set the default if requested and", "the ' 'configuration cannot be None.') # Find the frames", "ignore)) setups = setups[rm] indx = indx[rm] # Restrict _configs", "the paths in_cfg = self['setup'] == setup if not np.any(in_cfg):", "np.logical_not(np.isin(setups, ignore)) setups = setups[rm] indx = indx[rm] # Restrict", "defined \\'setup\\' column first; try running set_configurations.') configs = np.unique(self['setup'].data).tolist()", "removed from the metadata table (pypeit file):\\n' indx = np.where(np.logical_not(good))[0]", "exists; set flag to overwrite.') # Check the rows input", "in the returned objects. If ``'all'``, pass back all configurations.", "just stop # producing/using the *.calib file. _cfg = {}", "a convenience function for the writing routines. 
Args: ignore (:obj:`list`,", "not np.any(indx): continue if metakey is None: # No matching", "configurations ignore frames with type: {0}'.format(ignore_frames)) use = np.ones(len(self), dtype=bool)", "frametype and framebit columns.') if 'frametype' in self.keys(): del self.table['frametype']", "col for t in tbl_cols])[0][0] if indx != 0: tbl_cols.insert(0,", "Deal with None's properly nones = usrdata[key] == 'None' usrdata[key][nones]", "cfg_lines=cfg_lines, setup_lines=setup_lines, sorted_files=data_lines, paths=paths) # Return return ofiles def write(self,", "the output. If None, the table is printed in its", "the table in ascii format with io.StringIO() as ff: output_tbl.write(ff,", ":obj:`list`, optional): The list of files to include in the", "\"\"\" if 'setup' not in self.keys(): msgs.error('Cannot get setup names;", "ofile, overwrite=True, ignore=None): \"\"\" Write the calib file. The calib", "tbl_cols = self.set_pypeit_cols(write_bkg_pairs=True) else: all_cols = list(self.keys()) tbl_cols = columns", "doc root is up one directory .. include:: ../include/links.rst \"\"\"", "float): if row[k] is None: match.append(False) elif np.abs(config[k]-row[k])/config[k] < spectrograph.meta[k]['rtol']:", "science frame index, if it is provided. 
Args: ftype (str):", "msgs.info(\"Typing files\") type_bits = np.zeros(len(self), dtype=self.type_bitmask.minimum_dtype()) # Use the user-defined", "need the frame type to continue if 'frametype' not in", "= [None]*len(cfg_keys) for j,setup in enumerate(cfg_keys): # Create the output", "high-dimensional column.') #embed(header='372 of metadata') elif key in meta_data_model.keys(): #", "uniq_meta = np.unique(self.table[metakey][in_cfg].data) # Warn the user that the matching", "continue # Find the frames in this group #in_group =", "ignored ignore_frames = self.spectrograph.config_independent_frames() if ignore_frames is None: # Nope,", "to determine the calibration group of each frame; see :attr:`calib_bitmask`.", "# Initialize columns that the user might add self.set_user_added_columns() #", "configurations themselves. This is mostly a convenience function for the", "Making Columns to pad string array ftype_colmA = table.Column(self.type_bitmask.type_names(type_bits), name='frametype')", "frame to a configuration (setup) and include it in the", "overwrite: msgs.error('{0} already exists. Use ovewrite=True to overwrite.'.format(ofile)) # Construct", "with the associated values of the metadata associated with each", "table length.') msgs.info('Using user-provided frame types.') for ifile,ftypes in user.items():", "bitmask to always return the # correct type... if int(str(ftype_colmA.dtype)[2:])", "meta_key in self.spectrograph.meta.keys(): value = self.spectrograph.get_meta_value(headarr, meta_key, required=strict, usr_row=usr_row, ignore_bad_header", "# TODO: It would be good to get around this.", "foundstd else 'standard') # Find the files without any types", "comma-separated types. merge (:obj:`bool`, optional): Merge the frame typing into", "we're done return good = np.ones(len(self), dtype=bool) for key in", "in range(len(self)): if not is_science[i]: continue if len(self.calib_bitmask.flagged_bits(self['calibbit'][i])) > 1:", "always returns None. 
Raises: ValueError: Raised if the columns to", "#in_group = self.find_calib_group(i) in_cbit = self['calibbit'] == cbit # Find", "provided string or list of strings (e.g., ['A','C']). Returns: numpy.array:", "have been written/printed if ``output == 'table'``. Otherwise, the method", "a new dictionary. Returns: :obj:`dict`: A nested dictionary, one dictionary", "provided, the default parameters specific to the provided spectrograph are", "# Making Columns to pad string array ftype_colmA = table.Column(self.type_bitmask.type_names(type_bits),", "setups, indx if return_index else setups def _get_cfgs(self, copy=False, rm_none=False):", "len(existing_keys) > 0 and match_type: for key in existing_keys: if", "files before continuing\") # Finish up (note that this is", "Args: row (:obj:`int`): The 0-indexed row of the frame. Returns:", "0-indexed rows in the table with the frames to return.", "a single configuration if len(setup) != 1: msgs.error('Each calibration group", "msg += ' {0}\\n'.format(self['filename'][i]) msgs.warn(msg) # And remove 'em self.table", "tbl_cols] if np.any(badcol): raise ValueError('The following columns are not valid:", "array of indices or a boolean array of the correct", "metadata read from the file headers. The table must have", "Args: usrdata (:obj:`astropy.table.Table`): A user provided set of data used", "\"\"\" Write the sorted file. The sorted file lists all", "show a different # configuration. for i in indx[1:]: j", "a tuple, it must include detectors designated as a viable", "exists. Use ovewrite=True to overwrite.'.format(ofile)) # Construct the setups dictionary", "values that will yield good PypeIt reductions. 
Any frames that", "(:class:`pypeit.spectrographs.spectrograph.Spectrograph`): The spectrograph used to collect the data save to", "bit in the keyword allows MasterFrames to be used with", "\"\"\" return time.Time(self['mjd'][row], format='mjd') def construct_basename(self, row, obstime=None): \"\"\" Construct", "f) msgs.warn('The above file could be a twilight flat frame", "Read the fits headers headarr = self.spectrograph.get_headarr(ifile, strict=strict) # Grab", "# TODO: This check should be done elsewhere # Check", "'None': return self['framebit'] == 0 # Select frames indx =", "= cfg_key continue # Find the unique values of meta", "returned. Otherwise, the string is interpreted as the name of", "number of the rows in the table. columns (:obj:`str`, :obj:`list`,", "keys for :class:`pypeit.core.framematch.FrameTypeBitMask`. If set to the string 'None', this", "method to return :attr:`configs` with possible alterations. This method *should", "# Check if any of the other files show a", "if os.path.basename(ifile) != usrdata['filename'][idx]: msgs.error('File name list does not match", "dictionary of the unique configurations identified. type_bitmask (:class:`pypeit.core.framematch.FrameTypeBitMask`): The bitmask", "k in self.spectrograph.meta.keys()} data['directory'] = ['None']*len(_files) data['filename'] = ['None']*len(_files) #", "todo:: - Consolidate with :func:`convert_time` ? 
Args: row (:obj:`int`): The", "common links, assuming primary doc root is up one directory", "key in cfg.keys() if key in _configs] if len(cfg_keys) ==", "i.e., the ' 'configuration cannot be None.') # Find the", "if output_path is None: output_path = os.getcwd() # Find unique", "self.keys() else self['binning'][row] skey = 'Setup {}'.format(self['setup'][row]) # Key names", "# The np.all allows for arrays in the Table (e.g.", "'standard') # Find the files without any types indx =", "'dispangle' not in self.keys() else self['dispangle'][row] dichroic = 'none' if", "(:obj:`bool`, optional): Instead of crashing out if there are unidentified", "list. write_bkg_pairs (:obj:`bool`, optional): Add additional ``PypeIt`` columns for calib,", "primarily used for QA now; but could probably use the", "are used. configs (:obj:`dict`): A dictionary of the unique configurations", "to write the table contents. rows (`numpy.ndarray`_, optional): A boolean", "in self.keys() else self['dispangle'][row] dichroic = 'none' if 'dichroic' not", "for :class:`pypeit.core.framematch.FrameTypeBitMask`. calib_ID (:obj:`int`, optional): Index of the calibration group", "paths of one or more frames. \"\"\" if isinstance(indx, (int,np.integer)):", "for i, ftype in enumerate(self.type_bitmask.keys()): # # Initialize: Flag frames", "if requested. Raises: PypeItError: Raised if the 'setup' isn't been", "indices instead of a boolean array. Returns: numpy.ndarray: A boolean", "else indx def find_frame_files(self, ftype, calib_ID=None): \"\"\" Return the list", "type_names() in bitmask to always return the # correct type...", "class that handles the fits metadata required by PypeIt. 
..", "__setitem__(self, item, value): return self.table.__setitem__(item, value) def __len__(self): return self.table.__len__()", "to the table data['directory'][idx], data['filename'][idx] = os.path.split(ifile) if not data['directory'][idx]:", "= {skey: {'--': {'disperser': {'dispname': dispname, 'dispangle':dispangle}, 'dichroic': dichroic, 'slit':", "i in range(len(self)): # Convert the string to the group", "associated with one calibration group. TODO: Is this appropriate for", "combination groups are set to be unique for each standard", "Finalize the build of the table based on user-provided data,", "modified table return output_tbl # Always write the table in", "sort_col not in self.keys(): raise ValueError(f'Cannot sort by {sort_col}. Not", "must match. If None, any row of the specified frame", "in self.keys(): msgs.error('Cannot get setup names; run set_configurations.') # Unique", "' 'configuration with a valid letter identifier; i.e., the '", "range(len(self)): # Convert the string to the group list grp", "possibly None mjds if there were corrupt header cards mjd[mjd", "the provided spectrograph class. For the data table, one should", "reset unless you call the function with ``force=True``. Args: configs", "Found high-dimensional column.') #embed(header='372 of metadata') elif key in meta_data_model.keys():", "all rows*. force (:obj:`bool`, optional): Force the calibration groups to", "that this is called above if user is not None!)", "set the configuration should be the same as returned by", "lines to include in the file. If None are provided,", "the existing number of table rows. merge (:obj:`bool`, optional): Merge", "unique integer. If the 'comb_id' or 'bkg_id' columns do not", "be assigned to it: for cfg_key in _configs.keys(): in_cfg =", "only return the configurations matched to this provided string or", "list of strings (e.g., ['A','C']). See :attr:`configs`. Raises: PypeItError: Raised", "and science frame ID, if the latter is provided. 
\"\"\"", "if len(cfg_keys) == 0: self.configs = {} self.configs[cfg_iter[cfg_indx]] = {}", "# http://docs.astropy.org/en/stable/table/access_table.html#bytestring-columns-in-python-3 # # See also: # # http://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table.convert_bytestring_to_unicode #", "optional): Ignore configurations in the provided list. write_bkg_pairs (:obj:`bool`, optional):", "column is not present, set a single calibration group *for", "= np.full(len(self), 'None', dtype=object) for i in range(n_cfg): self['calib'][(self['setup'] ==", "a class that handles the fits metadata required by PypeIt.", "# Warn that table will be empty msgs.warn('Both data and", "(:obj:`str`, :obj:`list`, optional): One or more strings used to select", "the frame typing into the exiting table. Returns: :obj:`astropy.table.Table`: A", "Meta for meta_key in self.spectrograph.meta.keys(): value = self.spectrograph.get_meta_value(headarr, meta_key, required=strict,", "Returns: `astropy.table.Table`: The table object that would have been written/printed", "descr_vals=['PypeItMetaData: spectrograph={0}, length={1}\\n'.format( self.spectrograph.name, len(self))]) @staticmethod def default_keys(): return [", "column does not exist, fill the configuration setup columns with", "determines the unique instrument configurations by finding unique combinations of", "have not (likely because the # data was read from", "np.unique(self['setup'][in_group]).tolist() setup = np.unique(self['setup'][in_cbit]).tolist() if 'None' in setup: setup.remove('None') #", "# This should probably go somewhere else or just removed.", "different from :func:`unique_configurations` because the latter determines and provides the", "method returns None when writing to a # file... 
return", "relevant follow-on code so that we # don't have to", "now, use this assert to check that the # metakey", "os.path.isfile(ofile) and not overwrite: raise FileExistsError(f'{ofile} already exists; set flag", "sort_col is not None: if sort_col not in self.keys(): raise", "'None', dtype=object) for i in range(n_cfg): self['calib'][(self['setup'] == configs[i]) &", "when the frame was observed. .. todo:: - Consolidate with", "and \\'calibbit\\' ' 'columns; run set_configurations and set_calibration_groups.') if os.path.isfile(ofile)", "rows in the table with the frames to return. Can", "\"\"\" Set columns that the user *might* add .. note::", "None.'.format( np.sum(ignore))) self.configs = {} for i in range(len(uniq)): if", "{0}'.format(os.path.split(ifile)[1])) # JFH Changed the below to not crash if", "'.join(tbl_cols[badcol]))) # Make sure the basic parameters are the first", "msgs.error('Must have defined \\'setup\\' column first; try running set_configurations.') configs", "with None's properly nones = usrdata[key] == 'None' usrdata[key][nones] =", "self.calib_bitmask.flagged(self['calibbit'].data, grp) def find_frame_calib_groups(self, row): \"\"\" Find the calibration groups", "Make sure the dithers and combination and background IDs are", "all # the frames to this (first) configuration self.table['setup'][indx] =", "None if self.calib_bitmask is None else self.calib_bitmask.nbits def set_calibration_groups(self, global_frames=None,", "= 'none' if 'slitlen' not in self.keys() else self['slitlen'][row] binning", "undefined configurations n_cfg = len(configs) # TODO: Science frames can", "with the correct ID name or start by # #", "files=None, data=None, usrdata=None, strict=True): if data is None and files", "for frametype for a few instruments (e.g. VLT) where meta", "row (:obj:`int`): The 0-indexed row of the frame. obstime (:class:`astropy.time.Time`,", "boolean array of the correct length. Returns: list: List of", "read. 
This function writes the columns selected by the :func:`pypeit.spectrographs.spectrograph.Spectrograph.pypeit_file_keys`,", "-1 if assign_objects and np.all(self['comb_id'] < 0): # find_frames will", "any existing file; otherwise raise an exception. header (:obj:`str`, :obj:`list`,", "in self.keys(): msgs.error('Frame types are not set. First run get_frame_types.')", "following frames may be empty or have corrupt headers:\\n' for", "least for now the DEIMOS image reader will # fault.", "'CODING ERROR: valid_configuration_values is not correctly defined ' \\ 'for", "**not** reset unless you call the function with ``force=True``. Args:", "set_combination_groups(self, assign_objects=True): \"\"\" Set combination groups. .. note:: :attr:`table` is", "def write_calib(self, ofile, overwrite=True, ignore=None): \"\"\" Write the calib file.", "values were # correctly assigned in the spectrograph class definition.", "corrupt headers:\\n' for file in bad_files: msg += ' {0}\\n'.format(file)", "This is mostly a convenience function for the writing routines.", "'run set_configurations and set_calibration_groups.') det_name = self.spectrograph.get_det_name(det) return f\"{self['setup'][row]}_{self['calibbit'][row]}_{det_name}\" def", "in self.keys(): msgs.error('Cannot write sorted instrument configuration table without \\'setup\\'", "configuration, don't include the top-level designation of the configuration itself.", "keywords in the dictionary should be the same as in", "to be used in all calibration groups # (like biases", "all calibration groups # (like biases and darks) if global_frames", "and QA for icbit in np.unique(self['calibbit'].data): cbit = int(icbit) #", "bit names as these are the root of the #", "#setup = np.unique(self['setup'][in_group]).tolist() setup = np.unique(self['setup'][in_cbit]).tolist() if 'None' in setup:", "# Merge with user data, if present if usrdata is", "not specify an output file. 
Returns: `astropy.table.Table`: The table object", "det=1): \"\"\" Construct the master key for the file in", "is None: # Warn that table will be empty msgs.warn('Both", "used to determine the calibration group of each frame; see", "Define the bitmask and initialize the bits self.calib_bitmask = BitMask(np.arange(ngroups))", "sort(self, col): return self.table.sort(col) def merge(self, usrdata, match_type=True): \"\"\" Use", ":attr:`table`. \"\"\" # Allow for single files _files = files", "If the 'calib' column is not present, set a single", "Placeholder: Allow an empty set of configuration keys # meaning", "if merge: self['frametype'] = t['frametype'] self['framebit'] = t['framebit'] return t", "self.calib_bitmask.turn_on(self['calibbit'][i], grp) def _check_calib_groups(self): \"\"\" Check that the calibration groups", "for sorting. Args: output (:obj:`str`, optional): Output signature or file", "with the associated metadata for each. \"\"\" _cfg = deepcopy(self.configs)", "are written; if ``'pypeit'``, the columns are the same as", "setup in cfg.keys(): _cfg[setup] = {} _cfg[setup]['--'] = deepcopy(cfg[setup]) cfg", "for str_i in string.ascii_uppercase for str_j in string.ascii_uppercase] cfg_iter =", "not in self.keys() else self['decker'][row] slitwid = 'none' if 'slitwid'", "unique instrument configurations. If run before the ``'setup'`` column is", "Convert types if possible existing_keys = list(set(self.table.keys()) & set(usrdata.keys())) radec_done", "string.'.format( self.spectrograph.__class__.__name__) # Get the list of frames of this", "meta_data_model = meta.get_meta_data_model() # Check the input if not isinstance(usrdata,", "both? 
if 'ra' not in self.keys() or 'dec' not in", "be defined first) ofile = None if output in [None,", "np.isin(setups, _configs) setups = setups[use] indx = indx[use] return setups,", "JFH Changed the below to not crash if some files", "slicing the PypeItMetaData object # will return an astropy.table.Table, not", "c,t in zip(columns, types): if c in self.keys(): self.table[c] =", "number # provided, regardless of whether or not a science", "(:obj:`str`): Name for the output sorted file. overwrite (:obj:`bool`, optional):", "the complete list). Args: write_bkg_pairs (:obj:`bool`, optional): Add additional ``PypeIt``", "in the instantiation of PypeItMetaData.' ' The table will be", "does not match user-provided metadata table. See ' 'usrdata argument", "data? \"\"\" is_science = self.find_frames('science') for i in range(len(self)): if", "Ignore frames with undefined configurations n_cfg = len(configs) # TODO:", "keyword data to include in the table and specify any", "and will be removed from the metadata table (pypeit file):\\n'", "= usrdata[key][~nones].astype(dtype) # Include the user data in the table", "None or a string.'.format( self.spectrograph.__class__.__name__) # Get the list of", "'setup' not in self.keys(): msgs.error('Cannot get setup names; run set_configurations.')", "- Why isn't frametype just in the user-provided data? It", "construct_basename(self, row, obstime=None): \"\"\" Construct the root name primarily for", "' \\ 'for {0}; values must be a list.'.format(self.spectrograph.__class__.__name__) #", "# Make sure the calibbit column does not exist if", "np.asarray(data['filename']) bad_files = filenames[mjd == None] # Print status message", "and specify any validation checks. par (:obj:`pypeit.par.pypeitpar.PypeItPar`): PypeIt parameters used", "column does not exist - if the 'comb_id' column does", "provided as a string with comma-separated types. setup (:obj:`str`): If", "keys to use to construct the configuration. 
If None, the", "the association of each frame from that configuration with a", "Validate instrument name self.spectrograph.vet_instrument(self.table) def _impose_types(self, columns, types): \"\"\" Impose", "be provided as a string with comma-separated types. setup (:obj:`str`):", "# else np.ones(len(self), dtype=bool) # Include a combination of instrument-specific", "header keyword data to include in the table and specify", "types on specific columns self._impose_types(['comb_id', 'bkg_id', 'manual'], [int, int, str])", "def _impose_types(self, columns, types): \"\"\" Impose a set of types", "top-level designation of the configuration itself. Returns: dict: The pypeit", "Args: ofile (:obj:`str`): Name for the output sorted file. overwrite", "a configuration (setup) and include it in the metadata table.", "= self.unique_configurations(copy=ignore is not None) if ignore is not None:", "'calibbit' in self.keys(): del self['calibbit'] # Groups have already been", "the backwards compatible \"setup\" dictionary. Args: indx (:obj:`int`): The index", "None else [det] #for d in _det: # setup[skey][str(d).zfill(2)] \\", "with the metadata keywords. Also raised when some frames cannot", "not present, set a single calibration group *for all rows*.", "row of the frame. obstime (:class:`astropy.time.Time`, optional): The MJD of", "the metadata table listed by the spectrograph ``configuration_keys`` method. If", "the frame type of each fits file. calib_bitmask (:class:`BitMask`): The", "ff.getvalue().split('\\n')[:-1] if ofile is None: # Output file not defined", "of the # meta data values indx &= np.isin(self.table[metakey], uniq_meta)", "in self.keys() and not force: return # Groups have been", "to initialize the combination group and background group columns, and/or", "provided calibration group. Args: grp (:obj:`int`): The calibration group integer.", "the selected row. 
\"\"\" _cfg_keys = self.spectrograph.configuration_keys() if cfg_keys is", "yet, just stop # producing/using the *.calib file. _cfg =", "the first occurrence of these setups, if requested. Raises: PypeItError:", "for PypeIt, configuring the control-flow and algorithmic parameters and listing", "the fits table is dictated by the header keywords specified", "it ff = open(ofile, 'w') ff.write(yaml.dump(utils.yamlify(cfg))) ff.close() def write_pypeit(self, output_path=None,", "self._check_calib_groups() return # TODO: The rest of this just nominally", "a pypeit file) if 'calib' in self.keys() and 'calibbit' not", "row (:obj:`int`): The 0-indexed row used to construct the setup.", "is interpreted as the name of an ascii file to", "to have been defined. For now this is a simple", "None) if ignore is not None: for key in cfgs.keys():", "'setup' columns does not exist, fill the configuration setup columns", "so we're done return # Some frame types may have", "output_tbl = self.table.copy() # Select the output rows if a", "n_cfg = len(configs) # TODO: Science frames can only have", "config_only=False): \"\"\" Construct the setup dictionary. .. todo:: - This", "Table with the frame types and bits. Args: type_bits (numpy.ndarray):", "This is the desired behavior since if there are #", "dictionary. Returns: :obj:`dict`: A nested dictionary, one dictionary per configuration", "usrdata is not None: self.merge(usrdata) # Impose types on specific", "data? It may be (see get_frame_types) and I'm just not", "for col in ['framebit', 'frametype', 'filename', 'directory']: if col not", "'None') & self.find_frames(ftype) if not np.any(indx): continue if metakey is", "If the internal table already contains the column in `usrdata`,", "type in the internal table. See above. 
Raises: TypeError: Raised", "len(setup) != 1: msgs.error('Each calibration group must be from one", "'ra' not in self.keys() or 'dec' not in self.keys(): msgs.warn('Cannot", ":func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames` method for :attr:`spectrograph` will be ignored in the construction", "setup and calibbit; ' 'run set_configurations and set_calibration_groups.') det_name =", "setups to write if configs is None or configs ==", "from :attr:`table`, meaning this method may modify that attribute directly.", "match to the metadata table generated within PypeIt. match_type (:obj:`bool`,", "else self._build(files, strict=strict, usrdata=usrdata)) # Merge with user data, if", "set_configurations(self, configs=None, force=False, fill=None): \"\"\" Assign each frame to a", "match. If None, any row of the specified frame type", "we're done return # Some frame types may have been", "msgs.info('All files assumed to be from a single configuration.') return", "not None: if 'frametype' not in self.keys(): msgs.error('To set global", "a single configuration.') return self._get_cfgs(copy=copy, rm_none=rm_none) # Use the first", "self.type_bitmask.type_names(self['framebit'][indx]) def get_frame_types(self, flag_unknown=False, user=None, merge=True): \"\"\" Generate a table", "= list(set(self.table.keys()) & set(usrdata.keys())) radec_done = False if len(existing_keys) >", "of the requested type. Raises: PypeItError: Raised if the `framebit`", "list(self.keys()) tbl_cols = columns if isinstance(columns, list) else columns.split(',') badcol", "for key in self.type_bitmask.keys(): #ftype_in_group = self.find_frames(key) & in_group ftype_in_group", "= meta.get_meta_data_model() # Check the input if not isinstance(usrdata, table.Table):", "Return the configuration dictionary for a given frame. This is", "frame types to append/overwrite. 
append (:obj:`bool`, optional): Append the frame", "in tbl_cols: continue indx = np.where([t == col for t", "can avoid this step by setting `match_type=False`. Args: usrdata (:obj:`astropy.table.Table`):", "table of frame types from the input metadata object. ..", "and bits into the existing table. This will *overwrite* any", "TODO: Turn this into a DataContainer # Initially tried to", "for the file in the provided row. The master key", "return the modified table return output_tbl # Always write the", "\"\"\" return self.frame_paths(self.find_frames(ftype, calib_ID=calib_ID)) def frame_paths(self, indx): \"\"\" Return the", "are set, so we're done return # Some frame types", "exist, they're set to -1. Args: assign_objects (:obj:`bool`, optional): If", "Set to False to report a warning and continue. usrdata", ".. note:: :attr:`table` is edited in place. Args: columns (:obj:`list`):", "to the string 'None', this returns all frames without a", "Provides a class that handles the fits metadata required by", "This is used to set the internal :attr:`configs`. If this", "output_tbl = output_tbl[tbl_cols] if output == 'table': # Instead of", "the spectrograph class definition. # This should probably go somewhere", "msgs.error('Calibration groups are not set. 
First run set_calibration_groups.') return self.calib_bitmask.flagged(self['calibbit'].data,", "unique configurations.'.format(len(self.configs))) return self._get_cfgs(copy=copy, rm_none=rm_none) def set_configurations(self, configs=None, force=False, fill=None):", "name='frametype') else: ftype_colm = ftype_colmA fbits_colm = table.Column(type_bits, name='framebit') t", "dichroic = 'none' if 'dichroic' not in self.keys() else self['dichroic'][row]", "grp): \"\"\" Find all the frames associated with the provided", "function determines the unique instrument configurations by finding unique combinations", "Set output path if output_path is None: output_path = os.getcwd()", "indx = self['idname'] == self.spectrograph.idname(ftype) if useIDname \\ # else", "for str RA, DEC (backwards compatability) if key in ['ra',", "> 1: msgs.error('Science frames can only be assigned to a", "the following files:\") for f in self['filename'][indx]: msgs.info(f) if not", "not in self.keys(): msgs.warn('Cannot associate standard with science frames without", "all columns in are written; if ``'pypeit'``, the columns are", "identify object and background frame pairs. write_manual (:obj:`bool`, optional): Add", "spectrograph self.par = par if not isinstance(self.par, PypeItPar): raise TypeError('Input", "frame, see :func:`set_combination_groups`. .. note:: This should only be run", "is None: # No values specified, so we're done return", "list of setup identifiers ('A', 'B', etc.) and the row", "the other files show a different # configuration. for i", "np.array(columns) return output_cols[np.isin(output_cols, self.keys())].tolist() def set_combination_groups(self, assign_objects=True): \"\"\" Set combination", "'setup' column to have been defined. 
For now this is", "not in self.keys(): msgs.error('To account for ignored frames, types must", "flag_unknown (:obj:`bool`, optional): Instead of crashing out if there are", "This needs to be moved into each Spectrograph # if", "the pypeit files ofiles = [None]*len(cfg_keys) for j,setup in enumerate(cfg_keys):", "this type that match any of the # meta data", "root name primarily for PypeIt file output. Args: row (:obj:`int`):", "columns for manual extraction Raises: PypeItError: Raised if the 'setup'", "set(self.keys())) > 0: msgs.error('Configuration {0} defined using unavailable keywords!'.format(k)) self.table['setup']", "Check the file can be written (this is here because", "in this group, ignoring any # undefined ('None') configurations #setup", "'all' or configs == ['all']: cfg_keys = list(cfg.keys()) else: _configs", "columns to include are not valid, or if the column", "to a configuration (setup) and include it in the metadata", "construction of the unique configurations. If :func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames` does not return", "def clean_configurations(self): \"\"\" Ensure that configuration-defining keywords all have values", "'None', this returns all frames without a known type. calib_ID", "reconstructed if the 'calib' column already exists. Raises: PypeItError: Raised", "the column to use for sorting is not valid. FileExistsError:", "deepcopy import datetime from IPython import embed import numpy as", "Check the input if not isinstance(usrdata, table.Table): raise TypeError('Must provide", "srt = np.append(np.where(is_None)[0], np.where(indx)[0][np.argsort(output_tbl[sort_col][indx].data)]) output_tbl = output_tbl[tbl_cols][srt] else: output_tbl =", "a SPIT option. 
Args: flag_unknown (:obj:`bool`, optional): Instead of crashing", "not is_science[i]: continue if len(self.calib_bitmask.flagged_bits(self['calibbit'][i])) > 1: msgs.error('Science frames can", "= fill return _configs = self.unique_configurations() if configs is None", "else cfg_keys return {k:self.table[k][indx] for k in _cfg_keys} def master_key(self,", "# upper-case letters: A, B, C, etc. double_alphabet = [str_i", "row used to construct the setup. det (:obj:`int`, optional): The", "less than 0 (meaning they're unassigned), the combination groups are", "has a string type to make sure that it matches", "from the fits headers or the data directly. If neither", "columns ncol = len(tbl_cols) for col in ['dithpat', 'dithpos', 'dithoff',", "a comma-separated string. If None or ``'all'``, all columns in", "frame type identifier. See the keys for :class:`pypeit.core.framematch.FrameTypeBitMask`. If set", "with the metadata values from the selected row. \"\"\" _cfg_keys", "PypeItMetaData object. def __getitem__(self, item): return self.table.__getitem__(item) def __setitem__(self, item,", "does not have unique '.format(cfg_key) + '{0} values.' .format(meta)) #", "setups = setups[rm] indx = indx[rm] # Restrict _configs =", "If None, the table is printed in its current state.", "type. The frames must also match the science frame index,", "paths to one or more frames. Args: indx (:obj:`int`, array-like):", "of the calibration group bits. table (:class:`astropy.table.Table`): The table with", "to not crash if some files have None in #", "usrdata[key][~nones].astype(dtype) # Include the user data in the table for", "of the full paths of one or more frames. \"\"\"", "yet been defined. 
copy (:obj:`bool`, optional): Return a deep copy", "# Nope, we're still done return # At this point,", "length={0}\\n'.format(len(self))]) def _repr_html_(self): return self.table._base_repr_(html=True, max_width=-1, descr_vals=['PypeItMetaData: spectrograph={0}, length={1}\\n'.format( self.spectrograph.name,", "TypeError: Raised if `usrdata` is not an `astropy.io.table.Table` KeyError: Raised", "Make sure the basic parameters are the first few columns;", "B, C, etc. double_alphabet = [str_i + str_j for str_i", "from a single configuration.') return self._get_cfgs(copy=copy, rm_none=rm_none) # Use the", "# assigned to that group. ngroups = 0 for i", "= self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs, write_manual=write_manual) cfgs = self.unique_configurations(copy=ignore is not None) if", "not ' \\ 'correctly defined for {0}; values must be", "msgs.info(f) if not flag_unknown: msgs.error(\"Check these files before continuing\") #", "files (:obj:`str`, :obj:`list`, optional): The list of files to include", "just add the column anyway, with the type in `usrdata`.", "Args: output_path (:obj:`str`, optional): Root path for the output pypeit", "else self['dispangle'][row] dichroic = 'none' if 'dichroic' not in self.keys()", "(:obj:`list`, optional): The list of metadata keys to use to", "== len(cfg_iter): msgs.error('Cannot assign more than {0} configurations!'.format(len(cfg_iter))) self.configs[cfg_iter[cfg_indx]] =", "n_cfg == 1 else ','.join(np.arange(n_cfg).astype(str)) for ftype in global_frames: indx", "BitMask(np.arange(ngroups)) self['calibbit'] = 0 # Set the calibration bits for", "is copied to a new dictionary. Returns: :obj:`dict`: A nested", "write_manual=False): \"\"\" Write the sorted file. The sorted file lists", "is not None: output_tbl = output_tbl[rows] # Select and sort", "grab the rtol value for float meta (e.g. dispangle) Returns:", "set a single calibration group *for all rows*. 
force (:obj:`bool`,", "in self.type_bitmask.keys(): #ftype_in_group = self.find_frames(key) & in_group ftype_in_group = self.find_frames(key)", "frame type. If False, all existing frame types are overwitten", "This is primarily used for QA now; but could probably", "not in self.keys(): msgs.error('Cannot write calibration groups without \\'setup\\' and", "a row from the fitstbl matches the input configuration Args:", "= {} for key in self.type_bitmask.keys(): #ftype_in_group = self.find_frames(key) &", "self.table._base_repr_(html=True, max_width=-1, descr_vals=['PypeItMetaData: spectrograph={0}, length={1}\\n'.format( self.spectrograph.name, len(self))]) @staticmethod def default_keys():", "the output to an ascii file with open(ofile, 'w') as", "preconstructed # pypeit file, which should nominally follow an execution", "for NIR data? \"\"\" is_science = self.find_frames('science') for i in", "False to report a warning and continue. usrdata (astropy.table.Table, optional):", "same as in the table, and the keywords used to", "Returns: astropy.time.Time: The MJD of the observation. \"\"\" return time.Time(self['mjd'][row],", "can be read from the pypeit file. The 'calibbit' column", "return output_cols[np.isin(output_cols, self.keys())].tolist() def set_combination_groups(self, assign_objects=True): \"\"\" Set combination groups.", "of the fits table is dictated by the header keywords", "the configurations to be redetermined. Otherwise the configurations are only", "np.isin(self.table[metakey], uniq_meta) self.table['setup'][indx] = cfg_key def clean_configurations(self): \"\"\" Ensure that", "optional): Attempt to match the data type in `usrdata` to", "number of table rows. merge (:obj:`bool`, optional): Merge the types", "they're set to -1. 
Args: assign_objects (:obj:`bool`, optional): If all", "index (:obj:`bool`, optional): Return an array of 0-indexed indices instead", "Args: frametype (:obj:`dict`): A dictionary with the types designated by", "are the root of the # MasterFrames and QA for", "return self.table._base_repr_(html=False, descr_vals=['PypeItMetaData:\\n', ' spectrograph={0}\\n'.format( self.spectrograph.name), ' length={0}\\n'.format(len(self))]) def _repr_html_(self):", "return os.path.join(self['directory'][indx], self['filename'][indx]) return [os.path.join(d,f) for d,f in zip(self['directory'][indx], self['filename'][indx])]", "currently only checks that the science frames are associated with", "here, because this method is only called for a preconstructed", "keys in config match = [] for k in config.keys():", "frames, types must have been defined; run ' 'get_frame_types.') #", "It would be good to get around this. Is it", "_cfg_keys} def master_key(self, row, det=1): \"\"\" Construct the master key", "(pypeit file):\\n' indx = np.where(np.logical_not(good))[0] for i in indx: msg", "fill is not None: self['setup'] = fill return _configs =", "by the spectrograph ``configuration_keys`` method. If run after the ``'setup'``", "calibration groups without \\'setup\\' and \\'calibbit\\' ' 'columns; run set_configurations", "types. The length must match the existing number of table", "`usrdata`, the function will try to match the data type", "setup without \\'setup\\' column; ' 'run set_configurations.') dispname = 'none'", "continue self.configs[uniq[i]] = self.get_configuration(indx[i]) msgs.info('Found {0} unique configurations.'.format(len(self.configs))) return self._get_cfgs(copy=copy,", "are the last # few columns ncol = len(tbl_cols) for", "simple grouping of frames with the same configuration. .. 
todo::", "against current maximum ngroups = max(l+1, ngroups) # Define the", "msgs.warn(msg) # Return return data # TODO: In this implementation,", "the internal table already contains the column in `usrdata`, the", "= 'none' if 'dichroic' not in self.keys() else self['dichroic'][row] decker", "file. The calib file provides the unique instrument configurations (setups)", "instrument ' 'configuration with a valid letter identifier; i.e., the", "not in self.keys() or 'calibbit' not in self.keys(): msgs.error('Cannot write", "metadata values from the selected row. \"\"\" _cfg_keys = self.spectrograph.configuration_keys()", "in this configuration indx = self['setup'] == setup if not", "associated values of the metadata associated with each configuration. The", "(e.g., ['bias', 'dark']). default (:obj:`bool`, optional): If the 'calib' column", "these files before continuing\") # Finish up (note that this", "not overwrite: raise FileExistsError(f'{ofile} already exists; set flag to overwrite.')", "building the metadata from the fits files. strict (:obj:`bool`, optional):", "'\\n') ff.write('#---------------------------------------------------------\\n') mjd = subtbl['mjd'].copy() # Deal with possibly None", "binning = '1,1' if 'binning' not in self.keys() else self['binning'][row]", "the list of keys to use cfg_keys = self.spectrograph.configuration_keys() #", "is 99). Using the calibration bit in the keyword allows", "Returns: dict: Dictionary with the data to assign to :attr:`table`.", "calibration groups. Args: row (:obj:`int`): The 0-indexed row used to", "to the provided `setup` - assigns all frames to a", "two columns, the type names and the type bits. See", "setting `match_type=False`. Args: usrdata (:obj:`astropy.table.Table`): A user provided set of", "single configuration.') return self._get_cfgs(copy=copy, rm_none=rm_none) # Use the first file", "calibration groups are valid. 
This currently only checks that the", "the selected configurations to ignore rm = np.logical_not(np.isin(setups, ignore)) setups", "make sure that it matches with what can be read", "through the # upper-case letters: A, B, C, etc. double_alphabet", "detector. Raises: PypeItError: Raised if the 'setup' or 'calibbit' columns", "# Read the fits headers headarr = self.spectrograph.get_headarr(ifile, strict=strict) #", "certain columns. .. note:: :attr:`table` is edited in place. Args:", "extraction configs (:obj:`str`, :obj:`list`, optional): One or more strings used", "list(set(self.table.keys()) & set(usrdata.keys())) radec_done = False if len(existing_keys) > 0", "flag_unknown: msgs.error(\"Check these files before continuing\") # Finish up (note", ":func:`pypeit.pypeitsetup.PypeItSetup.run`. .. todo:: - Why isn't frametype just in the", "files to use to build the table. strict (:obj:`bool`, optional):", "will be ignored in the construction of the unique configurations.", "is None else obstime tiso = time.Time(_obstime, format='isot') dtime =", "been set yet. \"\"\" # Configurations have already been set", "fill (:obj:`str`, optional): If the 'setup' column does not exist,", "+= 1 msgs.info('Found {0} unique configurations.'.format(len(self.configs))) return self._get_cfgs(copy=copy, rm_none=rm_none) def", "embed import numpy as np import yaml from astropy import", "unless you call the function with ``force=True``. Args: configs (:obj:`dict`,", "Use the provided table to supplement or overwrite the metadata.", "configuration. cfg_keys (:obj:`list`, optional): The list of metadata keys to", "The 0-indexed row in the table to edit frame_type (:obj:`str`,", "'frametype' in self.keys(): del self.table['frametype'] if 'framebit' in self.keys(): del", "# Check if os.path.basename(ifile) != usrdata['filename'][idx]: msgs.error('File name list does", "{0}\\n'.format(file) msgs.warn(msg) # Return return data # TODO: In this", "# character from {0}. 
Returning {1}.'.format( meta_key, value)) data[meta_key].append(value) msgs.info('Added", "indx, cfg_keys=None): \"\"\" Return the configuration dictionary for a given", "configurations.') uniq, indx = np.unique(self['setup'], return_index=True) ignore = uniq ==", "\"\"\" import os import io import string from copy import", "metadata table listed by the spectrograph ``configuration_keys`` method. If run", "'setup' isn't defined and split is True. Returns: :obj:`list`: List", "Include the user data in the table for key in", "det_name = self.spectrograph.get_det_name(det) return f\"{self['setup'][row]}_{self['calibbit'][row]}_{det_name}\" def construct_obstime(self, row): \"\"\" Construct", "optional): Append the frame type. If False, all existing frame", "np.any(indx): msgs.info(\"Couldn't identify the following files:\") for f in self['filename'][indx]:", "configuration with the associated values of the metadata associated with", "'all' here? Can't `configs is # None` mean that you", "needs to be defined first) ofile = None if output", "The length must match the existing number of table rows.", "not a key in the provided table. \"\"\" meta_data_model =", "find_frames(self, ftype, calib_ID=None, index=False): \"\"\" Find the rows with the", "for this configuration. if uniq_meta.size != 1: msgs.warn('When setting the", "the function will try to match the data type of", "assign_objects (:obj:`bool`, optional): If all of 'comb_id' values are less", "included in the configuration column (A, B, C, etc), the", "dictionary with the default format. Raises: PypeItError: Raised if the", "NOTE: For now, check that the configuration values were #", "if ``output == 'table'``. Otherwise, the method always returns None.", "the column anyway, with the type in `usrdata`. You can", "write. If None, all rows are written. Shape must match", "header=None): \"\"\" Write the metadata either to a file or", "be the key and value of the dictionary, respectively. 
The", "with the type in `usrdata`. You can avoid this step", "the dictionary should be the same as in the table,", "A nested dictionary, one dictionary per configuration with the associated", "import meta from pypeit.io import dict_to_lines from pypeit.par import PypeItPar", "by hand. Args: indx (:obj:`int`): The 0-indexed row in the", "ignore[i]: continue self.configs[uniq[i]] = self.get_configuration(indx[i]) msgs.info('Found {0} unique configurations.'.format(len(self.configs))) return", "Returns: `astropy.table.Table`: Table with two columns, the frame type name", "raise TypeError('Must provide an astropy.io.table.Table instance.') if 'filename' not in", "flux_calib.find_standard_file(ra, dec, check=True) b = self.type_bitmask.turn_off(b, flag='science' if foundstd else", "ovewrite=True to overwrite.'.format(ofile)) # Grab output columns output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs,", "len(self) for i in range(nrows): for d, cfg in _configs.items():", "is included. index (:obj:`bool`, optional): Return an array of 0-indexed", "dispname, 'dispangle':dispangle}, 'dichroic': dichroic, 'slit': {'decker': decker, 'slitwid':slitwid, 'slitlen':slitlen}, 'binning':", "table is printed in its current state. overwrite (:obj:`bool`, optional):", "not match table length.') msgs.info('Using user-provided frame types.') for ifile,ftypes", "is # None` mean that you want all the configurations?", "types based on the provided object - sets all the", "set the internal :attr:`configs`. If this attribute is not None,", "match table length.') msgs.info('Using user-provided frame types.') for ifile,ftypes in", "the metadata either to a file or to the screen.", "in the construction of the unique configurations. If :func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames` does", "other inputs. 
Raises: PypeItError: Raised if none of the keywords", "in self.keys(): self['comb_id'] = -1 if 'bkg_id' not in self.keys():", "on user-provided data, typically pulled from the PypeIt file. This", "in tbl_cols] if np.any(badcol): raise ValueError('The following columns are not", "combination group and background group columns, and/or to initialize the", "in self.keys(): del self.table['frametype'] if 'framebit' in self.keys(): del self.table['framebit']", "is None else self._build(files, strict=strict, usrdata=usrdata)) # Merge with user", "user might add \"\"\" if 'manual' not in self.keys(): self['manual']", "ignore_frames = list(ignore_frames.keys()) msgs.info('Unique configurations ignore frames with type: {0}'.format(ignore_frames))", "user=None, merge=True): \"\"\" Generate a table of frame types from", "= self.spectrograph.get_det_name(det) return f\"{self['setup'][row]}_{self['calibbit'][row]}_{det_name}\" def construct_obstime(self, row): \"\"\" Construct the", "# # http://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table.convert_bytestring_to_unicode # # Or we can force type_names()", "used to keep track of the calibration group bits. table", "and set_calibration_groups.') if os.path.isfile(ofile) and not overwrite: msgs.error('{0} already exists.", "configurations in that column. This is used to set the", "msgs from pypeit import utils from pypeit.core import framematch from", "should be provided as a string with comma-separated types. merge", "(meaning they're unassigned), the combination groups are set to be", "columns to return if columns in [None, 'all']: tbl_cols =", "want to do this here? indx = self.type_bitmask.flagged(type_bits, flag='standard') for", "len(cfg_keys) == 0: self.configs = {} self.configs[cfg_iter[cfg_indx]] = {} msgs.info('All", "metakey is None: # No matching meta data defined, so", "not valid. 
FileExistsError: Raised if overwrite is False and the", "sets the calibration # group based on the configuration. This", "anything listed in # the ignore_frames indx = np.arange(len(self)) ignore_frames", "configuration (setup) and include it in the metadata table. The", "unique instrument configurations (setups) and the frames associated with each", "&= np.isin(self.table[metakey], uniq_meta) self.table['setup'][indx] = cfg_key def clean_configurations(self): \"\"\" Ensure", "'all']: tbl_cols = list(self.keys()) elif columns == 'pypeit': tbl_cols =", "just removed. assert isinstance(cfg_limits[key], list), \\ 'CODING ERROR: valid_configuration_values is", "< 0): # find_frames will throw an exception if framebit", "if not isinstance(usrdata, table.Table): raise TypeError('Must provide an astropy.io.table.Table instance.')", "self.set_combination_groups() def get_configuration(self, indx, cfg_keys=None): \"\"\" Return the configuration dictionary", "ftype == 'None': return self['framebit'] == 0 # Select frames", "table with the frames to return. Can be an array", "the frame types have not yet been defined (see :func:`get_frame_types`),", "At this point, we need the frame type to continue", "internal table is edited *in place*. If the 'setup' column", "This functionality is only used when building the metadata from", "TODO: In this implementation, slicing the PypeItMetaData object # will", "star foundstd = flux_calib.find_standard_file(ra, dec, check=True) b = self.type_bitmask.turn_off(b, flag='science'", "provided files; see :func:`pypeit.spectrographs.spectrograph.get_headarr`. Set to False to instead report", "not in usrdata.keys(): raise KeyError('The user-provided table must have \\'filename\\'", "write_manual=False, configs=None): \"\"\" Write a pypeit file in data-table format.", "'all'? 
if configs is not None and 'all' not in", "np.unique(self['setup'].data).tolist() if 'None' in configs: configs.remove('None') # Ignore frames with", "columns are the same as those included in the pypeit", "length={1}\\n'.format( self.spectrograph.name, len(self))]) @staticmethod def default_keys(): return [ 'directory', 'filename',", "'.' # Read the fits headers headarr = self.spectrograph.get_headarr(ifile, strict=strict)", "msgs.newline() + 'missed by the automatic identification.') b = self.type_bitmask.turn_off(b,", "Raised if the 'calibbit' column is not defined. \"\"\" if", "match the number of files in :attr:`table`. For frames that", "1: # NOT ALLOWED!! # TODO: This should be converted", "groups. .. note:: :attr:`table` is edited in place. This function", "usrdata (astropy.table.Table, optional): Parsed for frametype for a few instruments", "has an # invalid key, at least for now the", "because the spectrograph # needs to be defined first) ofile", "from copy import deepcopy import datetime from IPython import embed", "first occurs. This is different from :func:`unique_configurations` because the latter", "msgs.warn('Cannot associate standard with science frames without sky coordinates.') else:", "== 'None' or dec == 'None': msgs.warn('RA and DEC must", "data may not be required Returns: dict: Dictionary with the", "dtype=object) for i in range(n_cfg): self['calib'][(self['setup'] == configs[i]) & (self['framebit']", "``'pypeit'``, the columns are the same as those included in", "col for t in tbl_cols])[0][0] if indx != ncol-1: tbl_cols.insert(ncol-1,", "with the data to assign to :attr:`table`. 
\"\"\" # Allow", "frames without sky coordinates.') else: # TODO: Do we want", "be written (this is here because the spectrograph # needs", "c, self.spectrograph): break j += 1 unique = j ==", "based on the string representation of the groups self._set_calib_group_bits() #", "self['calibbit'] # Groups have already been set if 'calib' in", "must be of type PypeItPar.') self.type_bitmask = framematch.FrameTypeBitMask() # Build", "sci_std_idx = np.where(np.any([self.find_frames('science'), self.find_frames('standard')], axis=0))[0] self['comb_id'][sci_std_idx] = np.arange(len(sci_std_idx), dtype=int) +", "haven't been defined. \"\"\" if 'setup' not in self.keys() or", "if col not in tbl_cols: continue indx = np.where([t ==", "\"\"\" Group calibration frames into sets. Requires the 'setup' column", "the row matches the input configuration \"\"\" # Loop on", "the data table, one should typically provide either the file", "self.calib_bitmask.nbits def set_calibration_groups(self, global_frames=None, default=False, force=False): \"\"\" Group calibration frames", "Do we need a calib file? def write_calib(self, ofile, overwrite=True,", "above gets overwritten by # this if the frames to", "setups to write!') # Grab output columns output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs,", "frames associated with the provided calibration group. Args: grp (:obj:`int`):", "is validated using checks specified by the provided spectrograph class.", "not in self.keys(): # raise ValueError('idname is not set in", "in the pypeit file. Each selected column must be a", "table return output_tbl # Always write the table in ascii", "defined; run ' 'get_frame_types.') # For each configuration, determine if", "double_alphabet cfg_indx = 0 # TODO: Placeholder: Allow an empty", "{0} frames with configuration set to None.'.format( np.sum(ignore))) self.configs =", "to which to write the table contents. 
rows (`numpy.ndarray`_, optional):", "pypeit import msgs from pypeit import utils from pypeit.core import", "# Grab output columns output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs, write_manual=write_manual) # Write", "output path if output_path is None: output_path = os.getcwd() #", "= {} msgs.info('All files assumed to be from a single", "in this group cfg[setup[0]][cbit] = {} for key in self.type_bitmask.keys():", "an astropy.io.table.Table instance.') if 'filename' not in usrdata.keys(): raise KeyError('The", "function: - sets the frame types based on the provided", "Ensure that configuration-defining keywords all have values that will yield", "any method outside of this class; use :func:`unique_configurations` instead. Args:", "assuming primary doc root is up one directory .. include::", "if any of the frames with # the ignored frame", "must have been defined; run ' 'get_frame_types.') # For each", ":class:`astropy.table.Table`. usrdata (:obj:`astropy.table.Table`, optional): A user provided set of data", "a reason why this is not an attribute of #", "written/printed if ``output == 'table'``. Otherwise, the method always returns", "framebit is not # set... sci_std_idx = np.where(np.any([self.find_frames('science'), self.find_frames('standard')], axis=0))[0]", "return self._get_cfgs(copy=copy, rm_none=rm_none) if 'setup' in self.keys(): msgs.info('Setup column already", "will change! # The configuration must be present to determine", "= np.unique(self['setup'], return_index=True) ignore = uniq == 'None' if np.sum(ignore)", "strict (:obj:`bool`, optional): Function will fault if there is a", "'Setup {}'.format(self['setup'][row]) # Key names *must* match configuration_keys() for spectrographs", "and not force: return if 'setup' not in self.keys() and", "note:: :attr:`table` is edited in place. Args: columns (:obj:`list`): List", "lists all the unique instrument configurations (setups) and the frames", "as a string with comma-separated types. 
merge (:obj:`bool`, optional): Merge", "that is used to match to the metadata table generated", "with comma-separated types. setup (:obj:`str`): If the 'setup' columns does", "of when the frame was observed. .. todo:: - Consolidate", "table; cannot use it for file typing.') # Start msgs.info(\"Typing", "that the configuration values were # correctly assigned in the", "have defined \\'setup\\' column first; try running set_configurations.') configs =", "from pypeit.io import dict_to_lines from pypeit.par import PypeItPar from pypeit.par.util", "instead report a warning and continue. Attributes: spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): The", "cards mjd[mjd == None] = -99999.0 isort = np.argsort(mjd) subtbl", "column is not defined, or if `global_frames` is provided but", "and converted to a zero-filled string with two digits (the", "del cfgs[key] # Construct file ff = open(ofile, 'w') for", "Instead of writing, just return the modified table return output_tbl", "files, strict=True, usrdata=None): \"\"\" Generate the fitstbl that will be", "'all' not in _configs: use = np.isin(setups, _configs) setups =", "List of the full paths of one or more frames.", "format='mjd') def construct_basename(self, row, obstime=None): \"\"\" Construct the root name", "type of each fits file. calib_bitmask (:class:`BitMask`): The bitmask used", "I didn't add it # here, because this method is", "with a given frame type. The frames must also match", "didn't add it # here, because this method is only", ".format(meta)) # Find the frames of this type that match", "The class is used to provide the header keyword data", ":attr:`table`. 
For frames that have multiple types, the types should", "assigned to it: for cfg_key in _configs.keys(): in_cfg = self.table['setup']", "continue if len(self.calib_bitmask.flagged_bits(self['calibbit'][i])) > 1: msgs.error('Science frames can only be", "['all']: cfg_keys = list(cfg.keys()) else: _configs = configs if isinstance(configs,", "calibration group integer. Returns: numpy.ndarray: Boolean array selecting those frames", "the associated metadata for each. \"\"\" _cfg = deepcopy(self.configs) if", "configuration \"\"\" # Loop on keys in config match =", "self._impose_types(['comb_id', 'bkg_id', 'manual'], [int, int, str]) # Initialize internal attributes", "+= 1 unique = j == len(self.configs) if unique: if", "_cfg['None'] return _cfg def unique_configurations(self, force=False, copy=False, rm_none=False): \"\"\" Return", "print out a warning if there is problem try: time.Time(data['mjd'],", "attribute directly. The valid values for configuration keys is set", "global frames, types must have been defined; ' 'run get_frame_types.')", "vector was provided if rows is not None: output_tbl =", "== 0 # Select frames indx = self.type_bitmask.flagged(self['framebit'], ftype) if", "No group selected continue # Assign the group; ensure the", "empty msgs.warn('Both data and files are None in the instantiation", "> 1: # NOT ALLOWED!! # TODO: This should be", "without \\'setup\\' column; ' 'run set_configurations.') dispname = 'none' if", "radec_done = False if len(existing_keys) > 0 and match_type: for", "of the list for col in ['framebit', 'frametype', 'filename', 'directory']:", "the type names and the type bits. See :class:`pypeit.core.framematch.FrameTypeBitMask` for", "t['framebit'] return t def edit_frame_type(self, indx, frame_type, append=False): \"\"\" Edit", "provide an astropy.io.table.Table instance.') if 'filename' not in usrdata.keys(): raise", "# Allow some frame types to be used in all", "frametype just in the user-provided data? 
It may be (see", "Allow for single files _files = files if hasattr(files, '__len__')", "msgs.error('Cannot provide master key string without setup and calibbit; '", "of :attr:`configs` instead of the object itself. rm_none (:obj:`bool`, optional):", "output if ofile is not None and os.path.isfile(ofile) and not", "np.amax([ 0 if len(n) == 0 else int(n) for n", "This should probably go somewhere else or just removed. assert", "'None'. If copy is True, this is done *after* :attr:`configs`", "this. Is it related to # this change? # http://docs.astropy.org/en/stable/table/access_table.html#bytestring-columns-in-python-3", "with the associated metadata for each. Raises: PypeItError: Raised if", "for manual extraction configs (:obj:`str`, :obj:`list`, optional): One or more", "names. A second returned object provides the indices of the", "if obstime is None else obstime tiso = time.Time(_obstime, format='isot')", "fitstbl that will be at the heart of PypeItMetaData. Args:", "{0} '.format(ftype) + 'frames, configuration {0} does not have unique", "floating configs (e.g. grating angle) if isinstance(config[k], float): if row[k]", "' 'configuration cannot be None.') # Find the frames of", "_configs: use = np.isin(setups, _configs) setups = setups[use] indx =", "{0}; values must be None or a string.'.format( self.spectrograph.__class__.__name__) #", "write the table in ascii format with io.StringIO() as ff:", "in ignore: del cfgs[key] # Construct file ff = open(ofile,", "optional): Add additional ``PypeIt`` columns for manual extraction configs (:obj:`str`,", "Here's where we could add a SPIT option. Args: flag_unknown", "function for the writing routines. 
Args: ignore (:obj:`list`, optional): Ignore", "run get_frame_types.') if ftype == 'None': return self['framebit'] == 0", "for cfg_key in _configs.keys(): in_cfg = self.table['setup'] == cfg_key for", "t in tbl_cols])[0][0] if indx != ncol-1: tbl_cols.insert(ncol-1, tbl_cols.pop(indx)) #", "match user-provided metadata table. See ' 'usrdata argument of instantiation", "of each file. The metadata is validated using checks specified", "Function will fault if :func:`fits.getheader` fails to read any of", "Check if any of the configurations are not set not_setup", "edit frame_type (:obj:`str`, :obj:`list`): One or more frame types to", "unique configurations, always ignoring any 'None' # configurations... cfg =", "1 else ','.join(np.arange(n_cfg).astype(str)) for ftype in global_frames: indx = np.where(self.find_frames(ftype))[0]", "more frame types to append/overwrite. append (:obj:`bool`, optional): Append the", "'setup' isn't been defined. \"\"\" if 'setup' not in self.keys():", "or file name. If None, the table contents are printed", "= None if configs is None else np.atleast_1d(configs) # TODO:", "used to collect the data save to each file. The", "to each file. The class is used to provide the", "existing frametype and framebit columns.') if 'frametype' in self.keys(): del", "self.spectrograph.vet_instrument(self.table) def _impose_types(self, columns, types): \"\"\" Impose a set of", "to each science frame # TODO: Should this be 'standard'", "what is used to determine the calibration group of each", ".. 
todo:: - Here's where we could add a SPIT", "(:obj:`str`, :obj:`list`, optional): The list of files to include in", "= configs if isinstance(configs, list) else [configs] cfg_keys = [key", "rm = np.logical_not(np.isin(setups, ignore)) setups = setups[rm] indx = indx[rm]", "dictionary does not match table length.') msgs.info('Using user-provided frame types.')", "matches the input configuration Args: row (astropy.table.Row): From fitstbl config", "metadata exprng = self.par['scienceframe']['exprng'] if ftype == 'science' \\ else", "in self.keys(): msgs.error('Cannot provide instrument setup without \\'setup\\' column; '", "configuration. The output data table is identical to the pypeit", "only contain # frames from a single configuration if len(setup)", "Unique configurations setups, indx = np.unique(self['setup'], return_index=True) if ignore is", "pypeit.par import PypeItPar from pypeit.par.util import make_pypeit_file from pypeit.bitmask import", "the calibration bit number, and the detector number is provided", "return_index=True) if ignore is not None: # Remove the selected", "the unique instrument configurations. If run before the ``'setup'`` column", "unidentified files, leave without a type and continue. user (:obj:`dict`,", "The pypeit file is the main configuration file for PypeIt,", "a listed standard, # then it is probably a standard", "beginning of each string. Ignored if ``output`` does not specify", "for :attr:`spectrograph` will be ignored in the construction of the", "0: tbl_cols.insert(0, tbl_cols.pop(indx)) # Make sure the dithers and combination", "isn't frametype just in the user-provided data? It may be", "we # make the default 'all'? if configs is not", "def set_frame_types(self, type_bits, merge=True): \"\"\" Set and return a Table", "in self.keys(): del self.table['framebit'] # # TODO: This needs to", "append/overwrite. append (:obj:`bool`, optional): Append the frame type. 
If False,", "manual if write_manual: extras += ['manual'] for key in extras:", "you to set the columns to print and which column", "Overwrite any existing file; otherwise raise an exception. header (:obj:`str`,", "This check should be done elsewhere # Check if os.path.basename(ifile)", "viable mosaic for :attr:`spectrograph`; see :func:`~pypeit.spectrographs.spectrograph.Spectrograph.allowed_mosaics`. Returns: :obj:`str`: Master key", "if isinstance(indx, (int,np.integer)): return os.path.join(self['directory'][indx], self['filename'][indx]) return [os.path.join(d,f) for d,f", "return good = np.ones(len(self), dtype=bool) for key in cfg_limits.keys(): #", "of the observation. \"\"\" return time.Time(self['mjd'][row], format='mjd') def construct_basename(self, row,", "configurations to ignore rm = np.logical_not(np.isin(setups, ignore)) setups = setups[rm]", "if hasattr(files, '__len__') else [files] # Build lists to fill", "\"\"\" for c,t in zip(columns, types): if c in self.keys():", "types, the types should be provided as a string with", "rows=None, columns=None, sort_col=None, overwrite=False, header=None): \"\"\" Write the metadata either", "attribute of # PypeItMetaData? def row_match_config(row, config, spectrograph): \"\"\" Queries", "be at the heart of PypeItMetaData. Args: files (:obj:`str`, :obj:`list`):", "used to set the code behavior. If not provided, the", "should be converted to an assert statement... 
raise ValueError('CODING ERROR:", "is provided as an argument and converted to a zero-filled", "@staticmethod def default_keys(): return [ 'directory', 'filename', 'instrume' ] def", "undefined ('None') configurations #setup = np.unique(self['setup'][in_group]).tolist() setup = np.unique(self['setup'][in_cbit]).tolist() if", "list for col in ['framebit', 'frametype', 'filename', 'directory']: if col", "def write_sorted(self, ofile, overwrite=True, ignore=None, write_bkg_pairs=False, write_manual=False): \"\"\" Write the", "of a science image } } } #_det = np.arange(self.spectrograph.ndet)+1", "provide the header keyword data to include in the table", "of files to include in the table. data (table-like, optional):", "the same # calibration group; this needs to have dtype=object,", "headers or the data directly. If neither are provided the", "= np.isin(setups, _configs) setups = setups[use] indx = indx[use] return", "place. This function can be used to initialize columns that", "include in the output file. Can be provided as a", "good &= indx if np.all(good): # All values good, so", "of metadata keys to use to construct the configuration. If", "def __getitem__(self, item): return self.table.__getitem__(item) def __setitem__(self, item, value): return", "on specific columns self._impose_types(['comb_id', 'bkg_id', 'manual'], [int, int, str]) #", "of table rows. merge (:obj:`bool`, optional): Merge the types and", "names as these are the root of the # MasterFrames", "multiple calibration groups. Args: row (:obj:`int`): The 0-indexed row used", "that do not are removed from :attr:`table`, meaning this method", "overwrite=True, ignore=None, write_bkg_pairs=False, write_manual=False): \"\"\" Write the sorted file. 
The", "Using idname above gets overwritten by # this if the", "flag=ftypes.split(',')) return self.set_frame_types(type_bits, merge=merge) # Loop over the frame types", "\"\"\" if not append: self['framebit'][indx] = 0 self['framebit'][indx] = self.type_bitmask.turn_on(self['framebit'][indx],", "identifier. Ignores other inputs. Raises: PypeItError: Raised if none of", "subtbl.write(ff, format='ascii.fixed_width') data_lines = ff.getvalue().split('\\n')[:-1] # Write the file make_pypeit_file(ofiles[j],", "it will just add the column anyway, with the type", "not # set... sci_std_idx = np.where(np.any([self.find_frames('science'), self.find_frames('standard')], axis=0))[0] self['comb_id'][sci_std_idx] =", "Write it ff = open(ofile, 'w') ff.write(yaml.dump(utils.yamlify(cfg))) ff.close() def write_pypeit(self,", "may be something to put in the relevant spectrograph class.", "the calibration group bits. table (:class:`astropy.table.Table`): The table with the", "there are list of frame types to ignore but the", "# Write the file ff.write('##########################################################\\n') ff.write('Setup {:s}\\n'.format(setup)) ff.write('\\n'.join(dict_to_lines(cfgs[setup], level=1)) +", "PypeItError: Raised if the 'calibbit' column is not defined. \"\"\"", "associated with a specific frame. \"\"\" return self.calib_bitmask.flagged_bits(self['calibbit'][row]) # TODO:", "dtype=bool) for ftype in ignore_frames: use &= np.logical_not(self.find_frames(ftype)) indx =", "to do these gymnastics. Or better yet, just stop #", "by the :func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames` method for :attr:`spectrograph` will be ignored in", "not overwrite: msgs.error('{0} already exists. Use ovewrite=True to overwrite.'.format(ofile)) #", "ascii format with io.StringIO() as ff: output_tbl.write(ff, format='ascii.fixed_width') data_lines =", "data['directory'][idx]: data['directory'][idx] = '.' 
# Read the fits headers headarr", "of strings with the frame types to use in all", "use in all calibration groups (e.g., ['bias', 'dark']). default (:obj:`bool`,", "output directory does not exist, it is created. cfg_lines (:obj:`list`,", "binning, 'det': d, # 'namp': self.spectrograph.detector[d-1]['numamplifiers']} return setup[skey] if config_only", "return [os.path.join(d,f) for d,f in zip(self['directory'][indx], self['filename'][indx])] def set_frame_types(self, type_bits,", "b, f, ra, dec in zip(type_bits[indx], self['filename'][indx], self['ra'][indx], self['dec'][indx]): if", "an array of indices or a boolean array of the", "is not None: # Select frames in the same calibration", "detector number(s). If a tuple, it must include detectors designated", "'None' if np.sum(ignore) > 0: msgs.warn('Ignoring {0} frames with configuration", "generated within PypeIt. **Note**: This is ignored if `data` is", "# for yaml # Skip this group if ignore is", "configs.remove('None') # Ignore frames with undefined configurations n_cfg = len(configs)", "for c,t in zip(columns, types): if c in self.keys(): self.table[c]", "'bkg_id' not in self.keys(): self['bkg_id'] = -1 if assign_objects and", "cfg = _cfg # Iterate through the calibration bit names", "of the first occurrence of these setups, if requested. Raises:", "this type without a # configuration indx = (self.table['setup'] ==", "file output. .. todo:: - This is for backwards compatibility,", "continue # Find the unique values of meta for this", "the name of an ascii file to which to write", "defined, or if `global_frames` is provided but the frame types", "'comb_id', 'bkg_id']: if col not in tbl_cols: continue indx =", "top of the file, on string per file line; ``#", "None!) 
msgs.info(\"Typing completed!\") return self.set_frame_types(type_bits, merge=merge) def set_pypeit_cols(self, write_bkg_pairs=False, write_manual=False):", "in string.ascii_uppercase for str_j in string.ascii_uppercase] cfg_iter = list(string.ascii_uppercase) +", "return self.set_frame_types(type_bits, merge=merge) # Loop over the frame types for", "output in [None, 'table'] else output if ofile is not", "n_calib_groups(self): \"\"\"Return the number of calibration groups.\"\"\" return None if", "None: output_tbl = output_tbl[rows] # Select and sort the data", "set of data used to supplement or overwrite metadata read", "[det] #for d in _det: # setup[skey][str(d).zfill(2)] \\ # =", "return # At this point, we need the frame type", "ValueError: mjd = np.asarray(data['mjd']) filenames = np.asarray(data['filename']) bad_files = filenames[mjd", "'for {0}; values must be a list.'.format(self.spectrograph.__class__.__name__) # Check that", "setup dictionary with the default format. Raises: PypeItError: Raised if", "if not os.path.isdir(odir): os.makedirs(odir) # Create the output file name", "data-table format. The pypeit file is the main configuration file", "all have values that will yield good PypeIt reductions. Any", "def sort(self, col): return self.table.sort(col) def merge(self, usrdata, match_type=True): \"\"\"", "match the data type of the `usrdata` column to the", "values specified, so we're done return good = np.ones(len(self), dtype=bool)", "(:obj:`bool`, optional): Add additional ``PypeIt`` columns for manual extraction Raises:", "config_only else setup def get_configuration_names(self, ignore=None, return_index=False, configs=None): \"\"\" Get", "frame type and science frame ID, if the latter is", "See :attr:`configs`. Raises: PypeItError: Raised if the 'setup' isn't defined", "files we still want this to run. 
# Validate, print", "No values specified, so we're done return good = np.ones(len(self),", "group, if the 'calib' column does not exist - if", "calibs = '0' if n_cfg == 1 else ','.join(np.arange(n_cfg).astype(str)) for", "cbit in ignore: continue # Find the frames in this", "usrdata=None): \"\"\" Generate the fitstbl that will be at the", "if useIDname and 'idname' not in self.keys(): # raise ValueError('idname", "if there is problem try: time.Time(data['mjd'], format='mjd') except ValueError: mjd", "todo:: - Here's where we could add a SPIT option.", "correct length. Returns: list: List of the full paths of", "== 'all' or configs == ['all']: cfg_keys = list(cfg.keys()) else:", "{} for key in self.type_bitmask.keys(): #ftype_in_group = self.find_frames(key) & in_group", "finalize_usr_build(self, frametype, setup): \"\"\" Finalize the build of the table", "if len(indx) == 0: msgs.error('No frames to use to define", "== 1 else ','.join(np.arange(n_cfg).astype(str)) for ftype in global_frames: indx =", "user.items(): indx = self['filename'] == ifile type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftypes.split(','))", "\"\"\" Get the list of the unique configuration names. This", "in the data reduction. \"\"\" def __init__(self, spectrograph, par, files=None,", "the unique instrument configurations (setups) and the frames associated with", "standard star foundstd = flux_calib.find_standard_file(ra, dec, check=True) b = self.type_bitmask.turn_off(b,", "into sets. Requires the 'setup' column to have been defined.", "= 0 for c in self.configs.values(): if row_match_config(self.table[i], c, self.spectrograph):", "_configs = None if configs is None else np.atleast_1d(configs) #", "reverse order so I can always insert at the beginning", "the processing level of the metadata table, are directory, filename,", "that the metadata are valid for this column. indx =", "within PypeIt. match_type (:obj:`bool`, optional): Attempt to match the data", "table contents. 
rows (`numpy.ndarray`_, optional): A boolean vector selecting the", "using unavailable keywords!'.format(k)) self.table['setup'] = 'None' nrows = len(self) for", "ngroups = 0 for i in range(len(self)): if self['calib'][i] in", "that has an # invalid key, at least for now", "frame typing into the exiting table. Returns: :obj:`astropy.table.Table`: A Table", "!= None is_None = np.logical_not(indx) srt = np.append(np.where(is_None)[0], np.where(indx)[0][np.argsort(output_tbl[sort_col][indx].data)]) output_tbl", "the rows with the associated frame type. If the index", "0 # TODO: Placeholder: Allow an empty set of configuration", "in self.configs.values(): if row_match_config(self.table[i], c, self.spectrograph): break j += 1", "good to get around this. Is it related to #", "combination groups to be either undefined or to be unique", "Deal with floating configs (e.g. grating angle) if isinstance(config[k], float):", "existing columns. Returns: `astropy.table.Table`: Table with two columns, the frame", "ignore_frames is not None: if 'frametype' not in self.keys(): msgs.error('To", "# = {'binning': binning, 'det': d, # 'namp': self.spectrograph.detector[d-1]['numamplifiers']} return", "if 'frametype' in self.keys(): del self.table['frametype'] if 'framebit' in self.keys():", "Merge with user data, if present if usrdata is not", "file, on string per file line; ``# `` is added", "? Args: row (:obj:`int`): The 0-indexed row of the frame.", "setup, calib, and calibbit. sort_col (:obj:`str`, optional): Name of the", "< spectrograph.meta[k]['rtol']: match.append(True) else: match.append(False) else: # The np.all allows", "been defined; run ' 'get_frame_types.') # For each configuration, determine", "subtbl['mjd'].copy() # Deal with possibly None mjds if there were", "frame types to be used in all calibration groups #", "ignore (:obj:`list`, optional): Ignore configurations in the provided list. 
write_bkg_pairs", "list) else columns.split(',') badcol = [col not in all_cols for", "are from a single instrument configuration. :attr:`table` is modified in-place.", "root of the # MasterFrames and QA for icbit in", "already exists. Use ovewrite=True to overwrite.'.format(ofile)) # Construct the setups", "self.set_frame_types(type_bits, merge=merge) # Loop over the frame types for i,", "cfg_keys=cfg_keys) cfg_indx += 1 msgs.info('Found {0} unique configurations.'.format(len(self.configs))) return self._get_cfgs(copy=copy,", "find_frames will throw an exception if framebit is not #", "Make sure the calibbit column does not exist if 'calibbit'", "setup) odir = os.path.join(output_path, root) if not os.path.isdir(odir): os.makedirs(odir) #", "group indx &= self.find_calib_group(calib_ID) # Return return np.where(indx)[0] if index", "optional): A dictionary with the types designated by the user.", "subtbl.write(ff, format='ascii.fixed_width') ff.write('##end\\n') ff.close() # TODO: Do we need a", "proved too difficult. class PypeItMetaData: \"\"\" Provides a table and", "!= 1: msgs.warn('When setting the instrument configuration for {0} '.format(ftype)", "if unique: if cfg_indx == len(cfg_iter): msgs.error('Cannot assign more than", "= {} for i in range(len(uniq)): if ignore[i]: continue self.configs[uniq[i]]", "PypeIt. **Note**: This is ignored if `data` is also provided.", "itself. rm_none (:obj:`bool`, optional): Remove any configurations set to 'None'.", "it related to # this change? # http://docs.astropy.org/en/stable/table/access_table.html#bytestring-columns-in-python-3 # #", "they're unassigned), the combination groups are set to be unique", "names and the type bits. 
See :class:`pypeit.core.framematch.FrameTypeBitMask` for the allowed", "frametype, setup): \"\"\" Finalize the build of the table based", "> 0)] = str(i) # Allow some frame types to", "a configuration, the spectrograph defined frames that have been ignored", "maximum number # provided, regardless of whether or not a", "is used. Returns: dict: A dictionary with the metadata values", "none of the keywords in the provided configuration match with", "return t def edit_frame_type(self, indx, frame_type, append=False): \"\"\" Edit the", "the ignore_frames indx = np.arange(len(self)) ignore_frames = self.spectrograph.config_independent_frames() if ignore_frames", "columns for manual extraction Returns: `numpy.ndarray`_: Array of columns to", "indx = (self.table['setup'] == 'None') & self.find_frames(ftype) if not np.any(indx):", "meta data defined, so just set all # the frames", "optional): Parsed for frametype for a few instruments (e.g. VLT)", "Returns: list: List of the full paths of one or", "\", \"\"), self.spectrograph.camera, datetime.datetime.strftime(dtime, '%Y%m%dT'), tiso.value.split(\"T\")[1].replace(':','')) def get_setup(self, row, det=None,", "output to an ascii file with open(ofile, 'w') as f:", "object provides the indices of the first occurrence of these", "values.' .format(meta)) # Find the frames of this type that", "in self.keys() and 'calibbit' not in self.keys() and not force:", "header keywords specified for the provided spectrograph. It is expected", "when writing to a # file... return None def find_calib_group(self,", "'configuration cannot be None.') # Find the frames of each", "matched to the relevant science frame. Args: ftype (str): The", "been defined (see :func:`get_frame_types`), this method will fault! Args: force", "0 # Set the calibration bits for i in range(len(self)):", "to use for sorting. 
Args: output (:obj:`str`, optional): Output signature", "the user data in the table for key in usrdata.keys():", "class is used to provide the header keyword data to", "frames are going to be # removed msg = 'The", "better yet, just stop # producing/using the *.calib file. _cfg", "\\ 'for {0}; values must be a list.'.format(self.spectrograph.__class__.__name__) # Check", "the instantiation of :class:`astropy.table.Table`. usrdata (:obj:`astropy.table.Table`, optional): A user provided", "if len(n) == 0 else int(n) for n in self['calib'][i].replace(':',',').split(',')])", "None: if len(user.keys()) != len(self): raise ValueError('The user-provided dictionary does", "set_user_added_columns(self): \"\"\" Set columns that the user *might* add ..", "table must have a `filename` column that is used to", "file with the same name. ignore (:obj:`list`, optional): Ignore configurations", "combination groups to the set of objects (science or standard", "[None]*len(cfg_keys) for j,setup in enumerate(cfg_keys): # Create the output directory", "None. Raises: ValueError: Raised if the columns to include are", "if not np.any(indx): continue if metakey is None: # No", "# provided, regardless of whether or not a science frame", "np.where(np.logical_not(good))[0] for i in indx: msg += ' {0}\\n'.format(self['filename'][i]) msgs.warn(msg)", "== 0: self.configs = {} self.configs[cfg_iter[cfg_indx]] = {} msgs.info('All files", "write_calib(self, ofile, overwrite=True, ignore=None): \"\"\" Write the calib file. The", "Find the rows with the associated frame type. If the", "in self.keys() else self['decker'][row] slitwid = 'none' if 'slitwid' not", "angle) if isinstance(config[k], float): if row[k] is None: match.append(False) elif", ".. include common links, assuming primary doc root is up", "metadata required by PypeIt. .. include common links, assuming primary", "isinstance(configs, list) else [configs] cfg_keys = [key for key in", "have to do these gymnastics. 
Or better yet, just stop", "Returns: str: The root name for file output. \"\"\" _obstime", "def __init__(self, spectrograph, par, files=None, data=None, usrdata=None, strict=True): if data", "type identifier. See the keys for :class:`pypeit.core.framematch.FrameTypeBitMask`. calib_ID (:obj:`int`, optional):", "this function simply returns :attr:`config` (cf. ``force``). .. warning:: Any", "always insert at the beginning of the list for col", "full paths to one or more frames. Args: indx (:obj:`int`,", "and background IDs are the last # few columns ncol", "The type can be anything allowed by the instantiation of", "+ '{0} values.' .format(meta)) # Find the frames of this", "self.spectrograph.name, len(self))]) @staticmethod def default_keys(): return [ 'directory', 'filename', 'instrume'", "the configuration column (A, B, C, etc), the calibration group", "def merge(self, usrdata, match_type=True): \"\"\" Use the provided table to", "key in self.type_bitmask.keys(): #ftype_in_group = self.find_frames(key) & in_group ftype_in_group =", "set to 'None'. If copy is True, this is done", "set, this simply constructs the configuration dictionary using the unique", "associated with each configuration. The output data table is identical", "frames must also match the science frame index, if it", "if 'manual' not in self.keys(): self['manual'] = '' def write_sorted(self,", "column is initialized, this function determines the unique instrument configurations", "open(ofile, 'w') as f: if header is not None: _header", "# Deal with possibly None mjds if there were corrupt", "the relevant fits file metadata used during the reduction. The", "be anything allowed by the instantiation of :class:`astropy.table.Table`. usrdata (:obj:`astropy.table.Table`,", "use to define configurations!') # Get the list of keys", "optional): When constructing the :class:`pypeit.metadata.PypeItMetaData` object, include two columns called", "force=False): \"\"\" Group calibration frames into sets. 
Requires the 'setup'", "cfg in _configs.items(): if row_match_config(self.table[i], cfg, self.spectrograph): self.table['setup'][i] = d", "pypeit.par.util import make_pypeit_file from pypeit.bitmask import BitMask # TODO: Turn", "set, ignore anything listed in # the ignore_frames indx =", "if there are unidentified files, leave without a type and", "Include a combination of instrument-specific checks using # combinations of", "it is probably a standard star foundstd = flux_calib.find_standard_file(ra, dec,", "cfg_indx = 0 # TODO: Placeholder: Allow an empty set", "use for sorting. Args: output (:obj:`str`, optional): Output signature or", "any # undefined ('None') configurations #setup = np.unique(self['setup'][in_group]).tolist() setup =", "given calibration group. .. todo:: - This is for backwards", "validated using checks specified by the provided spectrograph class. For", "os.path.join(self['directory'][indx], self['filename'][indx]) return [os.path.join(d,f) for d,f in zip(self['directory'][indx], self['filename'][indx])] def", "or 'calibbit' columns haven't been defined. \"\"\" if 'setup' not", "setups, if requested. Raises: PypeItError: Raised if the 'setup' isn't", "as ff: subtbl.write(ff, format='ascii.fixed_width') data_lines = ff.getvalue().split('\\n')[:-1] # Write the", "meta for this configuration uniq_meta = np.unique(self.table[metakey][in_cfg].data) # Warn the", "True if the row matches the input configuration \"\"\" #", "# Groups have been set but the bits have not", "\"\"\" Construct the MJD of when the frame was observed.", "For frames that have multiple types, the types should be", "in ascii format with io.StringIO() as ff: output_tbl.write(ff, format='ascii.fixed_width') data_lines", "if :attr:`configs` has not yet been defined. copy (:obj:`bool`, optional):", "calib file? 
def write_calib(self, ofile, overwrite=True, ignore=None): \"\"\" Write the", "# Impose types on specific columns self._impose_types(['comb_id', 'bkg_id', 'manual'], [int,", "Set the calibration group bit based on the string values", "unique instrument configurations by finding unique combinations of the items", "None mjds if there were corrupt header cards mjd[mjd ==", "' 'run set_configurations.') dispname = 'none' if 'dispname' not in", "frame type. The frames must also match the science frame", "of objects (science or standard frames) to a unique integer.", "to each instrument. Args: output_path (:obj:`str`, optional): Root path for", "'bkg_id']: if col not in tbl_cols: continue indx = np.where([t", "on keys in config match = [] for k in", "# Assign everything from the same configuration to the same", "= framematch.FrameTypeBitMask() # Build table self.table = table.Table(data if files", "mostly a convenience function for the writing routines. Args: ignore", "if grp is None: # No group selected continue #", "an instrument configuration - This is primarily used for QA", "is a problem with the reading the header for any", "(:obj:`bool`, optional): Function will fault if :func:`fits.getheader` fails to read", "must not be None for file:' + msgs.newline() + f)", "it is created. cfg_lines (:obj:`list`, optional): The list of configuration", "One or more frame types to append/overwrite. append (:obj:`bool`, optional):", "self['filename'] == ifile type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftypes.split(',')) return self.set_frame_types(type_bits, merge=merge)", "is False and the file exists. 
\"\"\" # Check the", "if 'dichroic' not in self.keys() else self['dichroic'][row] decker = 'none'", "set_calibration_groups.') det_name = self.spectrograph.get_det_name(det) return f\"{self['setup'][row]}_{self['calibbit'][row]}_{det_name}\" def construct_obstime(self, row): \"\"\"", "= ftype_colmA fbits_colm = table.Column(type_bits, name='framebit') t = table.Table([ftype_colm, fbits_colm])", "troublesome # character from {0}. Returning {1}.'.format( meta_key, value)) data[meta_key].append(value)", "'None']: # No information, keep going continue # Convert to", "and `bkg_id` that identify object and background frame pairs. write_manual", "is None and files is None: # Warn that table", "key is the combination of the configuration, the calibration group,", "in zip(type_bits[indx], self['filename'][indx], self['ra'][indx], self['dec'][indx]): if ra == 'None' or", "code behavior. files (:obj:`str`, :obj:`list`, optional): The list of files", "is done *after* :attr:`configs` is copied to a new dictionary.", "'None' in setup: setup.remove('None') # Make sure that each calibration", "is different from :func:`unique_configurations` because the latter determines and provides", "# group if 'setup' not in self.keys(): msgs.error('Must have defined", "] def keys(self): return self.table.keys() def sort(self, col): return self.table.sort(col)", "Attempt to match the data type in `usrdata` to the", "to overwrite.') # Check the rows input if rows is", "(e.g., ['A','C']). Returns: numpy.array: The list of unique setup names.", "None else configs for k, cfg in _configs.items(): if len(set(cfg.keys())", "not # unique for this configuration. if uniq_meta.size != 1:", "frames of each type in this group cfg[setup[0]][cbit] = {}", "'framebit' not in self.keys(): msgs.error('Frame types are not set. 
First", "can be used to set the frame type of each", "None: # No matching meta data defined, so just set", "that it is unaltered output_tbl = self.table.copy() # Select the", "All are set, so we're done return # Some frame", "in self.keys() or 'calibbit' not in self.keys(): msgs.error('Cannot write calibration", "ifile type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftypes.split(',')) return self.set_frame_types(type_bits, merge=merge) # Loop", "pypeit files. If None, set to current directory. If the", "bitmask with the frame types. The length must match the", ":obj:`dict`: A nested dictionary, one dictionary per configuration with the", "yet. \"\"\" # Set the default if requested and 'calib'", "type by hand. Args: indx (:obj:`int`): The 0-indexed row in", "this appropriate for NIR data? \"\"\" is_science = self.find_frames('science') for", "False and the file exists. \"\"\" # Check the file", "to match to the metadata table generated within PypeIt. match_type", "None: # Remove the selected configurations to ignore rm =", "MasterFrames to be used with multiple calibration groups. Args: row", "specific to each instrument. Args: output_path (:obj:`str`, optional): Root path", "_configs = configs if isinstance(configs, list) else [configs] cfg_keys =", "type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftypes.split(',')) return self.set_frame_types(type_bits, merge=merge) # Loop over", "for each fits file to use in the data reduction.", "(:class:`astropy.time.Time`, optional): The MJD of the observation. 
If None, constructed", "for ftype in global_frames: indx = np.where(self.find_frames(ftype))[0] for i in", "= meta_data_model[key]['dtype'] else: dtype = self.table[key].dtype # Deal with None's", ":func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames` does not return None and the frame types have", "in the table to edit frame_type (:obj:`str`, :obj:`list`): One or", "__init__(self, spectrograph, par, files=None, data=None, usrdata=None, strict=True): if data is", "the table is instantiated without any data. Args: spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):", "= indx[use] return setups, indx if return_index else setups def", "output_path (:obj:`str`, optional): Root path for the output pypeit files.", "PypeItMetaData. Args: files (:obj:`str`, :obj:`list`): One or more files to", "on the relevant bits type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftype) # Find", "in the table. The type can be anything allowed by", "existing file; otherwise raise an exception. header (:obj:`str`, :obj:`list`, optional):", "99). Using the calibration bit in the keyword allows MasterFrames", "existing data type. If it can't it will just add", "not append: self['framebit'][indx] = 0 self['framebit'][indx] = self.type_bitmask.turn_on(self['framebit'][indx], flag=frame_type) self['frametype'][indx]", "fbits_colm = table.Column(type_bits, name='framebit') t = table.Table([ftype_colm, fbits_colm]) if merge:", "an integer array if index=True, with the rows that contain", "# the frames to this (first) configuration self.table['setup'][indx] = cfg_key", "table rows. merge (:obj:`bool`, optional): Merge the types and bits", "output_tbl[tbl_cols] if output == 'table': # Instead of writing, just", "= len(configs) # TODO: Science frames can only have one", "strings will be truncated at 4 characters. 
self.table['calib'] = np.full(len(self),", "not be None for file:' + msgs.newline() + f) msgs.warn('The", "checks specified by the provided spectrograph class. For the data", "do not are removed from :attr:`table`, meaning this method may", "if there are # empty or corrupt files we still", "is not None and 'all' not in _configs: use =", "existing file with the same name. ignore (:obj:`list`, optional): Ignore", "If the 'comb_id' or 'bkg_id' columns do not exist, they're", "is None: # No matching meta data defined, so just", "key in usrdata.keys(): self.table[key] = usrdata[key][srt] def finalize_usr_build(self, frametype, setup):", "for k in _cfg_keys} def master_key(self, row, det=1): \"\"\" Construct", "set_configurations.') configs = np.unique(self['setup'].data).tolist() if 'None' in configs: configs.remove('None') #", "relevant metadata for each fits file to use in the", "done *after* :attr:`configs` is copied to a new dictionary. Returns:", "'frametype' not in self.keys(): msgs.error('To set global frames, types must", "self['setup'] = fill return _configs = self.unique_configurations() if configs is", "= self.find_frames('science') for i in range(len(self)): if not is_science[i]: continue", "occurrence of these setups, if requested. Raises: PypeItError: Raised if", "to provide the header keyword data to include in the", "the first file to set the first unique configuration self.configs", "{}'.format(self['setup'][row]) # Key names *must* match configuration_keys() for spectrographs setup", "message msg = 'Time invalid for {0} files.\\n'.format(len(bad_files)) msg +=", "#for d in _det: # setup[skey][str(d).zfill(2)] \\ # = {'binning':", "instrument configurations (setups) and the frames associated with each configuration.", "the fitstbl matches the input configuration Args: row (astropy.table.Row): From", "Otherwise, the string is interpreted as the name of an", "with floating configs (e.g. 
grating angle) if isinstance(config[k], float): if", "is for backwards compatibility, but we should consider reformatting it.", "typing.') # Start msgs.info(\"Typing files\") type_bits = np.zeros(len(self), dtype=self.type_bitmask.minimum_dtype()) #", "= 'none' if 'decker' not in self.keys() else self['decker'][row] slitwid", "good PypeIt reductions. Any frames that do not are removed", "so that it is unaltered output_tbl = self.table.copy() # Select", "sorted file lists all the unique instrument configurations (setups) and", "This currently only checks that the science frames are associated", "the default parameters specific to the provided spectrograph are used.", "one or more frames. \"\"\" if isinstance(indx, (int,np.integer)): return os.path.join(self['directory'][indx],", "f, ra, dec in zip(type_bits[indx], self['filename'][indx], self['ra'][indx], self['dec'][indx]): if ra", "unique configurations. If :func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames` does not return None and the", "defined; ' 'run get_frame_types.') calibs = '0' if n_cfg ==", "['bias', 'dark']). default (:obj:`bool`, optional): If the 'calib' column is", "set must be of type PypeItPar.') self.type_bitmask = framematch.FrameTypeBitMask() #", "cfg_keys=None): \"\"\" Return the configuration dictionary for a given frame.", "with the provided calibration group. Args: grp (:obj:`int`): The calibration", "the 'setup' isn't defined and split is True. 
Returns: :obj:`list`:", "set global frames, types must have been defined; ' 'run", "0 for c in self.configs.values(): if row_match_config(self.table[i], c, self.spectrograph): break", "t['frametype'] self['framebit'] = t['framebit'] return t def edit_frame_type(self, indx, frame_type,", "optional): A list of strings with the frame types to", "specific columns self._impose_types(['comb_id', 'bkg_id', 'manual'], [int, int, str]) # Initialize", "return self.table.__setitem__(item, value) def __len__(self): return self.table.__len__() def __repr__(self): return", "data type. If it can't it will just add the", "exist yet if 'calib' not in self.keys() and default: self['calib']", "match the science frame index, if it is provided. Args:", "User data (for frame type) if usrdata is None: usr_row", "Returns: numpy.ndarray: Boolean array selecting those frames in the table", "already set. Finding unique configurations.') uniq, indx = np.unique(self['setup'], return_index=True)", "# Set the default if requested and 'calib' doesn't exist", "sort by {sort_col}. Not a valid column.') # Ignore any", "cfg[setup[0]][cbit][key] = [ os.path.join(d,f) for d,f in zip(self['directory'][ftype_in_group], self['filename'][ftype_in_group])] #", "and the row index where it first occurs. This is", "to a new dictionary. Returns: :obj:`dict`: A nested dictionary, one", "output_tbl # Always write the table in ascii format with", "DataContainer # Initially tried to subclass this from astropy.table.Table, but", "to construct the configuration. If None, the `configuration_keys` of :attr:`spectrograph`", "= d # Check if any of the configurations are", "here? Can't `configs is # None` mean that you want", "of a boolean array. 
Returns: numpy.ndarray: A boolean array, or", "_configs] if len(cfg_keys) == 0: msgs.error('No setups to write!') #", "= list(self.keys()) elif columns == 'pypeit': tbl_cols = self.set_pypeit_cols(write_bkg_pairs=True) else:", "self.spectrograph.name, [], cfg_lines=cfg_lines, setup_lines=setup_lines, sorted_files=data_lines, paths=paths) # Return return ofiles", "grp (:obj:`int`): The calibration group integer. Returns: numpy.ndarray: Boolean array", "on the string representation of the groups self._set_calib_group_bits() # Check", "and files is None: # Warn that table will be", "do these gymnastics. Or better yet, just stop # producing/using", "if 'setup' not in self.keys(): msgs.error('Cannot write sorted instrument configuration", "types to be used in all calibration groups # (like", "is None: usr_row = None else: # TODO: This check", "file typing.') # Start msgs.info(\"Typing files\") type_bits = np.zeros(len(self), dtype=self.type_bitmask.minimum_dtype())", "For now, check that the configuration values were # correctly", "dtype=bool) for key in cfg_limits.keys(): # NOTE: For now, check", "# TODO: We should edit the relevant follow-on code so", "valid. FileExistsError: Raised if overwrite is False and the file", "= files if hasattr(files, '__len__') else [files] # Build lists", "valid_configuration_values is not correctly defined ' \\ 'for {0}; values", "# Find the frames of each type in this group", "def _set_calib_group_bits(self): \"\"\" Set the calibration group bit based on", "the column to use for sorting the output. If None,", "validation checks. par (:obj:`pypeit.par.pypeitpar.PypeItPar`): PypeIt parameters used to set the", "# Loop on keys in config match = [] for", "user-provided data, typically pulled from the PypeIt file. This function:", "this configuration indx = self['setup'] == setup if not np.any(indx):", "Should this be 'standard' or 'science' or both? 
if 'ra'", "self.keys(): # raise ValueError('idname is not set in table; cannot", "defined ' \\ 'for {0}; values must be a list.'.format(self.spectrograph.__class__.__name__)", "calibration groups to be reconstructed if the 'calib' column already", "set to None.'.format( np.sum(ignore))) self.configs = {} for i in", "a string with comma-separated types. setup (:obj:`str`): If the 'setup'", "for key in cfgs.keys(): if key in ignore: del cfgs[key]", "not force: return if 'setup' not in self.keys() and fill", "calib, comb_id and bkg_id write_manual (:obj:`bool`, optional): Add additional ``PypeIt``", "the code behavior. If not provided, the default parameters specific", "calibbit; ' 'run set_configurations and set_calibration_groups.') det_name = self.spectrograph.get_det_name(det) return", "if return_index else setups def _get_cfgs(self, copy=False, rm_none=False): \"\"\" Convenience", "{sort_col}. Not a valid column.') # Ignore any NoneTypes indx", "Ignore configurations in the provided list. return_index (:obj:`bool`, optional): Return", "that will yield good PypeIt reductions. Any frames that do", "is up one directory .. include:: ../include/links.rst \"\"\" import os", "# their MJD. This is the desired behavior since if", "t in tbl_cols])[0][0] if indx != 0: tbl_cols.insert(0, tbl_cols.pop(indx)) #", "= spectrograph self.par = par if not isinstance(self.par, PypeItPar): raise", "some of the frames are going to be # removed", "output data table is identical to the pypeit file output.", ":class:`pypeit.core.framematch.FrameTypeBitMask`. If set to the string 'None', this returns all", "_files = files if hasattr(files, '__len__') else [files] # Build", "ff: subtbl.write(ff, format='ascii.fixed_width') data_lines = ff.getvalue().split('\\n')[:-1] # Write the file", "You can avoid this step by setting `match_type=False`. 
Args: usrdata", "number, and the detector number is provided as an argument", "obstime=None): \"\"\" Construct the root name primarily for PypeIt file", "global_frames: indx = np.where(self.find_frames(ftype))[0] for i in indx: self['calib'][i] =", "# Initialize internals self.spectrograph = spectrograph self.par = par if", "value of the dictionary, respectively. The number of keys therefore", "TODO: For now, use this assert to check that the", "The frame type identifier. See the keys for :class:`pypeit.core.framematch.FrameTypeBitMask`. calib_ID", "will throw an exception if framebit is not # set...", "the table. data (table-like, optional): The data to include in", "unique: if cfg_indx == len(cfg_iter): msgs.error('Cannot assign more than {0}", "ID is the same as included in the configuration column", "merge=merge) # Loop over the frame types for i, ftype", "to include in the table and specify any validation checks.", "the selected calibration group. Raises: PypeItError: Raised if the 'calibbit'", "def finalize_usr_build(self, frametype, setup): \"\"\" Finalize the build of the", "if 'dispangle' not in self.keys() else self['dispangle'][row] dichroic = 'none'", "the associated metadata for each. Raises: PypeItError: Raised if there", "deepcopy(cfg[setup]) cfg = _cfg # Iterate through the calibration bit", "letters: A, B, C, etc. double_alphabet = [str_i + str_j", "have been printed/written to disk is returned. Otherwise, the string", "a list directly or as a comma-separated string. If None", "= np.amax([ 0 if len(n) == 0 else int(n) for", "the index is provided, the frames must also be matched", "(the maximum number of detectors is 99). 
Using the calibration", "!= 0: tbl_cols.insert(0, tbl_cols.pop(indx)) # Make sure the dithers and", "should only be run if all files are from a", "= filenames[mjd == None] # Print status message msg =", "if the 'calib' column does not exist - if the", "msgs.info('Using metadata to determine unique configurations.') # If the frame", "usr_row=usr_row, ignore_bad_header = self.par['rdx']['ignore_bad_headers']) if isinstance(value, str) and '#' in", "self['ra'][indx], self['dec'][indx]): if ra == 'None' or dec == 'None':", "data by a given column if sort_col is not None:", "primary doc root is up one directory .. include:: ../include/links.rst", "types returned by the :func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames` method for :attr:`spectrograph` will be", "dispangle = 'none' if 'dispangle' not in self.keys() else self['dispangle'][row]", "The np.all allows for arrays in the Table (e.g. binning)", "calibbit column does not exist if 'calibbit' in self.keys(): del", "frame type name and bits. \"\"\" # Making Columns to", "configurations... cfg = self.unique_configurations(copy=True, rm_none=True) # Get the setups to", "= indx[use] if len(indx) == 0: msgs.error('No frames to use", "any of the other files show a different # configuration.", "use for sorting is not valid. FileExistsError: Raised if overwrite", "= self.unique_configurations(copy=True, rm_none=True) # TODO: We should edit the relevant", "the allowed frame types. 
\"\"\" # Checks if 'frametype' in", "the frame types to use in all calibration groups (e.g.,", "if not flag_unknown: msgs.error(\"Check these files before continuing\") # Finish", "# needs to be defined first) ofile = None if", "== cfg_key for ftype, metakey in ignore_frames.items(): # TODO: For", "else: # TODO: Do we want to do this here?", "directory root = '{0}_{1}'.format(self.spectrograph.name, setup) odir = os.path.join(output_path, root) if", "zip(type_bits[indx], self['filename'][indx], self['ra'][indx], self['dec'][indx]): if ra == 'None' or dec", "be from a single configuration.') return self._get_cfgs(copy=copy, rm_none=rm_none) # Use", "table, coordinates, time, units from pypeit import msgs from pypeit", ".. todo:: - This is for backwards compatibility, but we", "Checks if 'frametype' in self.keys() or 'framebit' in self.keys(): msgs.warn('Removing", "Is there a reason why this is not an attribute", "in enumerate(self.type_bitmask.keys()): # # Initialize: Flag frames with the correct", "value for float meta (e.g. dispangle) Returns: bool: True if", "are overwitten by the provided type. \"\"\" if not append:", "types to append/overwrite. append (:obj:`bool`, optional): Append the frame type.", "to the same # calibration group; this needs to have", "\"\"\" Assign each frame to a configuration (setup) and include", "= self.table[c].astype(t) def _build(self, files, strict=True, usrdata=None): \"\"\" Generate the", "this to run. # Validate, print out a warning if", "== 'None') & self.find_frames(ftype) if not np.any(indx): continue if metakey", "with the same configuration. .. todo:: - Maintain a detailed", "msgs.warn('The above file could be a twilight flat frame that", "for icbit in np.unique(self['calibbit'].data): cbit = int(icbit) # for yaml", "compatibility, but we should consider reformatting/removing it. 
Args: ofile (:obj:`str`):", "defined first) ofile = None if output in [None, 'table']", "all_cols = list(self.keys()) tbl_cols = columns if isinstance(columns, list) else", "in tbl_cols])[0][0] if indx != ncol-1: tbl_cols.insert(ncol-1, tbl_cols.pop(indx)) # Copy", "in [None, 'table'] else output if ofile is not None", "[str_i + str_j for str_i in string.ascii_uppercase for str_j in", "file lists all the unique instrument configurations (setups) and the", "which should nominally follow an execution of # pypeit_setup. If", "force (:obj:`bool`, optional): Force the configurations to be reset. fill", "the table to edit frame_type (:obj:`str`, :obj:`list`): One or more", "simply returns :attr:`config` (cf. ``force``). .. warning:: Any frame types", "group #in_group = self.find_calib_group(i) in_cbit = self['calibbit'] == cbit #", "selected by the :func:`pypeit.spectrographs.spectrograph.Spectrograph.pypeit_file_keys`, which can be specific to each", "should consider reformatting/removing it. - This is complicated by allowing", "file name to the table data['directory'][idx], data['filename'][idx] = os.path.split(ifile) if", "specified, so we're done return good = np.ones(len(self), dtype=bool) for", "reason why this is not an attribute of # PypeItMetaData?", "Args: ignore (:obj:`list`, optional): Ignore configurations in the provided list.", "# Define the bitmask and initialize the bits self.calib_bitmask =", "fits table is dictated by the header keywords specified for", "overwrite=True, ignore=None): \"\"\" Write the calib file. The calib file", "with a valid letter identifier; i.e., the ' 'configuration cannot", "column has been set, this simply constructs the configuration dictionary", "include common links, assuming primary doc root is up one", "if index else indx def find_frame_files(self, ftype, calib_ID=None): \"\"\" Return", "included in the fitstbl (nearly the complete list). 
Args: write_bkg_pairs", "column in `usrdata`, the function will try to match the", "pypeit file output. .. todo:: - This is for backwards", "= columns if isinstance(columns, list) else columns.split(',') badcol = [col", "configuration. if uniq_meta.size != 1: msgs.warn('When setting the instrument configuration", "The data to include in the table. The type can", "add it # here, because this method is only called", "self['calib'] = '0' # Make sure the calibbit column does", "the setup. det (:obj:`int`, optional): The 1-indexed detector to include.", "with the frames to return. Can be an array of", "= len(tbl_cols) for col in ['dithpat', 'dithpos', 'dithoff', 'calib', 'comb_id',", "ignore_frames.items(): # TODO: For now, use this assert to check", "occurs. This is different from :func:`unique_configurations` because the latter determines", "the file in the provided row. The master key is", "usrdata['ra'][~nones] = ras.astype(dtype) usrdata['dec'][~nones] = decs.astype(dtype) radec_done = True else:", "par, files=None, data=None, usrdata=None, strict=True): if data is None and", "maximum number of detectors is 99). Using the calibration bit", "configurations in the provided list. write_bkg_pairs (:obj:`bool`, optional): Add additional", "``configuration_keys`` method. If run after the ``'setup'`` column has been", "and algorithmic parameters and listing the data files to read.", "spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): The spectrograph used to collect the data save", "same # calibration group; this needs to have dtype=object, otherwise", "' 'get_frame_types.') # For each configuration, determine if any of", "optional): Force the configurations to be redetermined. Otherwise the configurations", "the configuration dictionary using the unique configurations in that column.", "Rest # Allow for str RA, DEC (backwards compatability) if", "method may modify that attribute directly. 
The valid values for", "= '' def write_sorted(self, ofile, overwrite=True, ignore=None, write_bkg_pairs=False, write_manual=False): \"\"\"", "in self['filename'][indx]: msgs.info(f) if not flag_unknown: msgs.error(\"Check these files before", "self['framebit'][indx] = self.type_bitmask.turn_on(self['framebit'][indx], flag=frame_type) self['frametype'][indx] = self.type_bitmask.type_names(self['framebit'][indx]) def get_frame_types(self, flag_unknown=False,", "# Get the paths in_cfg = self['setup'] == setup if", "If None, all rows are written. Shape must match the", "the `framebit` column is not set in the table. \"\"\"", "during the reduction. The content of the fits table is", "set_configurations.') dispname = 'none' if 'dispname' not in self.keys() else", "unique values of meta for this configuration uniq_meta = np.unique(self.table[metakey][in_cfg].data)", "screen. The method allows you to set the columns to", "provided list. Raises: PypeItError: Raised if the 'setup' or 'calibbit'", "['None']*len(_files) data['filename'] = ['None']*len(_files) # Build the table for idx,", "type... if int(str(ftype_colmA.dtype)[2:]) < 9: ftype_colm = table.Column(self.type_bitmask.type_names(type_bits), dtype='U9', name='frametype')", "(:obj:`int`): The calibration group integer. Returns: numpy.ndarray: Boolean array selecting", "If neither are provided the table is instantiated without any", "& (self['framebit'] > 0)] = str(i) # Allow some frame", "= BitMask(np.arange(ngroups)) self['calibbit'] = 0 # Set the calibration bits", "be the same as returned by the spectrograph `configuration_keys` method.", "group if 'setup' not in self.keys(): msgs.error('Must have defined \\'setup\\'", "configuration - This is primarily used for QA now; but", "the data directly. If neither are provided the table is", "or | ? 
Using idname above gets overwritten by #", "the same as included in the configuration column (A, B,", "0-indexed row in the table to edit frame_type (:obj:`str`, :obj:`list`):", "setup (:obj:`str`): If the 'setup' columns does not exist, fill", "function can be used to initialize the combination group and", "(:obj:`bool`, optional): If all of 'comb_id' values are less than", "ra == 'None' or dec == 'None': msgs.warn('RA and DEC", "np.any(badcol): raise ValueError('The following columns are not valid: {0}'.format( ',", "itself. Returns: dict: The pypeit setup dictionary with the default", "headers:\\n' for file in bad_files: msg += ' {0}\\n'.format(file) msgs.warn(msg)", ":func:`unique_configurations` instead. Args: copy (:obj:`bool`, optional): Return a deep copy", "# Checks if 'frametype' in self.keys() or 'framebit' in self.keys():", "setting the instrument configuration for {0} '.format(ftype) + 'frames, configuration", "os.path.split(ifile) if not data['directory'][idx]: data['directory'][idx] = '.' # Read the", "# For each configuration, determine if any of the frames", "its current state. overwrite (:obj:`bool`, optional): Overwrite any existing file;", "i in indx: self['calib'][i] = calibs # Set the bits", "if not np.any(not_setup): # All are set, so we're done", "TODO: Use & or | ? Using idname above gets", "or a boolean array of the correct length. Returns: list:", "msgs.error('No setups to write!') # Grab output columns output_cols =", "Check that the groups are valid self._check_calib_groups() def find_frames(self, ftype,", "!= len(self): raise ValueError('The user-provided dictionary does not match table", "the provided list. return_index (:obj:`bool`, optional): Return row indices with", "for t in tbl_cols])[0][0] if indx != 0: tbl_cols.insert(0, tbl_cols.pop(indx))", "to keep track of the calibration group bits. 
table (:class:`astropy.table.Table`):", "if the columns to include are not valid, or if", "calibration groups # (like biases and darks) if global_frames is", "the output pypeit files. If None, set to current directory.", "at 4 characters. self.table['calib'] = np.full(len(self), 'None', dtype=object) for i", "'table'``. Otherwise, the method always returns None. Raises: ValueError: Raised", "for i in range(len(self)): # Convert the string to the", "fill the configuration setup columns with this single identifier. Ignores", "if self.configs is not None and not force: return self._get_cfgs(copy=copy,", "overwrite: raise FileExistsError(f'{ofile} already exists; set flag to overwrite.') #", "going continue # Convert to a list of numbers l", "= [ os.path.join(d,f) for d,f in zip(self['directory'][ftype_in_group], self['filename'][ftype_in_group])] # Write", "msgs.error('Cannot write calibration groups without \\'setup\\' and \\'calibbit\\' ' 'columns;", "empty!') # Initialize internals self.spectrograph = spectrograph self.par = par", "format='ascii.fixed_width') data_lines = ff.getvalue().split('\\n')[:-1] # Write the file make_pypeit_file(ofiles[j], self.spectrograph.name,", "column first; try running set_configurations.') configs = np.unique(self['setup'].data).tolist() if 'None'", "still done return # At this point, we need the", "# flagging all as true # indx = self['idname'] ==", "of strings (e.g., ['A','C']). See :attr:`configs`. Raises: PypeItError: Raised if", "for ftype in ignore_frames: use &= np.logical_not(self.find_frames(ftype)) indx = indx[use]", "the latter determines and provides the configurations themselves. 
This is", "could be a twilight flat frame that was' + msgs.newline()", "'' def write_sorted(self, ofile, overwrite=True, ignore=None, write_bkg_pairs=False, write_manual=False): \"\"\" Write", "to check that the # metakey is either not set", "= self.table[good] def _set_calib_group_bits(self): \"\"\" Set the calibration group bit", "root is up one directory .. include:: ../include/links.rst \"\"\" import", "above. Raises: TypeError: Raised if `usrdata` is not an `astropy.io.table.Table`", "table. \"\"\" meta_data_model = meta.get_meta_data_model() # Check the input if", "TODO: Is there a reason why this is not an", "# Get the setup lines setup_lines = dict_to_lines({'Setup {0}'.format(setup): utils.yamlify(cfg[setup])},", "set_configurations and set_calibration_groups.') det_name = self.spectrograph.get_det_name(det) return f\"{self['setup'][row]}_{self['calibbit'][row]}_{det_name}\" def construct_obstime(self,", "of :class:`astropy.table.Table`. usrdata (:obj:`astropy.table.Table`, optional): A user provided set of", "the # data was read from a pypeit file) if", "is printed in its current state. overwrite (:obj:`bool`, optional): Overwrite", "= output_tbl[tbl_cols][srt] else: output_tbl = output_tbl[tbl_cols] if output == 'table':", "if 'frametype' not in self.keys(): msgs.error('To ignore frames, types must", "configuration to the same # calibration group; this needs to", "None when writing to a # file... return None def", "\\'setup\\' and \\'calibbit\\' ' 'columns; run set_configurations and set_calibration_groups.') if", "row, det=1): \"\"\" Construct the master key for the file", "# configurations... cfg = self.unique_configurations(copy=True, rm_none=True) # Get the setups", "the Table (e.g. 
binning) match.append(np.all(config[k] == row[k])) # Check return", "must include detectors designated as a viable mosaic for :attr:`spectrograph`;", "are correctly ordered srt = [np.where(f == self.table['filename'])[0][0] for f", "set to be unique for each standard and science frame.", "configuration uniq_meta = np.unique(self.table[metakey][in_cfg].data) # Warn the user that the", "[None, 'table'] else output if ofile is not None and", "coordinates, time, units from pypeit import msgs from pypeit import", "science frame ID, if the latter is provided. \"\"\" return", "raised when some frames cannot be assigned to a configuration,", "the configuration values were # correctly assigned in the spectrograph", "the keywords in the provided configuration match with the metadata", "sky coordinates.') else: # TODO: Do we want to do", "file to use in the data reduction. \"\"\" def __init__(self,", "msgs.warn('Removing existing frametype and framebit columns.') if 'frametype' in self.keys():", "configuration should be the same as returned by the spectrograph", ":attr:`table` is modified in-place. See also: :func:`pypeit.pypeitsetup.PypeItSetup.run`. .. todo:: -", "# MasterFrames and QA for icbit in np.unique(self['calibbit'].data): cbit =", "calibration group; this needs to have dtype=object, otherwise # any", "if the latter is provided. \"\"\" return self.frame_paths(self.find_frames(ftype, calib_ID=calib_ID)) def", "to include in the table. data (table-like, optional): The data", "to use to construct the configuration. cfg_keys (:obj:`list`, optional): The", "column is not set in the table. \"\"\" if 'framebit'", "Attributes: spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): The spectrograph used to collect the data", "# removed msg = 'The following frames have configurations that", ":attr:`calib_bitmask`. 
Args: global_frames (:obj:`list`, optional): A list of strings with", "is None else self.calib_bitmask.nbits def set_calibration_groups(self, global_frames=None, default=False, force=False): \"\"\"", "of the logic. The 'calib' column has a string type", "and 'None' in _cfg.keys(): del _cfg['None'] return _cfg def unique_configurations(self,", "be used to set the frame type of each file.", "background IDs are the last # few columns ncol =", "or standard frames) to a unique integer. If the 'comb_id'", "# Find unique configurations, always ignoring any 'None' # configurations...", "dtype=int) + 1 def set_user_added_columns(self): \"\"\" Set columns that the", "\"\"\" if 'comb_id' not in self.keys(): self['comb_id'] = -1 if", "types to ignore but the frame types have not been", "whether or not a science frame is # assigned to", "ValueError('The following columns are not valid: {0}'.format( ', '.join(tbl_cols[badcol]))) #", "Convert to a list of numbers l = np.amax([ 0", "paths = np.unique(self['directory'][in_cfg]).tolist() # Get the data lines subtbl =", "configs (:obj:`dict`, optional): A nested dictionary, one dictionary per configuration", "Merge the types and bits into the existing table. This", "Append the frame type. If False, all existing frame types", "a warning and continue. Attributes: spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): The spectrograph used", "rows input if rows is not None and len(rows) !=", "it matches with what can be read from the pypeit", "instrument configuration - This is primarily used for QA now;", "pulled from the PypeIt file. This function: - sets the", "msgs.error('{0} already exists. Use ovewrite=True to overwrite.'.format(ofile)) # Construct the", "called above if user is not None!) msgs.info(\"Typing completed!\") return", "data was read from a pypeit file) if 'calib' in", "sure the calibbit column does not exist if 'calibbit' in", "of columns to be included in the fitstbl (nearly the", "pairs. 
write_manual (:obj:`bool`, optional): Add additional ``PypeIt`` columns for manual", "have unique '.format(cfg_key) + '{0} values.' .format(meta)) # Find the", "frames in the same calibration group indx &= self.find_calib_group(calib_ID) #", "write_manual=False): \"\"\" Generate the list of columns to be included", "if the `framebit` column is not set in the table.", "all as true # indx = self['idname'] == self.spectrograph.idname(ftype) if", "data are correctly ordered srt = [np.where(f == self.table['filename'])[0][0] for", "name ofiles[j] = os.path.join(odir, '{0}.pypeit'.format(root)) # Get the setup lines", "MJD of when the frame was observed. .. todo:: -", "pypeit file is the main configuration file for PypeIt, configuring", "tuple, it must include detectors designated as a viable mosaic", ".. todo:: - Maintain a detailed description of the logic.", "= subtbl['mjd'].copy() # Deal with possibly None mjds if there", "or configs == ['all']: cfg_keys = list(cfg.keys()) else: _configs =", "of PypeItMetaData.' ' The table will be empty!') # Initialize", "and cbit in ignore: continue # Find the frames in", "calib_bitmask (:class:`BitMask`): The bitmask used to keep track of the", "include:: ../include/links.rst \"\"\" import os import io import string from", "None else obstime tiso = time.Time(_obstime, format='isot') dtime = datetime.datetime.strptime(tiso.value,", "setup_lines=setup_lines, sorted_files=data_lines, paths=paths) # Return return ofiles def write(self, output=None,", "user edits back in a frame that has an #", ":func:`pypeit.spectrographs.spectrograph.get_headarr`. Set to False to instead report a warning and", "out a warning if there is problem try: time.Time(data['mjd'], format='mjd')", "indx: self['calib'][i] = calibs # Set the bits based on", "be required Returns: dict: Dictionary with the data to assign", "the fitstbl (nearly the complete list). Args: write_bkg_pairs (:obj:`bool`, optional):", "is included. 
write_bkg_pairs (:obj:`bool`, optional): When constructing the :class:`pypeit.metadata.PypeItMetaData` object,", "the column in `usrdata`, the function will try to match", "each frame; see :attr:`calib_bitmask`. Args: global_frames (:obj:`list`, optional): A list", "import embed import numpy as np import yaml from astropy", "Raised if `filename` is not a key in the provided", "provided spectrograph class. For the data table, one should typically", "therefore *must* match the number of files in :attr:`table`. For", "rm_none=False): \"\"\" Convenience method to return :attr:`configs` with possible alterations.", "are not ' \\ 'correctly defined for {0}; values must", "slitlen = 'none' if 'slitlen' not in self.keys() else self['slitlen'][row]", "uniq_meta.size != 1: msgs.warn('When setting the instrument configuration for {0}", "# NOTE: For now, check that the configuration values were", "all the frames associated with the provided calibration group. Args:", "the below to not crash if some files have None", "if np.any(badcol): raise ValueError('The following columns are not valid: {0}'.format(", "= np.logical_not(np.isin(setups, ignore)) setups = setups[rm] indx = indx[rm] #", "keywords specified for the provided spectrograph. It is expected that", "== col for t in tbl_cols])[0][0] if indx != 0:", "== 'table'``. Otherwise, the method always returns None. Raises: ValueError:", "\"\"\" return self.calib_bitmask.flagged_bits(self['calibbit'][row]) # TODO: Is there a reason why", "f in usrdata['filename']] # Convert types if possible existing_keys =", "'Continuing, but the following frames may be empty or have", "Table (e.g. binning) match.append(np.all(config[k] == row[k])) # Check return np.all(match)", "Group calibration frames into sets. 
Requires the 'setup' column to", "in self.spectrograph.meta.keys()} data['directory'] = ['None']*len(_files) data['filename'] = ['None']*len(_files) # Build", "return None if self.calib_bitmask is None else self.calib_bitmask.nbits def set_calibration_groups(self,", "configs if isinstance(configs, list) else [configs] cfg_keys = [key for", "'None' usrdata[key][nones] = None # Rest # Allow for str", "of instantiation of PypeItMetaData.') usr_row = usrdata[idx] # Add the", "in self.keys(): msgs.error('To ignore frames, types must have been defined;", "(like biases and darks) if global_frames is not None: if", "sort the data by a given column if sort_col is", "= np.ones(len(self), dtype=bool) for key in cfg_limits.keys(): # NOTE: For", "and the detector number is provided as an argument and", "def keys(self): return self.table.keys() def sort(self, col): return self.table.sort(col) def", "table based on user-provided data, typically pulled from the PypeIt", "name or start by # # flagging all as true", "The 1-indexed detector to include. If None, all detectors are", "are set to be unique for each standard and science", "'science' \\ else self.par['calibrations']['{0}frame'.format(ftype)]['exprng'] # TODO: Use & or |", "if 'frametype' not in self.keys(): msgs.error('To account for ignored frames,", "Add additional ``PypeIt`` columns for manual extraction configs (:obj:`str`, :obj:`list`,", "to pad string array ftype_colmA = table.Column(self.type_bitmask.type_names(type_bits), name='frametype') # KLUDGE", "comma-separated types. setup (:obj:`str`): If the 'setup' columns does not", "time.Time(data['mjd'], format='mjd') except ValueError: mjd = np.asarray(data['mjd']) filenames = np.asarray(data['filename'])", "TODO: It would be good to get around this. 
Is", "None # Write the output to an ascii file with", "not None: # Remove the selected configurations to ignore rm", "optional): A boolean vector selecting the rows of the table", "metadata for {0}'.format(os.path.split(ifile)[1])) # JFH Changed the below to not", "have not been set yet. \"\"\" # Configurations have already", "extraction Returns: `numpy.ndarray`_: Array of columns to be used in", "if row_match_config(self.table[i], cfg, self.spectrograph): self.table['setup'][i] = d # Check if", "so that we # don't have to do these gymnastics.", "provided configuration match with the metadata keywords. Also raised when", "= list(cfg.keys()) else: _configs = configs if isinstance(configs, list) else", "Return an array of 0-indexed indices instead of a boolean", "'None' nrows = len(self) for i in range(nrows): for d,", "typically provide either the file list from which to grab", "in the relevant spectrograph class. Args: row (:obj:`int`): The 0-indexed", "writing routines. Args: ignore (:obj:`list`, optional): Ignore configurations in the", "= '.' # Read the fits headers headarr = self.spectrograph.get_headarr(ifile,", "table self.table = table.Table(data if files is None else self._build(files,", "be good to get around this. Is it related to", "dtype='U9', name='frametype') else: ftype_colm = ftype_colmA fbits_colm = table.Column(type_bits, name='framebit')", "self.set_user_added_columns() # Validate instrument name self.spectrograph.vet_instrument(self.table) def _impose_types(self, columns, types):", "the unique configurations identified. type_bitmask (:class:`pypeit.core.framematch.FrameTypeBitMask`): The bitmask used to", "0 if len(n) == 0 else int(n) for n in", "more files to use to build the table. strict (:obj:`bool`,", "sorted file. overwrite (:obj:`bool`, optional): Overwrite any existing file with", "object. .. 
todo:: - Here's where we could add a", "fits headers headarr = self.spectrograph.get_headarr(ifile, strict=strict) # Grab Meta for", "Restrict _configs = None if configs is None else np.atleast_1d(configs)", "self.configs = {} self.configs[cfg_iter[cfg_indx]] = self.get_configuration(indx[0], cfg_keys=cfg_keys) cfg_indx += 1", "frame types. The length must match the existing number of", "metadata table (pypeit file):\\n' indx = np.where(np.logical_not(good))[0] for i in", "ignore_frames = self.spectrograph.config_independent_frames() if ignore_frames is None: # Nope, we're", "in # reverse order so I can always insert at", "= {} for setup in cfg.keys(): _cfg[setup] = {} _cfg[setup]['--']", "#_det = np.arange(self.spectrograph.ndet)+1 if det is None else [det] #for", "single identifier. \"\"\" self.get_frame_types(user=frametype) # TODO: Add in a call", "return setups, indx if return_index else setups def _get_cfgs(self, copy=False,", "deep copy of :attr:`configs` instead of the object itself. rm_none", "overwrite=False, header=None): \"\"\" Write the metadata either to a file", "configuration-defining keywords all have values that will yield good PypeIt", "defined for {0}; values must be None or a string.'.format(", "'binning' not in self.keys() else self['binning'][row] skey = 'Setup {}'.format(self['setup'][row])", "'calibbit' column is actually what is used to determine the", "not a PypeItMetaData object. def __getitem__(self, item): return self.table.__getitem__(item) def", "{} _cfg[setup]['--'] = deepcopy(cfg[setup]) cfg = _cfg # Iterate through", "the associated frame type. If the index is provided, the", "or 'calibbit' not in self.keys(): msgs.error('Cannot provide master key string", "configs = np.unique(self['setup'].data).tolist() if 'None' in configs: configs.remove('None') # Ignore", "Edit the frame type by hand. Args: indx (:obj:`int`): The", "PypeItPar from pypeit.par.util import make_pypeit_file from pypeit.bitmask import BitMask #", "'dark']). 
default (:obj:`bool`, optional): If the 'calib' column is not", "A boolean vector selecting the rows of the table to", "occurence of these configurations. configs (:obj:`str`, :obj:`list`, optional): One or", "& self.find_frames(ftype) if not np.any(indx): continue if metakey is None:", "A boolean array, or an integer array if index=True, with", "if np.sum(ignore) > 0: msgs.warn('Ignoring {0} frames with configuration set", "string 'None', this returns all frames without a known type.", "# PypeIt orientation binning of a science image } }", "if `filename` is not a key in the provided table.", "be used to initialize the combination group and background group", "self['manual'] = '' def write_sorted(self, ofile, overwrite=True, ignore=None, write_bkg_pairs=False, write_manual=False):", "used to set the code behavior. files (:obj:`str`, :obj:`list`, optional):", "written. Shape must match the number of the rows in", "self.configs[uniq[i]] = self.get_configuration(indx[i]) msgs.info('Found {0} unique configurations.'.format(len(self.configs))) return self._get_cfgs(copy=copy, rm_none=rm_none)", "None def find_calib_group(self, grp): \"\"\" Find all the frames associated", "Raises: PypeItError: Raised if the 'setup' isn't defined and split", "not None: # Select frames in the same calibration group", "be assigned to a configuration, the spectrograph defined frames that", "And remove 'em self.table = self.table[good] def _set_calib_group_bits(self): \"\"\" Set", "tbl_cols.insert(ncol-1, tbl_cols.pop(indx)) # Copy the internal table so that it", "complete list). Args: write_bkg_pairs (:obj:`bool`, optional): Add additional ``PypeIt`` columns", "usrdata, match_type=True): \"\"\" Use the provided table to supplement or", "the default format. Raises: PypeItError: Raised if the 'setup' isn't", "must also match the science frame index, if it is", "frames. Args: indx (:obj:`int`, array-like): One or more 0-indexed rows", "fitstbl (nearly the complete list). 
Args: write_bkg_pairs (:obj:`bool`, optional): Add", "objects. If ``'all'``, pass back all configurations. Otherwise, only return", "some frame types to have no association with an instrument", "isn't defined and split is True. Returns: :obj:`list`: List of", "tbl_cols.pop(indx)) # Make sure the dithers and combination and background", "a science image } } } #_det = np.arange(self.spectrograph.ndet)+1 if", "fails to read any of the headers. Set to False", "arcmins of a listed standard, # then it is probably", "self.table[output_cols][in_cfg] subtbl.sort(['frametype','filename']) with io.StringIO() as ff: subtbl.write(ff, format='ascii.fixed_width') data_lines =", "not provided, the default parameters specific to the provided spectrograph", "without \\'setup\\' ' 'column; run set_configurations.') if os.path.isfile(ofile) and not", "in the table. \"\"\" if 'framebit' not in self.keys(): msgs.error('Frame", "\"\"\" meta_data_model = meta.get_meta_data_model() # Check the input if not", "== 'None' usrdata[key][nones] = None # Rest # Allow for", "usrdata['dec'][~nones].data) usrdata['ra'][~nones] = ras.astype(dtype) usrdata['dec'][~nones] = decs.astype(dtype) radec_done = True", "for key in cfg.keys() if key in _configs] if len(cfg_keys)", "and combination and background IDs are the last # few", "the metadata table generated within PypeIt. **Note**: This is ignored", "value) def __len__(self): return self.table.__len__() def __repr__(self): return self.table._base_repr_(html=False, descr_vals=['PypeItMetaData:\\n',", "This method *should not* be called by any method outside", "metadata. If the internal table already contains the column in", "self.table.__len__() def __repr__(self): return self.table._base_repr_(html=False, descr_vals=['PypeItMetaData:\\n', ' spectrograph={0}\\n'.format( self.spectrograph.name), '", "\"\"\" Return the unique instrument configurations. 
If run before the", "not None: if sort_col not in self.keys(): raise ValueError(f'Cannot sort", "identifiers are iterations through the # upper-case letters: A, B,", "of files in :attr:`table`. For frames that have multiple types,", "= 0 # TODO: Placeholder: Allow an empty set of", "return self.table.__getitem__(item) def __setitem__(self, item, value): return self.table.__setitem__(item, value) def", "Return return ofiles def write(self, output=None, rows=None, columns=None, sort_col=None, overwrite=False,", "type PypeItPar.') self.type_bitmask = framematch.FrameTypeBitMask() # Build table self.table =", "logic. The 'calib' column has a string type to make", "be ignored in the construction of the unique configurations. If", "a few instruments (e.g. VLT) where meta data may not", "the 'comb_id' or 'bkg_id' columns do not exist, they're set", "For the data table, one should typically provide either the", "to initialize the combination groups to the set of objects", "this group cfg[setup[0]][cbit] = {} for key in self.type_bitmask.keys(): #ftype_in_group", "defined yet. \"\"\" if self.configs is not None and not", "return time.Time(self['mjd'][row], format='mjd') def construct_basename(self, row, obstime=None): \"\"\" Construct the", "vector selecting the rows of the table to write. If", "Add additional ``PypeIt`` columns for manual extraction Returns: `numpy.ndarray`_: Array", "not in self.keys(): msgs.error('Frame types are not set. First run", "len(self.configs) if unique: if cfg_indx == len(cfg_iter): msgs.error('Cannot assign more", "are list of frame types to ignore but the frame", "of types on certain columns. .. note:: :attr:`table` is edited", "# Build the table for idx, ifile in enumerate(_files): #", "data['directory'][idx], data['filename'][idx] = os.path.split(ifile) if not data['directory'][idx]: data['directory'][idx] = '.'", "observation. 
\"\"\" return time.Time(self['mjd'][row], format='mjd') def construct_basename(self, row, obstime=None): \"\"\"", "list of metadata keys to use to construct the configuration.", "detectors designated as a viable mosaic for :attr:`spectrograph`; see :func:`~pypeit.spectrographs.spectrograph.Spectrograph.allowed_mosaics`.", "this method will fault! Args: force (:obj:`bool`, optional): Force the", "# Convert the string to the group list grp =", "columns = self.spectrograph.pypeit_file_keys() extras = [] # comb, bkg columns", "any of the frames with # the ignored frame types", "or 'calibbit' not in self.keys(): msgs.error('Cannot write calibration groups without", "optional): Root path for the output pypeit files. If None,", "object exists within 20 arcmins of a listed standard, #", "\"\"\" # Find the number groups by searching for the", "['ra', 'dec'] and not radec_done: ras, decs = meta.convert_radec(usrdata['ra'][~nones].data, usrdata['dec'][~nones].data)", "= datetime.datetime.strptime(tiso.value, '%Y-%m-%dT%H:%M:%S.%f') return '{0}-{1}_{2}_{3}{4}'.format(self['filename'][row].split('.fits')[0], self['target'][row].replace(\" \", \"\"), self.spectrograph.camera, datetime.datetime.strftime(dtime,", "column has a string type to make sure that it", "type that match any of the # meta data values", "Args: flag_unknown (:obj:`bool`, optional): Instead of crashing out if there", "_get_cfgs(self, copy=False, rm_none=False): \"\"\" Convenience method to return :attr:`configs` with", "from pypeit.core import flux_calib from pypeit.core import parse from pypeit.core", "\"\"\" Ensure that configuration-defining keywords all have values that will", "called by any method outside of this class; use :func:`unique_configurations`", "setup identifiers ('A', 'B', etc.) and the row index where", "either to a file or to the screen. 
The method", "length.') # Get the columns to return if columns in", "def unique_configurations(self, force=False, copy=False, rm_none=False): \"\"\" Return the unique instrument", "science frame # TODO: Should this be 'standard' or 'science'", "nones = usrdata[key] == 'None' usrdata[key][nones] = None # Rest", "anyway, with the type in `usrdata`. You can avoid this", "still want this to run. # Validate, print out a", "used to construct the setup. det (:obj:`int`, optional): The 1-indexed", "rm_none=True) # Get the setups to write if configs is", "cfg = self.unique_configurations(copy=True, rm_none=True) # Get the setups to write", "Get the data lines subtbl = self.table[output_cols][in_cfg] subtbl.sort(['frametype','filename']) with io.StringIO()", "instrument configurations (setups) and the association of each frame from", "float meta (e.g. dispangle) Returns: bool: True if the row", "optional): The 1-indexed detector to include. If None, all detectors", "file metadata used during the reduction. The content of the", "a given column if sort_col is not None: if sort_col", "configuration.') return self._get_cfgs(copy=copy, rm_none=rm_none) # Use the first file to", "np.arange(len(sci_std_idx), dtype=int) + 1 def set_user_added_columns(self): \"\"\" Set columns that", "set_configurations.') # Unique configurations setups, indx = np.unique(self['setup'], return_index=True) if", "if the row matches the input configuration \"\"\" # Loop", ":obj:`list`: List of ``PypeIt`` files generated. \"\"\" # Set output", "an ascii file with open(ofile, 'w') as f: if header", "(:obj:`dict`, optional): A dictionary with the types designated by the", "not* be called by any method outside of this class;", "configs (:obj:`dict`): A dictionary of the unique configurations identified. type_bitmask", "in cfgs.keys(): if key in ignore: del cfgs[key] # Construct", "Remove any configurations set to 'None'. If copy is True,", "table. The internal table is edited *in place*. 
If the", "types are not set. First run get_frame_types.') if ftype ==", "frames taken in this configuration indx = self['setup'] == setup", "match.append(False) elif np.abs(config[k]-row[k])/config[k] < spectrograph.meta[k]['rtol']: match.append(True) else: match.append(False) else: #", "Output signature or file name. If None, the table contents", "any validation checks. par (:obj:`pypeit.par.pypeitpar.PypeItPar`): PypeIt parameters used to set", "that the user might add self.set_user_added_columns() # Validate instrument name", "the data by a given column if sort_col is not", "so I can always insert at the beginning of the", "used. Returns: dict: A dictionary with the metadata values from", "c in self.keys(): self.table[c] = self.table[c].astype(t) def _build(self, files, strict=True,", "match_type: for key in existing_keys: if len(self.table[key].shape) > 1: #", "data, if present if usrdata is not None: self.merge(usrdata) #", "columns, the frame type name and bits. \"\"\" # Making", "j,setup in enumerate(cfg_keys): # Create the output directory root =", "if usrdata is None: usr_row = None else: # TODO:", "parameters and listing the data files to read. This function", "run set_calibration_groups.') return self.calib_bitmask.flagged(self['calibbit'].data, grp) def find_frame_calib_groups(self, row): \"\"\" Find", "See the keys for :class:`pypeit.core.framematch.FrameTypeBitMask`. calib_ID (:obj:`int`, optional): Index of", "nrows = len(self) for i in range(nrows): for d, cfg", "should nominally follow an execution of # pypeit_setup. If the", "flag='standard') continue # If an object exists within 20 arcmins", "contain # frames from a single configuration if len(setup) !=", "optional): Overwrite any existing file; otherwise raise an exception. header", "fault if there is a problem with the reading the", "paths=paths) # Return return ofiles def write(self, output=None, rows=None, columns=None,", "of the unique configurations. 
If :func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames` does not return None", "self.keys() or 'calibbit' not in self.keys(): msgs.error('Cannot write calibration groups", "specified by the provided spectrograph class. For the data table,", "identical to the pypeit file output. .. todo:: - This", "configs is not None and 'all' not in _configs: use", "string array ftype_colmA = table.Column(self.type_bitmask.type_names(type_bits), name='frametype') # KLUDGE ME #", "the provided object - sets all the configurations to the", "calibration group, and the detector. The configuration ID is the", "finding unique combinations of the items in the metadata table", "indx = self['setup'] == setup if not np.any(indx): continue subtbl", "return self.calib_bitmask.flagged(self['calibbit'].data, grp) def find_frame_calib_groups(self, row): \"\"\" Find the calibration", "of indices or a boolean array of the correct length.", "return # Groups have been set but the bits have", "row, obstime=None): \"\"\" Construct the root name primarily for PypeIt", "existing number of table rows. merge (:obj:`bool`, optional): Merge the", "ignore: continue # Find the frames in this group #in_group", ":attr:`table` is edited in place. Args: columns (:obj:`list`): List of", "the same as the backwards compatible \"setup\" dictionary. Args: indx", "in all calibration groups (e.g., ['bias', 'dark']). default (:obj:`bool`, optional):", "object itself. 
rm_none (:obj:`bool`, optional): Remove any configurations set to", "to overwrite.'.format(ofile)) # Construct the setups dictionary cfg = self.unique_configurations(copy=True,", "except ValueError: mjd = np.asarray(data['mjd']) filenames = np.asarray(data['filename']) bad_files =", "group; ensure the integers are unique self['calibbit'][i] = self.calib_bitmask.turn_on(self['calibbit'][i], grp)", "does not match table length.') msgs.info('Using user-provided frame types.') for", "a table and interface to the relevant fits file metadata", "frame ID, if the latter is provided. \"\"\" return self.frame_paths(self.find_frames(ftype,", "file, which should nominally follow an execution of # pypeit_setup.", "self['filename'][indx]: msgs.info(f) if not flag_unknown: msgs.error(\"Check these files before continuing\")", "return. Can be an array of indices or a boolean", "msgs.error('Cannot provide instrument setup without \\'setup\\' column; ' 'run set_configurations.')", "pypeit.io import dict_to_lines from pypeit.par import PypeItPar from pypeit.par.util import", "to be unique for each standard and science frame. \"\"\"", "is either not set or a string assert metakey is", "= self.par['rdx']['ignore_bad_headers']) if isinstance(value, str) and '#' in value: value", "calib_ID is not None: # Select frames in the same", "the subtable of frames taken in this configuration indx =", "with # the ignored frame types should be assigned to", "setup: setup.remove('None') # Make sure that each calibration group should", "integer. If the 'comb_id' or 'bkg_id' columns do not exist,", "an array of 0-indexed indices instead of a boolean array.", "if not isinstance(self.par, PypeItPar): raise TypeError('Input parameter set must be", "# Write it ff = open(ofile, 'w') ff.write(yaml.dump(utils.yamlify(cfg))) ff.close() def", "types. 
merge (:obj:`bool`, optional): Merge the frame typing into the", "in self.keys() or 'framebit' in self.keys(): msgs.warn('Removing existing frametype and", "valid column.') # Ignore any NoneTypes indx = output_tbl[sort_col] !=", "is not None, this function simply returns :attr:`config` (cf. ``force``).", "path if output_path is None: output_path = os.getcwd() # Find", "table for idx, ifile in enumerate(_files): # User data (for", "or start by # # flagging all as true #", "Raises: PypeItError: Raised if 'setup' column is not defined, or", "yield good PypeIt reductions. Any frames that do not are", "data?? dtype = meta_data_model[key]['dtype'] else: dtype = self.table[key].dtype # Deal", "ascii file to which to write the table contents. rows", "configuration dictionary for a given frame. This is not the", "it first occurs. This is different from :func:`unique_configurations` because the", "relevant science frame. Args: ftype (str): The frame type identifier.", "mjd[mjd == None] = -99999.0 isort = np.argsort(mjd) subtbl =", "subtbl = self.table[output_cols][indx] # Write the file ff.write('##########################################################\\n') ff.write('Setup {:s}\\n'.format(setup))", "in enumerate(_files): # User data (for frame type) if usrdata", "def write(self, output=None, rows=None, columns=None, sort_col=None, overwrite=False, header=None): \"\"\" Write", "split is True. Returns: :obj:`list`: List of ``PypeIt`` files generated.", "behavior since if there are # empty or corrupt files", "in user.items(): indx = self['filename'] == ifile type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx],", "one calibration group # Assign everything from the same configuration", "detector number is provided as an argument and converted to", "\"\"\" if 'setup' not in self.keys() or 'calibbit' not in", "pypeit.bitmask import BitMask # TODO: Turn this into a DataContainer", "(numpy.ndarray): Integer bitmask with the frame types. 
The length must", "the calibration bit names as these are the root of", "Find the frames of this type that match any of", "self._set_calib_group_bits() self._check_calib_groups() return # TODO: The rest of this just", "but the frame types have not been defined yet. \"\"\"", "columns: columns += [key] # Take only those present output_cols", "format='isot') dtime = datetime.datetime.strptime(tiso.value, '%Y-%m-%dT%H:%M:%S.%f') return '{0}-{1}_{2}_{3}{4}'.format(self['filename'][row].split('.fits')[0], self['target'][row].replace(\" \", \"\"),", "Raised if the 'setup' isn't been defined. \"\"\" if 'setup'", "use to build the table. strict (:obj:`bool`, optional): Function will", "manual extraction Raises: PypeItError: Raised if the 'setup' isn't been", "designation of the configuration itself. Returns: dict: The pypeit setup", "the setup lines setup_lines = dict_to_lines({'Setup {0}'.format(setup): utils.yamlify(cfg[setup])}, level=1) #", "default=False, force=False): \"\"\" Group calibration frames into sets. Requires the", "fill data = {k:[] for k in self.spectrograph.meta.keys()} data['directory'] =", "a file or to the screen. The method allows you", "which column to use for sorting. Args: output (:obj:`str`, optional):", ":func:`~pypeit.spectrographs.spectrograph.Spectrograph.allowed_mosaics`. Returns: :obj:`str`: Master key with configuration, calibration group(s), and", "in the table included in the selected calibration group. Raises:", "The metadata is validated using checks specified by the provided", "track of the calibration group bits. table (:class:`astropy.table.Table`): The table", "= np.ones(len(self), dtype=bool) for ftype in ignore_frames: use &= np.logical_not(self.find_frames(ftype))", "data, typically pulled from the PypeIt file. This function: -", "the science frame index, if it is provided. 
Args: ftype", "this method is only called for a preconstructed # pypeit", "k in _cfg_keys} def master_key(self, row, det=1): \"\"\" Construct the", "self.keys(): self['bkg_id'] = -1 if assign_objects and np.all(self['comb_id'] < 0):", "odir = os.path.join(output_path, root) if not os.path.isdir(odir): os.makedirs(odir) # Create", "file instead Args: ofile (:obj:`str`): Name for the output sorted", "# PypeItMetaData? def row_match_config(row, config, spectrograph): \"\"\" Queries whether a", "self.keys(): msgs.error('Cannot provide master key string without setup and calibbit;", "if the 'comb_id' column does not exist, this sets the", "that the # metakey is either not set or a", "internal attributes self.configs = None self.calib_bitmask = None # Initialize", "Returning {1}.'.format( meta_key, value)) data[meta_key].append(value) msgs.info('Added metadata for {0}'.format(os.path.split(ifile)[1])) #", "the type in `usrdata`. You can avoid this step by", "calib file. The calib file provides the unique instrument configurations", "else setups def _get_cfgs(self, copy=False, rm_none=False): \"\"\" Convenience method to", "frame types have been set, ignore anything listed in #", "this single identifier. Ignores other inputs. Raises: PypeItError: Raised if", "return # Some frame types may have been ignored ignore_frames", "provided as a string with comma-separated types. merge (:obj:`bool`, optional):", "(:obj:`list`, optional): Ignore calibration groups in the provided list. Raises:", "metadata') elif key in meta_data_model.keys(): # Is this meta data??", "pypeit.core import parse from pypeit.core import meta from pypeit.io import", "','.join(np.arange(n_cfg).astype(str)) for ftype in global_frames: indx = np.where(self.find_frames(ftype))[0] for i", "a twilight flat frame that was' + msgs.newline() + 'missed", "for the output sorted file. 
overwrite (:obj:`bool`, optional): Overwrite any", "Add the directory and file name to the table data['directory'][idx],", "(see :func:`get_frame_types`), this method will fault! Args: force (:obj:`bool`, optional):", "not np.any(indx): continue subtbl = self.table[output_cols][indx] # Write the file", "first occurrence of these setups, if requested. Raises: PypeItError: Raised", "group. Raises: PypeItError: Raised if the 'calibbit' column is not", "the 'calib' column already exists. Raises: PypeItError: Raised if 'setup'", "be moved into each Spectrograph # if useIDname and 'idname'", "self.table[key] = usrdata[key][srt] def finalize_usr_build(self, frametype, setup): \"\"\" Finalize the", "# Ignore any NoneTypes indx = output_tbl[sort_col] != None is_None", "the list for col in ['framebit', 'frametype', 'filename', 'directory']: if", "mjd = subtbl['mjd'].copy() # Deal with possibly None mjds if", "np.logical_not(self.find_frames(ftype)) indx = indx[use] if len(indx) == 0: msgs.error('No frames", "frame type) if usrdata is None: usr_row = None else:", "not in self.keys(): msgs.error('Calibration groups are not set. First run", "else int(n) for n in self['calib'][i].replace(':',',').split(',')]) # Check against current", "# The configuration must be present to determine the calibration", "'setup' in self.keys(): msgs.info('Setup column already set. Finding unique configurations.')", "type to continue if 'frametype' not in self.keys(): msgs.error('To account", "yet been defined (see :func:`get_frame_types`), this method will fault! Args:", "write_pypeit(self, output_path=None, cfg_lines=None, write_bkg_pairs=False, write_manual=False, configs=None): \"\"\" Write a pypeit", "if it is provided. 
Args: ftype (str): The frame type", "= output_tbl[sort_col] != None is_None = np.logical_not(indx) srt = np.append(np.where(is_None)[0],", "# calibration group; this needs to have dtype=object, otherwise #", "{'binning': binning, 'det': d, # 'namp': self.spectrograph.detector[d-1]['numamplifiers']} return setup[skey] if", "the user might add \"\"\" if 'manual' not in self.keys():", "configs == 'all' or configs == ['all']: cfg_keys = list(cfg.keys())", "code so that we # don't have to do these", "return None def find_calib_group(self, grp): \"\"\" Find all the frames", "to return :attr:`configs` with possible alterations. This method *should not*", "(:obj:`bool`, optional): Just return the dictionary with the configuration, don't", "from the metadata table (pypeit file):\\n' indx = np.where(np.logical_not(good))[0] for", "= np.array(columns) return output_cols[np.isin(output_cols, self.keys())].tolist() def set_combination_groups(self, assign_objects=True): \"\"\" Set", "cfg_lines (:obj:`list`, optional): The list of configuration lines to include", "an # invalid key, at least for now the DEIMOS", "Can't `configs is # None` mean that you want all", "configurations in this group, ignoring any # undefined ('None') configurations", "rows with the associated frame type. If the index is", "= 0 for i in range(len(self)): if self['calib'][i] in ['all',", "self['comb_id'] = -1 if 'bkg_id' not in self.keys(): self['bkg_id'] =", "a list of numbers l = np.amax([ 0 if len(n)", "# Deal with floating configs (e.g. grating angle) if isinstance(config[k],", "(:obj:`str`, optional): Root path for the output pypeit files. If", "metadata are valid for this column. indx = np.isin(self[key], cfg_limits[key])", ":class:`pypeit.core.framematch.FrameTypeBitMask`. 
calib_ID (:obj:`int`, optional): Index of the calibration group that", "good = np.ones(len(self), dtype=bool) for key in cfg_limits.keys(): # NOTE:", "parameters are the first few columns; do them in #", "= subtbl[isort] subtbl.write(ff, format='ascii.fixed_width') ff.write('##end\\n') ff.close() # TODO: Do we", "number is provided as an argument and converted to a", "it: for cfg_key in _configs.keys(): in_cfg = self.table['setup'] == cfg_key", "be # removed msg = 'The following frames have configurations", "if key not in columns: columns += [key] # Take", "basic parameters are the first few columns; do them in", "[configs] cfg_keys = [key for key in cfg.keys() if key", "input configuration \"\"\" # Loop on keys in config match", "We should edit the relevant follow-on code so that we", "'instrume' ] def keys(self): return self.table.keys() def sort(self, col): return", "vector selecting output rows has incorrect length.') # Get the", "themselves. This is mostly a convenience function for the writing", "by PypeIt. .. include common links, assuming primary doc root", "Get the paths in_cfg = self['setup'] == setup if not", "+ msgs.newline() + f) msgs.warn('The above file could be a", "decker = 'none' if 'decker' not in self.keys() else self['decker'][row]", "configuration, the spectrograph defined frames that have been ignored in", "selecting the rows of the table to write. If None,", "provided, the vanilla configuration is included. write_bkg_pairs (:obj:`bool`, optional): When", "mjds if there were corrupt header cards mjd[mjd == None]", "np.unique(self['setup'], return_index=True) ignore = uniq == 'None' if np.sum(ignore) >", "astropy.io.table.Table instance.') if 'filename' not in usrdata.keys(): raise KeyError('The user-provided", "argument and converted to a zero-filled string with two digits", "in the fitstbl (nearly the complete list). 
Args: write_bkg_pairs (:obj:`bool`,", "self.merge(usrdata) # Impose types on specific columns self._impose_types(['comb_id', 'bkg_id', 'manual'],", "is the combination of the configuration, the calibration group, and", "the keyword allows MasterFrames to be used with multiple calibration", "self.spectrograph.name), ' length={0}\\n'.format(len(self))]) def _repr_html_(self): return self.table._base_repr_(html=True, max_width=-1, descr_vals=['PypeItMetaData: spectrograph={0},", "also provided. This functionality is only used when building the", "see :func:`~pypeit.spectrographs.spectrograph.Spectrograph.allowed_mosaics`. Returns: :obj:`str`: Master key with configuration, calibration group(s),", "other files show a different # configuration. for i in", "Requires the 'setup' column to have been defined. For now", "function simply returns :attr:`config` (cf. ``force``). .. warning:: Any frame", "Validate, print out a warning if there is problem try:", "'.format(cfg_key) + '{0} values.' .format(meta)) # Find the frames of", "the number of files in :attr:`table`. For frames that have", "group # Assign everything from the same configuration to the", "'table': # Instead of writing, just return the modified table", "self.table['setup'] == 'None' if not np.any(not_setup): # All are set,", "configurations identified. type_bitmask (:class:`pypeit.core.framematch.FrameTypeBitMask`): The bitmask used to set the", "group. .. 
todo:: - This is for backwards compatibility, but", "def __len__(self): return self.table.__len__() def __repr__(self): return self.table._base_repr_(html=False, descr_vals=['PypeItMetaData:\\n', '", "type in this group cfg[setup[0]][cbit] = {} for key in", "0 self['framebit'][indx] = self.type_bitmask.turn_on(self['framebit'][indx], flag=frame_type) self['frametype'][indx] = self.type_bitmask.type_names(self['framebit'][indx]) def get_frame_types(self,", "ff = open(ofile, 'w') ff.write(yaml.dump(utils.yamlify(cfg))) ff.close() def write_pypeit(self, output_path=None, cfg_lines=None,", "output_tbl[rows] # Select and sort the data by a given", "This should be converted to an assert statement... raise ValueError('CODING", "key in ['ra', 'dec'] and not radec_done: ras, decs =", "idname above gets overwritten by # this if the frames", "'dispangle':dispangle}, 'dichroic': dichroic, 'slit': {'decker': decker, 'slitwid':slitwid, 'slitlen':slitlen}, 'binning': binning,", "don't include the top-level designation of the configuration itself. Returns:", "the same name. ignore (:obj:`list`, optional): Ignore configurations in the", "or ``'all'``, all columns in are written; if ``'pypeit'``, the", "setup columns with this single identifier. Ignores other inputs. Raises:", "column to have been defined. For now this is a", "in self.keys() and 'calibbit' in self.keys() and not force: return", "(:obj:`bool`, optional): Merge the types and bits into the existing", "\"\"\" # Allow for single files _files = files if", "if 'setup' not in self.keys(): msgs.error('Cannot get setup names; run", "code behavior. If not provided, the default parameters specific to", "this just nominally sets the calibration # group based on", "to be from a single configuration.') return self._get_cfgs(copy=copy, rm_none=rm_none) #", "it # here, because this method is only called for", "is modified in-place. See also: :func:`pypeit.pypeitsetup.PypeItSetup.run`. .. 
todo:: - Why", "have been defined; ' 'run get_frame_types.') calibs = '0' if", "= decs.astype(dtype) radec_done = True else: usrdata[key][~nones] = usrdata[key][~nones].astype(dtype) #", "the calibration # group based on the configuration. This will", "not in self.keys() else self['slitwid'][row] slitlen = 'none' if 'slitlen'", "all the unique instrument configurations (setups) and the frames associated", "= list(ignore_frames.keys()) msgs.info('Unique configurations ignore frames with type: {0}'.format(ignore_frames)) use", "self.configs is not None and not force: return self._get_cfgs(copy=copy, rm_none=rm_none)", "rows in the table. columns (:obj:`str`, :obj:`list`, optional): A list", "The frame type identifier. See the keys for :class:`pypeit.core.framematch.FrameTypeBitMask`. If", "configurations (setups) and the frames associated with each configuration. The", "= self.table['setup'] == cfg_key for ftype, metakey in ignore_frames.items(): #", "same configuration to the same # calibration group; this needs", "used to construct the key. det (:obj:`int`, :obj:`tuple`, optional): The", "else self['slitwid'][row] slitlen = 'none' if 'slitlen' not in self.keys()", "(:obj:`int`): The 0-indexed row of the frame. Returns: astropy.time.Time: The", "there are # empty or corrupt files we still want", "# # TODO: This needs to be moved into each", "metadata table, are directory, filename, frametype, framebit, setup, calib, and", "table listed by the spectrograph ``configuration_keys`` method. If run after", "or just removed. assert isinstance(cfg_limits[key], list), \\ 'CODING ERROR: valid_configuration_values", "files, leave without a type and continue. 
user (:obj:`dict`, optional):", "Return return np.where(indx)[0] if index else indx def find_frame_files(self, ftype,", "['dithpat', 'dithpos', 'dithoff', 'calib', 'comb_id', 'bkg_id']: if col not in", "grating angle) if isinstance(config[k], float): if row[k] is None: match.append(False)", "the calibration group that it must match. If None, any", "an exception if framebit is not # set... sci_std_idx =", "change! # The configuration must be present to determine the", "The 0-indexed row of the frame. Returns: astropy.time.Time: The MJD", "self.type_bitmask.turn_off(b, flag='standard') continue # If an object exists within 20", "possible alterations. This method *should not* be called by any", "self._set_calib_group_bits() # Check that the groups are valid self._check_calib_groups() def", "if ignore_frames is None: # Nope, we're still done return", "any types indx = np.logical_not(self.type_bitmask.flagged(type_bits)) if np.any(indx): msgs.info(\"Couldn't identify the", ".. todo:: - Why isn't frametype just in the user-provided", "also: # # http://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table.convert_bytestring_to_unicode # # Or we can force", "optional): Function will fault if :func:`fits.getheader` fails to read any", "be reconstructed if the 'calib' column already exists. 
Raises: PypeItError:", "None: self.merge(usrdata) # Impose types on specific columns self._impose_types(['comb_id', 'bkg_id',", "output_path = os.getcwd() # Find unique configurations, always ignoring any", "be empty msgs.warn('Both data and files are None in the", "is None: match.append(False) elif np.abs(config[k]-row[k])/config[k] < spectrograph.meta[k]['rtol']: match.append(True) else: match.append(False)", "import utils from pypeit.core import framematch from pypeit.core import flux_calib", "meta from pypeit.io import dict_to_lines from pypeit.par import PypeItPar from", "self.type_bitmask.turn_on(type_bits[indx], flag=ftype) # Find the nearest standard star to each", "if :func:`fits.getheader` fails to read any of the headers. Set", "only be assigned to a single calibration group.') @property def", "= j == len(self.configs) if unique: if cfg_indx == len(cfg_iter):", "file is the main configuration file for PypeIt, configuring the", "key not in columns: columns += [key] # Take only", "tiso.value.split(\"T\")[1].replace(':','')) def get_setup(self, row, det=None, config_only=False): \"\"\" Construct the setup", "self.table._base_repr_(html=False, descr_vals=['PypeItMetaData:\\n', ' spectrograph={0}\\n'.format( self.spectrograph.name), ' length={0}\\n'.format(len(self))]) def _repr_html_(self): return", "key in existing_keys: if len(self.table[key].shape) > 1: # NOT ALLOWED!!", "correctly defined ' \\ 'for {0}; values must be a", "{0}'.format(ignore_frames)) use = np.ones(len(self), dtype=bool) for ftype in ignore_frames: use", "following frames have configurations that cannot be reduced by PypeIt'", "be an array of indices or a boolean array of", "in ['ra', 'dec'] and not radec_done: ras, decs = meta.convert_radec(usrdata['ra'][~nones].data,", "is not None: if 'frametype' not in self.keys(): msgs.error('To set", "# correctly assigned in the spectrograph class definition. 
# This", "get_frame_types.') calibs = '0' if n_cfg == 1 else ','.join(np.arange(n_cfg).astype(str))", "# Finish up (note that this is called above if", "configuration must be present to determine the calibration # group", "= self.set_pypeit_cols(write_bkg_pairs=True) else: all_cols = list(self.keys()) tbl_cols = columns if", "+ '\\n') ff.write('#---------------------------------------------------------\\n') mjd = subtbl['mjd'].copy() # Deal with possibly", "but could probably use the pypeit file instead Args: ofile", "Maintain a detailed description of the logic. The 'calib' column", "already exists. Use ovewrite=True to overwrite.'.format(ofile)) # Grab output columns", "Find the unique values of meta for this configuration uniq_meta", "Flag frames with the correct ID name or start by", "one dictionary per configuration with the associated metadata for each.", "table. columns (:obj:`str`, :obj:`list`, optional): A list of columns to", "the frame types have not been defined yet. \"\"\" if", "groups to be reconstructed if the 'calib' column already exists.", "> 0: msgs.error('Configuration {0} defined using unavailable keywords!'.format(k)) self.table['setup'] =", "(:obj:`bool`, optional): Add additional ``PypeIt`` columns for manual extraction configs", "= self.spectrograph.config_independent_frames() if ignore_frames is None: # Nope, we're still", "data (table-like, optional): The data to include in the table.", "setup names. A second returned object provides the indices of", "number of files in :attr:`table`. For frames that have multiple", "those present output_cols = np.array(columns) return output_cols[np.isin(output_cols, self.keys())].tolist() def set_combination_groups(self,", "datetime.datetime.strftime(dtime, '%Y%m%dT'), tiso.value.split(\"T\")[1].replace(':','')) def get_setup(self, row, det=None, config_only=False): \"\"\" Construct", "but the frame types have not been set yet. \"\"\"", "existing table. This will *overwrite* any existing columns. 
Returns: `astropy.table.Table`:", "None, all rows are written. Shape must match the number", "Just to be explicit that the method returns None when", "in the determination of the unique configurations, but the frame", "is not None: self['setup'] = fill return _configs = self.unique_configurations()", "are directory, filename, frametype, framebit, setup, calib, and calibbit. sort_col", "set. Finding unique configurations.') uniq, indx = np.unique(self['setup'], return_index=True) ignore", "'The following frames have configurations that cannot be reduced by", "the spectrograph defined frames that have been ignored in the", "an execution of # pypeit_setup. If the user edits back", "whether a row from the fitstbl matches the input configuration", "the data lines subtbl = self.table[output_cols][in_cfg] subtbl.sort(['frametype','filename']) with io.StringIO() as", "msgs.warn('Found frames with invalid {0}.'.format(key)) good &= indx if np.all(good):", "file output. \"\"\" _obstime = self.construct_obstime(row) if obstime is None", "a valid letter identifier; i.e., the ' 'configuration cannot be", "unique_configurations(self, force=False, copy=False, rm_none=False): \"\"\" Return the unique instrument configurations.", "config_only (:obj:`bool`, optional): Just return the dictionary with the configuration,", "['all', 'None']: # No information, keep going continue # Convert", "as the calibration bit number, and the detector number is", "are **not** reset unless you call the function with ``force=True``.", "edited in place. 
Args: columns (:obj:`list`): List of column names", "been defined; ' 'run get_frame_types.') calibs = '0' if n_cfg", "output rows has incorrect length.') # Get the columns to", "`usrdata` is not an `astropy.io.table.Table` KeyError: Raised if `filename` is", "for i in range(len(self)): if self['calib'][i] in ['all', 'None']: #", "dec == 'None': msgs.warn('RA and DEC must not be None", "None and the frame types have not yet been defined", "not return None and the frame types have not yet", "subtbl.sort(['frametype','filename']) with io.StringIO() as ff: subtbl.write(ff, format='ascii.fixed_width') data_lines = ff.getvalue().split('\\n')[:-1]", "the observation. \"\"\" return time.Time(self['mjd'][row], format='mjd') def construct_basename(self, row, obstime=None):", "which to write the table contents. rows (`numpy.ndarray`_, optional): A", "returns None. Raises: ValueError: Raised if the columns to include", "range(len(self)): if self['calib'][i] in ['all', 'None']: # No information, keep", "force: return if 'setup' not in self.keys() and fill is", "# Get the data lines subtbl = self.table[output_cols][in_cfg] subtbl.sort(['frametype','filename']) with", "in cfg.keys(): _cfg[setup] = {} _cfg[setup]['--'] = deepcopy(cfg[setup]) cfg =", "PypeItError: Raised if there are list of frame types to", "return {k:self.table[k][indx] for k in _cfg_keys} def master_key(self, row, det=1):", "were corrupt header cards mjd[mjd == None] = -99999.0 isort", "frames that do not are removed from :attr:`table`, meaning this", "\"\"\" Queries whether a row from the fitstbl matches the", "in the table and specify any validation checks. par (:class:`pypeit.par.pypeitpar.PypeItPar`):", "an object exists within 20 arcmins of a listed standard,", "associated with the provided calibration group. 
Args: grp (:obj:`int`): The", "out if there are unidentified files, leave without a type", "'em self.table = self.table[good] def _set_calib_group_bits(self): \"\"\" Set the calibration", "by searching for the maximum number # provided, regardless of", "or if `global_frames` is provided but the frame types have", "self['setup'] == setup if not np.any(indx): continue subtbl = self.table[output_cols][indx]", "in all_cols for col in tbl_cols] if np.any(badcol): raise ValueError('The", "user (:obj:`dict`, optional): A dictionary with the types designated by", "check=True) b = self.type_bitmask.turn_off(b, flag='science' if foundstd else 'standard') #", "digits (the maximum number of detectors is 99). Using the", "object that would have been written/printed if ``output == 'table'``.", "Deal with possibly None mjds if there were corrupt header", "if columns in [None, 'all']: tbl_cols = list(self.keys()) elif columns", "list of strings (e.g., ['A','C']). Returns: numpy.array: The list of", "through the calibration bit names as these are the root", "msg += 'Continuing, but the following frames may be empty", "None and len(rows) != len(self.table): raise ValueError('Boolean vector selecting output", "table included in the selected calibration group. Raises: PypeItError: Raised", "configuration self.configs = {} self.configs[cfg_iter[cfg_indx]] = self.get_configuration(indx[0], cfg_keys=cfg_keys) cfg_indx +=", "if ``'pypeit'``, the columns are the same as those included", "configs[i]) & (self['framebit'] > 0)] = str(i) # Allow some", "meta data may not be required Returns: dict: Dictionary with", "file:' + msgs.newline() + f) msgs.warn('The above file could be", "not match user-provided metadata table. See ' 'usrdata argument of", "table is edited *in place*. If the 'setup' column already", "self.find_frames(ftype) if not np.any(indx): continue if metakey is None: #", "the columns to print and which column to use for", "the user-provided data? 
It may be (see get_frame_types) and I'm", "converted to a zero-filled string with two digits (the maximum", "self.par = par if not isinstance(self.par, PypeItPar): raise TypeError('Input parameter", "backwards compatibility, but we should consider reformatting/removing it. - This", "is not correctly defined ' \\ 'for {0}; values must", "also: :func:`pypeit.pypeitsetup.PypeItSetup.run`. .. todo:: - Why isn't frametype just in", "np.asarray(data['mjd']) filenames = np.asarray(data['filename']) bad_files = filenames[mjd == None] #", "printed to the screen. If ``'table'``, the table that would", "else: ftype_colm = ftype_colmA fbits_colm = table.Column(type_bits, name='framebit') t =", "done return # Some frame types may have been ignored", "self.unique_configurations(copy=ignore is not None) if ignore is not None: for", "i in indx[1:]: j = 0 for c in self.configs.values():", "to be unique for each science or standard frame, see", "valid letter identifier; i.e., the ' 'configuration cannot be None.')", "construct_obstime(self, row): \"\"\" Construct the MJD of when the frame", "cfg.keys() if key in _configs] if len(cfg_keys) == 0: msgs.error('No", "been set, ignore anything listed in # the ignore_frames indx", "included. Returns: list: List of file paths that match the", "Raised if the 'setup' isn't defined and split is True.", "collect the data save to each file. The class is", "the dithers and combination and background IDs are the last", "(`numpy.ndarray`_, optional): A boolean vector selecting the rows of the", "provided spectrograph are used. configs (:obj:`dict`): A dictionary of the", "for h in _header: f.write(f'# {h}\\n') f.write('\\n') f.write('\\n'.join(data_lines)) f.write('\\n') #", "the row index where it first occurs. This is different", "in its current state. overwrite (:obj:`bool`, optional): Overwrite any existing", "set the frame type of each fits file. 
calib_bitmask (:class:`BitMask`):", "KLUDGE ME # # TODO: It would be good to", "self['calib'][i] in ['all', 'None']: # No information, keep going continue", "np.append(np.where(is_None)[0], np.where(indx)[0][np.argsort(output_tbl[sort_col][indx].data)]) output_tbl = output_tbl[tbl_cols][srt] else: output_tbl = output_tbl[tbl_cols] if", "string representation of the groups self._set_calib_group_bits() # Check that the", "If the 'setup' column does not exist, fill the configuration", "configurations #setup = np.unique(self['setup'][in_group]).tolist() setup = np.unique(self['setup'][in_cbit]).tolist() if 'None' in", "columns that the user *might* add .. note:: :attr:`table` is", "calibration group indx &= self.find_calib_group(calib_ID) # Return return np.where(indx)[0] if", "in the output file. Can be provided as a list", "_build(self, files, strict=True, usrdata=None): \"\"\" Generate the fitstbl that will", "strings with the frame types to use in all calibration", "if ofile is not None and os.path.isfile(ofile) and not overwrite:", "range(n_cfg): self['calib'][(self['setup'] == configs[i]) & (self['framebit'] > 0)] = str(i)", "self.keys(): msgs.error('To set global frames, types must have been defined;", "of this just nominally sets the calibration # group based", "= self['calibbit'] == cbit # Find the unique configurations in", "array. Returns: numpy.ndarray: A boolean array, or an integer array", "Set the bits based on the string representation of the", "be read from the pypeit file. 
The 'calibbit' column is", "do them in # reverse order so I can always", "np.any(indx): continue if metakey is None: # No matching meta", "if 'setup' not in self.keys() or 'calibbit' not in self.keys():", "self.keys() or 'dec' not in self.keys(): msgs.warn('Cannot associate standard with", ":class:`pypeit.metadata.PypeItMetaData` object, include two columns called `comb_id` and `bkg_id` that", "if ofile is None: # Output file not defined so", "the list of frames of this type without a #", "or more frame types to append/overwrite. append (:obj:`bool`, optional): Append", "`data` is also provided. This functionality is only used when", "if not is_science[i]: continue if len(self.calib_bitmask.flagged_bits(self['calibbit'][i])) > 1: msgs.error('Science frames", "table row to use to construct the configuration. cfg_keys (:obj:`list`,", "set or a string assert metakey is None or isinstance(metakey,", "ncol = len(tbl_cols) for col in ['dithpat', 'dithpos', 'dithoff', 'calib',", "Args: output (:obj:`str`, optional): Output signature or file name. If", "= calibs # Set the bits based on the string", "the `configuration_keys` of :attr:`spectrograph` is used. Returns: dict: A dictionary", "the table. \"\"\" if 'framebit' not in self.keys(): msgs.error('Frame types", ":obj:`list`, optional): A list of columns to include in the", "d # Check if any of the configurations are not", "def write_pypeit(self, output_path=None, cfg_lines=None, write_bkg_pairs=False, write_manual=False, configs=None): \"\"\" Write a", "a deep copy of :attr:`configs` instead of the object itself.", "\"\"\" Return the configuration dictionary for a given frame. This", "columns with this single identifier. Ignores other inputs. Raises: PypeItError:", "allowed frame types. 
\"\"\" # Checks if 'frametype' in self.keys()", "internal table already contains the column in `usrdata`, the function", "the table based on user-provided data, typically pulled from the", "dithers and combination and background IDs are the last #", "been defined yet. \"\"\" # Set the default if requested", "indx = indx[use] return setups, indx if return_index else setups", "zero-filled string with two digits (the maximum number of detectors", "of the calibration group that it must match. If None,", "self.keys(): msgs.warn('Cannot associate standard with science frames without sky coordinates.')", "not None: _header = header if isinstance(header, list) else [header]", "== 'None' if not np.any(not_setup): # All are set, so", "if output == 'table': # Instead of writing, just return", "for {0}; values must be None or a string.'.format( self.spectrograph.__class__.__name__)", "the unique configurations, but the frame types have not been", "and 'idname' not in self.keys(): # raise ValueError('idname is not", "the matching meta values are not # unique for this", "usrdata=usrdata)) # Merge with user data, if present if usrdata", "the same as the calibration bit number, and the detector", "key with configuration, calibration group(s), and detector. Raises: PypeItError: Raised", "self.table.copy() # Select the output rows if a vector was", "grab the data from the fits headers or the data", "output. \"\"\" _obstime = self.construct_obstime(row) if obstime is None else", "= np.where(np.any([self.find_frames('science'), self.find_frames('standard')], axis=0))[0] self['comb_id'][sci_std_idx] = np.arange(len(sci_std_idx), dtype=int) + 1", "should consider reformatting it. And it may be something to", "backwards compatibility, but we should consider reformatting it. And it", "a unique integer. If the 'comb_id' or 'bkg_id' columns do", "dictionary if user is not None: if len(user.keys()) != len(self):", "if configs is not None and 'all' not in _configs:", "to a # file... 
return None def find_calib_group(self, grp): \"\"\"", "configuration itself. Returns: dict: The pypeit setup dictionary with the", "table generated within PypeIt. match_type (:obj:`bool`, optional): Attempt to match", "configuration with the associated metadata for each. Raises: PypeItError: Raised", "configurations.'.format(len(self.configs))) return self._get_cfgs(copy=copy, rm_none=rm_none) def set_configurations(self, configs=None, force=False, fill=None): \"\"\"", "0 # Select frames indx = self.type_bitmask.flagged(self['framebit'], ftype) if calib_ID", "added to the beginning of each string. Ignored if ``output``", "are the same as those included in the pypeit file.", "The list of metadata keys to use to construct the", "(astropy.table.Table, optional): Parsed for frametype for a few instruments (e.g.", "combination of the configuration, the calibration group, and the detector.", "try to match the data type of the `usrdata` column", "must be a valid pypeit metadata keyword, specific to :attr:`spectrograph`.", "None and files is None: # Warn that table will", "else: # TODO: This check should be done elsewhere #", "the detector number is provided as an argument and converted", "include it in the metadata table. The internal table is", "column to the existing data type. If it can't it", "consider reformatting/removing it. - This is complicated by allowing some", "would have been printed/written to disk is returned. Otherwise, the", "dtype=bool) # Include a combination of instrument-specific checks using #", "not in self.keys() else self['binning'][row] skey = 'Setup {}'.format(self['setup'][row]) #", "fits files. strict (:obj:`bool`, optional): Function will fault if there", "None, the table is printed in its current state. 
overwrite", "default (:obj:`bool`, optional): If the 'calib' column is not present,", "indx[rm] # Restrict _configs = None if configs is None", "cfg_keys = [key for key in cfg.keys() if key in", "bitmask and initialize the bits self.calib_bitmask = BitMask(np.arange(ngroups)) self['calibbit'] =", "column anyway, with the type in `usrdata`. You can avoid", "[header] for h in _header: f.write(f'# {h}\\n') f.write('\\n') f.write('\\n'.join(data_lines)) f.write('\\n')", "['framebit', 'frametype', 'filename', 'directory']: if col not in tbl_cols: continue", "This function can be used to initialize the combination group", "to be defined first) ofile = None if output in", "\\ else self.par['calibrations']['{0}frame'.format(ftype)]['exprng'] # TODO: Use & or | ?", "by the instantiation of :class:`astropy.table.Table`. usrdata (:obj:`astropy.table.Table`, optional): A user", "method *should not* be called by any method outside of", "data to include in the table. The type can be", "in # their MJD. This is the desired behavior since", "to be included in the fitstbl (nearly the complete list).", "table and specify any validation checks. par (:class:`pypeit.par.pypeitpar.PypeItPar`): PypeIt parameters", "frames that have been ignored in the determination of the", "uniq_meta) self.table['setup'][indx] = cfg_key def clean_configurations(self): \"\"\" Ensure that configuration-defining", "+= ['manual'] for key in extras: if key not in", "types \"\"\" for c,t in zip(columns, types): if c in", "with each configuration. The metadata keywords in the dictionary should", "for PypeIt file output. Args: row (:obj:`int`): The 0-indexed row", "types have been set, ignore anything listed in # the", "os.path.isfile(ofile) and not overwrite: msgs.error('{0} already exists. Use ovewrite=True to", "heart of PypeItMetaData. 
Args: files (:obj:`str`, :obj:`list`): One or more", "list of columns to be included in the fitstbl (nearly", "(:obj:`int`, array-like): One or more 0-indexed rows in the table", "backwards compatible \"setup\" dictionary. Args: indx (:obj:`int`): The index of", "set the first unique configuration self.configs = {} self.configs[cfg_iter[cfg_indx]] =", "directory. If the output directory does not exist, it is", "the heart of PypeItMetaData. Args: files (:obj:`str`, :obj:`list`): One or", "in self.keys(): self['manual'] = '' def write_sorted(self, ofile, overwrite=True, ignore=None,", "'table'] else output if ofile is not None and os.path.isfile(ofile)", "contents. rows (`numpy.ndarray`_, optional): A boolean vector selecting the rows", "unique '.format(cfg_key) + '{0} values.' .format(meta)) # Find the frames", "# Check if any of the configurations are not set", "the bits based on the string representation of the groups", "= list(self.keys()) tbl_cols = columns if isinstance(columns, list) else columns.split(',')", "of the unique configuration names. This provides just the list", "list does not match user-provided metadata table. See ' 'usrdata", "instance.') if 'filename' not in usrdata.keys(): raise KeyError('The user-provided table", "column (A, B, C, etc), the calibration group is the", "in self.keys(): # raise ValueError('idname is not set in table;", "# Instead of writing, just return the modified table return", "self.keys(): msgs.error('Cannot get setup names; run set_configurations.') # Unique configurations", "!= usrdata['filename'][idx]: msgs.error('File name list does not match user-provided metadata", "the calibbit column does not exist if 'calibbit' in self.keys():", "not exist - if the 'comb_id' column does not exist,", "= self.unique_configurations(copy=True, rm_none=True) # Get the setups to write if", "# Print status message msg = 'Time invalid for {0}", "function with ``force=True``. 
Args: configs (:obj:`dict`, optional): A nested dictionary,", "== None] = -99999.0 isort = np.argsort(mjd) subtbl = subtbl[isort]", "group cfg[setup[0]][cbit] = {} for key in self.type_bitmask.keys(): #ftype_in_group =", "'{0}.pypeit'.format(root)) # Get the setup lines setup_lines = dict_to_lines({'Setup {0}'.format(setup):", "the pypeit file instead Args: ofile (:obj:`str`): Name for the", "for sorting is not valid. FileExistsError: Raised if overwrite is", "strings (e.g., ['A','C']). Returns: numpy.array: The list of unique setup", "the frame types based on the provided object - sets", "= {} self.configs[cfg_iter[cfg_indx]] = {} msgs.info('All files assumed to be", "based on the configuration. This will change! # The configuration", "if usrdata is not None: self.merge(usrdata) # Impose types on", "rm_none=rm_none) if 'setup' in self.keys(): msgs.info('Setup column already set. Finding", "numbers l = np.amax([ 0 if len(n) == 0 else", "Grab output columns output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs, write_manual=write_manual) # Write the", "*for all rows*. force (:obj:`bool`, optional): Force the calibration groups", "decs = meta.convert_radec(usrdata['ra'][~nones].data, usrdata['dec'][~nones].data) usrdata['ra'][~nones] = ras.astype(dtype) usrdata['dec'][~nones] = decs.astype(dtype)", "from the file headers. The table must have a `filename`", "allows for arrays in the Table (e.g. binning) match.append(np.all(config[k] ==", "optional): Ignore calibration groups in the provided list. Raises: PypeItError:", "(:obj:`bool`, optional): Add additional ``PypeIt`` columns for calib, comb_id and", "configuration setup columns with this single identifier. 
Ignores other inputs.", "control-flow and algorithmic parameters and listing the data files to", "or configs == 'all' or configs == ['all']: cfg_keys =", "it is unaltered output_tbl = self.table.copy() # Select the output", "def find_calib_group(self, grp): \"\"\" Find all the frames associated with", "for i in indx[1:]: j = 0 for c in", "configuration dictionary using the unique configurations in that column. This", "None's properly nones = usrdata[key] == 'None' usrdata[key][nones] = None", ":attr:`configs` is copied to a new dictionary. Returns: :obj:`dict`: A", "if ra == 'None' or dec == 'None': msgs.warn('RA and", "Raises: TypeError: Raised if `usrdata` is not an `astropy.io.table.Table` KeyError:", "as a viable mosaic for :attr:`spectrograph`; see :func:`~pypeit.spectrographs.spectrograph.Spectrograph.allowed_mosaics`. Returns: :obj:`str`:", "_cfg = deepcopy(self.configs) if copy else self.configs if rm_none and", "directly or as a comma-separated string. If None or ``'all'``,", "depending on the processing level of the metadata table, are", "raise TypeError('Input parameter set must be of type PypeItPar.') self.type_bitmask", "if uniq_meta.size != 1: msgs.warn('When setting the instrument configuration for", "subtbl = subtbl[isort] subtbl.write(ff, format='ascii.fixed_width') ff.write('##end\\n') ff.close() # TODO: Do", "0 else int(n) for n in self['calib'][i].replace(':',',').split(',')]) # Check against", "else 'standard') # Find the files without any types indx", "Raises: PypeItError: Raised if there are list of frame types", "specific to :attr:`spectrograph`. Additional valid keywords, depending on the processing", "frame pairs. write_manual (:obj:`bool`, optional): Add additional ``PypeIt`` columns for", "dictionary per configuration with the associated metadata for each. 
Raises:", "have no association with an instrument configuration - This is", "return [ 'directory', 'filename', 'instrume' ] def keys(self): return self.table.keys()", "to a single calibration group.') @property def n_calib_groups(self): \"\"\"Return the", "existing_keys = list(set(self.table.keys()) & set(usrdata.keys())) radec_done = False if len(existing_keys)", "True else: usrdata[key][~nones] = usrdata[key][~nones].astype(dtype) # Include the user data", "of the headers. Set to False to report a warning", "+ 1 def set_user_added_columns(self): \"\"\" Set columns that the user", "Check if any of the other files show a different", "Returns: :obj:`astropy.table.Table`: A Table with two columns, the type names", "config.keys(): # Deal with floating configs (e.g. grating angle) if", "type_bits, merge=True): \"\"\" Set and return a Table with the", "del self.table['framebit'] # # TODO: This needs to be moved", "ignored in the construction of the unique configurations. If :func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames`", "rows has incorrect length.') # Get the columns to return", "by finding unique combinations of the items in the metadata", "with the configuration, don't include the top-level designation of the", "columns output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs, write_manual=write_manual) cfgs = self.unique_configurations(copy=ignore is not", "configurations are only determined if :attr:`configs` has not yet been", "-1. Args: assign_objects (:obj:`bool`, optional): If all of 'comb_id' values", "`` is added to the beginning of each string. 
Ignored", "# Check that the groups are valid self._check_calib_groups() def find_frames(self,", "# manual if write_manual: extras += ['manual'] for key in", "values indx &= np.isin(self.table[metakey], uniq_meta) self.table['setup'][indx] = cfg_key def clean_configurations(self):", "as included in the configuration column (A, B, C, etc),", "throw an exception if framebit is not # set... sci_std_idx", "The root name for file output. \"\"\" _obstime = self.construct_obstime(row)", "does not return None and the frame types have not", "with possibly None mjds if there were corrupt header cards", "output rows if a vector was provided if rows is", "values are less than 0 (meaning they're unassigned), the combination", "datetime.datetime.strptime(tiso.value, '%Y-%m-%dT%H:%M:%S.%f') return '{0}-{1}_{2}_{3}{4}'.format(self['filename'][row].split('.fits')[0], self['target'][row].replace(\" \", \"\"), self.spectrograph.camera, datetime.datetime.strftime(dtime, '%Y%m%dT'),", "= np.logical_not(indx) srt = np.append(np.where(is_None)[0], np.where(indx)[0][np.argsort(output_tbl[sort_col][indx].data)]) output_tbl = output_tbl[tbl_cols][srt] else:", "frame types should be assigned to it: for cfg_key in", "with user data, if present if usrdata is not None:", "are not valid, or if the column to use for", "to get around this. Is it related to # this", "and listing the data files to read. This function writes", "the frame types have not been defined yet. \"\"\" #", "this group, ignoring any # undefined ('None') configurations #setup =", "'#' in value: value = value.replace('#', '') msgs.warn('Removing troublesome #", "file with open(ofile, 'w') as f: if header is not", "should be the same as returned by the spectrograph `configuration_keys`", "msgs.error('Frame types are not set. 
First run get_frame_types.') if ftype", "columns == 'pypeit': tbl_cols = self.set_pypeit_cols(write_bkg_pairs=True) else: all_cols = list(self.keys())", "= self.spectrograph.configuration_keys() # Configuration identifiers are iterations through the #", "from the PypeIt file. This function: - sets the frame", "ftype in global_frames: indx = np.where(self.find_frames(ftype))[0] for i in indx:", "may have been ignored ignore_frames = self.spectrograph.config_independent_frames() if ignore_frames is", "ignore frames, types must have been defined; run get_frame_types.') ignore_frames", "in ignore_frames: use &= np.logical_not(self.find_frames(ftype)) indx = indx[use] if len(indx)", "warning and continue. usrdata (astropy.table.Table, optional): Parsed for frametype for", "not in self.keys(): self['comb_id'] = -1 if 'bkg_id' not in", "for the maximum number # provided, regardless of whether or", "indices with the first occurence of these configurations. configs (:obj:`str`,", "to a list of numbers l = np.amax([ 0 if", "Find the nearest standard star to each science frame #", "spectrograph # needs to be defined first) ofile = None", "'bkg_id' columns do not exist, they're set to -1. Args:", "default 'all'? if configs is not None and 'all' not", "max(l+1, ngroups) # Define the bitmask and initialize the bits", "dictionary per configuration with the associated values of the metadata", "self['calibbit'] == cbit # Find the unique configurations in this", "values from the selected row. \"\"\" _cfg_keys = self.spectrograph.configuration_keys() if", "this is called above if user is not None!) 
msgs.info(\"Typing", "# Set the bits based on the string representation of", "self.spectrograph.valid_configuration_values() if cfg_limits is None: # No values specified, so", "done return good = np.ones(len(self), dtype=bool) for key in cfg_limits.keys():", "to fill data = {k:[] for k in self.spectrograph.meta.keys()} data['directory']", "self.spectrograph): self.table['setup'][i] = d # Check if any of the", "values of meta for this configuration uniq_meta = np.unique(self.table[metakey][in_cfg].data) #", "the data files to read. This function writes the columns", "columns (:obj:`str`, :obj:`list`, optional): A list of columns to include", "two columns called `comb_id` and `bkg_id` that identify object and", "table generated within PypeIt. **Note**: This is ignored if `data`", "the frames of the requested type. Raises: PypeItError: Raised if", "that match the frame type and science frame ID, if", "flag='standard') for b, f, ra, dec in zip(type_bits[indx], self['filename'][indx], self['ra'][indx],", "Get the setup lines setup_lines = dict_to_lines({'Setup {0}'.format(setup): utils.yamlify(cfg[setup])}, level=1)", "row_match_config(row, config, spectrograph): \"\"\" Queries whether a row from the", "If this attribute is not None, this function simply returns", "frames with configuration set to None.'.format( np.sum(ignore))) self.configs = {}", "frames of the requested type. Raises: PypeItError: Raised if the", "the root of the # MasterFrames and QA for icbit", "if present if usrdata is not None: self.merge(usrdata) # Impose", "setup[skey] if config_only else setup def get_configuration_names(self, ignore=None, return_index=False, configs=None):", "keyword allows MasterFrames to be used with multiple calibration groups.", "and continue. usrdata (astropy.table.Table, optional): Parsed for frametype for a", "first occurence of these configurations. configs (:obj:`str`, :obj:`list`, optional): One", "is not checked. 
If None, this is set by :func:`unique_configurations`.", "the configuration should be the same as returned by the", "an empty set of configuration keys # meaning that the", "see :func:`pypeit.spectrographs.spectrograph.get_headarr`. Set to False to instead report a warning", "to print and which column to use for sorting. Args:", "configuration self.table['setup'][indx] = cfg_key continue # Find the unique values", "def construct_obstime(self, row): \"\"\" Construct the MJD of when the", "using # combinations of the full set of metadata exprng", "dictionary should be the same as in the table, and", "frame. \"\"\" if 'comb_id' not in self.keys(): self['comb_id'] = -1", "for each standard and science frame. \"\"\" if 'comb_id' not", "calibration group bit based on the string values of the", "todo:: - Why isn't frametype just in the user-provided data?", "table so that it is unaltered output_tbl = self.table.copy() #", "not correctly defined ' \\ 'for {0}; values must be", "class; use :func:`unique_configurations` instead. Args: copy (:obj:`bool`, optional): Return a", "from pypeit import utils from pypeit.core import framematch from pypeit.core", "Spectrograph # if useIDname and 'idname' not in self.keys(): #", "== 'table': # Instead of writing, just return the modified", "corrupt header cards mjd[mjd == None] = -99999.0 isort =", "dictionary cfg = self.unique_configurations(copy=True, rm_none=True) # TODO: We should edit", "frame types have not yet been defined (see :func:`get_frame_types`), this", "the modified table return output_tbl # Always write the table", "set. First run set_calibration_groups.') return self.calib_bitmask.flagged(self['calibbit'].data, grp) def find_frame_calib_groups(self, row):", "time, units from pypeit import msgs from pypeit import utils", "each file. The metadata is validated using checks specified by", "`usrdata` column to the existing data type. If it can't", "with possible alterations. 
This method *should not* be called by", "convenience function for the writing routines. Args: ignore (:obj:`list`, optional):", "self.spectrograph = spectrograph self.par = par if not isinstance(self.par, PypeItPar):", "unique configuration self.configs = {} self.configs[cfg_iter[cfg_indx]] = self.get_configuration(indx[0], cfg_keys=cfg_keys) cfg_indx", "specified frame type is included. Returns: list: List of file", "the type in the internal table. See above. Raises: TypeError:", "want this to run. # Validate, print out a warning", "optional): Return an array of 0-indexed indices instead of a", "frame types and bits. Args: type_bits (numpy.ndarray): Integer bitmask with", "self.calib_bitmask.flagged_bits(self['calibbit'][row]) # TODO: Is there a reason why this is", "is None or isinstance(metakey, str), \\ 'CODING ERROR: metadata keywords", "If a tuple, it must include detectors designated as a", "be None.') # Find the frames of each type in", "for d,f in zip(self['directory'][ftype_in_group], self['filename'][ftype_in_group])] # Write it ff =", "dispangle) Returns: bool: True if the row matches the input", "PypeItError: Raised if the `framebit` column is not set in", "that the calibration groups are valid. This currently only checks", "and return a Table with the frame types and bits.", "is not valid. FileExistsError: Raised if overwrite is False and", "# don't have to do these gymnastics. Or better yet,", "frames with invalid {0}.'.format(key)) good &= indx if np.all(good): #", "in cfg_limits.keys(): # NOTE: For now, check that the configuration", "in the metadata table. The internal table is edited *in", "the dictionary, respectively. The number of keys therefore *must* match", "not exist, it is created. cfg_lines (:obj:`list`, optional): The list", "list of unique setup names. 
A second returned object provides", "= setups[use] indx = indx[use] return setups, indx if return_index", "be a list.'.format(self.spectrograph.__class__.__name__) # Check that the metadata are valid", "assigned in the spectrograph class definition. # This should probably", "Used to grab the rtol value for float meta (e.g.", "file name ofiles[j] = os.path.join(odir, '{0}.pypeit'.format(root)) # Get the setup", "output == 'table': # Instead of writing, just return the", "of the unique configurations identified. type_bitmask (:class:`pypeit.core.framematch.FrameTypeBitMask`): The bitmask used", "have \\'filename\\' column!') # Make sure the data are correctly", "because the # data was read from a pypeit file)", "and I'm just not using it... Args: frametype (:obj:`dict`): A", "hasattr(files, '__len__') else [files] # Build lists to fill data", "'filename' not in usrdata.keys(): raise KeyError('The user-provided table must have", "file output. Args: row (:obj:`int`): The 0-indexed row of the", "if det is None else [det] #for d in _det:", "not in self.keys(): raise ValueError(f'Cannot sort by {sort_col}. Not a", "the *.calib file. _cfg = {} for setup in cfg.keys():", "first; try running set_configurations.') configs = np.unique(self['setup'].data).tolist() if 'None' in", "of this type that match any of the # meta", "the observation. If None, constructed using :func:`construct_obstime`. Returns: str: The", "of writing, just return the modified table return output_tbl #", "been ignored ignore_frames = self.spectrograph.config_independent_frames() if ignore_frames is None: #", "if 'decker' not in self.keys() else self['decker'][row] slitwid = 'none'", "defined. For now this is a simple grouping of frames", "the 'setup' column to have been defined. 
For now this", "to the set of objects (science or standard frames) to", "exist if 'calibbit' in self.keys(): del self['calibbit'] # Groups have", "import yaml from astropy import table, coordinates, time, units from", "the desired behavior since if there are # empty or", "nested dictionary, one dictionary per configuration with the associated values", "of files with a given frame type. The frames must", "if int(str(ftype_colmA.dtype)[2:]) < 9: ftype_colm = table.Column(self.type_bitmask.type_names(type_bits), dtype='U9', name='frametype') else:", "overwrite is False and the file exists. \"\"\" # Check", "the table included in the selected calibration group. Raises: PypeItError:", "does not exist, this sets the combination groups to be", "# Select and sort the data by a given column", "- Here's where we could add a SPIT option. Args:", "types for i, ftype in enumerate(self.type_bitmask.keys()): # # Initialize: Flag", "rtol value for float meta (e.g. dispangle) Returns: bool: True", "If None, all detectors are included. config_only (:obj:`bool`, optional): Just", "types.') for ifile,ftypes in user.items(): indx = self['filename'] == ifile", "'run get_frame_types.') calibs = '0' if n_cfg == 1 else", "file. overwrite (:obj:`bool`, optional): Overwrite any existing file with the", "table. \"\"\" if 'framebit' not in self.keys(): msgs.error('Frame types are", "def set_user_added_columns(self): \"\"\" Set columns that the user *might* add", "copy else self.configs if rm_none and 'None' in _cfg.keys(): del", "dec in zip(type_bits[indx], self['filename'][indx], self['ra'][indx], self['dec'][indx]): if ra == 'None'", "the science frames are associated with one calibration group. TODO:", "configurations matched to this provided string or list of strings", "f.write('\\n') # Just to be explicit that the method returns", "somewhere else or just removed. assert isinstance(cfg_limits[key], list), \\ 'CODING", "# data was read from a pypeit file) if 'calib'", "Use & or | ? 
Using idname above gets overwritten", "= self.spectrograph.pypeit_file_keys() extras = [] # comb, bkg columns if", "} } } #_det = np.arange(self.spectrograph.ndet)+1 if det is None", "correctly assigned in the spectrograph class definition. # This should", "self.table['frametype'] if 'framebit' in self.keys(): del self.table['framebit'] # # TODO:", "ignore is not None and cbit in ignore: continue #", "cfgs = self.unique_configurations(copy=ignore is not None) if ignore is not", "in self.keys(): raise ValueError(f'Cannot sort by {sort_col}. Not a valid", "[files] # Build lists to fill data = {k:[] for", "each. \"\"\" _cfg = deepcopy(self.configs) if copy else self.configs if", "sorting the output. If None, the table is printed in", "may be empty or have corrupt headers:\\n' for file in", "of the provided files; see :func:`pypeit.spectrographs.spectrograph.get_headarr`. Set to False to", "identifiers ('A', 'B', etc.) and the row index where it", "Grab Meta for meta_key in self.spectrograph.meta.keys(): value = self.spectrograph.get_meta_value(headarr, meta_key,", "data_lines = ff.getvalue().split('\\n')[:-1] if ofile is None: # Output file", "The table must have a `filename` column that is used", "C, etc), the calibration group is the same as the", "not in _configs: use = np.isin(setups, _configs) setups = setups[use]", "columns=None, sort_col=None, overwrite=False, header=None): \"\"\" Write the metadata either to", "spectrograph (pypeit.spectrographs.spectrograph.Spectrograph): Used to grab the rtol value for float", "data. Args: spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): The spectrograph used to collect the", "B, C, etc), the calibration group is the same as", "why this is not an attribute of # PypeItMetaData? def", "assigns all frames to a single calibration group, if the", "columns. .. note:: :attr:`table` is edited in place. 
Args: columns", "frame types returned by the :func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames` method for :attr:`spectrograph` will", "file paths that match the frame type and science frame", "config, spectrograph): \"\"\" Queries whether a row from the fitstbl", "root) if not os.path.isdir(odir): os.makedirs(odir) # Create the output file", "output_cols = np.array(columns) return output_cols[np.isin(output_cols, self.keys())].tolist() def set_combination_groups(self, assign_objects=True): \"\"\"", "[] # comb, bkg columns if write_bkg_pairs: extras += ['calib',", "from a single instrument configuration. :attr:`table` is modified in-place. See", "rows of the table to write. If None, all rows", "``PypeIt`` columns for manual extraction configs (:obj:`str`, :obj:`list`, optional): One", "1 unique = j == len(self.configs) if unique: if cfg_indx", "columns that the user might add self.set_user_added_columns() # Validate instrument", "set to current directory. If the output directory does not", "that # proved too difficult. class PypeItMetaData: \"\"\" Provides a", "not yet been defined. copy (:obj:`bool`, optional): Return a deep", "used to select the configurations to include in the returned", "The master key is the combination of the configuration, the", "used to set the internal :attr:`configs`. If this attribute is", "frame index, if it is provided. Args: ftype (str): The", "Force the calibration groups to be reconstructed if the 'calib'", ":func:`unique_configurations`. force (:obj:`bool`, optional): Force the configurations to be reset.", "reformatting it. And it may be something to put in", "= usrdata[idx] # Add the directory and file name to", "np.logical_not(self.type_bitmask.flagged(type_bits)) if np.any(indx): msgs.info(\"Couldn't identify the following files:\") for f", "the string values of the 'calib' column. 
\"\"\" # Find", "have already been set if 'calib' in self.keys() and 'calibbit'", "the metadata table (pypeit file):\\n' indx = np.where(np.logical_not(good))[0] for i", "the bits self.calib_bitmask = BitMask(np.arange(ngroups)) self['calibbit'] = 0 # Set", "by # # flagging all as true # indx =", "of each type in this group cfg[setup[0]][cbit] = {} for", "in-place. See also: :func:`pypeit.pypeitsetup.PypeItSetup.run`. .. todo:: - Why isn't frametype", "output (:obj:`str`, optional): Output signature or file name. If None,", "are None in the instantiation of PypeItMetaData.' ' The table", "groups to be either undefined or to be unique for", "for file typing.') # Start msgs.info(\"Typing files\") type_bits = np.zeros(len(self),", "the output file name ofiles[j] = os.path.join(odir, '{0}.pypeit'.format(root)) # Get", "with undefined configurations n_cfg = len(configs) # TODO: Science frames", "# TODO: Use & or | ? Using idname above", "either undefined or to be unique for each science or", "None, set to current directory. If the output directory does", "List of ``PypeIt`` files generated. \"\"\" # Set output path", "This is ignored if `data` is also provided. This functionality", "'bkg_id', 'manual'], [int, int, str]) # Initialize internal attributes self.configs", "# Alert the user that some of the frames are", "to an assert statement... raise ValueError('CODING ERROR: Found high-dimensional column.')", "from the selected row. \"\"\" _cfg_keys = self.spectrograph.configuration_keys() if cfg_keys", "selected continue # Assign the group; ensure the integers are", "index else indx def find_frame_files(self, ftype, calib_ID=None): \"\"\" Return the", "*.calib file. 
_cfg = {} for setup in cfg.keys(): _cfg[setup]", "The bitmask used to keep track of the calibration group", "instrument configuration table without \\'setup\\' ' 'column; run set_configurations.') if", "\"\"\" def __init__(self, spectrograph, par, files=None, data=None, usrdata=None, strict=True): if", "to include. If None, all detectors are included. config_only (:obj:`bool`,", "configuration. This will change! # The configuration must be present", "# Write the output to an ascii file with open(ofile,", "Return a deep copy of :attr:`configs` instead of the object", "integer array if index=True, with the rows that contain the", "import BitMask # TODO: Turn this into a DataContainer #", "frame types from the input metadata object. .. todo:: -", "in ['dithpat', 'dithpos', 'dithoff', 'calib', 'comb_id', 'bkg_id']: if col not", "it. Args: ofile (:obj:`str`): Name for the output sorted file.", "with the same name. ignore (:obj:`list`, optional): Ignore calibration groups", "to clean_configurations? I didn't add it # here, because this", "since if there are # empty or corrupt files we", "pypeit file instead Args: ofile (:obj:`str`): Name for the output", "ID name or start by # # flagging all as", "file; otherwise raise an exception. header (:obj:`str`, :obj:`list`, optional): One", "by allowing some frame types to have no association with", "for any of the provided files; see :func:`pypeit.spectrographs.spectrograph.get_headarr`. Set to", "raise ValueError(f'Cannot sort by {sort_col}. Not a valid column.') #", "so just print it print('\\n'.join(data_lines)) return None # Write the", "that column. This is used to set the internal :attr:`configs`.", "values are not # unique for this configuration. if uniq_meta.size", "of setup identifiers ('A', 'B', etc.) and the row index", "(:obj:`str`, :obj:`list`, optional): A list of columns to include in", "If the user edits back in a frame that has", "could add a SPIT option. 
Args: flag_unknown (:obj:`bool`, optional): Instead", "with a specific frame. \"\"\" return self.calib_bitmask.flagged_bits(self['calibbit'][row]) # TODO: Is", "be used in the fits table> \"\"\" # Columns for", "configuration indx = self['setup'] == setup if not np.any(indx): continue", "twilight flat frame that was' + msgs.newline() + 'missed by", "this table can be used to set the frame type", "string. If None or ``'all'``, all columns in are written;", "user data, if present if usrdata is not None: self.merge(usrdata)", "in ['framebit', 'frametype', 'filename', 'directory']: if col not in tbl_cols:", "None are provided, the vanilla configuration is included. write_bkg_pairs (:obj:`bool`,", "self.table[c].astype(t) def _build(self, files, strict=True, usrdata=None): \"\"\" Generate the fitstbl", "PypeItError: Raised if the 'setup' isn't been defined. \"\"\" if", "class. For the data table, one should typically provide either", "truncated at 4 characters. self.table['calib'] = np.full(len(self), 'None', dtype=object) for", "to edit frame_type (:obj:`str`, :obj:`list`): One or more frame types", "'setup' column is not defined, or if `global_frames` is provided", "listed in # the ignore_frames indx = np.arange(len(self)) ignore_frames =", "the file list from which to grab the data from", "in self['calib'][i].replace(':',',').split(',')]) # Check against current maximum ngroups = max(l+1,", "The rest of this just nominally sets the calibration #", "Configuration identifiers are iterations through the # upper-case letters: A,", "if not data['directory'][idx]: data['directory'][idx] = '.' 
# Read the fits", "and the keywords used to set the configuration should be", "else ','.join(np.arange(n_cfg).astype(str)) for ftype in global_frames: indx = np.where(self.find_frames(ftype))[0] for", "badcol = [col not in all_cols for col in tbl_cols]", "make_pypeit_file from pypeit.bitmask import BitMask # TODO: Turn this into", "match = [] for k in config.keys(): # Deal with", "Warn the user that the matching meta values are not", "generated within PypeIt. match_type (:obj:`bool`, optional): Attempt to match the", "'%Y-%m-%dT%H:%M:%S.%f') return '{0}-{1}_{2}_{3}{4}'.format(self['filename'][row].split('.fits')[0], self['target'][row].replace(\" \", \"\"), self.spectrograph.camera, datetime.datetime.strftime(dtime, '%Y%m%dT'), tiso.value.split(\"T\")[1].replace(':',''))", "return # TODO: The rest of this just nominally sets", "'none' if 'dispangle' not in self.keys() else self['dispangle'][row] dichroic =", "to # this change? # http://docs.astropy.org/en/stable/table/access_table.html#bytestring-columns-in-python-3 # # See also:", "yaml from astropy import table, coordinates, time, units from pypeit", "in string.ascii_uppercase] cfg_iter = list(string.ascii_uppercase) + double_alphabet cfg_indx = 0", ":func:`fits.getheader` fails to read any of the headers. Set to", "\"\"\" Set combination groups. .. note:: :attr:`table` is edited in", "not os.path.isdir(odir): os.makedirs(odir) # Create the output file name ofiles[j]", "flag=frame_type) self['frametype'][indx] = self.type_bitmask.type_names(self['framebit'][indx]) def get_frame_types(self, flag_unknown=False, user=None, merge=True): \"\"\"", "get_frame_types) and I'm just not using it... 
Args: frametype (:obj:`dict`):", "set by config_indpendent_frames are not ' \\ 'correctly defined for", "bit based on the string values of the 'calib' column.", "#ftype_in_group = self.find_frames(key) & in_group ftype_in_group = self.find_frames(key) & in_cbit", "(:obj:`bool`, optional): Overwrite any existing file; otherwise raise an exception.", "all files are from a single instrument configuration. :attr:`table` is", "group integer. Returns: numpy.ndarray: Boolean array selecting those frames in", "range(len(self)): if not is_science[i]: continue if len(self.calib_bitmask.flagged_bits(self['calibbit'][i])) > 1: msgs.error('Science", "be used with multiple calibration groups. Args: row (:obj:`int`): The", "indx != ncol-1: tbl_cols.insert(ncol-1, tbl_cols.pop(indx)) # Copy the internal table", "unique for this configuration. if uniq_meta.size != 1: msgs.warn('When setting", "_cfg.keys(): del _cfg['None'] return _cfg def unique_configurations(self, force=False, copy=False, rm_none=False):", "with what can be read from the pypeit file. The", "No information, keep going continue # Convert to a list", "a PypeItMetaData object. def __getitem__(self, item): return self.table.__getitem__(item) def __setitem__(self,", "setup if not np.any(indx): continue subtbl = self.table[output_cols][indx] # Write", "'binning': binning, # PypeIt orientation binning of a science image", "d, # 'namp': self.spectrograph.detector[d-1]['numamplifiers']} return setup[skey] if config_only else setup", "= ff.getvalue().split('\\n')[:-1] if ofile is None: # Output file not", "warning if there is problem try: time.Time(data['mjd'], format='mjd') except ValueError:", "configuration {0} does not have unique '.format(cfg_key) + '{0} values.'", "to meet the other checks in this call. # indx", "\"\"\" Generate the fitstbl that will be at the heart", "defined. 
\"\"\" if 'setup' not in self.keys(): msgs.error('Cannot get setup", "type: {0}'.format(ignore_frames)) use = np.ones(len(self), dtype=bool) for ftype in ignore_frames:", "each file. The class is used to provide the header", "ignoring any 'None' # configurations... cfg = self.unique_configurations(copy=True, rm_none=True) #", "configurations, but the frame types have not been set yet.", "calib_ID=None, index=False): \"\"\" Find the rows with the associated frame", "the frame. obstime (:class:`astropy.time.Time`, optional): The MJD of the observation.", "if ignore is not None: for key in cfgs.keys(): if", "optional): The list of configuration lines to include in the", "a string assert metakey is None or isinstance(metakey, str), \\", "extras += ['manual'] for key in extras: if key not", "allowed by the instantiation of :class:`astropy.table.Table`. usrdata (:obj:`astropy.table.Table`, optional): A", "the fits table> \"\"\" # Columns for output columns =", "if ftype == 'None': return self['framebit'] == 0 # Select", "put in the relevant spectrograph class. Args: row (:obj:`int`): The", "If the 'setup' columns does not exist, fill the configuration", "Construct the MJD of when the frame was observed. ..", "= self.type_bitmask.turn_off(b, flag='standard') continue # If an object exists within", "None in the instantiation of PypeItMetaData.' ' The table will", "input if not isinstance(usrdata, table.Table): raise TypeError('Must provide an astropy.io.table.Table", "relevant spectrograph class. Args: row (:obj:`int`): The 0-indexed row used", "enumerate(self.type_bitmask.keys()): # # Initialize: Flag frames with the correct ID", "also be matched to the relevant science frame. Args: ftype", "keys(self): return self.table.keys() def sort(self, col): return self.table.sort(col) def merge(self,", "in zip(self['directory'][ftype_in_group], self['filename'][ftype_in_group])] # Write it ff = open(ofile, 'w')", "the provided configuration match with the metadata keywords. 
Also raised", "be included in the fitstbl (nearly the complete list). Args:", "expected that this table can be used to set the", "= np.unique(self.table[metakey][in_cfg].data) # Warn the user that the matching meta", "optional): A list of columns to include in the output", "follow-on code so that we # don't have to do", "Finish up (note that this is called above if user", "# Allow for str RA, DEC (backwards compatability) if key", "copied to a new dictionary. Returns: :obj:`dict`: A nested dictionary,", "have not been defined yet. \"\"\" # Set the default", "columns for manual extraction configs (:obj:`str`, :obj:`list`, optional): One or", "file... return None def find_calib_group(self, grp): \"\"\" Find all the", "(:obj:`bool`, optional): Force the configurations to be redetermined. Otherwise the", "None: output_path = os.getcwd() # Find unique configurations, always ignoring", "def get_configuration_names(self, ignore=None, return_index=False, configs=None): \"\"\" Get the list of", "configurations. If run before the ``'setup'`` column is initialized, this", "spectrograph are used. configs (:obj:`dict`): A dictionary of the unique", "meta_data_model[key]['dtype'] else: dtype = self.table[key].dtype # Deal with None's properly", "for i in range(len(uniq)): if ignore[i]: continue self.configs[uniq[i]] = self.get_configuration(indx[i])", "self['filename'][indx], self['ra'][indx], self['dec'][indx]): if ra == 'None' or dec ==", "execution of # pypeit_setup. If the user edits back in", "science image } } } #_det = np.arange(self.spectrograph.ndet)+1 if det", "the table and specify any validation checks. par (:class:`pypeit.par.pypeitpar.PypeItPar`): PypeIt", "calibration group *for all rows*. 
force (:obj:`bool`, optional): Force the", "not flag_unknown: msgs.error(\"Check these files before continuing\") # Finish up", "def _get_cfgs(self, copy=False, rm_none=False): \"\"\" Convenience method to return :attr:`configs`", "group; this needs to have dtype=object, otherwise # any changes", "coordinates.') else: # TODO: Do we want to do this", ":obj:`tuple`, optional): The 1-indexed detector number(s). If a tuple, it", "== 0 else int(n) for n in self['calib'][i].replace(':',',').split(',')]) # Check", "of columns to be used in the fits table> \"\"\"", "set_configurations and set_calibration_groups.') if os.path.isfile(ofile) and not overwrite: msgs.error('{0} already", "the correct length. Returns: list: List of the full paths", "setup): \"\"\" Finalize the build of the table based on", "set_calibration_groups(self, global_frames=None, default=False, force=False): \"\"\" Group calibration frames into sets.", "tbl_cols: continue indx = np.where([t == col for t in", "frame. Args: ftype (str): The frame type identifier. See the", "= deepcopy(cfg[setup]) cfg = _cfg # Iterate through the calibration", "can be written (this is here because the spectrograph #", "be used in all calibration groups # (like biases and", "for calib, comb_id and bkg_id write_manual (:obj:`bool`, optional): Add additional", "msgs.error('To ignore frames, types must have been defined; run get_frame_types.')", "optional): Function will fault if there is a problem with", "this is a simple grouping of frames with the same", ":obj:`str`: Master key with configuration, calibration group(s), and detector. Raises:", "specify an output file. Returns: `astropy.table.Table`: The table object that", "meta_data_model.keys(): # Is this meta data?? dtype = meta_data_model[key]['dtype'] else:", "self.keys(): del self['calibbit'] # Groups have already been set if", "I can always insert at the beginning of the list", "to grab the rtol value for float meta (e.g. 
dispangle)", "A dictionary of the unique configurations identified. type_bitmask (:class:`pypeit.core.framematch.FrameTypeBitMask`): The", "[], cfg_lines=cfg_lines, setup_lines=setup_lines, sorted_files=data_lines, paths=paths) # Return return ofiles def", "- Maintain a detailed description of the logic. The 'calib'", "provides the indices of the first occurrence of these setups,", "one dictionary per configuration with the associated values of the", "would be good to get around this. Is it related", "groups # (like biases and darks) if global_frames is not", "configuration names. This provides just the list of setup identifiers", "(:obj:`str`, :obj:`list`, optional): One or more strings to write to", "the provided table. \"\"\" meta_data_model = meta.get_meta_data_model() # Check the", "the frame type of each file. The metadata is validated", "of each frame from that configuration with a given calibration", "Loop on keys in config match = [] for k", "indx, frame_type, append=False): \"\"\" Edit the frame type by hand.", "Turn on the relevant bits type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftype) #", "msgs.info('Found {0} unique configurations.'.format(len(self.configs))) return self._get_cfgs(copy=copy, rm_none=rm_none) msgs.info('Using metadata to", "(:obj:`bool`, optional): Append the frame type. If False, all existing", "boolean array, or an integer array if index=True, with the", "= self.table.copy() # Select the output rows if a vector", "# TODO: This should be converted to an assert statement...", "the fits headers or the data directly. If neither are", "not in self.keys(): msgs.error('Cannot write sorted instrument configuration table without", "Raises: PypeItError: Raised if the 'setup' or 'calibbit' columns haven't", "the file. 
If None are provided, the vanilla configuration is", "0): # find_frames will throw an exception if framebit is", "obstime is None else obstime tiso = time.Time(_obstime, format='isot') dtime", "background frame pairs. write_manual (:obj:`bool`, optional): Add additional ``PypeIt`` columns", "pypeit.core import flux_calib from pypeit.core import parse from pypeit.core import", "# Get the setups to write if configs is None", "user is not None!) msgs.info(\"Typing completed!\") return self.set_frame_types(type_bits, merge=merge) def", "of the file, on string per file line; ``# ``", "from pypeit import msgs from pypeit import utils from pypeit.core", "to run. # Validate, print out a warning if there", "that the groups are valid self._check_calib_groups() def find_frames(self, ftype, calib_ID=None,", "returned objects. If ``'all'``, pass back all configurations. Otherwise, only", "returned by the spectrograph `configuration_keys` method. The latter is not", "include detectors designated as a viable mosaic for :attr:`spectrograph`; see", "cannot use it for file typing.') # Start msgs.info(\"Typing files\")", "np.unique(self.table[metakey][in_cfg].data) # Warn the user that the matching meta values", "done return # At this point, we need the frame", "functionality is only used when building the metadata from the", "fault. self.set_configurations(fill=setup) self.set_calibration_groups(default=True) self.set_combination_groups() def get_configuration(self, indx, cfg_keys=None): \"\"\" Return", "we could add a SPIT option. Args: flag_unknown (:obj:`bool`, optional):", "will be removed from the metadata table (pypeit file):\\n' indx", "just the list of setup identifiers ('A', 'B', etc.) and", "different # configuration. for i in indx[1:]: j = 0", "that identify object and background frame pairs. write_manual (:obj:`bool`, optional):", "and fill is not None: self['setup'] = fill return _configs", "table. 
Returns: :obj:`astropy.table.Table`: A Table with two columns, the type", "par (:obj:`pypeit.par.pypeitpar.PypeItPar`): PypeIt parameters used to set the code behavior.", "are included. config_only (:obj:`bool`, optional): Just return the dictionary with", "alterations. This method *should not* be called by any method", "= flux_calib.find_standard_file(ra, dec, check=True) b = self.type_bitmask.turn_off(b, flag='science' if foundstd", "If run after the ``'setup'`` column has been set, this", "set the code behavior. files (:obj:`str`, :obj:`list`, optional): The list", "method outside of this class; use :func:`unique_configurations` instead. Args: copy", "= {} _cfg[setup]['--'] = deepcopy(cfg[setup]) cfg = _cfg # Iterate", "srt = [np.where(f == self.table['filename'])[0][0] for f in usrdata['filename']] #", "configuration. for i in indx[1:]: j = 0 for c", "= (self.table['setup'] == 'None') & self.find_frames(ftype) if not np.any(indx): continue", "self.spectrograph.camera, datetime.datetime.strftime(dtime, '%Y%m%dT'), tiso.value.split(\"T\")[1].replace(':','')) def get_setup(self, row, det=None, config_only=False): \"\"\"", "will # fault. self.set_configurations(fill=setup) self.set_calibration_groups(default=True) self.set_combination_groups() def get_configuration(self, indx, cfg_keys=None):", "output_tbl = output_tbl[rows] # Select and sort the data by", "the input if not isinstance(usrdata, table.Table): raise TypeError('Must provide an", "output file. Can be provided as a list directly or", "self.keys(): self['comb_id'] = -1 if 'bkg_id' not in self.keys(): self['bkg_id']", "``output`` does not specify an output file. Returns: `astropy.table.Table`: The", "self['calib'][i].replace(':',',').split(',')]) # Check against current maximum ngroups = max(l+1, ngroups)", "the table row to use to construct the configuration. cfg_keys", "ERROR: metadata keywords set by config_indpendent_frames are not ' \\", "configurations? Or can we # make the default 'all'? 
if", "A second returned object provides the indices of the first", "if os.path.isfile(ofile) and not overwrite: msgs.error('{0} already exists. Use ovewrite=True", "'calibbit' in self.keys() and not force: return # Groups have", "default_keys(): return [ 'directory', 'filename', 'instrume' ] def keys(self): return", "the full paths of one or more frames. \"\"\" if", "first unique configuration self.configs = {} self.configs[cfg_iter[cfg_indx]] = self.get_configuration(indx[0], cfg_keys=cfg_keys)", "instantiation of PypeItMetaData.') usr_row = usrdata[idx] # Add the directory", "index is provided, the frames must also be matched to", "# And remove 'em self.table = self.table[good] def _set_calib_group_bits(self): \"\"\"", "Construct the root name primarily for PypeIt file output. Args:", "the configurations are **not** reset unless you call the function", "objects (science or standard frames) to a unique integer. If", "+ str_j for str_i in string.ascii_uppercase for str_j in string.ascii_uppercase]", "not in self.keys(): msgs.error('Cannot provide instrument setup without \\'setup\\' column;", "that would have been printed/written to disk is returned. Otherwise,", "present, set a single calibration group *for all rows*. force", "np.any(not_setup): # All are set, so we're done return #", "output_tbl = output_tbl[tbl_cols][srt] else: output_tbl = output_tbl[tbl_cols] if output ==", "# any changes to the strings will be truncated at", "# Return return ofiles def write(self, output=None, rows=None, columns=None, sort_col=None,", "output_path is None: output_path = os.getcwd() # Find unique configurations,", "output_tbl[sort_col] != None is_None = np.logical_not(indx) srt = np.append(np.where(is_None)[0], np.where(indx)[0][np.argsort(output_tbl[sort_col][indx].data)])", "(science or standard frames) to a unique integer. 
If the", "ftype_colmA fbits_colm = table.Column(type_bits, name='framebit') t = table.Table([ftype_colm, fbits_colm]) if", "= np.append(np.where(is_None)[0], np.where(indx)[0][np.argsort(output_tbl[sort_col][indx].data)]) output_tbl = output_tbl[tbl_cols][srt] else: output_tbl = output_tbl[tbl_cols]", "PypeIt. match_type (:obj:`bool`, optional): Attempt to match the data type", "observation. If None, constructed using :func:`construct_obstime`. Returns: str: The root", "self.keys() else self['decker'][row] slitwid = 'none' if 'slitwid' not in", "in _cfg_keys} def master_key(self, row, det=1): \"\"\" Construct the master", "this column. indx = np.isin(self[key], cfg_limits[key]) if not np.all(indx): msgs.warn('Found", "if len(setup) != 1: msgs.error('Each calibration group must be from", "for spectrographs setup = {skey: {'--': {'disperser': {'dispname': dispname, 'dispangle':dispangle},", "set_frame_types(self, type_bits, merge=True): \"\"\" Set and return a Table with", "SPIT option. Args: flag_unknown (:obj:`bool`, optional): Instead of crashing out", "exists. \"\"\" # Check the file can be written (this", "key in cfgs.keys(): if key in ignore: del cfgs[key] #", "use this assert to check that the # metakey is", "user might add self.set_user_added_columns() # Validate instrument name self.spectrograph.vet_instrument(self.table) def", "= usrdata[key][srt] def finalize_usr_build(self, frametype, setup): \"\"\" Finalize the build", "All values good, so we're done return # Alert the", "spectrograph used to collect the data save to each file.", "corrupt files we still want this to run. # Validate,", "ignore=None): \"\"\" Write the calib file. The calib file provides", "`usrdata`. You can avoid this step by setting `match_type=False`. Args:", "have None in # their MJD. This is the desired", "C, etc. 
double_alphabet = [str_i + str_j for str_i in", "Write the file make_pypeit_file(ofiles[j], self.spectrograph.name, [], cfg_lines=cfg_lines, setup_lines=setup_lines, sorted_files=data_lines, paths=paths)", "(:obj:`bool`, optional): Return row indices with the first occurence of", "configs=None): \"\"\" Get the list of the unique configuration names.", "changes to the strings will be truncated at 4 characters.", "some files have None in # their MJD. This is", "{:s}\\n'.format(setup)) ff.write('\\n'.join(dict_to_lines(cfgs[setup], level=1)) + '\\n') ff.write('#---------------------------------------------------------\\n') mjd = subtbl['mjd'].copy() #", "to read any of the headers. Set to False to", "= [str_i + str_j for str_i in string.ascii_uppercase for str_j", "the main configuration file for PypeIt, configuring the control-flow and", "been defined. For now this is a simple grouping of", "additional ``PypeIt`` columns for manual extraction configs (:obj:`str`, :obj:`list`, optional):", "a string with comma-separated types. merge (:obj:`bool`, optional): Merge the", "The spectrograph used to collect the data save to each", "FileExistsError(f'{ofile} already exists; set flag to overwrite.') # Check the", "file. Returns: `astropy.table.Table`: The table object that would have been", "of the other files show a different # configuration. for", "deepcopy(self.configs) if copy else self.configs if rm_none and 'None' in", "report a warning and continue. usrdata (astropy.table.Table, optional): Parsed for", "a given frame type. The frames must also match the", "(e.g. 
grating angle) if isinstance(config[k], float): if row[k] is None:", "not in self.keys() and default: self['calib'] = '0' # Make", "self.type_bitmask.flagged(type_bits, flag='standard') for b, f, ra, dec in zip(type_bits[indx], self['filename'][indx],", "import deepcopy import datetime from IPython import embed import numpy", "interface to the relevant fits file metadata used during the", "= False if len(existing_keys) > 0 and match_type: for key", "selected row. \"\"\" _cfg_keys = self.spectrograph.configuration_keys() if cfg_keys is None", "'framebit' in self.keys(): msgs.warn('Removing existing frametype and framebit columns.') if", "# Make sure the data are correctly ordered srt =", "Select frames indx = self.type_bitmask.flagged(self['framebit'], ftype) if calib_ID is not", "have been defined; run get_frame_types.') ignore_frames = list(ignore_frames.keys()) msgs.info('Unique configurations", "is not None: if 'frametype' not in self.keys(): msgs.error('To ignore", "groups without \\'setup\\' and \\'calibbit\\' ' 'columns; run set_configurations and", "frame. Returns: astropy.time.Time: The MJD of the observation. \"\"\" return", "PypeItError: Raised if the 'setup' or 'calibbit' columns haven't been", "Args: row (:obj:`int`): The 0-indexed row used to construct the", "format='ascii.fixed_width') ff.write('##end\\n') ff.close() # TODO: Do we need a calib", "in the internal table. See above. Raises: TypeError: Raised if", "science or standard frame, see :func:`set_combination_groups`. .. note:: This should", "this sets the combination groups to be either undefined or", "in self.keys(): del self['calibbit'] # Groups have already been set", "= np.logical_not(self.type_bitmask.flagged(type_bits)) if np.any(indx): msgs.info(\"Couldn't identify the following files:\") for", "user-provided data? 
It may be (see get_frame_types) and I'm just", "np.where([t == col for t in tbl_cols])[0][0] if indx !=", "int(icbit) # for yaml # Skip this group if ignore", "is problem try: time.Time(data['mjd'], format='mjd') except ValueError: mjd = np.asarray(data['mjd'])", "that the instrument setup has only one configuration. if len(cfg_keys)", "self.table['filename'])[0][0] for f in usrdata['filename']] # Convert types if possible", "table and specify any validation checks. par (:obj:`pypeit.par.pypeitpar.PypeItPar`): PypeIt parameters", "Force the configurations to be reset. fill (:obj:`str`, optional): If", "not defined. \"\"\" if 'calibbit' not in self.keys(): msgs.error('Calibration groups", "data['filename'][idx] = os.path.split(ifile) if not data['directory'][idx]: data['directory'][idx] = '.' #", "files is None else self._build(files, strict=strict, usrdata=usrdata)) # Merge with", "and type are expected to be the key and value", "number groups by searching for the maximum number # provided,", "as in the table, and the keywords used to set", "image } } } #_det = np.arange(self.spectrograph.ndet)+1 if det is", "now, check that the configuration values were # correctly assigned", "# At this point, we need the frame type to", "is None: # No group selected continue # Assign the", "self.frame_paths(self.find_frames(ftype, calib_ID=calib_ID)) def frame_paths(self, indx): \"\"\" Return the full paths", "typically pulled from the PypeIt file. This function: - sets", "an argument and converted to a zero-filled string with two", "the logic. The 'calib' column has a string type to", "in the table. columns (:obj:`str`, :obj:`list`, optional): A list of", "# this change? # http://docs.astropy.org/en/stable/table/access_table.html#bytestring-columns-in-python-3 # # See also: #", "line; ``# `` is added to the beginning of each", "obstime (:class:`astropy.time.Time`, optional): The MJD of the observation. 
If None,", "# Take only those present output_cols = np.array(columns) return output_cols[np.isin(output_cols,", "not in self.keys() else self['dispangle'][row] dichroic = 'none' if 'dichroic'", "dictionary, one dictionary per configuration with the associated values of", "if 'calib' not in self.keys() and default: self['calib'] = '0'", "input metadata object. .. todo:: - Here's where we could", "Find the frames in this group #in_group = self.find_calib_group(i) in_cbit", "frame was observed. .. todo:: - Consolidate with :func:`convert_time` ?", "same name. ignore (:obj:`list`, optional): Ignore calibration groups in the", "following files:\") for f in self['filename'][indx]: msgs.info(f) if not flag_unknown:", "frames are associated with one calibration group. TODO: Is this", "None else self._build(files, strict=strict, usrdata=usrdata)) # Merge with user data,", "isinstance(header, list) else [header] for h in _header: f.write(f'# {h}\\n')", "the keys for :class:`pypeit.core.framematch.FrameTypeBitMask`. If set to the string 'None',", "(nearly the complete list). Args: write_bkg_pairs (:obj:`bool`, optional): Add additional", "to have dtype=object, otherwise # any changes to the strings", "isn't been defined. \"\"\" if 'setup' not in self.keys(): msgs.error('Cannot", "for ftype, metakey in ignore_frames.items(): # TODO: For now, use", "Output file not defined so just print it print('\\n'.join(data_lines)) return", "metadata keywords set by config_indpendent_frames are not ' \\ 'correctly", "\"\"\" Generate the list of columns to be included in", "__len__(self): return self.table.__len__() def __repr__(self): return self.table._base_repr_(html=False, descr_vals=['PypeItMetaData:\\n', ' spectrograph={0}\\n'.format(", "checked. If None, this is set by :func:`unique_configurations`. 
force (:obj:`bool`,", "' length={0}\\n'.format(len(self))]) def _repr_html_(self): return self.table._base_repr_(html=True, max_width=-1, descr_vals=['PypeItMetaData: spectrograph={0}, length={1}\\n'.format(", "good, so we're done return # Alert the user that", "row (:obj:`int`): The 0-indexed row of the frame. Returns: astropy.time.Time:", "be converted to an assert statement... raise ValueError('CODING ERROR: Found", "for a few instruments (e.g. VLT) where meta data may", "is not None and len(rows) != len(self.table): raise ValueError('Boolean vector", "to determine unique configurations.') # If the frame types have", "of these configurations. configs (:obj:`str`, :obj:`list`, optional): One or more", "frame # TODO: Should this be 'standard' or 'science' or", "this group if ignore is not None and cbit in", "Construct the setups dictionary cfg = self.unique_configurations(copy=True, rm_none=True) # TODO:", "for backwards compatibility, but we should consider reformatting it. And", "to subclass this from astropy.table.Table, but that # proved too", "+= ' {0}\\n'.format(file) msgs.warn(msg) # Return return data # TODO:", "is primarily used for QA now; but could probably use", "in the provided list. return_index (:obj:`bool`, optional): Return row indices", "'comb_id' not in self.keys(): self['comb_id'] = -1 if 'bkg_id' not", "else np.atleast_1d(configs) # TODO: Why do we need to specify", "be unique for each science or standard frame, see :func:`set_combination_groups`.", "def _check_calib_groups(self): \"\"\" Check that the calibration groups are valid.", "listing the data files to read. This function writes the", "ifile,ftypes in user.items(): indx = self['filename'] == ifile type_bits[indx] =", "None, all detectors are included. config_only (:obj:`bool`, optional): Just return", "the top-level designation of the configuration itself. 
Returns: dict: The", "must have been defined; ' 'run get_frame_types.') calibs = '0'", "there were corrupt header cards mjd[mjd == None] = -99999.0", "names *must* match configuration_keys() for spectrographs setup = {skey: {'--':", "The frames must also match the science frame index, if", "in_cbit cfg[setup[0]][cbit][key] = [ os.path.join(d,f) for d,f in zip(self['directory'][ftype_in_group], self['filename'][ftype_in_group])]", "one directory .. include:: ../include/links.rst \"\"\" import os import io", "the provided files; see :func:`pypeit.spectrographs.spectrograph.get_headarr`. Set to False to instead", "this needs to have dtype=object, otherwise # any changes to", "configurations, always ignoring any 'None' # configurations... cfg = self.unique_configurations(copy=True,", "selected configurations to ignore rm = np.logical_not(np.isin(setups, ignore)) setups =", ":obj:`astropy.table.Table`: A Table with two columns, the type names and", "initialize the combination group and background group columns, and/or to", "files assumed to be from a single configuration.') return self._get_cfgs(copy=copy,", "in ignore: continue # Find the frames in this group", "if ignore[i]: continue self.configs[uniq[i]] = self.get_configuration(indx[i]) msgs.info('Found {0} unique configurations.'.format(len(self.configs)))", "if header is not None: _header = header if isinstance(header,", "selected calibration group. Raises: PypeItError: Raised if the 'calibbit' column", "exist, this sets the combination groups to be either undefined", "def master_key(self, row, det=1): \"\"\" Construct the master key for", "the frame types. The length must match the existing number", "in self.keys() else self['binning'][row] skey = 'Setup {}'.format(self['setup'][row]) # Key", "into the existing table. 
This will *overwrite* any existing columns.", "\"\"\" # Set output path if output_path is None: output_path", "row (:obj:`int`): The 0-indexed row used to construct the key.", "bits type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftype) # Find the nearest standard", "bits. table (:class:`astropy.table.Table`): The table with the relevant metadata for", "``'table'``, the table that would have been printed/written to disk", "None: self['setup'] = fill return _configs = self.unique_configurations() if configs", "(:obj:`int`, :obj:`tuple`, optional): The 1-indexed detector number(s). If a tuple,", "each instrument. Args: output_path (:obj:`str`, optional): Root path for the", "usrdata (:obj:`astropy.table.Table`, optional): A user provided set of data used", "frame types are overwitten by the provided type. \"\"\" if", "the configuration. cfg_keys (:obj:`list`, optional): The list of metadata keys", "+= ' {0}\\n'.format(self['filename'][i]) msgs.warn(msg) # And remove 'em self.table =", "The calib file provides the unique instrument configurations (setups) and", "config (dict): Defines the configuration spectrograph (pypeit.spectrographs.spectrograph.Spectrograph): Used to grab", "'frametype' in self.keys() or 'framebit' in self.keys(): msgs.warn('Removing existing frametype", "self.table['setup'] = 'None' nrows = len(self) for i in range(nrows):", "column.') #embed(header='372 of metadata') elif key in meta_data_model.keys(): # Is", "Columns to pad string array ftype_colmA = table.Column(self.type_bitmask.type_names(type_bits), name='frametype') #", "# Find the nearest standard star to each science frame", "object and background frame pairs. write_manual (:obj:`bool`, optional): Add additional", "should probably go somewhere else or just removed. assert isinstance(cfg_limits[key],", "if the 'setup' or 'calibbit' columns haven't been defined. 
\"\"\"", "could probably use the pypeit file instead Args: ofile (:obj:`str`):", "user that some of the frames are going to be", "The valid values for configuration keys is set by :func:`~pypeit.spectrographs.spectrograph.Spectrograph.valid_configuration_values`.", "but that # proved too difficult. class PypeItMetaData: \"\"\" Provides", "'filename', 'instrume' ] def keys(self): return self.table.keys() def sort(self, col):", "provided. This functionality is only used when building the metadata", "type without a # configuration indx = (self.table['setup'] == 'None')", "Construct file ff = open(ofile, 'w') for setup in cfgs.keys():", "columns. Returns: `astropy.table.Table`: Table with two columns, the frame type", "association with an instrument configuration - This is primarily used", "same as the calibration bit number, and the detector number", "'idname' not in self.keys(): # raise ValueError('idname is not set", "if 'framebit' in self.keys(): del self.table['framebit'] # # TODO: This", "configuration indx = (self.table['setup'] == 'None') & self.find_frames(ftype) if not", "'setup' in self.keys() and not force: return if 'setup' not", "frames have configurations that cannot be reduced by PypeIt' \\", "that group. ngroups = 0 for i in range(len(self)): if", "\"\"\" Set and return a Table with the frame types", "obstime tiso = time.Time(_obstime, format='isot') dtime = datetime.datetime.strptime(tiso.value, '%Y-%m-%dT%H:%M:%S.%f') return", "of each frame; see :attr:`calib_bitmask`. Args: global_frames (:obj:`list`, optional): A", "type and continue. user (:obj:`dict`, optional): A dictionary with the", "from the input metadata object. .. todo:: - Here's where", "col in ['dithpat', 'dithpos', 'dithoff', 'calib', 'comb_id', 'bkg_id']: if col", "to construct the configuration. cfg_keys (:obj:`list`, optional): The list of", "these configurations. 
configs (:obj:`str`, :obj:`list`, optional): One or more strings", "not force: self._set_calib_group_bits() self._check_calib_groups() return # TODO: The rest of", "a pypeit file in data-table format. The pypeit file is", "for file in bad_files: msg += ' {0}\\n'.format(file) msgs.warn(msg) #", "numpy.ndarray: A boolean array, or an integer array if index=True,", "flag='science' if foundstd else 'standard') # Find the files without", "to include in the output file. Can be provided as", "table. This will *overwrite* any existing columns. Returns: `astropy.table.Table`: Table", "_impose_types(self, columns, types): \"\"\" Impose a set of types on", "*in place*. If the 'setup' column already exists, the configurations", "a set of types on certain columns. .. note:: :attr:`table`", "overwrite.'.format(ofile)) # Grab output columns output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs, write_manual=write_manual) cfgs", "not None: self['setup'] = fill return _configs = self.unique_configurations() if", "sure the data are correctly ordered srt = [np.where(f ==", "background group columns, and/or to initialize the combination groups to", "units from pypeit import msgs from pypeit import utils from", "for f in self['filename'][indx]: msgs.info(f) if not flag_unknown: msgs.error(\"Check these", ":attr:`table`, meaning this method may modify that attribute directly. The", "not in self.keys(): self['bkg_id'] = -1 if assign_objects and np.all(self['comb_id']", "= output_tbl[rows] # Select and sort the data by a", "row used to construct the key. det (:obj:`int`, :obj:`tuple`, optional):", "\"\"\" # Making Columns to pad string array ftype_colmA =", "configuration ID is the same as included in the configuration", "the table. 
columns (:obj:`str`, :obj:`list`, optional): A list of columns", "from a pypeit file) if 'calib' in self.keys() and 'calibbit'", "in self.keys() else self['dispname'][row] dispangle = 'none' if 'dispangle' not", "selecting those frames in the table included in the selected", "all frames to a single calibration group, if the 'calib'", "in self.keys(): msgs.warn('Cannot associate standard with science frames without sky", "Check if os.path.basename(ifile) != usrdata['filename'][idx]: msgs.error('File name list does not", "Impose a set of types on certain columns. .. note::", "group list grp = parse.str2list(self['calib'][i], ngroups) if grp is None:", "# All are set, so we're done return # Some", "row index where it first occurs. This is different from", "the fitstbl that will be at the heart of PypeItMetaData.", "None: if 'frametype' not in self.keys(): msgs.error('To set global frames,", "self['decker'][row] slitwid = 'none' if 'slitwid' not in self.keys() else", "Merge the frame typing into the exiting table. Returns: :obj:`astropy.table.Table`:", "files are None in the instantiation of PypeItMetaData.' ' The", "done return # Alert the user that some of the", "type) if usrdata is None: usr_row = None else: #", "from the input dictionary if user is not None: if", "the nearest standard star to each science frame # TODO:", "in the provided list. write_bkg_pairs (:obj:`bool`, optional): Add additional ``PypeIt``", "optional): If the 'calib' column is not present, set a", "0: msgs.warn('Ignoring {0} frames with configuration set to None.'.format( np.sum(ignore)))", "key. det (:obj:`int`, :obj:`tuple`, optional): The 1-indexed detector number(s). If", "self['bkg_id'] = -1 if assign_objects and np.all(self['comb_id'] < 0): #", "is not None: # Remove the selected configurations to ignore", "an `astropy.io.table.Table` KeyError: Raised if `filename` is not a key", "Returns: dict: A dictionary with the metadata values from the", "be 'standard' or 'science' or both? 
if 'ra' not in", "in self.keys(): msgs.error('To set global frames, types must have been", "det is None else [det] #for d in _det: #", "types. setup (:obj:`str`): If the 'setup' columns does not exist,", "ftype, calib_ID=None, index=False): \"\"\" Find the rows with the associated", "self.construct_obstime(row) if obstime is None else obstime tiso = time.Time(_obstime,", "= self['idname'] == self.spectrograph.idname(ftype) if useIDname \\ # else np.ones(len(self),", "reset. fill (:obj:`str`, optional): If the 'setup' column does not", "processing level of the metadata table, are directory, filename, frametype,", "# Get the subtable of frames taken in this configuration", "return np.where(indx)[0] if index else indx def find_frame_files(self, ftype, calib_ID=None):", "the requested type. Raises: PypeItError: Raised if the `framebit` column", "setup dictionary. .. todo:: - This is for backwards compatibility,", "provided type. \"\"\" if not append: self['framebit'][indx] = 0 self['framebit'][indx]", "pypeit file. The 'calibbit' column is actually what is used", "frame. This is not the same as the backwards compatible", "cfg_limits[key]) if not np.all(indx): msgs.warn('Found frames with invalid {0}.'.format(key)) good", "this be 'standard' or 'science' or both? if 'ra' not", "If ``'all'``, pass back all configurations. Otherwise, only return the", "and framebit columns.') if 'frametype' in self.keys(): del self.table['frametype'] if", "is edited in place. Args: columns (:obj:`list`): List of column", "np.all(indx): msgs.warn('Found frames with invalid {0}.'.format(key)) good &= indx if", "'setup' column already exists, the configurations are **not** reset unless", "calibs # Set the bits based on the string representation", "the fits metadata required by PypeIt. .. 
include common links,", "output columns = self.spectrograph.pypeit_file_keys() extras = [] # comb, bkg", "\\'calibbit\\' ' 'columns; run set_configurations and set_calibration_groups.') if os.path.isfile(ofile) and", "data to include in the table and specify any validation", "is edited *in place*. If the 'setup' column already exists,", "returns None when writing to a # file... return None", "def row_match_config(row, config, spectrograph): \"\"\" Queries whether a row from", "frames into sets. Requires the 'setup' column to have been", "is called above if user is not None!) msgs.info(\"Typing completed!\")", "not set. First run set_calibration_groups.') return self.calib_bitmask.flagged(self['calibbit'].data, grp) def find_frame_calib_groups(self,", "= open(ofile, 'w') for setup in cfgs.keys(): # Get the", "not in self.keys(): msgs.error('Cannot provide master key string without setup", "1: msgs.error('Science frames can only be assigned to a single", "output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs, write_manual=write_manual) # Write the pypeit files ofiles", "the DEIMOS image reader will # fault. self.set_configurations(fill=setup) self.set_calibration_groups(default=True) self.set_combination_groups()", "the vanilla configuration is included. write_bkg_pairs (:obj:`bool`, optional): When constructing", "'dec' not in self.keys(): msgs.warn('Cannot associate standard with science frames", "object, include two columns called `comb_id` and `bkg_id` that identify", "the configurations are not set not_setup = self.table['setup'] == 'None'", "Convenience method to return :attr:`configs` with possible alterations. This method", "+ 'frames, configuration {0} does not have unique '.format(cfg_key) +", "'frametype' not in self.keys(): msgs.error('To ignore frames, types must have", "call to clean_configurations? I didn't add it # here, because", "# Output file not defined so just print it print('\\n'.join(data_lines))", "back all configurations. 
Otherwise, only return the configurations matched to", "automatic identification.') b = self.type_bitmask.turn_off(b, flag='standard') continue # If an", "group that it must match. If None, any row of", "# http://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table.convert_bytestring_to_unicode # # Or we can force type_names() in", "len(self): raise ValueError('The user-provided dictionary does not match table length.')", "raise ValueError('Boolean vector selecting output rows has incorrect length.') #", "pypeit file. Each selected column must be a valid pypeit", "all the configurations to the provided `setup` - assigns all", "c in self.configs.values(): if row_match_config(self.table[i], c, self.spectrograph): break j +=", "type are expected to be the key and value of", "configuration match with the metadata keywords. Also raised when some", "crashing out if there are unidentified files, leave without a", "This function: - sets the frame types based on the", "possible existing_keys = list(set(self.table.keys()) & set(usrdata.keys())) radec_done = False if", "are valid self._check_calib_groups() def find_frames(self, ftype, calib_ID=None, index=False): \"\"\" Find", "assign more than {0} configurations!'.format(len(cfg_iter))) self.configs[cfg_iter[cfg_indx]] = self.get_configuration(i, cfg_keys=cfg_keys) cfg_indx", "return the # correct type... if int(str(ftype_colmA.dtype)[2:]) < 9: ftype_colm", "self.keys(): raise ValueError(f'Cannot sort by {sort_col}. Not a valid column.')", "in `usrdata` to the type in the internal table. See", "from that configuration with a given calibration group. .. todo::", "- This is primarily used for QA now; but could", "msgs.error('No frames to use to define configurations!') # Get the", "printed/written to disk is returned. Otherwise, the string is interpreted", "if output in [None, 'table'] else output if ofile is", ":func:`convert_time` ? 
Args: row (:obj:`int`): The 0-indexed row of the", "append=False): \"\"\" Edit the frame type by hand. Args: indx", "if 'calibbit' not in self.keys(): msgs.error('Calibration groups are not set.", "is None else [det] #for d in _det: # setup[skey][str(d).zfill(2)]", "calibration group bits. table (:class:`astropy.table.Table`): The table with the relevant", "ff: output_tbl.write(ff, format='ascii.fixed_width') data_lines = ff.getvalue().split('\\n')[:-1] if ofile is None:", "copy import deepcopy import datetime from IPython import embed import", "Is this meta data?? dtype = meta_data_model[key]['dtype'] else: dtype =", "can be used to initialize columns that the user might", "# TODO: This needs to be moved into each Spectrograph", "'frames, configuration {0} does not have unique '.format(cfg_key) + '{0}", "not None: if 'frametype' not in self.keys(): msgs.error('To ignore frames,", "framematch from pypeit.core import flux_calib from pypeit.core import parse from", "0: self.configs = {} self.configs[cfg_iter[cfg_indx]] = {} msgs.info('All files assumed", "skey = 'Setup {}'.format(self['setup'][row]) # Key names *must* match configuration_keys()", "a boolean array of the correct length. Returns: list: List", "if foundstd else 'standard') # Find the files without any", "file. If None are provided, the vanilla configuration is included.", "in self.keys(): msgs.error('Cannot write calibration groups without \\'setup\\' and \\'calibbit\\'", "and the detector. The configuration ID is the same as", "behavior. If not provided, the default parameters specific to the", "if 'binning' not in self.keys() else self['binning'][row] skey = 'Setup", "if 'setup' not in self.keys(): msgs.error('Must have defined \\'setup\\' column", "None and cbit in ignore: continue # Find the frames", "column.') # Ignore any NoneTypes indx = output_tbl[sort_col] != None", "the object itself. rm_none (:obj:`bool`, optional): Remove any configurations set", "include in the table. 
The type can be anything allowed", "only one configuration. if len(cfg_keys) == 0: self.configs = {}", "output_path=None, cfg_lines=None, write_bkg_pairs=False, write_manual=False, configs=None): \"\"\" Write a pypeit file", "row of the frame. Returns: astropy.time.Time: The MJD of the", "= 'None' nrows = len(self) for i in range(nrows): for", "are expected to be the key and value of the", "(int,np.integer)): return os.path.join(self['directory'][indx], self['filename'][indx]) return [os.path.join(d,f) for d,f in zip(self['directory'][indx],", "print it print('\\n'.join(data_lines)) return None # Write the output to", "indx = np.where(self.find_frames(ftype))[0] for i in indx: self['calib'][i] = calibs", "not None: for key in cfgs.keys(): if key in ignore:", "group selected continue # Assign the group; ensure the integers", "keys therefore *must* match the number of files in :attr:`table`.", "be None for file:' + msgs.newline() + f) msgs.warn('The above", "of the keywords in the provided configuration match with the", "will try to match the data type of the `usrdata`", "the calibration # group if 'setup' not in self.keys(): msgs.error('Must", "fault if :func:`fits.getheader` fails to read any of the headers.", "KeyError: Raised if `filename` is not a key in the", "files. strict (:obj:`bool`, optional): Function will fault if there is", "+ double_alphabet cfg_indx = 0 # TODO: Placeholder: Allow an", "# Skip this group if ignore is not None and", "valid pypeit metadata keyword, specific to :attr:`spectrograph`. 
Additional valid keywords,", "for d,f in zip(self['directory'][indx], self['filename'][indx])] def set_frame_types(self, type_bits, merge=True): \"\"\"", "ignore is not None: # Remove the selected configurations to", "maximum ngroups = max(l+1, ngroups) # Define the bitmask and", "self.spectrograph.get_headarr(ifile, strict=strict) # Grab Meta for meta_key in self.spectrograph.meta.keys(): value", "msg = 'Time invalid for {0} files.\\n'.format(len(bad_files)) msg += 'Continuing,", "self.get_configuration(indx[0], cfg_keys=cfg_keys) cfg_indx += 1 # Check if any of", "not None and 'all' not in _configs: use = np.isin(setups,", "each string. Ignored if ``output`` does not specify an output", "with an instrument configuration - This is primarily used for", "the data type in `usrdata` to the type in the", "column is actually what is used to determine the calibration", "str), \\ 'CODING ERROR: metadata keywords set by config_indpendent_frames are", "4 characters. self.table['calib'] = np.full(len(self), 'None', dtype=object) for i in", "set(usrdata.keys())) radec_done = False if len(existing_keys) > 0 and match_type:", ":obj:`list`): One or more files to use to build the", "= par if not isinstance(self.par, PypeItPar): raise TypeError('Input parameter set", "check that the # metakey is either not set or", "calibration group should only contain # frames from a single", "items in the metadata table listed by the spectrograph ``configuration_keys``", "and '#' in value: value = value.replace('#', '') msgs.warn('Removing troublesome", "any of the headers. Set to False to report a", "meaning this method may modify that attribute directly. 
The valid", "io.StringIO() as ff: output_tbl.write(ff, format='ascii.fixed_width') data_lines = ff.getvalue().split('\\n')[:-1] if ofile", "files _files = files if hasattr(files, '__len__') else [files] #", "not crash if some files have None in # their", "that the method returns None when writing to a #", "optional): The data to include in the table. The type", "k in config.keys(): # Deal with floating configs (e.g. grating", "in_cfg = self['setup'] == setup if not np.any(in_cfg): continue paths", "will just add the column anyway, with the type in", "# # See also: # # http://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table.convert_bytestring_to_unicode # # Or", "This will change! # The configuration must be present to", "optional): Force the calibration groups to be reconstructed if the", "table is dictated by the header keywords specified for the", "self.spectrograph.idname(ftype) if useIDname \\ # else np.ones(len(self), dtype=bool) # Include", "int(str(ftype_colmA.dtype)[2:]) < 9: ftype_colm = table.Column(self.type_bitmask.type_names(type_bits), dtype='U9', name='frametype') else: ftype_colm", "= self.get_configuration(indx[0], cfg_keys=cfg_keys) cfg_indx += 1 # Check if any", "instrument setup without \\'setup\\' column; ' 'run set_configurations.') dispname =", "key and value of the dictionary, respectively. The number of", "a Table with the frame types and bits. Args: type_bits", "len(self.calib_bitmask.flagged_bits(self['calibbit'][i])) > 1: msgs.error('Science frames can only be assigned to", "full paths of one or more frames. \"\"\" if isinstance(indx,", "of frame types from the input metadata object. .. 
todo::", "\\ # else np.ones(len(self), dtype=bool) # Include a combination of", "Returns: `numpy.ndarray`_: Array of columns to be used in the", "string from copy import deepcopy import datetime from IPython import", "names; run set_configurations.') # Unique configurations setups, indx = np.unique(self['setup'],", "read from the pypeit file. The 'calibbit' column is actually", "np.where(np.any([self.find_frames('science'), self.find_frames('standard')], axis=0))[0] self['comb_id'][sci_std_idx] = np.arange(len(sci_std_idx), dtype=int) + 1 def", "the beginning of the list for col in ['framebit', 'frametype',", "= int(icbit) # for yaml # Skip this group if", "= '0' if n_cfg == 1 else ','.join(np.arange(n_cfg).astype(str)) for ftype", "pypeit import utils from pypeit.core import framematch from pypeit.core import", "list of files with a given frame type. The frames", "(cf. ``force``). .. warning:: Any frame types returned by the", "Args: configs (:obj:`dict`, optional): A nested dictionary, one dictionary per", "single identifier. Ignores other inputs. Raises: PypeItError: Raised if none", "list(string.ascii_uppercase) + double_alphabet cfg_indx = 0 # TODO: Placeholder: Allow", "# Or we can force type_names() in bitmask to always", "exprng = self.par['scienceframe']['exprng'] if ftype == 'science' \\ else self.par['calibrations']['{0}frame'.format(ftype)]['exprng']", "data=None, usrdata=None, strict=True): if data is None and files is", "not checked. If None, this is set by :func:`unique_configurations`. force", "write_manual=write_manual) cfgs = self.unique_configurations(copy=ignore is not None) if ignore is", "isinstance(self.par, PypeItPar): raise TypeError('Input parameter set must be of type", "# No matching meta data defined, so just set all", "Args: force (:obj:`bool`, optional): Force the configurations to be redetermined.", "based on the string values of the 'calib' column. 
\"\"\"", "in _configs.items(): if len(set(cfg.keys()) - set(self.keys())) > 0: msgs.error('Configuration {0}", "metadata table generated within PypeIt. match_type (:obj:`bool`, optional): Attempt to", "the groups are valid self._check_calib_groups() def find_frames(self, ftype, calib_ID=None, index=False):", "the combination of the configuration, the calibration group, and the", "of keys therefore *must* match the number of files in", "= self.get_configuration(i, cfg_keys=cfg_keys) cfg_indx += 1 msgs.info('Found {0} unique configurations.'.format(len(self.configs)))", "Args: row (astropy.table.Row): From fitstbl config (dict): Defines the configuration", "construct the key. det (:obj:`int`, :obj:`tuple`, optional): The 1-indexed detector", "spectrograph): \"\"\" Queries whether a row from the fitstbl matches", "frame types. \"\"\" # Checks if 'frametype' in self.keys() or", "character from {0}. Returning {1}.'.format( meta_key, value)) data[meta_key].append(value) msgs.info('Added metadata", "def frame_paths(self, indx): \"\"\" Return the full paths to one", "of the frames are going to be # removed msg", "go somewhere else or just removed. assert isinstance(cfg_limits[key], list), \\", "the table contents. rows (`numpy.ndarray`_, optional): A boolean vector selecting", "frame type by hand. Args: indx (:obj:`int`): The 0-indexed row", "was provided if rows is not None: output_tbl = output_tbl[rows]", "The 'calibbit' column is actually what is used to determine", "Select frames in the same calibration group indx &= self.find_calib_group(calib_ID)", "np.isin(self[key], cfg_limits[key]) if not np.all(indx): msgs.warn('Found frames with invalid {0}.'.format(key))", "self.table['setup'][indx] = cfg_key continue # Find the unique values of", "import parse from pypeit.core import meta from pypeit.io import dict_to_lines", "the detector. 
The configuration ID is the same as included", "headarr = self.spectrograph.get_headarr(ifile, strict=strict) # Grab Meta for meta_key in", "Table with two columns, the frame type name and bits.", "Convert the string to the group list grp = parse.str2list(self['calib'][i],", "isinstance(columns, list) else columns.split(',') badcol = [col not in all_cols", "self.get_configuration(i, cfg_keys=cfg_keys) cfg_indx += 1 msgs.info('Found {0} unique configurations.'.format(len(self.configs))) return", "files. If None, set to current directory. If the output", "provided the table is instantiated without any data. Args: spectrograph", "this configuration. if uniq_meta.size != 1: msgs.warn('When setting the instrument", "for i in range(nrows): for d, cfg in _configs.items(): if", "required Returns: dict: Dictionary with the data to assign to", "'comb_id' or 'bkg_id' columns do not exist, they're set to", "two digits (the maximum number of detectors is 99). Using", "calibration group. Args: grp (:obj:`int`): The calibration group integer. Returns:", "self.keys(): msgs.error('To account for ignored frames, types must have been", "Raised if the 'setup' or 'calibbit' columns haven't been defined.", "before continuing\") # Finish up (note that this is called", "'{0}_{1}'.format(self.spectrograph.name, setup) odir = os.path.join(output_path, root) if not os.path.isdir(odir): os.makedirs(odir)", "array selecting those frames in the table included in the", "not None: output_tbl = output_tbl[rows] # Select and sort the", "self.spectrograph.meta.keys()} data['directory'] = ['None']*len(_files) data['filename'] = ['None']*len(_files) # Build the", "dictionary. 
Args: indx (:obj:`int`): The index of the table row", "{0} defined using unavailable keywords!'.format(k)) self.table['setup'] = 'None' nrows =", "parse.str2list(self['calib'][i], ngroups) if grp is None: # No group selected", "# Check the input if not isinstance(usrdata, table.Table): raise TypeError('Must", "be of type PypeItPar.') self.type_bitmask = framematch.FrameTypeBitMask() # Build table", "keep going continue # Convert to a list of numbers", "Initialize columns that the user might add self.set_user_added_columns() # Validate", "is None else np.atleast_1d(configs) # TODO: Why do we need", "associated metadata for each. \"\"\" _cfg = deepcopy(self.configs) if copy", "types must have been defined; run get_frame_types.') ignore_frames = list(ignore_frames.keys())", "or more strings to write to the top of the", "configs for k, cfg in _configs.items(): if len(set(cfg.keys()) - set(self.keys()))", "return _configs = self.unique_configurations() if configs is None else configs", "[] for k in config.keys(): # Deal with floating configs", "the :func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames` method for :attr:`spectrograph` will be ignored in the", "to match to the metadata table generated within PypeIt. **Note**:", "have corrupt headers:\\n' for file in bad_files: msg += '", "the combination group and background group columns, and/or to initialize", "(setups) and the association of each frame from that configuration", "defined using unavailable keywords!'.format(k)) self.table['setup'] = 'None' nrows = len(self)", "useIDname and 'idname' not in self.keys(): # raise ValueError('idname is", "too difficult. class PypeItMetaData: \"\"\" Provides a table and interface", "self.spectrograph.configuration_keys() if cfg_keys is None else cfg_keys return {k:self.table[k][indx] for", "the reduction. 
The content of the fits table is dictated", "np.arange(self.spectrograph.ndet)+1 if det is None else [det] #for d in", "'frametype' not in self.keys(): msgs.error('To account for ignored frames, types", "is not the same as the backwards compatible \"setup\" dictionary.", "without \\'setup\\' and \\'calibbit\\' ' 'columns; run set_configurations and set_calibration_groups.')", "is provided. Args: ftype (str): The frame type identifier. See", "this is set by :func:`unique_configurations`. force (:obj:`bool`, optional): Force the", "fits file metadata used during the reduction. The content of", "in usrdata.keys(): self.table[key] = usrdata[key][srt] def finalize_usr_build(self, frametype, setup): \"\"\"", "columns to print and which column to use for sorting.", "assert to check that the # metakey is either not", "self.keys() or 'framebit' in self.keys(): msgs.warn('Removing existing frametype and framebit", "(note that this is called above if user is not", "in columns: columns += [key] # Take only those present", "# Configuration identifiers are iterations through the # upper-case letters:", "self.keys(): self['manual'] = '' def write_sorted(self, ofile, overwrite=True, ignore=None, write_bkg_pairs=False,", "\"\"\" is_science = self.find_frames('science') for i in range(len(self)): if not", "in enumerate(cfg_keys): # Create the output directory root = '{0}_{1}'.format(self.spectrograph.name,", "det (:obj:`int`, optional): The 1-indexed detector to include. If None,", "of this class; use :func:`unique_configurations` instead. Args: copy (:obj:`bool`, optional):", "compatible \"setup\" dictionary. Args: indx (:obj:`int`): The index of the", "from {0}. Returning {1}.'.format( meta_key, value)) data[meta_key].append(value) msgs.info('Added metadata for", "if global_frames is not None: if 'frametype' not in self.keys():", "column already set. 
Finding unique configurations.') uniq, indx = np.unique(self['setup'],", "by the :func:`pypeit.spectrographs.spectrograph.Spectrograph.pypeit_file_keys`, which can be specific to each instrument.", "into each Spectrograph # if useIDname and 'idname' not in", "f in self['filename'][indx]: msgs.info(f) if not flag_unknown: msgs.error(\"Check these files", "ftype_colmA = table.Column(self.type_bitmask.type_names(type_bits), name='frametype') # KLUDGE ME # # TODO:", "the PypeIt file. This function: - sets the frame types", "continuing\") # Finish up (note that this is called above", "initialize the combination groups to the set of objects (science", "for backwards compatibility, but we should consider reformatting/removing it. Args:", "{k:[] for k in self.spectrograph.meta.keys()} data['directory'] = ['None']*len(_files) data['filename'] =", "same configuration. .. todo:: - Maintain a detailed description of", "len(set(cfg.keys()) - set(self.keys())) > 0: msgs.error('Configuration {0} defined using unavailable", "ras.astype(dtype) usrdata['dec'][~nones] = decs.astype(dtype) radec_done = True else: usrdata[key][~nones] =", "does not specify an output file. Returns: `astropy.table.Table`: The table", "one calibration group. TODO: Is this appropriate for NIR data?", "for the output pypeit files. 
If None, set to current", "if np.all(good): # All values good, so we're done return", "if c in self.keys(): self.table[c] = self.table[c].astype(t) def _build(self, files,", "where meta data may not be required Returns: dict: Dictionary", "for key in usrdata.keys(): self.table[key] = usrdata[key][srt] def finalize_usr_build(self, frametype,", "is provided but the frame types have not been defined", "them in # reverse order so I can always insert", "not set or a string assert metakey is None or", "of the metadata table, are directory, filename, frametype, framebit, setup,", "self.spectrograph.config_independent_frames() if ignore_frames is not None: if 'frametype' not in", "'dithoff', 'calib', 'comb_id', 'bkg_id']: if col not in tbl_cols: continue", "Provides a table and interface to the relevant fits file", "that we # don't have to do these gymnastics. Or", "metadata for each fits file to use in the data", "written; if ``'pypeit'``, the columns are the same as those", "Args: assign_objects (:obj:`bool`, optional): If all of 'comb_id' values are", "defined (see :func:`get_frame_types`), this method will fault! Args: force (:obj:`bool`,", "the instrument setup has only one configuration. if len(cfg_keys) ==", "\"\"\" _cfg = deepcopy(self.configs) if copy else self.configs if rm_none", "If None are provided, the vanilla configuration is included. write_bkg_pairs", "Build lists to fill data = {k:[] for k in", "two columns, the frame type name and bits. \"\"\" #", "use in the data reduction. \"\"\" def __init__(self, spectrograph, par,", "self['frametype'][indx] = self.type_bitmask.type_names(self['framebit'][indx]) def get_frame_types(self, flag_unknown=False, user=None, merge=True): \"\"\" Generate", "but the following frames may be empty or have corrupt", "types have not been defined yet. 
\"\"\" if self.configs is", "Select the output rows if a vector was provided if", "write_bkg_pairs=False, write_manual=False, configs=None): \"\"\" Write a pypeit file in data-table", "edit the relevant follow-on code so that we # don't", "as np import yaml from astropy import table, coordinates, time,", "selecting output rows has incorrect length.') # Get the columns", "the rows in the table. columns (:obj:`str`, :obj:`list`, optional): A", "= True else: usrdata[key][~nones] = usrdata[key][~nones].astype(dtype) # Include the user", "or isinstance(metakey, str), \\ 'CODING ERROR: metadata keywords set by", "self.configs = {} for i in range(len(uniq)): if ignore[i]: continue", "change? # http://docs.astropy.org/en/stable/table/access_table.html#bytestring-columns-in-python-3 # # See also: # # http://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table.convert_bytestring_to_unicode", "for :attr:`spectrograph`; see :func:`~pypeit.spectrographs.spectrograph.Spectrograph.allowed_mosaics`. Returns: :obj:`str`: Master key with configuration,", "not force: return self._get_cfgs(copy=copy, rm_none=rm_none) if 'setup' in self.keys(): msgs.info('Setup", "which to grab the data from the fits headers or", "None or isinstance(metakey, str), \\ 'CODING ERROR: metadata keywords set", "that the science frames are associated with one calibration group.", "else obstime tiso = time.Time(_obstime, format='isot') dtime = datetime.datetime.strptime(tiso.value, '%Y-%m-%dT%H:%M:%S.%f')", "the frame type name and bits. \"\"\" # Making Columns", "and not overwrite: msgs.error('{0} already exists. Use ovewrite=True to overwrite.'.format(ofile))", "files:\") for f in self['filename'][indx]: msgs.info(f) if not flag_unknown: msgs.error(\"Check", "desired behavior since if there are # empty or corrupt", "string or list of strings (e.g., ['A','C']). See :attr:`configs`. 
Raises:", "the relevant bits type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftype) # Find the", "if len(existing_keys) > 0 and match_type: for key in existing_keys:", "valid values for configuration keys is set by :func:`~pypeit.spectrographs.spectrograph.Spectrograph.valid_configuration_values`. \"\"\"", "algorithmic parameters and listing the data files to read. This", "Args: indx (:obj:`int`): The 0-indexed row in the table to", "of frames taken in this configuration indx = self['setup'] ==", "gymnastics. Or better yet, just stop # producing/using the *.calib", "is the same as included in the configuration column (A,", "checks that the science frames are associated with one calibration", "the group; ensure the integers are unique self['calibbit'][i] = self.calib_bitmask.turn_on(self['calibbit'][i],", "= np.arange(len(sci_std_idx), dtype=int) + 1 def set_user_added_columns(self): \"\"\" Set columns", "pass back all configurations. Otherwise, only return the configurations matched", "self['target'][row].replace(\" \", \"\"), self.spectrograph.camera, datetime.datetime.strftime(dtime, '%Y%m%dT'), tiso.value.split(\"T\")[1].replace(':','')) def get_setup(self, row,", "instead of the object itself. rm_none (:obj:`bool`, optional): Remove any", "# Get the list of keys to use cfg_keys =", "indx &= np.isin(self.table[metakey], uniq_meta) self.table['setup'][indx] = cfg_key def clean_configurations(self): \"\"\"", "configuration table without \\'setup\\' ' 'column; run set_configurations.') if os.path.isfile(ofile)", "list) else [configs] cfg_keys = [key for key in cfg.keys()", "a detailed description of the logic. 
The 'calib' column has", "{} for setup in cfg.keys(): _cfg[setup] = {} _cfg[setup]['--'] =", "optional): Merge the types and bits into the existing table.", "list) else [header] for h in _header: f.write(f'# {h}\\n') f.write('\\n')", "!= len(self.table): raise ValueError('Boolean vector selecting output rows has incorrect", "# If the frame types have been set, ignore anything", "unique for each science or standard frame, see :func:`set_combination_groups`. ..", "not an `astropy.io.table.Table` KeyError: Raised if `filename` is not a", "self.spectrograph.pypeit_file_keys() extras = [] # comb, bkg columns if write_bkg_pairs:", "from a single configuration if len(setup) != 1: msgs.error('Each calibration", "row in the table to edit frame_type (:obj:`str`, :obj:`list`): One", "`global_frames` is provided but the frame types have not been", "if configs is None else np.atleast_1d(configs) # TODO: Why do", "it is provided. Args: ftype (str): The frame type identifier.", "calibration group, if the 'calib' column does not exist -", "== configs[i]) & (self['framebit'] > 0)] = str(i) # Allow", "if 'setup' in self.keys(): msgs.info('Setup column already set. Finding unique", "frames with undefined configurations n_cfg = len(configs) # TODO: Science", "True. Returns: :obj:`list`: List of ``PypeIt`` files generated. \"\"\" #", "in `usrdata`, the function will try to match the data", "par (:class:`pypeit.par.pypeitpar.PypeItPar`): PypeIt parameters used to set the code behavior.", "is set by :func:`unique_configurations`. 
force (:obj:`bool`, optional): Force the configurations", "account for ignored frames, types must have been defined; run", "status message msg = 'Time invalid for {0} files.\\n'.format(len(bad_files)) msg", "dispname = 'none' if 'dispname' not in self.keys() else self['dispname'][row]", "data values indx &= np.isin(self.table[metakey], uniq_meta) self.table['setup'][indx] = cfg_key def", "of type PypeItPar.') self.type_bitmask = framematch.FrameTypeBitMask() # Build table self.table", "the setups dictionary cfg = self.unique_configurations(copy=True, rm_none=True) # TODO: We", "len(tbl_cols) for col in ['dithpat', 'dithpos', 'dithoff', 'calib', 'comb_id', 'bkg_id']:", "have already been set if 'setup' in self.keys() and not", "cfgs.keys(): if key in ignore: del cfgs[key] # Construct file", "master key is the combination of the configuration, the calibration", "other checks in this call. # indx &= self.spectrograph.check_frame_type(ftype, self.table,", "global_frames (:obj:`list`, optional): A list of strings with the frame", "group. ngroups = 0 for i in range(len(self)): if self['calib'][i]", "**Note**: This is ignored if `data` is also provided. This", "returned by the :func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames` method for :attr:`spectrograph` will be ignored", "dictionary, respectively. The number of keys therefore *must* match the", "if not append: self['framebit'][indx] = 0 self['framebit'][indx] = self.type_bitmask.turn_on(self['framebit'][indx], flag=frame_type)", "ngroups = max(l+1, ngroups) # Define the bitmask and initialize", "'frametype', 'filename', 'directory']: if col not in tbl_cols: continue indx", "this configuration uniq_meta = np.unique(self.table[metakey][in_cfg].data) # Warn the user that", "utils.yamlify(cfg[setup])}, level=1) # Get the paths in_cfg = self['setup'] ==", "to use in all calibration groups (e.g., ['bias', 'dark']). default", "of the dictionary, respectively. 
The number of keys therefore *must*", "header (:obj:`str`, :obj:`list`, optional): One or more strings to write", "if cfg_limits is None: # No values specified, so we're", "decs.astype(dtype) radec_done = True else: usrdata[key][~nones] = usrdata[key][~nones].astype(dtype) # Include", "that have multiple types, the types should be provided as", "group if ignore is not None and cbit in ignore:", "assign_objects and np.all(self['comb_id'] < 0): # find_frames will throw an", "# Ignore frames with undefined configurations n_cfg = len(configs) #", "this implementation, slicing the PypeItMetaData object # will return an", "the default if requested and 'calib' doesn't exist yet if", "configuration values were # correctly assigned in the spectrograph class", "for yaml # Skip this group if ignore is not", "or more 0-indexed rows in the table with the frames", "list.'.format(self.spectrograph.__class__.__name__) # Check that the metadata are valid for this", "in self.keys(): msgs.error('To account for ignored frames, types must have", "if ignore is not None and cbit in ignore: continue", "PypeIt' \\ ' and will be removed from the metadata", "to use to define configurations!') # Get the list of", "self.keys(): msgs.error('Must have defined \\'setup\\' column first; try running set_configurations.')", "frames associated with each configuration. The output data table is", "See also: :func:`pypeit.pypeitsetup.PypeItSetup.run`. .. todo:: - Why isn't frametype just", "columns += [key] # Take only those present output_cols =", "This is not the same as the backwards compatible \"setup\"", "continue subtbl = self.table[output_cols][indx] # Write the file ff.write('##########################################################\\n') ff.write('Setup", "table object that would have been written/printed if ``output ==", "+= 1 # Check if any of the other files", "if 'calib' in self.keys() and 'calibbit' in self.keys() and not", "nominally follow an execution of # pypeit_setup. 
If the user", "to the relevant fits file metadata used during the reduction.", "= -99999.0 isort = np.argsort(mjd) subtbl = subtbl[isort] subtbl.write(ff, format='ascii.fixed_width')", "data files to read. This function writes the columns selected", "flux_calib from pypeit.core import parse from pypeit.core import meta from", "the input metadata object. .. todo:: - Here's where we", "Raises: PypeItError: Raised if the `framebit` column is not set", "self.table[good] def _set_calib_group_bits(self): \"\"\" Set the calibration group bit based", "or to the screen. The method allows you to set", "the list of the unique configuration names. This provides just", "Array of columns to be used in the fits table>", "or to be unique for each science or standard frame,", "any row of the specified frame type is included. index", "identifier. See the keys for :class:`pypeit.core.framematch.FrameTypeBitMask`. calib_ID (:obj:`int`, optional): Index", "table> \"\"\" # Columns for output columns = self.spectrograph.pypeit_file_keys() extras", "construct the configuration. If None, the `configuration_keys` of :attr:`spectrograph` is", "it. And it may be something to put in the", "and match_type: for key in existing_keys: if len(self.table[key].shape) > 1:", "with the rows that contain the frames of the requested", "| ? Using idname above gets overwritten by # this", "'dec'] and not radec_done: ras, decs = meta.convert_radec(usrdata['ra'][~nones].data, usrdata['dec'][~nones].data) usrdata['ra'][~nones]", "without setup and calibbit; ' 'run set_configurations and set_calibration_groups.') det_name", "Turn this into a DataContainer # Initially tried to subclass", "# invalid key, at least for now the DEIMOS image", "integers are unique self['calibbit'][i] = self.calib_bitmask.turn_on(self['calibbit'][i], grp) def _check_calib_groups(self): \"\"\"", "to False to report a warning and continue. 
usrdata (astropy.table.Table,", "the file make_pypeit_file(ofiles[j], self.spectrograph.name, [], cfg_lines=cfg_lines, setup_lines=setup_lines, sorted_files=data_lines, paths=paths) #", "groups are valid. This currently only checks that the science", "= self.table['setup'] == 'None' if not np.any(not_setup): # All are", "group should only contain # frames from a single configuration", "tried to subclass this from astropy.table.Table, but that # proved", "# Initialize: Flag frames with the correct ID name or", "Find the unique configurations in this group, ignoring any #", "star to each science frame # TODO: Should this be", "the full set of metadata exprng = self.par['scienceframe']['exprng'] if ftype", "key, at least for now the DEIMOS image reader will", "def _repr_html_(self): return self.table._base_repr_(html=True, max_width=-1, descr_vals=['PypeItMetaData: spectrograph={0}, length={1}\\n'.format( self.spectrograph.name, len(self))])", "a problem with the reading the header for any of", "del _cfg['None'] return _cfg def unique_configurations(self, force=False, copy=False, rm_none=False): \"\"\"", "be called by any method outside of this class; use", "Returns: bool: True if the row matches the input configuration", "links, assuming primary doc root is up one directory ..", "but the bits have not (likely because the # data", "in ['all', 'None']: # No information, keep going continue #", "Return row indices with the first occurence of these configurations.", "each configuration, determine if any of the frames with #", "= self.table[key].dtype # Deal with None's properly nones = usrdata[key]", "requested and 'calib' doesn't exist yet if 'calib' not in", "[ 'directory', 'filename', 'instrume' ] def keys(self): return self.table.keys() def", "configurations that cannot be reduced by PypeIt' \\ ' and", "= meta.convert_radec(usrdata['ra'][~nones].data, usrdata['dec'][~nones].data) usrdata['ra'][~nones] = ras.astype(dtype) usrdata['dec'][~nones] = decs.astype(dtype) 
radec_done", "= self.calib_bitmask.turn_on(self['calibbit'][i], grp) def _check_calib_groups(self): \"\"\" Check that the calibration", "name='frametype') # KLUDGE ME # # TODO: It would be", "representation of the groups self._set_calib_group_bits() # Check that the groups", "# Find the frames in this group #in_group = self.find_calib_group(i)", "the 'setup' column does not exist, fill the configuration setup", "\"\"\" if 'setup' not in self.keys(): msgs.error('Cannot write sorted instrument", "for ifile,ftypes in user.items(): indx = self['filename'] == ifile type_bits[indx]", "string is interpreted as the name of an ascii file", "that configuration with a given calibration group. .. todo:: -", "interpreted as the name of an ascii file to which", "configs=None): \"\"\" Write a pypeit file in data-table format. The", "astropy.table.Table, not a PypeItMetaData object. def __getitem__(self, item): return self.table.__getitem__(item)", "'configuration with a valid letter identifier; i.e., the ' 'configuration", "self.keys() and 'calibbit' not in self.keys() and not force: self._set_calib_group_bits()", "current directory. If the output directory does not exist, it", "frames without a known type. calib_ID (:obj:`int`, optional): Index of", "raise ValueError('CODING ERROR: Found high-dimensional column.') #embed(header='372 of metadata') elif", "matching meta data defined, so just set all # the", "indx = indx[rm] # Restrict _configs = None if configs", "['A','C']). See :attr:`configs`. Raises: PypeItError: Raised if the 'setup' isn't", "class PypeItMetaData: \"\"\" Provides a table and interface to the", "cfg_limits is None: # No values specified, so we're done", "appropriate for NIR data? 
\"\"\" is_science = self.find_frames('science') for i", "self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs, write_manual=write_manual) # Write the pypeit files ofiles = [None]*len(cfg_keys)", "# the ignored frame types should be assigned to it:", "' {0}\\n'.format(file) msgs.warn(msg) # Return return data # TODO: In", "\\'filename\\' column!') # Make sure the data are correctly ordered", "have values that will yield good PypeIt reductions. Any frames", "frame type. If the index is provided, the frames must", "directly. The valid values for configuration keys is set by", "Return the unique instrument configurations. If run before the ``'setup'``", "below to not crash if some files have None in", "ignore (:obj:`list`, optional): Ignore configurations in the provided list. return_index", "> 0: msgs.warn('Ignoring {0} frames with configuration set to None.'.format(", "if indx != ncol-1: tbl_cols.insert(ncol-1, tbl_cols.pop(indx)) # Copy the internal", "return self.frame_paths(self.find_frames(ftype, calib_ID=calib_ID)) def frame_paths(self, indx): \"\"\" Return the full", "(:obj:`int`): The 0-indexed row used to construct the key. det", "order so I can always insert at the beginning of", "Not a valid column.') # Ignore any NoneTypes indx =", "Check that the metadata are valid for this column. indx", "(:obj:`str`, optional): Output signature or file name. If None, the", "Returns: list: List of file paths that match the frame", "using checks specified by the provided spectrograph class. For the", "ofile = None if output in [None, 'table'] else output", "continue. Attributes: spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): The spectrograph used to collect the", "use to construct the configuration. 
If None, the `configuration_keys` of", "frames to this (first) configuration self.table['setup'][indx] = cfg_key continue #", "# # Or we can force type_names() in bitmask to", "= table.Column(type_bits, name='framebit') t = table.Table([ftype_colm, fbits_colm]) if merge: self['frametype']", "neither are provided the table is instantiated without any data.", "grouping of frames with the same configuration. .. todo:: -", "First run set_calibration_groups.') return self.calib_bitmask.flagged(self['calibbit'].data, grp) def find_frame_calib_groups(self, row): \"\"\"", "if len(set(cfg.keys()) - set(self.keys())) > 0: msgs.error('Configuration {0} defined using", "related to # this change? # http://docs.astropy.org/en/stable/table/access_table.html#bytestring-columns-in-python-3 # # See", "The 0-indexed row used to construct the key. det (:obj:`int`,", "frames can only be assigned to a single calibration group.')", "{1}.'.format( meta_key, value)) data[meta_key].append(value) msgs.info('Added metadata for {0}'.format(os.path.split(ifile)[1])) # JFH", "not a science frame is # assigned to that group.", "else [configs] cfg_keys = [key for key in cfg.keys() if", "is provided. \"\"\" return self.frame_paths(self.find_frames(ftype, calib_ID=calib_ID)) def frame_paths(self, indx): \"\"\"", "in indx: self['calib'][i] = calibs # Set the bits based", "probably a standard star foundstd = flux_calib.find_standard_file(ra, dec, check=True) b", "otherwise raise an exception. header (:obj:`str`, :obj:`list`, optional): One or", "the rtol value for float meta (e.g. dispangle) Returns: bool:", "if 'bkg_id' not in self.keys(): self['bkg_id'] = -1 if assign_objects", "\"\"\"Return the number of calibration groups.\"\"\" return None if self.calib_bitmask", "just nominally sets the calibration # group based on the", "crash if some files have None in # their MJD.", "of each string. 
Ignored if ``output`` does not specify an", "should only contain # frames from a single configuration if", "frame. \"\"\" return self.calib_bitmask.flagged_bits(self['calibbit'][row]) # TODO: Is there a reason", "the type bits. See :class:`pypeit.core.framematch.FrameTypeBitMask` for the allowed frame types.", "configuration if len(setup) != 1: msgs.error('Each calibration group must be", "calibbit. sort_col (:obj:`str`, optional): Name of the column to use", "exception if framebit is not # set... sci_std_idx = np.where(np.any([self.find_frames('science'),", "usr_row = usrdata[idx] # Add the directory and file name", "searching for the maximum number # provided, regardless of whether", "optional): A user provided set of data used to supplement", "frames with the same configuration. .. todo:: - Maintain a", "['None']*len(_files) # Build the table for idx, ifile in enumerate(_files):", "and file name to the table data['directory'][idx], data['filename'][idx] = os.path.split(ifile)", "designated as a viable mosaic for :attr:`spectrograph`; see :func:`~pypeit.spectrographs.spectrograph.Spectrograph.allowed_mosaics`. Returns:", "of # PypeItMetaData? def row_match_config(row, config, spectrograph): \"\"\" Queries whether", "the build of the table based on user-provided data, typically", "if configs is None or configs == 'all' or configs", "with this single identifier. Ignores other inputs. Raises: PypeItError: Raised", "(self.table['setup'] == 'None') & self.find_frames(ftype) if not np.any(indx): continue if", "'setup' not in self.keys(): msgs.error('Cannot write sorted instrument configuration table", "self['binning'][row] skey = 'Setup {}'.format(self['setup'][row]) # Key names *must* match", "same name. ignore (:obj:`list`, optional): Ignore configurations in the provided", "the configuration itself. Returns: dict: The pypeit setup dictionary with", "the function with ``force=True``. 
Args: configs (:obj:`dict`, optional): A nested", "and 'calibbit' in self.keys() and not force: return # Groups", "Allow some frame types to be used in all calibration", "name. If None, the table contents are printed to the", "argument of instantiation of PypeItMetaData.') usr_row = usrdata[idx] # Add", "and not radec_done: ras, decs = meta.convert_radec(usrdata['ra'][~nones].data, usrdata['dec'][~nones].data) usrdata['ra'][~nones] =", "If an object exists within 20 arcmins of a listed", "because the latter determines and provides the configurations themselves. This", "'science' or both? if 'ra' not in self.keys() or 'dec'", "for j,setup in enumerate(cfg_keys): # Create the output directory root", "msgs.info(\"Typing completed!\") return self.set_frame_types(type_bits, merge=merge) def set_pypeit_cols(self, write_bkg_pairs=False, write_manual=False): \"\"\"", "get_setup(self, row, det=None, config_only=False): \"\"\" Construct the setup dictionary. ..", "the number of calibration groups.\"\"\" return None if self.calib_bitmask is", "lines setup_lines = dict_to_lines({'Setup {0}'.format(setup): utils.yamlify(cfg[setup])}, level=1) # Get the", "See also: # # http://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table.convert_bytestring_to_unicode # # Or we can", "avoid this step by setting `match_type=False`. Args: usrdata (:obj:`astropy.table.Table`): A", "not defined, or if `global_frames` is provided but the frame", "ofiles = [None]*len(cfg_keys) for j,setup in enumerate(cfg_keys): # Create the", "by :func:`unique_configurations`. force (:obj:`bool`, optional): Force the configurations to be", "the same name. ignore (:obj:`list`, optional): Ignore calibration groups in", "'None' in configs: configs.remove('None') # Ignore frames with undefined configurations", "numpy.array: The list of unique setup names. A second returned", "in place. 
This function can be used to initialize columns", "self.table['setup'][indx] = cfg_key def clean_configurations(self): \"\"\" Ensure that configuration-defining keywords", "the key. det (:obj:`int`, :obj:`tuple`, optional): The 1-indexed detector number(s).", "print and which column to use for sorting. Args: output", "with ``force=True``. Args: configs (:obj:`dict`, optional): A nested dictionary, one", "regardless of whether or not a science frame is #", "The index of the table row to use to construct", "columns to be included in the fitstbl (nearly the complete", "specify any validation checks. par (:class:`pypeit.par.pypeitpar.PypeItPar`): PypeIt parameters used to", "' 'run set_configurations and set_calibration_groups.') det_name = self.spectrograph.get_det_name(det) return f\"{self['setup'][row]}_{self['calibbit'][row]}_{det_name}\"", "isinstance(usrdata, table.Table): raise TypeError('Must provide an astropy.io.table.Table instance.') if 'filename'", "a call to clean_configurations? I didn't add it # here,", "to None.'.format( np.sum(ignore))) self.configs = {} for i in range(len(uniq)):", "consider reformatting it. And it may be something to put", "something to put in the relevant spectrograph class. Args: row", "calibration bit number, and the detector number is provided as", "the metadata table, are directory, filename, frametype, framebit, setup, calib,", "on the processing level of the metadata table, are directory,", "NIR data? \"\"\" is_science = self.find_frames('science') for i in range(len(self)):", "(:obj:`astropy.table.Table`, optional): A user provided set of data used to", "msgs.error('File name list does not match user-provided metadata table. See", "keys for :class:`pypeit.core.framematch.FrameTypeBitMask`. calib_ID (:obj:`int`, optional): Index of the calibration", "the internal table. See above. 
Raises: TypeError: Raised if `usrdata`", "manual extraction configs (:obj:`str`, :obj:`list`, optional): One or more strings", "{0}'.format( ', '.join(tbl_cols[badcol]))) # Make sure the basic parameters are", "dict_to_lines({'Setup {0}'.format(setup): utils.yamlify(cfg[setup])}, level=1) # Get the paths in_cfg =", "= -1 if 'bkg_id' not in self.keys(): self['bkg_id'] = -1", "If all of 'comb_id' values are less than 0 (meaning", "in np.unique(self['calibbit'].data): cbit = int(icbit) # for yaml # Skip", "all rows are written. Shape must match the number of", "columns called `comb_id` and `bkg_id` that identify object and background", "check that the configuration values were # correctly assigned in", "that it matches with what can be read from the", "not in self.keys(): msgs.error('Cannot get setup names; run set_configurations.') #", "exist, fill the configuration setup columns with this single identifier.", "self.calib_bitmask is None else self.calib_bitmask.nbits def set_calibration_groups(self, global_frames=None, default=False, force=False):", "cfg_indx == len(cfg_iter): msgs.error('Cannot assign more than {0} configurations!'.format(len(cfg_iter))) self.configs[cfg_iter[cfg_indx]]", "# All values good, so we're done return # Alert", "None in # their MJD. This is the desired behavior", "Find the files without any types indx = np.logical_not(self.type_bitmask.flagged(type_bits)) if", "method allows you to set the columns to print and", "'w') as f: if header is not None: _header =", "to match the data type in `usrdata` to the type", "directory and file name to the table data['directory'][idx], data['filename'][idx] =", "first file to set the first unique configuration self.configs =", "values must be None or a string.'.format( self.spectrograph.__class__.__name__) # Get", "science frame. Args: ftype (str): The frame type identifier. 
See", "Science frames can only have one calibration group # Assign", "See :class:`pypeit.core.framematch.FrameTypeBitMask` for the allowed frame types. \"\"\" # Checks", "completed!\") return self.set_frame_types(type_bits, merge=merge) def set_pypeit_cols(self, write_bkg_pairs=False, write_manual=False): \"\"\" Generate", "configurations (setups) and the association of each frame from that", "of ``PypeIt`` files generated. \"\"\" # Set output path if", "is not an attribute of # PypeItMetaData? def row_match_config(row, config,", "the `usrdata` column to the existing data type. If it", "dtype=self.type_bitmask.minimum_dtype()) # Use the user-defined frame types from the input", "'calibbit' column is not defined. \"\"\" if 'calibbit' not in", "iterations through the # upper-case letters: A, B, C, etc.", "group based on the configuration. This will change! # The", "time.Time(_obstime, format='isot') dtime = datetime.datetime.strptime(tiso.value, '%Y-%m-%dT%H:%M:%S.%f') return '{0}-{1}_{2}_{3}{4}'.format(self['filename'][row].split('.fits')[0], self['target'][row].replace(\" \",", "in _configs] if len(cfg_keys) == 0: msgs.error('No setups to write!')", "to write to the top of the file, on string", "TODO: Should this be 'standard' or 'science' or both? if", "in usrdata['filename']] # Convert types if possible existing_keys = list(set(self.table.keys())", "# JFH Changed the below to not crash if some", "column!') # Make sure the data are correctly ordered srt", "i in range(len(uniq)): if ignore[i]: continue self.configs[uniq[i]] = self.get_configuration(indx[i]) msgs.info('Found", "continue if 'frametype' not in self.keys(): msgs.error('To account for ignored", "the unique configurations. If :func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames` does not return None and", "to set the code behavior. 
files (:obj:`str`, :obj:`list`, optional): The", "array ftype_colmA = table.Column(self.type_bitmask.type_names(type_bits), name='frametype') # KLUDGE ME # #", "pypeit metadata keyword, specific to :attr:`spectrograph`. Additional valid keywords, depending", "and calibbit; ' 'run set_configurations and set_calibration_groups.') det_name = self.spectrograph.get_det_name(det)", "Copy the internal table so that it is unaltered output_tbl", "in the fits table> \"\"\" # Columns for output columns", "# correct type... if int(str(ftype_colmA.dtype)[2:]) < 9: ftype_colm = table.Column(self.type_bitmask.type_names(type_bits),", "length. Returns: list: List of the full paths of one", "the configurations to be reset. fill (:obj:`str`, optional): If the", "# Grab output columns output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs, write_manual=write_manual) cfgs =", "in setup: setup.remove('None') # Make sure that each calibration group", "groups to the set of objects (science or standard frames)", "called for a preconstructed # pypeit file, which should nominally", "# the ignore_frames indx = np.arange(len(self)) ignore_frames = self.spectrograph.config_independent_frames() if", "\\ ' and will be removed from the metadata table", "does not exist, fill the configuration setup columns with this", "exiting table. Returns: :obj:`astropy.table.Table`: A Table with two columns, the", "set_calibration_groups.') return self.calib_bitmask.flagged(self['calibbit'].data, grp) def find_frame_calib_groups(self, row): \"\"\" Find the", "np.where(indx)[0][np.argsort(output_tbl[sort_col][indx].data)]) output_tbl = output_tbl[tbl_cols][srt] else: output_tbl = output_tbl[tbl_cols] if output", "List of file paths that match the frame type and", "with the associated frame type. 
If the index is provided,", "of an ascii file to which to write the table", "self.keys(): msgs.error('To ignore frames, types must have been defined; run", "Each selected column must be a valid pypeit metadata keyword,", "When constructing the :class:`pypeit.metadata.PypeItMetaData` object, include two columns called `comb_id`", "does not exist, it is created. cfg_lines (:obj:`list`, optional): The", "continue indx = np.where([t == col for t in tbl_cols])[0][0]", "\\ 'CODING ERROR: valid_configuration_values is not correctly defined ' \\", "are less than 0 (meaning they're unassigned), the combination groups", "# No values specified, so we're done return good =", "contains the column in `usrdata`, the function will try to", "in range(len(uniq)): if ignore[i]: continue self.configs[uniq[i]] = self.get_configuration(indx[i]) msgs.info('Found {0}", "not been defined yet. \"\"\" # Set the default if", "dtime = datetime.datetime.strptime(tiso.value, '%Y-%m-%dT%H:%M:%S.%f') return '{0}-{1}_{2}_{3}{4}'.format(self['filename'][row].split('.fits')[0], self['target'][row].replace(\" \", \"\"), self.spectrograph.camera,", "match configuration_keys() for spectrographs setup = {skey: {'--': {'disperser': {'dispname':", "bits based on the string representation of the groups self._set_calib_group_bits()", "force (:obj:`bool`, optional): Force the calibration groups to be reconstructed", "provide either the file list from which to grab the", "(:obj:`int`): The 0-indexed row used to construct the setup. det", "not set in table; cannot use it for file typing.')", "str(i) # Allow some frame types to be used in", "return self.table.__len__() def __repr__(self): return self.table._base_repr_(html=False, descr_vals=['PypeItMetaData:\\n', ' spectrograph={0}\\n'.format( self.spectrograph.name),", "the frames of each type in this group cfg[setup[0]][cbit] =", "table is instantiated without any data. 
Args: spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): The", "used when building the metadata from the fits files. strict", "valid keywords, depending on the processing level of the metadata", "# Build table self.table = table.Table(data if files is None", "'dispname' not in self.keys() else self['dispname'][row] dispangle = 'none' if", "See the keys for :class:`pypeit.core.framematch.FrameTypeBitMask`. If set to the string", "else: # The np.all allows for arrays in the Table", "It may be (see get_frame_types) and I'm just not using", "this single identifier. \"\"\" self.get_frame_types(user=frametype) # TODO: Add in a", "or as a comma-separated string. If None or ``'all'``, all", "have multiple types, the types should be provided as a", "in self.keys(): self.table[c] = self.table[c].astype(t) def _build(self, files, strict=True, usrdata=None):", "indx def find_frame_files(self, ftype, calib_ID=None): \"\"\" Return the list of", "from astropy import table, coordinates, time, units from pypeit import", "on string per file line; ``# `` is added to", "in self.keys() or 'calibbit' not in self.keys(): msgs.error('Cannot provide master", "Iterate through the calibration bit names as these are the", "self.calib_bitmask = None # Initialize columns that the user might", "(:obj:`bool`, optional): When constructing the :class:`pypeit.metadata.PypeItMetaData` object, include two columns", "column to use for sorting the output. If None, the", "screen. If ``'table'``, the table that would have been printed/written", "f.write('\\n') f.write('\\n'.join(data_lines)) f.write('\\n') # Just to be explicit that the", "been defined. copy (:obj:`bool`, optional): Return a deep copy of", "default format. 
Raises: PypeItError: Raised if the 'setup' isn't been", "ignore_frames is None: # Nope, we're still done return #", "== None] # Print status message msg = 'Time invalid", "to be explicit that the method returns None when writing", "the frame types have not been set yet. \"\"\" #", "a table of frame types from the input metadata object.", "zip(self['directory'][ftype_in_group], self['filename'][ftype_in_group])] # Write it ff = open(ofile, 'w') ff.write(yaml.dump(utils.yamlify(cfg)))", "from pypeit.core import meta from pypeit.io import dict_to_lines from pypeit.par", "is only used when building the metadata from the fits", "return_index (:obj:`bool`, optional): Return row indices with the first occurence", "types (:obj:`list`): List of types \"\"\" for c,t in zip(columns,", "self.par['rdx']['ignore_bad_headers']) if isinstance(value, str) and '#' in value: value =", "comma-separated string. If None or ``'all'``, all columns in are", "by any method outside of this class; use :func:`unique_configurations` instead.", "MJD. This is the desired behavior since if there are", "types are overwitten by the provided type. \"\"\" if not", "if configs is None else configs for k, cfg in", "the relevant metadata for each fits file to use in", "files to read. This function writes the columns selected by", "return the configurations matched to this provided string or list", "signature or file name. If None, the table contents are", "with a given calibration group. .. todo:: - This is", "to use for sorting the output. If None, the table", "we're done return # Alert the user that some of", "the pypeit file. The 'calibbit' column is actually what is", "# Find the unique values of meta for this configuration", "not in self.keys() or 'calibbit' not in self.keys(): msgs.error('Cannot provide", "that you want all the configurations? Or can we #", "`numpy.ndarray`_: Array of columns to be used in the fits", "in meta_data_model.keys(): # Is this meta data?? 
dtype = meta_data_model[key]['dtype']", "more than {0} configurations!'.format(len(cfg_iter))) self.configs[cfg_iter[cfg_indx]] = self.get_configuration(i, cfg_keys=cfg_keys) cfg_indx +=", "from the pypeit file. The 'calibbit' column is actually what", "msg += ' {0}\\n'.format(file) msgs.warn(msg) # Return return data #", "in :attr:`table`. For frames that have multiple types, the types", "an ascii file to which to write the table contents.", "file in bad_files: msg += ' {0}\\n'.format(file) msgs.warn(msg) # Return", "configuration for {0} '.format(ftype) + 'frames, configuration {0} does not", "in bad_files: msg += ' {0}\\n'.format(file) msgs.warn(msg) # Return return", "the specified frame type is included. index (:obj:`bool`, optional): Return", "return self.calib_bitmask.flagged_bits(self['calibbit'][row]) # TODO: Is there a reason why this", "read any of the headers. Set to False to report", "= t['framebit'] return t def edit_frame_type(self, indx, frame_type, append=False): \"\"\"", "to return if columns in [None, 'all']: tbl_cols = list(self.keys())", "group, ignoring any # undefined ('None') configurations #setup = np.unique(self['setup'][in_group]).tolist()", "os.path.basename(ifile) != usrdata['filename'][idx]: msgs.error('File name list does not match user-provided", "? Using idname above gets overwritten by # this if", "flagging all as true # indx = self['idname'] == self.spectrograph.idname(ftype)", ":func:`~pypeit.spectrographs.spectrograph.Spectrograph.valid_configuration_values`. \"\"\" cfg_limits = self.spectrograph.valid_configuration_values() if cfg_limits is None: #", "to the beginning of each string. Ignored if ``output`` does", "the table for idx, ifile in enumerate(_files): # User data", "[int, int, str]) # Initialize internal attributes self.configs = None", "going to be # removed msg = 'The following frames", "may be (see get_frame_types) and I'm just not using it...", "is not a key in the provided table. 
\"\"\" meta_data_model", "raise ValueError('The following columns are not valid: {0}'.format( ', '.join(tbl_cols[badcol])))", "any of the configurations are not set not_setup = self.table['setup']", "_configs.items(): if len(set(cfg.keys()) - set(self.keys())) > 0: msgs.error('Configuration {0} defined", "mosaic for :attr:`spectrograph`; see :func:`~pypeit.spectrographs.spectrograph.Spectrograph.allowed_mosaics`. Returns: :obj:`str`: Master key with", "&= np.logical_not(self.find_frames(ftype)) indx = indx[use] if len(indx) == 0: msgs.error('No", "Loop over the frame types for i, ftype in enumerate(self.type_bitmask.keys()):", "not None and os.path.isfile(ofile) and not overwrite: raise FileExistsError(f'{ofile} already", "index where it first occurs. This is different from :func:`unique_configurations`", "self.configs = {} self.configs[cfg_iter[cfg_indx]] = {} msgs.info('All files assumed to", "= header if isinstance(header, list) else [header] for h in", "determine the calibration # group if 'setup' not in self.keys():", "in indx: msg += ' {0}\\n'.format(self['filename'][i]) msgs.warn(msg) # And remove", "is the main configuration file for PypeIt, configuring the control-flow", "# set... sci_std_idx = np.where(np.any([self.find_frames('science'), self.find_frames('standard')], axis=0))[0] self['comb_id'][sci_std_idx] = np.arange(len(sci_std_idx),", "if copy else self.configs if rm_none and 'None' in _cfg.keys():", "# Include a combination of instrument-specific checks using # combinations", "elsewhere # Check if os.path.basename(ifile) != usrdata['filename'][idx]: msgs.error('File name list", "frames from a single configuration if len(setup) != 1: msgs.error('Each", "Select and sort the data by a given column if", "only determined if :attr:`configs` has not yet been defined. 
copy", "_configs = self.unique_configurations() if configs is None else configs for", "of crashing out if there are unidentified files, leave without", "self.unique_configurations() if configs is None else configs for k, cfg", "indices of the first occurrence of these setups, if requested.", "frame that has an # invalid key, at least for", "need to specify 'all' here? Can't `configs is # None`", "list: List of file paths that match the frame type", "# combinations of the full set of metadata exprng =", "not valid, or if the column to use for sorting", "on certain columns. .. note:: :attr:`table` is edited in place.", "reduction. \"\"\" def __init__(self, spectrograph, par, files=None, data=None, usrdata=None, strict=True):", "strings used to select the configurations to include in the", "the table with the frames to return. Can be an", "if not np.all(indx): msgs.warn('Found frames with invalid {0}.'.format(key)) good &=", "to be the key and value of the dictionary, respectively.", "fbits_colm]) if merge: self['frametype'] = t['frametype'] self['framebit'] = t['framebit'] return", "# TODO: In this implementation, slicing the PypeItMetaData object #", "determine if any of the frames with # the ignored", "and only one instrument ' 'configuration with a valid letter", "identification.') b = self.type_bitmask.turn_off(b, flag='standard') continue # If an object", "If None, constructed using :func:`construct_obstime`. Returns: str: The root name", "to be used with multiple calibration groups. Args: row (:obj:`int`):", "# fault. self.set_configurations(fill=setup) self.set_calibration_groups(default=True) self.set_combination_groups() def get_configuration(self, indx, cfg_keys=None): \"\"\"", "if rows is not None: output_tbl = output_tbl[rows] # Select", "set if 'setup' in self.keys() and not force: return if", "respectively. 
The number of keys therefore *must* match the number", "set the columns to print and which column to use", "the frames of this type that match any of the", "= [] for k in config.keys(): # Deal with floating", "if metakey is None: # No matching meta data defined,", "spectrograph={0}\\n'.format( self.spectrograph.name), ' length={0}\\n'.format(len(self))]) def _repr_html_(self): return self.table._base_repr_(html=True, max_width=-1, descr_vals=['PypeItMetaData:", "dict: Dictionary with the data to assign to :attr:`table`. \"\"\"", "value.replace('#', '') msgs.warn('Removing troublesome # character from {0}. Returning {1}.'.format(", "rows is not None: output_tbl = output_tbl[rows] # Select and", "configurations!') # Get the list of keys to use cfg_keys", "if ``output`` does not specify an output file. Returns: `astropy.table.Table`:", "if len(user.keys()) != len(self): raise ValueError('The user-provided dictionary does not", "the 'calibbit' column is not defined. \"\"\" if 'calibbit' not", "pypeit file, which should nominally follow an execution of #", "reductions. Any frames that do not are removed from :attr:`table`,", "\"\"\" Construct the master key for the file in the", "for k in self.spectrograph.meta.keys()} data['directory'] = ['None']*len(_files) data['filename'] = ['None']*len(_files)", "match the frame type and science frame ID, if the", "ME # # TODO: It would be good to get", "'filename', 'directory']: if col not in tbl_cols: continue indx =", "unique configuration names. This provides just the list of setup", "to overwrite.'.format(ofile)) # Grab output columns output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs, write_manual=write_manual)", "directory, filename, frametype, framebit, setup, calib, and calibbit. 
sort_col (:obj:`str`,", "# User data (for frame type) if usrdata is None:", "of metadata') elif key in meta_data_model.keys(): # Is this meta", "#embed(header='372 of metadata') elif key in meta_data_model.keys(): # Is this", "= list(string.ascii_uppercase) + double_alphabet cfg_indx = 0 # TODO: Placeholder:", "it in the metadata table. The internal table is edited", "reformatting/removing it. Args: ofile (:obj:`str`): Name for the output sorted", "not exist, fill the configuration setup columns with this single", "tbl_cols = columns if isinstance(columns, list) else columns.split(',') badcol =", "import make_pypeit_file from pypeit.bitmask import BitMask # TODO: Turn this", "bad_files = filenames[mjd == None] # Print status message msg", "table to edit frame_type (:obj:`str`, :obj:`list`): One or more frame", "same as included in the configuration column (A, B, C,", "self.set_frame_types(type_bits, merge=merge) def set_pypeit_cols(self, write_bkg_pairs=False, write_manual=False): \"\"\" Generate the list", "routines. Args: ignore (:obj:`list`, optional): Ignore configurations in the provided", "the strings will be truncated at 4 characters. self.table['calib'] =", "frames of this type without a # configuration indx =", "if isinstance(header, list) else [header] for h in _header: f.write(f'#", "the list of files with a given frame type. The", "'calib' column already exists. Raises: PypeItError: Raised if 'setup' column", "rm_none=False): \"\"\" Return the unique instrument configurations. If run before", "hand. Args: indx (:obj:`int`): The 0-indexed row in the table", "# Write the pypeit files ofiles = [None]*len(cfg_keys) for j,setup", "in _configs.keys(): in_cfg = self.table['setup'] == cfg_key for ftype, metakey", "for {0} files.\\n'.format(len(bad_files)) msg += 'Continuing, but the following frames", "The pypeit setup dictionary with the default format. Raises: PypeItError:", "yet. 
\"\"\" # Configurations have already been set if 'setup'", "should be provided as a string with comma-separated types. setup", "configuration, calibration group(s), and detector. Raises: PypeItError: Raised if the", "the :class:`pypeit.metadata.PypeItMetaData` object, include two columns called `comb_id` and `bkg_id`", "done elsewhere # Check if os.path.basename(ifile) != usrdata['filename'][idx]: msgs.error('File name", "frames of this type that match any of the #", "valid. This currently only checks that the science frames are", "(str): The frame type identifier. See the keys for :class:`pypeit.core.framematch.FrameTypeBitMask`.", "a string type to make sure that it matches with", "metadata table. The internal table is edited *in place*. If", "standard star to each science frame # TODO: Should this", "msgs.error('Configuration {0} defined using unavailable keywords!'.format(k)) self.table['setup'] = 'None' nrows", "== 0: msgs.error('No frames to use to define configurations!') #", "few instruments (e.g. VLT) where meta data may not be", "the metadata table generated within PypeIt. match_type (:obj:`bool`, optional): Attempt", "should consider reformatting/removing it. Args: ofile (:obj:`str`): Name for the", "frametype, framebit, setup, calib, and calibbit. sort_col (:obj:`str`, optional): Name", "'decker' not in self.keys() else self['decker'][row] slitwid = 'none' if", "the provided list. 
Raises: PypeItError: Raised if the 'setup' or", "be either undefined or to be unique for each science", "# TODO: Should this be 'standard' or 'science' or both?", "The table object that would have been written/printed if ``output", "group and background group columns, and/or to initialize the combination", "before the ``'setup'`` column is initialized, this function determines the", "msgs.error('Science frames can only be assigned to a single calibration", "with io.StringIO() as ff: output_tbl.write(ff, format='ascii.fixed_width') data_lines = ff.getvalue().split('\\n')[:-1] if", "# Validate, print out a warning if there is problem", "provided. Args: ftype (str): The frame type identifier. See the", "unaltered output_tbl = self.table.copy() # Select the output rows if", "columns, and/or to initialize the combination groups to the set", "merge (:obj:`bool`, optional): Merge the frame typing into the exiting", "indx: msg += ' {0}\\n'.format(self['filename'][i]) msgs.warn(msg) # And remove 'em", "indx = self.type_bitmask.flagged(type_bits, flag='standard') for b, f, ra, dec in", "with configuration set to None.'.format( np.sum(ignore))) self.configs = {} for", "row of the specified frame type is included. index (:obj:`bool`,", "f: if header is not None: _header = header if", "TODO: Add in a call to clean_configurations? I didn't add", "If copy is True, this is done *after* :attr:`configs` is", "\"\"\" Construct the root name primarily for PypeIt file output.", "# here, because this method is only called for a", "self.par['calibrations']['{0}frame'.format(ftype)]['exprng'] # TODO: Use & or | ? Using idname", "Set columns that the user *might* add .. note:: :attr:`table`", "usrdata[key][srt] def finalize_usr_build(self, frametype, setup): \"\"\" Finalize the build of", "with configuration, calibration group(s), and detector. Raises: PypeItError: Raised if", "types should be provided as a string with comma-separated types.", "Construct the setup dictionary. .. 
todo:: - This is for", "configs is None else np.atleast_1d(configs) # TODO: Why do we", "types and bits. Args: type_bits (numpy.ndarray): Integer bitmask with the", "provided table to supplement or overwrite the metadata. If the", "PypeItMetaData object # will return an astropy.table.Table, not a PypeItMetaData", "frames to use to define configurations!') # Get the list", "the frame type. If False, all existing frame types are", "not exist, they're set to -1. Args: assign_objects (:obj:`bool`, optional):", "Returns: numpy.ndarray: A boolean array, or an integer array if", "self.table.__getitem__(item) def __setitem__(self, item, value): return self.table.__setitem__(item, value) def __len__(self):", "ERROR: Found high-dimensional column.') #embed(header='372 of metadata') elif key in", "each standard and science frame. \"\"\" if 'comb_id' not in", "or dec == 'None': msgs.warn('RA and DEC must not be", "tbl_cols = list(self.keys()) elif columns == 'pypeit': tbl_cols = self.set_pypeit_cols(write_bkg_pairs=True)", "'1,1' if 'binning' not in self.keys() else self['binning'][row] skey =", "be a twilight flat frame that was' + msgs.newline() +", "are iterations through the # upper-case letters: A, B, C,", "can only have one calibration group # Assign everything from", "in that column. 
This is used to set the internal", "utils from pypeit.core import framematch from pypeit.core import flux_calib from", "binning, # PypeIt orientation binning of a science image }", "setup def get_configuration_names(self, ignore=None, return_index=False, configs=None): \"\"\" Get the list", "def __repr__(self): return self.table._base_repr_(html=False, descr_vals=['PypeItMetaData:\\n', ' spectrograph={0}\\n'.format( self.spectrograph.name), ' length={0}\\n'.format(len(self))])", "column must be a valid pypeit metadata keyword, specific to", "in self.keys() and not force: return if 'setup' not in", "the group list grp = parse.str2list(self['calib'][i], ngroups) if grp is", "frame_paths(self, indx): \"\"\" Return the full paths to one or", "provided. \"\"\" return self.frame_paths(self.find_frames(ftype, calib_ID=calib_ID)) def frame_paths(self, indx): \"\"\" Return", "columns.split(',') badcol = [col not in all_cols for col in", "configurations. Otherwise, only return the configurations matched to this provided", "with the frame types. The length must match the existing", "Args: spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): The spectrograph used to collect the data", "used to set the frame type of each fits file.", "List of types \"\"\" for c,t in zip(columns, types): if", "optional): Return row indices with the first occurence of these", ":attr:`configs` with possible alterations. This method *should not* be called", "modify that attribute directly. The valid values for configuration keys", "is not None: if len(user.keys()) != len(self): raise ValueError('The user-provided", "table contents are printed to the screen. 
If ``'table'``, the", "single files _files = files if hasattr(files, '__len__') else [files]", "('None') configurations #setup = np.unique(self['setup'][in_group]).tolist() setup = np.unique(self['setup'][in_cbit]).tolist() if 'None'", "len(cfg_iter): msgs.error('Cannot assign more than {0} configurations!'.format(len(cfg_iter))) self.configs[cfg_iter[cfg_indx]] = self.get_configuration(i,", "frame types to ignore but the frame types have not", "The 0-indexed row used to construct the setup. det (:obj:`int`,", "cfgs.keys(): # Get the subtable of frames taken in this", "ncol-1: tbl_cols.insert(ncol-1, tbl_cols.pop(indx)) # Copy the internal table so that", "in zip(columns, types): if c in self.keys(): self.table[c] = self.table[c].astype(t)", "indx != 0: tbl_cols.insert(0, tbl_cols.pop(indx)) # Make sure the dithers", "is the same as the calibration bit number, and the", "'calibbit' not in self.keys(): msgs.error('Cannot write calibration groups without \\'setup\\'", "the configurations to include in the returned objects. If ``'all'``,", "file make_pypeit_file(ofiles[j], self.spectrograph.name, [], cfg_lines=cfg_lines, setup_lines=setup_lines, sorted_files=data_lines, paths=paths) # Return", "pypeit.core import meta from pypeit.io import dict_to_lines from pypeit.par import", "if self.calib_bitmask is None else self.calib_bitmask.nbits def set_calibration_groups(self, global_frames=None, default=False,", "it for file typing.') # Start msgs.info(\"Typing files\") type_bits =", "\"\"\" # Columns for output columns = self.spectrograph.pypeit_file_keys() extras =", "construct the configuration. cfg_keys (:obj:`list`, optional): The list of metadata", "the headers. 
Set to False to report a warning and", "PypeItError: Raised if the 'setup' isn't defined and split is", "if any of the configurations are not set not_setup =", "the data are correctly ordered srt = [np.where(f == self.table['filename'])[0][0]", "- assigns all frames to a single calibration group, if", "used to set the frame type of each file. The", "= output_tbl[tbl_cols] if output == 'table': # Instead of writing,", "does not exist - if the 'comb_id' column does not", "Write the output to an ascii file with open(ofile, 'w')", "as the backwards compatible \"setup\" dictionary. Args: indx (:obj:`int`): The", "the indices of the first occurrence of these setups, if", "self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs, write_manual=write_manual) cfgs = self.unique_configurations(copy=ignore is not None) if ignore", "(A, B, C, etc), the calibration group is the same", "level=1) # Get the paths in_cfg = self['setup'] == setup", "name. ignore (:obj:`list`, optional): Ignore calibration groups in the provided", "'calib' not in self.keys() and default: self['calib'] = '0' #", "header if isinstance(header, list) else [header] for h in _header:", "None: # No group selected continue # Assign the group;", "names types (:obj:`list`): List of types \"\"\" for c,t in", "Find the calibration groups associated with a specific frame. \"\"\"", "== self.table['filename'])[0][0] for f in usrdata['filename']] # Convert types if", "is mostly a convenience function for the writing routines. Args:", "table. data (table-like, optional): The data to include in the", "required=strict, usr_row=usr_row, ignore_bad_header = self.par['rdx']['ignore_bad_headers']) if isinstance(value, str) and '#'", "= dict_to_lines({'Setup {0}'.format(setup): utils.yamlify(cfg[setup])}, level=1) # Get the paths in_cfg", "Raised if overwrite is False and the file exists. \"\"\"", "in the spectrograph class definition. # This should probably go", "read from the file headers. 
The table must have a", "current maximum ngroups = max(l+1, ngroups) # Define the bitmask", ":attr:`configs`. If this attribute is not None, this function simply", "columns do not exist, they're set to -1. Args: assign_objects", "TypeError('Input parameter set must be of type PypeItPar.') self.type_bitmask =", "not None and cbit in ignore: continue # Find the", "grp is None: # No group selected continue # Assign", "to the pypeit file output. .. todo:: - This is", "if cfg_indx == len(cfg_iter): msgs.error('Cannot assign more than {0} configurations!'.format(len(cfg_iter)))", "else self['slitlen'][row] binning = '1,1' if 'binning' not in self.keys()", "det (:obj:`int`, :obj:`tuple`, optional): The 1-indexed detector number(s). If a", "self['dispangle'][row] dichroic = 'none' if 'dichroic' not in self.keys() else", "list of the unique configuration names. This provides just the", "meta.convert_radec(usrdata['ra'][~nones].data, usrdata['dec'][~nones].data) usrdata['ra'][~nones] = ras.astype(dtype) usrdata['dec'][~nones] = decs.astype(dtype) radec_done =", "the header keywords specified for the provided spectrograph. It is", "overwrite (:obj:`bool`, optional): Overwrite any existing file with the same", "in this call. # indx &= self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng) indx", "row. The master key is the combination of the configuration,", "it must match. If None, any row of the specified", "the provided `setup` - assigns all frames to a single", "'None' if not np.any(not_setup): # All are set, so we're", "flag=ftype) # Find the nearest standard star to each science", "self['frametype'] = t['frametype'] self['framebit'] = t['framebit'] return t def edit_frame_type(self,", "' and will be removed from the metadata table (pypeit", "or overwrite the metadata. If the internal table already contains", "then it is probably a standard star foundstd = flux_calib.find_standard_file(ra,", "file. 
This function: - sets the frame types based on", "can always insert at the beginning of the list for", "# Groups have already been set if 'calib' in self.keys()", "copy=False, rm_none=False): \"\"\" Convenience method to return :attr:`configs` with possible", "for this column. indx = np.isin(self[key], cfg_limits[key]) if not np.all(indx):", "self['filename'][indx])] def set_frame_types(self, type_bits, merge=True): \"\"\" Set and return a", "ff.write(yaml.dump(utils.yamlify(cfg))) ff.close() def write_pypeit(self, output_path=None, cfg_lines=None, write_bkg_pairs=False, write_manual=False, configs=None): \"\"\"", "included in the pypeit file. Each selected column must be", "edit_frame_type(self, indx, frame_type, append=False): \"\"\" Edit the frame type by", "data['filename'] = ['None']*len(_files) # Build the table for idx, ifile", "string values of the 'calib' column. \"\"\" # Find the", "configurations to be reset. fill (:obj:`str`, optional): If the 'setup'", "cfg_indx += 1 msgs.info('Found {0} unique configurations.'.format(len(self.configs))) return self._get_cfgs(copy=copy, rm_none=rm_none)", "to a configuration, the spectrograph defined frames that have been", "Add additional ``PypeIt`` columns for calib, comb_id and bkg_id write_manual", "(:class:`pypeit.par.pypeitpar.PypeItPar`): PypeIt parameters used to set the code behavior. If", "key in _configs] if len(cfg_keys) == 0: msgs.error('No setups to", "\"\"\" Set the calibration group bit based on the string", "if the 'calibbit' column is not defined. \"\"\" if 'calibbit'", "pypeit file in data-table format. The pypeit file is the", "the user edits back in a frame that has an", "checks using # combinations of the full set of metadata", "Return the list of files with a given frame type.", "with one calibration group. TODO: Is this appropriate for NIR", "optional): Force the configurations to be reset. fill (:obj:`str`, optional):", "master key for the file in the provided row. 
The", "with science frames without sky coordinates.') else: # TODO: Do", "set... sci_std_idx = np.where(np.any([self.find_frames('science'), self.find_frames('standard')], axis=0))[0] self['comb_id'][sci_std_idx] = np.arange(len(sci_std_idx), dtype=int)", "assigned to a configuration, the spectrograph defined frames that have", "to instead report a warning and continue. Attributes: spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):", "not np.any(not_setup): # All are set, so we're done return", "Create the output file name ofiles[j] = os.path.join(odir, '{0}.pypeit'.format(root)) #", "Set to False to instead report a warning and continue.", "os.path.isdir(odir): os.makedirs(odir) # Create the output file name ofiles[j] =", "bitmask used to set the frame type of each fits", "if overwrite is False and the file exists. \"\"\" #", "header for any of the provided files; see :func:`pypeit.spectrographs.spectrograph.get_headarr`. Set", "elif np.abs(config[k]-row[k])/config[k] < spectrograph.meta[k]['rtol']: match.append(True) else: match.append(False) else: # The", "file with the same name. ignore (:obj:`list`, optional): Ignore calibration", "the sorted file. The sorted file lists all the unique", "user-provided metadata table. See ' 'usrdata argument of instantiation of", "single instrument configuration. :attr:`table` is modified in-place. See also: :func:`pypeit.pypeitsetup.PypeItSetup.run`.", "== col for t in tbl_cols])[0][0] if indx != ncol-1:", "checks. 
par (:class:`pypeit.par.pypeitpar.PypeItPar`): PypeIt parameters used to set the code", "content of the fits table is dictated by the header", "np.sum(ignore))) self.configs = {} for i in range(len(uniq)): if ignore[i]:", "usrdata['filename']] # Convert types if possible existing_keys = list(set(self.table.keys()) &", "(:class:`astropy.table.Table`): The table with the relevant metadata for each fits", "msgs.error('Cannot write sorted instrument configuration table without \\'setup\\' ' 'column;", "Any frames that do not are removed from :attr:`table`, meaning", "'None' in _cfg.keys(): del _cfg['None'] return _cfg def unique_configurations(self, force=False,", "been set but the bits have not (likely because the", "& in_cbit cfg[setup[0]][cbit][key] = [ os.path.join(d,f) for d,f in zip(self['directory'][ftype_in_group],", "the configurations themselves. This is mostly a convenience function for", "from the fitstbl matches the input configuration Args: row (astropy.table.Row):", "False, all existing frame types are overwitten by the provided", "there is problem try: time.Time(data['mjd'], format='mjd') except ValueError: mjd =", "columns for calib, comb_id and bkg_id write_manual (:obj:`bool`, optional): Add", "dec, check=True) b = self.type_bitmask.turn_off(b, flag='science' if foundstd else 'standard')", "# Iterate through the calibration bit names as these are", "columns.') if 'frametype' in self.keys(): del self.table['frametype'] if 'framebit' in", "key in ignore: del cfgs[key] # Construct file ff =", "for arrays in the Table (e.g. 
binning) match.append(np.all(config[k] == row[k]))", "valid: {0}'.format( ', '.join(tbl_cols[badcol]))) # Make sure the basic parameters", "the combination groups to be either undefined or to be", "not in self.keys(): msgs.error('To set global frames, types must have", "\"\"\" Return the full paths to one or more frames.", "if key in _configs] if len(cfg_keys) == 0: msgs.error('No setups", "for key in extras: if key not in columns: columns", "be matched to the relevant science frame. Args: ftype (str):", "output pypeit files. If None, set to current directory. If", "this method may modify that attribute directly. The valid values", "types on certain columns. .. note:: :attr:`table` is edited in", "l = np.amax([ 0 if len(n) == 0 else int(n)", "usrdata[key] == 'None' usrdata[key][nones] = None # Rest # Allow", ":func:`set_combination_groups`. .. note:: This should only be run if all", "to an ascii file with open(ofile, 'w') as f: if", "First run get_frame_types.') if ftype == 'None': return self['framebit'] ==", "table.Column(self.type_bitmask.type_names(type_bits), name='frametype') # KLUDGE ME # # TODO: It would", "row indices with the first occurence of these configurations. configs", "columns output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs, write_manual=write_manual) # Write the pypeit files", "columns self._impose_types(['comb_id', 'bkg_id', 'manual'], [int, int, str]) # Initialize internal", "to be redetermined. Otherwise the configurations are only determined if", "It is expected that this table can be used to", "strings (e.g., ['A','C']). See :attr:`configs`. 
Raises: PypeItError: Raised if the", "usrdata['dec'][~nones] = decs.astype(dtype) radec_done = True else: usrdata[key][~nones] = usrdata[key][~nones].astype(dtype)", "this attribute is not None, this function simply returns :attr:`config`", "= table.Table([ftype_colm, fbits_colm]) if merge: self['frametype'] = t['frametype'] self['framebit'] =", "set to the string 'None', this returns all frames without", "designated by the user. The file name and type are", "[col not in all_cols for col in tbl_cols] if np.any(badcol):", "write_manual (:obj:`bool`, optional): Add additional ``PypeIt`` columns for manual extraction", "raise KeyError('The user-provided table must have \\'filename\\' column!') # Make", "cbit = int(icbit) # for yaml # Skip this group", "use it for file typing.') # Start msgs.info(\"Typing files\") type_bits", "anything allowed by the instantiation of :class:`astropy.table.Table`. usrdata (:obj:`astropy.table.Table`, optional):", "cannot be None.') # Find the frames of each type", "to set the columns to print and which column to", "'slitlen':slitlen}, 'binning': binning, # PypeIt orientation binning of a science", "metadata for each. \"\"\" _cfg = deepcopy(self.configs) if copy else", "np.full(len(self), 'None', dtype=object) for i in range(n_cfg): self['calib'][(self['setup'] == configs[i])", "set in the table. \"\"\" if 'framebit' not in self.keys():", "configurations!'.format(len(cfg_iter))) self.configs[cfg_iter[cfg_indx]] = self.get_configuration(i, cfg_keys=cfg_keys) cfg_indx += 1 msgs.info('Found {0}", "ignored frame types should be assigned to it: for cfg_key", "ftype_in_group = self.find_frames(key) & in_cbit cfg[setup[0]][cbit][key] = [ os.path.join(d,f) for", "if ftype == 'science' \\ else self.par['calibrations']['{0}frame'.format(ftype)]['exprng'] # TODO: Use", "is None else cfg_keys return {k:self.table[k][indx] for k in _cfg_keys}", "# will return an astropy.table.Table, not a PypeItMetaData object. 
def", "msgs.error('Each calibration group must be from one and only one", "correct ID name or start by # # flagging all", "the configurations are only determined if :attr:`configs` has not yet", "overwrite.'.format(ofile)) # Construct the setups dictionary cfg = self.unique_configurations(copy=True, rm_none=True)", "elif key in meta_data_model.keys(): # Is this meta data?? dtype", "i in range(len(self)): if self['calib'][i] in ['all', 'None']: # No", "Root path for the output pypeit files. If None, set", "can be used to initialize the combination group and background", "each. Raises: PypeItError: Raised if there are list of frame", "ofile is not None and os.path.isfile(ofile) and not overwrite: raise", "writes the columns selected by the :func:`pypeit.spectrographs.spectrograph.Spectrograph.pypeit_file_keys`, which can be", "user-provided frame types.') for ifile,ftypes in user.items(): indx = self['filename']", "the columns to return if columns in [None, 'all']: tbl_cols", "index, if it is provided. Args: ftype (str): The frame", "(:class:`BitMask`): The bitmask used to keep track of the calibration", "flag_unknown=False, user=None, merge=True): \"\"\" Generate a table of frame types", "configurations n_cfg = len(configs) # TODO: Science frames can only", "class. Args: row (:obj:`int`): The 0-indexed row used to construct", "matched to this provided string or list of strings (e.g.,", "ignore=None, write_bkg_pairs=False, write_manual=False): \"\"\" Write the sorted file. The sorted", "state. overwrite (:obj:`bool`, optional): Overwrite any existing file; otherwise raise", "column does not exist, this sets the combination groups to", "not the same as the backwards compatible \"setup\" dictionary. Args:", "'none' if 'slitwid' not in self.keys() else self['slitwid'][row] slitlen =", "and the association of each frame from that configuration with", "the metadata. If the internal table already contains the column", "of these setups, if requested. 
Raises: PypeItError: Raised if the", "calibration group. Raises: PypeItError: Raised if the 'calibbit' column is", "(:obj:`str`): If the 'setup' columns does not exist, fill the", "a key in the provided table. \"\"\" meta_data_model = meta.get_meta_data_model()", "# Is this meta data?? dtype = meta_data_model[key]['dtype'] else: dtype", "to have no association with an instrument configuration - This", "type is included. index (:obj:`bool`, optional): Return an array of", "the columns are the same as those included in the", "are # empty or corrupt files we still want this", "Table with two columns, the type names and the type", "needs to be moved into each Spectrograph # if useIDname", "Returns: dict: The pypeit setup dictionary with the default format.", "type. If False, all existing frame types are overwitten by", "constructing the :class:`pypeit.metadata.PypeItMetaData` object, include two columns called `comb_id` and", "reduced by PypeIt' \\ ' and will be removed from", "if 'slitlen' not in self.keys() else self['slitlen'][row] binning = '1,1'", "metakey is None or isinstance(metakey, str), \\ 'CODING ERROR: metadata", "of instrument-specific checks using # combinations of the full set", "self.table.keys() def sort(self, col): return self.table.sort(col) def merge(self, usrdata, match_type=True):", "def find_frames(self, ftype, calib_ID=None, index=False): \"\"\" Find the rows with", "spectrograph. It is expected that this table can be used", "where it first occurs. This is different from :func:`unique_configurations` because", "match the number of the rows in the table. columns", "be run if all files are from a single instrument", "A dictionary with the metadata values from the selected row.", "be unique for each standard and science frame. \"\"\" if", "frame type of each fits file. calib_bitmask (:class:`BitMask`): The bitmask", "the root name primarily for PypeIt file output. 
Args: row", "keywords used to set the configuration should be the same", "frame_type (:obj:`str`, :obj:`list`): One or more frame types to append/overwrite.", ":attr:`configs` has not yet been defined. copy (:obj:`bool`, optional): Return", "\"\"\" Provides a class that handles the fits metadata required", "self.keys() and default: self['calib'] = '0' # Make sure the", "_det: # setup[skey][str(d).zfill(2)] \\ # = {'binning': binning, 'det': d,", "the maximum number # provided, regardless of whether or not", "must have been defined; run get_frame_types.') ignore_frames = list(ignore_frames.keys()) msgs.info('Unique", "msgs.error('Cannot assign more than {0} configurations!'.format(len(cfg_iter))) self.configs[cfg_iter[cfg_indx]] = self.get_configuration(i, cfg_keys=cfg_keys)", "names. This provides just the list of setup identifiers ('A',", "assert metakey is None or isinstance(metakey, str), \\ 'CODING ERROR:", "list). Args: write_bkg_pairs (:obj:`bool`, optional): Add additional ``PypeIt`` columns for", "calibration groups.\"\"\" return None if self.calib_bitmask is None else self.calib_bitmask.nbits", "self.keys() and fill is not None: self['setup'] = fill return", "the provided spectrograph. It is expected that this table can", "the ignored frame types should be assigned to it: for", "set the frame type of each file. The metadata is", "in the keyword allows MasterFrames to be used with multiple", "match the data type in `usrdata` to the type in", "for this configuration uniq_meta = np.unique(self.table[metakey][in_cfg].data) # Warn the user", "output columns output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs, write_manual=write_manual) # Write the pypeit", "ID, if the latter is provided. 
\"\"\" return self.frame_paths(self.find_frames(ftype, calib_ID=calib_ID))", "f.write('\\n'.join(data_lines)) f.write('\\n') # Just to be explicit that the method", "after the ``'setup'`` column has been set, this simply constructs", "the full paths to one or more frames. Args: indx", "PypeItMetaData? def row_match_config(row, config, spectrograph): \"\"\" Queries whether a row", "np.any(in_cfg): continue paths = np.unique(self['directory'][in_cfg]).tolist() # Get the data lines", "return self.table.sort(col) def merge(self, usrdata, match_type=True): \"\"\" Use the provided", "`setup` - assigns all frames to a single calibration group,", "{skey: {'--': {'disperser': {'dispname': dispname, 'dispangle':dispangle}, 'dichroic': dichroic, 'slit': {'decker':", "1: msgs.error('Each calibration group must be from one and only", "msgs.error(\"Check these files before continuing\") # Finish up (note that", "matching meta values are not # unique for this configuration.", "1-indexed detector number(s). If a tuple, it must include detectors", "insert at the beginning of the list for col in", "assigned to a single calibration group.') @property def n_calib_groups(self): \"\"\"Return", "A dictionary with the types designated by the user. The", "file ff = open(ofile, 'w') for setup in cfgs.keys(): #", "the configurations to the provided `setup` - assigns all frames", "from pypeit.core import parse from pypeit.core import meta from pypeit.io", "the correct ID name or start by # # flagging", "self.table[output_cols][indx] # Write the file ff.write('##########################################################\\n') ff.write('Setup {:s}\\n'.format(setup)) ff.write('\\n'.join(dict_to_lines(cfgs[setup], level=1))", "'standard' or 'science' or both? 
if 'ra' not in self.keys()", "value = value.replace('#', '') msgs.warn('Removing troublesome # character from {0}.", "Just return the dictionary with the configuration, don't include the", "the file, on string per file line; ``# `` is", "\"\"\" Write the calib file. The calib file provides the", "the file ff.write('##########################################################\\n') ff.write('Setup {:s}\\n'.format(setup)) ff.write('\\n'.join(dict_to_lines(cfgs[setup], level=1)) + '\\n') ff.write('#---------------------------------------------------------\\n')", "'bkg_id'] # manual if write_manual: extras += ['manual'] for key", "additional ``PypeIt`` columns for manual extraction Returns: `numpy.ndarray`_: Array of", "user is not None: if len(user.keys()) != len(self): raise ValueError('The", "in self.keys(): msgs.error('Cannot provide master key string without setup and", "frame; see :attr:`calib_bitmask`. Args: global_frames (:obj:`list`, optional): A list of", "not defined so just print it print('\\n'.join(data_lines)) return None #", "mjd = np.asarray(data['mjd']) filenames = np.asarray(data['filename']) bad_files = filenames[mjd ==", "= self.spectrograph.configuration_keys() if cfg_keys is None else cfg_keys return {k:self.table[k][indx]", "\"\"\" _cfg_keys = self.spectrograph.configuration_keys() if cfg_keys is None else cfg_keys", "or list of strings (e.g., ['A','C']). Returns: numpy.array: The list", "determines and provides the configurations themselves. This is mostly a", "assign_objects=True): \"\"\" Set combination groups. .. note:: :attr:`table` is edited", "used to initialize columns that the user might add \"\"\"", "slitwid = 'none' if 'slitwid' not in self.keys() else self['slitwid'][row]", "column. \"\"\" # Find the number groups by searching for", "the types designated by the user. The file name and", "disk is returned. Otherwise, the string is interpreted as the", "this if the frames to meet the other checks in", "already exists. 
Raises: PypeItError: Raised if 'setup' column is not", "is only called for a preconstructed # pypeit file, which", "name for file output. \"\"\" _obstime = self.construct_obstime(row) if obstime", "if there are list of frame types to ignore but", "False if len(existing_keys) > 0 and match_type: for key in", "ValueError('The user-provided dictionary does not match table length.') msgs.info('Using user-provided", "np.logical_not(indx) srt = np.append(np.where(is_None)[0], np.where(indx)[0][np.argsort(output_tbl[sort_col][indx].data)]) output_tbl = output_tbl[tbl_cols][srt] else: output_tbl", "column is not defined. \"\"\" if 'calibbit' not in self.keys():", "rows if a vector was provided if rows is not", "# TODO: For now, use this assert to check that", "ngroups) # Define the bitmask and initialize the bits self.calib_bitmask", "= -1 if assign_objects and np.all(self['comb_id'] < 0): # find_frames", ".. include:: ../include/links.rst \"\"\" import os import io import string", "calibration group(s), and detector. Raises: PypeItError: Raised if the 'setup'", "not exist if 'calibbit' in self.keys(): del self['calibbit'] # Groups", "frame type is included. index (:obj:`bool`, optional): Return an array", "return self.set_frame_types(type_bits, merge=merge) def set_pypeit_cols(self, write_bkg_pairs=False, write_manual=False): \"\"\" Generate the", "main configuration file for PypeIt, configuring the control-flow and algorithmic", "frame that was' + msgs.newline() + 'missed by the automatic", "already exists; set flag to overwrite.') # Check the rows", "the same as those included in the pypeit file. 
Each", "table.Column(self.type_bitmask.type_names(type_bits), dtype='U9', name='frametype') else: ftype_colm = ftype_colmA fbits_colm = table.Column(type_bits,", "# Initialize internal attributes self.configs = None self.calib_bitmask = None", "the unique configurations in this group, ignoring any # undefined", "cfg in _configs.items(): if len(set(cfg.keys()) - set(self.keys())) > 0: msgs.error('Configuration", "the columns to include are not valid, or if the", "['calib', 'comb_id', 'bkg_id'] # manual if write_manual: extras += ['manual']", "are removed from :attr:`table`, meaning this method may modify that", "(:obj:`str`, optional): Name of the column to use for sorting", "to include are not valid, or if the column to", "Always write the table in ascii format with io.StringIO() as", "configuration. If None, the `configuration_keys` of :attr:`spectrograph` is used. Returns:", "grp) def _check_calib_groups(self): \"\"\" Check that the calibration groups are", "(e.g. dispangle) Returns: bool: True if the row matches the", "\\ # = {'binning': binning, 'det': d, # 'namp': self.spectrograph.detector[d-1]['numamplifiers']}", "j = 0 for c in self.configs.values(): if row_match_config(self.table[i], c,", "msgs.warn('Both data and files are None in the instantiation of", "removed from :attr:`table`, meaning this method may modify that attribute", "sets the frame types based on the provided object -", "of 'comb_id' values are less than 0 (meaning they're unassigned),", "the control-flow and algorithmic parameters and listing the data files", "self.keys(): del self.table['framebit'] # # TODO: This needs to be", "been written/printed if ``output == 'table'``. Otherwise, the method always", "for col in tbl_cols] if np.any(badcol): raise ValueError('The following columns", "Why isn't frametype just in the user-provided data? It may", "rm_none (:obj:`bool`, optional): Remove any configurations set to 'None'. 
If", "self.configs[cfg_iter[cfg_indx]] = self.get_configuration(indx[0], cfg_keys=cfg_keys) cfg_indx += 1 # Check if", "the method returns None when writing to a # file...", "do we need to specify 'all' here? Can't `configs is", "determine unique configurations.') # If the frame types have been", "if isinstance(value, str) and '#' in value: value = value.replace('#',", "+= ['calib', 'comb_id', 'bkg_id'] # manual if write_manual: extras +=", "to one or more frames. Args: indx (:obj:`int`, array-like): One", "\"\"\" Use the provided table to supplement or overwrite the", "self.keys() else self['dispangle'][row] dichroic = 'none' if 'dichroic' not in", "[key] # Take only those present output_cols = np.array(columns) return", "cfg_keys = list(cfg.keys()) else: _configs = configs if isinstance(configs, list)", "more strings to write to the top of the file,", "the same as returned by the spectrograph `configuration_keys` method. The", "spectrograph defined frames that have been ignored in the determination", "Args: grp (:obj:`int`): The calibration group integer. Returns: numpy.ndarray: Boolean", "If None, any row of the specified frame type is", "Raises: ValueError: Raised if the columns to include are not", "modified in-place. See also: :func:`pypeit.pypeitsetup.PypeItSetup.run`. .. todo:: - Why isn't", "= deepcopy(self.configs) if copy else self.configs if rm_none and 'None'", "= self.type_bitmask.turn_off(b, flag='science' if foundstd else 'standard') # Find the", "running set_configurations.') configs = np.unique(self['setup'].data).tolist() if 'None' in configs: configs.remove('None')", "0-indexed row used to construct the key. det (:obj:`int`, :obj:`tuple`,", "in self.keys() else self['slitwid'][row] slitlen = 'none' if 'slitlen' not", "data table, one should typically provide either the file list", "Args: copy (:obj:`bool`, optional): Return a deep copy of :attr:`configs`", "Get the list of frames of this type without a", "headers. 
The table must have a `filename` column that is", "the ``'setup'`` column has been set, this simply constructs the", "using it... Args: frametype (:obj:`dict`): A dictionary with the types", "name. ignore (:obj:`list`, optional): Ignore configurations in the provided list.", "file or to the screen. The method allows you to", "This should only be run if all files are from", "if 'ra' not in self.keys() or 'dec' not in self.keys():", "indx if np.all(good): # All values good, so we're done", "of the frames with # the ignored frame types should", "the determination of the unique configurations, but the frame types", "we need the frame type to continue if 'frametype' not", "list from which to grab the data from the fits", "the combination groups to the set of objects (science or", ":attr:`table` is edited in place. This function can be used", "to the group list grp = parse.str2list(self['calib'][i], ngroups) if grp", "Or we can force type_names() in bitmask to always return", "row_match_config(self.table[i], cfg, self.spectrograph): self.table['setup'][i] = d # Check if any", "n in self['calib'][i].replace(':',',').split(',')]) # Check against current maximum ngroups =", "= np.isin(self[key], cfg_limits[key]) if not np.all(indx): msgs.warn('Found frames with invalid", "print('\\n'.join(data_lines)) return None # Write the output to an ascii", "of metadata exprng = self.par['scienceframe']['exprng'] if ftype == 'science' \\", "= self['filename'] == ifile type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftypes.split(',')) return self.set_frame_types(type_bits,", "msgs.error('To set global frames, types must have been defined; '", "in _configs.items(): if row_match_config(self.table[i], cfg, self.spectrograph): self.table['setup'][i] = d #", "array, or an integer array if index=True, with the rows", "strict=True, usrdata=None): \"\"\" Generate the fitstbl that will be at", "One or more files to use to build the table.", "the configuration. 
If None, the `configuration_keys` of :attr:`spectrograph` is used.", "requested. Raises: PypeItError: Raised if the 'setup' isn't been defined.", "if row_match_config(self.table[i], c, self.spectrograph): break j += 1 unique =", "with :func:`convert_time` ? Args: row (:obj:`int`): The 0-indexed row of", "provided `setup` - assigns all frames to a single calibration", "`framebit` column is not set in the table. \"\"\" if", "(:obj:`str`, optional): If the 'setup' column does not exist, fill", "ftype_colm = table.Column(self.type_bitmask.type_names(type_bits), dtype='U9', name='frametype') else: ftype_colm = ftype_colmA fbits_colm", "sorting. Args: output (:obj:`str`, optional): Output signature or file name.", "table, and the keywords used to set the configuration should", "all frames without a known type. calib_ID (:obj:`int`, optional): Index", ".. warning:: Any frame types returned by the :func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames` method", "Generate the list of columns to be included in the", "Why do we need to specify 'all' here? Can't `configs", "sort_col (:obj:`str`, optional): Name of the column to use for", "data from the fits headers or the data directly. If", "indx[use] if len(indx) == 0: msgs.error('No frames to use to", "reader will # fault. self.set_configurations(fill=setup) self.set_calibration_groups(default=True) self.set_combination_groups() def get_configuration(self, indx,", "== 'pypeit': tbl_cols = self.set_pypeit_cols(write_bkg_pairs=True) else: all_cols = list(self.keys()) tbl_cols", "just in the user-provided data? It may be (see get_frame_types)", "allowing some frame types to have no association with an", "used with multiple calibration groups. Args: row (:obj:`int`): The 0-indexed", "ValueError('CODING ERROR: Found high-dimensional column.') #embed(header='372 of metadata') elif key", "configs is None else configs for k, cfg in _configs.items():", "boolean array. 
Returns: numpy.ndarray: A boolean array, or an integer", "None: usr_row = None else: # TODO: This check should", "include in the table and specify any validation checks. par", "was' + msgs.newline() + 'missed by the automatic identification.') b", "self.find_calib_group(i) in_cbit = self['calibbit'] == cbit # Find the unique", "correct type... if int(str(ftype_colmA.dtype)[2:]) < 9: ftype_colm = table.Column(self.type_bitmask.type_names(type_bits), dtype='U9',", "None: # Output file not defined so just print it", "full set of metadata exprng = self.par['scienceframe']['exprng'] if ftype ==", "self.get_frame_types(user=frametype) # TODO: Add in a call to clean_configurations? I", "used for QA now; but could probably use the pypeit", "cfg_key in _configs.keys(): in_cfg = self.table['setup'] == cfg_key for ftype,", "'setup' not in self.keys() and fill is not None: self['setup']", "table is identical to the pypeit file output. .. todo::", "# Make sure the basic parameters are the first few", "'columns; run set_configurations and set_calibration_groups.') if os.path.isfile(ofile) and not overwrite:", "the :func:`pypeit.spectrographs.spectrograph.Spectrograph.pypeit_file_keys`, which can be specific to each instrument. 
Args:", "each science frame # TODO: Should this be 'standard' or", "{} msgs.info('All files assumed to be from a single configuration.')", "unique combinations of the items in the metadata table listed", "get setup names; run set_configurations.') # Unique configurations setups, indx", "or a string.'.format( self.spectrograph.__class__.__name__) # Get the list of frames", "configuration spectrograph (pypeit.spectrographs.spectrograph.Spectrograph): Used to grab the rtol value for", "used to initialize the combination group and background group columns,", "j += 1 unique = j == len(self.configs) if unique:", "= 0 # Set the calibration bits for i in", "# Use the first file to set the first unique", "usrdata (:obj:`astropy.table.Table`): A user provided set of data used to", "outside of this class; use :func:`unique_configurations` instead. Args: copy (:obj:`bool`,", "groups. Args: row (:obj:`int`): The 0-indexed row used to construct", "included. write_bkg_pairs (:obj:`bool`, optional): When constructing the :class:`pypeit.metadata.PypeItMetaData` object, include", "that some of the frames are going to be #", "place. Args: columns (:obj:`list`): List of column names types (:obj:`list`):", "break j += 1 unique = j == len(self.configs) if", "with two columns, the type names and the type bits.", "in the provided configuration match with the metadata keywords. Also", "unique for each standard and science frame. \"\"\" if 'comb_id'", "files to include in the table. data (table-like, optional): The", "'comb_id', 'bkg_id'] # manual if write_manual: extras += ['manual'] for", "= {'binning': binning, 'det': d, # 'namp': self.spectrograph.detector[d-1]['numamplifiers']} return setup[skey]", "the same as in the table, and the keywords used", "copy of :attr:`configs` instead of the object itself. 
rm_none (:obj:`bool`,", "in _configs: use = np.isin(setups, _configs) setups = setups[use] indx", "\"\"\" # Loop on keys in config match = []", "the following frames may be empty or have corrupt headers:\\n'", "rows are written. Shape must match the number of the", "of frames with the same configuration. .. todo:: - Maintain", "this function determines the unique instrument configurations by finding unique", "metadata associated with each configuration. The metadata keywords in the", "match.append(False) else: # The np.all allows for arrays in the", "file. Can be provided as a list directly or as", "``'all'``, pass back all configurations. Otherwise, only return the configurations", "of the column to use for sorting the output. If", "If the frame types have been set, ignore anything listed", "the frame type and science frame ID, if the latter", "{0}'.format(setup): utils.yamlify(cfg[setup])}, level=1) # Get the paths in_cfg = self['setup']", "these gymnastics. Or better yet, just stop # producing/using the", "column already exists, the configurations are **not** reset unless you", "output sorted file. overwrite (:obj:`bool`, optional): Overwrite any existing file", "by # this if the frames to meet the other", "input dictionary if user is not None: if len(user.keys()) !=", "so just set all # the frames to this (first)", "in bitmask to always return the # correct type... if", "single configuration if len(setup) != 1: msgs.error('Each calibration group must", "= self['setup'] == setup if not np.any(in_cfg): continue paths =", "primarily for PypeIt file output. Args: row (:obj:`int`): The 0-indexed", "of the specified frame type is included. index (:obj:`bool`, optional):", "sure that each calibration group should only contain # frames", "problem with the reading the header for any of the", "same calibration group indx &= self.find_calib_group(calib_ID) # Return return np.where(indx)[0]", "been defined. 
\"\"\" if 'setup' not in self.keys() or 'calibbit'", "isinstance(value, str) and '#' in value: value = value.replace('#', '')", "= uniq == 'None' if np.sum(ignore) > 0: msgs.warn('Ignoring {0}", "that handles the fits metadata required by PypeIt. .. include", "the output directory does not exist, it is created. cfg_lines", "if 'setup' not in self.keys() and fill is not None:", "os.path.join(output_path, root) if not os.path.isdir(odir): os.makedirs(odir) # Create the output", "of the 'calib' column. \"\"\" # Find the number groups", "include are not valid, or if the column to use", "'CODING ERROR: metadata keywords set by config_indpendent_frames are not '", "to initialize columns that the user might add \"\"\" if", "types to have no association with an instrument configuration -", "indx = np.where(np.logical_not(good))[0] for i in indx: msg += '", "calibration groups (e.g., ['bias', 'dark']). default (:obj:`bool`, optional): If the", "to be moved into each Spectrograph # if useIDname and", "self.spectrograph.get_det_name(det) return f\"{self['setup'][row]}_{self['calibbit'][row]}_{det_name}\" def construct_obstime(self, row): \"\"\" Construct the MJD", "Use the user-defined frame types from the input dictionary if", "probably go somewhere else or just removed. assert isinstance(cfg_limits[key], list),", "those frames in the table included in the selected calibration", "optional): Merge the frame typing into the exiting table. Returns:", "string or list of strings (e.g., ['A','C']). Returns: numpy.array: The", "and specify any validation checks. 
par (:class:`pypeit.par.pypeitpar.PypeItPar`): PypeIt parameters used", "the unique instrument configurations (setups) and the association of each", "ignored frames, types must have been defined; run ' 'get_frame_types.')", "= str(i) # Allow some frame types to be used", "axis=0))[0] self['comb_id'][sci_std_idx] = np.arange(len(sci_std_idx), dtype=int) + 1 def set_user_added_columns(self): \"\"\"", "of 0-indexed indices instead of a boolean array. Returns: numpy.ndarray:", "col in ['framebit', 'frametype', 'filename', 'directory']: if col not in", "keyword, specific to :attr:`spectrograph`. Additional valid keywords, depending on the", "if `data` is also provided. This functionality is only used", "isinstance(cfg_limits[key], list), \\ 'CODING ERROR: valid_configuration_values is not correctly defined", "of data used to supplement or overwrite metadata read from", "with the frame types and bits. Args: type_bits (numpy.ndarray): Integer", "(e.g. VLT) where meta data may not be required Returns:", "frame types from the input dictionary if user is not", "if write_bkg_pairs: extras += ['calib', 'comb_id', 'bkg_id'] # manual if", "= self.type_bitmask.flagged(self['framebit'], ftype) if calib_ID is not None: # Select", "= [np.where(f == self.table['filename'])[0][0] for f in usrdata['filename']] # Convert", "fits table> \"\"\" # Columns for output columns = self.spectrograph.pypeit_file_keys()", "'dichroic' not in self.keys() else self['dichroic'][row] decker = 'none' if", "@property def n_calib_groups(self): \"\"\"Return the number of calibration groups.\"\"\" return", "to make sure that it matches with what can be", "Make sure that each calibration group should only contain #", "configurations to be redetermined. 
Otherwise the configurations are only determined", "frames, types must have been defined; run get_frame_types.') ignore_frames =", "../include/links.rst \"\"\" import os import io import string from copy", "msgs.warn('RA and DEC must not be None for file:' +", "frame is # assigned to that group. ngroups = 0", "{'decker': decker, 'slitwid':slitwid, 'slitlen':slitlen}, 'binning': binning, # PypeIt orientation binning", "msgs.error('Cannot get setup names; run set_configurations.') # Unique configurations setups,", "same as the backwards compatible \"setup\" dictionary. Args: indx (:obj:`int`):", "Otherwise, the method always returns None. Raises: ValueError: Raised if", "isinstance(indx, (int,np.integer)): return os.path.join(self['directory'][indx], self['filename'][indx]) return [os.path.join(d,f) for d,f in", "_cfg def unique_configurations(self, force=False, copy=False, rm_none=False): \"\"\" Return the unique", "this point, we need the frame type to continue if", "dtype = self.table[key].dtype # Deal with None's properly nones =", "'') msgs.warn('Removing troublesome # character from {0}. Returning {1}.'.format( meta_key,", "for :class:`pypeit.core.framematch.FrameTypeBitMask`. If set to the string 'None', this returns", "None, the `configuration_keys` of :attr:`spectrograph` is used. Returns: dict: A", "self.keys() else self['dichroic'][row] decker = 'none' if 'decker' not in", "that it must match. 
If None, any row of the", "framebit columns.') if 'frametype' in self.keys(): del self.table['frametype'] if 'framebit'", "the spectrograph # needs to be defined first) ofile =", "= self.find_frames(key) & in_group ftype_in_group = self.find_frames(key) & in_cbit cfg[setup[0]][cbit][key]", "in cfg.keys() if key in _configs] if len(cfg_keys) == 0:", "str) and '#' in value: value = value.replace('#', '') msgs.warn('Removing", "def set_calibration_groups(self, global_frames=None, default=False, force=False): \"\"\" Group calibration frames into", "this is not an attribute of # PypeItMetaData? def row_match_config(row,", "must have a `filename` column that is used to match", "range(len(uniq)): if ignore[i]: continue self.configs[uniq[i]] = self.get_configuration(indx[i]) msgs.info('Found {0} unique", "within 20 arcmins of a listed standard, # then it", "and the type bits. See :class:`pypeit.core.framematch.FrameTypeBitMask` for the allowed frame", "Function will fault if there is a problem with the", "mean that you want all the configurations? Or can we", "pypeit.core import framematch from pypeit.core import flux_calib from pypeit.core import", "frame from that configuration with a given calibration group. ..", "Dictionary with the data to assign to :attr:`table`. \"\"\" #", "uniq, indx = np.unique(self['setup'], return_index=True) ignore = uniq == 'None'", "Name of the column to use for sorting the output.", "force: self._set_calib_group_bits() self._check_calib_groups() return # TODO: The rest of this", "files is None: # Warn that table will be empty", "'calibbit' columns haven't been defined. \"\"\" if 'setup' not in", "spectrograph `configuration_keys` method. The latter is not checked. 
If None,", "ofile, overwrite=True, ignore=None, write_bkg_pairs=False, write_manual=False): \"\"\" Write the sorted file.", "# If an object exists within 20 arcmins of a", "types): if c in self.keys(): self.table[c] = self.table[c].astype(t) def _build(self,", "with type: {0}'.format(ignore_frames)) use = np.ones(len(self), dtype=bool) for ftype in", "'dichroic': dichroic, 'slit': {'decker': decker, 'slitwid':slitwid, 'slitlen':slitlen}, 'binning': binning, #", "been ignored in the determination of the unique configurations, but", "determine the calibration group of each frame; see :attr:`calib_bitmask`. Args:", "Find the frames of each type in this group cfg[setup[0]][cbit]", "frames must also be matched to the relevant science frame.", "the last # few columns ncol = len(tbl_cols) for col", "type_bitmask (:class:`pypeit.core.framematch.FrameTypeBitMask`): The bitmask used to set the frame type", "Generate the fitstbl that will be at the heart of", "frames indx = self.type_bitmask.flagged(self['framebit'], ftype) if calib_ID is not None:", "frames with # the ignored frame types should be assigned", "if rm_none and 'None' in _cfg.keys(): del _cfg['None'] return _cfg", "indx = np.where([t == col for t in tbl_cols])[0][0] if", "string with two digits (the maximum number of detectors is", "Args: files (:obj:`str`, :obj:`list`): One or more files to use", "ff.write('##########################################################\\n') ff.write('Setup {:s}\\n'.format(setup)) ff.write('\\n'.join(dict_to_lines(cfgs[setup], level=1)) + '\\n') ff.write('#---------------------------------------------------------\\n') mjd =", "call the function with ``force=True``. Args: configs (:obj:`dict`, optional): A", "the table contents are printed to the screen. If ``'table'``,", "_cfg # Iterate through the calibration bit names as these", "have been set, ignore anything listed in # the ignore_frames", "a zero-filled string with two digits (the maximum number of", "build the table. 
strict (:obj:`bool`, optional): Function will fault if", "not np.any(in_cfg): continue paths = np.unique(self['directory'][in_cfg]).tolist() # Get the data", "in_cbit = self['calibbit'] == cbit # Find the unique configurations", "the number of the rows in the table. columns (:obj:`str`,", "to be reset. fill (:obj:`str`, optional): If the 'setup' column", "Check that the calibration groups are valid. This currently only", "str_j in string.ascii_uppercase] cfg_iter = list(string.ascii_uppercase) + double_alphabet cfg_indx =", "data lines subtbl = self.table[output_cols][in_cfg] subtbl.sort(['frametype','filename']) with io.StringIO() as ff:", "det=None, config_only=False): \"\"\" Construct the setup dictionary. .. todo:: -", "this simply constructs the configuration dictionary using the unique configurations", "\"\"\" Find the calibration groups associated with a specific frame.", "Build table self.table = table.Table(data if files is None else", "and sort the data by a given column if sort_col", "number of keys therefore *must* match the number of files", "= self.spectrograph.get_headarr(ifile, strict=strict) # Grab Meta for meta_key in self.spectrograph.meta.keys():", "file list from which to grab the data from the", "'calib' column has a string type to make sure that", "'calibbit' not in self.keys(): msgs.error('Cannot provide master key string without", "the setups to write if configs is None or configs", "values good, so we're done return # Alert the user", "to define configurations!') # Get the list of keys to", "exprng=exprng) indx = self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng) # Turn on the", "overwrite the metadata. 
If the internal table already contains the", "-1 if 'bkg_id' not in self.keys(): self['bkg_id'] = -1 if", "configurations.') # If the frame types have been set, ignore", "`astropy.table.Table`: Table with two columns, the frame type name and", "(:obj:`str`, :obj:`list`): One or more files to use to build", "Allow an empty set of configuration keys # meaning that", "= 'none' if 'slitwid' not in self.keys() else self['slitwid'][row] slitlen", "of the # MasterFrames and QA for icbit in np.unique(self['calibbit'].data):", "gets overwritten by # this if the frames to meet", "# setup[skey][str(d).zfill(2)] \\ # = {'binning': binning, 'det': d, #", "calibration group of each frame; see :attr:`calib_bitmask`. Args: global_frames (:obj:`list`,", "combination and background IDs are the last # few columns", "by PypeIt' \\ ' and will be removed from the", "bkg columns if write_bkg_pairs: extras += ['calib', 'comb_id', 'bkg_id'] #", "multiple types, the types should be provided as a string", "for k in config.keys(): # Deal with floating configs (e.g.", "len(cfg_keys) == 0: msgs.error('No setups to write!') # Grab output", "append (:obj:`bool`, optional): Append the frame type. If False, all", "files (:obj:`str`, :obj:`list`): One or more files to use to", "called `comb_id` and `bkg_id` that identify object and background frame", "or 'bkg_id' columns do not exist, they're set to -1.", "valid self._check_calib_groups() def find_frames(self, ftype, calib_ID=None, index=False): \"\"\" Find the", "and not force: return # Groups have been set but", "This is for backwards compatibility, but we should consider reformatting/removing", "table to write. If None, all rows are written. 
Shape", "is used to provide the header keyword data to include", "continue # If an object exists within 20 arcmins of", "keywords!'.format(k)) self.table['setup'] = 'None' nrows = len(self) for i in", "# Validate instrument name self.spectrograph.vet_instrument(self.table) def _impose_types(self, columns, types): \"\"\"", "self.calib_bitmask = BitMask(np.arange(ngroups)) self['calibbit'] = 0 # Set the calibration", "the frames associated with the provided calibration group. Args: grp", "sure the dithers and combination and background IDs are the", "file. The 'calibbit' column is actually what is used to", "provided set of data used to supplement or overwrite metadata", "flat frame that was' + msgs.newline() + 'missed by the", "the provided list. write_bkg_pairs (:obj:`bool`, optional): Add additional ``PypeIt`` columns", ":obj:`list`, optional): One or more strings used to select the", "instrument-specific checks using # combinations of the full set of", "= '1,1' if 'binning' not in self.keys() else self['binning'][row] skey", "- set(self.keys())) > 0: msgs.error('Configuration {0} defined using unavailable keywords!'.format(k))", "group of each frame; see :attr:`calib_bitmask`. Args: global_frames (:obj:`list`, optional):", "specified for the provided spectrograph. It is expected that this", "setup = {skey: {'--': {'disperser': {'dispname': dispname, 'dispangle':dispangle}, 'dichroic': dichroic,", "column if sort_col is not None: if sort_col not in", "each type in this group cfg[setup[0]][cbit] = {} for key", "follow an execution of # pypeit_setup. If the user edits", "\"\"\" if 'manual' not in self.keys(): self['manual'] = '' def", "in the provided table. \"\"\" meta_data_model = meta.get_meta_data_model() # Check", "be provided as a list directly or as a comma-separated", "cfg_keys (:obj:`list`, optional): The list of metadata keys to use", "observed. .. todo:: - Consolidate with :func:`convert_time` ? 
Args: row", "# Get the columns to return if columns in [None,", "etc.) and the row index where it first occurs. This", "and os.path.isfile(ofile) and not overwrite: raise FileExistsError(f'{ofile} already exists; set", "for {0}'.format(os.path.split(ifile)[1])) # JFH Changed the below to not crash", "table.Column(type_bits, name='framebit') t = table.Table([ftype_colm, fbits_colm]) if merge: self['frametype'] =", "self['dispname'][row] dispangle = 'none' if 'dispangle' not in self.keys() else", "this from astropy.table.Table, but that # proved too difficult. class", "(:obj:`bool`, optional): Attempt to match the data type in `usrdata`", "are only determined if :attr:`configs` has not yet been defined.", "' 'columns; run set_configurations and set_calibration_groups.') if os.path.isfile(ofile) and not", "\"\"\" Find all the frames associated with the provided calibration", "None is_None = np.logical_not(indx) srt = np.append(np.where(is_None)[0], np.where(indx)[0][np.argsort(output_tbl[sort_col][indx].data)]) output_tbl =", "\"\"\" if 'framebit' not in self.keys(): msgs.error('Frame types are not", "Or better yet, just stop # producing/using the *.calib file.", "if the column to use for sorting is not valid.", "overwrite metadata read from the file headers. The table must", "\"\"\" # Set the default if requested and 'calib' doesn't", "use the pypeit file instead Args: ofile (:obj:`str`): Name for", "undefined or to be unique for each science or standard", "are not set. First run get_frame_types.') if ftype == 'None':", "The content of the fits table is dictated by the", "instead. Args: copy (:obj:`bool`, optional): Return a deep copy of", "not valid: {0}'.format( ', '.join(tbl_cols[badcol]))) # Make sure the basic", "# TODO: Why do we need to specify 'all' here?", "'0' # Make sure the calibbit column does not exist", "type. 
Raises: PypeItError: Raised if the `framebit` column is not", "for output columns = self.spectrograph.pypeit_file_keys() extras = [] # comb,", "are provided, the vanilla configuration is included. write_bkg_pairs (:obj:`bool`, optional):", "with two columns, the frame type name and bits. \"\"\"", "the data save to each file. The class is used", "is not None and cbit in ignore: continue # Find", "to put in the relevant spectrograph class. Args: row (:obj:`int`):", "# No information, keep going continue # Convert to a", "is not None!) msgs.info(\"Typing completed!\") return self.set_frame_types(type_bits, merge=merge) def set_pypeit_cols(self,", "to the relevant science frame. Args: ftype (str): The frame", "(setups) and the frames associated with each configuration. The output", "PypeIt parameters used to set the code behavior. If not", "= 'none' if 'dispangle' not in self.keys() else self['dispangle'][row] dichroic", "TODO: Science frames can only have one calibration group #", "it. - This is complicated by allowing some frame types", "of the table based on user-provided data, typically pulled from", "# Warn the user that the matching meta values are", "the frame types for i, ftype in enumerate(self.type_bitmask.keys()): # #", "to report a warning and continue. usrdata (astropy.table.Table, optional): Parsed", "combination groups. .. note:: :attr:`table` is edited in place. This", "TODO: Do we want to do this here? indx =", "by the header keywords specified for the provided spectrograph. It", "ignore rm = np.logical_not(np.isin(setups, ignore)) setups = setups[rm] indx =", "is dictated by the header keywords specified for the provided", "\\'setup\\' ' 'column; run set_configurations.') if os.path.isfile(ofile) and not overwrite:", "data['directory'] = ['None']*len(_files) data['filename'] = ['None']*len(_files) # Build the table", "The calibration group integer. 
Returns: numpy.ndarray: Boolean array selecting those", "(:obj:`pypeit.par.pypeitpar.PypeItPar`): PypeIt parameters used to set the code behavior. files", "used to provide the header keyword data to include in", "allows MasterFrames to be used with multiple calibration groups. Args:", "and background group columns, and/or to initialize the combination groups", "configs is None or configs == 'all' or configs ==", "the calibration groups to be reconstructed if the 'calib' column", "inputs. Raises: PypeItError: Raised if none of the keywords in", "PypeIt file output. Args: row (:obj:`int`): The 0-indexed row of", "supplement or overwrite the metadata. If the internal table already", "# Find the files without any types indx = np.logical_not(self.type_bitmask.flagged(type_bits))", "the spectrograph ``configuration_keys`` method. If run after the ``'setup'`` column", "arrays in the Table (e.g. binning) match.append(np.all(config[k] == row[k])) #", "fits file to use in the data reduction. \"\"\" def", "association of each frame from that configuration with a given", "ignoring any # undefined ('None') configurations #setup = np.unique(self['setup'][in_group]).tolist() setup", "up one directory .. include:: ../include/links.rst \"\"\" import os import", "to construct the setup. det (:obj:`int`, optional): The 1-indexed detector", "data in the table for key in usrdata.keys(): self.table[key] =", "assumed to be from a single configuration.') return self._get_cfgs(copy=copy, rm_none=rm_none)", "as f: if header is not None: _header = header", "d in _det: # setup[skey][str(d).zfill(2)] \\ # = {'binning': binning,", "are unidentified files, leave without a type and continue. user", "the combination groups are set to be unique for each", "None, this is set by :func:`unique_configurations`. 
force (:obj:`bool`, optional): Force", "return None # Write the output to an ascii file", "are unique self['calibbit'][i] = self.calib_bitmask.turn_on(self['calibbit'][i], grp) def _check_calib_groups(self): \"\"\" Check", "rest of this just nominally sets the calibration # group", "types from the input dictionary if user is not None:", "in place. This function can be used to initialize the", "that configuration-defining keywords all have values that will yield good", "overwrite.') # Check the rows input if rows is not", "existing_keys: if len(self.table[key].shape) > 1: # NOT ALLOWED!! # TODO:", "provided but the frame types have not been defined yet.", "each configuration. The metadata keywords in the dictionary should be", "If not provided, the default parameters specific to the provided", "include in the file. If None are provided, the vanilla", "The method allows you to set the columns to print", "data[meta_key].append(value) msgs.info('Added metadata for {0}'.format(os.path.split(ifile)[1])) # JFH Changed the below", "bits. Args: type_bits (numpy.ndarray): Integer bitmask with the frame types.", "the metadata from the fits files. strict (:obj:`bool`, optional): Function", "TODO: We should edit the relevant follow-on code so that", "report a warning and continue. Attributes: spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): The spectrograph", "frametype for a few instruments (e.g. VLT) where meta data", "and not force: self._set_calib_group_bits() self._check_calib_groups() return # TODO: The rest", "optional): Output signature or file name. If None, the table", "TODO: The rest of this just nominally sets the calibration", "os.path.join(d,f) for d,f in zip(self['directory'][ftype_in_group], self['filename'][ftype_in_group])] # Write it ff", "in the Table (e.g. binning) match.append(np.all(config[k] == row[k])) # Check", "if framebit is not # set... 
sci_std_idx = np.where(np.any([self.find_frames('science'), self.find_frames('standard')],", "if 'frametype' in self.keys() or 'framebit' in self.keys(): msgs.warn('Removing existing", "data = {k:[] for k in self.spectrograph.meta.keys()} data['directory'] = ['None']*len(_files)", "type is included. Returns: list: List of file paths that", "} } #_det = np.arange(self.spectrograph.ndet)+1 if det is None else", "be something to put in the relevant spectrograph class. Args:", "len(rows) != len(self.table): raise ValueError('Boolean vector selecting output rows has", "self.keys(): msgs.warn('Removing existing frametype and framebit columns.') if 'frametype' in", "ignore_frames indx = np.arange(len(self)) ignore_frames = self.spectrograph.config_independent_frames() if ignore_frames is", "``force``). .. warning:: Any frame types returned by the :func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames`", "todo:: - Maintain a detailed description of the logic. The", "behavior. files (:obj:`str`, :obj:`list`, optional): The list of files to", "- if the 'comb_id' column does not exist, this sets", "= np.zeros(len(self), dtype=self.type_bitmask.minimum_dtype()) # Use the user-defined frame types from", "determination of the unique configurations, but the frame types have", "the 'comb_id' column does not exist, this sets the combination", "table. strict (:obj:`bool`, optional): Function will fault if :func:`fits.getheader` fails", "{} for i in range(len(uniq)): if ignore[i]: continue self.configs[uniq[i]] =", "instantiation of :class:`astropy.table.Table`. usrdata (:obj:`astropy.table.Table`, optional): A user provided set", "ensure the integers are unique self['calibbit'][i] = self.calib_bitmask.turn_on(self['calibbit'][i], grp) def", "indx = self.type_bitmask.flagged(self['framebit'], ftype) if calib_ID is not None: #", "exception. 
header (:obj:`str`, :obj:`list`, optional): One or more strings to", "= os.path.join(odir, '{0}.pypeit'.format(root)) # Get the setup lines setup_lines =", "# Select frames in the same calibration group indx &=", "if some files have None in # their MJD. This", "typing into the exiting table. Returns: :obj:`astropy.table.Table`: A Table with", "or standard frame, see :func:`set_combination_groups`. .. note:: This should only", "[ os.path.join(d,f) for d,f in zip(self['directory'][ftype_in_group], self['filename'][ftype_in_group])] # Write it", "= 'none' if 'dispname' not in self.keys() else self['dispname'][row] dispangle", "overwritten by # this if the frames to meet the", "without any data. Args: spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): The spectrograph used to", "and bkg_id write_manual (:obj:`bool`, optional): Add additional ``PypeIt`` columns for", "in self.keys(): self['bkg_id'] = -1 if assign_objects and np.all(self['comb_id'] <", ":func:`get_frame_types`), this method will fault! Args: force (:obj:`bool`, optional): Force", "'slitlen' not in self.keys() else self['slitlen'][row] binning = '1,1' if", "Alert the user that some of the frames are going", "None if output in [None, 'table'] else output if ofile", "and the file exists. \"\"\" # Check the file can", "is not None: _header = header if isinstance(header, list) else", "read from a pypeit file) if 'calib' in self.keys() and", "# then it is probably a standard star foundstd =", "Is this appropriate for NIR data? \"\"\" is_science = self.find_frames('science')", "metadata to determine unique configurations.') # If the frame types", "index of the table row to use to construct the", "all detectors are included. config_only (:obj:`bool`, optional): Just return the", "data table is identical to the pypeit file output. ..", "output_cols[np.isin(output_cols, self.keys())].tolist() def set_combination_groups(self, assign_objects=True): \"\"\" Set combination groups. 
..", "the file exists. \"\"\" # Check the file can be", "meta (e.g. dispangle) Returns: bool: True if the row matches", "the configuration, don't include the top-level designation of the configuration", "not force: return # Groups have been set but the", "j == len(self.configs) if unique: if cfg_indx == len(cfg_iter): msgs.error('Cannot", "(:obj:`bool`, optional): If the 'calib' column is not present, set", "the latter is provided. \"\"\" return self.frame_paths(self.find_frames(ftype, calib_ID=calib_ID)) def frame_paths(self,", "a type and continue. user (:obj:`dict`, optional): A dictionary with", "don't have to do these gymnastics. Or better yet, just", "column to use for sorting is not valid. FileExistsError: Raised", "match any of the # meta data values indx &=", "(:obj:`list`): List of types \"\"\" for c,t in zip(columns, types):", "name to the table data['directory'][idx], data['filename'][idx] = os.path.split(ifile) if not", "is not set in the table. \"\"\" if 'framebit' not", "now; but could probably use the pypeit file instead Args:", "identify the following files:\") for f in self['filename'][indx]: msgs.info(f) if", "have one calibration group # Assign everything from the same", "Or can we # make the default 'all'? if configs", "_cfg[setup]['--'] = deepcopy(cfg[setup]) cfg = _cfg # Iterate through the", "For now this is a simple grouping of frames with", "an assert statement... raise ValueError('CODING ERROR: Found high-dimensional column.') #embed(header='372", "defined, so just set all # the frames to this", "detectors is 99). Using the calibration bit in the keyword", "The sorted file lists all the unique instrument configurations (setups)", "ofile (:obj:`str`): Name for the output sorted file. overwrite (:obj:`bool`,", "group *for all rows*. 
force (:obj:`bool`, optional): Force the calibration", "boolean vector selecting the rows of the table to write.", "(:obj:`list`): List of column names types (:obj:`list`): List of types", "# Remove the selected configurations to ignore rm = np.logical_not(np.isin(setups,", "for k, cfg in _configs.items(): if len(set(cfg.keys()) - set(self.keys())) >", "the frames must also be matched to the relevant science", "if key in ['ra', 'dec'] and not radec_done: ras, decs", "nominally sets the calibration # group based on the configuration.", "# raise ValueError('idname is not set in table; cannot use", "specify 'all' here? Can't `configs is # None` mean that", "Get the setups to write if configs is None or", "default parameters specific to the provided spectrograph are used. configs", "np.abs(config[k]-row[k])/config[k] < spectrograph.meta[k]['rtol']: match.append(True) else: match.append(False) else: # The np.all", "row from the fitstbl matches the input configuration Args: row", "self._get_cfgs(copy=copy, rm_none=rm_none) msgs.info('Using metadata to determine unique configurations.') # If", "instead Args: ofile (:obj:`str`): Name for the output sorted file.", "Changed the below to not crash if some files have", "user that the matching meta values are not # unique", "filenames[mjd == None] # Print status message msg = 'Time", "(astropy.table.Row): From fitstbl config (dict): Defines the configuration spectrograph (pypeit.spectrographs.spectrograph.Spectrograph):", "default if requested and 'calib' doesn't exist yet if 'calib'", "Use ovewrite=True to overwrite.'.format(ofile)) # Grab output columns output_cols =", "information, keep going continue # Convert to a list of", "None self.calib_bitmask = None # Initialize columns that the user", "has incorrect length.') # Get the columns to return if", "msgs.info('Added metadata for {0}'.format(os.path.split(ifile)[1])) # JFH Changed the below to", "raise an exception. 
header (:obj:`str`, :obj:`list`, optional): One or more", "ignore but the frame types have not been defined yet.", "rows is not None and len(rows) != len(self.table): raise ValueError('Boolean", "calibration group must be from one and only one instrument", "method. If run after the ``'setup'`` column has been set,", "indx (:obj:`int`, array-like): One or more 0-indexed rows in the", "any NoneTypes indx = output_tbl[sort_col] != None is_None = np.logical_not(indx)", "this provided string or list of strings (e.g., ['A','C']). See", "defined so just print it print('\\n'.join(data_lines)) return None # Write", "groups are valid self._check_calib_groups() def find_frames(self, ftype, calib_ID=None, index=False): \"\"\"", "at least for now the DEIMOS image reader will #", "of the rows in the table. columns (:obj:`str`, :obj:`list`, optional):", "['manual'] for key in extras: if key not in columns:", "Ignored if ``output`` does not specify an output file. Returns:", "output_tbl.write(ff, format='ascii.fixed_width') data_lines = ff.getvalue().split('\\n')[:-1] if ofile is None: #", "is not an `astropy.io.table.Table` KeyError: Raised if `filename` is not", "= 'Setup {}'.format(self['setup'][row]) # Key names *must* match configuration_keys() for", "type bits. See :class:`pypeit.core.framematch.FrameTypeBitMask` for the allowed frame types. \"\"\"", "self.table.__setitem__(item, value) def __len__(self): return self.table.__len__() def __repr__(self): return self.table._base_repr_(html=False,", "0-indexed indices instead of a boolean array. Returns: numpy.ndarray: A", "of the full set of metadata exprng = self.par['scienceframe']['exprng'] if", "within PypeIt. **Note**: This is ignored if `data` is also", "exists within 20 arcmins of a listed standard, # then", "overwrite: msgs.error('{0} already exists. Use ovewrite=True to overwrite.'.format(ofile)) # Grab", "metadata table generated within PypeIt. 
**Note**: This is ignored if", "as returned by the spectrograph `configuration_keys` method. The latter is", "frames, types must have been defined; ' 'run get_frame_types.') calibs", "format='mjd') except ValueError: mjd = np.asarray(data['mjd']) filenames = np.asarray(data['filename']) bad_files", "\"\"\" cfg_limits = self.spectrograph.valid_configuration_values() if cfg_limits is None: # No", "a simple grouping of frames with the same configuration. ..", "None for file:' + msgs.newline() + f) msgs.warn('The above file", "ifile in enumerate(_files): # User data (for frame type) if", "be from one and only one instrument ' 'configuration with", "i, ftype in enumerate(self.type_bitmask.keys()): # # Initialize: Flag frames with", ":func:`construct_obstime`. Returns: str: The root name for file output. \"\"\"", "if np.any(indx): msgs.info(\"Couldn't identify the following files:\") for f in", "self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng) indx = self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng) # Turn", "created. cfg_lines (:obj:`list`, optional): The list of configuration lines to", "return self.table._base_repr_(html=True, max_width=-1, descr_vals=['PypeItMetaData: spectrograph={0}, length={1}\\n'.format( self.spectrograph.name, len(self))]) @staticmethod def", "to do this here? indx = self.type_bitmask.flagged(type_bits, flag='standard') for b,", "ignore: del cfgs[key] # Construct file ff = open(ofile, 'w')", "return None and the frame types have not yet been", "output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs, write_manual=write_manual) cfgs = self.unique_configurations(copy=ignore is not None)", "setup columns with this single identifier. \"\"\" self.get_frame_types(user=frametype) # TODO:", ":attr:`spectrograph` is used. Returns: dict: A dictionary with the metadata", "# KLUDGE ME # # TODO: It would be good", "the ``'setup'`` column is initialized, this function determines the unique", "list. 
return_index (:obj:`bool`, optional): Return row indices with the first", "self['comb_id'][sci_std_idx] = np.arange(len(sci_std_idx), dtype=int) + 1 def set_user_added_columns(self): \"\"\" Set", "where we could add a SPIT option. Args: flag_unknown (:obj:`bool`,", "or more files to use to build the table. strict", "use :func:`unique_configurations` instead. Args: copy (:obj:`bool`, optional): Return a deep", "bits. \"\"\" # Making Columns to pad string array ftype_colmA", "the code behavior. files (:obj:`str`, :obj:`list`, optional): The list of", "to use for sorting is not valid. FileExistsError: Raised if", "= self.type_bitmask.type_names(self['framebit'][indx]) def get_frame_types(self, flag_unknown=False, user=None, merge=True): \"\"\" Generate a", "and include it in the metadata table. The internal table", "backwards compatibility, but we should consider reformatting/removing it. Args: ofile", "in the table. data (table-like, optional): The data to include", "properly nones = usrdata[key] == 'None' usrdata[key][nones] = None #", "`configs is # None` mean that you want all the", "file could be a twilight flat frame that was' +", "without a known type. calib_ID (:obj:`int`, optional): Index of the", "np.where(self.find_frames(ftype))[0] for i in indx: self['calib'][i] = calibs # Set", "usr_row = None else: # TODO: This check should be", "' 'column; run set_configurations.') if os.path.isfile(ofile) and not overwrite: msgs.error('{0}", "by the spectrograph `configuration_keys` method. The latter is not checked.", "data type of the `usrdata` column to the existing data", "(table-like, optional): The data to include in the table. The", "not have unique '.format(cfg_key) + '{0} values.' .format(meta)) # Find", "9: ftype_colm = table.Column(self.type_bitmask.type_names(type_bits), dtype='U9', name='frametype') else: ftype_colm = ftype_colmA", "of the configuration, the calibration group, and the detector. 
The", "+ 'missed by the automatic identification.') b = self.type_bitmask.turn_off(b, flag='standard')", "in self.keys() else self['dichroic'][row] decker = 'none' if 'decker' not", "and DEC must not be None for file:' + msgs.newline()", "(:obj:`bool`, optional): Function will fault if there is a problem", "# Check that the metadata are valid for this column.", "master_key(self, row, det=1): \"\"\" Construct the master key for the", "this (first) configuration self.table['setup'][indx] = cfg_key continue # Find the", "overwitten by the provided type. \"\"\" if not append: self['framebit'][indx]", "setup[skey][str(d).zfill(2)] \\ # = {'binning': binning, 'det': d, # 'namp':", "extras += ['calib', 'comb_id', 'bkg_id'] # manual if write_manual: extras", "lists to fill data = {k:[] for k in self.spectrograph.meta.keys()}", "= np.arange(len(self)) ignore_frames = self.spectrograph.config_independent_frames() if ignore_frames is not None:", "keep track of the calibration group bits. table (:class:`astropy.table.Table`): The", "Args: row (:obj:`int`): The 0-indexed row of the frame. obstime", "is actually what is used to determine the calibration group", "copy=False, rm_none=False): \"\"\" Return the unique instrument configurations. If run", "# Make sure that each calibration group should only contain", "if 'setup' in self.keys() and not force: return if 'setup'", "'column; run set_configurations.') if os.path.isfile(ofile) and not overwrite: msgs.error('{0} already", "to be # removed msg = 'The following frames have", "def set_configurations(self, configs=None, force=False, fill=None): \"\"\" Assign each frame to", "reduction. 
The content of the fits table is dictated by", "output file name ofiles[j] = os.path.join(odir, '{0}.pypeit'.format(root)) # Get the", "= self.table[output_cols][in_cfg] subtbl.sort(['frametype','filename']) with io.StringIO() as ff: subtbl.write(ff, format='ascii.fixed_width') data_lines", "def get_setup(self, row, det=None, config_only=False): \"\"\" Construct the setup dictionary.", "spectrograph={0}, length={1}\\n'.format( self.spectrograph.name, len(self))]) @staticmethod def default_keys(): return [ 'directory',", "frames that have multiple types, the types should be provided", "# Columns for output columns = self.spectrograph.pypeit_file_keys() extras = []", "\"\"\" self.get_frame_types(user=frametype) # TODO: Add in a call to clean_configurations?", "the rows of the table to write. If None, all", "add the column anyway, with the type in `usrdata`. You", "in zip(self['directory'][indx], self['filename'][indx])] def set_frame_types(self, type_bits, merge=True): \"\"\" Set and", "key string without setup and calibbit; ' 'run set_configurations and", "the types and bits into the existing table. This will", "sorted instrument configuration table without \\'setup\\' ' 'column; run set_configurations.')", "remove 'em self.table = self.table[good] def _set_calib_group_bits(self): \"\"\" Set the", "is included. Returns: list: List of file paths that match", "is edited in place. This function can be used to", "the rows that contain the frames of the requested type.", "in the table and specify any validation checks. par (:obj:`pypeit.par.pypeitpar.PypeItPar`):", "self.spectrograph): break j += 1 unique = j == len(self.configs)", "indx = indx[use] if len(indx) == 0: msgs.error('No frames to", "Use the first file to set the first unique configuration", "True, this is done *after* :attr:`configs` is copied to a", "self.keys())].tolist() def set_combination_groups(self, assign_objects=True): \"\"\" Set combination groups. .. 
note::", "based on user-provided data, typically pulled from the PypeIt file.", "that each calibration group should only contain # frames from", "# TODO: Turn this into a DataContainer # Initially tried", "each Spectrograph # if useIDname and 'idname' not in self.keys():", ".. note:: :attr:`table` is edited in place. This function can", "the first occurence of these configurations. configs (:obj:`str`, :obj:`list`, optional):", "metadata keyword, specific to :attr:`spectrograph`. Additional valid keywords, depending on", "combinations of the items in the metadata table listed by", "not in tbl_cols: continue indx = np.where([t == col for", "been set, this simply constructs the configuration dictionary using the", "not in self.keys() and fill is not None: self['setup'] =", "of PypeItMetaData.') usr_row = usrdata[idx] # Add the directory and", "and bits. Args: type_bits (numpy.ndarray): Integer bitmask with the frame", "data defined, so just set all # the frames to", "as these are the root of the # MasterFrames and", "incorrect length.') # Get the columns to return if columns", "latter determines and provides the configurations themselves. This is mostly", "cfg.keys(): _cfg[setup] = {} _cfg[setup]['--'] = deepcopy(cfg[setup]) cfg = _cfg", "description of the logic. The 'calib' column has a string", "else: dtype = self.table[key].dtype # Deal with None's properly nones", "strict (:obj:`bool`, optional): Function will fault if :func:`fits.getheader` fails to", "FileExistsError: Raised if overwrite is False and the file exists.", "== 'None': return self['framebit'] == 0 # Select frames indx", "in _header: f.write(f'# {h}\\n') f.write('\\n') f.write('\\n'.join(data_lines)) f.write('\\n') # Just to", "return self.table.keys() def sort(self, col): return self.table.sort(col) def merge(self, usrdata,", "in range(len(self)): # Convert the string to the group list", "that contain the frames of the requested type. Raises: PypeItError:", "type name and bits. 
\"\"\" # Making Columns to pad", "self.configs.values(): if row_match_config(self.table[i], c, self.spectrograph): break j += 1 unique", "there a reason why this is not an attribute of", "spectrograph ``configuration_keys`` method. If run after the ``'setup'`` column has", "paths in_cfg = self['setup'] == setup if not np.any(in_cfg): continue", "Otherwise, only return the configurations matched to this provided string", "files generated. \"\"\" # Set output path if output_path is", "groups associated with a specific frame. \"\"\" return self.calib_bitmask.flagged_bits(self['calibbit'][row]) #", "parameters used to set the code behavior. If not provided,", "the screen. The method allows you to set the columns", "of the table row to use to construct the configuration.", "be specific to each instrument. Args: output_path (:obj:`str`, optional): Root", "setups dictionary cfg = self.unique_configurations(copy=True, rm_none=True) # TODO: We should", "indx = np.unique(self['setup'], return_index=True) ignore = uniq == 'None' if", "are not # unique for this configuration. if uniq_meta.size !=", "cfgs[key] # Construct file ff = open(ofile, 'w') for setup", "= 0 self['framebit'][indx] = self.type_bitmask.turn_on(self['framebit'][indx], flag=frame_type) self['frametype'][indx] = self.type_bitmask.type_names(self['framebit'][indx]) def", "metadata keys to use to construct the configuration. If None,", "if user is not None!) msgs.info(\"Typing completed!\") return self.set_frame_types(type_bits, merge=merge)", "above if user is not None!) msgs.info(\"Typing completed!\") return self.set_frame_types(type_bits,", "``'setup'`` column has been set, this simply constructs the configuration", "configurations in the provided list. 
return_index (:obj:`bool`, optional): Return row", "msgs.info('Unique configurations ignore frames with type: {0}'.format(ignore_frames)) use = np.ones(len(self),", "self.configs = None self.calib_bitmask = None # Initialize columns that", "group.') @property def n_calib_groups(self): \"\"\"Return the number of calibration groups.\"\"\"", "each frame to a configuration (setup) and include it in", "and bits. \"\"\" # Making Columns to pad string array", "else: output_tbl = output_tbl[tbl_cols] if output == 'table': # Instead", "cfg_keys is None else cfg_keys return {k:self.table[k][indx] for k in", "table without \\'setup\\' ' 'column; run set_configurations.') if os.path.isfile(ofile) and", "isinstance(config[k], float): if row[k] is None: match.append(False) elif np.abs(config[k]-row[k])/config[k] <", "rm_none=rm_none) msgs.info('Using metadata to determine unique configurations.') # If the", "select the configurations to include in the returned objects. If", "the file headers. The table must have a `filename` column", "self.find_frames('standard')], axis=0))[0] self['comb_id'][sci_std_idx] = np.arange(len(sci_std_idx), dtype=int) + 1 def set_user_added_columns(self):", "t def edit_frame_type(self, indx, frame_type, append=False): \"\"\" Edit the frame", "in range(len(self)): if self['calib'][i] in ['all', 'None']: # No information,", "usrdata is None: usr_row = None else: # TODO: This", "None: _header = header if isinstance(header, list) else [header] for", "spectrograph, par, files=None, data=None, usrdata=None, strict=True): if data is None", "usrdata.keys(): raise KeyError('The user-provided table must have \\'filename\\' column!') #", "that this table can be used to set the frame", "col not in tbl_cols: continue indx = np.where([t == col", "Assign each frame to a configuration (setup) and include it", "if config_only else setup def get_configuration_names(self, ignore=None, return_index=False, configs=None): \"\"\"", "calibration groups associated with a 
specific frame. \"\"\" return self.calib_bitmask.flagged_bits(self['calibbit'][row])", "run set_configurations.') if os.path.isfile(ofile) and not overwrite: msgs.error('{0} already exists.", "should be done elsewhere # Check if os.path.basename(ifile) != usrdata['filename'][idx]:", "return self._get_cfgs(copy=copy, rm_none=rm_none) def set_configurations(self, configs=None, force=False, fill=None): \"\"\" Assign", "ff.write('Setup {:s}\\n'.format(setup)) ff.write('\\n'.join(dict_to_lines(cfgs[setup], level=1)) + '\\n') ff.write('#---------------------------------------------------------\\n') mjd = subtbl['mjd'].copy()", "to include in the returned objects. If ``'all'``, pass back", "astropy.table.Table, but that # proved too difficult. class PypeItMetaData: \"\"\"", "setups[use] indx = indx[use] return setups, indx if return_index else", "# Turn on the relevant bits type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftype)", "(e.g., ['A','C']). See :attr:`configs`. Raises: PypeItError: Raised if the 'setup'", "call. # indx &= self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng) indx = self.spectrograph.check_frame_type(ftype,", "fits headers or the data directly. If neither are provided", "0-indexed row of the frame. Returns: astropy.time.Time: The MJD of", "frame_type, append=False): \"\"\" Edit the frame type by hand. Args:", "Raises: PypeItError: Raised if none of the keywords in the", "not set not_setup = self.table['setup'] == 'None' if not np.any(not_setup):", "if sort_col not in self.keys(): raise ValueError(f'Cannot sort by {sort_col}.", "from pypeit.core import framematch from pypeit.core import flux_calib from pypeit.core", "Get the columns to return if columns in [None, 'all']:", "None.') # Find the frames of each type in this", "written (this is here because the spectrograph # needs to", "\"\"\" Write a pypeit file in data-table format. 
The pypeit", "bool: True if the row matches the input configuration \"\"\"", "the table. The type can be anything allowed by the", "exist - if the 'comb_id' column does not exist, this", "standard and science frame. \"\"\" if 'comb_id' not in self.keys():", "{'disperser': {'dispname': dispname, 'dispangle':dispangle}, 'dichroic': dichroic, 'slit': {'decker': decker, 'slitwid':slitwid,", "all configurations. Otherwise, only return the configurations matched to this", "'framebit' in self.keys(): del self.table['framebit'] # # TODO: This needs", "columns are not valid: {0}'.format( ', '.join(tbl_cols[badcol]))) # Make sure", "# Allow for single files _files = files if hasattr(files,", "import PypeItPar from pypeit.par.util import make_pypeit_file from pypeit.bitmask import BitMask", "None and not force: return self._get_cfgs(copy=copy, rm_none=rm_none) if 'setup' in", "and value of the dictionary, respectively. The number of keys", "one configuration. if len(cfg_keys) == 0: self.configs = {} self.configs[cfg_iter[cfg_indx]]", "len(self.table[key].shape) > 1: # NOT ALLOWED!! # TODO: This should", "method. The latter is not checked. If None, this is", "each fits file to use in the data reduction. \"\"\"", "a science frame is # assigned to that group. ngroups", "the frames with # the ignored frame types should be", "in this group #in_group = self.find_calib_group(i) in_cbit = self['calibbit'] ==", "assert isinstance(cfg_limits[key], list), \\ 'CODING ERROR: valid_configuration_values is not correctly", "meta data?? dtype = meta_data_model[key]['dtype'] else: dtype = self.table[key].dtype #", "an exception. header (:obj:`str`, :obj:`list`, optional): One or more strings", "is not defined. 
\"\"\" if 'calibbit' not in self.keys(): msgs.error('Calibration", "(:obj:`dict`): A dictionary with the types designated by the user.", "and initialize the bits self.calib_bitmask = BitMask(np.arange(ngroups)) self['calibbit'] = 0", "QA for icbit in np.unique(self['calibbit'].data): cbit = int(icbit) # for", "frames. \"\"\" if isinstance(indx, (int,np.integer)): return os.path.join(self['directory'][indx], self['filename'][indx]) return [os.path.join(d,f)", "present to determine the calibration # group if 'setup' not", "Args: type_bits (numpy.ndarray): Integer bitmask with the frame types. The", "sorted file. The sorted file lists all the unique instrument", "= self.spectrograph.valid_configuration_values() if cfg_limits is None: # No values specified,", "\"\"\" if self.configs is not None and not force: return", "types have not been set yet. \"\"\" # Configurations have", "by the automatic identification.') b = self.type_bitmask.turn_off(b, flag='standard') continue #", "types have not been defined yet. \"\"\" # Set the", "not in self.keys(): msgs.error('Must have defined \\'setup\\' column first; try", "Write the file ff.write('##########################################################\\n') ff.write('Setup {:s}\\n'.format(setup)) ff.write('\\n'.join(dict_to_lines(cfgs[setup], level=1)) + '\\n')", "or an integer array if index=True, with the rows that", "as ff: output_tbl.write(ff, format='ascii.fixed_width') data_lines = ff.getvalue().split('\\n')[:-1] if ofile is", "specify any validation checks. par (:obj:`pypeit.par.pypeitpar.PypeItPar`): PypeIt parameters used to", "have been ignored ignore_frames = self.spectrograph.config_independent_frames() if ignore_frames is None:", "to select the configurations to include in the returned objects.", "the configuration. This will change! # The configuration must be", "Name for the output sorted file. 
overwrite (:obj:`bool`, optional): Overwrite", "for manual extraction Raises: PypeItError: Raised if the 'setup' isn't", "table with the relevant metadata for each fits file to", "we should consider reformatting it. And it may be something", "a combination of instrument-specific checks using # combinations of the", "Find unique configurations, always ignoring any 'None' # configurations... cfg", "indx = np.arange(len(self)) ignore_frames = self.spectrograph.config_independent_frames() if ignore_frames is not", "return_index=False, configs=None): \"\"\" Get the list of the unique configuration", "if len(cfg_keys) == 0: msgs.error('No setups to write!') # Grab", "to set the first unique configuration self.configs = {} self.configs[cfg_iter[cfg_indx]]", "find_calib_group(self, grp): \"\"\" Find all the frames associated with the", "If False, all existing frame types are overwitten by the", "PypeIt orientation binning of a science image } } }", "defined. copy (:obj:`bool`, optional): Return a deep copy of :attr:`configs`", "may not be required Returns: dict: Dictionary with the data", "self.keys() else self['slitlen'][row] binning = '1,1' if 'binning' not in", "that match any of the # meta data values indx", "a valid column.') # Ignore any NoneTypes indx = output_tbl[sort_col]", "0 and match_type: for key in existing_keys: if len(self.table[key].shape) >", "with open(ofile, 'w') as f: if header is not None:", "the writing routines. Args: ignore (:obj:`list`, optional): Ignore configurations in", "to be reconstructed if the 'calib' column already exists. 
Raises:", "Warn that table will be empty msgs.warn('Both data and files", "self['calib'][i] = calibs # Set the bits based on the", "\"\"\" if 'calibbit' not in self.keys(): msgs.error('Calibration groups are not", "ignore is not None: for key in cfgs.keys(): if key", "provide master key string without setup and calibbit; ' 'run", "a preconstructed # pypeit file, which should nominally follow an", "setup has only one configuration. if len(cfg_keys) == 0: self.configs", "(:obj:`bool`, optional): Force the calibration groups to be reconstructed if", "self['slitwid'][row] slitlen = 'none' if 'slitlen' not in self.keys() else", "the existing table. This will *overwrite* any existing columns. Returns:", "in the selected calibration group. Raises: PypeItError: Raised if the", "write_bkg_pairs: extras += ['calib', 'comb_id', 'bkg_id'] # manual if write_manual:", "to set the code behavior. If not provided, the default", "1 msgs.info('Found {0} unique configurations.'.format(len(self.configs))) return self._get_cfgs(copy=copy, rm_none=rm_none) def set_configurations(self,", "just not using it... Args: frametype (:obj:`dict`): A dictionary with", "is not None and not force: return self._get_cfgs(copy=copy, rm_none=rm_none) if", "force: return # Groups have been set but the bits", "frame type to continue if 'frametype' not in self.keys(): msgs.error('To", "dictionary with the metadata values from the selected row. \"\"\"", "the fits headers headarr = self.spectrograph.get_headarr(ifile, strict=strict) # Grab Meta", "returned object provides the indices of the first occurrence of", "for file output. 
\"\"\" _obstime = self.construct_obstime(row) if obstime is", "Args: columns (:obj:`list`): List of column names types (:obj:`list`): List", "self['idname'] == self.spectrograph.idname(ftype) if useIDname \\ # else np.ones(len(self), dtype=bool)", "len(user.keys()) != len(self): raise ValueError('The user-provided dictionary does not match", "(setup) and include it in the metadata table. The internal", "all the configurations? Or can we # make the default", "calibration group.') @property def n_calib_groups(self): \"\"\"Return the number of calibration", "the calibration bit in the keyword allows MasterFrames to be", "raise FileExistsError(f'{ofile} already exists; set flag to overwrite.') # Check", "should edit the relevant follow-on code so that we #", "Check the rows input if rows is not None and", "int(n) for n in self['calib'][i].replace(':',',').split(',')]) # Check against current maximum", "data to assign to :attr:`table`. \"\"\" # Allow for single", "calibration bits for i in range(len(self)): # Convert the string", "not (likely because the # data was read from a", "< 9: ftype_colm = table.Column(self.type_bitmask.type_names(type_bits), dtype='U9', name='frametype') else: ftype_colm =", "master key string without setup and calibbit; ' 'run set_configurations", "indx): \"\"\" Return the full paths to one or more", "defined. \"\"\" if 'calibbit' not in self.keys(): msgs.error('Calibration groups are", "dictionary for a given frame. This is not the same", "the pypeit file. Each selected column must be a valid", "files with a given frame type. The frames must also", "write to the top of the file, on string per", "# TODO: Placeholder: Allow an empty set of configuration keys", "frame type is included. Returns: list: List of file paths", "moved into each Spectrograph # if useIDname and 'idname' not", "not yet been defined (see :func:`get_frame_types`), this method will fault!", "do this here? 
indx = self.type_bitmask.flagged(type_bits, flag='standard') for b, f,", "return self._get_cfgs(copy=copy, rm_none=rm_none) # Use the first file to set", "is not None: self.merge(usrdata) # Impose types on specific columns", "compatibility, but we should consider reformatting it. And it may", "in self.keys() and fill is not None: self['setup'] = fill", "table.Table([ftype_colm, fbits_colm]) if merge: self['frametype'] = t['frametype'] self['framebit'] = t['framebit']", ":obj:`list`, optional): One or more strings to write to the", "type names and the type bits. See :class:`pypeit.core.framematch.FrameTypeBitMask` for the", "optional): A nested dictionary, one dictionary per configuration with the", "the output directory root = '{0}_{1}'.format(self.spectrograph.name, setup) odir = os.path.join(output_path,", "filenames = np.asarray(data['filename']) bad_files = filenames[mjd == None] # Print", "type identifier. See the keys for :class:`pypeit.core.framematch.FrameTypeBitMask`. If set to", "Boolean array selecting those frames in the table included in", "with the frame types to use in all calibration groups", "with invalid {0}.'.format(key)) good &= indx if np.all(good): # All", "configurations. If :func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames` does not return None and the frame", "max_width=-1, descr_vals=['PypeItMetaData: spectrograph={0}, length={1}\\n'.format( self.spectrograph.name, len(self))]) @staticmethod def default_keys(): return", "np.any(indx): continue subtbl = self.table[output_cols][indx] # Write the file ff.write('##########################################################\\n')", "if the 'setup' isn't been defined. \"\"\" if 'setup' not", "calibration frames into sets. Requires the 'setup' column to have", "rows. merge (:obj:`bool`, optional): Merge the types and bits into", "Construct the master key for the file in the provided", "to -1. 
Args: assign_objects (:obj:`bool`, optional): If all of 'comb_id'", "find_frame_calib_groups(self, row): \"\"\" Find the calibration groups associated with a", "not np.all(indx): msgs.warn('Found frames with invalid {0}.'.format(key)) good &= indx", "self['dec'][indx]): if ra == 'None' or dec == 'None': msgs.warn('RA", "col): return self.table.sort(col) def merge(self, usrdata, match_type=True): \"\"\" Use the", "self.spectrograph.get_meta_value(headarr, meta_key, required=strict, usr_row=usr_row, ignore_bad_header = self.par['rdx']['ignore_bad_headers']) if isinstance(value, str)", "set in table; cannot use it for file typing.') #", "`bkg_id` that identify object and background frame pairs. write_manual (:obj:`bool`,", "have been defined. For now this is a simple grouping", "by setting `match_type=False`. Args: usrdata (:obj:`astropy.table.Table`): A user provided set", "QA now; but could probably use the pypeit file instead", "all calibration groups (e.g., ['bias', 'dark']). default (:obj:`bool`, optional): If", "include the top-level designation of the configuration itself. Returns: dict:", "# this if the frames to meet the other checks", "file):\\n' indx = np.where(np.logical_not(good))[0] for i in indx: msg +=", "per file line; ``# `` is added to the beginning", "files in :attr:`table`. For frames that have multiple types, the", "compatibility, but we should consider reformatting/removing it. - This is", "or not a science frame is # assigned to that", "to this provided string or list of strings (e.g., ['A','C']).", "unique configurations.'.format(len(self.configs))) return self._get_cfgs(copy=copy, rm_none=rm_none) msgs.info('Using metadata to determine unique", "frames with type: {0}'.format(ignore_frames)) use = np.ones(len(self), dtype=bool) for ftype", "Generate a table of frame types from the input metadata", "single calibration group *for all rows*. 
force (:obj:`bool`, optional): Force", "unique = j == len(self.configs) if unique: if cfg_indx ==", "in extras: if key not in columns: columns += [key]", "return f\"{self['setup'][row]}_{self['calibbit'][row]}_{det_name}\" def construct_obstime(self, row): \"\"\" Construct the MJD of", "if requested and 'calib' doesn't exist yet if 'calib' not", "of the table to write. If None, all rows are", "clean_configurations? I didn't add it # here, because this method", "by :func:`~pypeit.spectrographs.spectrograph.Spectrograph.valid_configuration_values`. \"\"\" cfg_limits = self.spectrograph.valid_configuration_values() if cfg_limits is None:", "configs (:obj:`str`, :obj:`list`, optional): One or more strings used to", "relevant fits file metadata used during the reduction. The content", "Get the list of the unique configuration names. This provides", "will fault! Args: force (:obj:`bool`, optional): Force the configurations to", "the file can be written (this is here because the", "the frame type to continue if 'frametype' not in self.keys():", "usrdata[idx] # Add the directory and file name to the", "# Convert types if possible existing_keys = list(set(self.table.keys()) & set(usrdata.keys()))", "return # Alert the user that some of the frames", "to read. This function writes the columns selected by the", "user data in the table for key in usrdata.keys(): self.table[key]", "to assign to :attr:`table`. \"\"\" # Allow for single files", "# Find the number groups by searching for the maximum", "= os.path.join(output_path, root) if not os.path.isdir(odir): os.makedirs(odir) # Create the", "compatability) if key in ['ra', 'dec'] and not radec_done: ras,", "& or | ? Using idname above gets overwritten by", "set to -1. 
Args: assign_objects (:obj:`bool`, optional): If all of", "+ msgs.newline() + 'missed by the automatic identification.') b =", "file for PypeIt, configuring the control-flow and algorithmic parameters and", "(:obj:`str`, :obj:`list`): One or more frame types to append/overwrite. append", "of whether or not a science frame is # assigned", "# reverse order so I can always insert at the", "for setup in cfgs.keys(): # Get the subtable of frames", "internal :attr:`configs`. If this attribute is not None, this function", "Can be an array of indices or a boolean array", "# Unique configurations setups, indx = np.unique(self['setup'], return_index=True) if ignore", "PypeItMetaData.') usr_row = usrdata[idx] # Add the directory and file", "run ' 'get_frame_types.') # For each configuration, determine if any", "column does not exist if 'calibbit' in self.keys(): del self['calibbit']", "are not valid: {0}'.format( ', '.join(tbl_cols[badcol]))) # Make sure the", "return a Table with the frame types and bits. Args:", "provides just the list of setup identifiers ('A', 'B', etc.)", "``'setup'`` column is initialized, this function determines the unique instrument", "= self.spectrograph.get_meta_value(headarr, meta_key, required=strict, usr_row=usr_row, ignore_bad_header = self.par['rdx']['ignore_bad_headers']) if isinstance(value,", "Find all the frames associated with the provided calibration group.", "ftype == 'science' \\ else self.par['calibrations']['{0}frame'.format(ftype)]['exprng'] # TODO: Use &", "construct the setup. det (:obj:`int`, optional): The 1-indexed detector to", "to specify 'all' here? Can't `configs is # None` mean", "is provided, the frames must also be matched to the", "the calibration groups are valid. This currently only checks that", "the configuration dictionary for a given frame. This is not", "of the configuration itself. Returns: dict: The pypeit setup dictionary", "this class; use :func:`unique_configurations` instead. 
Args: copy (:obj:`bool`, optional): Return", "extras = [] # comb, bkg columns if write_bkg_pairs: extras", "instantiation of PypeItMetaData.' ' The table will be empty!') #", "valid for this column. indx = np.isin(self[key], cfg_limits[key]) if not", "if sort_col is not None: if sort_col not in self.keys():", "would have been written/printed if ``output == 'table'``. Otherwise, the", "match_type=True): \"\"\" Use the provided table to supplement or overwrite", "# Deal with None's properly nones = usrdata[key] == 'None'", "if useIDname \\ # else np.ones(len(self), dtype=bool) # Include a", "format='ascii.fixed_width') data_lines = ff.getvalue().split('\\n')[:-1] if ofile is None: # Output", "'directory']: if col not in tbl_cols: continue indx = np.where([t", "'slit': {'decker': decker, 'slitwid':slitwid, 'slitlen':slitlen}, 'binning': binning, # PypeIt orientation", "in are written; if ``'pypeit'``, the columns are the same", "time.Time(self['mjd'][row], format='mjd') def construct_basename(self, row, obstime=None): \"\"\" Construct the root", "into a DataContainer # Initially tried to subclass this from", "be done elsewhere # Check if os.path.basename(ifile) != usrdata['filename'][idx]: msgs.error('File", "data type in `usrdata` to the type in the internal", "checks. par (:obj:`pypeit.par.pypeitpar.PypeItPar`): PypeIt parameters used to set the code", "this meta data?? dtype = meta_data_model[key]['dtype'] else: dtype = self.table[key].dtype", "ftype in ignore_frames: use &= np.logical_not(self.find_frames(ftype)) indx = indx[use] if", "cfg_limits.keys(): # NOTE: For now, check that the configuration values", "everything from the same configuration to the same # calibration", "instrument configuration for {0} '.format(ftype) + 'frames, configuration {0} does", "i in range(nrows): for d, cfg in _configs.items(): if row_match_config(self.table[i],", "\"\"\" Construct the setup dictionary. .. 
todo:: - This is", "the list of columns to be included in the fitstbl", "configuration. :attr:`table` is modified in-place. See also: :func:`pypeit.pypeitsetup.PypeItSetup.run`. .. todo::", "(for frame type) if usrdata is None: usr_row = None", "table can be used to set the frame type of", "in data-table format. The pypeit file is the main configuration", "unique configurations in this group, ignoring any # undefined ('None')", "are the first few columns; do them in # reverse", "self.table['setup'][i] = d # Check if any of the configurations", "to the existing data type. If it can't it will", "_check_calib_groups(self): \"\"\" Check that the calibration groups are valid. This", "exists, the configurations are **not** reset unless you call the", "to a single calibration group, if the 'calib' column does", "the string 'None', this returns all frames without a known", "the 'setup' or 'calibbit' columns haven't been defined. \"\"\" if", "TODO: Why do we need to specify 'all' here? Can't", "NoneTypes indx = output_tbl[sort_col] != None is_None = np.logical_not(indx) srt", "import string from copy import deepcopy import datetime from IPython", "def construct_basename(self, row, obstime=None): \"\"\" Construct the root name primarily", "(:obj:`list`, optional): A list of strings with the frame types", "initialize the bits self.calib_bitmask = BitMask(np.arange(ngroups)) self['calibbit'] = 0 #", "rm_none=rm_none) def set_configurations(self, configs=None, force=False, fill=None): \"\"\" Assign each frame", "also match the science frame index, if it is provided.", "exist, it is created. 
cfg_lines (:obj:`list`, optional): The list of", "of the groups self._set_calib_group_bits() # Check that the groups are", "been set if 'setup' in self.keys() and not force: return", "a `filename` column that is used to match to the", "paths that match the frame type and science frame ID,", "not None and len(rows) != len(self.table): raise ValueError('Boolean vector selecting", "astropy import table, coordinates, time, units from pypeit import msgs", "PypeItPar.') self.type_bitmask = framematch.FrameTypeBitMask() # Build table self.table = table.Table(data", "io.StringIO() as ff: subtbl.write(ff, format='ascii.fixed_width') data_lines = ff.getvalue().split('\\n')[:-1] # Write", "we're still done return # At this point, we need", "\"\"\" _obstime = self.construct_obstime(row) if obstime is None else obstime", "if not np.any(in_cfg): continue paths = np.unique(self['directory'][in_cfg]).tolist() # Get the", "_configs) setups = setups[use] indx = indx[use] return setups, indx", "by the provided spectrograph class. For the data table, one", "configurations to the provided `setup` - assigns all frames to", "in global_frames: indx = np.where(self.find_frames(ftype))[0] for i in indx: self['calib'][i]", "output file. Returns: `astropy.table.Table`: The table object that would have", "not in self.keys() else self['slitlen'][row] binning = '1,1' if 'binning'", "# Copy the internal table so that it is unaltered", "radec_done = True else: usrdata[key][~nones] = usrdata[key][~nones].astype(dtype) # Include the", "optional): Add additional ``PypeIt`` columns for manual extraction Raises: PypeItError:", "contents are printed to the screen. If ``'table'``, the table", "that the user *might* add .. 
note:: :attr:`table` is edited", "The table with the relevant metadata for each fits file", "configurations by finding unique combinations of the items in the", "actually what is used to determine the calibration group of", "if calib_ID is not None: # Select frames in the", "np.unique(self['calibbit'].data): cbit = int(icbit) # for yaml # Skip this", "name and bits. \"\"\" # Making Columns to pad string", "instrument configurations by finding unique combinations of the items in", "not None: if len(user.keys()) != len(self): raise ValueError('The user-provided dictionary", "fitstbl matches the input configuration Args: row (astropy.table.Row): From fitstbl", "for the allowed frame types. \"\"\" # Checks if 'frametype'", "a single calibration group, if the 'calib' column does not", "values of the 'calib' column. \"\"\" # Find the number", "to set the frame type of each fits file. calib_bitmask", "have been defined; run ' 'get_frame_types.') # For each configuration,", "metadata is validated using checks specified by the provided spectrograph", "configuration Args: row (astropy.table.Row): From fitstbl config (dict): Defines the", "# Key names *must* match configuration_keys() for spectrographs setup =", "will *overwrite* any existing columns. Returns: `astropy.table.Table`: Table with two", "len(indx) == 0: msgs.error('No frames to use to define configurations!')", "data_lines = ff.getvalue().split('\\n')[:-1] # Write the file make_pypeit_file(ofiles[j], self.spectrograph.name, [],", "it print('\\n'.join(data_lines)) return None # Write the output to an", "global_frames is not None: if 'frametype' not in self.keys(): msgs.error('To", "when some frames cannot be assigned to a configuration, the", "= setups[rm] indx = indx[rm] # Restrict _configs = None", "without a type and continue. user (:obj:`dict`, optional): A dictionary", "for each science or standard frame, see :func:`set_combination_groups`. .. 
note::", "Additional valid keywords, depending on the processing level of the", "+ f) msgs.warn('The above file could be a twilight flat", "& set(usrdata.keys())) radec_done = False if len(existing_keys) > 0 and", "for a preconstructed # pypeit file, which should nominally follow", "definition. # This should probably go somewhere else or just", "of types \"\"\" for c,t in zip(columns, types): if c", "not using it... Args: frametype (:obj:`dict`): A dictionary with the", "the MJD of when the frame was observed. .. todo::", "ignored if `data` is also provided. This functionality is only", "of this type without a # configuration indx = (self.table['setup']", "Initialize: Flag frames with the correct ID name or start", "comb, bkg columns if write_bkg_pairs: extras += ['calib', 'comb_id', 'bkg_id']", "os import io import string from copy import deepcopy import", "MJD of the observation. If None, constructed using :func:`construct_obstime`. Returns:", "or corrupt files we still want this to run. #", "'dithpos', 'dithoff', 'calib', 'comb_id', 'bkg_id']: if col not in tbl_cols:", "the setup dictionary. .. todo:: - This is for backwards", "of a listed standard, # then it is probably a", "ValueError(f'Cannot sort by {sort_col}. Not a valid column.') # Ignore", "# NOT ALLOWED!! # TODO: This should be converted to", "may modify that attribute directly. The valid values for configuration", "{0} unique configurations.'.format(len(self.configs))) return self._get_cfgs(copy=copy, rm_none=rm_none) def set_configurations(self, configs=None, force=False,", "any configurations set to 'None'. 
If copy is True, this", "continue paths = np.unique(self['directory'][in_cfg]).tolist() # Get the data lines subtbl", "configs == ['all']: cfg_keys = list(cfg.keys()) else: _configs = configs", "is None else configs for k, cfg in _configs.items(): if", "Any frame types returned by the :func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames` method for :attr:`spectrograph`", "import framematch from pypeit.core import flux_calib from pypeit.core import parse", "a valid pypeit metadata keyword, specific to :attr:`spectrograph`. Additional valid", "key in cfg_limits.keys(): # NOTE: For now, check that the", "following columns are not valid: {0}'.format( ', '.join(tbl_cols[badcol]))) # Make", "ignore = uniq == 'None' if np.sum(ignore) > 0: msgs.warn('Ignoring", "dictionary per configuration with the associated metadata for each. \"\"\"", "to always return the # correct type... if int(str(ftype_colmA.dtype)[2:]) <", "and set_calibration_groups.') det_name = self.spectrograph.get_det_name(det) return f\"{self['setup'][row]}_{self['calibbit'][row]}_{det_name}\" def construct_obstime(self, row):", "make_pypeit_file(ofiles[j], self.spectrograph.name, [], cfg_lines=cfg_lines, setup_lines=setup_lines, sorted_files=data_lines, paths=paths) # Return return", "of PypeItMetaData. Args: files (:obj:`str`, :obj:`list`): One or more files", "set of objects (science or standard frames) to a unique", "f.write(f'# {h}\\n') f.write('\\n') f.write('\\n'.join(data_lines)) f.write('\\n') # Just to be explicit", "par if not isinstance(self.par, PypeItPar): raise TypeError('Input parameter set must", "for t in tbl_cols])[0][0] if indx != ncol-1: tbl_cols.insert(ncol-1, tbl_cols.pop(indx))", "self.keys(): msgs.error('Calibration groups are not set. First run set_calibration_groups.') return", "true # indx = self['idname'] == self.spectrograph.idname(ftype) if useIDname \\", "by the user. 
The file name and type are expected", "self.keys() else self['slitwid'][row] slitlen = 'none' if 'slitlen' not in", "# frames from a single configuration if len(setup) != 1:", "item): return self.table.__getitem__(item) def __setitem__(self, item, value): return self.table.__setitem__(item, value)", "ngroups) if grp is None: # No group selected continue", "TODO: This needs to be moved into each Spectrograph #", "detailed description of the logic. The 'calib' column has a", "will yield good PypeIt reductions. Any frames that do not", "is unaltered output_tbl = self.table.copy() # Select the output rows", "when building the metadata from the fits files. strict (:obj:`bool`,", "in configs: configs.remove('None') # Ignore frames with undefined configurations n_cfg", "ignore (:obj:`list`, optional): Ignore calibration groups in the provided list.", "# Return return np.where(indx)[0] if index else indx def find_frame_files(self,", "to return. Can be an array of indices or a", "frame types to use in all calibration groups (e.g., ['bias',", "== 'science' \\ else self.par['calibrations']['{0}frame'.format(ftype)]['exprng'] # TODO: Use & or", "= None else: # TODO: This check should be done", "'{0} values.' .format(meta)) # Find the frames of this type", "be used to initialize columns that the user might add", "the groups self._set_calib_group_bits() # Check that the groups are valid", "empty or have corrupt headers:\\n' for file in bad_files: msg", ".. todo:: - Consolidate with :func:`convert_time` ? Args: row (:obj:`int`):", "!= ncol-1: tbl_cols.insert(ncol-1, tbl_cols.pop(indx)) # Copy the internal table so", "to the provided spectrograph are used. 
configs (:obj:`dict`): A dictionary", "= self.get_configuration(indx[i]) msgs.info('Found {0} unique configurations.'.format(len(self.configs))) return self._get_cfgs(copy=copy, rm_none=rm_none) msgs.info('Using", "self.table[c] = self.table[c].astype(t) def _build(self, files, strict=True, usrdata=None): \"\"\" Generate", "if user is not None: if len(user.keys()) != len(self): raise", "values of the metadata associated with each configuration. The metadata", "Ignore configurations in the provided list. write_bkg_pairs (:obj:`bool`, optional): Add", "now this is a simple grouping of frames with the", "self.type_bitmask.keys(): #ftype_in_group = self.find_frames(key) & in_group ftype_in_group = self.find_frames(key) &", "it may be something to put in the relevant spectrograph", "configuration. if len(cfg_keys) == 0: self.configs = {} self.configs[cfg_iter[cfg_indx]] =", "is for backwards compatibility, but we should consider reformatting/removing it.", "additional ``PypeIt`` columns for manual extraction Raises: PypeItError: Raised if", "the unique configurations in that column. This is used to", "sort_col=None, overwrite=False, header=None): \"\"\" Write the metadata either to a", "instrument name self.spectrograph.vet_instrument(self.table) def _impose_types(self, columns, types): \"\"\" Impose a", "the user *might* add .. note:: :attr:`table` is edited in", "None: if sort_col not in self.keys(): raise ValueError(f'Cannot sort by", "the frame was observed. .. todo:: - Consolidate with :func:`convert_time`", "for c in self.configs.values(): if row_match_config(self.table[i], c, self.spectrograph): break j", "not None) if ignore is not None: for key in", "is not None and os.path.isfile(ofile) and not overwrite: raise FileExistsError(f'{ofile}", "optional): The list of metadata keys to use to construct", "groups are not set. 
First run set_calibration_groups.') return self.calib_bitmask.flagged(self['calibbit'].data, grp)", "Returns: :obj:`str`: Master key with configuration, calibration group(s), and detector.", "converted to an assert statement... raise ValueError('CODING ERROR: Found high-dimensional", "returns all frames without a known type. calib_ID (:obj:`int`, optional):", "configuration_keys() for spectrographs setup = {skey: {'--': {'disperser': {'dispname': dispname,", "for now the DEIMOS image reader will # fault. self.set_configurations(fill=setup)", "(:obj:`int`, optional): The 1-indexed detector to include. If None, all", "_set_calib_group_bits(self): \"\"\" Set the calibration group bit based on the", "= cfg_key def clean_configurations(self): \"\"\" Ensure that configuration-defining keywords all", "'comb_id' column does not exist, this sets the combination groups", "that attribute directly. The valid values for configuration keys is", "used. configs (:obj:`dict`): A dictionary of the unique configurations identified.", "'0' if n_cfg == 1 else ','.join(np.arange(n_cfg).astype(str)) for ftype in", "the frame type by hand. Args: indx (:obj:`int`): The 0-indexed", "see :func:`set_combination_groups`. .. note:: This should only be run if", "not isinstance(self.par, PypeItPar): raise TypeError('Input parameter set must be of", "Groups have been set but the bits have not (likely", "PypeItMetaData: \"\"\" Provides a table and interface to the relevant", "{} self.configs[cfg_iter[cfg_indx]] = self.get_configuration(indx[0], cfg_keys=cfg_keys) cfg_indx += 1 # Check", "object - sets all the configurations to the provided `setup`", "can we # make the default 'all'? if configs is", "unavailable keywords!'.format(k)) self.table['setup'] = 'None' nrows = len(self) for i", "the data reduction. \"\"\" def __init__(self, spectrograph, par, files=None, data=None,", "checks in this call. 
# indx &= self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng)", "# No group selected continue # Assign the group; ensure", "always return the # correct type... if int(str(ftype_colmA.dtype)[2:]) < 9:", "set flag to overwrite.') # Check the rows input if", "for {0} '.format(ftype) + 'frames, configuration {0} does not have", "else configs for k, cfg in _configs.items(): if len(set(cfg.keys()) -", "return '{0}-{1}_{2}_{3}{4}'.format(self['filename'][row].split('.fits')[0], self['target'][row].replace(\" \", \"\"), self.spectrograph.camera, datetime.datetime.strftime(dtime, '%Y%m%dT'), tiso.value.split(\"T\")[1].replace(':','')) def", "i in range(len(self)): if not is_science[i]: continue if len(self.calib_bitmask.flagged_bits(self['calibbit'][i])) >", "'setup' not in self.keys() or 'calibbit' not in self.keys(): msgs.error('Cannot", "_header: f.write(f'# {h}\\n') f.write('\\n') f.write('\\n'.join(data_lines)) f.write('\\n') # Just to be", "None and os.path.isfile(ofile) and not overwrite: raise FileExistsError(f'{ofile} already exists;", "# meta data values indx &= np.isin(self.table[metakey], uniq_meta) self.table['setup'][indx] =", "str_i in string.ascii_uppercase for str_j in string.ascii_uppercase] cfg_iter = list(string.ascii_uppercase)", "if the frames to meet the other checks in this", "else: _configs = configs if isinstance(configs, list) else [configs] cfg_keys", "Some frame types may have been ignored ignore_frames = self.spectrograph.config_independent_frames()", "in self.keys(): msgs.error('Must have defined \\'setup\\' column first; try running", "these setups, if requested. Raises: PypeItError: Raised if the 'setup'", "0-indexed row of the frame. obstime (:class:`astropy.time.Time`, optional): The MJD", "Create the output directory root = '{0}_{1}'.format(self.spectrograph.name, setup) odir =", "column. 
indx = np.isin(self[key], cfg_limits[key]) if not np.all(indx): msgs.warn('Found frames", "# Set the calibration bits for i in range(len(self)): #", "to supplement or overwrite metadata read from the file headers.", "If the output directory does not exist, it is created.", "and background frame pairs. write_manual (:obj:`bool`, optional): Add additional ``PypeIt``", "else np.ones(len(self), dtype=bool) # Include a combination of instrument-specific checks", "columns selected by the :func:`pypeit.spectrographs.spectrograph.Spectrograph.pypeit_file_keys`, which can be specific to", "to the screen. If ``'table'``, the table that would have", "ignore=None, return_index=False, configs=None): \"\"\" Get the list of the unique", "array of 0-indexed indices instead of a boolean array. Returns:", "any of the provided files; see :func:`pypeit.spectrographs.spectrograph.get_headarr`. Set to False", "warning:: Any frame types returned by the :func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames` method for", "get around this. Is it related to # this change?", "self['setup'] == setup if not np.any(in_cfg): continue paths = np.unique(self['directory'][in_cfg]).tolist()", "identified. type_bitmask (:class:`pypeit.core.framematch.FrameTypeBitMask`): The bitmask used to set the frame", "Using the calibration bit in the keyword allows MasterFrames to", "ftype (str): The frame type identifier. See the keys for", "to a unique integer. If the 'comb_id' or 'bkg_id' columns", "the table is printed in its current state. 
overwrite (:obj:`bool`,", "writing, just return the modified table return output_tbl # Always", "(:obj:`bool`, optional): Overwrite any existing file with the same name.", "be empty or have corrupt headers:\\n' for file in bad_files:", "is not None: if sort_col not in self.keys(): raise ValueError(f'Cannot", "is None: output_path = os.getcwd() # Find unique configurations, always", "run if all files are from a single instrument configuration.", "indx = np.unique(self['setup'], return_index=True) if ignore is not None: #", "= None self.calib_bitmask = None # Initialize columns that the", "per configuration with the associated metadata for each. \"\"\" _cfg", "it can't it will just add the column anyway, with", "the string representation of the groups self._set_calib_group_bits() # Check that", "else columns.split(',') badcol = [col not in all_cols for col", "\"setup\" dictionary. Args: indx (:obj:`int`): The index of the table", "now the DEIMOS image reader will # fault. self.set_configurations(fill=setup) self.set_calibration_groups(default=True)", "[None, 'all']: tbl_cols = list(self.keys()) elif columns == 'pypeit': tbl_cols", "data directly. If neither are provided the table is instantiated", "One or more strings used to select the configurations to", "grp = parse.str2list(self['calib'][i], ngroups) if grp is None: # No", "metadata keywords in the dictionary should be the same as", "and np.all(self['comb_id'] < 0): # find_frames will throw an exception", "string.ascii_uppercase for str_j in string.ascii_uppercase] cfg_iter = list(string.ascii_uppercase) + double_alphabet", "= t['frametype'] self['framebit'] = t['framebit'] return t def edit_frame_type(self, indx,", "self.type_bitmask.turn_on(type_bits[indx], flag=ftypes.split(',')) return self.set_frame_types(type_bits, merge=merge) # Loop over the frame", "without a # configuration indx = (self.table['setup'] == 'None') &", "user. 
The file name and type are expected to be", "return data # TODO: In this implementation, slicing the PypeItMetaData", "List of column names types (:obj:`list`): List of types \"\"\"", "configuration set to None.'.format( np.sum(ignore))) self.configs = {} for i", "header cards mjd[mjd == None] = -99999.0 isort = np.argsort(mjd)", "descr_vals=['PypeItMetaData:\\n', ' spectrograph={0}\\n'.format( self.spectrograph.name), ' length={0}\\n'.format(len(self))]) def _repr_html_(self): return self.table._base_repr_(html=True,", "groups self._set_calib_group_bits() # Check that the groups are valid self._check_calib_groups()", "# Build lists to fill data = {k:[] for k", "the frame. Returns: astropy.time.Time: The MJD of the observation. \"\"\"", "'directory', 'filename', 'instrume' ] def keys(self): return self.table.keys() def sort(self,", "will be truncated at 4 characters. self.table['calib'] = np.full(len(self), 'None',", "calib_ID=calib_ID)) def frame_paths(self, indx): \"\"\" Return the full paths to", "Force the configurations to be redetermined. Otherwise the configurations are", "the configuration spectrograph (pypeit.spectrographs.spectrograph.Spectrograph): Used to grab the rtol value", "name='framebit') t = table.Table([ftype_colm, fbits_colm]) if merge: self['frametype'] = t['frametype']", "file in data-table format. The pypeit file is the main", "(:obj:`bool`, optional): Add additional ``PypeIt`` columns for manual extraction Returns:", "force (:obj:`bool`, optional): Force the configurations to be redetermined. Otherwise", "by config_indpendent_frames are not ' \\ 'correctly defined for {0};", "cfg_keys=cfg_keys) cfg_indx += 1 # Check if any of the", "if ignore is not None: # Remove the selected configurations", "there are unidentified files, leave without a type and continue.", "configurations are **not** reset unless you call the function with", "is True. Returns: :obj:`list`: List of ``PypeIt`` files generated. 
\"\"\"", "must be a list.'.format(self.spectrograph.__class__.__name__) # Check that the metadata are", "some frame types to be used in all calibration groups", "def default_keys(): return [ 'directory', 'filename', 'instrume' ] def keys(self):", "Returns: numpy.array: The list of unique setup names. A second", "defined. \"\"\" if 'setup' not in self.keys() or 'calibbit' not", "= table.Column(self.type_bitmask.type_names(type_bits), dtype='U9', name='frametype') else: ftype_colm = ftype_colmA fbits_colm =", "If :func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames` does not return None and the frame types", "global_frames=None, default=False, force=False): \"\"\" Group calibration frames into sets. Requires", "# TODO: Do we want to do this here? indx", "has not yet been defined. copy (:obj:`bool`, optional): Return a", "already been set if 'setup' in self.keys() and not force:", "set. First run get_frame_types.') if ftype == 'None': return self['framebit']", "each frame from that configuration with a given calibration group.", "place*. If the 'setup' column already exists, the configurations are", "format with io.StringIO() as ff: output_tbl.write(ff, format='ascii.fixed_width') data_lines = ff.getvalue().split('\\n')[:-1]", "image reader will # fault. self.set_configurations(fill=setup) self.set_calibration_groups(default=True) self.set_combination_groups() def get_configuration(self,", "# Create the output file name ofiles[j] = os.path.join(odir, '{0}.pypeit'.format(root))", "DEC must not be None for file:' + msgs.newline() +", "self.find_calib_group(calib_ID) # Return return np.where(indx)[0] if index else indx def", "the fits files. strict (:obj:`bool`, optional): Function will fault if", "optional): One or more strings used to select the configurations", "'calib' in self.keys() and 'calibbit' not in self.keys() and not", "few columns ncol = len(tbl_cols) for col in ['dithpat', 'dithpos',", "with each configuration. 
The output data table is identical to", "parameters used to set the code behavior. files (:obj:`str`, :obj:`list`,", "== 'None': msgs.warn('RA and DEC must not be None for", "PypeIt file. This function: - sets the frame types based", "== cbit # Find the unique configurations in this group,", "not in self.keys() else self['dichroic'][row] decker = 'none' if 'decker'", "return the dictionary with the configuration, don't include the top-level", "the metadata associated with each configuration. The metadata keywords in", "or the data directly. If neither are provided the table", "Raised if the `framebit` column is not set in the", "should be the same as in the table, and the", "else self.par['calibrations']['{0}frame'.format(ftype)]['exprng'] # TODO: Use & or | ? Using", "(:obj:`list`, optional): The list of configuration lines to include in", "string. Ignored if ``output`` does not specify an output file.", "as a list directly or as a comma-separated string. If", "{0}. Returning {1}.'.format( meta_key, value)) data[meta_key].append(value) msgs.info('Added metadata for {0}'.format(os.path.split(ifile)[1]))", "= 'The following frames have configurations that cannot be reduced", "todo:: - This is for backwards compatibility, but we should", "consider reformatting/removing it. Args: ofile (:obj:`str`): Name for the output", "\"\"\" Convenience method to return :attr:`configs` with possible alterations. This", "the bitmask and initialize the bits self.calib_bitmask = BitMask(np.arange(ngroups)) self['calibbit']", "Raised if the columns to include are not valid, or", "edits back in a frame that has an # invalid", "'none' if 'decker' not in self.keys() else self['decker'][row] slitwid =", "configs: configs.remove('None') # Ignore frames with undefined configurations n_cfg =", "redetermined. 
Otherwise the configurations are only determined if :attr:`configs` has", "in the configuration column (A, B, C, etc), the calibration", "complicated by allowing some frame types to have no association", "for i in indx: msg += ' {0}\\n'.format(self['filename'][i]) msgs.warn(msg) #", "self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng) # Turn on the relevant bits type_bits[indx]", "of file paths that match the frame type and science", "a warning if there is problem try: time.Time(data['mjd'], format='mjd') except", "'det': d, # 'namp': self.spectrograph.detector[d-1]['numamplifiers']} return setup[skey] if config_only else", "\\ 'correctly defined for {0}; values must be None or", "types and bits into the existing table. This will *overwrite*", "this provided string or list of strings (e.g., ['A','C']). Returns:", "name self.spectrograph.vet_instrument(self.table) def _impose_types(self, columns, types): \"\"\" Impose a set", "to set the configuration should be the same as returned", "detector to include. If None, all detectors are included. config_only", "in indx[1:]: j = 0 for c in self.configs.values(): if", "`filename` is not a key in the provided table. \"\"\"", "data save to each file. The class is used to", "None else cfg_keys return {k:self.table[k][indx] for k in _cfg_keys} def", "self.keys() or 'calibbit' not in self.keys(): msgs.error('Cannot provide master key", "pypeit setup dictionary with the default format. Raises: PypeItError: Raised", "# pypeit_setup. If the user edits back in a frame", "instrument configurations. If run before the ``'setup'`` column is initialized,", "IDs are the last # few columns ncol = len(tbl_cols)", "are going to be # removed msg = 'The following", "0)] = str(i) # Allow some frame types to be", "ftype, calib_ID=None): \"\"\" Return the list of files with a", "instruments (e.g. VLT) where meta data may not be required", "configuration setup columns with this single identifier. 
\"\"\" self.get_frame_types(user=frametype) #", "here? indx = self.type_bitmask.flagged(type_bits, flag='standard') for b, f, ra, dec", "the table data['directory'][idx], data['filename'][idx] = os.path.split(ifile) if not data['directory'][idx]: data['directory'][idx]", "indx[use] return setups, indx if return_index else setups def _get_cfgs(self,", "the calibration group, and the detector. The configuration ID is", "per configuration with the associated metadata for each. Raises: PypeItError:", "run. # Validate, print out a warning if there is", "ignore frames with type: {0}'.format(ignore_frames)) use = np.ones(len(self), dtype=bool) for", "'calibbit' not in self.keys() and not force: self._set_calib_group_bits() self._check_calib_groups() return", "return _cfg def unique_configurations(self, force=False, copy=False, rm_none=False): \"\"\" Return the", "if 'filename' not in usrdata.keys(): raise KeyError('The user-provided table must", "if not np.any(indx): continue subtbl = self.table[output_cols][indx] # Write the", "additional ``PypeIt`` columns for calib, comb_id and bkg_id write_manual (:obj:`bool`,", "files; see :func:`pypeit.spectrographs.spectrograph.get_headarr`. Set to False to instead report a", "the keywords used to set the configuration should be the", "listed by the spectrograph ``configuration_keys`` method. If run after the", "on the configuration. This will change! # The configuration must", "cfg_key continue # Find the unique values of meta for", "spectrographs setup = {skey: {'--': {'disperser': {'dispname': dispname, 'dispangle':dispangle}, 'dichroic':", "# meaning that the instrument setup has only one configuration.", "group, and the detector. The configuration ID is the same", "Columns for output columns = self.spectrograph.pypeit_file_keys() extras = [] #", "in config match = [] for k in config.keys(): #", "by {sort_col}. 
Not a valid column.') # Ignore any NoneTypes", "run get_frame_types.') ignore_frames = list(ignore_frames.keys()) msgs.info('Unique configurations ignore frames with", "configs (e.g. grating angle) if isinstance(config[k], float): if row[k] is", "edited in place. This function can be used to initialize", "otherwise # any changes to the strings will be truncated", "taken in this configuration indx = self['setup'] == setup if", "def find_frame_files(self, ftype, calib_ID=None): \"\"\" Return the list of files", "by a given column if sort_col is not None: if", "A Table with two columns, the type names and the", ":attr:`spectrograph` will be ignored in the construction of the unique", "statement... raise ValueError('CODING ERROR: Found high-dimensional column.') #embed(header='372 of metadata')", "PypeIt reductions. Any frames that do not are removed from", "rm_none and 'None' in _cfg.keys(): del _cfg['None'] return _cfg def", "is_science[i]: continue if len(self.calib_bitmask.flagged_bits(self['calibbit'][i])) > 1: msgs.error('Science frames can only", "beginning of the list for col in ['framebit', 'frametype', 'filename',", "PypeIt parameters used to set the code behavior. files (:obj:`str`,", "= self.type_bitmask.turn_on(self['framebit'][indx], flag=frame_type) self['frametype'][indx] = self.type_bitmask.type_names(self['framebit'][indx]) def get_frame_types(self, flag_unknown=False, user=None,", "the types should be provided as a string with comma-separated", "# TODO: The rest of this just nominally sets the", "any existing file with the same name. ignore (:obj:`list`, optional):", "format. The pypeit file is the main configuration file for", "(:obj:`int`): The 0-indexed row of the frame. obstime (:class:`astropy.time.Time`, optional):", "will be at the heart of PypeItMetaData. Args: files (:obj:`str`,", "instrument configuration. :attr:`table` is modified in-place. See also: :func:`pypeit.pypeitsetup.PypeItSetup.run`. ..", "('A', 'B', etc.) 
and the row index where it first", "cfg_limits = self.spectrograph.valid_configuration_values() if cfg_limits is None: # No values", "msgs.warn('Removing troublesome # character from {0}. Returning {1}.'.format( meta_key, value))", "string with comma-separated types. setup (:obj:`str`): If the 'setup' columns", "# # flagging all as true # indx = self['idname']", "the 'setup' column already exists, the configurations are **not** reset", "else self.calib_bitmask.nbits def set_calibration_groups(self, global_frames=None, default=False, force=False): \"\"\" Group calibration", "in self.keys() or 'dec' not in self.keys(): msgs.warn('Cannot associate standard", "'None' or dec == 'None': msgs.warn('RA and DEC must not", "be empty!') # Initialize internals self.spectrograph = spectrograph self.par =", "From fitstbl config (dict): Defines the configuration spectrograph (pypeit.spectrographs.spectrograph.Spectrograph): Used", "reading the header for any of the provided files; see", "for each. Raises: PypeItError: Raised if there are list of", "optional): The MJD of the observation. If None, constructed using", "frame types to have no association with an instrument configuration", "if n_cfg == 1 else ','.join(np.arange(n_cfg).astype(str)) for ftype in global_frames:", "0: msgs.error('No frames to use to define configurations!') # Get", "# metakey is either not set or a string assert", "file. Each selected column must be a valid pypeit metadata", "supplement or overwrite metadata read from the file headers. The", "output directory root = '{0}_{1}'.format(self.spectrograph.name, setup) odir = os.path.join(output_path, root)", "Args: ftype (str): The frame type identifier. See the keys", "table that would have been printed/written to disk is returned.", "not are removed from :attr:`table`, meaning this method may modify", "in the provided list. Raises: PypeItError: Raised if the 'setup'", "default: self['calib'] = '0' # Make sure the calibbit column", "`match_type=False`. 
Args: usrdata (:obj:`astropy.table.Table`): A user provided set of data", "metadata object. .. todo:: - Here's where we could add", "``PypeIt`` columns for manual extraction Raises: PypeItError: Raised if the", "See ' 'usrdata argument of instantiation of PypeItMetaData.') usr_row =", "defined. \"\"\" if 'setup' not in self.keys(): msgs.error('Cannot provide instrument", "np import yaml from astropy import table, coordinates, time, units", "None` mean that you want all the configurations? Or can", "# Write the file make_pypeit_file(ofiles[j], self.spectrograph.name, [], cfg_lines=cfg_lines, setup_lines=setup_lines, sorted_files=data_lines,", "# Configurations have already been set if 'setup' in self.keys()", "spectrograph class definition. # This should probably go somewhere else", "darks) if global_frames is not None: if 'frametype' not in", "self['framebit'] = t['framebit'] return t def edit_frame_type(self, indx, frame_type, append=False):", "detector. The configuration ID is the same as included in", "'calib' column does not exist - if the 'comb_id' column", "This provides just the list of setup identifiers ('A', 'B',", "# configuration indx = (self.table['setup'] == 'None') & self.find_frames(ftype) if", "this step by setting `match_type=False`. Args: usrdata (:obj:`astropy.table.Table`): A user", "Get the subtable of frames taken in this configuration indx", "row[k] is None: match.append(False) elif np.abs(config[k]-row[k])/config[k] < spectrograph.meta[k]['rtol']: match.append(True) else:", "not None!) msgs.info(\"Typing completed!\") return self.set_frame_types(type_bits, merge=merge) def set_pypeit_cols(self, write_bkg_pairs=False,", "icbit in np.unique(self['calibbit'].data): cbit = int(icbit) # for yaml #", "the configurations? Or can we # make the default 'all'?", "return if 'setup' not in self.keys() and fill is not", "if all files are from a single instrument configuration. 
:attr:`table`", "column that is used to match to the metadata table", "strings to write to the top of the file, on", "Take only those present output_cols = np.array(columns) return output_cols[np.isin(output_cols, self.keys())].tolist()", "raise ValueError('The user-provided dictionary does not match table length.') msgs.info('Using", "frames in this group #in_group = self.find_calib_group(i) in_cbit = self['calibbit']", "for n in self['calib'][i].replace(':',',').split(',')]) # Check against current maximum ngroups", "configurations to include in the returned objects. If ``'all'``, pass", "dictionary with the configuration, don't include the top-level designation of", "unique configurations in that column. This is used to set", "ra, dec in zip(type_bits[indx], self['filename'][indx], self['ra'][indx], self['dec'][indx]): if ra ==", "configuration file for PypeIt, configuring the control-flow and algorithmic parameters", "format. Raises: PypeItError: Raised if the 'setup' isn't been defined.", "not None: self.merge(usrdata) # Impose types on specific columns self._impose_types(['comb_id',", "include in the returned objects. If ``'all'``, pass back all", "same as those included in the pypeit file. Each selected", "for key in existing_keys: if len(self.table[key].shape) > 1: # NOT", "configuration with a given calibration group. .. todo:: - This", "an attribute of # PypeItMetaData? def row_match_config(row, config, spectrograph): \"\"\"", "provided, regardless of whether or not a science frame is", "self['slitlen'][row] binning = '1,1' if 'binning' not in self.keys() else", "of the items in the metadata table listed by the", "# few columns ncol = len(tbl_cols) for col in ['dithpat',", "you call the function with ``force=True``. 
Args: configs (:obj:`dict`, optional):", "user-defined frame types from the input dictionary if user is", "must have \\'filename\\' column!') # Make sure the data are", "string type to make sure that it matches with what", "'__len__') else [files] # Build lists to fill data =", "if 'None' in setup: setup.remove('None') # Make sure that each", "Ignore any NoneTypes indx = output_tbl[sort_col] != None is_None =", "msg = 'The following frames have configurations that cannot be", "`configuration_keys` method. The latter is not checked. If None, this", "from pypeit.par.util import make_pypeit_file from pypeit.bitmask import BitMask # TODO:", "= len(self) for i in range(nrows): for d, cfg in", "the spectrograph `configuration_keys` method. The latter is not checked. If", "subtbl = self.table[output_cols][in_cfg] subtbl.sort(['frametype','filename']) with io.StringIO() as ff: subtbl.write(ff, format='ascii.fixed_width')", "groups.\"\"\" return None if self.calib_bitmask is None else self.calib_bitmask.nbits def", "comb_id and bkg_id write_manual (:obj:`bool`, optional): Add additional ``PypeIt`` columns", "The 0-indexed row of the frame. obstime (:class:`astropy.time.Time`, optional): The", "None else self.calib_bitmask.nbits def set_calibration_groups(self, global_frames=None, default=False, force=False): \"\"\" Group", "Assign everything from the same configuration to the same #", "{0}\\n'.format(self['filename'][i]) msgs.warn(msg) # And remove 'em self.table = self.table[good] def", "The bitmask used to set the frame type of each", "Also raised when some frames cannot be assigned to a", "specific frame. \"\"\" return self.calib_bitmask.flagged_bits(self['calibbit'][row]) # TODO: Is there a", "is a simple grouping of frames with the same configuration.", "internal table. See above. 
Raises: TypeError: Raised if `usrdata` is", "i in range(n_cfg): self['calib'][(self['setup'] == configs[i]) & (self['framebit'] > 0)]", "tbl_cols.pop(indx)) # Copy the internal table so that it is", "we want to do this here? indx = self.type_bitmask.flagged(type_bits, flag='standard')", "write_bkg_pairs (:obj:`bool`, optional): When constructing the :class:`pypeit.metadata.PypeItMetaData` object, include two", "cfg, self.spectrograph): self.table['setup'][i] = d # Check if any of", "else self['dichroic'][row] decker = 'none' if 'decker' not in self.keys()", "optional): The 1-indexed detector number(s). If a tuple, it must", "use &= np.logical_not(self.find_frames(ftype)) indx = indx[use] if len(indx) == 0:", "if a vector was provided if rows is not None:", "is None: # Output file not defined so just print", "exists. Use ovewrite=True to overwrite.'.format(ofile)) # Grab output columns output_cols", "empty set of configuration keys # meaning that the instrument", "enumerate(_files): # User data (for frame type) if usrdata is", "= None if output in [None, 'table'] else output if", "run set_configurations.') # Unique configurations setups, indx = np.unique(self['setup'], return_index=True)", "the other checks in this call. # indx &= self.spectrograph.check_frame_type(ftype,", "given column if sort_col is not None: if sort_col not", "expected to be the key and value of the dictionary,", "each configuration. The output data table is identical to the", "string with comma-separated types. 
merge (:obj:`bool`, optional): Merge the frame", "ftype, metakey in ignore_frames.items(): # TODO: For now, use this", "# Construct file ff = open(ofile, 'w') for setup in", "manual extraction Returns: `numpy.ndarray`_: Array of columns to be used", "removed msg = 'The following frames have configurations that cannot", "groups are set to be unique for each standard and", "return self._get_cfgs(copy=copy, rm_none=rm_none) msgs.info('Using metadata to determine unique configurations.') #", "orientation binning of a science image } } } #_det", "# pypeit file, which should nominally follow an execution of", "any data. Args: spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): The spectrograph used to collect", "self['filename'][ftype_in_group])] # Write it ff = open(ofile, 'w') ff.write(yaml.dump(utils.yamlify(cfg))) ff.close()", "self.keys(): msgs.error('Frame types are not set. First run get_frame_types.') if", "either the file list from which to grab the data", "directory does not exist, it is created. cfg_lines (:obj:`list`, optional):", "`comb_id` and `bkg_id` that identify object and background frame pairs.", "Add in a call to clean_configurations? I didn't add it", "cannot be assigned to a configuration, the spectrograph defined frames", "is ignored if `data` is also provided. This functionality is", "match.append(True) else: match.append(False) else: # The np.all allows for arrays", "# TODO: Do we need a calib file? def write_calib(self,", "to it: for cfg_key in _configs.keys(): in_cfg = self.table['setup'] ==", "ff.close() def write_pypeit(self, output_path=None, cfg_lines=None, write_bkg_pairs=False, write_manual=False, configs=None): \"\"\" Write", "match with the metadata keywords. 
Also raised when some frames", "self['filename'][indx]) return [os.path.join(d,f) for d,f in zip(self['directory'][indx], self['filename'][indx])] def set_frame_types(self,", "- This is complicated by allowing some frame types to", "in cfgs.keys(): # Get the subtable of frames taken in", "a warning and continue. usrdata (astropy.table.Table, optional): Parsed for frametype", "set of metadata exprng = self.par['scienceframe']['exprng'] if ftype == 'science'", "list of frames of this type without a # configuration", "setups = setups[use] indx = indx[use] return setups, indx if", "has only one configuration. if len(cfg_keys) == 0: self.configs =", "combination of instrument-specific checks using # combinations of the full", "dictionary using the unique configurations in that column. This is", "Args: global_frames (:obj:`list`, optional): A list of strings with the", "is None or configs == 'all' or configs == ['all']:", "# configuration. for i in indx[1:]: j = 0 for", "a # file... return None def find_calib_group(self, grp): \"\"\" Find", "force=False, copy=False, rm_none=False): \"\"\" Return the unique instrument configurations. If", "in place. Args: columns (:obj:`list`): List of column names types", "columns, the type names and the type bits. See :class:`pypeit.core.framematch.FrameTypeBitMask`", "the table. strict (:obj:`bool`, optional): Function will fault if :func:`fits.getheader`", "overwrite (:obj:`bool`, optional): Overwrite any existing file; otherwise raise an", "PypeIt. .. include common links, assuming primary doc root is", "Return the full paths to one or more frames. Args:", "and which column to use for sorting. Args: output (:obj:`str`,", "exprng=exprng) # Turn on the relevant bits type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx],", "or more frames. 
\"\"\" if isinstance(indx, (int,np.integer)): return os.path.join(self['directory'][indx], self['filename'][indx])", "not in self.keys() and not force: self._set_calib_group_bits() self._check_calib_groups() return #", "the basic parameters are the first few columns; do them", "np.all allows for arrays in the Table (e.g. binning) match.append(np.all(config[k]", "= np.unique(self['setup'], return_index=True) if ignore is not None: # Remove", "'w') ff.write(yaml.dump(utils.yamlify(cfg))) ff.close() def write_pypeit(self, output_path=None, cfg_lines=None, write_bkg_pairs=False, write_manual=False, configs=None):", "tiso = time.Time(_obstime, format='isot') dtime = datetime.datetime.strptime(tiso.value, '%Y-%m-%dT%H:%M:%S.%f') return '{0}-{1}_{2}_{3}{4}'.format(self['filename'][row].split('.fits')[0],", "values must be a list.'.format(self.spectrograph.__class__.__name__) # Check that the metadata", "list. Raises: PypeItError: Raised if the 'setup' or 'calibbit' columns", "For each configuration, determine if any of the frames with", "calib_ID=None): \"\"\" Return the list of files with a given", "the input configuration \"\"\" # Loop on keys in config", "\"\"\" Check that the calibration groups are valid. This currently", "MasterFrames and QA for icbit in np.unique(self['calibbit'].data): cbit = int(icbit)", "which can be specific to each instrument. Args: output_path (:obj:`str`,", "'get_frame_types.') # For each configuration, determine if any of the", "will fault if there is a problem with the reading", "around this. Is it related to # this change? #", "not None, this function simply returns :attr:`config` (cf. ``force``). ..", "a specific frame. \"\"\" return self.calib_bitmask.flagged_bits(self['calibbit'][row]) # TODO: Is there", "in_cfg = self.table['setup'] == cfg_key for ftype, metakey in ignore_frames.items():", "is also provided. This functionality is only used when building", "the data to assign to :attr:`table`. 
\"\"\" # Allow for", "elif columns == 'pypeit': tbl_cols = self.set_pypeit_cols(write_bkg_pairs=True) else: all_cols =", "_configs.items(): if row_match_config(self.table[i], cfg, self.spectrograph): self.table['setup'][i] = d # Check", "table.Table(data if files is None else self._build(files, strict=strict, usrdata=usrdata)) #", "in self.spectrograph.meta.keys(): value = self.spectrograph.get_meta_value(headarr, meta_key, required=strict, usr_row=usr_row, ignore_bad_header =", "PypeItError: Raised if 'setup' column is not defined, or if", "these are the root of the # MasterFrames and QA", "add a SPIT option. Args: flag_unknown (:obj:`bool`, optional): Instead of", "over the frame types for i, ftype in enumerate(self.type_bitmask.keys()): #", "str RA, DEC (backwards compatability) if key in ['ra', 'dec']", "if 'comb_id' not in self.keys(): self['comb_id'] = -1 if 'bkg_id'", "in_group ftype_in_group = self.find_frames(key) & in_cbit cfg[setup[0]][cbit][key] = [ os.path.join(d,f)", "continue. usrdata (astropy.table.Table, optional): Parsed for frametype for a few", "None: for key in cfgs.keys(): if key in ignore: del", "already been set if 'calib' in self.keys() and 'calibbit' in", "None if configs is None else np.atleast_1d(configs) # TODO: Why", "the unique values of meta for this configuration uniq_meta =", "+= [key] # Take only those present output_cols = np.array(columns)", "to grab the data from the fits headers or the", "' 'usrdata argument of instantiation of PypeItMetaData.') usr_row = usrdata[idx]", "== setup if not np.any(in_cfg): continue paths = np.unique(self['directory'][in_cfg]).tolist() #", "configuration keys # meaning that the instrument setup has only", "columns (:obj:`list`): List of column names types (:obj:`list`): List of", "set the code behavior. If not provided, the default parameters", "invalid {0}.'.format(key)) good &= indx if np.all(good): # All values", "a single instrument configuration. :attr:`table` is modified in-place. 
See also:", "= '0' # Make sure the calibbit column does not", "== 0: msgs.error('No setups to write!') # Grab output columns", "just set all # the frames to this (first) configuration", "key in extras: if key not in columns: columns +=", "one and only one instrument ' 'configuration with a valid", "pad string array ftype_colmA = table.Column(self.type_bitmask.type_names(type_bits), name='frametype') # KLUDGE ME", "identifier; i.e., the ' 'configuration cannot be None.') # Find", ":attr:`configs` instead of the object itself. rm_none (:obj:`bool`, optional): Remove", "can force type_names() in bitmask to always return the #", "we # don't have to do these gymnastics. Or better", "in the table with the frames to return. Can be", "to the metadata table generated within PypeIt. match_type (:obj:`bool`, optional):", "a standard star foundstd = flux_calib.find_standard_file(ra, dec, check=True) b =", "np.unique(self['setup'], return_index=True) if ignore is not None: # Remove the", "allows you to set the columns to print and which", "'manual' not in self.keys(): self['manual'] = '' def write_sorted(self, ofile,", "len(self))]) @staticmethod def default_keys(): return [ 'directory', 'filename', 'instrume' ]", "# indx = self['idname'] == self.spectrograph.idname(ftype) if useIDname \\ #", "provided table. 
\"\"\" meta_data_model = meta.get_meta_data_model() # Check the input", "merge: self['frametype'] = t['frametype'] self['framebit'] = t['framebit'] return t def", "' spectrograph={0}\\n'.format( self.spectrograph.name), ' length={0}\\n'.format(len(self))]) def _repr_html_(self): return self.table._base_repr_(html=True, max_width=-1,", "the # meta data values indx &= np.isin(self.table[metakey], uniq_meta) self.table['setup'][indx]", "match_type (:obj:`bool`, optional): Attempt to match the data type in", "is expected that this table can be used to set", "group is the same as the calibration bit number, and", "configurations setups, indx = np.unique(self['setup'], return_index=True) if ignore is not", "this is done *after* :attr:`configs` is copied to a new", "type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftype) # Find the nearest standard star", "metadata either to a file or to the screen. The", "calibration bit in the keyword allows MasterFrames to be used", "# 'namp': self.spectrograph.detector[d-1]['numamplifiers']} return setup[skey] if config_only else setup def", "# Check the file can be written (this is here", "standard frames) to a unique integer. If the 'comb_id' or", "to be either undefined or to be unique for each", "import io import string from copy import deepcopy import datetime", "\"\"\" Generate a table of frame types from the input", "the header keyword data to include in the table and", "a # configuration indx = (self.table['setup'] == 'None') & self.find_frames(ftype)", "few columns; do them in # reverse order so I", "data and files are None in the instantiation of PypeItMetaData.'", "you want all the configurations? 
Or can we # make", "optional): Return a deep copy of :attr:`configs` instead of the", "with io.StringIO() as ff: subtbl.write(ff, format='ascii.fixed_width') data_lines = ff.getvalue().split('\\n')[:-1] #", "should typically provide either the file list from which to", "if 'frametype' not in self.keys(): msgs.error('To set global frames, types", "or have corrupt headers:\\n' for file in bad_files: msg +=", "column; ' 'run set_configurations.') dispname = 'none' if 'dispname' not", "problem try: time.Time(data['mjd'], format='mjd') except ValueError: mjd = np.asarray(data['mjd']) filenames", "the user. The file name and type are expected to", "Shape must match the number of the rows in the", "__repr__(self): return self.table._base_repr_(html=False, descr_vals=['PypeItMetaData:\\n', ' spectrograph={0}\\n'.format( self.spectrograph.name), ' length={0}\\n'.format(len(self))]) def", ":attr:`configs`. Raises: PypeItError: Raised if the 'setup' isn't defined and", "the output sorted file. overwrite (:obj:`bool`, optional): Overwrite any existing", "types must have been defined; ' 'run get_frame_types.') calibs =", "Raised if `usrdata` is not an `astropy.io.table.Table` KeyError: Raised if", "set all # the frames to this (first) configuration self.table['setup'][indx]", "frames to a single calibration group, if the 'calib' column", "# producing/using the *.calib file. _cfg = {} for setup", "were # correctly assigned in the spectrograph class definition. #", "msgs.info('Found {0} unique configurations.'.format(len(self.configs))) return self._get_cfgs(copy=copy, rm_none=rm_none) def set_configurations(self, configs=None,", "types from the input metadata object. .. todo:: - Here's", "of the object itself. rm_none (:obj:`bool`, optional): Remove any configurations", "str_j for str_i in string.ascii_uppercase for str_j in string.ascii_uppercase] cfg_iter", "file. 
_cfg = {} for setup in cfg.keys(): _cfg[setup] =", "'run set_configurations.') dispname = 'none' if 'dispname' not in self.keys()", "with two digits (the maximum number of detectors is 99).", "the same calibration group indx &= self.find_calib_group(calib_ID) # Return return", "this call. # indx &= self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng) indx =", "will return an astropy.table.Table, not a PypeItMetaData object. def __getitem__(self,", "table to supplement or overwrite the metadata. If the internal", "have dtype=object, otherwise # any changes to the strings will", "VLT) where meta data may not be required Returns: dict:", "(:obj:`bool`, optional): Return a deep copy of :attr:`configs` instead of", "The MJD of the observation. \"\"\" return time.Time(self['mjd'][row], format='mjd') def", "instrument setup has only one configuration. if len(cfg_keys) == 0:", "indx[1:]: j = 0 for c in self.configs.values(): if row_match_config(self.table[i],", "k, cfg in _configs.items(): if len(set(cfg.keys()) - set(self.keys())) > 0:", "The internal table is edited *in place*. If the 'setup'", "used to supplement or overwrite metadata read from the file", "if 'dispname' not in self.keys() else self['dispname'][row] dispangle = 'none'", "= indx[rm] # Restrict _configs = None if configs is", "provided spectrograph. It is expected that this table can be", "explicit that the method returns None when writing to a", "'{0}-{1}_{2}_{3}{4}'.format(self['filename'][row].split('.fits')[0], self['target'][row].replace(\" \", \"\"), self.spectrograph.camera, datetime.datetime.strftime(dtime, '%Y%m%dT'), tiso.value.split(\"T\")[1].replace(':','')) def get_setup(self,", "type can be anything allowed by the instantiation of :class:`astropy.table.Table`.", "the reading the header for any of the provided files;", "and split is True. Returns: :obj:`list`: List of ``PypeIt`` files", "One or more strings to write to the top of", "in the provided row. 
The master key is the combination", "setups def _get_cfgs(self, copy=False, rm_none=False): \"\"\" Convenience method to return", "optional): Add additional ``PypeIt`` columns for calib, comb_id and bkg_id", "frame types have not been set yet. \"\"\" # Configurations", "only those present output_cols = np.array(columns) return output_cols[np.isin(output_cols, self.keys())].tolist() def", "of the frame. obstime (:class:`astropy.time.Time`, optional): The MJD of the", "been printed/written to disk is returned. Otherwise, the string is", "must match the number of the rows in the table.", "NOT ALLOWED!! # TODO: This should be converted to an", "either not set or a string assert metakey is None", "already contains the column in `usrdata`, the function will try", "_cfg_keys = self.spectrograph.configuration_keys() if cfg_keys is None else cfg_keys return", "define configurations!') # Get the list of keys to use", "self.spectrograph.configuration_keys() # Configuration identifiers are iterations through the # upper-case", "ignored in the determination of the unique configurations, but the", "indx = self['filename'] == ifile type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftypes.split(',')) return", "each fits file. calib_bitmask (:class:`BitMask`): The bitmask used to keep", "make the default 'all'? if configs is not None and", "types. \"\"\" # Checks if 'frametype' in self.keys() or 'framebit'", "f\"{self['setup'][row]}_{self['calibbit'][row]}_{det_name}\" def construct_obstime(self, row): \"\"\" Construct the MJD of when", "directory .. include:: ../include/links.rst \"\"\" import os import io import", "files without any types indx = np.logical_not(self.type_bitmask.flagged(type_bits)) if np.any(indx): msgs.info(\"Couldn't", "list(ignore_frames.keys()) msgs.info('Unique configurations ignore frames with type: {0}'.format(ignore_frames)) use =", "Raised if none of the keywords in the provided configuration", "with the default format. 
Raises: PypeItError: Raised if the 'setup'", "Overwrite any existing file with the same name. ignore (:obj:`list`,", "None, this function simply returns :attr:`config` (cf. ``force``). .. warning::", "is not defined, or if `global_frames` is provided but the", "for i in range(n_cfg): self['calib'][(self['setup'] == configs[i]) & (self['framebit'] >", "those included in the pypeit file. Each selected column must", "matches with what can be read from the pypeit file.", "= np.argsort(mjd) subtbl = subtbl[isort] subtbl.write(ff, format='ascii.fixed_width') ff.write('##end\\n') ff.close() #", "bit number, and the detector number is provided as an", "if none of the keywords in the provided configuration match", "setup if not np.any(in_cfg): continue paths = np.unique(self['directory'][in_cfg]).tolist() # Get", "the screen. If ``'table'``, the table that would have been", "usrdata['filename'][idx]: msgs.error('File name list does not match user-provided metadata table.", "filename, frametype, framebit, setup, calib, and calibbit. sort_col (:obj:`str`, optional):", "to ignore but the frame types have not been defined", "frame types have not been defined yet. \"\"\" if self.configs", "try running set_configurations.') configs = np.unique(self['setup'].data).tolist() if 'None' in configs:", "in config.keys(): # Deal with floating configs (e.g. grating angle)", "{h}\\n') f.write('\\n') f.write('\\n'.join(data_lines)) f.write('\\n') # Just to be explicit that", "not been set yet. \"\"\" # Configurations have already been", "self['dichroic'][row] decker = 'none' if 'decker' not in self.keys() else", "&= indx if np.all(good): # All values good, so we're", "files are from a single instrument configuration. :attr:`table` is modified", "value: value = value.replace('#', '') msgs.warn('Removing troublesome # character from", "leave without a type and continue. 
user (:obj:`dict`, optional): A", "if assign_objects and np.all(self['comb_id'] < 0): # find_frames will throw", "import numpy as np import yaml from astropy import table,", "[os.path.join(d,f) for d,f in zip(self['directory'][indx], self['filename'][indx])] def set_frame_types(self, type_bits, merge=True):", "the output rows if a vector was provided if rows", "the 'setup' isn't been defined. \"\"\" if 'setup' not in", "list of strings with the frame types to use in", "all of 'comb_id' values are less than 0 (meaning they're", "of the configurations are not set not_setup = self.table['setup'] ==", "self.keys() and not force: self._set_calib_group_bits() self._check_calib_groups() return # TODO: The", "# Just to be explicit that the method returns None", "to use cfg_keys = self.spectrograph.configuration_keys() # Configuration identifiers are iterations", "be truncated at 4 characters. self.table['calib'] = np.full(len(self), 'None', dtype=object)", "else: usrdata[key][~nones] = usrdata[key][~nones].astype(dtype) # Include the user data in", "type. If it can't it will just add the column", "write calibration groups without \\'setup\\' and \\'calibbit\\' ' 'columns; run", "import flux_calib from pypeit.core import parse from pypeit.core import meta", "initialized, this function determines the unique instrument configurations by finding", "*overwrite* any existing columns. Returns: `astropy.table.Table`: Table with two columns,", "for col in ['dithpat', 'dithpos', 'dithoff', 'calib', 'comb_id', 'bkg_id']: if", "if key in ignore: del cfgs[key] # Construct file ff", "= [key for key in cfg.keys() if key in _configs]", "'none' if 'dichroic' not in self.keys() else self['dichroic'][row] decker =", "valid, or if the column to use for sorting is", "are valid for this column. 
indx = np.isin(self[key], cfg_limits[key]) if", "int, str]) # Initialize internal attributes self.configs = None self.calib_bitmask", "{0}; values must be a list.'.format(self.spectrograph.__class__.__name__) # Check that the", "== 'None' if np.sum(ignore) > 0: msgs.warn('Ignoring {0} frames with", "cfg_key for ftype, metakey in ignore_frames.items(): # TODO: For now,", "configuration keys is set by :func:`~pypeit.spectrographs.spectrograph.Spectrograph.valid_configuration_values`. \"\"\" cfg_limits = self.spectrograph.valid_configuration_values()", "not in self.keys() or 'dec' not in self.keys(): msgs.warn('Cannot associate", "0 for i in range(len(self)): if self['calib'][i] in ['all', 'None']:", "dichroic, 'slit': {'decker': decker, 'slitwid':slitwid, 'slitlen':slitlen}, 'binning': binning, # PypeIt", "np.arange(len(self)) ignore_frames = self.spectrograph.config_independent_frames() if ignore_frames is not None: if", "foundstd = flux_calib.find_standard_file(ra, dec, check=True) b = self.type_bitmask.turn_off(b, flag='science' if", "row_match_config(self.table[i], c, self.spectrograph): break j += 1 unique = j", "isinstance(metakey, str), \\ 'CODING ERROR: metadata keywords set by config_indpendent_frames", "more 0-indexed rows in the table with the frames to", "ValueError('Boolean vector selecting output rows has incorrect length.') # Get", "the output file. Can be provided as a list directly", "ofiles[j] = os.path.join(odir, '{0}.pypeit'.format(root)) # Get the setup lines setup_lines", "If None, set to current directory. If the output directory", "Make sure the data are correctly ordered srt = [np.where(f", "index=True, with the rows that contain the frames of the", "BitMask # TODO: Turn this into a DataContainer # Initially", "frame types for i, ftype in enumerate(self.type_bitmask.keys()): # # Initialize:", "Raises: PypeItError: Raised if the 'setup' isn't been defined. 
\"\"\"", "set if 'calib' in self.keys() and 'calibbit' in self.keys() and", "cbit # Find the unique configurations in this group, ignoring", "in table; cannot use it for file typing.') # Start", "range(nrows): for d, cfg in _configs.items(): if row_match_config(self.table[i], cfg, self.spectrograph):", "key in meta_data_model.keys(): # Is this meta data?? dtype =", "cfg_key def clean_configurations(self): \"\"\" Ensure that configuration-defining keywords all have", "of the correct length. Returns: list: List of the full", "types if possible existing_keys = list(set(self.table.keys()) & set(usrdata.keys())) radec_done =", "= os.getcwd() # Find unique configurations, always ignoring any 'None'", "This function writes the columns selected by the :func:`pypeit.spectrographs.spectrograph.Spectrograph.pypeit_file_keys`, which", "Is it related to # this change? # http://docs.astropy.org/en/stable/table/access_table.html#bytestring-columns-in-python-3 #", "provided as a list directly or as a comma-separated string.", "identifier. \"\"\" self.get_frame_types(user=frametype) # TODO: Add in a call to", "file name. If None, the table contents are printed to", "to :attr:`table`. \"\"\" # Allow for single files _files =", "and/or to initialize the combination groups to the set of", "so we're done return good = np.ones(len(self), dtype=bool) for key", "set_calibration_groups.') if os.path.isfile(ofile) and not overwrite: msgs.error('{0} already exists. Use", "the configuration setup columns with this single identifier. \"\"\" self.get_frame_types(user=frametype)", "meta data values indx &= np.isin(self.table[metakey], uniq_meta) self.table['setup'][indx] = cfg_key", "of unique setup names. 
A second returned object provides the", "In this implementation, slicing the PypeItMetaData object # will return", "ff.getvalue().split('\\n')[:-1] # Write the file make_pypeit_file(ofiles[j], self.spectrograph.name, [], cfg_lines=cfg_lines, setup_lines=setup_lines,", "are provided the table is instantiated without any data. Args:", "table. See above. Raises: TypeError: Raised if `usrdata` is not", "provided list. return_index (:obj:`bool`, optional): Return row indices with the", "that cannot be reduced by PypeIt' \\ ' and will", "IPython import embed import numpy as np import yaml from", "we should consider reformatting/removing it. Args: ofile (:obj:`str`): Name for", "the columns selected by the :func:`pypeit.spectrographs.spectrograph.Spectrograph.pypeit_file_keys`, which can be specific", "included in the selected calibration group. Raises: PypeItError: Raised if", "type to make sure that it matches with what can", "directly. If neither are provided the table is instantiated without", "http://docs.astropy.org/en/stable/table/access_table.html#bytestring-columns-in-python-3 # # See also: # # http://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table.convert_bytestring_to_unicode # #", "name list does not match user-provided metadata table. See '", "extras: if key not in columns: columns += [key] #", "than {0} configurations!'.format(len(cfg_iter))) self.configs[cfg_iter[cfg_indx]] = self.get_configuration(i, cfg_keys=cfg_keys) cfg_indx += 1", "to the top of the file, on string per file", "any validation checks. 
par (:class:`pypeit.par.pypeitpar.PypeItPar`): PypeIt parameters used to set", "Find the number groups by searching for the maximum number", "provided as an argument and converted to a zero-filled string", "if possible existing_keys = list(set(self.table.keys()) & set(usrdata.keys())) radec_done = False", "string.ascii_uppercase] cfg_iter = list(string.ascii_uppercase) + double_alphabet cfg_indx = 0 #", "= self.find_frames(key) & in_cbit cfg[setup[0]][cbit][key] = [ os.path.join(d,f) for d,f", "merge (:obj:`bool`, optional): Merge the types and bits into the", "Get the list of keys to use cfg_keys = self.spectrograph.configuration_keys()", "calibration # group if 'setup' not in self.keys(): msgs.error('Must have", "PypeItPar): raise TypeError('Input parameter set must be of type PypeItPar.')", "have not been defined yet. \"\"\" if self.configs is not", "file in the provided row. The master key is the", "returns :attr:`config` (cf. ``force``). .. warning:: Any frame types returned", "(first) configuration self.table['setup'][indx] = cfg_key continue # Find the unique", "files\") type_bits = np.zeros(len(self), dtype=self.type_bitmask.minimum_dtype()) # Use the user-defined frame", "columns if isinstance(columns, list) else columns.split(',') badcol = [col not", "table.Table): raise TypeError('Must provide an astropy.io.table.Table instance.') if 'filename' not", "`configuration_keys` of :attr:`spectrograph` is used. Returns: dict: A dictionary with", "and 'calib' doesn't exist yet if 'calib' not in self.keys()", "the user might add self.set_user_added_columns() # Validate instrument name self.spectrograph.vet_instrument(self.table)", "be removed from the metadata table (pypeit file):\\n' indx =", "of detectors is 99). Using the calibration bit in the", "dictated by the header keywords specified for the provided spectrograph.", "`usrdata` to the type in the internal table. 
See above.", "1: msgs.warn('When setting the instrument configuration for {0} '.format(ftype) +", "' 'run get_frame_types.') calibs = '0' if n_cfg == 1", "ff = open(ofile, 'w') for setup in cfgs.keys(): # Get", "the metadata table. The internal table is edited *in place*.", "# Use the user-defined frame types from the input dictionary", "standard, # then it is probably a standard star foundstd", "but we should consider reformatting/removing it. - This is complicated", "to a file or to the screen. The method allows", "type. calib_ID (:obj:`int`, optional): Index of the calibration group that", "self.keys(): del self.table['frametype'] if 'framebit' in self.keys(): del self.table['framebit'] #", "if ignore_frames is not None: if 'frametype' not in self.keys():", "into the exiting table. Returns: :obj:`astropy.table.Table`: A Table with two", "group columns, and/or to initialize the combination groups to the", "# Construct the setups dictionary cfg = self.unique_configurations(copy=True, rm_none=True) #", "list of columns to include in the output file. Can", "characters. self.table['calib'] = np.full(len(self), 'None', dtype=object) for i in range(n_cfg):", "_repr_html_(self): return self.table._base_repr_(html=True, max_width=-1, descr_vals=['PypeItMetaData: spectrograph={0}, length={1}\\n'.format( self.spectrograph.name, len(self))]) @staticmethod", "to append/overwrite. append (:obj:`bool`, optional): Append the frame type. If", "ERROR: valid_configuration_values is not correctly defined ' \\ 'for {0};", "Initially tried to subclass this from astropy.table.Table, but that #", "unique configurations.') # If the frame types have been set,", "rows*. force (:obj:`bool`, optional): Force the calibration groups to be", "Remove the selected configurations to ignore rm = np.logical_not(np.isin(setups, ignore))", "``force=True``. 
Args: configs (:obj:`dict`, optional): A nested dictionary, one dictionary", "not be required Returns: dict: Dictionary with the data to", "from which to grab the data from the fits headers", "= self.unique_configurations() if configs is None else configs for k,", "function writes the columns selected by the :func:`pypeit.spectrographs.spectrograph.Spectrograph.pypeit_file_keys`, which can", "self.table['framebit'] # # TODO: This needs to be moved into", "self.spectrograph.detector[d-1]['numamplifiers']} return setup[skey] if config_only else setup def get_configuration_names(self, ignore=None,", "handles the fits metadata required by PypeIt. .. include common", "else: all_cols = list(self.keys()) tbl_cols = columns if isinstance(columns, list)", "do not exist, they're set to -1. Args: assign_objects (:obj:`bool`,", "and not force: return self._get_cfgs(copy=copy, rm_none=rm_none) if 'setup' in self.keys():", "to determine the calibration # group if 'setup' not in", "a single calibration group *for all rows*. force (:obj:`bool`, optional):", "frame types.') for ifile,ftypes in user.items(): indx = self['filename'] ==", "the first unique configuration self.configs = {} self.configs[cfg_iter[cfg_indx]] = self.get_configuration(indx[0],", "# None` mean that you want all the configurations? Or", "= ['None']*len(_files) data['filename'] = ['None']*len(_files) # Build the table for", "datetime from IPython import embed import numpy as np import", "a frame that has an # invalid key, at least", "not_setup = self.table['setup'] == 'None' if not np.any(not_setup): # All", "up (note that this is called above if user is", "Add additional ``PypeIt`` columns for manual extraction Raises: PypeItError: Raised", "table, are directory, filename, frametype, framebit, setup, calib, and calibbit.", "included. index (:obj:`bool`, optional): Return an array of 0-indexed indices", "self.keys() and 'calibbit' in self.keys() and not force: return #", "metadata used during the reduction. 
The content of the fits", "table. The type can be anything allowed by the instantiation", "the provided table to supplement or overwrite the metadata. If", "the user that the matching meta values are not #", "the directory and file name to the table data['directory'][idx], data['filename'][idx]", "This will *overwrite* any existing columns. Returns: `astropy.table.Table`: Table with", "import table, coordinates, time, units from pypeit import msgs from", "\"\"\" Find the rows with the associated frame type. If", "0 (meaning they're unassigned), the combination groups are set to", "in the same calibration group indx &= self.find_calib_group(calib_ID) # Return", "open(ofile, 'w') ff.write(yaml.dump(utils.yamlify(cfg))) ff.close() def write_pypeit(self, output_path=None, cfg_lines=None, write_bkg_pairs=False, write_manual=False,", "header is not None: _header = header if isinstance(header, list)", "meta_key, required=strict, usr_row=usr_row, ignore_bad_header = self.par['rdx']['ignore_bad_headers']) if isinstance(value, str) and", "the metadata values from the selected row. \"\"\" _cfg_keys =", "probably use the pypeit file instead Args: ofile (:obj:`str`): Name", "row of the specified frame type is included. Returns: list:", "optional): Index of the calibration group that it must match.", "the specified frame type is included. Returns: list: List of", "calibration group. TODO: Is this appropriate for NIR data? \"\"\"", "not None and not force: return self._get_cfgs(copy=copy, rm_none=rm_none) if 'setup'", "if 'setup' not in self.keys(): msgs.error('Cannot provide instrument setup without", "can only be assigned to a single calibration group.') @property", "DEIMOS image reader will # fault. 
self.set_configurations(fill=setup) self.set_calibration_groups(default=True) self.set_combination_groups() def", "for ignored frames, types must have been defined; run '", "self.type_bitmask.flagged(self['framebit'], ftype) if calib_ID is not None: # Select frames", "If None, the table contents are printed to the screen.", "a viable mosaic for :attr:`spectrograph`; see :func:`~pypeit.spectrographs.spectrograph.Spectrograph.allowed_mosaics`. Returns: :obj:`str`: Master", "from :func:`unique_configurations` because the latter determines and provides the configurations", "configuring the control-flow and algorithmic parameters and listing the data", "reformatting/removing it. - This is complicated by allowing some frame", "required by PypeIt. .. include common links, assuming primary doc", "have not yet been defined (see :func:`get_frame_types`), this method will", "dtype = meta_data_model[key]['dtype'] else: dtype = self.table[key].dtype # Deal with", "I'm just not using it... Args: frametype (:obj:`dict`): A dictionary", "used to set the configuration should be the same as", "is None: # Nope, we're still done return # At", "= self['setup'] == setup if not np.any(indx): continue subtbl =", "here because the spectrograph # needs to be defined first)", "columns with this single identifier. \"\"\" self.get_frame_types(user=frametype) # TODO: Add", "list directly or as a comma-separated string. If None or", "Impose types on specific columns self._impose_types(['comb_id', 'bkg_id', 'manual'], [int, int,", "_header = header if isinstance(header, list) else [header] for h", "set but the bits have not (likely because the #", "def find_frame_calib_groups(self, row): \"\"\" Find the calibration groups associated with", "self.table, exprng=exprng) indx = self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng) # Turn on", "astropy.time.Time: The MJD of the observation. 
\"\"\" return time.Time(self['mjd'][row], format='mjd')", "indx (:obj:`int`): The 0-indexed row in the table to edit", "always ignoring any 'None' # configurations... cfg = self.unique_configurations(copy=True, rm_none=True)", "in self.keys(): msgs.info('Setup column already set. Finding unique configurations.') uniq,", "files if hasattr(files, '__len__') else [files] # Build lists to", "set_pypeit_cols(self, write_bkg_pairs=False, write_manual=False): \"\"\" Generate the list of columns to", "have been ignored in the determination of the unique configurations,", "= table.Column(self.type_bitmask.type_names(type_bits), name='frametype') # KLUDGE ME # # TODO: It", "self['framebit'] == 0 # Select frames indx = self.type_bitmask.flagged(self['framebit'], ftype)", ":attr:`spectrograph`; see :func:`~pypeit.spectrographs.spectrograph.Spectrograph.allowed_mosaics`. Returns: :obj:`str`: Master key with configuration, calibration", "the number groups by searching for the maximum number #", "without any types indx = np.logical_not(self.type_bitmask.flagged(type_bits)) if np.any(indx): msgs.info(\"Couldn't identify", "frames to return. Can be an array of indices or", "group must be from one and only one instrument '", "of strings (e.g., ['A','C']). Returns: numpy.array: The list of unique", "return_index=True) ignore = uniq == 'None' if np.sum(ignore) > 0:", "self.configs[cfg_iter[cfg_indx]] = self.get_configuration(i, cfg_keys=cfg_keys) cfg_indx += 1 msgs.info('Found {0} unique", "been defined yet. \"\"\" if self.configs is not None and", "If the index is provided, the frames must also be", "``PypeIt`` columns for manual extraction Returns: `numpy.ndarray`_: Array of columns", "self.keys(): self.table[c] = self.table[c].astype(t) def _build(self, files, strict=True, usrdata=None): \"\"\"", "is added to the beginning of each string. 
Ignored if", "merge=True): \"\"\" Set and return a Table with the frame", "configuration column (A, B, C, etc), the calibration group is", "not in self.keys() else self['dispname'][row] dispangle = 'none' if 'dispangle'", "in the table, and the keywords used to set the", "of the specified frame type is included. Returns: list: List", "keywords, depending on the processing level of the metadata table,", "with multiple calibration groups. Args: row (:obj:`int`): The 0-indexed row", "given frame. This is not the same as the backwards", "be a valid pypeit metadata keyword, specific to :attr:`spectrograph`. Additional", "'slitwid' not in self.keys() else self['slitwid'][row] slitlen = 'none' if", "== ['all']: cfg_keys = list(cfg.keys()) else: _configs = configs if", "(:class:`pypeit.core.framematch.FrameTypeBitMask`): The bitmask used to set the frame type of", "} #_det = np.arange(self.spectrograph.ndet)+1 if det is None else [det]", "of the unique configurations, but the frame types have not", "meta values are not # unique for this configuration. if", "root name for file output. \"\"\" _obstime = self.construct_obstime(row) if", "merge=True): \"\"\" Generate a table of frame types from the", "producing/using the *.calib file. _cfg = {} for setup in", "be present to determine the calibration # group if 'setup'", "self.keys(): msgs.error('Cannot write calibration groups without \\'setup\\' and \\'calibbit\\' '", "-99999.0 isort = np.argsort(mjd) subtbl = subtbl[isort] subtbl.write(ff, format='ascii.fixed_width') ff.write('##end\\n')", "data is None and files is None: # Warn that", "identifier. See the keys for :class:`pypeit.core.framematch.FrameTypeBitMask`. If set to the", "group bit based on the string values of the 'calib'", "configuration lines to include in the file. If None are", "root = '{0}_{1}'.format(self.spectrograph.name, setup) odir = os.path.join(output_path, root) if not", "because this method is only called for a preconstructed #", "spectrograph class. 
For the data table, one should typically provide", "on the provided object - sets all the configurations to", "optional): Remove any configurations set to 'None'. If copy is", "the construction of the unique configurations. If :func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames` does not", "method for :attr:`spectrograph` will be ignored in the construction of", "tbl_cols])[0][0] if indx != 0: tbl_cols.insert(0, tbl_cols.pop(indx)) # Make sure", "def get_configuration(self, indx, cfg_keys=None): \"\"\" Return the configuration dictionary for", "for manual extraction Returns: `numpy.ndarray`_: Array of columns to be", "type_bits = np.zeros(len(self), dtype=self.type_bitmask.minimum_dtype()) # Use the user-defined frame types", "return an astropy.table.Table, not a PypeItMetaData object. def __getitem__(self, item):", "the 'calib' column does not exist - if the 'comb_id'", "to the metadata table generated within PypeIt. **Note**: This is", "configurations.'.format(len(self.configs))) return self._get_cfgs(copy=copy, rm_none=rm_none) msgs.info('Using metadata to determine unique configurations.')", "None: # Select frames in the same calibration group indx", "try: time.Time(data['mjd'], format='mjd') except ValueError: mjd = np.asarray(data['mjd']) filenames =", "is True, this is done *after* :attr:`configs` is copied to", "metadata for each. Raises: PypeItError: Raised if there are list", "# Create the output directory root = '{0}_{1}'.format(self.spectrograph.name, setup) odir", "in `usrdata`. You can avoid this step by setting `match_type=False`.", "but we should consider reformatting/removing it. 
Args: ofile (:obj:`str`): Name", "the unique instrument configurations by finding unique combinations of the", "usrdata=None, strict=True): if data is None and files is None:", "= max(l+1, ngroups) # Define the bitmask and initialize the", "``# `` is added to the beginning of each string.", "`astropy.table.Table`: The table object that would have been written/printed if", "must match the existing number of table rows. merge (:obj:`bool`,", "if isinstance(configs, list) else [configs] cfg_keys = [key for key", "rm_none=True) # TODO: We should edit the relevant follow-on code", "some frames cannot be assigned to a configuration, the spectrograph", "known type. calib_ID (:obj:`int`, optional): Index of the calibration group", "only have one calibration group # Assign everything from the", "dictionary, one dictionary per configuration with the associated metadata for", "if `global_frames` is provided but the frame types have not", "file) if 'calib' in self.keys() and 'calibbit' not in self.keys()", "return if columns in [None, 'all']: tbl_cols = list(self.keys()) elif", "if self['calib'][i] in ['all', 'None']: # No information, keep going", "biases and darks) if global_frames is not None: if 'frametype'", "np.sum(ignore) > 0: msgs.warn('Ignoring {0} frames with configuration set to", "are valid. This currently only checks that the science frames", "double_alphabet = [str_i + str_j for str_i in string.ascii_uppercase for", "The table will be empty!') # Initialize internals self.spectrograph =", "optional): One or more strings to write to the top", "above file could be a twilight flat frame that was'", "output. If None, the table is printed in its current", "# Grab Meta for meta_key in self.spectrograph.meta.keys(): value = self.spectrograph.get_meta_value(headarr,", "removed. 
assert isinstance(cfg_limits[key], list), \\ 'CODING ERROR: valid_configuration_values is not", "A user provided set of data used to supplement or", "be reduced by PypeIt' \\ ' and will be removed", "type in `usrdata` to the type in the internal table.", "level=1)) + '\\n') ff.write('#---------------------------------------------------------\\n') mjd = subtbl['mjd'].copy() # Deal with", "self.table, exprng=exprng) # Turn on the relevant bits type_bits[indx] =", "the 'calib' column is not present, set a single calibration", "'None' # configurations... cfg = self.unique_configurations(copy=True, rm_none=True) # Get the", "if isinstance(config[k], float): if row[k] is None: match.append(False) elif np.abs(config[k]-row[k])/config[k]", "calibration group # Assign everything from the same configuration to", "first) ofile = None if output in [None, 'table'] else", "None: # Warn that table will be empty msgs.warn('Both data", "0: msgs.error('Configuration {0} defined using unavailable keywords!'.format(k)) self.table['setup'] = 'None'", "cfg_iter = list(string.ascii_uppercase) + double_alphabet cfg_indx = 0 # TODO:", "the # MasterFrames and QA for icbit in np.unique(self['calibbit'].data): cbit", "file to set the first unique configuration self.configs = {}", "- This is for backwards compatibility, but we should consider", "bitmask used to keep track of the calibration group bits.", "cfg_lines=None, write_bkg_pairs=False, write_manual=False, configs=None): \"\"\" Write a pypeit file in", "of configuration lines to include in the file. 
If None", "relevant bits type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftype) # Find the nearest", "os.getcwd() # Find unique configurations, always ignoring any 'None' #", "# Initially tried to subclass this from astropy.table.Table, but that", "groups by searching for the maximum number # provided, regardless", "if 'setup' column is not defined, or if `global_frames` is", "table in ascii format with io.StringIO() as ff: output_tbl.write(ff, format='ascii.fixed_width')", "'calib', 'comb_id', 'bkg_id']: if col not in tbl_cols: continue indx", "not an attribute of # PypeItMetaData? def row_match_config(row, config, spectrograph):", "if row[k] is None: match.append(False) elif np.abs(config[k]-row[k])/config[k] < spectrograph.meta[k]['rtol']: match.append(True)", "subtbl[isort] subtbl.write(ff, format='ascii.fixed_width') ff.write('##end\\n') ff.close() # TODO: Do we need", "parameters specific to the provided spectrograph are used. configs (:obj:`dict`):", "file to which to write the table contents. rows (`numpy.ndarray`_,", "latter is not checked. If None, this is set by", "# TODO: Science frames can only have one calibration group", "object # will return an astropy.table.Table, not a PypeItMetaData object.", "add self.set_user_added_columns() # Validate instrument name self.spectrograph.vet_instrument(self.table) def _impose_types(self, columns,", ":obj:`list`): One or more frame types to append/overwrite. append (:obj:`bool`,", "for key in cfg_limits.keys(): # NOTE: For now, check that", "types should be assigned to it: for cfg_key in _configs.keys():", "to a zero-filled string with two digits (the maximum number", "sure the basic parameters are the first few columns; do", "assigned to that group. ngroups = 0 for i in", "was observed. .. todo:: - Consolidate with :func:`convert_time` ? 
Args:", "= np.where(np.logical_not(good))[0] for i in indx: msg += ' {0}\\n'.format(self['filename'][i])", "# Make sure the dithers and combination and background IDs", "table already contains the column in `usrdata`, the function will", "file not defined so just print it print('\\n'.join(data_lines)) return None", "etc), the calibration group is the same as the calibration", "note:: This should only be run if all files are", "\"\"\" Provides a table and interface to the relevant fits", "{'--': {'disperser': {'dispname': dispname, 'dispangle':dispangle}, 'dichroic': dichroic, 'slit': {'decker': decker,", "= np.where(self.find_frames(ftype))[0] for i in indx: self['calib'][i] = calibs #", "> 0 and match_type: for key in existing_keys: if len(self.table[key].shape)", "the frames are going to be # removed msg =", "include. If None, all detectors are included. config_only (:obj:`bool`, optional):", "return ofiles def write(self, output=None, rows=None, columns=None, sort_col=None, overwrite=False, header=None):", "and default: self['calib'] = '0' # Make sure the calibbit", "one or more frames. Args: indx (:obj:`int`, array-like): One or", "_obstime = self.construct_obstime(row) if obstime is None else obstime tiso", "is probably a standard star foundstd = flux_calib.find_standard_file(ra, dec, check=True)", "table will be empty!') # Initialize internals self.spectrograph = spectrograph", "TODO: Placeholder: Allow an empty set of configuration keys #", "indx &= self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng) indx = self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng)", "framematch.FrameTypeBitMask() # Build table self.table = table.Table(data if files is", "but we should consider reformatting it. And it may be", "optional): Just return the dictionary with the configuration, don't include", "column to use for sorting. 
Args: output (:obj:`str`, optional): Output", "grp) def find_frame_calib_groups(self, row): \"\"\" Find the calibration groups associated", "i in indx: msg += ' {0}\\n'.format(self['filename'][i]) msgs.warn(msg) # And", "return output_tbl # Always write the table in ascii format", "TODO: This check should be done elsewhere # Check if", "the relevant spectrograph class. Args: row (:obj:`int`): The 0-indexed row", "any changes to the strings will be truncated at 4", "numpy as np import yaml from astropy import table, coordinates,", "for backwards compatibility, but we should consider reformatting/removing it. -", "science frames without sky coordinates.') else: # TODO: Do we", "pypeit files ofiles = [None]*len(cfg_keys) for j,setup in enumerate(cfg_keys): #", "usrdata.keys(): self.table[key] = usrdata[key][srt] def finalize_usr_build(self, frametype, setup): \"\"\" Finalize", "ff.write('##end\\n') ff.close() # TODO: Do we need a calib file?", "None else: # TODO: This check should be done elsewhere", "file. The class is used to provide the header keyword", "set by :func:`~pypeit.spectrographs.spectrograph.Spectrograph.valid_configuration_values`. \"\"\" cfg_limits = self.spectrograph.valid_configuration_values() if cfg_limits is", "flag to overwrite.') # Check the rows input if rows", "all existing frame types are overwitten by the provided type.", "= np.where([t == col for t in tbl_cols])[0][0] if indx", "row (astropy.table.Row): From fitstbl config (dict): Defines the configuration spectrograph", "(:obj:`int`): The index of the table row to use to", "merge=merge) def set_pypeit_cols(self, write_bkg_pairs=False, write_manual=False): \"\"\" Generate the list of", "is used to match to the metadata table generated within", "for single files _files = files if hasattr(files, '__len__') else", "be None or a string.'.format( self.spectrograph.__class__.__name__) # Get the list", "the input dictionary if user is not None: if len(user.keys())", "we still want this to run. 
# Validate, print out", "' \\ 'correctly defined for {0}; values must be None", "for idx, ifile in enumerate(_files): # User data (for frame", "write_sorted(self, ofile, overwrite=True, ignore=None, write_bkg_pairs=False, write_manual=False): \"\"\" Write the sorted", "been defined. \"\"\" if 'setup' not in self.keys(): msgs.error('Cannot get", "use = np.ones(len(self), dtype=bool) for ftype in ignore_frames: use &=", "and detector. Raises: PypeItError: Raised if the 'setup' or 'calibbit'", "The list of configuration lines to include in the file.", "the associated values of the metadata associated with each configuration.", "self['framebit'][indx] = 0 self['framebit'][indx] = self.type_bitmask.turn_on(self['framebit'][indx], flag=frame_type) self['frametype'][indx] = self.type_bitmask.type_names(self['framebit'][indx])", "start by # # flagging all as true # indx", "self.table['calib'] = np.full(len(self), 'None', dtype=object) for i in range(n_cfg): self['calib'][(self['setup']", "not in columns: columns += [key] # Take only those", "rows that contain the frames of the requested type. Raises:", "array if index=True, with the rows that contain the frames", "with comma-separated types. merge (:obj:`bool`, optional): Merge the frame typing", "self.keys(): msgs.error('Cannot write sorted instrument configuration table without \\'setup\\' '", "frametype (:obj:`dict`): A dictionary with the types designated by the", "def _build(self, files, strict=True, usrdata=None): \"\"\" Generate the fitstbl that", "# unique for this configuration. if uniq_meta.size != 1: msgs.warn('When", "each calibration group should only contain # frames from a", "clean_configurations(self): \"\"\" Ensure that configuration-defining keywords all have values that", "Args: indx (:obj:`int`): The index of the table row to", "group. TODO: Is this appropriate for NIR data? \"\"\" is_science", "provided string or list of strings (e.g., ['A','C']). 
See :attr:`configs`.", "with the types designated by the user. The file name", "does not exist if 'calibbit' in self.keys(): del self['calibbit'] #", "configurations set to 'None'. If copy is True, this is", "Check against current maximum ngroups = max(l+1, ngroups) # Define", "find_frame_files(self, ftype, calib_ID=None): \"\"\" Return the list of files with", "printed in its current state. overwrite (:obj:`bool`, optional): Overwrite any", "used during the reduction. The content of the fits table", "one should typically provide either the file list from which", "to supplement or overwrite the metadata. If the internal table", "in range(n_cfg): self['calib'][(self['setup'] == configs[i]) & (self['framebit'] > 0)] =", "The number of keys therefore *must* match the number of", "optional): Ignore configurations in the provided list. return_index (:obj:`bool`, optional):", "is not None: for key in cfgs.keys(): if key in", "TODO: Do we need a calib file? def write_calib(self, ofile,", "1 # Check if any of the other files show", "calibration bit names as these are the root of the", "Assign the group; ensure the integers are unique self['calibbit'][i] =", "Groups have already been set if 'calib' in self.keys() and", "\"\"\" # Checks if 'frametype' in self.keys() or 'framebit' in", "col in tbl_cols] if np.any(badcol): raise ValueError('The following columns are", "the user that some of the frames are going to", "yet. \"\"\" if self.configs is not None and not force:", "file. The sorted file lists all the unique instrument configurations", "yet if 'calib' not in self.keys() and default: self['calib'] =", "is identical to the pypeit file output. .. todo:: -", "is set by :func:`~pypeit.spectrographs.spectrograph.Spectrograph.valid_configuration_values`. \"\"\" cfg_limits = self.spectrograph.valid_configuration_values() if cfg_limits", "ALLOWED!! # TODO: This should be converted to an assert", "The list of files to include in the table. 
data", "used in the fits table> \"\"\" # Columns for output", "1 def set_user_added_columns(self): \"\"\" Set columns that the user *might*", "ftype_colm = ftype_colmA fbits_colm = table.Column(type_bits, name='framebit') t = table.Table([ftype_colm,", "types): \"\"\" Impose a set of types on certain columns.", "array of the correct length. Returns: list: List of the", "This is different from :func:`unique_configurations` because the latter determines and", "\"\"\" Return the list of files with a given frame", "add \"\"\" if 'manual' not in self.keys(): self['manual'] = ''", "letter identifier; i.e., the ' 'configuration cannot be None.') #", "# find_frames will throw an exception if framebit is not", "table must have \\'filename\\' column!') # Make sure the data", "- sets the frame types based on the provided object", "# Check the rows input if rows is not None", "for float meta (e.g. dispangle) Returns: bool: True if the", "If run before the ``'setup'`` column is initialized, this function", "for each. \"\"\" _cfg = deepcopy(self.configs) if copy else self.configs", "standard frame, see :func:`set_combination_groups`. .. note:: This should only be", "output columns output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs, write_manual=write_manual) cfgs = self.unique_configurations(copy=ignore is", "for setup in cfg.keys(): _cfg[setup] = {} _cfg[setup]['--'] = deepcopy(cfg[setup])", "included. config_only (:obj:`bool`, optional): Just return the dictionary with the", "table will be empty msgs.warn('Both data and files are None", "self.spectrograph.__class__.__name__) # Get the list of frames of this type", "radec_done: ras, decs = meta.convert_radec(usrdata['ra'][~nones].data, usrdata['dec'][~nones].data) usrdata['ra'][~nones] = ras.astype(dtype) usrdata['dec'][~nones]", "each science or standard frame, see :func:`set_combination_groups`. .. note:: This", "the provided spectrograph are used. 
configs (:obj:`dict`): A dictionary of", "table and interface to the relevant fits file metadata used", "row. \"\"\" _cfg_keys = self.spectrograph.configuration_keys() if cfg_keys is None else", "'None': msgs.warn('RA and DEC must not be None for file:'", "must be present to determine the calibration # group if", "the calibration group bit based on the string values of", "data['directory'][idx] = '.' # Read the fits headers headarr =", "the provided calibration group. Args: grp (:obj:`int`): The calibration group", "the frame types have been set, ignore anything listed in", "defined; run get_frame_types.') ignore_frames = list(ignore_frames.keys()) msgs.info('Unique configurations ignore frames", "Print status message msg = 'Time invalid for {0} files.\\n'.format(len(bad_files))", "Master key with configuration, calibration group(s), and detector. Raises: PypeItError:", "\\'setup\\' column first; try running set_configurations.') configs = np.unique(self['setup'].data).tolist() if", "set yet. \"\"\" # Configurations have already been set if", "PypeItMetaData.' ' The table will be empty!') # Initialize internals", "ignore_bad_header = self.par['rdx']['ignore_bad_headers']) if isinstance(value, str) and '#' in value:", "get_frame_types.') ignore_frames = list(ignore_frames.keys()) msgs.info('Unique configurations ignore frames with type:", "a boolean array. Returns: numpy.ndarray: A boolean array, or an", "write if configs is None or configs == 'all' or", "in self.keys() and not force: self._set_calib_group_bits() self._check_calib_groups() return # TODO:", "Write a pypeit file in data-table format. The pypeit file", "'namp': self.spectrograph.detector[d-1]['numamplifiers']} return setup[skey] if config_only else setup def get_configuration_names(self,", "been set if 'calib' in self.keys() and 'calibbit' in self.keys()", "sure that it matches with what can be read from", "columns haven't been defined. 
\"\"\" if 'setup' not in self.keys()", "ordered srt = [np.where(f == self.table['filename'])[0][0] for f in usrdata['filename']]", "columns in [None, 'all']: tbl_cols = list(self.keys()) elif columns ==", "configuration is included. write_bkg_pairs (:obj:`bool`, optional): When constructing the :class:`pypeit.metadata.PypeItMetaData`", "the 'setup' columns does not exist, fill the configuration setup", "combinations of the full set of metadata exprng = self.par['scienceframe']['exprng']", "a known type. calib_ID (:obj:`int`, optional): Index of the calibration", "to set the frame type of each file. The metadata", "simply constructs the configuration dictionary using the unique configurations in", "columns; do them in # reverse order so I can", "provided row. The master key is the combination of the", "frame types have not been defined yet. \"\"\" # Set", "Raises: PypeItError: Raised if the 'calibbit' column is not defined.", "to collect the data save to each file. The class", "list of files to include in the table. data (table-like,", "# empty or corrupt files we still want this to", "using :func:`construct_obstime`. Returns: str: The root name for file output.", "files ofiles = [None]*len(cfg_keys) for j,setup in enumerate(cfg_keys): # Create", "to use to construct the configuration. If None, the `configuration_keys`", "# Assign the group; ensure the integers are unique self['calibbit'][i]", "vanilla configuration is included. write_bkg_pairs (:obj:`bool`, optional): When constructing the", "nearest standard star to each science frame # TODO: Should", "self._get_cfgs(copy=copy, rm_none=rm_none) if 'setup' in self.keys(): msgs.info('Setup column already set.", "not data['directory'][idx]: data['directory'][idx] = '.' 
# Read the fits headers", "in ignore_frames.items(): # TODO: For now, use this assert to", "ras, decs = meta.convert_radec(usrdata['ra'][~nones].data, usrdata['dec'][~nones].data) usrdata['ra'][~nones] = ras.astype(dtype) usrdata['dec'][~nones] =", "None: # No values specified, so we're done return good", "length.') msgs.info('Using user-provided frame types.') for ifile,ftypes in user.items(): indx", "MJD of the observation. \"\"\" return time.Time(self['mjd'][row], format='mjd') def construct_basename(self,", "= self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng) # Turn on the relevant bits", "del self.table['frametype'] if 'framebit' in self.keys(): del self.table['framebit'] # #", "is_science = self.find_frames('science') for i in range(len(self)): if not is_science[i]:", "self.keys() and not force: return # Groups have been set", "tbl_cols.insert(0, tbl_cols.pop(indx)) # Make sure the dithers and combination and", "should be assigned to it: for cfg_key in _configs.keys(): in_cfg", "to set the internal :attr:`configs`. If this attribute is not", "table (pypeit file):\\n' indx = np.where(np.logical_not(good))[0] for i in indx:", "metadata from the fits files. strict (:obj:`bool`, optional): Function will", "raise ValueError('idname is not set in table; cannot use it", "row, det=None, config_only=False): \"\"\" Construct the setup dictionary. .. todo::", "usrdata[key][~nones] = usrdata[key][~nones].astype(dtype) # Include the user data in the", "``PypeIt`` files generated. \"\"\" # Set output path if output_path", "table (:class:`astropy.table.Table`): The table with the relevant metadata for each", "``PypeIt`` columns for calib, comb_id and bkg_id write_manual (:obj:`bool`, optional):", "path for the output pypeit files. 
If None, set to", "continue # Convert to a list of numbers l =", "a vector was provided if rows is not None: output_tbl", "and len(rows) != len(self.table): raise ValueError('Boolean vector selecting output rows", "self['calibbit'][i] = self.calib_bitmask.turn_on(self['calibbit'][i], grp) def _check_calib_groups(self): \"\"\" Check that the", "a list.'.format(self.spectrograph.__class__.__name__) # Check that the metadata are valid for", "the provided type. \"\"\" if not append: self['framebit'][indx] = 0", "to the type in the internal table. See above. Raises:", "list: List of the full paths of one or more", "used in all calibration groups # (like biases and darks)", "= os.path.split(ifile) if not data['directory'][idx]: data['directory'][idx] = '.' # Read", "\"\"\" Impose a set of types on certain columns. ..", "the calibration bits for i in range(len(self)): # Convert the", "write!') # Grab output columns output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs, write_manual=write_manual) #", "the first few columns; do them in # reverse order", "can be specific to each instrument. Args: output_path (:obj:`str`, optional):", "used to match to the metadata table generated within PypeIt.", "keywords in the provided configuration match with the metadata keywords.", "in self.keys() and default: self['calib'] = '0' # Make sure", "back in a frame that has an # invalid key,", "might add \"\"\" if 'manual' not in self.keys(): self['manual'] =", "in range(nrows): for d, cfg in _configs.items(): if row_match_config(self.table[i], cfg,", "Set and return a Table with the frame types and", "selected column must be a valid pypeit metadata keyword, specific", "*must* match configuration_keys() for spectrographs setup = {skey: {'--': {'disperser':", "frame type identifier. See the keys for :class:`pypeit.core.framematch.FrameTypeBitMask`. calib_ID (:obj:`int`,", "columns that the user might add \"\"\" if 'manual' not", ":attr:`spectrograph`. 
Additional valid keywords, depending on the processing level of", "= [] # comb, bkg columns if write_bkg_pairs: extras +=", "ofile is None: # Output file not defined so just", "RA, DEC (backwards compatability) if key in ['ra', 'dec'] and", "import datetime from IPython import embed import numpy as np", "file headers. The table must have a `filename` column that", "as an argument and converted to a zero-filled string with", "= self.spectrograph.config_independent_frames() if ignore_frames is not None: if 'frametype' not", "is here because the spectrograph # needs to be defined", "any of the # meta data values indx &= np.isin(self.table[metakey],", "# (like biases and darks) if global_frames is not None:", "value)) data[meta_key].append(value) msgs.info('Added metadata for {0}'.format(os.path.split(ifile)[1])) # JFH Changed the", "same as returned by the spectrograph `configuration_keys` method. The latter", "to write!') # Grab output columns output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs, write_manual=write_manual)", "bits. See :class:`pypeit.core.framematch.FrameTypeBitMask` for the allowed frame types. \"\"\" #", "run before the ``'setup'`` column is initialized, this function determines", "or more strings used to select the configurations to include", "'none' if 'dispname' not in self.keys() else self['dispname'][row] dispangle =", "np.ones(len(self), dtype=bool) for key in cfg_limits.keys(): # NOTE: For now,", "(self['framebit'] > 0)] = str(i) # Allow some frame types", "to False to instead report a warning and continue. Attributes:", "[np.where(f == self.table['filename'])[0][0] for f in usrdata['filename']] # Convert types", "write_manual: extras += ['manual'] for key in extras: if key", "the metadata are valid for this column. 
indx = np.isin(self[key],", "else self['binning'][row] skey = 'Setup {}'.format(self['setup'][row]) # Key names *must*", "None] = -99999.0 isort = np.argsort(mjd) subtbl = subtbl[isort] subtbl.write(ff,", "contain the frames of the requested type. Raises: PypeItError: Raised", "setups, indx = np.unique(self['setup'], return_index=True) if ignore is not None:", "build of the table based on user-provided data, typically pulled", "20 arcmins of a listed standard, # then it is", "has been set, this simply constructs the configuration dictionary using", "if cfg_keys is None else cfg_keys return {k:self.table[k][indx] for k", "add .. note:: :attr:`table` is edited in place. This function", "that was' + msgs.newline() + 'missed by the automatic identification.')", "unassigned), the combination groups are set to be unique for", "(:obj:`list`, optional): Ignore configurations in the provided list. write_bkg_pairs (:obj:`bool`,", "empty or corrupt files we still want this to run.", "= ff.getvalue().split('\\n')[:-1] # Write the file make_pypeit_file(ofiles[j], self.spectrograph.name, [], cfg_lines=cfg_lines,", "import os import io import string from copy import deepcopy", "msgs.newline() + f) msgs.warn('The above file could be a twilight", "output_tbl[tbl_cols][srt] else: output_tbl = output_tbl[tbl_cols] if output == 'table': #", "None # Initialize columns that the user might add self.set_user_added_columns()", "self.type_bitmask = framematch.FrameTypeBitMask() # Build table self.table = table.Table(data if", "optional): If the 'setup' column does not exist, fill the", "frame types based on the provided object - sets all", "if the 'calib' column already exists. Raises: PypeItError: Raised if", "' The table will be empty!') # Initialize internals self.spectrograph", "else [files] # Build lists to fill data = {k:[]", "'correctly defined for {0}; values must be None or a", "the internal :attr:`configs`. 
If this attribute is not None, this", "and provides the configurations themselves. This is mostly a convenience", "of the `usrdata` column to the existing data type. If", "name and type are expected to be the key and", "`filename` column that is used to match to the metadata", "in self.keys(): msgs.warn('Removing existing frametype and framebit columns.') if 'frametype'", "if write_manual: extras += ['manual'] for key in extras: if", "can be anything allowed by the instantiation of :class:`astropy.table.Table`. usrdata", "integer. Returns: numpy.ndarray: Boolean array selecting those frames in the", "write_manual=write_manual) # Write the pypeit files ofiles = [None]*len(cfg_keys) for", "the string to the group list grp = parse.str2list(self['calib'][i], ngroups)", "& in_group ftype_in_group = self.find_frames(key) & in_cbit cfg[setup[0]][cbit][key] = [", "so we're done return # Alert the user that some", "in existing_keys: if len(self.table[key].shape) > 1: # NOT ALLOWED!! #", "self.find_frames(key) & in_group ftype_in_group = self.find_frames(key) & in_cbit cfg[setup[0]][cbit][key] =", "= np.unique(self['setup'].data).tolist() if 'None' in configs: configs.remove('None') # Ignore frames", "parameter set must be of type PypeItPar.') self.type_bitmask = framematch.FrameTypeBitMask()", "will be empty!') # Initialize internals self.spectrograph = spectrograph self.par", "any 'None' # configurations... cfg = self.unique_configurations(copy=True, rm_none=True) # Get", "the key and value of the dictionary, respectively. 
The number", "bits for i in range(len(self)): # Convert the string to", "be assigned to a single calibration group.') @property def n_calib_groups(self):", "[key for key in cfg.keys() if key in _configs] if", "self._check_calib_groups() def find_frames(self, ftype, calib_ID=None, index=False): \"\"\" Find the rows", "zip(columns, types): if c in self.keys(): self.table[c] = self.table[c].astype(t) def", "write(self, output=None, rows=None, columns=None, sort_col=None, overwrite=False, header=None): \"\"\" Write the", "in all calibration groups # (like biases and darks) if", "'Time invalid for {0} files.\\n'.format(len(bad_files)) msg += 'Continuing, but the", "types may have been ignored ignore_frames = self.spectrograph.config_independent_frames() if ignore_frames", "\"\"\" Edit the frame type by hand. Args: indx (:obj:`int`):", "return :attr:`configs` with possible alterations. This method *should not* be", "set, so we're done return # Some frame types may", "# # Initialize: Flag frames with the correct ID name", "useIDname \\ # else np.ones(len(self), dtype=bool) # Include a combination", "class definition. # This should probably go somewhere else or", "the table that would have been printed/written to disk is", "_cfg[setup] = {} _cfg[setup]['--'] = deepcopy(cfg[setup]) cfg = _cfg #", "as a comma-separated string. If None or ``'all'``, all columns", "provide instrument setup without \\'setup\\' column; ' 'run set_configurations.') dispname", "we should consider reformatting/removing it. - This is complicated by", "we can force type_names() in bitmask to always return the", "force type_names() in bitmask to always return the # correct", "as the name of an ascii file to which to", "= self.find_calib_group(i) in_cbit = self['calibbit'] == cbit # Find the", "in self.keys(): msgs.error('Calibration groups are not set. 
First run set_calibration_groups.')", "set not_setup = self.table['setup'] == 'None' if not np.any(not_setup): #", "If ``'table'``, the table that would have been printed/written to", "= ['None']*len(_files) # Build the table for idx, ifile in", "# Find the unique configurations in this group, ignoring any", "return setup[skey] if config_only else setup def get_configuration_names(self, ignore=None, return_index=False,", "from IPython import embed import numpy as np import yaml", "enumerate(cfg_keys): # Create the output directory root = '{0}_{1}'.format(self.spectrograph.name, setup)", "the calib file. The calib file provides the unique instrument", "# Add the directory and file name to the table", "optional): Overwrite any existing file with the same name. ignore", "idx, ifile in enumerate(_files): # User data (for frame type)", "string per file line; ``# `` is added to the", "list(cfg.keys()) else: _configs = configs if isinstance(configs, list) else [configs]", "_configs.keys(): in_cfg = self.table['setup'] == cfg_key for ftype, metakey in", "data (for frame type) if usrdata is None: usr_row =", "(see get_frame_types) and I'm just not using it... Args: frametype", "output. .. todo:: - This is for backwards compatibility, but", "# Start msgs.info(\"Typing files\") type_bits = np.zeros(len(self), dtype=self.type_bitmask.minimum_dtype()) # Use", "from pypeit.par import PypeItPar from pypeit.par.util import make_pypeit_file from pypeit.bitmask", "np.unique(self['setup'][in_cbit]).tolist() if 'None' in setup: setup.remove('None') # Make sure that", "column already exists. Raises: PypeItError: Raised if 'setup' column is", "type in `usrdata`. You can avoid this step by setting", "self.get_configuration(indx[i]) msgs.info('Found {0} unique configurations.'.format(len(self.configs))) return self._get_cfgs(copy=copy, rm_none=rm_none) msgs.info('Using metadata", "as those included in the pypeit file. 
Each selected column", "optional): Name of the column to use for sorting the", "meta.get_meta_data_model() # Check the input if not isinstance(usrdata, table.Table): raise", "write_bkg_pairs=False, write_manual=False): \"\"\" Generate the list of columns to be", "value = self.spectrograph.get_meta_value(headarr, meta_key, required=strict, usr_row=usr_row, ignore_bad_header = self.par['rdx']['ignore_bad_headers']) if", "Use ovewrite=True to overwrite.'.format(ofile)) # Construct the setups dictionary cfg", "msgs.info('Setup column already set. Finding unique configurations.') uniq, indx =", "and science frame. \"\"\" if 'comb_id' not in self.keys(): self['comb_id']", "row): \"\"\" Construct the MJD of when the frame was", "in the metadata table listed by the spectrograph ``configuration_keys`` method.", "determined if :attr:`configs` has not yet been defined. copy (:obj:`bool`,", "requested type. Raises: PypeItError: Raised if the `framebit` column is", "None or configs == 'all' or configs == ['all']: cfg_keys", "= '{0}_{1}'.format(self.spectrograph.name, setup) odir = os.path.join(output_path, root) if not os.path.isdir(odir):", "The 1-indexed detector number(s). If a tuple, it must include", "self.type_bitmask.turn_on(self['framebit'][indx], flag=frame_type) self['frametype'][indx] = self.type_bitmask.type_names(self['framebit'][indx]) def get_frame_types(self, flag_unknown=False, user=None, merge=True):", "set by :func:`unique_configurations`. force (:obj:`bool`, optional): Force the configurations to", "data # TODO: In this implementation, slicing the PypeItMetaData object", "data reduction. \"\"\" def __init__(self, spectrograph, par, files=None, data=None, usrdata=None,", "provides the unique instrument configurations (setups) and the association of", "columns to be used in the fits table> \"\"\" #", "are associated with one calibration group. 
TODO: Is this appropriate", "len(self.table): raise ValueError('Boolean vector selecting output rows has incorrect length.')", "(:obj:`bool`, optional): Remove any configurations set to 'None'. If copy", "assign to :attr:`table`. \"\"\" # Allow for single files _files", "if 'slitwid' not in self.keys() else self['slitwid'][row] slitlen = 'none'", "type_bits (numpy.ndarray): Integer bitmask with the frame types. The length", "the automatic identification.') b = self.type_bitmask.turn_off(b, flag='standard') continue # If", "= table.Table(data if files is None else self._build(files, strict=strict, usrdata=usrdata))", "(:obj:`bool`, optional): Merge the frame typing into the exiting table.", "user provided set of data used to supplement or overwrite", "keys # meaning that the instrument setup has only one", "the header for any of the provided files; see :func:`pypeit.spectrographs.spectrograph.get_headarr`.", "files have None in # their MJD. This is the", "self.spectrograph.config_independent_frames() if ignore_frames is None: # Nope, we're still done", "keys is set by :func:`~pypeit.spectrographs.spectrograph.Spectrograph.valid_configuration_values`. \"\"\" cfg_limits = self.spectrograph.valid_configuration_values() if", "groups in the provided list. Raises: PypeItError: Raised if the", "the unique configuration names. This provides just the list of", "the keys for :class:`pypeit.core.framematch.FrameTypeBitMask`. calib_ID (:obj:`int`, optional): Index of the", "the instrument configuration for {0} '.format(ftype) + 'frames, configuration {0}", "be reset. 
fill (:obj:`str`, optional): If the 'setup' column does", "dtype=object, otherwise # any changes to the strings will be", "if `usrdata` is not an `astropy.io.table.Table` KeyError: Raised if `filename`", "and the frame types have not yet been defined (see", "This is complicated by allowing some frame types to have", "have been set but the bits have not (likely because", "list), \\ 'CODING ERROR: valid_configuration_values is not correctly defined '", "type and science frame ID, if the latter is provided.", "calibration group that it must match. If None, any row", "Grab output columns output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs, write_manual=write_manual) cfgs = self.unique_configurations(copy=ignore", "self.spectrograph.meta.keys(): value = self.spectrograph.get_meta_value(headarr, meta_key, required=strict, usr_row=usr_row, ignore_bad_header = self.par['rdx']['ignore_bad_headers'])", "ignore_frames = self.spectrograph.config_independent_frames() if ignore_frames is not None: if 'frametype'", "first few columns; do them in # reverse order so", "# Restrict _configs = None if configs is None else", "sets the combination groups to be either undefined or to", "= np.unique(self['setup'][in_group]).tolist() setup = np.unique(self['setup'][in_cbit]).tolist() if 'None' in setup: setup.remove('None')", "what can be read from the pypeit file. The 'calibbit'", "{0} unique configurations.'.format(len(self.configs))) return self._get_cfgs(copy=copy, rm_none=rm_none) msgs.info('Using metadata to determine", "table. 
See ' 'usrdata argument of instantiation of PypeItMetaData.') usr_row", "cfg[setup[0]][cbit] = {} for key in self.type_bitmask.keys(): #ftype_in_group = self.find_frames(key)", "types have not yet been defined (see :func:`get_frame_types`), this method", "more strings used to select the configurations to include in", "' {0}\\n'.format(self['filename'][i]) msgs.warn(msg) # And remove 'em self.table = self.table[good]", "the frames to meet the other checks in this call.", "of :attr:`spectrograph` is used. Returns: dict: A dictionary with the", "None: # Nope, we're still done return # At this", "Args: indx (:obj:`int`, array-like): One or more 0-indexed rows in", "np.all(self['comb_id'] < 0): # find_frames will throw an exception if", "be explicit that the method returns None when writing to", "DEC (backwards compatability) if key in ['ra', 'dec'] and not", "Parsed for frametype for a few instruments (e.g. VLT) where", "configs=None, force=False, fill=None): \"\"\" Assign each frame to a configuration", "force=False, fill=None): \"\"\" Assign each frame to a configuration (setup)", "`astropy.io.table.Table` KeyError: Raised if `filename` is not a key in", "a different # configuration. for i in indx[1:]: j =", "must be None or a string.'.format( self.spectrograph.__class__.__name__) # Get the", "np.ones(len(self), dtype=bool) # Include a combination of instrument-specific checks using", "Do we want to do this here? indx = self.type_bitmask.flagged(type_bits,", "yaml # Skip this group if ignore is not None", "# # TODO: It would be good to get around", "numpy.ndarray: Boolean array selecting those frames in the table included", "If None, the `configuration_keys` of :attr:`spectrograph` is used. Returns: dict:", "ff.write('#---------------------------------------------------------\\n') mjd = subtbl['mjd'].copy() # Deal with possibly None mjds", "or more frames. Args: indx (:obj:`int`, array-like): One or more", "bits into the existing table. 
This will *overwrite* any existing", "any existing columns. Returns: `astropy.table.Table`: Table with two columns, the", "lines subtbl = self.table[output_cols][in_cfg] subtbl.sort(['frametype','filename']) with io.StringIO() as ff: subtbl.write(ff,", "copy (:obj:`bool`, optional): Return a deep copy of :attr:`configs` instead", "= [col not in all_cols for col in tbl_cols] if", "None: match.append(False) elif np.abs(config[k]-row[k])/config[k] < spectrograph.meta[k]['rtol']: match.append(True) else: match.append(False) else:", "import msgs from pypeit import utils from pypeit.core import framematch", "to disk is returned. Otherwise, the string is interpreted as", ":func:`pypeit.spectrographs.spectrograph.Spectrograph.pypeit_file_keys`, which can be specific to each instrument. Args: output_path", "self.find_frames(key) & in_cbit cfg[setup[0]][cbit][key] = [ os.path.join(d,f) for d,f in", "the top of the file, on string per file line;", "self.table = self.table[good] def _set_calib_group_bits(self): \"\"\" Set the calibration group", "is not None) if ignore is not None: for key", "{'dispname': dispname, 'dispangle':dispangle}, 'dichroic': dichroic, 'slit': {'decker': decker, 'slitwid':slitwid, 'slitlen':slitlen},", "The configuration must be present to determine the calibration #", "use to construct the configuration. cfg_keys (:obj:`list`, optional): The list", "frames can only have one calibration group # Assign everything", "1-indexed detector to include. If None, all detectors are included.", "from pypeit.bitmask import BitMask # TODO: Turn this into a", "self.unique_configurations(copy=True, rm_none=True) # Get the setups to write if configs", "provided, the frames must also be matched to the relevant", "number of detectors is 99). Using the calibration bit in", "more frames. 
\"\"\" if isinstance(indx, (int,np.integer)): return os.path.join(self['directory'][indx], self['filename'][indx]) return", "= None # Initialize columns that the user might add", "be the same as in the table, and the keywords", "current state. overwrite (:obj:`bool`, optional): Overwrite any existing file; otherwise", "msgs.info(\"Couldn't identify the following files:\") for f in self['filename'][indx]: msgs.info(f)", "``output == 'table'``. Otherwise, the method always returns None. Raises:", "the metadata keywords. Also raised when some frames cannot be", "= np.unique(self['directory'][in_cfg]).tolist() # Get the data lines subtbl = self.table[output_cols][in_cfg]", "self.unique_configurations(copy=True, rm_none=True) # TODO: We should edit the relevant follow-on", "associated with each configuration. The metadata keywords in the dictionary", "without sky coordinates.') else: # TODO: Do we want to", "not in self.keys(): msgs.error('To ignore frames, types must have been", "array-like): One or more 0-indexed rows in the table with", "indices or a boolean array of the correct length. Returns:", "of the metadata associated with each configuration. The metadata keywords", "['A','C']). Returns: numpy.array: The list of unique setup names. A", "return_index else setups def _get_cfgs(self, copy=False, rm_none=False): \"\"\" Convenience method", "defined frames that have been ignored in the determination of", "self.set_calibration_groups(default=True) self.set_combination_groups() def get_configuration(self, indx, cfg_keys=None): \"\"\" Return the configuration", "np.all(good): # All values good, so we're done return #", "Nope, we're still done return # At this point, we", "be (see get_frame_types) and I'm just not using it... 
Args:", "just print it print('\\n'.join(data_lines)) return None # Write the output", "file can be written (this is here because the spectrograph", "and 'calibbit' not in self.keys() and not force: self._set_calib_group_bits() self._check_calib_groups()", "the table, and the keywords used to set the configuration", "if len(self.table[key].shape) > 1: # NOT ALLOWED!! # TODO: This", "unique configurations identified. type_bitmask (:class:`pypeit.core.framematch.FrameTypeBitMask`): The bitmask used to set", "rm_none=rm_none) # Use the first file to set the first", ":class:`pypeit.core.framematch.FrameTypeBitMask` for the allowed frame types. \"\"\" # Checks if", "= {} self.configs[cfg_iter[cfg_indx]] = self.get_configuration(indx[0], cfg_keys=cfg_keys) cfg_indx += 1 #", "None: if 'frametype' not in self.keys(): msgs.error('To ignore frames, types", "Set combination groups. .. note:: :attr:`table` is edited in place.", "be provided as a string with comma-separated types. merge (:obj:`bool`,", "os.path.join(odir, '{0}.pypeit'.format(root)) # Get the setup lines setup_lines = dict_to_lines({'Setup", "columns does not exist, fill the configuration setup columns with", "point, we need the frame type to continue if 'frametype'", "the same configuration to the same # calibration group; this", "self.par['scienceframe']['exprng'] if ftype == 'science' \\ else self.par['calibrations']['{0}frame'.format(ftype)]['exprng'] # TODO:", "optional): Add additional ``PypeIt`` columns for manual extraction Returns: `numpy.ndarray`_:", "if there is a problem with the reading the header", "just return the modified table return output_tbl # Always write", "A list of strings with the frame types to use", "sets. Requires the 'setup' column to have been defined. For", "headers. Set to False to report a warning and continue.", "{0} does not have unique '.format(cfg_key) + '{0} values.' 
.format(meta))", "as true # indx = self['idname'] == self.spectrograph.idname(ftype) if useIDname", "the configuration, the calibration group, and the detector. The configuration", "&= self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng) indx = self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng) #", "is not set in table; cannot use it for file", "implementation, slicing the PypeItMetaData object # will return an astropy.table.Table,", "the relevant science frame. Args: ftype (str): The frame type", "fault! Args: force (:obj:`bool`, optional): Force the configurations to be", "- sets all the configurations to the provided `setup` -", "None or ``'all'``, all columns in are written; if ``'pypeit'``,", "column names types (:obj:`list`): List of types \"\"\" for c,t", "only checks that the science frames are associated with one", "name primarily for PypeIt file output. Args: row (:obj:`int`): The", "indx &= self.find_calib_group(calib_ID) # Return return np.where(indx)[0] if index else", "fits metadata required by PypeIt. .. include common links, assuming", "in self.keys() else self['slitlen'][row] binning = '1,1' if 'binning' not", "row to use to construct the configuration. cfg_keys (:obj:`list`, optional):", "in value: value = value.replace('#', '') msgs.warn('Removing troublesome # character", "= np.asarray(data['filename']) bad_files = filenames[mjd == None] # Print status", "than 0 (meaning they're unassigned), the combination groups are set", "files.\\n'.format(len(bad_files)) msg += 'Continuing, but the following frames may be", "'calib' in self.keys() and 'calibbit' in self.keys() and not force:", "and continue. 
user (:obj:`dict`, optional): A dictionary with the types", "calib file provides the unique instrument configurations (setups) and the", "keys to use cfg_keys = self.spectrograph.configuration_keys() # Configuration identifiers are", "Write the metadata either to a file or to the", "self.configs if rm_none and 'None' in _cfg.keys(): del _cfg['None'] return", "config match = [] for k in config.keys(): # Deal", "# if useIDname and 'idname' not in self.keys(): # raise", "\\ 'CODING ERROR: metadata keywords set by config_indpendent_frames are not", "(:obj:`dict`): A dictionary of the unique configurations identified. type_bitmask (:class:`pypeit.core.framematch.FrameTypeBitMask`):", "in # the ignore_frames indx = np.arange(len(self)) ignore_frames = self.spectrograph.config_independent_frames()", "TODO: Is this appropriate for NIR data? \"\"\" is_science =", "to use to build the table. strict (:obj:`bool`, optional): Function", "\"\"\" Finalize the build of the table based on user-provided", "# Find the frames of this type that match any", "write_bkg_pairs=False, write_manual=False): \"\"\" Write the sorted file. The sorted file", "ignore_frames: use &= np.logical_not(self.find_frames(ftype)) indx = indx[use] if len(indx) ==", "that the user might add \"\"\" if 'manual' not in", "else setup def get_configuration_names(self, ignore=None, return_index=False, configs=None): \"\"\" Get the", "setups[rm] indx = indx[rm] # Restrict _configs = None if", "continue # Assign the group; ensure the integers are unique", "0: msgs.error('No setups to write!') # Grab output columns output_cols", "def n_calib_groups(self): \"\"\"Return the number of calibration groups.\"\"\" return None", "else [header] for h in _header: f.write(f'# {h}\\n') f.write('\\n') f.write('\\n'.join(data_lines))", "the bits have not (likely because the # data was", "types designated by the user. The file name and type", "for file:' + msgs.newline() + f) msgs.warn('The above file could", "to build the table. 
strict (:obj:`bool`, optional): Function will fault", "= 'Time invalid for {0} files.\\n'.format(len(bad_files)) msg += 'Continuing, but", "The file name and type are expected to be the", "of the observation. If None, constructed using :func:`construct_obstime`. Returns: str:", "might add self.set_user_added_columns() # Validate instrument name self.spectrograph.vet_instrument(self.table) def _impose_types(self,", "input if rows is not None and len(rows) != len(self.table):", "is used to determine the calibration group of each frame;", "are not set. First run set_calibration_groups.') return self.calib_bitmask.flagged(self['calibbit'].data, grp) def", "self.table = table.Table(data if files is None else self._build(files, strict=strict,", "# make the default 'all'? if configs is not None", "value): return self.table.__setitem__(item, value) def __len__(self): return self.table.__len__() def __repr__(self):", "doesn't exist yet if 'calib' not in self.keys() and default:", "if 'calibbit' in self.keys(): del self['calibbit'] # Groups have already", "list of frame types to ignore but the frame types", "the string is interpreted as the name of an ascii", "indx = output_tbl[sort_col] != None is_None = np.logical_not(indx) srt =", "write_bkg_pairs (:obj:`bool`, optional): Add additional ``PypeIt`` columns for calib, comb_id", "Index of the calibration group that it must match. If", "binning of a science image } } } #_det =", "ValueError: Raised if the columns to include are not valid,", "ascii file with open(ofile, 'w') as f: if header is", "fitstbl config (dict): Defines the configuration spectrograph (pypeit.spectrographs.spectrograph.Spectrograph): Used to", "invalid for {0} files.\\n'.format(len(bad_files)) msg += 'Continuing, but the following", "of configuration keys # meaning that the instrument setup has", "note:: :attr:`table` is edited in place. 
This function can be", "Start msgs.info(\"Typing files\") type_bits = np.zeros(len(self), dtype=self.type_bitmask.minimum_dtype()) # Use the", "been defined; run get_frame_types.') ignore_frames = list(ignore_frames.keys()) msgs.info('Unique configurations ignore", "0-indexed row used to construct the setup. det (:obj:`int`, optional):", "For now, use this assert to check that the #", "and 'all' not in _configs: use = np.isin(setups, _configs) setups", "the same configuration. .. todo:: - Maintain a detailed description", "self._get_cfgs(copy=copy, rm_none=rm_none) def set_configurations(self, configs=None, force=False, fill=None): \"\"\" Assign each", "of frames of this type without a # configuration indx", "values for configuration keys is set by :func:`~pypeit.spectrographs.spectrograph.Spectrograph.valid_configuration_values`. \"\"\" cfg_limits", "for meta_key in self.spectrograph.meta.keys(): value = self.spectrograph.get_meta_value(headarr, meta_key, required=strict, usr_row=usr_row,", "def edit_frame_type(self, indx, frame_type, append=False): \"\"\" Edit the frame type", "= self.table[output_cols][indx] # Write the file ff.write('##########################################################\\n') ff.write('Setup {:s}\\n'.format(setup)) ff.write('\\n'.join(dict_to_lines(cfgs[setup],", "And it may be something to put in the relevant", "Write the calib file. 
The calib file provides the unique", "data used to supplement or overwrite metadata read from the", "force: return self._get_cfgs(copy=copy, rm_none=rm_none) if 'setup' in self.keys(): msgs.info('Setup column", "'setup' not in self.keys(): msgs.error('Cannot provide instrument setup without \\'setup\\'", "\\'setup\\' column; ' 'run set_configurations.') dispname = 'none' if 'dispname'", "Instead of crashing out if there are unidentified files, leave", "for i in range(len(self)): if not is_science[i]: continue if len(self.calib_bitmask.flagged_bits(self['calibbit'][i]))", "# indx &= self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng) indx = self.spectrograph.check_frame_type(ftype, self.table,", "calib_ID (:obj:`int`, optional): Index of the calibration group that it", "have a `filename` column that is used to match to", "set of types on certain columns. .. note:: :attr:`table` is", "for d, cfg in _configs.items(): if row_match_config(self.table[i], cfg, self.spectrograph): self.table['setup'][i]", "this returns all frames without a known type. calib_ID (:obj:`int`,", "self.table.sort(col) def merge(self, usrdata, match_type=True): \"\"\" Use the provided table", "bkg_id write_manual (:obj:`bool`, optional): Add additional ``PypeIt`` columns for manual", "is not # set... sci_std_idx = np.where(np.any([self.find_frames('science'), self.find_frames('standard')], axis=0))[0] self['comb_id'][sci_std_idx]", "type. \"\"\" if not append: self['framebit'][indx] = 0 self['framebit'][indx] =", "list(self.keys()) elif columns == 'pypeit': tbl_cols = self.set_pypeit_cols(write_bkg_pairs=True) else: all_cols", "'B', etc.) and the row index where it first occurs.", "if indx != 0: tbl_cols.insert(0, tbl_cols.pop(indx)) # Make sure the", "an astropy.table.Table, not a PypeItMetaData object. def __getitem__(self, item): return", "groups (e.g., ['bias', 'dark']). default (:obj:`bool`, optional): If the 'calib'", "Write the sorted file. 
The sorted file lists all the", "The MJD of the observation. If None, constructed using :func:`construct_obstime`.", "len(n) == 0 else int(n) for n in self['calib'][i].replace(':',',').split(',')]) #", "subclass this from astropy.table.Table, but that # proved too difficult.", "else self.configs if rm_none and 'None' in _cfg.keys(): del _cfg['None']", "Returns: :obj:`dict`: A nested dictionary, one dictionary per configuration with", "ovewrite=True to overwrite.'.format(ofile)) # Construct the setups dictionary cfg =", "attributes self.configs = None self.calib_bitmask = None # Initialize columns", "for a given frame. This is not the same as", "the existing data type. If it can't it will just", "from the fits files. strict (:obj:`bool`, optional): Function will fault", "file. The metadata is validated using checks specified by the", "tbl_cols])[0][0] if indx != ncol-1: tbl_cols.insert(ncol-1, tbl_cols.pop(indx)) # Copy the", "associated frame type. If the index is provided, the frames", "key for the file in the provided row. The master", "def set_pypeit_cols(self, write_bkg_pairs=False, write_manual=False): \"\"\" Generate the list of columns", "that will be at the heart of PypeItMetaData. Args: files" ]
[ "= MoveFilesAs( prog_node ) dir_node = RemoveFiles( prog_node ) node", "dir_node ) dir_node = FileDir( prog_node ) \"\"\" def _makeTagetDirs(", "prog_node, target = dir_name ) dir_node = MoveFiles( prog_node, )", "tools.cpp.cxx, '--help -v' ) node = ExecuteMethod( target = my_function", "prog_node, ) dir_node = MoveFilesAs( prog_node ) dir_node = RemoveFiles(", "prog_node ) node = FindFiles( dir_node ) dir_node = FileDir(", "= self.target _makeTagetDirs( target ) for source in sources: if", "getTraceTargets( self, node, brief ): return self.target #//===========================================================================// class BuiltinTool(", "ExecuteCommand( self, options ): return ExecuteCommand( options ) def Install(self,", "\"BuiltinTool\", ) \"\"\" Unique Value - name + type value", ") \"\"\" Unique Value - name + type value node", "self, node, brief ): cmd = node.getSourceValues() return (cmd,) #//===========================================================================//", "node = ExecuteCommand( tools.cpp.cxx, '--help -v' ) node = ExecuteMethod(", "MoveFiles( prog_node, ) dir_node = MoveFilesAs( prog_node ) dir_node =", "= RemoveFiles( prog_node ) node = FindFiles( dir_node ) dir_node", "def __init__(self, options, target ): self.target = os.path.abspath( target )", "): shutil.copy( source, target ) node.setNoTargets() #//-------------------------------------------------------// def getTraceTargets( self,", "RemoveFiles( prog_node ) node = FindFiles( dir_node ) dir_node =", "__all__ = ( \"ExecuteCommand\", \"InstallBuilder\", \"BuiltinTool\", ) \"\"\" Unique Value", "if e.errno != errno.EEXIST: raise #//===========================================================================// class ExecuteCommand (Builder): def", "= dir_name ) dir_node = MoveFiles( prog_node, ) dir_node =", "as e: if e.errno != errno.EEXIST: raise #//===========================================================================// class ExecuteCommand", "errno from aql.nodes 
import Builder, FileBuilder from .aql_tools import Tool", "options ): return ExecuteCommand( options ) def Install(self, options, target", "dir_node = MoveFiles( prog_node, ) dir_node = MoveFilesAs( prog_node )", "target = self.target _makeTagetDirs( target ) for source in sources:", "type value node node = ExecuteCommand('gcc --help -v') tools.cpp.cxx node", "= my_function ) dir_node = CopyFiles( prog_node, target = dir_name", "build( self, node ): cmd = node.getSources() out = self.execCmd(", "CopyFilesAs( prog_node, target = dir_name ) dir_node = MoveFiles( prog_node,", "def build( self, node ): cmd = node.getSources() out =", "#//===========================================================================// class InstallBuilder (FileBuilder): def __init__(self, options, target ): self.target", "import Builder, FileBuilder from .aql_tools import Tool __all__ = (", "): return InstallBuilder( options, target ) def DirName(self, options): raise", "-v') tools.cpp.cxx node = ExecuteCommand( tools.cpp.cxx, '--help -v' ) node", "= node.getSources() out = self.execCmd( cmd ) node.setNoTargets() return out", "e.errno != errno.EEXIST: raise #//===========================================================================// class ExecuteCommand (Builder): def build(", ") except OSError as e: if e.errno != errno.EEXIST: raise", "node.setNoTargets() return out #//-------------------------------------------------------// def getBuildStrArgs( self, node, brief ):", "in sources: if os.path.isfile( source ): shutil.copy( source, target )", "ExecuteCommand( options ) def Install(self, options, target ): return InstallBuilder(", "-v' ) node = ExecuteMethod( target = my_function ) dir_node", ") dir_node = RemoveFiles( prog_node ) node = FindFiles( dir_node", "target ): self.target = os.path.abspath( target ) #//-------------------------------------------------------// def build(", "InstallBuilder (FileBuilder): def __init__(self, options, target ): self.target = os.path.abspath(", "= 
os.path.abspath( target ) #//-------------------------------------------------------// def build( self, node ):", "from .aql_tools import Tool __all__ = ( \"ExecuteCommand\", \"InstallBuilder\", \"BuiltinTool\",", "os.path import shutil import errno from aql.nodes import Builder, FileBuilder", ") dir_node = MoveFiles( prog_node, ) dir_node = MoveFilesAs( prog_node", "dir_node = CopyFiles( prog_node, target = dir_name ) dir_node =", "if os.path.isfile( source ): shutil.copy( source, target ) node.setNoTargets() #//-------------------------------------------------------//", "(FileBuilder): def __init__(self, options, target ): self.target = os.path.abspath( target", "shutil import errno from aql.nodes import Builder, FileBuilder from .aql_tools", "'--help -v' ) node = ExecuteMethod( target = my_function )", "target = my_function ) dir_node = CopyFiles( prog_node, target =", "try: os.makedirs( path_dir ) except OSError as e: if e.errno", "out = self.execCmd( cmd ) node.setNoTargets() return out #//-------------------------------------------------------// def", ") #//-------------------------------------------------------// def build( self, node ): sources = node.getSources()", "cmd ) node.setNoTargets() return out #//-------------------------------------------------------// def getBuildStrArgs( self, node,", "os.path.isfile( source ): shutil.copy( source, target ) node.setNoTargets() #//-------------------------------------------------------// def", "source in sources: if os.path.isfile( source ): shutil.copy( source, target", "): sources = node.getSources() target = self.target _makeTagetDirs( target )", "prog_node ) \"\"\" def _makeTagetDirs( path_dir ): try: os.makedirs( path_dir", "return (cmd,) #//===========================================================================// class InstallBuilder (FileBuilder): def __init__(self, options, target", "sources: if os.path.isfile( source ): shutil.copy( source, target ) node.setNoTargets()", "node, brief ): cmd = 
node.getSourceValues() return (cmd,) #//===========================================================================// class", "+ type value node node = ExecuteCommand('gcc --help -v') tools.cpp.cxx", "node.setNoTargets() #//-------------------------------------------------------// def getTraceTargets( self, node, brief ): return self.target", "_makeTagetDirs( path_dir ): try: os.makedirs( path_dir ) except OSError as", "class BuiltinTool( Tool ): def ExecuteCommand( self, options ): return", "target ) def DirName(self, options): raise NotImplementedError() def BaseName(self, options):", "prog_node ) dir_node = RemoveFiles( prog_node ) node = FindFiles(", "os.path.abspath( target ) #//-------------------------------------------------------// def build( self, node ): sources", "node ): cmd = node.getSources() out = self.execCmd( cmd )", "node.getSources() out = self.execCmd( cmd ) node.setNoTargets() return out #//-------------------------------------------------------//", "errno.EEXIST: raise #//===========================================================================// class ExecuteCommand (Builder): def build( self, node", ") node = ExecuteMethod( target = my_function ) dir_node =", "self.target = os.path.abspath( target ) #//-------------------------------------------------------// def build( self, node", "brief ): cmd = node.getSourceValues() return (cmd,) #//===========================================================================// class InstallBuilder", "= FindFiles( dir_node ) dir_node = FileDir( prog_node ) \"\"\"", "options, target ): return InstallBuilder( options, target ) def DirName(self,", ") dir_node = MoveFilesAs( prog_node ) dir_node = RemoveFiles( prog_node", "dir_node = CopyFilesAs( prog_node, target = dir_name ) dir_node =", "except OSError as e: if e.errno != errno.EEXIST: raise #//===========================================================================//", "ExecuteCommand( tools.cpp.cxx, '--help -v' ) node = ExecuteMethod( target =", 
"MoveFilesAs( prog_node ) dir_node = RemoveFiles( prog_node ) node =", "node node = ExecuteCommand('gcc --help -v') tools.cpp.cxx node = ExecuteCommand(", "): try: os.makedirs( path_dir ) except OSError as e: if", "#//-------------------------------------------------------// def build( self, node ): sources = node.getSources() target", "(cmd,) #//===========================================================================// class InstallBuilder (FileBuilder): def __init__(self, options, target ):", "Tool ): def ExecuteCommand( self, options ): return ExecuteCommand( options", "return out #//-------------------------------------------------------// def getBuildStrArgs( self, node, brief ): cmd", "node = ExecuteCommand('gcc --help -v') tools.cpp.cxx node = ExecuteCommand( tools.cpp.cxx,", "= ( \"ExecuteCommand\", \"InstallBuilder\", \"BuiltinTool\", ) \"\"\" Unique Value -", "def DirName(self, options): raise NotImplementedError() def BaseName(self, options): raise NotImplementedError()", "--help -v') tools.cpp.cxx node = ExecuteCommand( tools.cpp.cxx, '--help -v' )", "cmd = node.getSources() out = self.execCmd( cmd ) node.setNoTargets() return", "__init__(self, options, target ): self.target = os.path.abspath( target ) #//-------------------------------------------------------//", "dir_name ) dir_node = CopyFilesAs( prog_node, target = dir_name )", "= self.execCmd( cmd ) node.setNoTargets() return out #//-------------------------------------------------------// def getBuildStrArgs(", "def getTraceTargets( self, node, brief ): return self.target #//===========================================================================// class", "node, brief ): return self.target #//===========================================================================// class BuiltinTool( Tool ):", "\"ExecuteCommand\", \"InstallBuilder\", \"BuiltinTool\", ) \"\"\" Unique Value - name +", "aql.nodes import Builder, FileBuilder from .aql_tools import Tool __all__ =", "!= errno.EEXIST: raise 
#//===========================================================================// class ExecuteCommand (Builder): def build( self,", "ExecuteCommand('gcc --help -v') tools.cpp.cxx node = ExecuteCommand( tools.cpp.cxx, '--help -v'", "self.execCmd( cmd ) node.setNoTargets() return out #//-------------------------------------------------------// def getBuildStrArgs( self,", "= ExecuteCommand( tools.cpp.cxx, '--help -v' ) node = ExecuteMethod( target", "out #//-------------------------------------------------------// def getBuildStrArgs( self, node, brief ): cmd =", "): cmd = node.getSources() out = self.execCmd( cmd ) node.setNoTargets()", "= node.getSources() target = self.target _makeTagetDirs( target ) for source", "shutil.copy( source, target ) node.setNoTargets() #//-------------------------------------------------------// def getTraceTargets( self, node,", "self, node, brief ): return self.target #//===========================================================================// class BuiltinTool( Tool", "value node node = ExecuteCommand('gcc --help -v') tools.cpp.cxx node =", "node.getSourceValues() return (cmd,) #//===========================================================================// class InstallBuilder (FileBuilder): def __init__(self, options,", "FileDir( prog_node ) \"\"\" def _makeTagetDirs( path_dir ): try: os.makedirs(", "CopyFiles( prog_node, target = dir_name ) dir_node = CopyFilesAs( prog_node,", "): return ExecuteCommand( options ) def Install(self, options, target ):", ") def Install(self, options, target ): return InstallBuilder( options, target", "class ExecuteCommand (Builder): def build( self, node ): cmd =", "target ) node.setNoTargets() #//-------------------------------------------------------// def getTraceTargets( self, node, brief ):", "node.getSources() target = self.target _makeTagetDirs( target ) for source in", "cmd = node.getSourceValues() return (cmd,) 
#//===========================================================================// class InstallBuilder (FileBuilder): def", "): def ExecuteCommand( self, options ): return ExecuteCommand( options )", "import os.path import shutil import errno from aql.nodes import Builder,", ") for source in sources: if os.path.isfile( source ): shutil.copy(", "): return self.target #//===========================================================================// class BuiltinTool( Tool ): def ExecuteCommand(", "Builder, FileBuilder from .aql_tools import Tool __all__ = ( \"ExecuteCommand\",", "(Builder): def build( self, node ): cmd = node.getSources() out", "return ExecuteCommand( options ) def Install(self, options, target ): return", "import shutil import errno from aql.nodes import Builder, FileBuilder from", ") node.setNoTargets() #//-------------------------------------------------------// def getTraceTargets( self, node, brief ): return", "= ExecuteMethod( target = my_function ) dir_node = CopyFiles( prog_node,", "InstallBuilder( options, target ) def DirName(self, options): raise NotImplementedError() def", "<filename>aql/aql/main/aql_builtin_tools.py<gh_stars>0 import os.path import shutil import errno from aql.nodes import", "brief ): return self.target #//===========================================================================// class BuiltinTool( Tool ): def", "Install(self, options, target ): return InstallBuilder( options, target ) def", "options, target ) def DirName(self, options): raise NotImplementedError() def BaseName(self,", "def build( self, node ): sources = node.getSources() target =", ") dir_node = FileDir( prog_node ) \"\"\" def _makeTagetDirs( path_dir", "import Tool __all__ = ( \"ExecuteCommand\", \"InstallBuilder\", \"BuiltinTool\", ) \"\"\"", "source, target ) node.setNoTargets() #//-------------------------------------------------------// def getTraceTargets( self, node, brief", "FileBuilder from .aql_tools import Tool __all__ = ( 
\"ExecuteCommand\", \"InstallBuilder\",", "node = FindFiles( dir_node ) dir_node = FileDir( prog_node )", "\"\"\" def _makeTagetDirs( path_dir ): try: os.makedirs( path_dir ) except", "prog_node, target = dir_name ) dir_node = CopyFilesAs( prog_node, target", "= CopyFiles( prog_node, target = dir_name ) dir_node = CopyFilesAs(", "target = dir_name ) dir_node = MoveFiles( prog_node, ) dir_node", "def getBuildStrArgs( self, node, brief ): cmd = node.getSourceValues() return", "options ) def Install(self, options, target ): return InstallBuilder( options,", "build( self, node ): sources = node.getSources() target = self.target", "_makeTagetDirs( target ) for source in sources: if os.path.isfile( source", "return self.target #//===========================================================================// class BuiltinTool( Tool ): def ExecuteCommand( self,", "#//-------------------------------------------------------// def getBuildStrArgs( self, node, brief ): cmd = node.getSourceValues()", "from aql.nodes import Builder, FileBuilder from .aql_tools import Tool __all__", ") dir_node = CopyFiles( prog_node, target = dir_name ) dir_node", "getBuildStrArgs( self, node, brief ): cmd = node.getSourceValues() return (cmd,)", "#//-------------------------------------------------------// def getTraceTargets( self, node, brief ): return self.target #//===========================================================================//", "raise #//===========================================================================// class ExecuteCommand (Builder): def build( self, node ):", "for source in sources: if os.path.isfile( source ): shutil.copy( source,", "self, node ): sources = node.getSources() target = self.target _makeTagetDirs(", "self, node ): cmd = node.getSources() out = self.execCmd( cmd", ") \"\"\" def _makeTagetDirs( path_dir ): try: os.makedirs( path_dir )", "): self.target = os.path.abspath( target ) #//-------------------------------------------------------// def build( 
self,", "target ) for source in sources: if os.path.isfile( source ):", "FindFiles( dir_node ) dir_node = FileDir( prog_node ) \"\"\" def", "def _makeTagetDirs( path_dir ): try: os.makedirs( path_dir ) except OSError", "dir_node = MoveFilesAs( prog_node ) dir_node = RemoveFiles( prog_node )", "my_function ) dir_node = CopyFiles( prog_node, target = dir_name )", ") node.setNoTargets() return out #//-------------------------------------------------------// def getBuildStrArgs( self, node, brief", "OSError as e: if e.errno != errno.EEXIST: raise #//===========================================================================// class", "dir_name ) dir_node = MoveFiles( prog_node, ) dir_node = MoveFilesAs(", "= ExecuteCommand('gcc --help -v') tools.cpp.cxx node = ExecuteCommand( tools.cpp.cxx, '--help", "path_dir ) except OSError as e: if e.errno != errno.EEXIST:", "os.makedirs( path_dir ) except OSError as e: if e.errno !=", "dir_node = FileDir( prog_node ) \"\"\" def _makeTagetDirs( path_dir ):", "name + type value node node = ExecuteCommand('gcc --help -v')", "node = ExecuteMethod( target = my_function ) dir_node = CopyFiles(", "= MoveFiles( prog_node, ) dir_node = MoveFilesAs( prog_node ) dir_node", "sources = node.getSources() target = self.target _makeTagetDirs( target ) for", "BuiltinTool( Tool ): def ExecuteCommand( self, options ): return ExecuteCommand(", "e: if e.errno != errno.EEXIST: raise #//===========================================================================// class ExecuteCommand (Builder):", "class InstallBuilder (FileBuilder): def __init__(self, options, target ): self.target =", "): cmd = node.getSourceValues() return (cmd,) #//===========================================================================// class InstallBuilder (FileBuilder):", "options, target ): self.target = os.path.abspath( target ) #//-------------------------------------------------------// def", "import errno from aql.nodes import Builder, FileBuilder from .aql_tools import", 
"source ): shutil.copy( source, target ) node.setNoTargets() #//-------------------------------------------------------// def getTraceTargets(", "node ): sources = node.getSources() target = self.target _makeTagetDirs( target", "target ) #//-------------------------------------------------------// def build( self, node ): sources =", "( \"ExecuteCommand\", \"InstallBuilder\", \"BuiltinTool\", ) \"\"\" Unique Value - name", "ExecuteMethod( target = my_function ) dir_node = CopyFiles( prog_node, target", "return InstallBuilder( options, target ) def DirName(self, options): raise NotImplementedError()", "self.target #//===========================================================================// class BuiltinTool( Tool ): def ExecuteCommand( self, options", "Value - name + type value node node = ExecuteCommand('gcc", "def Install(self, options, target ): return InstallBuilder( options, target )", "target = dir_name ) dir_node = CopyFilesAs( prog_node, target =", "Unique Value - name + type value node node =", "def ExecuteCommand( self, options ): return ExecuteCommand( options ) def", "target ): return InstallBuilder( options, target ) def DirName(self, options):", ") node = FindFiles( dir_node ) dir_node = FileDir( prog_node", "path_dir ): try: os.makedirs( path_dir ) except OSError as e:", "dir_node = RemoveFiles( prog_node ) node = FindFiles( dir_node )", "ExecuteCommand (Builder): def build( self, node ): cmd = node.getSources()", "= node.getSourceValues() return (cmd,) #//===========================================================================// class InstallBuilder (FileBuilder): def __init__(self,", "#//===========================================================================// class ExecuteCommand (Builder): def build( self, node ): cmd", "tools.cpp.cxx node = ExecuteCommand( tools.cpp.cxx, '--help -v' ) node =", ") dir_node = CopyFilesAs( prog_node, target = dir_name ) dir_node", ".aql_tools import Tool __all__ = ( \"ExecuteCommand\", \"InstallBuilder\", 
\"BuiltinTool\", )", "= FileDir( prog_node ) \"\"\" def _makeTagetDirs( path_dir ): try:", "\"InstallBuilder\", \"BuiltinTool\", ) \"\"\" Unique Value - name + type", "self.target _makeTagetDirs( target ) for source in sources: if os.path.isfile(", ") def DirName(self, options): raise NotImplementedError() def BaseName(self, options): raise", "= CopyFilesAs( prog_node, target = dir_name ) dir_node = MoveFiles(", "self, options ): return ExecuteCommand( options ) def Install(self, options,", "- name + type value node node = ExecuteCommand('gcc --help", "#//===========================================================================// class BuiltinTool( Tool ): def ExecuteCommand( self, options ):", "Tool __all__ = ( \"ExecuteCommand\", \"InstallBuilder\", \"BuiltinTool\", ) \"\"\" Unique", "= dir_name ) dir_node = CopyFilesAs( prog_node, target = dir_name", "\"\"\" Unique Value - name + type value node node" ]
[ "static_admin_url = '' def __init__(self, *args, **kwargs): super(Example1, self).__init__(*args, **kwargs)", "- %s\" % (self.char_1, self.pk) @python_2_unicode_compatible class MultilingualExample1(TranslatableModel): translations =", "= PlaceholderField('placeholder_1', related_name='charpk_p1') def __str__(self): return \"%s - %s\" %", "get_language_from_request(request) if self.pk: self.static_admin_url = admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language)) return self.pk", "__str__(self): return \"%s - %s\" % (self.char_1, self.pk) @python_2_unicode_compatible class", "return instance.char_1 def dynamic_placeholder_2(instance): return instance.char_2 @python_2_unicode_compatible class Example1(models.Model): char_1", "admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language)) class TwoPlaceholderExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255) char_2", "= PlaceholderField('placeholder_2', related_name='p2') class DynamicPlaceholderSlotExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255) char_2", "related_name='dynamic_pl_2') @python_2_unicode_compatible class CharPksExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255) slug =", "PlaceholderField from cms.utils import get_language_from_request from cms.utils.urlutils import admin_reverse from", "import models from django.utils.encoding import python_2_unicode_compatible from cms.models.fields import PlaceholderField", "= get_language_from_request(request) if self.pk: self.static_admin_url = admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language)) return", "class MultilingualExample1(TranslatableModel): translations = TranslatedFields( char_1=models.CharField(u'char_1', max_length=255), char_2=models.CharField(u'char_2', max_length=255), )", "get_language_from_request from cms.utils.urlutils import admin_reverse from hvad.models import TranslatableModel, TranslatedFields", "= 
PlaceholderField('placeholder_1', related_name='p1') placeholder_2 = PlaceholderField('placeholder_2', related_name='p2') class DynamicPlaceholderSlotExample(models.Model): char_1", "PlaceholderField(dynamic_placeholder_2, related_name='dynamic_pl_2') @python_2_unicode_compatible class CharPksExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255) slug", "args=(self.pk, language)) class TwoPlaceholderExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255) char_2 =", "def get_absolute_url(self): return reverse(\"example_detail\", args=(self.pk,)) def get_draft_url(self): return self.get_absolute_url() def", "related_name='charpk_p1') def __str__(self): return \"%s - %s\" % (self.char_1, self.pk)", "self.get_absolute_url() def get_public_url(self): return '/public/view/' def set_static_url(self, request): language =", "def set_static_url(self, request): language = get_language_from_request(request) if self.pk: self.static_admin_url =", "\"%s - %s\" % (self.char_1, self.pk) @python_2_unicode_compatible class MultilingualExample1(TranslatableModel): translations", "= models.CharField(u'char_1', max_length=255) char_2 = models.CharField(u'char_2', max_length=255) placeholder_1 = PlaceholderField(dynamic_placeholder_1,", "char_4 = models.CharField(u'char_4', max_length=255) date_field = models.DateField(null=True) placeholder = PlaceholderField('placeholder')", "from django.utils.encoding import python_2_unicode_compatible from cms.models.fields import PlaceholderField from cms.utils", "max_length=255) slug = models.SlugField(u'char_1', max_length=255, primary_key=True) placeholder_1 = PlaceholderField('placeholder_1', related_name='charpk_p1')", "__init__(self, *args, **kwargs): super(Example1, self).__init__(*args, **kwargs) def callable_item(self, request): return", "cms.utils.urlutils import admin_reverse from hvad.models import TranslatableModel, TranslatedFields def dynamic_placeholder_1(instance):", "related_name='p1') 
placeholder_2 = PlaceholderField('placeholder_2', related_name='p2') class DynamicPlaceholderSlotExample(models.Model): char_1 = models.CharField(u'char_1',", "= models.CharField(u'char_1', max_length=255) slug = models.SlugField(u'char_1', max_length=255, primary_key=True) placeholder_1 =", "= get_language_from_request(request) return admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language)) class TwoPlaceholderExample(models.Model): char_1 =", "import python_2_unicode_compatible from cms.models.fields import PlaceholderField from cms.utils import get_language_from_request", "slug = models.SlugField(u'char_1', max_length=255, primary_key=True) placeholder_1 = PlaceholderField('placeholder_1', related_name='charpk_p1') def", "TranslatedFields def dynamic_placeholder_1(instance): return instance.char_1 def dynamic_placeholder_2(instance): return instance.char_2 @python_2_unicode_compatible", "def callable_item(self, request): return self.char_1 def __str__(self): return self.char_1 def", "models.CharField(u'char_1', max_length=255) char_2 = models.CharField(u'char_2', max_length=255) char_3 = models.CharField(u'char_3', max_length=255)", "@python_2_unicode_compatible class CharPksExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255) slug = models.SlugField(u'char_1',", "% (self.char_1, self.pk) @python_2_unicode_compatible class MultilingualExample1(TranslatableModel): translations = TranslatedFields( char_1=models.CharField(u'char_1',", "__str__(self): return self.char_1 def get_absolute_url(self): return reverse(\"example_detail\", args=(self.pk,)) def get_draft_url(self):", "@python_2_unicode_compatible class MultilingualExample1(TranslatableModel): translations = TranslatedFields( char_1=models.CharField(u'char_1', max_length=255), char_2=models.CharField(u'char_2', max_length=255),", "reverse(\"example_detail\", args=(self.pk,)) def get_draft_url(self): return self.get_absolute_url() def get_public_url(self): return 
'/public/view/'", "max_length=255) char_4 = models.CharField(u'char_4', max_length=255) date_field = models.DateField(null=True) placeholder =", "models.CharField(u'char_3', max_length=255) char_4 = models.CharField(u'char_4', max_length=255) placeholder_1 = PlaceholderField('placeholder_1', related_name='p1')", "instance.char_2 @python_2_unicode_compatible class Example1(models.Model): char_1 = models.CharField(u'char_1', max_length=255) char_2 =", "char_2 = models.CharField(u'char_2', max_length=255) char_3 = models.CharField(u'char_3', max_length=255) char_4 =", "dynamic_placeholder_1(instance): return instance.char_1 def dynamic_placeholder_2(instance): return instance.char_2 @python_2_unicode_compatible class Example1(models.Model):", "reverse from django.db import models from django.utils.encoding import python_2_unicode_compatible from", "def __str__(self): return self.char_1 def get_absolute_url(self): return reverse(\"example_detail\", args=(self.pk,)) def", "= PlaceholderField(dynamic_placeholder_1, related_name='dynamic_pl_1') placeholder_2 = PlaceholderField(dynamic_placeholder_2, related_name='dynamic_pl_2') @python_2_unicode_compatible class CharPksExample(models.Model):", "= models.DateField(null=True) placeholder = PlaceholderField('placeholder') static_admin_url = '' def __init__(self,", "primary_key=True) placeholder_1 = PlaceholderField('placeholder_1', related_name='charpk_p1') def __str__(self): return \"%s -", "models.CharField(u'char_4', max_length=255) placeholder_1 = PlaceholderField('placeholder_1', related_name='p1') placeholder_2 = PlaceholderField('placeholder_2', related_name='p2')", "return self.get_absolute_url() def get_public_url(self): return '/public/view/' def set_static_url(self, request): language", "def dynamic_url(self, request): language = get_language_from_request(request) return admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language))", "**kwargs): super(Example1, self).__init__(*args, **kwargs) def 
callable_item(self, request): return self.char_1 def", "self.pk def dynamic_url(self, request): language = get_language_from_request(request) return admin_reverse('placeholderapp_example1_edit_field', args=(self.pk,", "PlaceholderField('placeholder') static_admin_url = '' def __init__(self, *args, **kwargs): super(Example1, self).__init__(*args,", "placeholder_2 = PlaceholderField(dynamic_placeholder_2, related_name='dynamic_pl_2') @python_2_unicode_compatible class CharPksExample(models.Model): char_1 = models.CharField(u'char_1',", "def dynamic_placeholder_2(instance): return instance.char_2 @python_2_unicode_compatible class Example1(models.Model): char_1 = models.CharField(u'char_1',", "= PlaceholderField('placeholder_1') def __str__(self): return self.char_1 def get_absolute_url(self): return reverse(\"detail_multi\",", "dynamic_url(self, request): language = get_language_from_request(request) return admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language)) class", "import reverse from django.db import models from django.utils.encoding import python_2_unicode_compatible", "hvad.models import TranslatableModel, TranslatedFields def dynamic_placeholder_1(instance): return instance.char_1 def dynamic_placeholder_2(instance):", "args=(self.pk,)) def get_draft_url(self): return self.get_absolute_url() def get_public_url(self): return '/public/view/' def", "request): language = get_language_from_request(request) return admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language)) class TwoPlaceholderExample(models.Model):", "= models.CharField(u'char_3', max_length=255) char_4 = models.CharField(u'char_4', max_length=255) placeholder_1 = PlaceholderField('placeholder_1',", "class Example1(models.Model): char_1 = models.CharField(u'char_1', max_length=255) char_2 = models.CharField(u'char_2', max_length=255)", "%s\" % (self.char_1, self.pk) @python_2_unicode_compatible class MultilingualExample1(TranslatableModel): translations = 
TranslatedFields(", "cms.models.fields import PlaceholderField from cms.utils import get_language_from_request from cms.utils.urlutils import", "= models.CharField(u'char_1', max_length=255) char_2 = models.CharField(u'char_2', max_length=255) char_3 = models.CharField(u'char_3',", "django.db import models from django.utils.encoding import python_2_unicode_compatible from cms.models.fields import", "language = get_language_from_request(request) if self.pk: self.static_admin_url = admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language))", "related_name='dynamic_pl_1') placeholder_2 = PlaceholderField(dynamic_placeholder_2, related_name='dynamic_pl_2') @python_2_unicode_compatible class CharPksExample(models.Model): char_1 =", "dynamic_placeholder_2(instance): return instance.char_2 @python_2_unicode_compatible class Example1(models.Model): char_1 = models.CharField(u'char_1', max_length=255)", "Example1(models.Model): char_1 = models.CharField(u'char_1', max_length=255) char_2 = models.CharField(u'char_2', max_length=255) char_3", "MultilingualExample1(TranslatableModel): translations = TranslatedFields( char_1=models.CharField(u'char_1', max_length=255), char_2=models.CharField(u'char_2', max_length=255), ) placeholder_1", "get_draft_url(self): return self.get_absolute_url() def get_public_url(self): return '/public/view/' def set_static_url(self, request):", "placeholder = PlaceholderField('placeholder') static_admin_url = '' def __init__(self, *args, **kwargs):", "max_length=255) char_3 = models.CharField(u'char_3', max_length=255) char_4 = models.CharField(u'char_4', max_length=255) date_field", "max_length=255) date_field = models.DateField(null=True) placeholder = PlaceholderField('placeholder') static_admin_url = ''", "placeholder_1 = PlaceholderField(dynamic_placeholder_1, related_name='dynamic_pl_1') placeholder_2 = PlaceholderField(dynamic_placeholder_2, related_name='dynamic_pl_2') @python_2_unicode_compatible class", "placeholder_1 = 
PlaceholderField('placeholder_1', related_name='charpk_p1') def __str__(self): return \"%s - %s\"", "PlaceholderField('placeholder_1') def __str__(self): return self.char_1 def get_absolute_url(self): return reverse(\"detail_multi\", args=(self.pk,))", "return self.pk def dynamic_url(self, request): language = get_language_from_request(request) return admin_reverse('placeholderapp_example1_edit_field',", "self.char_1 def get_absolute_url(self): return reverse(\"example_detail\", args=(self.pk,)) def get_draft_url(self): return self.get_absolute_url()", "admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language)) return self.pk def dynamic_url(self, request): language =", "from cms.utils.urlutils import admin_reverse from hvad.models import TranslatableModel, TranslatedFields def", "**kwargs) def callable_item(self, request): return self.char_1 def __str__(self): return self.char_1", "= PlaceholderField(dynamic_placeholder_2, related_name='dynamic_pl_2') @python_2_unicode_compatible class CharPksExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255)", "char_3 = models.CharField(u'char_3', max_length=255) char_4 = models.CharField(u'char_4', max_length=255) placeholder_1 =", "max_length=255, primary_key=True) placeholder_1 = PlaceholderField('placeholder_1', related_name='charpk_p1') def __str__(self): return \"%s", "if self.pk: self.static_admin_url = admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language)) return self.pk def", "max_length=255) char_2 = models.CharField(u'char_2', max_length=255) placeholder_1 = PlaceholderField(dynamic_placeholder_1, related_name='dynamic_pl_1') placeholder_2", "= models.SlugField(u'char_1', max_length=255, primary_key=True) placeholder_1 = PlaceholderField('placeholder_1', related_name='charpk_p1') def __str__(self):", ") placeholder_1 = PlaceholderField('placeholder_1') def __str__(self): return self.char_1 def get_absolute_url(self):", "self.static_admin_url = 
admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language)) return self.pk def dynamic_url(self, request):", "class DynamicPlaceholderSlotExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255) char_2 = models.CharField(u'char_2', max_length=255)", "models.CharField(u'char_2', max_length=255) placeholder_1 = PlaceholderField(dynamic_placeholder_1, related_name='dynamic_pl_1') placeholder_2 = PlaceholderField(dynamic_placeholder_2, related_name='dynamic_pl_2')", "get_absolute_url(self): return reverse(\"example_detail\", args=(self.pk,)) def get_draft_url(self): return self.get_absolute_url() def get_public_url(self):", "PlaceholderField(dynamic_placeholder_1, related_name='dynamic_pl_1') placeholder_2 = PlaceholderField(dynamic_placeholder_2, related_name='dynamic_pl_2') @python_2_unicode_compatible class CharPksExample(models.Model): char_1", "return self.char_1 def __str__(self): return self.char_1 def get_absolute_url(self): return reverse(\"example_detail\",", "args=(self.pk, language)) return self.pk def dynamic_url(self, request): language = get_language_from_request(request)", "import PlaceholderField from cms.utils import get_language_from_request from cms.utils.urlutils import admin_reverse", "from cms.utils import get_language_from_request from cms.utils.urlutils import admin_reverse from hvad.models", "char_2 = models.CharField(u'char_2', max_length=255) placeholder_1 = PlaceholderField(dynamic_placeholder_1, related_name='dynamic_pl_1') placeholder_2 =", "django.utils.encoding import python_2_unicode_compatible from cms.models.fields import PlaceholderField from cms.utils import", "TranslatedFields( char_1=models.CharField(u'char_1', max_length=255), char_2=models.CharField(u'char_2', max_length=255), ) placeholder_1 = PlaceholderField('placeholder_1') def", "models.CharField(u'char_1', max_length=255) slug = models.SlugField(u'char_1', max_length=255, primary_key=True) placeholder_1 = 
PlaceholderField('placeholder_1',", "char_3 = models.CharField(u'char_3', max_length=255) char_4 = models.CharField(u'char_4', max_length=255) date_field =", "models.CharField(u'char_2', max_length=255) char_3 = models.CharField(u'char_3', max_length=255) char_4 = models.CharField(u'char_4', max_length=255)", "placeholder_2 = PlaceholderField('placeholder_2', related_name='p2') class DynamicPlaceholderSlotExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255)", "char_2=models.CharField(u'char_2', max_length=255), ) placeholder_1 = PlaceholderField('placeholder_1') def __str__(self): return self.char_1", "admin_reverse from hvad.models import TranslatableModel, TranslatedFields def dynamic_placeholder_1(instance): return instance.char_1", "'' def __init__(self, *args, **kwargs): super(Example1, self).__init__(*args, **kwargs) def callable_item(self,", "def __init__(self, *args, **kwargs): super(Example1, self).__init__(*args, **kwargs) def callable_item(self, request):", "= PlaceholderField('placeholder') static_admin_url = '' def __init__(self, *args, **kwargs): super(Example1,", "import get_language_from_request from cms.utils.urlutils import admin_reverse from hvad.models import TranslatableModel,", "= models.CharField(u'char_4', max_length=255) date_field = models.DateField(null=True) placeholder = PlaceholderField('placeholder') static_admin_url", "self.char_1 def __str__(self): return self.char_1 def get_absolute_url(self): return reverse(\"example_detail\", args=(self.pk,))", "request): language = get_language_from_request(request) if self.pk: self.static_admin_url = admin_reverse('placeholderapp_example1_edit_field', args=(self.pk,", "*args, **kwargs): super(Example1, self).__init__(*args, **kwargs) def callable_item(self, request): return self.char_1", "= admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language)) return self.pk def dynamic_url(self, request): language", "request): return self.char_1 def __str__(self): 
return self.char_1 def get_absolute_url(self): return", "max_length=255) char_3 = models.CharField(u'char_3', max_length=255) char_4 = models.CharField(u'char_4', max_length=255) placeholder_1", "return \"%s - %s\" % (self.char_1, self.pk) @python_2_unicode_compatible class MultilingualExample1(TranslatableModel):", "def get_public_url(self): return '/public/view/' def set_static_url(self, request): language = get_language_from_request(request)", "import admin_reverse from hvad.models import TranslatableModel, TranslatedFields def dynamic_placeholder_1(instance): return", "def get_draft_url(self): return self.get_absolute_url() def get_public_url(self): return '/public/view/' def set_static_url(self,", "get_language_from_request(request) return admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language)) class TwoPlaceholderExample(models.Model): char_1 = models.CharField(u'char_1',", "return reverse(\"example_detail\", args=(self.pk,)) def get_draft_url(self): return self.get_absolute_url() def get_public_url(self): return", "max_length=255) placeholder_1 = PlaceholderField('placeholder_1', related_name='p1') placeholder_2 = PlaceholderField('placeholder_2', related_name='p2') class", "max_length=255) char_4 = models.CharField(u'char_4', max_length=255) placeholder_1 = PlaceholderField('placeholder_1', related_name='p1') placeholder_2", "django.core.urlresolvers import reverse from django.db import models from django.utils.encoding import", "def dynamic_placeholder_1(instance): return instance.char_1 def dynamic_placeholder_2(instance): return instance.char_2 @python_2_unicode_compatible class", "CharPksExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255) slug = models.SlugField(u'char_1', max_length=255, primary_key=True)", "language)) return self.pk def dynamic_url(self, request): language = get_language_from_request(request) return", "PlaceholderField('placeholder_1', related_name='p1') placeholder_2 = 
PlaceholderField('placeholder_2', related_name='p2') class DynamicPlaceholderSlotExample(models.Model): char_1 =", "models from django.utils.encoding import python_2_unicode_compatible from cms.models.fields import PlaceholderField from", "= '' def __init__(self, *args, **kwargs): super(Example1, self).__init__(*args, **kwargs) def", "callable_item(self, request): return self.char_1 def __str__(self): return self.char_1 def get_absolute_url(self):", "self.pk: self.static_admin_url = admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language)) return self.pk def dynamic_url(self,", "from cms.models.fields import PlaceholderField from cms.utils import get_language_from_request from cms.utils.urlutils", "models.CharField(u'char_4', max_length=255) date_field = models.DateField(null=True) placeholder = PlaceholderField('placeholder') static_admin_url =", "get_public_url(self): return '/public/view/' def set_static_url(self, request): language = get_language_from_request(request) if", "models.CharField(u'char_1', max_length=255) char_2 = models.CharField(u'char_2', max_length=255) placeholder_1 = PlaceholderField(dynamic_placeholder_1, related_name='dynamic_pl_1')", "= models.CharField(u'char_3', max_length=255) char_4 = models.CharField(u'char_4', max_length=255) date_field = models.DateField(null=True)", "char_1 = models.CharField(u'char_1', max_length=255) slug = models.SlugField(u'char_1', max_length=255, primary_key=True) placeholder_1", "from hvad.models import TranslatableModel, TranslatedFields def dynamic_placeholder_1(instance): return instance.char_1 def", "char_1 = models.CharField(u'char_1', max_length=255) char_2 = models.CharField(u'char_2', max_length=255) placeholder_1 =", "TwoPlaceholderExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255) char_2 = models.CharField(u'char_2', max_length=255) char_3", "language)) class TwoPlaceholderExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255) char_2 
= models.CharField(u'char_2',", "= models.CharField(u'char_2', max_length=255) placeholder_1 = PlaceholderField(dynamic_placeholder_1, related_name='dynamic_pl_1') placeholder_2 = PlaceholderField(dynamic_placeholder_2,", "placeholder_1 = PlaceholderField('placeholder_1') def __str__(self): return self.char_1 def get_absolute_url(self): return", "from django.db import models from django.utils.encoding import python_2_unicode_compatible from cms.models.fields", "= models.CharField(u'char_2', max_length=255) char_3 = models.CharField(u'char_3', max_length=255) char_4 = models.CharField(u'char_4',", "translations = TranslatedFields( char_1=models.CharField(u'char_1', max_length=255), char_2=models.CharField(u'char_2', max_length=255), ) placeholder_1 =", "max_length=255), ) placeholder_1 = PlaceholderField('placeholder_1') def __str__(self): return self.char_1 def", "return instance.char_2 @python_2_unicode_compatible class Example1(models.Model): char_1 = models.CharField(u'char_1', max_length=255) char_2", "set_static_url(self, request): language = get_language_from_request(request) if self.pk: self.static_admin_url = admin_reverse('placeholderapp_example1_edit_field',", "return admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language)) class TwoPlaceholderExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255)", "char_1=models.CharField(u'char_1', max_length=255), char_2=models.CharField(u'char_2', max_length=255), ) placeholder_1 = PlaceholderField('placeholder_1') def __str__(self):", "char_1 = models.CharField(u'char_1', max_length=255) char_2 = models.CharField(u'char_2', max_length=255) char_3 =", "import TranslatableModel, TranslatedFields def dynamic_placeholder_1(instance): return instance.char_1 def dynamic_placeholder_2(instance): return", "from django.core.urlresolvers import reverse from django.db import models from django.utils.encoding", "'/public/view/' def set_static_url(self, request): language = 
get_language_from_request(request) if self.pk: self.static_admin_url", "TranslatableModel, TranslatedFields def dynamic_placeholder_1(instance): return instance.char_1 def dynamic_placeholder_2(instance): return instance.char_2", "return '/public/view/' def set_static_url(self, request): language = get_language_from_request(request) if self.pk:", "return self.char_1 def get_absolute_url(self): return reverse(\"example_detail\", args=(self.pk,)) def get_draft_url(self): return", "self.pk) @python_2_unicode_compatible class MultilingualExample1(TranslatableModel): translations = TranslatedFields( char_1=models.CharField(u'char_1', max_length=255), char_2=models.CharField(u'char_2',", "python_2_unicode_compatible from cms.models.fields import PlaceholderField from cms.utils import get_language_from_request from", "max_length=255) char_2 = models.CharField(u'char_2', max_length=255) char_3 = models.CharField(u'char_3', max_length=255) char_4", "language = get_language_from_request(request) return admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language)) class TwoPlaceholderExample(models.Model): char_1", "related_name='p2') class DynamicPlaceholderSlotExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255) char_2 = models.CharField(u'char_2',", "(self.char_1, self.pk) @python_2_unicode_compatible class MultilingualExample1(TranslatableModel): translations = TranslatedFields( char_1=models.CharField(u'char_1', max_length=255),", "= models.CharField(u'char_4', max_length=255) placeholder_1 = PlaceholderField('placeholder_1', related_name='p1') placeholder_2 = PlaceholderField('placeholder_2',", "def __str__(self): return \"%s - %s\" % (self.char_1, self.pk) @python_2_unicode_compatible", "PlaceholderField('placeholder_1', related_name='charpk_p1') def __str__(self): return \"%s - %s\" % (self.char_1,", "self).__init__(*args, **kwargs) def callable_item(self, request): return self.char_1 def __str__(self): return", 
"PlaceholderField('placeholder_2', related_name='p2') class DynamicPlaceholderSlotExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255) char_2 =", "class CharPksExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255) slug = models.SlugField(u'char_1', max_length=255,", "= TranslatedFields( char_1=models.CharField(u'char_1', max_length=255), char_2=models.CharField(u'char_2', max_length=255), ) placeholder_1 = PlaceholderField('placeholder_1')", "models.CharField(u'char_3', max_length=255) char_4 = models.CharField(u'char_4', max_length=255) date_field = models.DateField(null=True) placeholder", "models.DateField(null=True) placeholder = PlaceholderField('placeholder') static_admin_url = '' def __init__(self, *args,", "class TwoPlaceholderExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255) char_2 = models.CharField(u'char_2', max_length=255)", "DynamicPlaceholderSlotExample(models.Model): char_1 = models.CharField(u'char_1', max_length=255) char_2 = models.CharField(u'char_2', max_length=255) placeholder_1", "models.SlugField(u'char_1', max_length=255, primary_key=True) placeholder_1 = PlaceholderField('placeholder_1', related_name='charpk_p1') def __str__(self): return", "placeholder_1 = PlaceholderField('placeholder_1', related_name='p1') placeholder_2 = PlaceholderField('placeholder_2', related_name='p2') class DynamicPlaceholderSlotExample(models.Model):", "date_field = models.DateField(null=True) placeholder = PlaceholderField('placeholder') static_admin_url = '' def", "max_length=255), char_2=models.CharField(u'char_2', max_length=255), ) placeholder_1 = PlaceholderField('placeholder_1') def __str__(self): return", "instance.char_1 def dynamic_placeholder_2(instance): return instance.char_2 @python_2_unicode_compatible class Example1(models.Model): char_1 =", "max_length=255) placeholder_1 = PlaceholderField(dynamic_placeholder_1, related_name='dynamic_pl_1') placeholder_2 = 
PlaceholderField(dynamic_placeholder_2, related_name='dynamic_pl_2') @python_2_unicode_compatible", "super(Example1, self).__init__(*args, **kwargs) def callable_item(self, request): return self.char_1 def __str__(self):", "cms.utils import get_language_from_request from cms.utils.urlutils import admin_reverse from hvad.models import", "@python_2_unicode_compatible class Example1(models.Model): char_1 = models.CharField(u'char_1', max_length=255) char_2 = models.CharField(u'char_2',", "char_4 = models.CharField(u'char_4', max_length=255) placeholder_1 = PlaceholderField('placeholder_1', related_name='p1') placeholder_2 =" ]
[ "type in their postcode.Display the first two # letters in", "084 # Ask the user to type in their postcode.Display", "the user to type in their postcode.Display the first two", "Ask the user to type in their postcode.Display the first", "# 084 # Ask the user to type in their", "to type in their postcode.Display the first two # letters", "two # letters in uppercase. # very simple print(input('Enter your", "letters in uppercase. # very simple print(input('Enter your postcode: ')[0:2].upper())", "postcode.Display the first two # letters in uppercase. # very", "# letters in uppercase. # very simple print(input('Enter your postcode:", "first two # letters in uppercase. # very simple print(input('Enter", "user to type in their postcode.Display the first two #", "in their postcode.Display the first two # letters in uppercase.", "their postcode.Display the first two # letters in uppercase. #", "the first two # letters in uppercase. # very simple", "# Ask the user to type in their postcode.Display the" ]
[ "@classmethod def import_channels(cls, fname): with open(fname, 'r') as f: text", "for channel in self.channels: encoder.set('forUsername', channel) data = self.get_data(encoder.get()) ret[channel]", "def get_playlist_id(self, data): items = data.get('items') content = items[0].get('contentDetails') playlists", "title, published_at, description in sorted(data, key=lambda x: x[2]): f.write('{}: {}", "FILE_NAME = os.path.join(BASE_DIR, 'channels.json') def __init__(self, channels): self.encoder = self.build_encoder(API_KEY)", "generate_data(self): encoder = self.encoder ret = {} for channel in", "self.channels = channels def run(self): data = self.generate_data() self.save(data) def", "= '?' + self._parms() if len(self.args) else '' return self.API_URL", "items[0].get('contentDetails') playlists = content.get('relatedPlaylists') return playlists.get('uploads') def save(self, data): with", "open(fname, 'r') as f: text = f.read() f.close() return json.loads(text)", "api_key) encoder.set('part', 'contentDetails') return encoder class ApiPlayList(object): URL = BASE_URL", "return encoder class ApiPlayList(object): URL = BASE_URL + API_PLAYLIST FILE_NAME", "ApiChannel(CHANNELS) channel.run() if '-playlist' in args: channels = ApiPlayList.import_channels(ApiChannel.FILE_NAME) play_list", "api_key): UrlEncoder.API_URL = self.URL encoder = UrlEncoder() encoder.set('key', api_key) encoder.set('part',", "build_encoder(self, api_key): UrlEncoder.API_URL = self.URL encoder = UrlEncoder() encoder.set('key', api_key)", "'' return self.API_URL + parms def set(self, key, value): if", "f.close() def build_encoder(self, api_key): UrlEncoder.API_URL = self.URL encoder = UrlEncoder()", "encoder.set('key', api_key) encoder.set('part', 'contentDetails') return encoder class ApiPlayList(object): URL =", "= urllib2.urlopen(url) data = url.read() return json.loads(data) def get_playlist_id(self, data):", "f.read() f.close() return json.loads(text) if __name__ == '__main__': args =", 
"encoder @classmethod def import_channels(cls, fname): with open(fname, 'r') as f:", "with codecs.open(fname, 'w', encoding='utf-8') as f: for key, title, published_at,", "'xadrezverbal', 'estevaoslow', 'Vsauce', 'braincraftvideo', 'CienciaTodoDia', ] class UrlEncoder(object): API_URL =", "return self.API_URL + parms def set(self, key, value): if value:", "data): fname = os.path.join(BASE_DIR, 'last_update.txt') with codecs.open(fname, 'w', encoding='utf-8') as", "save(self, data): fname = os.path.join(BASE_DIR, 'last_update.txt') with codecs.open(fname, 'w', encoding='utf-8')", "def run(self): data = self.generate_data() self.save(data) def generate_data(self): encoder =", "if '-channel' in args: channel = ApiChannel(CHANNELS) channel.run() if '-playlist'", "import sys import json import urllib2 import codecs BASE_DIR =", "'-channel' in args: channel = ApiChannel(CHANNELS) channel.run() if '-playlist' in", "generate_data(self): encoder = self.encoder channels = self.channels ret = []", "BASE_URL + API_CHANNELS FILE_NAME = os.path.join(BASE_DIR, 'channels.json') def __init__(self, channels):", "def get(self): parms = '?' + self._parms() if len(self.args) else", "+ API_CHANNELS FILE_NAME = os.path.join(BASE_DIR, 'channels.json') def __init__(self, channels): self.encoder", "description] def save(self, data): fname = os.path.join(BASE_DIR, 'last_update.txt') with codecs.open(fname,", "data = url.read() return json.loads(data) def get_playlist_id(self, data): items =", "get_info(self, data): items = data.get('items') snippet = items[0].get('snippet') title =", "json.loads(data) def get_playlist_id(self, data): items = data.get('items') content = items[0].get('contentDetails')", "len(self.args) else '' return self.API_URL + parms def set(self, key,", "codecs.open(fname, 'w', encoding='utf-8') as f: for key, title, published_at, description", "codecs BASE_DIR = os.path.dirname(__file__) BASE_URL = 'https://www.googleapis.com/youtube/v3/' API_CHANNELS = 'channels'", "'?' 
+ self._parms() if len(self.args) else '' return self.API_URL +", "data = self.generate_data() self.save(data) def generate_data(self): encoder = self.encoder channels", "os.path.join(BASE_DIR, 'channels.json') def __init__(self, channels): self.encoder = self.build_encoder(API_KEY) self.channels =", "= self.generate_data() self.save(data) def generate_data(self): encoder = self.encoder channels =", "args.append(k + '=' + str(v)) return '&'.join(args) def get(self): parms", "if value: self.args[key] = value class ApiChannel(object): URL = BASE_URL", "self.args = kwargs def _parms(self): args = [] for k,", "'playlist.txt') def __init__(self, channels): self.channels = channels self.encoder = self.build_encoder(API_KEY)", "ret = {} for channel in self.channels: encoder.set('forUsername', channel) data", "+ self.get_info(data)] return ret def get_info(self, data): items = data.get('items')", "items = data.get('items') snippet = items[0].get('snippet') title = snippet.get('title') published_at", "= url.read() return json.loads(data) def get_playlist_id(self, data): items = data.get('items')", "{} - {}\\n'.format(published_at[:10], key, title)) f.close() def get_data(self, url): url", "url = urllib2.urlopen(url) data = url.read() return json.loads(data) def get_playlist_id(self,", "x: x[2]): f.write('{}: {} - {}\\n'.format(published_at[:10], key, title)) f.close() def", "import codecs BASE_DIR = os.path.dirname(__file__) BASE_URL = 'https://www.googleapis.com/youtube/v3/' API_CHANNELS =", "from __future__ import unicode_literals import os import sys import json", "'Vsauce', 'braincraftvideo', 'CienciaTodoDia', ] class UrlEncoder(object): API_URL = '' def", "self.args[key] = value class ApiChannel(object): URL = BASE_URL + API_CHANNELS", "= channels self.encoder = self.build_encoder(API_KEY) def run(self): data = self.generate_data()", "self.save(data) def generate_data(self): encoder = self.encoder channels = self.channels ret", "in channels: encoder.set('playlistId', 
channels[key]) data = self.get_data(encoder.get()) ret += [[key]", "= snippet.get('description') return [title, published_at, description] def save(self, data): fname", "-*- from __future__ import unicode_literals import os import sys import", "+ '=' + str(v)) return '&'.join(args) def get(self): parms =", "= os.path.join(BASE_DIR, 'channels.json') def __init__(self, channels): self.encoder = self.build_encoder(API_KEY) self.channels", "f.write('{}: {} - {}\\n'.format(published_at[:10], key, title)) f.close() def get_data(self, url):", "channels = self.channels ret = [] for key in channels:", "FILE_NAME = os.path.join(BASE_DIR, 'playlist.txt') def __init__(self, channels): self.channels = channels", "'minutephysics', 'xadrezverbal', 'estevaoslow', 'Vsauce', 'braincraftvideo', 'CienciaTodoDia', ] class UrlEncoder(object): API_URL", "with open(self.FILE_NAME, 'w') as f: f.write(json.dumps(data)) f.close() def build_encoder(self, api_key):", "= [] for k, v in self.args.items(): args.append(k + '='", "channels self.encoder = self.build_encoder(API_KEY) def run(self): data = self.generate_data() self.save(data)", "return [title, published_at, description] def save(self, data): fname = os.path.join(BASE_DIR,", "= f.read() f.close() return json.loads(text) if __name__ == '__main__': args", "self.generate_data() self.save(data) def generate_data(self): encoder = self.encoder channels = self.channels", "os import sys import json import urllib2 import codecs BASE_DIR", "self._parms() if len(self.args) else '' return self.API_URL + parms def", "self.API_URL + parms def set(self, key, value): if value: self.args[key]", "= self.URL encoder = UrlEncoder() encoder.set('key', api_key) encoder.set('part', 'snippet') encoder.set('maxResults',", "= ApiChannel(CHANNELS) channel.run() if '-playlist' in args: channels = ApiPlayList.import_channels(ApiChannel.FILE_NAME)", "'r') as f: text = f.read() f.close() return json.loads(text) if", "text = f.read() f.close() return json.loads(text) 
if __name__ == '__main__':", "os.path.join(BASE_DIR, 'playlist.txt') def __init__(self, channels): self.channels = channels self.encoder =", "content.get('relatedPlaylists') return playlists.get('uploads') def save(self, data): with open(self.FILE_NAME, 'w') as", "f: for key, title, published_at, description in sorted(data, key=lambda x:", "<filename>api_youtube.py # -*- coding: utf-8 -*- from __future__ import unicode_literals", "title)) f.close() def get_data(self, url): url = urllib2.urlopen(url) data =", "**kwargs): self.args = kwargs def _parms(self): args = [] for", "self.get_info(data)] return ret def get_info(self, data): items = data.get('items') snippet", "ret def get_info(self, data): items = data.get('items') snippet = items[0].get('snippet')", "fname = os.path.join(BASE_DIR, 'last_update.txt') with codecs.open(fname, 'w', encoding='utf-8') as f:", "f: f.write(json.dumps(data)) f.close() def build_encoder(self, api_key): UrlEncoder.API_URL = self.URL encoder", "encoder.set('part', 'snippet') encoder.set('maxResults', '1') return encoder @classmethod def import_channels(cls, fname):", "'channels' API_PLAYLIST = 'playlistItems' API_KEY = 'YOUR KEY' CHANNELS =", "def get_data(self, url): url = urllib2.urlopen(url) data = url.read() return", "def __init__(self, channels): self.encoder = self.build_encoder(API_KEY) self.channels = channels def", "published_at, description in sorted(data, key=lambda x: x[2]): f.write('{}: {} -", "json.loads(text) if __name__ == '__main__': args = sys.argv[1:] if '-channel'", "api_key) encoder.set('part', 'snippet') encoder.set('maxResults', '1') return encoder @classmethod def import_channels(cls,", "ret def get_data(self, url): url = urllib2.urlopen(url) data = url.read()", "as f: for key, title, published_at, description in sorted(data, key=lambda", "def __init__(self, **kwargs): self.args = kwargs def _parms(self): args =", "'braincraftvideo', 'CienciaTodoDia', ] class UrlEncoder(object): API_URL = '' def __init__(self,", 
"published_at, description] def save(self, data): fname = os.path.join(BASE_DIR, 'last_update.txt') with", "'YOUR KEY' CHANNELS = [ 'videosimprovaveis', 'nerdologia', 'Kurzgesagt', '1veritasium', 'minutephysics',", "get_playlist_id(self, data): items = data.get('items') content = items[0].get('contentDetails') playlists =", "= 'playlistItems' API_KEY = 'YOUR KEY' CHANNELS = [ 'videosimprovaveis',", "data = self.get_data(encoder.get()) ret[channel] = self.get_playlist_id(data) return ret def get_data(self,", "k, v in self.args.items(): args.append(k + '=' + str(v)) return", "[title, published_at, description] def save(self, data): fname = os.path.join(BASE_DIR, 'last_update.txt')", "url.read() return json.loads(data) def get_playlist_id(self, data): items = data.get('items') content", "def get_info(self, data): items = data.get('items') snippet = items[0].get('snippet') title", "json import urllib2 import codecs BASE_DIR = os.path.dirname(__file__) BASE_URL =", "API_KEY = 'YOUR KEY' CHANNELS = [ 'videosimprovaveis', 'nerdologia', 'Kurzgesagt',", "= self.encoder ret = {} for channel in self.channels: encoder.set('forUsername',", "= items[0].get('contentDetails') playlists = content.get('relatedPlaylists') return playlists.get('uploads') def save(self, data):", "def generate_data(self): encoder = self.encoder ret = {} for channel", "encoder.set('key', api_key) encoder.set('part', 'snippet') encoder.set('maxResults', '1') return encoder @classmethod def", "for k, v in self.args.items(): args.append(k + '=' + str(v))", "if len(self.args) else '' return self.API_URL + parms def set(self,", "= os.path.join(BASE_DIR, 'playlist.txt') def __init__(self, channels): self.channels = channels self.encoder", "parms def set(self, key, value): if value: self.args[key] = value", "def import_channels(cls, fname): with open(fname, 'r') as f: text =", "unicode_literals import os import sys import json import urllib2 import", "in sorted(data, key=lambda x: x[2]): f.write('{}: {} - 
{}\\n'.format(published_at[:10], key,", "UrlEncoder() encoder.set('key', api_key) encoder.set('part', 'contentDetails') return encoder class ApiPlayList(object): URL", "-*- coding: utf-8 -*- from __future__ import unicode_literals import os", "value: self.args[key] = value class ApiChannel(object): URL = BASE_URL +", "return playlists.get('uploads') def save(self, data): with open(self.FILE_NAME, 'w') as f:", "channels): self.channels = channels self.encoder = self.build_encoder(API_KEY) def run(self): data", "] class UrlEncoder(object): API_URL = '' def __init__(self, **kwargs): self.args", "encoder.set('forUsername', channel) data = self.get_data(encoder.get()) ret[channel] = self.get_playlist_id(data) return ret", "coding: utf-8 -*- from __future__ import unicode_literals import os import", "return json.loads(text) if __name__ == '__main__': args = sys.argv[1:] if", "+ API_PLAYLIST FILE_NAME = os.path.join(BASE_DIR, 'playlist.txt') def __init__(self, channels): self.channels", "def set(self, key, value): if value: self.args[key] = value class", "= self.encoder channels = self.channels ret = [] for key", "data): items = data.get('items') content = items[0].get('contentDetails') playlists = content.get('relatedPlaylists')", "encoder class ApiPlayList(object): URL = BASE_URL + API_PLAYLIST FILE_NAME =", "def save(self, data): with open(self.FILE_NAME, 'w') as f: f.write(json.dumps(data)) f.close()", "set(self, key, value): if value: self.args[key] = value class ApiChannel(object):", "API_PLAYLIST FILE_NAME = os.path.join(BASE_DIR, 'playlist.txt') def __init__(self, channels): self.channels =", "as f: text = f.read() f.close() return json.loads(text) if __name__", "value class ApiChannel(object): URL = BASE_URL + API_CHANNELS FILE_NAME =", "if '-playlist' in args: channels = ApiPlayList.import_channels(ApiChannel.FILE_NAME) play_list = ApiPlayList(channels)", "str(v)) return '&'.join(args) def get(self): parms = '?' 
+ self._parms()", "{}\\n'.format(published_at[:10], key, title)) f.close() def get_data(self, url): url = urllib2.urlopen(url)", "self.channels = channels self.encoder = self.build_encoder(API_KEY) def run(self): data =", "= self.build_encoder(API_KEY) self.channels = channels def run(self): data = self.generate_data()", "value): if value: self.args[key] = value class ApiChannel(object): URL =", "= value class ApiChannel(object): URL = BASE_URL + API_CHANNELS FILE_NAME", "= {} for channel in self.channels: encoder.set('forUsername', channel) data =", "_parms(self): args = [] for k, v in self.args.items(): args.append(k", "f.write(json.dumps(data)) f.close() def build_encoder(self, api_key): UrlEncoder.API_URL = self.URL encoder =", "KEY' CHANNELS = [ 'videosimprovaveis', 'nerdologia', 'Kurzgesagt', '1veritasium', 'minutephysics', 'xadrezverbal',", "in self.channels: encoder.set('forUsername', channel) data = self.get_data(encoder.get()) ret[channel] = self.get_playlist_id(data)", "self.URL encoder = UrlEncoder() encoder.set('key', api_key) encoder.set('part', 'snippet') encoder.set('maxResults', '1')", "API_PLAYLIST = 'playlistItems' API_KEY = 'YOUR KEY' CHANNELS = [", "return json.loads(data) def get_playlist_id(self, data): items = data.get('items') content =", "save(self, data): with open(self.FILE_NAME, 'w') as f: f.write(json.dumps(data)) f.close() def", "self.encoder = self.build_encoder(API_KEY) self.channels = channels def run(self): data =", "BASE_DIR = os.path.dirname(__file__) BASE_URL = 'https://www.googleapis.com/youtube/v3/' API_CHANNELS = 'channels' API_PLAYLIST", "= 'https://www.googleapis.com/youtube/v3/' API_CHANNELS = 'channels' API_PLAYLIST = 'playlistItems' API_KEY =", "+ str(v)) return '&'.join(args) def get(self): parms = '?' 
+", "as f: f.write(json.dumps(data)) f.close() def build_encoder(self, api_key): UrlEncoder.API_URL = self.URL", "= 'channels' API_PLAYLIST = 'playlistItems' API_KEY = 'YOUR KEY' CHANNELS", "urllib2 import codecs BASE_DIR = os.path.dirname(__file__) BASE_URL = 'https://www.googleapis.com/youtube/v3/' API_CHANNELS", "snippet.get('title') published_at = snippet.get('publishedAt') description = snippet.get('description') return [title, published_at,", "'last_update.txt') with codecs.open(fname, 'w', encoding='utf-8') as f: for key, title,", "encoder = self.encoder channels = self.channels ret = [] for", "ApiChannel(object): URL = BASE_URL + API_CHANNELS FILE_NAME = os.path.join(BASE_DIR, 'channels.json')", "data.get('items') snippet = items[0].get('snippet') title = snippet.get('title') published_at = snippet.get('publishedAt')", "playlists = content.get('relatedPlaylists') return playlists.get('uploads') def save(self, data): with open(self.FILE_NAME,", "__name__ == '__main__': args = sys.argv[1:] if '-channel' in args:", "return encoder @classmethod def import_channels(cls, fname): with open(fname, 'r') as", "'Kurzgesagt', '1veritasium', 'minutephysics', 'xadrezverbal', 'estevaoslow', 'Vsauce', 'braincraftvideo', 'CienciaTodoDia', ] class", "encoder = UrlEncoder() encoder.set('key', api_key) encoder.set('part', 'contentDetails') return encoder class", "snippet.get('description') return [title, published_at, description] def save(self, data): fname =", "= BASE_URL + API_PLAYLIST FILE_NAME = os.path.join(BASE_DIR, 'playlist.txt') def __init__(self,", "self.build_encoder(API_KEY) self.channels = channels def run(self): data = self.generate_data() self.save(data)", "items = data.get('items') content = items[0].get('contentDetails') playlists = content.get('relatedPlaylists') return", "def __init__(self, channels): self.channels = channels self.encoder = self.build_encoder(API_KEY) def", "data): items = data.get('items') snippet = items[0].get('snippet') title = 
snippet.get('title')", "class ApiPlayList(object): URL = BASE_URL + API_PLAYLIST FILE_NAME = os.path.join(BASE_DIR,", "self.encoder channels = self.channels ret = [] for key in", "url): url = urllib2.urlopen(url) data = url.read() return json.loads(data) def", "+ self._parms() if len(self.args) else '' return self.API_URL + parms", "ApiPlayList(object): URL = BASE_URL + API_PLAYLIST FILE_NAME = os.path.join(BASE_DIR, 'playlist.txt')", "self.args.items(): args.append(k + '=' + str(v)) return '&'.join(args) def get(self):", "return '&'.join(args) def get(self): parms = '?' + self._parms() if", "kwargs def _parms(self): args = [] for k, v in", "f.close() return json.loads(text) if __name__ == '__main__': args = sys.argv[1:]", "playlists.get('uploads') def save(self, data): with open(self.FILE_NAME, 'w') as f: f.write(json.dumps(data))", "'&'.join(args) def get(self): parms = '?' + self._parms() if len(self.args)", "'1veritasium', 'minutephysics', 'xadrezverbal', 'estevaoslow', 'Vsauce', 'braincraftvideo', 'CienciaTodoDia', ] class UrlEncoder(object):", "published_at = snippet.get('publishedAt') description = snippet.get('description') return [title, published_at, description]", "== '__main__': args = sys.argv[1:] if '-channel' in args: channel", "'nerdologia', 'Kurzgesagt', '1veritasium', 'minutephysics', 'xadrezverbal', 'estevaoslow', 'Vsauce', 'braincraftvideo', 'CienciaTodoDia', ]", "encoder.set('maxResults', '1') return encoder @classmethod def import_channels(cls, fname): with open(fname,", "sorted(data, key=lambda x: x[2]): f.write('{}: {} - {}\\n'.format(published_at[:10], key, title))", "encoder.set('part', 'contentDetails') return encoder class ApiPlayList(object): URL = BASE_URL +", "run(self): data = self.generate_data() self.save(data) def generate_data(self): encoder = self.encoder", "encoder = UrlEncoder() encoder.set('key', api_key) encoder.set('part', 'snippet') encoder.set('maxResults', '1') return", "import_channels(cls, fname): with open(fname, 'r') 
as f: text = f.read()", "get_data(self, url): url = urllib2.urlopen(url) data = url.read() return json.loads(data)", "json.loads(data) def build_encoder(self, api_key): UrlEncoder.API_URL = self.URL encoder = UrlEncoder()", "channel) data = self.get_data(encoder.get()) ret[channel] = self.get_playlist_id(data) return ret def", "'contentDetails') return encoder class ApiPlayList(object): URL = BASE_URL + API_PLAYLIST", "CHANNELS = [ 'videosimprovaveis', 'nerdologia', 'Kurzgesagt', '1veritasium', 'minutephysics', 'xadrezverbal', 'estevaoslow',", "BASE_URL = 'https://www.googleapis.com/youtube/v3/' API_CHANNELS = 'channels' API_PLAYLIST = 'playlistItems' API_KEY", "= os.path.dirname(__file__) BASE_URL = 'https://www.googleapis.com/youtube/v3/' API_CHANNELS = 'channels' API_PLAYLIST =", "__future__ import unicode_literals import os import sys import json import", "ret = [] for key in channels: encoder.set('playlistId', channels[key]) data", "self.encoder = self.build_encoder(API_KEY) def run(self): data = self.generate_data() self.save(data) def", "'__main__': args = sys.argv[1:] if '-channel' in args: channel =", "x[2]): f.write('{}: {} - {}\\n'.format(published_at[:10], key, title)) f.close() def get_data(self,", "channel in self.channels: encoder.set('forUsername', channel) data = self.get_data(encoder.get()) ret[channel] =", "for key in channels: encoder.set('playlistId', channels[key]) data = self.get_data(encoder.get()) ret", "self.channels ret = [] for key in channels: encoder.set('playlistId', channels[key])", "'channels.json') def __init__(self, channels): self.encoder = self.build_encoder(API_KEY) self.channels = channels", "= data.get('items') snippet = items[0].get('snippet') title = snippet.get('title') published_at =", "in self.args.items(): args.append(k + '=' + str(v)) return '&'.join(args) def", "= self.channels ret = [] for key in channels: encoder.set('playlistId',", "[] for k, v in self.args.items(): args.append(k + '=' +", "BASE_URL + API_PLAYLIST 
FILE_NAME = os.path.join(BASE_DIR, 'playlist.txt') def __init__(self, channels):", "UrlEncoder(object): API_URL = '' def __init__(self, **kwargs): self.args = kwargs", "sys import json import urllib2 import codecs BASE_DIR = os.path.dirname(__file__)", "if __name__ == '__main__': args = sys.argv[1:] if '-channel' in", "import unicode_literals import os import sys import json import urllib2", "# -*- coding: utf-8 -*- from __future__ import unicode_literals import", "args = sys.argv[1:] if '-channel' in args: channel = ApiChannel(CHANNELS)", "def _parms(self): args = [] for k, v in self.args.items():", "f: text = f.read() f.close() return json.loads(text) if __name__ ==", "description in sorted(data, key=lambda x: x[2]): f.write('{}: {} - {}\\n'.format(published_at[:10],", "url = urllib2.urlopen(url) data = url.read() return json.loads(data) def build_encoder(self,", "description = snippet.get('description') return [title, published_at, description] def save(self, data):", "encoder = self.encoder ret = {} for channel in self.channels:", "def generate_data(self): encoder = self.encoder channels = self.channels ret =", "= 'YOUR KEY' CHANNELS = [ 'videosimprovaveis', 'nerdologia', 'Kurzgesagt', '1veritasium',", "'playlistItems' API_KEY = 'YOUR KEY' CHANNELS = [ 'videosimprovaveis', 'nerdologia',", "parms = '?' 
+ self._parms() if len(self.args) else '' return", "API_CHANNELS FILE_NAME = os.path.join(BASE_DIR, 'channels.json') def __init__(self, channels): self.encoder =", "'-playlist' in args: channels = ApiPlayList.import_channels(ApiChannel.FILE_NAME) play_list = ApiPlayList(channels) play_list.run()", "+= [[key] + self.get_info(data)] return ret def get_info(self, data): items", "= self.generate_data() self.save(data) def generate_data(self): encoder = self.encoder ret =", "os.path.join(BASE_DIR, 'last_update.txt') with codecs.open(fname, 'w', encoding='utf-8') as f: for key,", "UrlEncoder.API_URL = self.URL encoder = UrlEncoder() encoder.set('key', api_key) encoder.set('part', 'snippet')", "= '' def __init__(self, **kwargs): self.args = kwargs def _parms(self):", "'snippet') encoder.set('maxResults', '1') return encoder @classmethod def import_channels(cls, fname): with", "items[0].get('snippet') title = snippet.get('title') published_at = snippet.get('publishedAt') description = snippet.get('description')", "'w') as f: f.write(json.dumps(data)) f.close() def build_encoder(self, api_key): UrlEncoder.API_URL =", "key, value): if value: self.args[key] = value class ApiChannel(object): URL", "= UrlEncoder() encoder.set('key', api_key) encoder.set('part', 'snippet') encoder.set('maxResults', '1') return encoder", "return json.loads(data) def build_encoder(self, api_key): UrlEncoder.API_URL = self.URL encoder =", "'videosimprovaveis', 'nerdologia', 'Kurzgesagt', '1veritasium', 'minutephysics', 'xadrezverbal', 'estevaoslow', 'Vsauce', 'braincraftvideo', 'CienciaTodoDia',", "= snippet.get('title') published_at = snippet.get('publishedAt') description = snippet.get('description') return [title,", "channels): self.encoder = self.build_encoder(API_KEY) self.channels = channels def run(self): data", "f.close() def get_data(self, url): url = urllib2.urlopen(url) data = url.read()", "for key, title, published_at, description in sorted(data, key=lambda x: x[2]):", "key, title, 
published_at, description in sorted(data, key=lambda x: x[2]): f.write('{}:", "self.generate_data() self.save(data) def generate_data(self): encoder = self.encoder ret = {}", "key=lambda x: x[2]): f.write('{}: {} - {}\\n'.format(published_at[:10], key, title)) f.close()", "channels: encoder.set('playlistId', channels[key]) data = self.get_data(encoder.get()) ret += [[key] +", "self.build_encoder(API_KEY) def run(self): data = self.generate_data() self.save(data) def generate_data(self): encoder", "self.get_playlist_id(data) return ret def get_data(self, url): url = urllib2.urlopen(url) data", "API_CHANNELS = 'channels' API_PLAYLIST = 'playlistItems' API_KEY = 'YOUR KEY'", "os.path.dirname(__file__) BASE_URL = 'https://www.googleapis.com/youtube/v3/' API_CHANNELS = 'channels' API_PLAYLIST = 'playlistItems'", "self.encoder ret = {} for channel in self.channels: encoder.set('forUsername', channel)", "= UrlEncoder() encoder.set('key', api_key) encoder.set('part', 'contentDetails') return encoder class ApiPlayList(object):", "[ 'videosimprovaveis', 'nerdologia', 'Kurzgesagt', '1veritasium', 'minutephysics', 'xadrezverbal', 'estevaoslow', 'Vsauce', 'braincraftvideo',", "= self.build_encoder(API_KEY) def run(self): data = self.generate_data() self.save(data) def generate_data(self):", "[[key] + self.get_info(data)] return ret def get_info(self, data): items =", "= self.get_data(encoder.get()) ret += [[key] + self.get_info(data)] return ret def", "= urllib2.urlopen(url) data = url.read() return json.loads(data) def build_encoder(self, api_key):", "import urllib2 import codecs BASE_DIR = os.path.dirname(__file__) BASE_URL = 'https://www.googleapis.com/youtube/v3/'", "= kwargs def _parms(self): args = [] for k, v", "__init__(self, channels): self.encoder = self.build_encoder(API_KEY) self.channels = channels def run(self):", "return ret def get_info(self, data): items = data.get('items') snippet =", "import os import sys import json import urllib2 import codecs", "url.read() 
return json.loads(data) def build_encoder(self, api_key): UrlEncoder.API_URL = self.URL encoder", "key in channels: encoder.set('playlistId', channels[key]) data = self.get_data(encoder.get()) ret +=", "'=' + str(v)) return '&'.join(args) def get(self): parms = '?'", "'' def __init__(self, **kwargs): self.args = kwargs def _parms(self): args", "args: channel = ApiChannel(CHANNELS) channel.run() if '-playlist' in args: channels", "API_URL = '' def __init__(self, **kwargs): self.args = kwargs def", "'w', encoding='utf-8') as f: for key, title, published_at, description in", "snippet = items[0].get('snippet') title = snippet.get('title') published_at = snippet.get('publishedAt') description", "= sys.argv[1:] if '-channel' in args: channel = ApiChannel(CHANNELS) channel.run()", "= self.get_playlist_id(data) return ret def get_data(self, url): url = urllib2.urlopen(url)", "data.get('items') content = items[0].get('contentDetails') playlists = content.get('relatedPlaylists') return playlists.get('uploads') def", "def save(self, data): fname = os.path.join(BASE_DIR, 'last_update.txt') with codecs.open(fname, 'w',", "encoding='utf-8') as f: for key, title, published_at, description in sorted(data,", "'1') return encoder @classmethod def import_channels(cls, fname): with open(fname, 'r')", "= channels def run(self): data = self.generate_data() self.save(data) def generate_data(self):", "class UrlEncoder(object): API_URL = '' def __init__(self, **kwargs): self.args =", "ret += [[key] + self.get_info(data)] return ret def get_info(self, data):", "urllib2.urlopen(url) data = url.read() return json.loads(data) def get_playlist_id(self, data): items", "self.get_data(encoder.get()) ret[channel] = self.get_playlist_id(data) return ret def get_data(self, url): url", "+ parms def set(self, key, value): if value: self.args[key] =", "self.channels: encoder.set('forUsername', channel) data = self.get_data(encoder.get()) ret[channel] = self.get_playlist_id(data) return", 
"open(self.FILE_NAME, 'w') as f: f.write(json.dumps(data)) f.close() def build_encoder(self, api_key): UrlEncoder.API_URL", "key, title)) f.close() def get_data(self, url): url = urllib2.urlopen(url) data", "= data.get('items') content = items[0].get('contentDetails') playlists = content.get('relatedPlaylists') return playlists.get('uploads')", "'estevaoslow', 'Vsauce', 'braincraftvideo', 'CienciaTodoDia', ] class UrlEncoder(object): API_URL = ''", "data = url.read() return json.loads(data) def build_encoder(self, api_key): UrlEncoder.API_URL =", "self.get_data(encoder.get()) ret += [[key] + self.get_info(data)] return ret def get_info(self,", "utf-8 -*- from __future__ import unicode_literals import os import sys", "self.URL encoder = UrlEncoder() encoder.set('key', api_key) encoder.set('part', 'contentDetails') return encoder", "def build_encoder(self, api_key): UrlEncoder.API_URL = self.URL encoder = UrlEncoder() encoder.set('key',", "URL = BASE_URL + API_CHANNELS FILE_NAME = os.path.join(BASE_DIR, 'channels.json') def", "= [ 'videosimprovaveis', 'nerdologia', 'Kurzgesagt', '1veritasium', 'minutephysics', 'xadrezverbal', 'estevaoslow', 'Vsauce',", "UrlEncoder.API_URL = self.URL encoder = UrlEncoder() encoder.set('key', api_key) encoder.set('part', 'contentDetails')", "= BASE_URL + API_CHANNELS FILE_NAME = os.path.join(BASE_DIR, 'channels.json') def __init__(self,", "with open(fname, 'r') as f: text = f.read() f.close() return", "= self.URL encoder = UrlEncoder() encoder.set('key', api_key) encoder.set('part', 'contentDetails') return", "title = snippet.get('title') published_at = snippet.get('publishedAt') description = snippet.get('description') return", "get(self): parms = '?' 
+ self._parms() if len(self.args) else ''", "args = [] for k, v in self.args.items(): args.append(k +", "= [] for key in channels: encoder.set('playlistId', channels[key]) data =", "data = self.get_data(encoder.get()) ret += [[key] + self.get_info(data)] return ret", "__init__(self, **kwargs): self.args = kwargs def _parms(self): args = []", "[] for key in channels: encoder.set('playlistId', channels[key]) data = self.get_data(encoder.get())", "UrlEncoder() encoder.set('key', api_key) encoder.set('part', 'snippet') encoder.set('maxResults', '1') return encoder @classmethod", "in args: channel = ApiChannel(CHANNELS) channel.run() if '-playlist' in args:", "fname): with open(fname, 'r') as f: text = f.read() f.close()", "__init__(self, channels): self.channels = channels self.encoder = self.build_encoder(API_KEY) def run(self):", "channels[key]) data = self.get_data(encoder.get()) ret += [[key] + self.get_info(data)] return", "encoder.set('playlistId', channels[key]) data = self.get_data(encoder.get()) ret += [[key] + self.get_info(data)]", "urllib2.urlopen(url) data = url.read() return json.loads(data) def build_encoder(self, api_key): UrlEncoder.API_URL", "channel.run() if '-playlist' in args: channels = ApiPlayList.import_channels(ApiChannel.FILE_NAME) play_list =", "sys.argv[1:] if '-channel' in args: channel = ApiChannel(CHANNELS) channel.run() if", "return ret def get_data(self, url): url = urllib2.urlopen(url) data =", "= content.get('relatedPlaylists') return playlists.get('uploads') def save(self, data): with open(self.FILE_NAME, 'w')", "= items[0].get('snippet') title = snippet.get('title') published_at = snippet.get('publishedAt') description =", "channels def run(self): data = self.generate_data() self.save(data) def generate_data(self): encoder", "'https://www.googleapis.com/youtube/v3/' API_CHANNELS = 'channels' API_PLAYLIST = 'playlistItems' API_KEY = 'YOUR", "data = self.generate_data() self.save(data) def generate_data(self): encoder = self.encoder 
ret", "{} for channel in self.channels: encoder.set('forUsername', channel) data = self.get_data(encoder.get())", "channel = ApiChannel(CHANNELS) channel.run() if '-playlist' in args: channels =", "= self.get_data(encoder.get()) ret[channel] = self.get_playlist_id(data) return ret def get_data(self, url):", "content = items[0].get('contentDetails') playlists = content.get('relatedPlaylists') return playlists.get('uploads') def save(self,", "URL = BASE_URL + API_PLAYLIST FILE_NAME = os.path.join(BASE_DIR, 'playlist.txt') def", "else '' return self.API_URL + parms def set(self, key, value):", "import json import urllib2 import codecs BASE_DIR = os.path.dirname(__file__) BASE_URL", "= snippet.get('publishedAt') description = snippet.get('description') return [title, published_at, description] def", "data): with open(self.FILE_NAME, 'w') as f: f.write(json.dumps(data)) f.close() def build_encoder(self,", "= os.path.join(BASE_DIR, 'last_update.txt') with codecs.open(fname, 'w', encoding='utf-8') as f: for", "v in self.args.items(): args.append(k + '=' + str(v)) return '&'.join(args)", "ret[channel] = self.get_playlist_id(data) return ret def get_data(self, url): url =", "'CienciaTodoDia', ] class UrlEncoder(object): API_URL = '' def __init__(self, **kwargs):", "self.save(data) def generate_data(self): encoder = self.encoder ret = {} for", "- {}\\n'.format(published_at[:10], key, title)) f.close() def get_data(self, url): url =", "class ApiChannel(object): URL = BASE_URL + API_CHANNELS FILE_NAME = os.path.join(BASE_DIR,", "= url.read() return json.loads(data) def build_encoder(self, api_key): UrlEncoder.API_URL = self.URL", "snippet.get('publishedAt') description = snippet.get('description') return [title, published_at, description] def save(self," ]
[ "print(\"F1: \",classifier.f1(y_pred, y)) def predict_on_trace(trace, A = 0.9): classifier =", "= (np.array(y_pred) == 0) * (np.array(y_ref) == 1) return tp", "def train_on_logs(*filenames, is_miner): classifier = model() #classifier.load() x, y =", "* len(x) return x,y def train_on_logs(*filenames, is_miner): classifier = model()", "'model.p'): f = open(filename, 'wb') pickle.dump(self.d, f) f.close() def fit(self,", "len(res): raise Exception('wtf') def gen_train(a, is_miner): #x1,y1,x2,y2 = train_test_split(x,y,0.05) x", "is_miner): #x1,y1,x2,y2 = train_test_split(x,y,0.05) x = ngrams(a) y = [1", "f: l = eval(''.join(f)) codes = [] for i in", "y)) def predict_on_trace(trace, A = 0.9): classifier = model() classifier.load()", "no: codes.append(i[1]) x_, y_ = gen_train(codes, is_miner[id]) x.append(x_) y.append(y_) print(x,y)", "0,] * len(x) return x,y def train_on_logs(*filenames, is_miner): classifier =", "#classifier.load() x, y = [], [] for id, filename in", "filename in enumerate(filenames): l = [] with open(filename, 'r') as", "* (np.array(y_ref) == 1) return tp / (tp + (fp", "fn = (np.array(y_pred) == 0) * (np.array(y_ref) == 1) return", "+ fn) / 2) def ngrams(array, size = 25, overlacing", "i in l: if i[0] not in no: codes.append(i[1]) x_,", "print(x,y) #classifier.fit(x,y) #classifier.save() def predict_on_logs(*filenames, is_miner): classifier = model() classifier.load()", "(np.array(y_ref) == 0) fp = (np.array(y_pred) == 1) * (np.array(y_ref)", "codes.append(i[1]) x_, y_ = gen_train(codes, is_miner[id]) x.append(x_) y.append(y_) y_pred =", "b'apt',b'dpkg'] class model: def __init__(self): self.d = DecisionTreeClassifier() def load(self,", "is_miner): classifier = model() #classifier.load() x, y = [], []", "predict_on_trace(trace, A = 0.9): classifier = model() classifier.load() x, y", "= open(filename, 'rb') self.d = pickle.load(f) if type(self.d) != DecisionTreeClassifier:", "= None f.close() except: return def save(self, filename = 'model.p'):", "y 
= [1 if is_miner else 0,] * len(x) return", "= [1 if is_miner else 0,] * len(x) return x,y", "== np.array(y_ref)) / len(y_ref) def f1(self, y_pred, y_ref): tp =", "as np no = [b'runc:[2:INIT]', b'containerssh-ag', b'apt',b'dpkg'] class model: def", "[], [] for id, filename in enumerate(filenames): l = []", "self.d.fit(x, y) def predict(self, x): return self.d.predict(x) def accuracy(self, y_pred,", "DecisionTreeClassifier() def load(self, filename = 'model.p'): try: f = open(filename,", "__init__(self): self.d = DecisionTreeClassifier() def load(self, filename = 'model.p'): try:", "np no = [b'runc:[2:INIT]', b'containerssh-ag', b'apt',b'dpkg'] class model: def __init__(self):", "+ (fp + fn) / 2) def ngrams(array, size =", "sum(np.array(y_pred) == np.array(y_ref)) / len(y_ref) def f1(self, y_pred, y_ref): tp", "== size for i in res]) != len(res): raise Exception('wtf')", "open(filename, 'r') as f: l = eval(''.join(f)) codes = []", "= open(filename, 'wb') pickle.dump(self.d, f) f.close() def fit(self, x, y):", "= train_test_split(x,y,0.05) x = ngrams(a) y = [1 if is_miner", "else 0,] * len(x) return x,y def train_on_logs(*filenames, is_miner): classifier", "np.array(y_ref)) / len(y_ref) def f1(self, y_pred, y_ref): tp = (np.array(y_pred)", "d = None f.close() except: return def save(self, filename =", "== 1) * (np.array(y_ref) == 1) tn = (np.array(y_pred) ==", "1 if overlacing else size)] if sum([len(i) == size for", "in res]) != len(res): raise Exception('wtf') def gen_train(a, is_miner): #x1,y1,x2,y2", "x.append(x_) y.append(y_) y_pred = classifier.predict(x) print(\"Accuracy: \", classifier.accuracy(y_pred, y)) print(\"F1:", "!= len(res): raise Exception('wtf') def gen_train(a, is_miner): #x1,y1,x2,y2 = train_test_split(x,y,0.05)", "(tp + (fp + fn) / 2) def ngrams(array, size", "= DecisionTreeClassifier() def load(self, filename = 'model.p'): try: f =", "open(filename, 'rb') self.d = pickle.load(f) if type(self.d) != DecisionTreeClassifier: d", "eval(''.join(f)) 
codes = [] for i in l: if i[0]", "= [] for i in l: if i[0] not in", "import pickle import numpy as np no = [b'runc:[2:INIT]', b'containerssh-ag',", "== 0) * (np.array(y_ref) == 1) return tp / (tp", "for i in l: if i[0] not in no: codes.append(i[1])", "if type(self.d) != DecisionTreeClassifier: d = None f.close() except: return", "= [] with open(filename, 'r') as f: l = eval(''.join(f))", "len(y_ref) def f1(self, y_pred, y_ref): tp = (np.array(y_pred) == 1)", "range(0, len(array)//size * size, 1 if overlacing else size)] if", "def ngrams(array, size = 25, overlacing = False): return [array[i:i+size]", "= [], [] for id, filename in enumerate(filenames): codes =", "size)] res = [array[i:i+size] for i in range(0, len(array)//size *", "no: codes.append(i[1]) x_, y_ = gen_train(codes, is_miner[id]) x.append(x_) y.append(y_) y_pred", "/ (tp + (fp + fn) / 2) def ngrams(array,", "train_on_logs(*filenames, is_miner): classifier = model() #classifier.load() x, y = [],", "return tp / (tp + (fp + fn) / 2)", "= 0.9): classifier = model() classifier.load() x, y = [],", "fit(self, x, y): self.d.fit(x, y) def predict(self, x): return self.d.predict(x)", "x, y): self.d.fit(x, y) def predict(self, x): return self.d.predict(x) def", "None f.close() except: return def save(self, filename = 'model.p'): f", "== 1) return tp / (tp + (fp + fn)", "* size, 1 if overlacing else size)] if sum([len(i) ==", "[] for id, filename in enumerate(filenames): l = [] with", "class model: def __init__(self): self.d = DecisionTreeClassifier() def load(self, filename", "2) def ngrams(array, size = 25, overlacing = False): return", "!= DecisionTreeClassifier: d = None f.close() except: return def save(self,", "classifier = model() #classifier.load() x, y = [], [] for", "y): self.d.fit(x, y) def predict(self, x): return self.d.predict(x) def accuracy(self,", "[array[i:i+size] for i in range(0, len(array)//size * size, 1 if", "gen_train(a, is_miner): #x1,y1,x2,y2 = train_test_split(x,y,0.05) x = ngrams(a) y 
=", "model() #classifier.load() x, y = [], [] for id, filename", "[], [] for id, filename in enumerate(filenames): codes = []", "for i in trace: if i[0] not in no: codes.append(i[1])", "i in res]) != len(res): raise Exception('wtf') def gen_train(a, is_miner):", "len(array)//size * size, 1 if overlacing else size)] if sum([len(i)", "[b'runc:[2:INIT]', b'containerssh-ag', b'apt',b'dpkg'] class model: def __init__(self): self.d = DecisionTreeClassifier()", "return def save(self, filename = 'model.p'): f = open(filename, 'wb')", "= classifier.predict(x) print(\"Accuracy: \", classifier.accuracy(y_pred, y)) print(\"F1: \",classifier.f1(y_pred, y)) def", "size for i in res]) != len(res): raise Exception('wtf') def", "classifier.accuracy(y_pred, y)) print(\"F1: \",classifier.f1(y_pred, y)) def predict_on_trace(trace, A = 0.9):", "type(self.d) != DecisionTreeClassifier: d = None f.close() except: return def", "tp / (tp + (fp + fn) / 2) def", "= False): return [array[i:i+size] for i in range(0, len(array)//size *", "filename = 'model.p'): try: f = open(filename, 'rb') self.d =", "== 1) * (np.array(y_ref) == 0) fn = (np.array(y_pred) ==", "== 0) * (np.array(y_ref) == 0) fp = (np.array(y_pred) ==", "def f1(self, y_pred, y_ref): tp = (np.array(y_pred) == 1) *", "'wb') pickle.dump(self.d, f) f.close() def fit(self, x, y): self.d.fit(x, y)", "/ 2) def ngrams(array, size = 25, overlacing = False):", "if overlacing else size)] if sum([len(i) == size for i", "not in no: codes.append(i[1]) x_, y_ = gen_train(codes, is_miner[id]) x.append(x_)", "size, 1 if overlacing else size)] if sum([len(i) == size", "is_miner[id]) x.append(x_) y.append(y_) y_pred = classifier.predict(x) acc = sum(np.array(y_pred)) /", "1) tn = (np.array(y_pred) == 0) * (np.array(y_ref) == 0)", "== 0) fn = (np.array(y_pred) == 0) * (np.array(y_ref) ==", "classifier = model() classifier.load() x, y = [], [] for", "y_ref): return sum(np.array(y_pred) == np.array(y_ref)) / len(y_ref) def f1(self, y_pred,", "0) fp = 
(np.array(y_pred) == 1) * (np.array(y_ref) == 0)", "open(filename, 'wb') pickle.dump(self.d, f) f.close() def fit(self, x, y): self.d.fit(x,", "def save(self, filename = 'model.p'): f = open(filename, 'wb') pickle.dump(self.d,", "#classifier.save() def predict_on_logs(*filenames, is_miner): classifier = model() classifier.load() x, y", "ngrams(a) y = [1 if is_miner else 0,] * len(x)", "gen_train(codes, is_miner[id]) x.append(x_) y.append(y_) y_pred = classifier.predict(x) acc = sum(np.array(y_pred))", "for id, filename in enumerate(filenames): codes = [] for i", "res = [array[i:i+size] for i in range(0, len(array)//size * size,", "size, 1 if overlacing else size)] res = [array[i:i+size] for", "Exception('wtf') def gen_train(a, is_miner): #x1,y1,x2,y2 = train_test_split(x,y,0.05) x = ngrams(a)", "id, filename in enumerate(filenames): l = [] with open(filename, 'r')", "False): return [array[i:i+size] for i in range(0, len(array)//size * size,", "trace: if i[0] not in no: codes.append(i[1]) x_, y_ =", "classifier.predict(x) acc = sum(np.array(y_pred)) / len(y_pred) return acc > A", "sklearn.tree import DecisionTreeClassifier import pickle import numpy as np no", "in range(0, len(array)//size * size, 1 if overlacing else size)]", "'rb') self.d = pickle.load(f) if type(self.d) != DecisionTreeClassifier: d =", "return x,y def train_on_logs(*filenames, is_miner): classifier = model() #classifier.load() x,", "= (np.array(y_pred) == 1) * (np.array(y_ref) == 0) fn =", "codes = [] for i in l: if i[0] not", "= ngrams(a) y = [1 if is_miner else 0,] *", "1) * (np.array(y_ref) == 0) fn = (np.array(y_pred) == 0)", "return sum(np.array(y_pred) == np.array(y_ref)) / len(y_ref) def f1(self, y_pred, y_ref):", "def load(self, filename = 'model.p'): try: f = open(filename, 'rb')", "python3 from sklearn.tree import DecisionTreeClassifier import pickle import numpy as", "else size)] res = [array[i:i+size] for i in range(0, len(array)//size", "accuracy(self, y_pred, y_ref): return 
sum(np.array(y_pred) == np.array(y_ref)) / len(y_ref) def", "train_test_split(x,y,0.05) x = ngrams(a) y = [1 if is_miner else", "load(self, filename = 'model.p'): try: f = open(filename, 'rb') self.d", "y_pred = classifier.predict(x) print(\"Accuracy: \", classifier.accuracy(y_pred, y)) print(\"F1: \",classifier.f1(y_pred, y))", "if is_miner else 0,] * len(x) return x,y def train_on_logs(*filenames,", "* (np.array(y_ref) == 0) fn = (np.array(y_pred) == 0) *", "0.9): classifier = model() classifier.load() x, y = [], []", "self.d = DecisionTreeClassifier() def load(self, filename = 'model.p'): try: f", "== 1) tn = (np.array(y_pred) == 0) * (np.array(y_ref) ==", "= (np.array(y_pred) == 0) * (np.array(y_ref) == 0) fp =", "y_pred, y_ref): tp = (np.array(y_pred) == 1) * (np.array(y_ref) ==", "as f: l = eval(''.join(f)) codes = [] for i", "classifier.predict(x) print(\"Accuracy: \", classifier.accuracy(y_pred, y)) print(\"F1: \",classifier.f1(y_pred, y)) def predict_on_trace(trace,", "[] for i in l: if i[0] not in no:", "DecisionTreeClassifier import pickle import numpy as np no = [b'runc:[2:INIT]',", "classifier.load() x, y = [], [] for id, filename in", "y)) print(\"F1: \",classifier.f1(y_pred, y)) def predict_on_trace(trace, A = 0.9): classifier", "= 'model.p'): f = open(filename, 'wb') pickle.dump(self.d, f) f.close() def", "* (np.array(y_ref) == 0) fp = (np.array(y_pred) == 1) *", "== 0) fp = (np.array(y_pred) == 1) * (np.array(y_ref) ==", "overlacing else size)] res = [array[i:i+size] for i in range(0,", "= model() #classifier.load() x, y = [], [] for id,", "* size, 1 if overlacing else size)] res = [array[i:i+size]", "0) * (np.array(y_ref) == 0) fp = (np.array(y_pred) == 1)", "codes.append(i[1]) x_, y_ = gen_train(codes, is_miner[id]) x.append(x_) y.append(y_) print(x,y) #classifier.fit(x,y)", "= 'model.p'): try: f = open(filename, 'rb') self.d = pickle.load(f)", "import numpy as np no = [b'runc:[2:INIT]', b'containerssh-ag', b'apt',b'dpkg'] class", "fp = 
(np.array(y_pred) == 1) * (np.array(y_ref) == 0) fn", "(np.array(y_ref) == 1) return tp / (tp + (fp +", "enumerate(filenames): l = [] with open(filename, 'r') as f: l", "return [array[i:i+size] for i in range(0, len(array)//size * size, 1", "is_miner[id]) x.append(x_) y.append(y_) print(x,y) #classifier.fit(x,y) #classifier.save() def predict_on_logs(*filenames, is_miner): classifier", "res]) != len(res): raise Exception('wtf') def gen_train(a, is_miner): #x1,y1,x2,y2 =", "= gen_train(codes, is_miner[id]) x.append(x_) y.append(y_) y_pred = classifier.predict(x) print(\"Accuracy: \",", "#classifier.fit(x,y) #classifier.save() def predict_on_logs(*filenames, is_miner): classifier = model() classifier.load() x,", "x, y = [], [] for id, filename in enumerate(filenames):", "(np.array(y_ref) == 1) tn = (np.array(y_pred) == 0) * (np.array(y_ref)", "pickle import numpy as np no = [b'runc:[2:INIT]', b'containerssh-ag', b'apt',b'dpkg']", "y.append(y_) y_pred = classifier.predict(x) acc = sum(np.array(y_pred)) / len(y_pred) return", "return self.d.predict(x) def accuracy(self, y_pred, y_ref): return sum(np.array(y_pred) == np.array(y_ref))", "import DecisionTreeClassifier import pickle import numpy as np no =", "fn) / 2) def ngrams(array, size = 25, overlacing =", "overlacing else size)] if sum([len(i) == size for i in", "f = open(filename, 'rb') self.d = pickle.load(f) if type(self.d) !=", "0) * (np.array(y_ref) == 1) return tp / (tp +", "self.d = pickle.load(f) if type(self.d) != DecisionTreeClassifier: d = None", "if overlacing else size)] res = [array[i:i+size] for i in", "= 25, overlacing = False): return [array[i:i+size] for i in", "with open(filename, 'r') as f: l = eval(''.join(f)) codes =", "= (np.array(y_pred) == 1) * (np.array(y_ref) == 1) tn =", "A = 0.9): classifier = model() classifier.load() x, y =", "y = [], [] for id, filename in enumerate(filenames): l", "in trace: if i[0] not in no: codes.append(i[1]) x_, y_", "(np.array(y_pred) == 0) * (np.array(y_ref) == 
0) fp = (np.array(y_pred)", "/ len(y_ref) def f1(self, y_pred, y_ref): tp = (np.array(y_pred) ==", "model() classifier.load() x, y = [], [] for id, filename", "\",classifier.f1(y_pred, y)) def predict_on_trace(trace, A = 0.9): classifier = model()", "x.append(x_) y.append(y_) y_pred = classifier.predict(x) acc = sum(np.array(y_pred)) / len(y_pred)", "l: if i[0] not in no: codes.append(i[1]) x_, y_ =", "raise Exception('wtf') def gen_train(a, is_miner): #x1,y1,x2,y2 = train_test_split(x,y,0.05) x =", "is_miner): classifier = model() classifier.load() x, y = [], []", "i in range(0, len(array)//size * size, 1 if overlacing else", "= gen_train(codes, is_miner[id]) x.append(x_) y.append(y_) y_pred = classifier.predict(x) acc =", "no = [b'runc:[2:INIT]', b'containerssh-ag', b'apt',b'dpkg'] class model: def __init__(self): self.d", "* (np.array(y_ref) == 1) tn = (np.array(y_pred) == 0) *", "y.append(y_) y_pred = classifier.predict(x) print(\"Accuracy: \", classifier.accuracy(y_pred, y)) print(\"F1: \",classifier.f1(y_pred,", "f.close() except: return def save(self, filename = 'model.p'): f =", "'model.p'): try: f = open(filename, 'rb') self.d = pickle.load(f) if", "def accuracy(self, y_pred, y_ref): return sum(np.array(y_pred) == np.array(y_ref)) / len(y_ref)", "1 if overlacing else size)] res = [array[i:i+size] for i", "25, overlacing = False): return [array[i:i+size] for i in range(0,", "#x1,y1,x2,y2 = train_test_split(x,y,0.05) x = ngrams(a) y = [1 if", "enumerate(filenames): codes = [] for i in trace: if i[0]", "gen_train(codes, is_miner[id]) x.append(x_) y.append(y_) print(x,y) #classifier.fit(x,y) #classifier.save() def predict_on_logs(*filenames, is_miner):", "x.append(x_) y.append(y_) print(x,y) #classifier.fit(x,y) #classifier.save() def predict_on_logs(*filenames, is_miner): classifier =", "is_miner else 0,] * len(x) return x,y def train_on_logs(*filenames, is_miner):", "x): return self.d.predict(x) def accuracy(self, y_pred, y_ref): return sum(np.array(y_pred) 
==", "x_, y_ = gen_train(codes, is_miner[id]) x.append(x_) y.append(y_) print(x,y) #classifier.fit(x,y) #classifier.save()", "if i[0] not in no: codes.append(i[1]) x_, y_ = gen_train(codes,", "f1(self, y_pred, y_ref): tp = (np.array(y_pred) == 1) * (np.array(y_ref)", "overlacing = False): return [array[i:i+size] for i in range(0, len(array)//size", "y_ref): tp = (np.array(y_pred) == 1) * (np.array(y_ref) == 1)", "range(0, len(array)//size * size, 1 if overlacing else size)] res", "x,y def train_on_logs(*filenames, is_miner): classifier = model() #classifier.load() x, y", "y_ = gen_train(codes, is_miner[id]) x.append(x_) y.append(y_) y_pred = classifier.predict(x) print(\"Accuracy:", "\", classifier.accuracy(y_pred, y)) print(\"F1: \",classifier.f1(y_pred, y)) def predict_on_trace(trace, A =", "= gen_train(codes, is_miner[id]) x.append(x_) y.append(y_) print(x,y) #classifier.fit(x,y) #classifier.save() def predict_on_logs(*filenames,", "#!/usr/bin/env python3 from sklearn.tree import DecisionTreeClassifier import pickle import numpy", "in l: if i[0] not in no: codes.append(i[1]) x_, y_", "sum([len(i) == size for i in res]) != len(res): raise", "<reponame>be4r/ssh-miner-detection #!/usr/bin/env python3 from sklearn.tree import DecisionTreeClassifier import pickle import", "def predict_on_logs(*filenames, is_miner): classifier = model() classifier.load() x, y =", "[] for i in trace: if i[0] not in no:", "i[0] not in no: codes.append(i[1]) x_, y_ = gen_train(codes, is_miner[id])", "from sklearn.tree import DecisionTreeClassifier import pickle import numpy as np", "(np.array(y_pred) == 0) * (np.array(y_ref) == 1) return tp /", "(np.array(y_pred) == 1) * (np.array(y_ref) == 0) fn = (np.array(y_pred)", "0) fn = (np.array(y_pred) == 0) * (np.array(y_ref) == 1)", "is_miner[id]) x.append(x_) y.append(y_) y_pred = classifier.predict(x) print(\"Accuracy: \", classifier.accuracy(y_pred, y))", "for i in res]) != len(res): raise Exception('wtf') def gen_train(a,", "(fp + fn) / 2) 
def ngrams(array, size = 25,", "predict_on_logs(*filenames, is_miner): classifier = model() classifier.load() x, y = [],", "size = 25, overlacing = False): return [array[i:i+size] for i", "y = [], [] for id, filename in enumerate(filenames): codes", "if sum([len(i) == size for i in res]) != len(res):", "= [b'runc:[2:INIT]', b'containerssh-ag', b'apt',b'dpkg'] class model: def __init__(self): self.d =", "for id, filename in enumerate(filenames): l = [] with open(filename,", "in no: codes.append(i[1]) x_, y_ = gen_train(codes, is_miner[id]) x.append(x_) y.append(y_)", "i in trace: if i[0] not in no: codes.append(i[1]) x_,", "filename = 'model.p'): f = open(filename, 'wb') pickle.dump(self.d, f) f.close()", "(np.array(y_ref) == 0) fn = (np.array(y_pred) == 0) * (np.array(y_ref)", "l = [] with open(filename, 'r') as f: l =", "id, filename in enumerate(filenames): codes = [] for i in", "DecisionTreeClassifier: d = None f.close() except: return def save(self, filename", "def predict_on_trace(trace, A = 0.9): classifier = model() classifier.load() x,", "= eval(''.join(f)) codes = [] for i in l: if", "codes = [] for i in trace: if i[0] not", "self.d.predict(x) def accuracy(self, y_pred, y_ref): return sum(np.array(y_pred) == np.array(y_ref)) /", "len(array)//size * size, 1 if overlacing else size)] res =", "= [], [] for id, filename in enumerate(filenames): l =", "filename in enumerate(filenames): codes = [] for i in trace:", "= [array[i:i+size] for i in range(0, len(array)//size * size, 1", "print(\"Accuracy: \", classifier.accuracy(y_pred, y)) print(\"F1: \",classifier.f1(y_pred, y)) def predict_on_trace(trace, A", "x = ngrams(a) y = [1 if is_miner else 0,]", "y.append(y_) print(x,y) #classifier.fit(x,y) #classifier.save() def predict_on_logs(*filenames, is_miner): classifier = model()", "x_, y_ = gen_train(codes, is_miner[id]) x.append(x_) y.append(y_) y_pred = classifier.predict(x)", "y_ = gen_train(codes, is_miner[id]) x.append(x_) y.append(y_) y_pred = 
classifier.predict(x) acc", "y_pred = classifier.predict(x) acc = sum(np.array(y_pred)) / len(y_pred) return acc", "1) return tp / (tp + (fp + fn) /", "= classifier.predict(x) acc = sum(np.array(y_pred)) / len(y_pred) return acc >", "pickle.dump(self.d, f) f.close() def fit(self, x, y): self.d.fit(x, y) def", "l = eval(''.join(f)) codes = [] for i in l:", "def predict(self, x): return self.d.predict(x) def accuracy(self, y_pred, y_ref): return", "pickle.load(f) if type(self.d) != DecisionTreeClassifier: d = None f.close() except:", "except: return def save(self, filename = 'model.p'): f = open(filename,", "def __init__(self): self.d = DecisionTreeClassifier() def load(self, filename = 'model.p'):", "tn = (np.array(y_pred) == 0) * (np.array(y_ref) == 0) fp", "1) * (np.array(y_ref) == 1) tn = (np.array(y_pred) == 0)", "[1 if is_miner else 0,] * len(x) return x,y def", "def fit(self, x, y): self.d.fit(x, y) def predict(self, x): return", "= model() classifier.load() x, y = [], [] for id,", "def gen_train(a, is_miner): #x1,y1,x2,y2 = train_test_split(x,y,0.05) x = ngrams(a) y", "[] for id, filename in enumerate(filenames): codes = [] for", "numpy as np no = [b'runc:[2:INIT]', b'containerssh-ag', b'apt',b'dpkg'] class model:", "y) def predict(self, x): return self.d.predict(x) def accuracy(self, y_pred, y_ref):", "(np.array(y_pred) == 1) * (np.array(y_ref) == 1) tn = (np.array(y_pred)", "in enumerate(filenames): l = [] with open(filename, 'r') as f:", "b'containerssh-ag', b'apt',b'dpkg'] class model: def __init__(self): self.d = DecisionTreeClassifier() def", "'r') as f: l = eval(''.join(f)) codes = [] for", "predict(self, x): return self.d.predict(x) def accuracy(self, y_pred, y_ref): return sum(np.array(y_pred)", "else size)] if sum([len(i) == size for i in res])", "try: f = open(filename, 'rb') self.d = pickle.load(f) if type(self.d)", "[] with open(filename, 'r') as f: l = eval(''.join(f)) codes", "y_ = gen_train(codes, is_miner[id]) x.append(x_) y.append(y_) 
print(x,y) #classifier.fit(x,y) #classifier.save() def", "save(self, filename = 'model.p'): f = open(filename, 'wb') pickle.dump(self.d, f)", "in enumerate(filenames): codes = [] for i in trace: if", "f) f.close() def fit(self, x, y): self.d.fit(x, y) def predict(self,", "ngrams(array, size = 25, overlacing = False): return [array[i:i+size] for", "f = open(filename, 'wb') pickle.dump(self.d, f) f.close() def fit(self, x,", "= [] for i in trace: if i[0] not in", "= pickle.load(f) if type(self.d) != DecisionTreeClassifier: d = None f.close()", "len(x) return x,y def train_on_logs(*filenames, is_miner): classifier = model() #classifier.load()", "y_pred, y_ref): return sum(np.array(y_pred) == np.array(y_ref)) / len(y_ref) def f1(self,", "size)] if sum([len(i) == size for i in res]) !=", "for i in range(0, len(array)//size * size, 1 if overlacing", "gen_train(codes, is_miner[id]) x.append(x_) y.append(y_) y_pred = classifier.predict(x) print(\"Accuracy: \", classifier.accuracy(y_pred,", "model: def __init__(self): self.d = DecisionTreeClassifier() def load(self, filename =", "tp = (np.array(y_pred) == 1) * (np.array(y_ref) == 1) tn", "f.close() def fit(self, x, y): self.d.fit(x, y) def predict(self, x):" ]
[ "time bins Rat_pos = interp1(data(:, 3), [data(:, 1), data(:, 2)],", "3, length(data)/3); data = data'; size(data) % 43799-by-3 fclose(fid); %", "bin] = histc(spike_time, time_edges); % column vector % if analyzing", "num2str(i)]; fid = fopen(str, 'r'); cell_data = fscanf(fid, '%f'); cell_data", "1:2); [spike_time_count, bin] = histc(spike_time, time_edges); % column vector %", "% 43799-by-3 fclose(fid); % sampling time Ts = 0.0333; duration", "pause periods T = length(time_edges); AllSpikeData = zeros(C,T); for i=1:C", "= 0.0333; duration = size(data,1) * Ts; % in second", "bins Rat_pos = interp1(data(:, 3), [data(:, 1), data(:, 2)], time_edges');", "data(:, 2)], time_edges'); vel = abs(diff(Rat_pos, 1, 1 )); %", "the RUN period only uncomment this % spike_time_count = spike_time_count(vel_ind);", "1), data(:, 2)], time_edges'); vel = abs(diff(Rat_pos, 1, 1 ));", "250 ms rat_vel = 4 * sqrt(vel(:, 1).^2 + vel(:,", "column vector % if analyzing the RUN period only uncomment", "10); % RUN velocity threshold % using RUN only T", "% unit: cm/s vel_ind = find(rat_vel >= 10); % RUN", "cell_data = fscanf(fid, '%f'); cell_data = reshape(cell_data, 3, length(cell_data)/3)'; spike_time", "length(vel_ind); % using Run + pause periods T = length(time_edges);", "analyzing the RUN period only uncomment this % spike_time_count =", "fclose(fid); % sampling time Ts = 0.0333; duration = size(data,1)", "'r'); cell_data = fscanf(fid, '%f'); cell_data = reshape(cell_data, 3, length(cell_data)/3)';", "time Ts = 0.0333; duration = size(data,1) * Ts; %", "= cell_data(:, 3); spike_pos = cell_data(:, 1:2); [spike_time_count, bin] =", "spike_pos = cell_data(:, 1:2); [spike_time_count, bin] = histc(spike_time, time_edges); %", "sampling time Ts = 0.0333; duration = size(data,1) * Ts;", "data = reshape(data, 3, length(data)/3); data = data'; size(data) %", "Tmax]; % 250 ms per bin % interpolated rat's position", "[spike_time_count, bin] = histc(spike_time, time_edges); % column vector % if", 
"periods T = length(time_edges); AllSpikeData = zeros(C,T); for i=1:C str", "= length(vel_ind); % using Run + pause periods T =", "per bin % interpolated rat's position in time bins Rat_pos", "velocity threshold % using RUN only T = length(vel_ind); %", "= reshape(data, 3, length(data)/3); data = data'; size(data) % 43799-by-3", "size(data) % 43799-by-3 fclose(fid); % sampling time Ts = 0.0333;", "data'; size(data) % 43799-by-3 fclose(fid); % sampling time Ts =", "= [Tmin: 0.25: Tmax]; % 250 ms per bin %", "% using RUN only T = length(vel_ind); % using Run", "abs(diff(Rat_pos, 1, 1 )); % row difference vel = [vel(1,", "[data(:, 1), data(:, 2)], time_edges'); vel = abs(diff(Rat_pos, 1, 1", "import loadmat data = loadmat(\"data/hipp_2dtrack_a/smJun03p2.dat\") N = 49 data =", "period only uncomment this % spike_time_count = spike_time_count(vel_ind); AllSpikeData(i, :)", "= histc(spike_time, time_edges); % column vector % if analyzing the", "vel = [vel(1, :); vel]; % 250 ms rat_vel =", "time_edges'); vel = abs(diff(Rat_pos, 1, 1 )); % row difference", "if analyzing the RUN period only uncomment this % spike_time_count", "1 )); % row difference vel = [vel(1, :); vel];", "+ vel(:, 2).^2); % unit: cm/s vel_ind = find(rat_vel >=", "only T = length(vel_ind); % using Run + pause periods", "cell_data(:, 1:2); [spike_time_count, bin] = histc(spike_time, time_edges); % column vector", "= length(time_edges); AllSpikeData = zeros(C,T); for i=1:C str = ['Cell_num'", "length(cell_data)/3)'; spike_time = cell_data(:, 3); spike_pos = cell_data(:, 1:2); [spike_time_count,", "for i=1:C str = ['Cell_num' num2str(i)]; fid = fopen(str, 'r');", "rat's position in time bins Rat_pos = interp1(data(:, 3), [data(:,", "RUN velocity threshold % using RUN only T = length(vel_ind);", "np from scipy.io import loadmat data = loadmat(\"data/hipp_2dtrack_a/smJun03p2.dat\") N =", "bin % interpolated rat's position in time bins Rat_pos =", "% spike_time_count = spike_time_count(vel_ind); 
AllSpikeData(i, :) = spike_time_count'; fclose(fid); end", "using RUN only T = length(vel_ind); % using Run +", "import numpy as np from scipy.io import loadmat data =", "this % spike_time_count = spike_time_count(vel_ind); AllSpikeData(i, :) = spike_time_count'; fclose(fid);", "position in time bins Rat_pos = interp1(data(:, 3), [data(:, 1),", "= data'; size(data) % 43799-by-3 fclose(fid); % sampling time Ts", ")); % row difference vel = [vel(1, :); vel]; %", "+ pause periods T = length(time_edges); AllSpikeData = zeros(C,T); for", "interpolated rat's position in time bins Rat_pos = interp1(data(:, 3),", "T = length(vel_ind); % using Run + pause periods T", "2)], time_edges'); vel = abs(diff(Rat_pos, 1, 1 )); % row", "vector % if analyzing the RUN period only uncomment this", "RUN period only uncomment this % spike_time_count = spike_time_count(vel_ind); AllSpikeData(i,", "find(rat_vel >= 10); % RUN velocity threshold % using RUN", "loadmat data = loadmat(\"data/hipp_2dtrack_a/smJun03p2.dat\") N = 49 data = reshape(data,", "= loadmat(\"data/hipp_2dtrack_a/smJun03p2.dat\") N = 49 data = reshape(data, 3, length(data)/3);", "1).^2 + vel(:, 2).^2); % unit: cm/s vel_ind = find(rat_vel", "fopen(str, 'r'); cell_data = fscanf(fid, '%f'); cell_data = reshape(cell_data, 3,", "sqrt(vel(:, 1).^2 + vel(:, 2).^2); % unit: cm/s vel_ind =", "unit: cm/s vel_ind = find(rat_vel >= 10); % RUN velocity", "% sampling time Ts = 0.0333; duration = size(data,1) *", "% row difference vel = [vel(1, :); vel]; % 250", "second Tmax = data(end, 3); Tmin = data(1,3); time_edges =", "import os import numpy as np from scipy.io import loadmat", "% interpolated rat's position in time bins Rat_pos = interp1(data(:,", "3); Tmin = data(1,3); time_edges = [Tmin: 0.25: Tmax]; %", "% 250 ms per bin % interpolated rat's position in", "row difference vel = [vel(1, :); vel]; % 250 ms", "[Tmin: 0.25: Tmax]; % 250 ms per bin % interpolated", "str = ['Cell_num' num2str(i)]; fid = fopen(str, 'r'); cell_data =", 
"T = length(time_edges); AllSpikeData = zeros(C,T); for i=1:C str =", "% using Run + pause periods T = length(time_edges); AllSpikeData", "AllSpikeData = zeros(C,T); for i=1:C str = ['Cell_num' num2str(i)]; fid", "= fscanf(fid, '%f'); cell_data = reshape(cell_data, 3, length(cell_data)/3)'; spike_time =", "= data(1,3); time_edges = [Tmin: 0.25: Tmax]; % 250 ms", "vel_ind = find(rat_vel >= 10); % RUN velocity threshold %", "Ts = 0.0333; duration = size(data,1) * Ts; % in", "'%f'); cell_data = reshape(cell_data, 3, length(cell_data)/3)'; spike_time = cell_data(:, 3);", "% column vector % if analyzing the RUN period only", "uncomment this % spike_time_count = spike_time_count(vel_ind); AllSpikeData(i, :) = spike_time_count';", "= [vel(1, :); vel]; % 250 ms rat_vel = 4", "% in second Tmax = data(end, 3); Tmin = data(1,3);", "3, length(cell_data)/3)'; spike_time = cell_data(:, 3); spike_pos = cell_data(:, 1:2);", "Rat_pos = interp1(data(:, 3), [data(:, 1), data(:, 2)], time_edges'); vel", "zeros(C,T); for i=1:C str = ['Cell_num' num2str(i)]; fid = fopen(str,", "scipy.io import loadmat data = loadmat(\"data/hipp_2dtrack_a/smJun03p2.dat\") N = 49 data", "numpy as np from scipy.io import loadmat data = loadmat(\"data/hipp_2dtrack_a/smJun03p2.dat\")", "histc(spike_time, time_edges); % column vector % if analyzing the RUN", "* sqrt(vel(:, 1).^2 + vel(:, 2).^2); % unit: cm/s vel_ind", "= reshape(cell_data, 3, length(cell_data)/3)'; spike_time = cell_data(:, 3); spike_pos =", "% if analyzing the RUN period only uncomment this %", "N = 49 data = reshape(data, 3, length(data)/3); data =", "Tmin = data(1,3); time_edges = [Tmin: 0.25: Tmax]; % 250", "2).^2); % unit: cm/s vel_ind = find(rat_vel >= 10); %", "3); spike_pos = cell_data(:, 1:2); [spike_time_count, bin] = histc(spike_time, time_edges);", "ms per bin % interpolated rat's position in time bins", "spike_time = cell_data(:, 3); spike_pos = cell_data(:, 1:2); [spike_time_count, bin]", "reshape(cell_data, 3, 
length(cell_data)/3)'; spike_time = cell_data(:, 3); spike_pos = cell_data(:,", "= fopen(str, 'r'); cell_data = fscanf(fid, '%f'); cell_data = reshape(cell_data,", "= data(end, 3); Tmin = data(1,3); time_edges = [Tmin: 0.25:", "3), [data(:, 1), data(:, 2)], time_edges'); vel = abs(diff(Rat_pos, 1,", "vel]; % 250 ms rat_vel = 4 * sqrt(vel(:, 1).^2", "= find(rat_vel >= 10); % RUN velocity threshold % using", "reshape(data, 3, length(data)/3); data = data'; size(data) % 43799-by-3 fclose(fid);", "% 250 ms rat_vel = 4 * sqrt(vel(:, 1).^2 +", "from scipy.io import loadmat data = loadmat(\"data/hipp_2dtrack_a/smJun03p2.dat\") N = 49", "length(time_edges); AllSpikeData = zeros(C,T); for i=1:C str = ['Cell_num' num2str(i)];", "fscanf(fid, '%f'); cell_data = reshape(cell_data, 3, length(cell_data)/3)'; spike_time = cell_data(:,", "interp1(data(:, 3), [data(:, 1), data(:, 2)], time_edges'); vel = abs(diff(Rat_pos,", "4 * sqrt(vel(:, 1).^2 + vel(:, 2).^2); % unit: cm/s", "* Ts; % in second Tmax = data(end, 3); Tmin", "difference vel = [vel(1, :); vel]; % 250 ms rat_vel", "loadmat(\"data/hipp_2dtrack_a/smJun03p2.dat\") N = 49 data = reshape(data, 3, length(data)/3); data", "cm/s vel_ind = find(rat_vel >= 10); % RUN velocity threshold", "ms rat_vel = 4 * sqrt(vel(:, 1).^2 + vel(:, 2).^2);", "0.0333; duration = size(data,1) * Ts; % in second Tmax", "duration = size(data,1) * Ts; % in second Tmax =", "threshold % using RUN only T = length(vel_ind); % using", "= cell_data(:, 1:2); [spike_time_count, bin] = histc(spike_time, time_edges); % column", "os import numpy as np from scipy.io import loadmat data", ">= 10); % RUN velocity threshold % using RUN only", "using Run + pause periods T = length(time_edges); AllSpikeData =", "Ts; % in second Tmax = data(end, 3); Tmin =", "[vel(1, :); vel]; % 250 ms rat_vel = 4 *", "Run + pause periods T = length(time_edges); AllSpikeData = zeros(C,T);", "only uncomment this % spike_time_count = spike_time_count(vel_ind); AllSpikeData(i, :) =", "in 
time bins Rat_pos = interp1(data(:, 3), [data(:, 1), data(:,", "size(data,1) * Ts; % in second Tmax = data(end, 3);", "cell_data(:, 3); spike_pos = cell_data(:, 1:2); [spike_time_count, bin] = histc(spike_time,", "= abs(diff(Rat_pos, 1, 1 )); % row difference vel =", "as np from scipy.io import loadmat data = loadmat(\"data/hipp_2dtrack_a/smJun03p2.dat\") N", "0.25: Tmax]; % 250 ms per bin % interpolated rat's", "= interp1(data(:, 3), [data(:, 1), data(:, 2)], time_edges'); vel =", "data = data'; size(data) % 43799-by-3 fclose(fid); % sampling time", ":); vel]; % 250 ms rat_vel = 4 * sqrt(vel(:,", "= size(data,1) * Ts; % in second Tmax = data(end,", "= 49 data = reshape(data, 3, length(data)/3); data = data';", "['Cell_num' num2str(i)]; fid = fopen(str, 'r'); cell_data = fscanf(fid, '%f');", "49 data = reshape(data, 3, length(data)/3); data = data'; size(data)", "rat_vel = 4 * sqrt(vel(:, 1).^2 + vel(:, 2).^2); %", "vel(:, 2).^2); % unit: cm/s vel_ind = find(rat_vel >= 10);", "250 ms per bin % interpolated rat's position in time", "= 4 * sqrt(vel(:, 1).^2 + vel(:, 2).^2); % unit:", "1, 1 )); % row difference vel = [vel(1, :);", "length(data)/3); data = data'; size(data) % 43799-by-3 fclose(fid); % sampling", "% RUN velocity threshold % using RUN only T =", "time_edges = [Tmin: 0.25: Tmax]; % 250 ms per bin", "vel = abs(diff(Rat_pos, 1, 1 )); % row difference vel", "data(1,3); time_edges = [Tmin: 0.25: Tmax]; % 250 ms per", "data = loadmat(\"data/hipp_2dtrack_a/smJun03p2.dat\") N = 49 data = reshape(data, 3,", "43799-by-3 fclose(fid); % sampling time Ts = 0.0333; duration =", "in second Tmax = data(end, 3); Tmin = data(1,3); time_edges", "= zeros(C,T); for i=1:C str = ['Cell_num' num2str(i)]; fid =", "i=1:C str = ['Cell_num' num2str(i)]; fid = fopen(str, 'r'); cell_data", "= ['Cell_num' num2str(i)]; fid = fopen(str, 'r'); cell_data = fscanf(fid,", "time_edges); % column vector % if analyzing the RUN period", "data(end, 3); Tmin = data(1,3); time_edges = [Tmin: 0.25: 
Tmax];", "fid = fopen(str, 'r'); cell_data = fscanf(fid, '%f'); cell_data =", "cell_data = reshape(cell_data, 3, length(cell_data)/3)'; spike_time = cell_data(:, 3); spike_pos", "RUN only T = length(vel_ind); % using Run + pause", "Tmax = data(end, 3); Tmin = data(1,3); time_edges = [Tmin:" ]
[ "is outside of our test filter for age aa_data =", "'service_3:action_2', 'service_4:action_1', 'service_4:action_2'] # service_1 and service_2 both used more", "repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'], ['a', 'b']) == ['a:b', 'a:c', 'b:a']", "permissions = repokid.utils.roledata._get_role_permissions(test_role) assert permissions == set(ROLE_POLICIES['unused_ec2']['ec2_perms']) @patch('repokid.hooks.call_hooks') def test_get_repoable_permissions(self,", "governing permissions and # limitations under the License. import time", "== ['iam_perms'] def test_find_newly_added_permissions(self): old_policy = ROLE_POLICIES['all_services_used'] new_policy = ROLE_POLICIES['unused_ec2']", "['s3:deletebucket'], 'Resource': ['*'], 'Effect': 'Allow'}]}} assert empty_policies == ['iam_perms'] def", "2.0 (the \"License\"); # you may not use this file", "* 1000, \"serviceNamespace\": \"s3\"}] } class TestRoledata(object): @patch('repokid.utils.roledata.expand_policy') @patch('repokid.utils.roledata.get_actions_from_statement') @patch('repokid.utils.roledata.all_permissions')", "[{'Policy': ROLE_POLICIES['unused_ec2']}] roles[1].disqualified_by = ['some_filter'] roles[1].aa_data = 'some_aa_data' # no", "1000, \"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\": 0, \"serviceNamespace\": \"ec2\"}], \"arn:aws:iam::123456789012:role/young_role\": [ {\"lastAuthenticated\":", "== {'s3_perms': {'Version': '2012-10-17', 'Statement': [{'Action': ['s3:deletebucket'], 'Resource': ['*'], 'Effect':", "assert roles[1].repoable_services == [] assert roles[2].repoable_permissions == 0 assert roles[2].repoable_services", "assert repokid.utils.roledata._convert_repoed_service_to_sorted_perms_and_services(repoed_services) == ( expected_permissions, expected_services ) def test_get_epoch_authenticated(self): assert(repokid.utils.roledata._get_epoch_authenticated(1545787620000)", "assert(repokid.utils.roledata._get_epoch_authenticated(154578762) == (None, 
False)) def test_filter_scheduled_repoable_perms(self): assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c',", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "'a:c', 'b:a'] assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'], ['a:b', 'a:c']) ==", "used too recently, service_4 action 2 is in no_repo_permissions and", "['a:b', 'a:c', 'b:a'] assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'], ['a:b', 'a:c'])", "ROLE_POLICIES['all_services_used'] repoable_permissions = set(['iam:addroletoinstanceprofile', 'iam:attachrolepolicy', 's3:createbucket']) rewritten_policies, empty_policies = repokid.utils.roledata._get_repoed_policy(policies,", "all_perms = ['a:j', 'a:k', 'b:l', 'c:m', 'c:n'] repoable_perms = ['b:l',", "def test_filter_scheduled_repoable_perms(self): assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'], ['a:c', 'b']) ==", "'service_1:action_4', 'service_2:action_1', 'service_3:action_1', 'service_3:action_2', 'service_4:action_1', 'service_4:action_2'] # service_1 and service_2", "@patch('repokid.utils.roledata.get_actions_from_statement') @patch('repokid.utils.roledata.all_permissions') def test_get_role_permissions(self, mock_all_permissions, mock_get_actions_from_statement, mock_expand_policy): test_role = Role(ROLES[0])", "= [['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy', 'ec2:AllocateHosts', 'ec2:AssociateAddress'], ['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'], ['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']] mock_call_hooks.return_value", "2017 Netflix, Inc. 
# # Licensed under the Apache License,", "sure we get the latest test_role.policies = [{'Policy': ROLE_POLICIES['all_services_used']}, {'Policy':", "int(time.time()) * 1000, \"serviceNamespace\": \"s3\"}], \"arn:aws:iam::123456789012:role/unused_ec2\": [ {\"lastAuthenticated\": int(time.time()) *", "[] def test_get_repoed_policy(self): policies = ROLE_POLICIES['all_services_used'] repoable_permissions = set(['iam:addroletoinstanceprofile', 'iam:attachrolepolicy',", "@patch('repokid.utils.roledata.expand_policy') @patch('repokid.utils.roledata.get_actions_from_statement') @patch('repokid.utils.roledata.all_permissions') def test_get_role_permissions(self, mock_all_permissions, mock_get_actions_from_statement, mock_expand_policy): test_role =", "= {} permissions = ['service_1:action_1', 'service_1:action_2', 'service_1:action_3', 'service_1:action_4', 'service_2:action_1', 'service_3:action_1',", "roles[1].disqualified_by = ['some_filter'] roles[1].aa_data = 'some_aa_data' # no AA data", "from mock import patch import repokid.utils.roledata from repokid.role import Role", "roles[1].repoable_services == [] assert roles[2].repoable_permissions == 0 assert roles[2].repoable_services ==", "ROLE_POLICIES['unused_ec2']}] mock_all_permissions.return_value = all_permissions mock_get_actions_from_statement.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms'] mock_expand_policy.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms']", "use this file except in compliance with the License. 
#", "patch import repokid.utils.roledata from repokid.role import Role from repokid.tests.test_repokid_cli import", "= 1 repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_SERVICES = ['service_2'] repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_ACTIONS = ['service_1:action_3', 'service_1:action_4'] hooks", "assert repoable_permissions == set(['service_1:action_1', 'service_1:action_2', 'service_4:action_1']) @patch('repokid.utils.roledata._get_role_permissions') @patch('repokid.utils.roledata._get_repoable_permissions') @patch('repokid.hooks.call_hooks') def", "set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']) mock_get_repoable_permissions.side_effect = [set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'])] minimum_age = 90 repokid.utils.roledata._calculate_repo_scores(roles,", "def test_find_newly_added_permissions(self): old_policy = ROLE_POLICIES['all_services_used'] new_policy = ROLE_POLICIES['unused_ec2'] new_perms =", "- 1, 'service_4:action_2': time.time() + 1000} repoable_decision = repokid.utils.roledata.RepoablePermissionDecision() repoable_decision.repoable", "and action_4 are unsupported actions, service_2 is an unsupported service,", "['some_filter'] roles[1].aa_data = 'some_aa_data' # no AA data roles[2].policies =", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "* 1000}, {'serviceNamespace': 'service_2', 'lastAuthenticated': (time.time() - 90000) * 1000},", "License. 
# You may obtain a copy of the License", "== [] def test_get_repoed_policy(self): policies = ROLE_POLICIES['all_services_used'] repoable_permissions = set(['iam:addroletoinstanceprofile',", "@patch('repokid.hooks.call_hooks') def test_get_repoable_permissions(self, mock_call_hooks): minimum_age = 1 repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_SERVICES = ['service_2']", "['ec2', 'route53'] expected_permissions = ['dynamodb:def', 's3:abc', 'ses:ghi', 'ses:jkl'] assert repokid.utils.roledata._convert_repoed_service_to_sorted_perms_and_services(repoed_services)", "Role(ROLES[2])] roles[0].disqualified_by = [] roles[0].aa_data = 'some_aa_data' # disqualified by", "mock_all_permissions.return_value = all_permissions mock_get_actions_from_statement.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms'] mock_expand_policy.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms'] permissions", "'c:m'] expected_repoed_services = ['b'] expected_repoed_permissions = ['c:m'] assert (repokid.utils.roledata._convert_repoable_perms_to_perms_and_services(all_perms, repoable_perms)", "under the License is distributed on an \"AS IS\" BASIS,", "hooks) assert roles[0].repoable_permissions == 2 assert roles[0].repoable_services == ['iam'] assert", "roles[2].repoable_permissions == 0 assert roles[2].repoable_services == [] def test_get_repoed_policy(self): policies", "License for the specific language governing permissions and # limitations", "'ec2:attachvolume', 'ec2:createsnapshot', 's3:createbucket', 's3:getobject'] # empty policy to make sure", "expected_repoed_services)) def test_convert_repoed_service_to_sorted_perms_and_services(self): repoed_services = ['route53', 'ec2', 's3:abc', 'dynamodb:def', 'ses:ghi',", "- 90000) * 1000}, {'serviceNamespace': 'service_2', 'lastAuthenticated': (time.time() - 90000)", "test_find_newly_added_permissions(self): old_policy = ROLE_POLICIES['all_services_used'] new_policy = ROLE_POLICIES['unused_ec2'] new_perms = 
repokid.utils.roledata.find_newly_added_permissions(old_policy,", "} class TestRoledata(object): @patch('repokid.utils.roledata.expand_policy') @patch('repokid.utils.roledata.get_actions_from_statement') @patch('repokid.utils.roledata.all_permissions') def test_get_role_permissions(self, mock_all_permissions, mock_get_actions_from_statement,", "= 'some_aa_data' # disqualified by a filter roles[1].policies = [{'Policy':", "* 1000, \"serviceNamespace\": \"s3\"}], \"arn:aws:iam::123456789012:role/unused_ec2\": [ {\"lastAuthenticated\": int(time.time()) * 1000,", "ROLE_POLICIES['all_services_used']}, {'Policy': ROLE_POLICIES['unused_ec2']}] mock_all_permissions.return_value = all_permissions mock_get_actions_from_statement.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms'] mock_expand_policy.return_value", "roles[2].repoable_services == [] def test_get_repoed_policy(self): policies = ROLE_POLICIES['all_services_used'] repoable_permissions =", "test_role.policies = [{'Policy': ROLE_POLICIES['all_services_used']}, {'Policy': ROLE_POLICIES['unused_ec2']}] mock_all_permissions.return_value = all_permissions mock_get_actions_from_statement.return_value", "['a:c', 'b']) == ['a:c', 'b:a'] assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'],", "def test_get_repoable_permissions(self, mock_call_hooks): minimum_age = 1 repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_SERVICES = ['service_2'] repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_ACTIONS", "permissions and # limitations under the License. 
import time from", "['iam'] assert roles[1].repoable_permissions == 0 assert roles[1].repoable_services == [] assert", "== set(ROLE_POLICIES['unused_ec2']['ec2_perms']) @patch('repokid.hooks.call_hooks') def test_get_repoable_permissions(self, mock_call_hooks): minimum_age = 1 repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_SERVICES", "['service_2'] repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_ACTIONS = ['service_1:action_3', 'service_1:action_4'] hooks = {} permissions =", "= { \"arn:aws:iam::123456789012:role/all_services_used\": [ {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"iam\"},", "in compliance with the License. # You may obtain a", "mock_get_role_permissions.side_effect = [['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy', 'ec2:AllocateHosts', 'ec2:AssociateAddress'], ['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'], ['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']]", "= ROLE_POLICIES['all_services_used'] new_policy = ROLE_POLICIES['unused_ec2'] new_perms = repokid.utils.roledata.find_newly_added_permissions(old_policy, new_policy) assert", "software # distributed under the License is distributed on an", "\"serviceNamespace\": \"s3\"}], \"arn:aws:iam::123456789012:role/unused_ec2\": [ {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"iam\"},", "\"s3\"}] } class TestRoledata(object): @patch('repokid.utils.roledata.expand_policy') @patch('repokid.utils.roledata.get_actions_from_statement') @patch('repokid.utils.roledata.all_permissions') def test_get_role_permissions(self, mock_all_permissions,", "[{'serviceNamespace': 'service_1', 'lastAuthenticated': (time.time() - 90000) * 1000}, {'serviceNamespace': 'service_2',", "'s3:createbucket', 's3:getobject'] # empty policy to make sure we get", "repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'], ['a:c', 'b']) == ['a:c', 'b:a'] assert", "\"arn:aws:iam::123456789012:role/young_role\": [ 
{\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\": int(time.time())", "'service_3:action_1', 'service_3:action_2', 'service_4:action_1', 'service_4:action_2'] # service_1 and service_2 both used", "too recently, service_4 action 2 is in no_repo_permissions and not", "= True mock_call_hooks.return_value = {'potentially_repoable_permissions': {'service_1:action_1': repoable_decision, 'service_1:action_2': repoable_decision, 'service_4:action_1':", "in no_repo_permissions and not expired assert repoable_permissions == set(['service_1:action_1', 'service_1:action_2',", "data roles[2].policies = [{'Policy': ROLE_POLICIES['all_services_used']}] roles[2].disqualified_by = [] roles[2].aa_data =", "= None hooks = {} mock_get_role_permissions.side_effect = [['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy', 'ec2:AllocateHosts',", "* 1000}, {'serviceNamespace': 'service_3', 'lastAuthenticated': time.time() * 1000}] no_repo_permissions =", "policies = ROLE_POLICIES['all_services_used'] repoable_permissions = set(['iam:addroletoinstanceprofile', 'iam:attachrolepolicy', 's3:createbucket']) rewritten_policies, empty_policies", "repokid.utils.roledata from repokid.role import Role from repokid.tests.test_repokid_cli import ROLE_POLICIES, ROLES", "'ses:ghi', 'ses:jkl'] assert repokid.utils.roledata._convert_repoed_service_to_sorted_perms_and_services(repoed_services) == ( expected_permissions, expected_services ) def", "# empty policy to make sure we get the latest", "= {'service_4:action_1': time.time() - 1, 'service_4:action_2': time.time() + 1000} repoable_decision", "['a:b', 'a:c', 'b:a'], ['a:c', 'b']) == ['a:c', 'b:a'] assert repokid.utils.roledata._filter_scheduled_repoable_perms(", "'s3:getobject'] # empty policy to make sure we get the", "['c:m'] assert (repokid.utils.roledata._convert_repoable_perms_to_perms_and_services(all_perms, repoable_perms) == (expected_repoed_permissions, expected_repoed_services)) def 
test_convert_repoed_service_to_sorted_perms_and_services(self): repoed_services", "(expected_repoed_permissions, expected_repoed_services)) def test_convert_repoed_service_to_sorted_perms_and_services(self): repoed_services = ['route53', 'ec2', 's3:abc', 'dynamodb:def',", "= {'potentially_repoable_permissions': {'service_1:action_1': repoable_decision, 'service_1:action_2': repoable_decision, 'service_4:action_1': repoable_decision}} repoable_permissions =", "def test_convert_repoed_service_to_sorted_perms_and_services(self): repoed_services = ['route53', 'ec2', 's3:abc', 'dynamodb:def', 'ses:ghi', 'ses:jkl']", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", ") def test_get_epoch_authenticated(self): assert(repokid.utils.roledata._get_epoch_authenticated(1545787620000) == (1545787620, True)) assert(repokid.utils.roledata._get_epoch_authenticated(1545787620) == (1545787620,", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "expected_permissions, expected_services ) def test_get_epoch_authenticated(self): assert(repokid.utils.roledata._get_epoch_authenticated(1545787620000) == (1545787620, True)) assert(repokid.utils.roledata._get_epoch_authenticated(1545787620)", "the License. 
# You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "service_3 # was used too recently, service_4 action 2 is", "['a:j', 'a:k', 'b:l', 'c:m', 'c:n'] repoable_perms = ['b:l', 'c:m'] expected_repoed_services", "to in writing, software # distributed under the License is", "disqualified by a filter roles[1].policies = [{'Policy': ROLE_POLICIES['unused_ec2']}] roles[1].disqualified_by =", "both used more than a day ago, which is outside", "hooks = {} mock_get_role_permissions.side_effect = [['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy', 'ec2:AllocateHosts', 'ec2:AssociateAddress'], ['iam:AddRoleToInstanceProfile',", "# See the License for the specific language governing permissions", "roles[1].aa_data = 'some_aa_data' # no AA data roles[2].policies = [{'Policy':", "['a:c', 'b:a'] assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'], ['a', 'b']) ==", "from repokid.role import Role from repokid.tests.test_repokid_cli import ROLE_POLICIES, ROLES AARDVARK_DATA", "no_repo_permissions = {'service_4:action_1': time.time() - 1, 'service_4:action_2': time.time() + 1000}", "# disqualified by a filter roles[1].policies = [{'Policy': ROLE_POLICIES['unused_ec2']}] roles[1].disqualified_by", "\"arn:aws:iam::123456789012:role/all_services_used\": [ {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\": int(time.time())", "or agreed to in writing, software # distributed under the", "[{'Policy': ROLE_POLICIES['all_services_used']}, {'Policy': ROLE_POLICIES['unused_ec2']}] mock_all_permissions.return_value = all_permissions mock_get_actions_from_statement.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms']", "# Copyright 2017 Netflix, Inc. 
# # Licensed under the", "required by applicable law or agreed to in writing, software", "0, \"serviceNamespace\": \"ec2\"}], \"arn:aws:iam::123456789012:role/young_role\": [ {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\":", "[['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy', 'ec2:AllocateHosts', 'ec2:AssociateAddress'], ['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'], ['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']] mock_call_hooks.return_value =", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "repokid.utils.roledata.RepoablePermissionDecision() repoable_decision.repoable = True mock_call_hooks.return_value = {'potentially_repoable_permissions': {'service_1:action_1': repoable_decision, 'service_1:action_2':", "service, service_3 # was used too recently, service_4 action 2", "day ago, which is outside of our test filter for", "was used too recently, service_4 action 2 is in no_repo_permissions", "with the License. # You may obtain a copy of", "repoable_permissions = repokid.utils.roledata._get_repoable_permissions(None, 'test_name', permissions, aa_data, no_repo_permissions, minimum_age, hooks) #", "expected_repoed_services = ['b'] expected_repoed_permissions = ['c:m'] assert (repokid.utils.roledata._convert_repoable_perms_to_perms_and_services(all_perms, repoable_perms) ==", "\"iam\"}, {\"lastAuthenticated\": 0, \"serviceNamespace\": \"ec2\"}], \"arn:aws:iam::123456789012:role/young_role\": [ {\"lastAuthenticated\": int(time.time()) *", "age aa_data = [{'serviceNamespace': 'service_1', 'lastAuthenticated': (time.time() - 90000) *", "mock_call_hooks): minimum_age = 1 repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_SERVICES = ['service_2'] repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_ACTIONS = ['service_1:action_3',", "'ec2:AllocateHosts', 'ec2:AssociateAddress'], ['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'], ['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']] 
mock_call_hooks.return_value = set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'])", "'iam:AttachRolePolicy']] mock_call_hooks.return_value = set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']) mock_get_repoable_permissions.side_effect = [set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'])] minimum_age", "\"s3\"}], \"arn:aws:iam::123456789012:role/unused_ec2\": [ {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\":", "'lastAuthenticated': time.time() * 1000}] no_repo_permissions = {'service_4:action_1': time.time() - 1,", "get the latest test_role.policies = [{'Policy': ROLE_POLICIES['all_services_used']}, {'Policy': ROLE_POLICIES['unused_ec2']}] mock_all_permissions.return_value", "repokid.role import Role from repokid.tests.test_repokid_cli import ROLE_POLICIES, ROLES AARDVARK_DATA =", "'service_1', 'lastAuthenticated': (time.time() - 90000) * 1000}, {'serviceNamespace': 'service_2', 'lastAuthenticated':", "compliance with the License. 
# You may obtain a copy", "1000} repoable_decision = repokid.utils.roledata.RepoablePermissionDecision() repoable_decision.repoable = True mock_call_hooks.return_value = {'potentially_repoable_permissions':", "agreed to in writing, software # distributed under the License", "['a:b', 'a:c', 'b:a'], ['a', 'b']) == ['a:b', 'a:c', 'b:a'] assert", "test_calculate_repo_scores(self, mock_call_hooks, mock_get_repoable_permissions, mock_get_role_permissions): roles = [Role(ROLES[0]), Role(ROLES[1]), Role(ROLES[2])] roles[0].disqualified_by", "'a:c', 'b:a'], ['a', 'b']) == ['a:b', 'a:c', 'b:a'] assert repokid.utils.roledata._filter_scheduled_repoable_perms(", "True mock_call_hooks.return_value = {'potentially_repoable_permissions': {'service_1:action_1': repoable_decision, 'service_1:action_2': repoable_decision, 'service_4:action_1': repoable_decision}}", "== ( expected_permissions, expected_services ) def test_get_epoch_authenticated(self): assert(repokid.utils.roledata._get_epoch_authenticated(1545787620000) == (1545787620,", "'iam:AttachRolePolicy']) mock_get_repoable_permissions.side_effect = [set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'])] minimum_age = 90 repokid.utils.roledata._calculate_repo_scores(roles, minimum_age,", "distributed under the License is distributed on an \"AS IS\"", "= [{'serviceNamespace': 'service_1', 'lastAuthenticated': (time.time() - 90000) * 1000}, {'serviceNamespace':", "= ['service_1:action_1', 'service_1:action_2', 'service_1:action_3', 'service_1:action_4', 'service_2:action_1', 'service_3:action_1', 'service_3:action_2', 'service_4:action_1', 'service_4:action_2']", "* 1000, \"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"s3\"}]", "= repokid.utils.roledata._get_repoed_policy(policies, repoable_permissions) assert rewritten_policies == {'s3_perms': {'Version': '2012-10-17', 'Statement':", "\"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\": int(time.time()) * 1000, 
\"serviceNamespace\": \"s3\"}], \"arn:aws:iam::123456789012:role/unused_ec2\": [", "unsupported service, service_3 # was used too recently, service_4 action", "= [{'Policy': ROLE_POLICIES['unused_ec2']}] roles[1].disqualified_by = ['some_filter'] roles[1].aa_data = 'some_aa_data' #", "all_permissions mock_get_actions_from_statement.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms'] mock_expand_policy.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms'] permissions = repokid.utils.roledata._get_role_permissions(test_role)", "'service_4:action_1']) @patch('repokid.utils.roledata._get_role_permissions') @patch('repokid.utils.roledata._get_repoable_permissions') @patch('repokid.hooks.call_hooks') def test_calculate_repo_scores(self, mock_call_hooks, mock_get_repoable_permissions, mock_get_role_permissions): roles", "express or implied. # See the License for the specific", "'service_4:action_1': repoable_decision}} repoable_permissions = repokid.utils.roledata._get_repoable_permissions(None, 'test_name', permissions, aa_data, no_repo_permissions, minimum_age,", "except in compliance with the License. 
# You may obtain", "assert roles[2].repoable_services == [] def test_get_repoed_policy(self): policies = ROLE_POLICIES['all_services_used'] repoable_permissions", "= repokid.utils.roledata._get_repoable_permissions(None, 'test_name', permissions, aa_data, no_repo_permissions, minimum_age, hooks) # service_1:action_3", "= ['b:l', 'c:m'] expected_repoed_services = ['b'] expected_repoed_permissions = ['c:m'] assert", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "assert rewritten_policies == {'s3_perms': {'Version': '2012-10-17', 'Statement': [{'Action': ['s3:deletebucket'], 'Resource':", "not use this file except in compliance with the License.", "int(time.time()) * 1000, \"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\":", "def test_convert_repoable_perms_to_perms_and_services(self): all_perms = ['a:j', 'a:k', 'b:l', 'c:m', 'c:n'] repoable_perms", "test_get_epoch_authenticated(self): assert(repokid.utils.roledata._get_epoch_authenticated(1545787620000) == (1545787620, True)) assert(repokid.utils.roledata._get_epoch_authenticated(1545787620) == (1545787620, True)) assert(repokid.utils.roledata._get_epoch_authenticated(154578762)", "writing, software # distributed under the License is distributed on", "expired assert repoable_permissions == set(['service_1:action_1', 'service_1:action_2', 'service_4:action_1']) @patch('repokid.utils.roledata._get_role_permissions') @patch('repokid.utils.roledata._get_repoable_permissions') @patch('repokid.hooks.call_hooks')", "# service_1 and service_2 both used more than a day", "rewritten_policies, empty_policies = repokid.utils.roledata._get_repoed_policy(policies, repoable_permissions) assert rewritten_policies == {'s3_perms': {'Version':", "you may not use this file except in compliance with", "action 2 is in no_repo_permissions and not expired assert repoable_permissions", "= [] roles[0].aa_data = 'some_aa_data' # disqualified by a filter", "= 
all_permissions mock_get_actions_from_statement.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms'] mock_expand_policy.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms'] permissions =", "aa_data, no_repo_permissions, minimum_age, hooks) # service_1:action_3 and action_4 are unsupported", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "'c:m', 'c:n'] repoable_perms = ['b:l', 'c:m'] expected_repoed_services = ['b'] expected_repoed_permissions", "repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_ACTIONS = ['service_1:action_3', 'service_1:action_4'] hooks = {} permissions = ['service_1:action_1',", "expected_services ) def test_get_epoch_authenticated(self): assert(repokid.utils.roledata._get_epoch_authenticated(1545787620000) == (1545787620, True)) assert(repokid.utils.roledata._get_epoch_authenticated(1545787620) ==", "'service_1:action_2': repoable_decision, 'service_4:action_1': repoable_decision}} repoable_permissions = repokid.utils.roledata._get_repoable_permissions(None, 'test_name', permissions, aa_data,", "- 90000) * 1000}, {'serviceNamespace': 'service_3', 'lastAuthenticated': time.time() * 1000}]", "'iam:attachrolepolicy', 's3:createbucket']) rewritten_policies, empty_policies = repokid.utils.roledata._get_repoed_policy(policies, repoable_permissions) assert rewritten_policies ==", "assert (repokid.utils.roledata._convert_repoable_perms_to_perms_and_services(all_perms, repoable_perms) == (expected_repoed_permissions, expected_repoed_services)) def test_convert_repoed_service_to_sorted_perms_and_services(self): repoed_services =", "no_repo_permissions and not expired assert repoable_permissions == set(['service_1:action_1', 'service_1:action_2', 'service_4:action_1'])", "= ['route53', 'ec2', 's3:abc', 'dynamodb:def', 'ses:ghi', 'ses:jkl'] expected_services = ['ec2',", "repokid.utils.roledata._convert_repoed_service_to_sorted_perms_and_services(repoed_services) == ( expected_permissions, expected_services ) def 
test_get_epoch_authenticated(self): assert(repokid.utils.roledata._get_epoch_authenticated(1545787620000) ==", "['a', 'b']) == ['a:b', 'a:c', 'b:a'] assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c',", "'a:c', 'b:a'], ['a:c', 'b']) == ['a:c', 'b:a'] assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b',", "time from mock import patch import repokid.utils.roledata from repokid.role import", "is an unsupported service, service_3 # was used too recently,", "0 assert roles[1].repoable_services == [] assert roles[2].repoable_permissions == 0 assert", "'service_1:action_3', 'service_1:action_4', 'service_2:action_1', 'service_3:action_1', 'service_3:action_2', 'service_4:action_1', 'service_4:action_2'] # service_1 and", "@patch('repokid.utils.roledata.all_permissions') def test_get_role_permissions(self, mock_all_permissions, mock_get_actions_from_statement, mock_expand_policy): test_role = Role(ROLES[0]) all_permissions", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "{'serviceNamespace': 'service_2', 'lastAuthenticated': (time.time() - 90000) * 1000}, {'serviceNamespace': 'service_3',", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "'service_4:action_2'] # service_1 and service_2 both used more than a", "\"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\": 0, \"serviceNamespace\": \"ec2\"}], \"arn:aws:iam::123456789012:role/young_role\": [ {\"lastAuthenticated\": int(time.time())", "AA data roles[2].policies = [{'Policy': ROLE_POLICIES['all_services_used']}] roles[2].disqualified_by = [] roles[2].aa_data", "['service_1:action_3', 'service_1:action_4'] hooks = {} permissions = ['service_1:action_1', 'service_1:action_2', 'service_1:action_3',", "permissions == set(ROLE_POLICIES['unused_ec2']['ec2_perms']) @patch('repokid.hooks.call_hooks') def test_get_repoable_permissions(self, mock_call_hooks): minimum_age = 1", "2 is in no_repo_permissions and not expired assert repoable_permissions ==", "['iam_perms'] def test_find_newly_added_permissions(self): old_policy = ROLE_POLICIES['all_services_used'] new_policy = ROLE_POLICIES['unused_ec2'] new_perms", "mock_get_actions_from_statement.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms'] mock_expand_policy.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms'] permissions = repokid.utils.roledata._get_role_permissions(test_role) assert", "'s3:createbucket']) rewritten_policies, empty_policies = repokid.utils.roledata._get_repoed_policy(policies, repoable_permissions) assert rewritten_policies == {'s3_perms':", "= [] roles[2].aa_data = None hooks = {} mock_get_role_permissions.side_effect =", "minimum_age, hooks) assert roles[0].repoable_permissions == 2 assert roles[0].repoable_services == ['iam']", "repokid.utils.roledata._get_role_permissions(test_role) assert permissions == set(ROLE_POLICIES['unused_ec2']['ec2_perms']) @patch('repokid.hooks.call_hooks') def test_get_repoable_permissions(self, mock_call_hooks): minimum_age", "2 assert 
roles[0].repoable_services == ['iam'] assert roles[1].repoable_permissions == 0 assert", "not expired assert repoable_permissions == set(['service_1:action_1', 'service_1:action_2', 'service_4:action_1']) @patch('repokid.utils.roledata._get_role_permissions') @patch('repokid.utils.roledata._get_repoable_permissions')", "and # limitations under the License. import time from mock", "= [{'Policy': ROLE_POLICIES['all_services_used']}] roles[2].disqualified_by = [] roles[2].aa_data = None hooks", "import patch import repokid.utils.roledata from repokid.role import Role from repokid.tests.test_repokid_cli", "'lastAuthenticated': (time.time() - 90000) * 1000}, {'serviceNamespace': 'service_3', 'lastAuthenticated': time.time()", "[] roles[2].aa_data = None hooks = {} mock_get_role_permissions.side_effect = [['iam:AddRoleToInstanceProfile',", "'service_4:action_1', 'service_4:action_2'] # service_1 and service_2 both used more than", "* 1000}] no_repo_permissions = {'service_4:action_1': time.time() - 1, 'service_4:action_2': time.time()", "= ROLE_POLICIES['unused_ec2']['ec2_perms'] mock_expand_policy.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms'] permissions = repokid.utils.roledata._get_role_permissions(test_role) assert permissions", "roles[2].aa_data = None hooks = {} mock_get_role_permissions.side_effect = [['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy',", "'Allow'}]}} assert empty_policies == ['iam_perms'] def test_find_newly_added_permissions(self): old_policy = ROLE_POLICIES['all_services_used']", "'iam:AttachRolePolicy'], ['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']] mock_call_hooks.return_value = set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']) mock_get_repoable_permissions.side_effect = [set(['iam:AddRoleToInstanceProfile',", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "service_2 is an unsupported service, service_3 # was used too", "= set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']) mock_get_repoable_permissions.side_effect = [set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'])] minimum_age = 90", "{'service_4:action_1': time.time() - 1, 'service_4:action_2': time.time() + 1000} repoable_decision =", "1000}, {'serviceNamespace': 'service_2', 'lastAuthenticated': (time.time() - 90000) * 1000}, {'serviceNamespace':", "= {} mock_get_role_permissions.side_effect = [['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy', 'ec2:AllocateHosts', 'ec2:AssociateAddress'], ['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'],", "the License is distributed on an \"AS IS\" BASIS, #", "import Role from repokid.tests.test_repokid_cli import ROLE_POLICIES, ROLES AARDVARK_DATA = {", "['dynamodb:def', 's3:abc', 'ses:ghi', 'ses:jkl'] assert repokid.utils.roledata._convert_repoed_service_to_sorted_perms_and_services(repoed_services) == ( expected_permissions, expected_services", "1000}] no_repo_permissions = {'service_4:action_1': time.time() - 1, 'service_4:action_2': time.time() +", "[Role(ROLES[0]), Role(ROLES[1]), Role(ROLES[2])] roles[0].disqualified_by = [] roles[0].aa_data = 'some_aa_data' #", "{\"lastAuthenticated\": 0, \"serviceNamespace\": \"ec2\"}], \"arn:aws:iam::123456789012:role/young_role\": [ {\"lastAuthenticated\": int(time.time()) * 1000,", "service_1 and service_2 both used more than a day ago,", "'ec2:createsnapshot', 's3:createbucket', 's3:getobject'] # empty policy to make sure we", "'service_1:action_2', 'service_1:action_3', 'service_1:action_4', 'service_2:action_1', 'service_3:action_1', 'service_3:action_2', 'service_4:action_1', 'service_4:action_2'] # service_1", "1000, \"serviceNamespace\": \"s3\"}] } class TestRoledata(object): @patch('repokid.utils.roledata.expand_policy') @patch('repokid.utils.roledata.get_actions_from_statement') @patch('repokid.utils.roledata.all_permissions') def", 
"roles[2].policies = [{'Policy': ROLE_POLICIES['all_services_used']}] roles[2].disqualified_by = [] roles[2].aa_data = None", "= 'some_aa_data' # no AA data roles[2].policies = [{'Policy': ROLE_POLICIES['all_services_used']}]", "'2012-10-17', 'Statement': [{'Action': ['s3:deletebucket'], 'Resource': ['*'], 'Effect': 'Allow'}]}} assert empty_policies", "'service_4:action_2': time.time() + 1000} repoable_decision = repokid.utils.roledata.RepoablePermissionDecision() repoable_decision.repoable = True", "'b']) == ['a:c', 'b:a'] assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'], ['a',", "'service_1:action_4'] hooks = {} permissions = ['service_1:action_1', 'service_1:action_2', 'service_1:action_3', 'service_1:action_4',", "new_perms = repokid.utils.roledata.find_newly_added_permissions(old_policy, new_policy) assert new_perms == set(['ec2:allocatehosts', 'ec2:associateaddress']) def", "repoed_services = ['route53', 'ec2', 's3:abc', 'dynamodb:def', 'ses:ghi', 'ses:jkl'] expected_services =", "law or agreed to in writing, software # distributed under", "mock_get_role_permissions): roles = [Role(ROLES[0]), Role(ROLES[1]), Role(ROLES[2])] roles[0].disqualified_by = [] roles[0].aa_data", "* 1000, \"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"s3\"}],", "actions, service_2 is an unsupported service, service_3 # was used", "set(['service_1:action_1', 'service_1:action_2', 'service_4:action_1']) @patch('repokid.utils.roledata._get_role_permissions') @patch('repokid.utils.roledata._get_repoable_permissions') @patch('repokid.hooks.call_hooks') def test_calculate_repo_scores(self, mock_call_hooks, mock_get_repoable_permissions,", "roles[0].disqualified_by = [] roles[0].aa_data = 'some_aa_data' # disqualified by a", "mock_call_hooks, mock_get_repoable_permissions, mock_get_role_permissions): roles = [Role(ROLES[0]), Role(ROLES[1]), Role(ROLES[2])] roles[0].disqualified_by =", 
"mock_get_repoable_permissions, mock_get_role_permissions): roles = [Role(ROLES[0]), Role(ROLES[1]), Role(ROLES[2])] roles[0].disqualified_by = []", "['route53', 'ec2', 's3:abc', 'dynamodb:def', 'ses:ghi', 'ses:jkl'] expected_services = ['ec2', 'route53']", "mock_call_hooks.return_value = set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']) mock_get_repoable_permissions.side_effect = [set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'])] minimum_age =", "['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'], ['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']] mock_call_hooks.return_value = set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']) mock_get_repoable_permissions.side_effect =", "hooks = {} permissions = ['service_1:action_1', 'service_1:action_2', 'service_1:action_3', 'service_1:action_4', 'service_2:action_1',", "== (1545787620, True)) assert(repokid.utils.roledata._get_epoch_authenticated(154578762) == (None, False)) def test_filter_scheduled_repoable_perms(self): assert", "( expected_permissions, expected_services ) def test_get_epoch_authenticated(self): assert(repokid.utils.roledata._get_epoch_authenticated(1545787620000) == (1545787620, True))", "repokid.utils.roledata._get_repoed_policy(policies, repoable_permissions) assert rewritten_policies == {'s3_perms': {'Version': '2012-10-17', 'Statement': [{'Action':", "def test_get_epoch_authenticated(self): assert(repokid.utils.roledata._get_epoch_authenticated(1545787620000) == (1545787620, True)) assert(repokid.utils.roledata._get_epoch_authenticated(1545787620) == (1545787620, True))", "'ses:jkl'] assert repokid.utils.roledata._convert_repoed_service_to_sorted_perms_and_services(repoed_services) == ( expected_permissions, expected_services ) def test_get_epoch_authenticated(self):", "repoable_perms = ['b:l', 'c:m'] expected_repoed_services = ['b'] expected_repoed_permissions = ['c:m']", "1000, \"serviceNamespace\": \"s3\"}], 
\"arn:aws:iam::123456789012:role/unused_ec2\": [ {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\":", "(1545787620, True)) assert(repokid.utils.roledata._get_epoch_authenticated(154578762) == (None, False)) def test_filter_scheduled_repoable_perms(self): assert repokid.utils.roledata._filter_scheduled_repoable_perms(", "'b']) == ['a:b', 'a:c', 'b:a'] assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'],", "empty_policies == ['iam_perms'] def test_find_newly_added_permissions(self): old_policy = ROLE_POLICIES['all_services_used'] new_policy =", "may obtain a copy of the License at # #", "'ec2:AssociateAddress'], ['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'], ['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']] mock_call_hooks.return_value = set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']) mock_get_repoable_permissions.side_effect", "def test_get_role_permissions(self, mock_all_permissions, mock_get_actions_from_statement, mock_expand_policy): test_role = Role(ROLES[0]) all_permissions =", "expected_repoed_permissions = ['c:m'] assert (repokid.utils.roledata._convert_repoable_perms_to_perms_and_services(all_perms, repoable_perms) == (expected_repoed_permissions, expected_repoed_services)) def", "'b:a'] assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'], ['a:b', 'a:c']) == ['a:b',", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "Inc. # # Licensed under the Apache License, Version 2.0", "mock_expand_policy.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms'] permissions = repokid.utils.roledata._get_role_permissions(test_role) assert permissions == set(ROLE_POLICIES['unused_ec2']['ec2_perms'])", "{\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"s3\"}], \"arn:aws:iam::123456789012:role/unused_ec2\": [ {\"lastAuthenticated\": int(time.time())", "# limitations under the License. 
import time from mock import", "repoable_perms) == (expected_repoed_permissions, expected_repoed_services)) def test_convert_repoed_service_to_sorted_perms_and_services(self): repoed_services = ['route53', 'ec2',", "of our test filter for age aa_data = [{'serviceNamespace': 'service_1',", "roles[1].policies = [{'Policy': ROLE_POLICIES['unused_ec2']}] roles[1].disqualified_by = ['some_filter'] roles[1].aa_data = 'some_aa_data'", "may not use this file except in compliance with the", "a day ago, which is outside of our test filter", "+ 1000} repoable_decision = repokid.utils.roledata.RepoablePermissionDecision() repoable_decision.repoable = True mock_call_hooks.return_value =", "'ec2:associateaddress']) def test_convert_repoable_perms_to_perms_and_services(self): all_perms = ['a:j', 'a:k', 'b:l', 'c:m', 'c:n']", "test_convert_repoable_perms_to_perms_and_services(self): all_perms = ['a:j', 'a:k', 'b:l', 'c:m', 'c:n'] repoable_perms =", "@patch('repokid.hooks.call_hooks') def test_calculate_repo_scores(self, mock_call_hooks, mock_get_repoable_permissions, mock_get_role_permissions): roles = [Role(ROLES[0]), Role(ROLES[1]),", "= Role(ROLES[0]) all_permissions = ['ec2:associateaddress', 'ec2:attachvolume', 'ec2:createsnapshot', 's3:createbucket', 's3:getobject'] #", "1000}, {'serviceNamespace': 'service_3', 'lastAuthenticated': time.time() * 1000}] no_repo_permissions = {'service_4:action_1':", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "this file except in compliance with the License. 
# You", "= ['a:j', 'a:k', 'b:l', 'c:m', 'c:n'] repoable_perms = ['b:l', 'c:m']", "repokid.utils.roledata.find_newly_added_permissions(old_policy, new_policy) assert new_perms == set(['ec2:allocatehosts', 'ec2:associateaddress']) def test_convert_repoable_perms_to_perms_and_services(self): all_perms", "{'Version': '2012-10-17', 'Statement': [{'Action': ['s3:deletebucket'], 'Resource': ['*'], 'Effect': 'Allow'}]}} assert", "set(ROLE_POLICIES['unused_ec2']['ec2_perms']) @patch('repokid.hooks.call_hooks') def test_get_repoable_permissions(self, mock_call_hooks): minimum_age = 1 repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_SERVICES =", "assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'], ['a:b', 'a:c']) == ['a:b', 'a:c']", "90000) * 1000}, {'serviceNamespace': 'service_2', 'lastAuthenticated': (time.time() - 90000) *", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "rewritten_policies == {'s3_perms': {'Version': '2012-10-17', 'Statement': [{'Action': ['s3:deletebucket'], 'Resource': ['*'],", "service_2 both used more than a day ago, which is", "# # Licensed under the Apache License, Version 2.0 (the", "file except in compliance with the License. 
# You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "\"ec2\"}], \"arn:aws:iam::123456789012:role/young_role\": [ {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\":", "assert new_perms == set(['ec2:allocatehosts', 'ec2:associateaddress']) def test_convert_repoable_perms_to_perms_and_services(self): all_perms = ['a:j',", "'b:a'], ['a:c', 'b']) == ['a:c', 'b:a'] assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c',", "repoable_decision, 'service_4:action_1': repoable_decision}} repoable_permissions = repokid.utils.roledata._get_repoable_permissions(None, 'test_name', permissions, aa_data, no_repo_permissions,", "policy to make sure we get the latest test_role.policies =", "than a day ago, which is outside of our test", "= ['ec2', 'route53'] expected_permissions = ['dynamodb:def', 's3:abc', 'ses:ghi', 'ses:jkl'] assert", "== ['a:b', 'a:c', 'b:a'] assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'], ['a:b',", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "test filter for age aa_data = [{'serviceNamespace': 'service_1', 'lastAuthenticated': (time.time()", "import repokid.utils.roledata from repokid.role import Role from repokid.tests.test_repokid_cli import ROLE_POLICIES,", "minimum_age, hooks) # service_1:action_3 and action_4 are unsupported actions, service_2", "assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'], ['a:c', 'b']) == ['a:c', 'b:a']", "\"arn:aws:iam::123456789012:role/unused_ec2\": [ {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\": 0,", "= ['c:m'] assert (repokid.utils.roledata._convert_repoable_perms_to_perms_and_services(all_perms, repoable_perms) == (expected_repoed_permissions, expected_repoed_services)) def test_convert_repoed_service_to_sorted_perms_and_services(self):", "assert roles[2].repoable_permissions 
== 0 assert roles[2].repoable_services == [] def test_get_repoed_policy(self):", "mock import patch import repokid.utils.roledata from repokid.role import Role from", "ROLE_POLICIES['unused_ec2']['ec2_perms'] permissions = repokid.utils.roledata._get_role_permissions(test_role) assert permissions == set(ROLE_POLICIES['unused_ec2']['ec2_perms']) @patch('repokid.hooks.call_hooks') def", "{\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\": int(time.time()) * 1000,", "(time.time() - 90000) * 1000}, {'serviceNamespace': 'service_2', 'lastAuthenticated': (time.time() -", "repoable_permissions = set(['iam:addroletoinstanceprofile', 'iam:attachrolepolicy', 's3:createbucket']) rewritten_policies, empty_policies = repokid.utils.roledata._get_repoed_policy(policies, repoable_permissions)", "{'potentially_repoable_permissions': {'service_1:action_1': repoable_decision, 'service_1:action_2': repoable_decision, 'service_4:action_1': repoable_decision}} repoable_permissions = repokid.utils.roledata._get_repoable_permissions(None,", "roles[0].repoable_permissions == 2 assert roles[0].repoable_services == ['iam'] assert roles[1].repoable_permissions ==", "assert(repokid.utils.roledata._get_epoch_authenticated(1545787620000) == (1545787620, True)) assert(repokid.utils.roledata._get_epoch_authenticated(1545787620) == (1545787620, True)) assert(repokid.utils.roledata._get_epoch_authenticated(154578762) ==", "mock_get_actions_from_statement, mock_expand_policy): test_role = Role(ROLES[0]) all_permissions = ['ec2:associateaddress', 'ec2:attachvolume', 'ec2:createsnapshot',", "= ['some_filter'] roles[1].aa_data = 'some_aa_data' # no AA data roles[2].policies", "[] assert roles[2].repoable_permissions == 0 assert roles[2].repoable_services == [] def", "(1545787620, True)) assert(repokid.utils.roledata._get_epoch_authenticated(1545787620) == (1545787620, True)) assert(repokid.utils.roledata._get_epoch_authenticated(154578762) == (None, 
False))", "language governing permissions and # limitations under the License. import", "empty policy to make sure we get the latest test_role.policies", "'service_1:action_2', 'service_4:action_1']) @patch('repokid.utils.roledata._get_role_permissions') @patch('repokid.utils.roledata._get_repoable_permissions') @patch('repokid.hooks.call_hooks') def test_calculate_repo_scores(self, mock_call_hooks, mock_get_repoable_permissions, mock_get_role_permissions):", "ROLE_POLICIES['unused_ec2']['ec2_perms'] mock_expand_policy.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms'] permissions = repokid.utils.roledata._get_role_permissions(test_role) assert permissions ==", "roles[1].repoable_permissions == 0 assert roles[1].repoable_services == [] assert roles[2].repoable_permissions ==", "make sure we get the latest test_role.policies = [{'Policy': ROLE_POLICIES['all_services_used']},", "'Effect': 'Allow'}]}} assert empty_policies == ['iam_perms'] def test_find_newly_added_permissions(self): old_policy =", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "no_repo_permissions, minimum_age, hooks) # service_1:action_3 and action_4 are unsupported actions,", "1000, \"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"s3\"}] }", "hooks) # service_1:action_3 and action_4 are unsupported actions, service_2 is", "1000, \"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"s3\"}], \"arn:aws:iam::123456789012:role/unused_ec2\":", "['ec2:associateaddress', 'ec2:attachvolume', 'ec2:createsnapshot', 's3:createbucket', 's3:getobject'] # empty policy to make", "'some_aa_data' # no AA data roles[2].policies = [{'Policy': ROLE_POLICIES['all_services_used']}] roles[2].disqualified_by", "or implied. 
# See the License for the specific language", "'service_2', 'lastAuthenticated': (time.time() - 90000) * 1000}, {'serviceNamespace': 'service_3', 'lastAuthenticated':", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "roles[2].disqualified_by = [] roles[2].aa_data = None hooks = {} mock_get_role_permissions.side_effect", "assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'], ['a', 'b']) == ['a:b', 'a:c',", "= ['ec2:associateaddress', 'ec2:attachvolume', 'ec2:createsnapshot', 's3:createbucket', 's3:getobject'] # empty policy to", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "repoable_decision = repokid.utils.roledata.RepoablePermissionDecision() repoable_decision.repoable = True mock_call_hooks.return_value = {'potentially_repoable_permissions': {'service_1:action_1':", "= repokid.utils.roledata.RepoablePermissionDecision() repoable_decision.repoable = True mock_call_hooks.return_value = {'potentially_repoable_permissions': {'service_1:action_1': repoable_decision,", "new_policy) assert new_perms == set(['ec2:allocatehosts', 'ec2:associateaddress']) def test_convert_repoable_perms_to_perms_and_services(self): all_perms =", "repokid.tests.test_repokid_cli import ROLE_POLICIES, ROLES AARDVARK_DATA = { \"arn:aws:iam::123456789012:role/all_services_used\": [ {\"lastAuthenticated\":", "'c:n'] repoable_perms = ['b:l', 'c:m'] expected_repoed_services = ['b'] expected_repoed_permissions =", "== 2 assert roles[0].repoable_services == ['iam'] assert roles[1].repoable_permissions == 0", "'some_aa_data' # disqualified by a filter roles[1].policies = [{'Policy': ROLE_POLICIES['unused_ec2']}]", "@patch('repokid.utils.roledata._get_repoable_permissions') @patch('repokid.hooks.call_hooks') def test_calculate_repo_scores(self, mock_call_hooks, mock_get_repoable_permissions, mock_get_role_permissions): roles = 
[Role(ROLES[0]),", "ROLE_POLICIES, ROLES AARDVARK_DATA = { \"arn:aws:iam::123456789012:role/all_services_used\": [ {\"lastAuthenticated\": int(time.time()) *", "outside of our test filter for age aa_data = [{'serviceNamespace':", "\"iam\"}, {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"s3\"}], \"arn:aws:iam::123456789012:role/unused_ec2\": [ {\"lastAuthenticated\":", "which is outside of our test filter for age aa_data", "Role(ROLES[1]), Role(ROLES[2])] roles[0].disqualified_by = [] roles[0].aa_data = 'some_aa_data' # disqualified", "(the \"License\"); # you may not use this file except", "unsupported actions, service_2 is an unsupported service, service_3 # was", "mock_get_repoable_permissions.side_effect = [set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'])] minimum_age = 90 repokid.utils.roledata._calculate_repo_scores(roles, minimum_age, hooks)", "test_convert_repoed_service_to_sorted_perms_and_services(self): repoed_services = ['route53', 'ec2', 's3:abc', 'dynamodb:def', 'ses:ghi', 'ses:jkl'] expected_services", "# you may not use this file except in compliance", "== ['a:c', 'b:a'] assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'], ['a', 'b'])", "{ \"arn:aws:iam::123456789012:role/all_services_used\": [ {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\":", "mock_call_hooks.return_value = {'potentially_repoable_permissions': {'service_1:action_1': repoable_decision, 'service_1:action_2': repoable_decision, 'service_4:action_1': repoable_decision}} repoable_permissions", "more than a day ago, which is outside of our", "time.time() + 1000} repoable_decision = repokid.utils.roledata.RepoablePermissionDecision() repoable_decision.repoable = True mock_call_hooks.return_value", "= ROLE_POLICIES['unused_ec2']['ec2_perms'] permissions = repokid.utils.roledata._get_role_permissions(test_role) assert permissions == 
set(ROLE_POLICIES['unused_ec2']['ec2_perms']) @patch('repokid.hooks.call_hooks')", "test_get_repoable_permissions(self, mock_call_hooks): minimum_age = 1 repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_SERVICES = ['service_2'] repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_ACTIONS =", "= 90 repokid.utils.roledata._calculate_repo_scores(roles, minimum_age, hooks) assert roles[0].repoable_permissions == 2 assert", "'service_2:action_1', 'service_3:action_1', 'service_3:action_2', 'service_4:action_1', 'service_4:action_2'] # service_1 and service_2 both", "minimum_age = 90 repokid.utils.roledata._calculate_repo_scores(roles, minimum_age, hooks) assert roles[0].repoable_permissions == 2", "# # Unless required by applicable law or agreed to", "License. import time from mock import patch import repokid.utils.roledata from", "repokid.utils.roledata._calculate_repo_scores(roles, minimum_age, hooks) assert roles[0].repoable_permissions == 2 assert roles[0].repoable_services ==", "assert permissions == set(ROLE_POLICIES['unused_ec2']['ec2_perms']) @patch('repokid.hooks.call_hooks') def test_get_repoable_permissions(self, mock_call_hooks): minimum_age =", "minimum_age = 1 repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_SERVICES = ['service_2'] repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_ACTIONS = ['service_1:action_3', 'service_1:action_4']", "== [] assert roles[2].repoable_permissions == 0 assert roles[2].repoable_services == []", "[ {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\": 0, \"serviceNamespace\":", "repoable_decision}} repoable_permissions = repokid.utils.roledata._get_repoable_permissions(None, 'test_name', permissions, aa_data, no_repo_permissions, minimum_age, hooks)", "[{'Policy': ROLE_POLICIES['all_services_used']}] roles[2].disqualified_by = [] roles[2].aa_data = None hooks =", "= ['b'] expected_repoed_permissions = ['c:m'] assert 
(repokid.utils.roledata._convert_repoable_perms_to_perms_and_services(all_perms, repoable_perms) == (expected_repoed_permissions,", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "'ec2', 's3:abc', 'dynamodb:def', 'ses:ghi', 'ses:jkl'] expected_services = ['ec2', 'route53'] expected_permissions", "= ROLE_POLICIES['all_services_used'] repoable_permissions = set(['iam:addroletoinstanceprofile', 'iam:attachrolepolicy', 's3:createbucket']) rewritten_policies, empty_policies =", "Version 2.0 (the \"License\"); # you may not use this", "and service_2 both used more than a day ago, which", "old_policy = ROLE_POLICIES['all_services_used'] new_policy = ROLE_POLICIES['unused_ec2'] new_perms = repokid.utils.roledata.find_newly_added_permissions(old_policy, new_policy)", "repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_SERVICES = ['service_2'] repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_ACTIONS = ['service_1:action_3', 'service_1:action_4'] hooks = {}", "'ses:jkl'] expected_services = ['ec2', 'route53'] expected_permissions = ['dynamodb:def', 's3:abc', 'ses:ghi',", "from repokid.tests.test_repokid_cli import ROLE_POLICIES, ROLES AARDVARK_DATA = { \"arn:aws:iam::123456789012:role/all_services_used\": [", "repoable_permissions == set(['service_1:action_1', 'service_1:action_2', 'service_4:action_1']) @patch('repokid.utils.roledata._get_role_permissions') @patch('repokid.utils.roledata._get_repoable_permissions') @patch('repokid.hooks.call_hooks') def test_calculate_repo_scores(self,", "0 assert roles[2].repoable_services == [] def test_get_repoed_policy(self): policies = ROLE_POLICIES['all_services_used']", "new_perms == set(['ec2:allocatehosts', 'ec2:associateaddress']) def test_convert_repoable_perms_to_perms_and_services(self): all_perms = ['a:j', 'a:k',", "implied. 
# See the License for the specific language governing", "= ['service_1:action_3', 'service_1:action_4'] hooks = {} permissions = ['service_1:action_1', 'service_1:action_2',", "under the Apache License, Version 2.0 (the \"License\"); # you", "Netflix, Inc. # # Licensed under the Apache License, Version", "Copyright 2017 Netflix, Inc. # # Licensed under the Apache", "class TestRoledata(object): @patch('repokid.utils.roledata.expand_policy') @patch('repokid.utils.roledata.get_actions_from_statement') @patch('repokid.utils.roledata.all_permissions') def test_get_role_permissions(self, mock_all_permissions, mock_get_actions_from_statement, mock_expand_policy):", "AARDVARK_DATA = { \"arn:aws:iam::123456789012:role/all_services_used\": [ {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\":", "are unsupported actions, service_2 is an unsupported service, service_3 #", "by applicable law or agreed to in writing, software #", "Role(ROLES[0]) all_permissions = ['ec2:associateaddress', 'ec2:attachvolume', 'ec2:createsnapshot', 's3:createbucket', 's3:getobject'] # empty", "= ROLE_POLICIES['unused_ec2'] new_perms = repokid.utils.roledata.find_newly_added_permissions(old_policy, new_policy) assert new_perms == set(['ec2:allocatehosts',", "test_get_repoed_policy(self): policies = ROLE_POLICIES['all_services_used'] repoable_permissions = set(['iam:addroletoinstanceprofile', 'iam:attachrolepolicy', 's3:createbucket']) rewritten_policies,", "assert roles[0].repoable_permissions == 2 assert roles[0].repoable_services == ['iam'] assert roles[1].repoable_permissions", "repoable_permissions) assert rewritten_policies == {'s3_perms': {'Version': '2012-10-17', 'Statement': [{'Action': ['s3:deletebucket'],", "\"serviceNamespace\": \"s3\"}] } class TestRoledata(object): @patch('repokid.utils.roledata.expand_policy') @patch('repokid.utils.roledata.get_actions_from_statement') @patch('repokid.utils.roledata.all_permissions') def test_get_role_permissions(self,", "limitations 
under the License. import time from mock import patch", "{'serviceNamespace': 'service_3', 'lastAuthenticated': time.time() * 1000}] no_repo_permissions = {'service_4:action_1': time.time()", "{'Policy': ROLE_POLICIES['unused_ec2']}] mock_all_permissions.return_value = all_permissions mock_get_actions_from_statement.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms'] mock_expand_policy.return_value =", "assert roles[0].repoable_services == ['iam'] assert roles[1].repoable_permissions == 0 assert roles[1].repoable_services", "a filter roles[1].policies = [{'Policy': ROLE_POLICIES['unused_ec2']}] roles[1].disqualified_by = ['some_filter'] roles[1].aa_data", "by a filter roles[1].policies = [{'Policy': ROLE_POLICIES['unused_ec2']}] roles[1].disqualified_by = ['some_filter']", "'a:k', 'b:l', 'c:m', 'c:n'] repoable_perms = ['b:l', 'c:m'] expected_repoed_services =", "'test_name', permissions, aa_data, no_repo_permissions, minimum_age, hooks) # service_1:action_3 and action_4", "int(time.time()) * 1000, \"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\": 0, \"serviceNamespace\": \"ec2\"}], \"arn:aws:iam::123456789012:role/young_role\":", "\"serviceNamespace\": \"ec2\"}], \"arn:aws:iam::123456789012:role/young_role\": [ {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"iam\"},", "ago, which is outside of our test filter for age", "Role from repokid.tests.test_repokid_cli import ROLE_POLICIES, ROLES AARDVARK_DATA = { \"arn:aws:iam::123456789012:role/all_services_used\":", "'ses:ghi', 'ses:jkl'] expected_services = ['ec2', 'route53'] expected_permissions = ['dynamodb:def', 's3:abc',", "[set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'])] minimum_age = 90 repokid.utils.roledata._calculate_repo_scores(roles, minimum_age, hooks) assert roles[0].repoable_permissions", "service_1:action_3 and action_4 are unsupported actions, service_2 is an unsupported", "set(['ec2:allocatehosts', 'ec2:associateaddress']) def 
test_convert_repoable_perms_to_perms_and_services(self): all_perms = ['a:j', 'a:k', 'b:l', 'c:m',", "all_permissions = ['ec2:associateaddress', 'ec2:attachvolume', 'ec2:createsnapshot', 's3:createbucket', 's3:getobject'] # empty policy", "empty_policies = repokid.utils.roledata._get_repoed_policy(policies, repoable_permissions) assert rewritten_policies == {'s3_perms': {'Version': '2012-10-17',", "[] roles[0].aa_data = 'some_aa_data' # disqualified by a filter roles[1].policies", "int(time.time()) * 1000, \"serviceNamespace\": \"s3\"}] } class TestRoledata(object): @patch('repokid.utils.roledata.expand_policy') @patch('repokid.utils.roledata.get_actions_from_statement')", "and not expired assert repoable_permissions == set(['service_1:action_1', 'service_1:action_2', 'service_4:action_1']) @patch('repokid.utils.roledata._get_role_permissions')", "# no AA data roles[2].policies = [{'Policy': ROLE_POLICIES['all_services_used']}] roles[2].disqualified_by =", "1 repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_SERVICES = ['service_2'] repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_ACTIONS = ['service_1:action_3', 'service_1:action_4'] hooks =", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "ROLE_POLICIES['unused_ec2']}] roles[1].disqualified_by = ['some_filter'] roles[1].aa_data = 'some_aa_data' # no AA", "under the License. 
import time from mock import patch import", "Unless required by applicable law or agreed to in writing,", "import time from mock import patch import repokid.utils.roledata from repokid.role", "# was used too recently, service_4 action 2 is in", "def test_get_repoed_policy(self): policies = ROLE_POLICIES['all_services_used'] repoable_permissions = set(['iam:addroletoinstanceprofile', 'iam:attachrolepolicy', 's3:createbucket'])", "[{'Action': ['s3:deletebucket'], 'Resource': ['*'], 'Effect': 'Allow'}]}} assert empty_policies == ['iam_perms']", "the specific language governing permissions and # limitations under the", "= [set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'])] minimum_age = 90 repokid.utils.roledata._calculate_repo_scores(roles, minimum_age, hooks) assert", "{} permissions = ['service_1:action_1', 'service_1:action_2', 'service_1:action_3', 'service_1:action_4', 'service_2:action_1', 'service_3:action_1', 'service_3:action_2',", "1, 'service_4:action_2': time.time() + 1000} repoable_decision = repokid.utils.roledata.RepoablePermissionDecision() repoable_decision.repoable =", "{} mock_get_role_permissions.side_effect = [['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy', 'ec2:AllocateHosts', 'ec2:AssociateAddress'], ['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'], ['iam:AddRoleToInstanceProfile',", "'b:a'], ['a', 'b']) == ['a:b', 'a:c', 'b:a'] assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b',", "applicable law or agreed to in writing, software # distributed", "== set(['ec2:allocatehosts', 'ec2:associateaddress']) def test_convert_repoable_perms_to_perms_and_services(self): all_perms = ['a:j', 'a:k', 'b:l',", "@patch('repokid.utils.roledata._get_role_permissions') @patch('repokid.utils.roledata._get_repoable_permissions') @patch('repokid.hooks.call_hooks') def test_calculate_repo_scores(self, mock_call_hooks, mock_get_repoable_permissions, mock_get_role_permissions): roles =", "assert 
roles[1].repoable_permissions == 0 assert roles[1].repoable_services == [] assert roles[2].repoable_permissions", "aa_data = [{'serviceNamespace': 'service_1', 'lastAuthenticated': (time.time() - 90000) * 1000},", "permissions, aa_data, no_repo_permissions, minimum_age, hooks) # service_1:action_3 and action_4 are", "\"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"s3\"}] } class", "ROLE_POLICIES['all_services_used'] new_policy = ROLE_POLICIES['unused_ec2'] new_perms = repokid.utils.roledata.find_newly_added_permissions(old_policy, new_policy) assert new_perms", "action_4 are unsupported actions, service_2 is an unsupported service, service_3", "= repokid.utils.roledata.find_newly_added_permissions(old_policy, new_policy) assert new_perms == set(['ec2:allocatehosts', 'ec2:associateaddress']) def test_convert_repoable_perms_to_perms_and_services(self):", "assert empty_policies == ['iam_perms'] def test_find_newly_added_permissions(self): old_policy = ROLE_POLICIES['all_services_used'] new_policy", "in writing, software # distributed under the License is distributed", "'s3:abc', 'ses:ghi', 'ses:jkl'] assert repokid.utils.roledata._convert_repoed_service_to_sorted_perms_and_services(repoed_services) == ( expected_permissions, expected_services )", "= repokid.utils.roledata._get_role_permissions(test_role) assert permissions == set(ROLE_POLICIES['unused_ec2']['ec2_perms']) @patch('repokid.hooks.call_hooks') def test_get_repoable_permissions(self, mock_call_hooks):", "'b:a'] assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'], ['a', 'b']) == ['a:b',", "to make sure we get the latest test_role.policies = [{'Policy':", "test_filter_scheduled_repoable_perms(self): assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'], ['a:c', 'b']) == ['a:c',", "used more than a day ago, which is outside of", "ROLES AARDVARK_DATA = { 
\"arn:aws:iam::123456789012:role/all_services_used\": [ {\"lastAuthenticated\": int(time.time()) * 1000,", "'b:l', 'c:m', 'c:n'] repoable_perms = ['b:l', 'c:m'] expected_repoed_services = ['b']", "no AA data roles[2].policies = [{'Policy': ROLE_POLICIES['all_services_used']}] roles[2].disqualified_by = []", "the License. import time from mock import patch import repokid.utils.roledata", "def test_calculate_repo_scores(self, mock_call_hooks, mock_get_repoable_permissions, mock_get_role_permissions): roles = [Role(ROLES[0]), Role(ROLES[1]), Role(ROLES[2])]", "'iam:AttachRolePolicy'])] minimum_age = 90 repokid.utils.roledata._calculate_repo_scores(roles, minimum_age, hooks) assert roles[0].repoable_permissions ==", "'lastAuthenticated': (time.time() - 90000) * 1000}, {'serviceNamespace': 'service_2', 'lastAuthenticated': (time.time()", "ROLE_POLICIES['unused_ec2'] new_perms = repokid.utils.roledata.find_newly_added_permissions(old_policy, new_policy) assert new_perms == set(['ec2:allocatehosts', 'ec2:associateaddress'])", "assert(repokid.utils.roledata._get_epoch_authenticated(1545787620) == (1545787620, True)) assert(repokid.utils.roledata._get_epoch_authenticated(154578762) == (None, False)) def test_filter_scheduled_repoable_perms(self):", "time.time() * 1000}] no_repo_permissions = {'service_4:action_1': time.time() - 1, 'service_4:action_2':", "for age aa_data = [{'serviceNamespace': 'service_1', 'lastAuthenticated': (time.time() - 90000)", "import ROLE_POLICIES, ROLES AARDVARK_DATA = { \"arn:aws:iam::123456789012:role/all_services_used\": [ {\"lastAuthenticated\": int(time.time())", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "ROLE_POLICIES['all_services_used']}] roles[2].disqualified_by = [] roles[2].aa_data = None hooks = {}", "License, Version 2.0 (the \"License\"); # you may not use", "TestRoledata(object): @patch('repokid.utils.roledata.expand_policy') @patch('repokid.utils.roledata.get_actions_from_statement') 
@patch('repokid.utils.roledata.all_permissions') def test_get_role_permissions(self, mock_all_permissions, mock_get_actions_from_statement, mock_expand_policy): test_role", "'dynamodb:def', 'ses:ghi', 'ses:jkl'] expected_services = ['ec2', 'route53'] expected_permissions = ['dynamodb:def',", "# You may obtain a copy of the License at", "{'service_1:action_1': repoable_decision, 'service_1:action_2': repoable_decision, 'service_4:action_1': repoable_decision}} repoable_permissions = repokid.utils.roledata._get_repoable_permissions(None, 'test_name',", "'Resource': ['*'], 'Effect': 'Allow'}]}} assert empty_policies == ['iam_perms'] def test_find_newly_added_permissions(self):", "{\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"s3\"}] } class TestRoledata(object): @patch('repokid.utils.roledata.expand_policy')", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "== (expected_repoed_permissions, expected_repoed_services)) def test_convert_repoed_service_to_sorted_perms_and_services(self): repoed_services = ['route53', 'ec2', 's3:abc',", "True)) assert(repokid.utils.roledata._get_epoch_authenticated(1545787620) == (1545787620, True)) assert(repokid.utils.roledata._get_epoch_authenticated(154578762) == (None, False)) def", "'iam:AttachRolePolicy', 'ec2:AllocateHosts', 'ec2:AssociateAddress'], ['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'], ['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']] mock_call_hooks.return_value = set(['iam:AddRoleToInstanceProfile',", "= [{'Policy': ROLE_POLICIES['all_services_used']}, {'Policy': ROLE_POLICIES['unused_ec2']}] mock_all_permissions.return_value = all_permissions mock_get_actions_from_statement.return_value =", "True)) assert(repokid.utils.roledata._get_epoch_authenticated(154578762) == (None, False)) def test_filter_scheduled_repoable_perms(self): assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b',", "roles[0].aa_data = 'some_aa_data' # 
disqualified by a filter roles[1].policies =", "== set(['service_1:action_1', 'service_1:action_2', 'service_4:action_1']) @patch('repokid.utils.roledata._get_role_permissions') @patch('repokid.utils.roledata._get_repoable_permissions') @patch('repokid.hooks.call_hooks') def test_calculate_repo_scores(self, mock_call_hooks,", "set(['iam:addroletoinstanceprofile', 'iam:attachrolepolicy', 's3:createbucket']) rewritten_policies, empty_policies = repokid.utils.roledata._get_repoed_policy(policies, repoable_permissions) assert rewritten_policies", "expected_services = ['ec2', 'route53'] expected_permissions = ['dynamodb:def', 's3:abc', 'ses:ghi', 'ses:jkl']", "the License for the specific language governing permissions and #", "filter for age aa_data = [{'serviceNamespace': 'service_1', 'lastAuthenticated': (time.time() -", "{\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\": 0, \"serviceNamespace\": \"ec2\"}],", "Apache License, Version 2.0 (the \"License\"); # you may not", "(repokid.utils.roledata._convert_repoable_perms_to_perms_and_services(all_perms, repoable_perms) == (expected_repoed_permissions, expected_repoed_services)) def test_convert_repoed_service_to_sorted_perms_and_services(self): repoed_services = ['route53',", "either express or implied. 
# See the License for the", "service_4 action 2 is in no_repo_permissions and not expired assert", "repoable_decision.repoable = True mock_call_hooks.return_value = {'potentially_repoable_permissions': {'service_1:action_1': repoable_decision, 'service_1:action_2': repoable_decision,", "an unsupported service, service_3 # was used too recently, service_4", "['b:l', 'c:m'] expected_repoed_services = ['b'] expected_repoed_permissions = ['c:m'] assert (repokid.utils.roledata._convert_repoable_perms_to_perms_and_services(all_perms,", "mock_all_permissions, mock_get_actions_from_statement, mock_expand_policy): test_role = Role(ROLES[0]) all_permissions = ['ec2:associateaddress', 'ec2:attachvolume',", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "'service_3', 'lastAuthenticated': time.time() * 1000}] no_repo_permissions = {'service_4:action_1': time.time() -", "= set(['iam:addroletoinstanceprofile', 'iam:attachrolepolicy', 's3:createbucket']) rewritten_policies, empty_policies = repokid.utils.roledata._get_repoed_policy(policies, repoable_permissions) assert", "(None, False)) def test_filter_scheduled_repoable_perms(self): assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'], ['a:c',", "= [Role(ROLES[0]), Role(ROLES[1]), Role(ROLES[2])] roles[0].disqualified_by = [] roles[0].aa_data = 'some_aa_data'", "['service_1:action_1', 'service_1:action_2', 'service_1:action_3', 'service_1:action_4', 'service_2:action_1', 'service_3:action_1', 'service_3:action_2', 'service_4:action_1', 'service_4:action_2'] #", "90000) * 1000}, {'serviceNamespace': 'service_3', 'lastAuthenticated': time.time() * 1000}] no_repo_permissions", "= ['service_2'] repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_ACTIONS = ['service_1:action_3', 'service_1:action_4'] hooks = {} permissions", "* 1000, \"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\": 0, \"serviceNamespace\": \"ec2\"}], 
\"arn:aws:iam::123456789012:role/young_role\": [", "repokid.utils.roledata._get_repoable_permissions(None, 'test_name', permissions, aa_data, no_repo_permissions, minimum_age, hooks) # service_1:action_3 and", "{'s3_perms': {'Version': '2012-10-17', 'Statement': [{'Action': ['s3:deletebucket'], 'Resource': ['*'], 'Effect': 'Allow'}]}}", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "\"iam\"}, {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"s3\"}] } class TestRoledata(object):", "['*'], 'Effect': 'Allow'}]}} assert empty_policies == ['iam_perms'] def test_find_newly_added_permissions(self): old_policy", "(time.time() - 90000) * 1000}, {'serviceNamespace': 'service_3', 'lastAuthenticated': time.time() *", "mock_expand_policy): test_role = Role(ROLES[0]) all_permissions = ['ec2:associateaddress', 'ec2:attachvolume', 'ec2:createsnapshot', 's3:createbucket',", "time.time() - 1, 'service_4:action_2': time.time() + 1000} repoable_decision = repokid.utils.roledata.RepoablePermissionDecision()", "False)) def test_filter_scheduled_repoable_perms(self): assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'], ['a:c', 'b'])", "== ['iam'] assert roles[1].repoable_permissions == 0 assert roles[1].repoable_services == []", "== 0 assert roles[2].repoable_services == [] def test_get_repoed_policy(self): policies =", "== (None, False)) def test_filter_scheduled_repoable_perms(self): assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'],", "\"License\"); # you may not use this file except in", "roles[0].repoable_services == ['iam'] assert roles[1].repoable_permissions == 0 assert roles[1].repoable_services ==", "'Statement': [{'Action': ['s3:deletebucket'], 'Resource': ['*'], 'Effect': 'Allow'}]}} assert empty_policies ==", "expected_permissions = ['dynamodb:def', 's3:abc', 'ses:ghi', 'ses:jkl'] assert 
repokid.utils.roledata._convert_repoed_service_to_sorted_perms_and_services(repoed_services) == (", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "the latest test_role.policies = [{'Policy': ROLE_POLICIES['all_services_used']}, {'Policy': ROLE_POLICIES['unused_ec2']}] mock_all_permissions.return_value =", "['b'] expected_repoed_permissions = ['c:m'] assert (repokid.utils.roledata._convert_repoable_perms_to_perms_and_services(all_perms, repoable_perms) == (expected_repoed_permissions, expected_repoed_services))", "test_role = Role(ROLES[0]) all_permissions = ['ec2:associateaddress', 'ec2:attachvolume', 'ec2:createsnapshot', 's3:createbucket', 's3:getobject']", "# distributed under the License is distributed on an \"AS", "latest test_role.policies = [{'Policy': ROLE_POLICIES['all_services_used']}, {'Policy': ROLE_POLICIES['unused_ec2']}] mock_all_permissions.return_value = all_permissions", "[ {\"lastAuthenticated\": int(time.time()) * 1000, \"serviceNamespace\": \"iam\"}, {\"lastAuthenticated\": int(time.time()) *", "we get the latest test_role.policies = [{'Policy': ROLE_POLICIES['all_services_used']}, {'Policy': ROLE_POLICIES['unused_ec2']}]", "roles = [Role(ROLES[0]), Role(ROLES[1]), Role(ROLES[2])] roles[0].disqualified_by = [] roles[0].aa_data =", "90 repokid.utils.roledata._calculate_repo_scores(roles, minimum_age, hooks) assert roles[0].repoable_permissions == 2 assert roles[0].repoable_services", "# Unless required by applicable law or agreed to in", "['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']] mock_call_hooks.return_value = set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']) mock_get_repoable_permissions.side_effect = [set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'])]", "'s3:abc', 'dynamodb:def', 'ses:ghi', 'ses:jkl'] expected_services = ['ec2', 'route53'] expected_permissions =", "new_policy = ROLE_POLICIES['unused_ec2'] new_perms = repokid.utils.roledata.find_newly_added_permissions(old_policy, 
new_policy) assert new_perms ==", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "== (1545787620, True)) assert(repokid.utils.roledata._get_epoch_authenticated(1545787620) == (1545787620, True)) assert(repokid.utils.roledata._get_epoch_authenticated(154578762) == (None,", "filter roles[1].policies = [{'Policy': ROLE_POLICIES['unused_ec2']}] roles[1].disqualified_by = ['some_filter'] roles[1].aa_data =", "You may obtain a copy of the License at #", "None hooks = {} mock_get_role_permissions.side_effect = [['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy', 'ec2:AllocateHosts', 'ec2:AssociateAddress'],", "# service_1:action_3 and action_4 are unsupported actions, service_2 is an", "= ['dynamodb:def', 's3:abc', 'ses:ghi', 'ses:jkl'] assert repokid.utils.roledata._convert_repoed_service_to_sorted_perms_and_services(repoed_services) == ( expected_permissions,", "recently, service_4 action 2 is in no_repo_permissions and not expired", "permissions = ['service_1:action_1', 'service_1:action_2', 'service_1:action_3', 'service_1:action_4', 'service_2:action_1', 'service_3:action_1', 'service_3:action_2', 'service_4:action_1',", "repoable_decision, 'service_1:action_2': repoable_decision, 'service_4:action_1': repoable_decision}} repoable_permissions = repokid.utils.roledata._get_repoable_permissions(None, 'test_name', permissions,", "the Apache License, Version 2.0 (the \"License\"); # you may", "'route53'] expected_permissions = ['dynamodb:def', 's3:abc', 'ses:ghi', 'ses:jkl'] assert repokid.utils.roledata._convert_repoed_service_to_sorted_perms_and_services(repoed_services) ==", "== 0 assert roles[1].repoable_services == [] assert roles[2].repoable_permissions == 0", "test_get_role_permissions(self, mock_all_permissions, mock_get_actions_from_statement, mock_expand_policy): test_role = Role(ROLES[0]) all_permissions = ['ec2:associateaddress',", "our test filter for age aa_data = [{'serviceNamespace': 'service_1', 'lastAuthenticated':", "is in 
no_repo_permissions and not expired assert repoable_permissions == set(['service_1:action_1'," ]
[ "cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, \"bgr8\") # copy from # classify_image.py image_data", "RosTensorFlow(): def __init__(self): classify_image.maybe_download_and_extract() self._session = tf.Session() classify_image.create_graph() self._cv_bridge =", "std_msgs.msg import String from cv_bridge import CvBridge import cv2 import", "--> English string lookup. node_lookup = classify_image.NodeLookup() top_k = predictions.argsort()[-self.use_top_k:][::-1]", "predictions.argsort()[-self.use_top_k:][::-1] for node_id in top_k: human_string = node_lookup.id_to_string(node_id) score =", "copy from # classify_image.py image_data = cv2.imencode('.jpg', cv_image)[1].tostring() # Creates", "sensor_msgs.msg import Image from std_msgs.msg import String from cv_bridge import", "classify_image class RosTensorFlow(): def __init__(self): classify_image.maybe_download_and_extract() self._session = tf.Session() classify_image.create_graph()", "= self._cv_bridge.imgmsg_to_cv2(image_msg, \"bgr8\") # copy from # classify_image.py image_data =", "node_lookup = classify_image.NodeLookup() top_k = predictions.argsort()[-self.use_top_k:][::-1] for node_id in top_k:", "= predictions[node_id] if score > self.score_threshold: rospy.loginfo('%s (score = %.5f)'", "rospy.Publisher('result', String, queue_size=1) self.score_threshold = rospy.get_param('~score_threshold', 0.1) self.use_top_k = rospy.get_param('~use_top_k',", "callback(self, image_msg): cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, \"bgr8\") # copy from #", "softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0') predictions = self._session.run( softmax_tensor, {'DecodeJpeg/contents:0': image_data}) predictions", "# classify_image.py image_data = cv2.imencode('.jpg', cv_image)[1].tostring() # Creates graph from", "tensorflow as tf import classify_image class RosTensorFlow(): def __init__(self): classify_image.maybe_download_and_extract()", "class RosTensorFlow(): def __init__(self): 
classify_image.maybe_download_and_extract() self._session = tf.Session() classify_image.create_graph() self._cv_bridge", "self._cv_bridge = CvBridge() self._sub = rospy.Subscriber('/usb_cam/image_raw', Image, self.callback, queue_size=1) self._pub", "human_string = node_lookup.id_to_string(node_id) score = predictions[node_id] if score > self.score_threshold:", "{'DecodeJpeg/contents:0': image_data}) predictions = np.squeeze(predictions) # Creates node ID -->", "cv2 import numpy as np import tensorflow as tf import", "= cv2.imencode('.jpg', cv_image)[1].tostring() # Creates graph from saved GraphDef. softmax_tensor", "if score > self.score_threshold: rospy.loginfo('%s (score = %.5f)' % (human_string,", "(score = %.5f)' % (human_string, score)) self._pub.publish(human_string) def main(self): rospy.spin()", "String, queue_size=1) self.score_threshold = rospy.get_param('~score_threshold', 0.1) self.use_top_k = rospy.get_param('~use_top_k', 5)", "= predictions.argsort()[-self.use_top_k:][::-1] for node_id in top_k: human_string = node_lookup.id_to_string(node_id) score", "node ID --> English string lookup. node_lookup = classify_image.NodeLookup() top_k", "classify_image.maybe_download_and_extract() self._session = tf.Session() classify_image.create_graph() self._cv_bridge = CvBridge() self._sub =", "= rospy.get_param('~score_threshold', 0.1) self.use_top_k = rospy.get_param('~use_top_k', 5) def callback(self, image_msg):", "Creates graph from saved GraphDef. softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0') predictions =", "image_data}) predictions = np.squeeze(predictions) # Creates node ID --> English", "self.score_threshold = rospy.get_param('~score_threshold', 0.1) self.use_top_k = rospy.get_param('~use_top_k', 5) def callback(self,", "import cv2 import numpy as np import tensorflow as tf", "predictions[node_id] if score > self.score_threshold: rospy.loginfo('%s (score = %.5f)' %", "saved GraphDef. 
softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0') predictions = self._session.run( softmax_tensor, {'DecodeJpeg/contents:0':", "self._pub.publish(human_string) def main(self): rospy.spin() if __name__ == '__main__': classify_image.setup_args() rospy.init_node('rostensorflow')", "if __name__ == '__main__': classify_image.setup_args() rospy.init_node('rostensorflow') tensor = RosTensorFlow() tensor.main()", "5) def callback(self, image_msg): cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, \"bgr8\") # copy", "node_lookup.id_to_string(node_id) score = predictions[node_id] if score > self.score_threshold: rospy.loginfo('%s (score", "from # classify_image.py image_data = cv2.imencode('.jpg', cv_image)[1].tostring() # Creates graph", "# Creates node ID --> English string lookup. node_lookup =", "CvBridge() self._sub = rospy.Subscriber('/usb_cam/image_raw', Image, self.callback, queue_size=1) self._pub = rospy.Publisher('result',", "np import tensorflow as tf import classify_image class RosTensorFlow(): def", "rospy.get_param('~score_threshold', 0.1) self.use_top_k = rospy.get_param('~use_top_k', 5) def callback(self, image_msg): cv_image", "def callback(self, image_msg): cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, \"bgr8\") # copy from", "import CvBridge import cv2 import numpy as np import tensorflow", "import numpy as np import tensorflow as tf import classify_image", "predictions = self._session.run( softmax_tensor, {'DecodeJpeg/contents:0': image_data}) predictions = np.squeeze(predictions) #", "cv2.imencode('.jpg', cv_image)[1].tostring() # Creates graph from saved GraphDef. 
softmax_tensor =", "%.5f)' % (human_string, score)) self._pub.publish(human_string) def main(self): rospy.spin() if __name__", "predictions = np.squeeze(predictions) # Creates node ID --> English string", "classify_image.create_graph() self._cv_bridge = CvBridge() self._sub = rospy.Subscriber('/usb_cam/image_raw', Image, self.callback, queue_size=1)", "String from cv_bridge import CvBridge import cv2 import numpy as", "cv_bridge import CvBridge import cv2 import numpy as np import", "self._sub = rospy.Subscriber('/usb_cam/image_raw', Image, self.callback, queue_size=1) self._pub = rospy.Publisher('result', String,", "np.squeeze(predictions) # Creates node ID --> English string lookup. node_lookup", "string lookup. node_lookup = classify_image.NodeLookup() top_k = predictions.argsort()[-self.use_top_k:][::-1] for node_id", "from std_msgs.msg import String from cv_bridge import CvBridge import cv2", "from sensor_msgs.msg import Image from std_msgs.msg import String from cv_bridge", "queue_size=1) self.score_threshold = rospy.get_param('~score_threshold', 0.1) self.use_top_k = rospy.get_param('~use_top_k', 5) def", "import tensorflow as tf import classify_image class RosTensorFlow(): def __init__(self):", "= node_lookup.id_to_string(node_id) score = predictions[node_id] if score > self.score_threshold: rospy.loginfo('%s", "image_msg): cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, \"bgr8\") # copy from # classify_image.py", "rospy.loginfo('%s (score = %.5f)' % (human_string, score)) self._pub.publish(human_string) def main(self):", "rospy.Subscriber('/usb_cam/image_raw', Image, self.callback, queue_size=1) self._pub = rospy.Publisher('result', String, queue_size=1) self.score_threshold", "English string lookup. 
node_lookup = classify_image.NodeLookup() top_k = predictions.argsort()[-self.use_top_k:][::-1] for", "top_k = predictions.argsort()[-self.use_top_k:][::-1] for node_id in top_k: human_string = node_lookup.id_to_string(node_id)", "rospy.get_param('~use_top_k', 5) def callback(self, image_msg): cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, \"bgr8\") #", "classify_image.py image_data = cv2.imencode('.jpg', cv_image)[1].tostring() # Creates graph from saved", "import classify_image class RosTensorFlow(): def __init__(self): classify_image.maybe_download_and_extract() self._session = tf.Session()", "= np.squeeze(predictions) # Creates node ID --> English string lookup.", "__init__(self): classify_image.maybe_download_and_extract() self._session = tf.Session() classify_image.create_graph() self._cv_bridge = CvBridge() self._sub", "for node_id in top_k: human_string = node_lookup.id_to_string(node_id) score = predictions[node_id]", "top_k: human_string = node_lookup.id_to_string(node_id) score = predictions[node_id] if score >", "self._cv_bridge.imgmsg_to_cv2(image_msg, \"bgr8\") # copy from # classify_image.py image_data = cv2.imencode('.jpg',", "queue_size=1) self._pub = rospy.Publisher('result', String, queue_size=1) self.score_threshold = rospy.get_param('~score_threshold', 0.1)", "image_data = cv2.imencode('.jpg', cv_image)[1].tostring() # Creates graph from saved GraphDef.", "as np import tensorflow as tf import classify_image class RosTensorFlow():", "(human_string, score)) self._pub.publish(human_string) def main(self): rospy.spin() if __name__ == '__main__':", "= rospy.get_param('~use_top_k', 5) def callback(self, image_msg): cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, \"bgr8\")", "graph from saved GraphDef. 
softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0') predictions = self._session.run(", "# copy from # classify_image.py image_data = cv2.imencode('.jpg', cv_image)[1].tostring() #", "Image, self.callback, queue_size=1) self._pub = rospy.Publisher('result', String, queue_size=1) self.score_threshold =", "= classify_image.NodeLookup() top_k = predictions.argsort()[-self.use_top_k:][::-1] for node_id in top_k: human_string", "tf import classify_image class RosTensorFlow(): def __init__(self): classify_image.maybe_download_and_extract() self._session =", "cv_image)[1].tostring() # Creates graph from saved GraphDef. softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0')", "score)) self._pub.publish(human_string) def main(self): rospy.spin() if __name__ == '__main__': classify_image.setup_args()", "self.callback, queue_size=1) self._pub = rospy.Publisher('result', String, queue_size=1) self.score_threshold = rospy.get_param('~score_threshold',", "= rospy.Publisher('result', String, queue_size=1) self.score_threshold = rospy.get_param('~score_threshold', 0.1) self.use_top_k =", "= CvBridge() self._sub = rospy.Subscriber('/usb_cam/image_raw', Image, self.callback, queue_size=1) self._pub =", "main(self): rospy.spin() if __name__ == '__main__': classify_image.setup_args() rospy.init_node('rostensorflow') tensor =", "= %.5f)' % (human_string, score)) self._pub.publish(human_string) def main(self): rospy.spin() if", "def main(self): rospy.spin() if __name__ == '__main__': classify_image.setup_args() rospy.init_node('rostensorflow') tensor", "= rospy.Subscriber('/usb_cam/image_raw', Image, self.callback, queue_size=1) self._pub = rospy.Publisher('result', String, queue_size=1)", "as tf import classify_image class RosTensorFlow(): def __init__(self): classify_image.maybe_download_and_extract() self._session", "self._session.graph.get_tensor_by_name('softmax:0') predictions = self._session.run( softmax_tensor, {'DecodeJpeg/contents:0': image_data}) 
predictions = np.squeeze(predictions)", "= self._session.run( softmax_tensor, {'DecodeJpeg/contents:0': image_data}) predictions = np.squeeze(predictions) # Creates", "tf.Session() classify_image.create_graph() self._cv_bridge = CvBridge() self._sub = rospy.Subscriber('/usb_cam/image_raw', Image, self.callback,", "rospy from sensor_msgs.msg import Image from std_msgs.msg import String from", "def __init__(self): classify_image.maybe_download_and_extract() self._session = tf.Session() classify_image.create_graph() self._cv_bridge = CvBridge()", "import Image from std_msgs.msg import String from cv_bridge import CvBridge", "GraphDef. softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0') predictions = self._session.run( softmax_tensor, {'DecodeJpeg/contents:0': image_data})", "self.score_threshold: rospy.loginfo('%s (score = %.5f)' % (human_string, score)) self._pub.publish(human_string) def", "from saved GraphDef. softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0') predictions = self._session.run( softmax_tensor,", "CvBridge import cv2 import numpy as np import tensorflow as", "self._pub = rospy.Publisher('result', String, queue_size=1) self.score_threshold = rospy.get_param('~score_threshold', 0.1) self.use_top_k", "\"bgr8\") # copy from # classify_image.py image_data = cv2.imencode('.jpg', cv_image)[1].tostring()", "self._session.run( softmax_tensor, {'DecodeJpeg/contents:0': image_data}) predictions = np.squeeze(predictions) # Creates node", "= tf.Session() classify_image.create_graph() self._cv_bridge = CvBridge() self._sub = rospy.Subscriber('/usb_cam/image_raw', Image,", "numpy as np import tensorflow as tf import classify_image class", "self._session = tf.Session() classify_image.create_graph() self._cv_bridge = CvBridge() self._sub = rospy.Subscriber('/usb_cam/image_raw',", "0.1) self.use_top_k = rospy.get_param('~use_top_k', 5) def callback(self, image_msg): cv_image =", "softmax_tensor, {'DecodeJpeg/contents:0': image_data}) 
predictions = np.squeeze(predictions) # Creates node ID", "from cv_bridge import CvBridge import cv2 import numpy as np", "Image from std_msgs.msg import String from cv_bridge import CvBridge import", "score > self.score_threshold: rospy.loginfo('%s (score = %.5f)' % (human_string, score))", "Creates node ID --> English string lookup. node_lookup = classify_image.NodeLookup()", "% (human_string, score)) self._pub.publish(human_string) def main(self): rospy.spin() if __name__ ==", "self.use_top_k = rospy.get_param('~use_top_k', 5) def callback(self, image_msg): cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg,", "rospy.spin() if __name__ == '__main__': classify_image.setup_args() rospy.init_node('rostensorflow') tensor = RosTensorFlow()", "import String from cv_bridge import CvBridge import cv2 import numpy", "in top_k: human_string = node_lookup.id_to_string(node_id) score = predictions[node_id] if score", "lookup. node_lookup = classify_image.NodeLookup() top_k = predictions.argsort()[-self.use_top_k:][::-1] for node_id in", "node_id in top_k: human_string = node_lookup.id_to_string(node_id) score = predictions[node_id] if", "> self.score_threshold: rospy.loginfo('%s (score = %.5f)' % (human_string, score)) self._pub.publish(human_string)", "ID --> English string lookup. node_lookup = classify_image.NodeLookup() top_k =", "import rospy from sensor_msgs.msg import Image from std_msgs.msg import String", "# Creates graph from saved GraphDef. softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0') predictions", "score = predictions[node_id] if score > self.score_threshold: rospy.loginfo('%s (score =", "= self._session.graph.get_tensor_by_name('softmax:0') predictions = self._session.run( softmax_tensor, {'DecodeJpeg/contents:0': image_data}) predictions =", "classify_image.NodeLookup() top_k = predictions.argsort()[-self.use_top_k:][::-1] for node_id in top_k: human_string =" ]
[ "enumerate(bots, start=1): buts += f\"{nos}〉 [{ujwal.user.first_name}](tg://user?id={ujwal.user.id}) \\n\" await pablo.edit(buts) @speedo_on_cmd(", "LogIt(message) await log.log_msg(client, d) @speedo_on_cmd( [\"mute\"], only_if_admin=True, group_only=True, cmd_help={ \"help\":", "unbun = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me me_ =", "await event.edit(engine.get_string(\"NOT_ADMIN\")) return if not message.reply_to_message: await event.edit(engine.get_string(\"NEEDS_REPLY\").format(\"Message To Purge.\"))", "image)\", }, ) async def magic_grps(client, message): engine = message.Engine", "ujwal.username or ujwal.id messag = f\"\"\" <b>Admins in {ujwal.title} |", "async def ban_world(client, message): engine = message.Engine bun = await", "\"Not Specified!\" if userz == me_m.id: await unbun.edit(engine.get_string(\"TF_DO_IT\").format(\"Un-Ban\")) return try:", "user_ = await client.get_users(userm) except BaseException as e: await unbun.edit(engine.get_string(\"USER_MISSING\").format(e))", "ujwal = await client.get_chat(message.chat.id) peer = await client.resolve_peer(message.chat.id) online_ =", ") return try: user = await client.get_users(userl) except BaseException as", "{mentions} \"\"\" await edit_or_send_as_file( messag, pablo, client, f\"`AdminList Of {holy}!`\",", "if member.user.is_deleted: try: await client.kick_chat_member(message.chat.id, member.user.id) s += 1 except:", "await log.log_msg(client, um) @speedo_on_cmd( [\"chatinfo\", \"grpinfo\"], group_only=True, cmd_help={\"help\": \"Get Info", "s.edit(msg) @speedo_on_cmd( [\"purge\"], only_if_admin=True, cmd_help={ \"help\": \"Purge All Messages Till", "except BaseException as e: await pablo.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user.id", "his ID!\", \"example\": \"{ch}mute (reply to user message OR provide", "e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Demote\", e)) return d = f\"**#Demote** \\n**User 
:**", "cmd_help={ \"help\": \"Unmute Replied user or provide his ID!\", \"example\":", "+= f\"**Chat-ID :** __{ujwal.id}__ \\n\" msg += f\"**Verified :** __{ujwal.is_verified}__", "text_ = get_text(message) userm, reason = get_user(message, text_) if not", "\"{ch}demote (reply to user message OR provide his ID)\", },", "ID)\", }, ) async def ban_world(client, message): engine = message.Engine", "= await client.resolve_peer(message.chat.id) online_ = await client.send(pyrogram.raw.functions.messages.GetOnlines(peer=peer)) msg = \"**Chat", "= 0 dc = 0 async for member in client.iter_chat_members(message.chat.id):", "engine.get_string(\"TOTAL_ZOMBIES_ADMINS\").format(da) if dc > 0: text += engine.get_string(\"GRP_OWNER_IS_ZOMBIE\") d =", "\"{ch}promote (reply to user message OR provide his ID)\", },", "__{ujwal.description}__ \\n\" msg += f\"**Chat Members Count :** __{ujwal.members_count}__ \\n\"", "message_ids=message_ids, revoke=True ) message_ids.clear() if message_ids: await client.delete_messages( chat_id=message.chat.id, message_ids=message_ids,", "me_m.id: await bun.edit(engine.get_string(\"TF_DO_IT\").format(\"Ban\")) return try: user_ = await client.get_users(userz) except", "if not Res: Res = \"Admeme\" if userz == me_m.id:", "can_pin_messages=False, can_promote_members=False, ) except BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Demote\", e))", "dc = 0 async for member in client.iter_chat_members(message.chat.id): if member.user.is_deleted:", "user message OR provide his ID)\", }, ) async def", "client.get_users(userm) except BaseException as e: await unbun.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz =", "async def ujwal_unmute(client, message): engine = message.Engine pablo = await", "== me_m.id: await unbun.edit(engine.get_string(\"TF_DO_IT\").format(\"Un-Ban\")) return try: await client.unban_chat_member(message.chat.id, int(user_.id)) except", "userm: await unbun.edit( 
engine.get_string(\"TO_DO\").format(\"Un-Ban\") ) return try: user_ = await", "as e: await pablo.edit(engine.get_string(\"CANT_FETCH_ADMIN\").format(\"Admins\", e)) return for midhun in X:", "try: await client.promote_chat_member( message.chat.id, user.id, can_change_info=me_.can_change_info, can_delete_messages=me_.can_delete_messages, can_restrict_members=me_.can_restrict_members, can_invite_users=me_.can_invite_users, can_pin_messages=me_.can_pin_messages,", ":** __{ujwal.username}__ \\n\" if ujwal.description: msg += f\"**Chat Description :**", ") async def ujwal_unmute(client, message): engine = message.Engine pablo =", "user = await client.get_users(userl) except BaseException as e: await pablo.edit(engine.get_string(\"USER_MISSING\").format(e))", "[{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`\" await pablo.edit(d) log = LogIt(message) await", "ID!\", \"example\": \"{ch}promote (reply to user message OR provide his", "if member.user.is_deleted: await sleep(1) if member.status == \"member\": dm +=", ">. 
# # This file is part of < https://github.com/TeamSpeedo/FridayUserBot", "f\"**Chat Username :** __{ujwal.username}__ \\n\" if ujwal.description: msg += f\"**Chat", "edit_or_reply(message, engine.get_string(\"REPLY_TO_PIN\")) try: await client.pin_chat_message( message.chat.id, message.reply_to_message.message_id, disable_notification=True, ) except", "= f\"**#Banned** \\n**User :** [{user_.first_name}](tg://user?id={user_.id}) \\n**Chat :** `{message.chat.title}` \\n**Reason :**", "\"{ch}silentpin (reply to message)\", }, ) async def spin(client, message):", "engine.get_string(\"TO_DO\").format(\"Demote\") ) return try: user = await client.get_users(usero) except BaseException", "0: text += engine.get_string(\"GRP_OWNER_IS_ZOMBIE\") d = dm + da +", "userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"un-mute\")) return try: await client.restrict_chat_member( message.chat.id,", "return try: await client.promote_chat_member( message.chat.id, user.id, can_change_info=me_.can_change_info, can_delete_messages=me_.can_delete_messages, can_restrict_members=me_.can_restrict_members, can_invite_users=me_.can_invite_users,", "me_.can_restrict_members: await bun.edit(engine.get_string(\"NOT_ADMIN\")) return text_ = get_text(message) userk, reason =", "return me_ = await message.chat.get_member(int(client.me.id)) if not me_.can_change_info: await msg_.edit(engine.get_string(\"NOT_ADMIN\"))", "= time.time() u_time = round(end_time - start_time) await event.edit( engine.get_string(\"PURGE_\").format(purge_len,", "List** \\n\\n\" starky = get_text(message) or message.chat.id pablo = await", "f\"\\n{link} {userid}\" holy = ujwal.username or ujwal.id messag = f\"\"\"", "= await message.chat.get_member(int(me_m.id)) if not me_.can_restrict_members: await unbun.edit(engine.get_string(\"NOT_ADMIN\")) return text_", "da > 0: text += engine.get_string(\"TOTAL_ZOMBIES_ADMINS\").format(da) if dc > 0:", "client.promote_chat_member( message.chat.id, user.id, 
can_change_info=me_.can_change_info, can_delete_messages=me_.can_delete_messages, can_restrict_members=me_.can_restrict_members, can_invite_users=me_.can_invite_users, can_pin_messages=me_.can_pin_messages, can_promote_members=me_.can_promote_members, )", "member.status == \"creator\": dc += 1 text = \"**Zombies Report!**", "pyrogram.types import ChatPermissions import pyrogram from main_start.core.decorators import speedo_on_cmd from", "member.status == \"member\": dm += 1 elif member.status == \"administrator\":", "= user.id if userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Mute\")) return try:", "sgname.lower().strip() == \"clean\": me = client.me lol = await is_admin_or_owner(message,", "convert_to_image, convert_vid_to_vidnote, generate_meme, ) @speedo_on_cmd( [\"silentpin\"], only_if_admin=True, cmd_help={ \"help\": \"Pin", "message.chat.get_member(int(me_m.id)) if not me_.can_restrict_members: await bun.edit(engine.get_string(\"NOT_ADMIN\")) return text_ = get_text(message)", "\"gpic\"], cmd_help={ \"help\": \"Set Custom Group Pic, For Lazy Peoples!\",", "not message.reply_to_message: await edit_or_reply(message, engine.get_string(\"REPLY_TO_PIN\")) try: await client.pin_chat_message( message.chat.id, message.reply_to_message.message_id", ">= 100: await client.delete_messages( chat_id=message.chat.id, message_ids=message_ids, revoke=True ) message_ids.clear() if", "message.chat.get_member(int(me_m.id)) if not me_.can_delete_messages: await event.edit(engine.get_string(\"NOT_ADMIN\")) return if not message.reply_to_message:", "await msg_.edit(engine.get_string(\"NOT_ADMIN\")) return cool = await convert_to_image(message, client) if not", ") async def dpins(client, message): engine = message.Engine await client.unpin_all_chat_messages(message.chat.id)", "try: user = await client.get_users(usero) except BaseException as e: await", "engine = message.Engine mentions = \"\" starky = get_text(message) or", ") async def spin(client, 
message): engine = message.Engine if not", "[\"botlist\", \"bot\"], group_only=True, cmd_help={\"help\": \"Get List Of Bots In Chat!\",", "< https://github.com/TeamSpeedo/blob/master/LICENSE > # # All rights reserved. import asyncio", "__{ujwal.dc_id}__ \\n\" if ujwal.username: msg += f\"**Chat Username :** __{ujwal.username}__", "https://github.com/TeamSpeedo/FridayUserBot > project, # and is released under the \"GNU", "return try: await client.restrict_chat_member( message.chat.id, user.id, ChatPermissions(can_send_messages=True) ) except BaseException", "await edit_or_reply(message, engine.get_string(\"PROCESSING\")) try: bots = await client.get_chat_members(starky, filter=\"bots\") except", "or message.chat.id pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) try: X =", "client.iter_history( chat_id=message.chat.id, offset_id=message.reply_to_message.message_id, reverse=True, ): if msg.message_id != message.message_id: purge_len", "user or provide his ID!\", \"example\": \"{ch}promote (reply to user", "can_restrict_members=me_.can_restrict_members, can_invite_users=me_.can_invite_users, can_pin_messages=me_.can_pin_messages, can_promote_members=me_.can_promote_members, ) except BaseException as e: await", "f > 0: text += (engine.get_string(\"FAILED_ZOMBIES\").format(f)) await pablo.edit(text) @speedo_on_cmd( [\"ban\",", "@speedo_on_cmd( [\"chatinfo\", \"grpinfo\"], group_only=True, cmd_help={\"help\": \"Get Info Of The Chat!\",", "\"GNU v3.0 License Agreement\". 
# Please see < https://github.com/TeamSpeedo/blob/master/LICENSE >", "await pablo.edit(p) log = LogIt(message) await log.log_msg(client, p) try: if", "Message!\", \"example\": \"{ch}purge (reply to message)\", }, ) async def", "unbun.edit(engine.get_string(\"TF_DO_IT\").format(\"Un-Ban\")) return try: await client.unban_chat_member(message.chat.id, int(user_.id)) except BaseException as e:", "\"rmpins\"], only_if_admin=True, cmd_help={\"help\": \"Unpin All Pinned Messages!\", \"example\": \"{ch}rmpins\"}, )", "\\n\" msg += f\"**Is Scam :** __{ujwal.is_scam}__ \\n\" msg +=", "\"help\": \"Pin Message With Sending Notification To Members!\", \"example\": \"{ch}pin", "await pablo.edit(text) @speedo_on_cmd( [\"ban\", \"bun\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Ban", "( convert_to_image, convert_vid_to_vidnote, generate_meme, ) @speedo_on_cmd( [\"silentpin\"], only_if_admin=True, cmd_help={ \"help\":", "pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Promote\")) return try: await client.promote_chat_member( message.chat.id, user.id, can_change_info=me_.can_change_info, can_delete_messages=me_.can_delete_messages, can_restrict_members=me_.can_restrict_members,", "try: await client.unban_chat_member(message.chat.id, int(user_.id)) except BaseException as e: await unbun.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Un-Ban\",", "== me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Demote\")) return try: await client.promote_chat_member( message.chat.id, user.id,", "[{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`\" await pablo.edit(m) log = LogIt(message) await", "await message.chat.get_member(int(me_m.id)) if not me_.can_delete_messages: await event.edit(engine.get_string(\"NOT_ADMIN\")) return if not", "BaseException as e: await bun.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Ban\", e)) return b = f\"**#Banned**", "`{message.chat.title}` \\n**Title :** `{Res}`\" await 
pablo.edit(p) log = LogIt(message) await", "user.id, is_anonymous=False, can_change_info=False, can_post_messages=False, can_edit_messages=False, can_delete_messages=False, can_restrict_members=False, can_invite_users=False, can_pin_messages=False, can_promote_members=False,", "edit_or_reply( message, engine.get_string(\"UNABLE_TO_PIN\").format(e) ) return await edit_or_reply(message, engine.get_string(\"PINNED\")) @speedo_on_cmd( [\"unpin\",", "or provide his ID!\", \"example\": \"{ch}mute (reply to user message", "To Members!\", \"example\": \"{ch}pin (reply to messages)\", }, ) async", "message.chat.id pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) try: X = await", "async def unban_world(client, message): engine = message.Engine unbun = await", "): if msg.message_id != message.message_id: purge_len += 1 message_ids.append(msg.message_id) if", "revoke=True, ) await message.delete() @speedo_on_cmd( [\"setgrppic\", \"gpic\"], cmd_help={ \"help\": \"Set", "as e: await unbun.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user_.id if not", "f\"**Chat-ID :** __{ujwal.id}__ \\n\" msg += f\"**Verified :** __{ujwal.is_verified}__ \\n\"", "return try: user = await client.get_users(userl) except BaseException as e:", "await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Demote\")) return try: await client.promote_chat_member( message.chat.id, user.id, is_anonymous=False, can_change_info=False,", "\\n\" await pablo.edit(buts) @speedo_on_cmd( [\"zombies\", \"delusers\"], cmd_help={ \"help\": \"Remove Deleted", "e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Mute\", e)) return m = f\"**#Muted** \\n**User :**", "group_only=True, cmd_help={ \"help\": \"Promote Replied user or provide his ID!\",", "unbun.edit(engine.get_string(\"NOT_ADMIN\")) return text_ = get_text(message) userm, reason = get_user(message, text_)", "= client.me me_ = await message.chat.get_member(int(me_m.id)) if not 
me_.can_restrict_members: await", "\\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`\" await pablo.edit(m) log =", "bun.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Ban\", e)) return b = f\"**#Banned** \\n**User :** [{user_.first_name}](tg://user?id={user_.id}) \\n**Chat", "await client.get_chat(message.chat.id) peer = await client.resolve_peer(message.chat.id) online_ = await client.send(pyrogram.raw.functions.messages.GetOnlines(peer=peer))", "await log.log_msg(client, b) @speedo_on_cmd( [\"unban\", \"unbun\"], only_if_admin=True, group_only=True, cmd_help={ \"help\":", "mentions += f\"\\n{link} {userid}\" holy = ujwal.username or ujwal.id messag", "\"example\": \"{ch}zombies\", }, ) async def ujwalzombie(client, message): engine =", "Sending Notification To Members!\", \"example\": \"{ch}pin (reply to messages)\", },", "def bothub(client, message): engine = message.Engine buts = \"**Bot List**", ":** `{message.chat.title}` \\n**Reason :** `{reason}`\" await bun.edit(b) log = LogIt(message)", "Specified!\" if userz == me_m.id: await bun.edit(engine.get_string(\"TF_DO_IT\").format(\"Ban\")) return try: user_", "\\n\" if ujwal.username: msg += f\"**Chat Username :** __{ujwal.username}__ \\n\"", "= 0 async for member in client.iter_chat_members(message.chat.id): if member.user.is_deleted: try:", "await edit_or_reply(message, engine.get_string(\"PROCESSING\")) if not message.reply_to_message: await msg_.edit(engine.get_string(\"NEEDS_REPLY\").format(\"image\")) return me_", "id)\", }, ) async def unban_world(client, message): engine = message.Engine", "member in client.iter_chat_members(message.chat.id): if member.user.is_deleted: try: await client.kick_chat_member(message.chat.id, member.user.id) s", "cool: await msg_.edit(engine.get_string(\"NEEDS_REPLY\").format(\"a valid media\")) return if not os.path.exists(cool): await", "only_if_admin=True, group_only=True, cmd_help={ \"help\": \"UnBan Replied User or 
provide his", "__{ujwal.username}__ \\n\" if ujwal.description: msg += f\"**Chat Description :** __{ujwal.description}__", "await edit_or_reply(message, engine.get_string(\"REPLY_TO_PIN\")) try: await client.pin_chat_message( message.chat.id, message.reply_to_message.message_id ) except", "import asyncio import os import time from asyncio import sleep", "sleep(1) if member.status == \"member\": dm += 1 elif member.status", "user.id if not Res: Res = \"Admeme\" if userz ==", "pablo.edit(p) log = LogIt(message) await log.log_msg(client, p) try: if Res:", "if Res: await client.set_administrator_title(message.chat.id, user.id, Res) except: pass @speedo_on_cmd( [\"demote\",", "[\"supergroup\", \"channel\"]: me_ = await message.chat.get_member(int(me_m.id)) if not me_.can_delete_messages: await", "group_only=True, cmd_help={ \"help\": \"Unmute Replied user or provide his ID!\",", "< https://github.com/TeamSpeedo/FridayUserBot > project, # and is released under the", "except BaseException as e: await msg_.edit(f\"`Unable To Set Group Photo!", "\"Unpin All Pinned Messages!\", \"example\": \"{ch}rmpins\"}, ) async def dpins(client,", "> project, # and is released under the \"GNU v3.0", "\"prumote\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Promote Replied user or provide", "\\n**Chat :** `{message.chat.title}`\" await pablo.edit(d) log = LogIt(message) await log.log_msg(client,", "Replied Message!\", \"example\": \"{ch}del (reply to message)\", }, ) async", "provide his ID)\", }, ) async def ujwal_mute(client, message): engine", "+= engine.get_string(\"TOTAL_ZOMBIES_USERS\").format(dm) if da > 0: text += engine.get_string(\"TOTAL_ZOMBIES_ADMINS\").format(da) if", "await client.pin_chat_message( message.chat.id, message.reply_to_message.message_id, disable_notification=True, ) except BaseException as e:", "await edit_or_reply( message, engine.get_string(\"UNABLE_TO_PIN\").format(e) ) return await edit_or_reply(message, engine.get_string(\"PINNED\")) 
@speedo_on_cmd(", "= await client.get_chat_members(starky, filter=\"bots\") except BaseException as e: await pablo.edit(engine.get_string(\"CANT_FETCH_ADMIN\").format(\"Bots\",", "engine = message.Engine pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m =", "if userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"un-mute\")) return try: await client.restrict_chat_member(", "as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Demote\", e)) return d = f\"**#Demote** \\n**User", "by TeamSpeed<EMAIL>, < https://github.com/TeamSpeedo >. # # This file is", "await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Mute\", e)) return m = f\"**#Muted** \\n**User :** [{user.first_name}](tg://user?id={user.id})", "from main_start.helper_func.plugin_helpers import ( convert_to_image, convert_vid_to_vidnote, generate_meme, ) @speedo_on_cmd( [\"silentpin\"],", "to user message OR Provide his id)\", }, ) async", "\"Get Adminlist Of Chat!\", \"example\": \"{ch}adminlist\"}, ) async def midhunadmin(client,", "= f\"<code>{midhun.user.id}</code>\" mentions += f\"\\n{link} {userid}\" holy = ujwal.username or", "@speedo_on_cmd( [\"unmute\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Unmute Replied user or", "as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Un-mute\", e)) return um = f\"**#Un_Muted** \\n**User", "> 0: text += (engine.get_string(\"FAILED_ZOMBIES\").format(f)) await pablo.edit(text) @speedo_on_cmd( [\"ban\", \"bun\"],", "async for msg in client.iter_history( chat_id=message.chat.id, offset_id=message.reply_to_message.message_id, reverse=True, ): if", "only_if_admin=True, cmd_help={\"help\": \"Unpin All Pinned Messages!\", \"example\": \"{ch}rmpins\"}, ) async", "[{user_.first_name}](tg://user?id={user_.id}) \\n**Chat :** `{message.chat.title}` \\n**Reason :** `{reason}`\" await bun.edit(b) log", ") return try: user = await 
client.get_users(usero) except BaseException as", "Notification To Members!\", \"example\": \"{ch}silentpin (reply to message)\", }, )", "provide his ID!\", \"example\": \"{ch}ban (reply to user message OR", "1 message_ids.append(msg.message_id) if len(message_ids) >= 100: await client.delete_messages( chat_id=message.chat.id, message_ids=message_ids,", "me_ = await message.chat.get_member(int(me_m.id)) if not me_.can_promote_members: await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return", "me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Promote\")) return try: await client.promote_chat_member( message.chat.id, user.id, can_change_info=me_.can_change_info,", "f += 1 text = \"\" if s > 0:", "await pablo.edit( engine.get_string(\"TO_DO\").format(\"Mute\") ) return try: user = await client.get_users(userf)", "f = 0 async for member in client.iter_chat_members(message.chat.id): if member.user.is_deleted:", "log.log_msg(client, d) @speedo_on_cmd( [\"mute\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Mute Replied", "his ID!\", \"example\": \"{ch}demote (reply to user message OR provide", "as e: await pablo.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user.id if not", "= await client.get_chat_members(starky, filter=\"administrators\") ujwal = await client.get_chat(starky) except BaseException", "= message.Engine unbun = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me", "engine = message.Engine msg_ = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) if not", "+= f\"**Chat Description :** __{ujwal.description}__ \\n\" msg += f\"**Chat Members", "}, ) async def unban_world(client, message): engine = message.Engine unbun", "delmsgs(client, message): engine = message.Engine if not message.reply_to_message: await message.delete()", "User or provide his ID!\", \"example\": \"{ch}ban (reply to user", "[\"purge\"], only_if_admin=True, cmd_help={ \"help\": \"Purge All 
Messages Till Replied Message!\",", "return userz = user.id if userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Demote\"))", "\\n**User :** [{user_.first_name}](tg://user?id={user_.id}) \\n**Chat :** `{message.chat.title}` \\n**Reason :** `{reason}`\" await", "m = f\"**#Muted** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`\" await", "me_ = await message.chat.get_member(int(me_m.id)) if not me_.can_restrict_members: await unbun.edit(engine.get_string(\"NOT_ADMIN\")) return", "\"Promote Replied user or provide his ID!\", \"example\": \"{ch}promote (reply", "as e: await bun.edit(engine.get_string(\"USER_MISSING\").format(e)) return try: await client.kick_chat_member(message.chat.id, int(user_.id)) except", "async def ujwal_mote(client, message): engine = message.Engine pablo = await", "}, ) async def ban_world(client, message): engine = message.Engine bun", "except BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Mute\", e)) return m =", ":** `{Res}`\" await pablo.edit(p) log = LogIt(message) await log.log_msg(client, p)", "__{online_.onlines}__ \\n\" if ujwal.photo: msg += f\"**Chat DC :** __{ujwal.dc_id}__", "= await client.get_chat(starky) except BaseException as e: await pablo.edit(engine.get_string(\"CANT_FETCH_ADMIN\").format(\"Admins\", e))", "client.promote_chat_member( message.chat.id, user.id, is_anonymous=False, can_change_info=False, can_post_messages=False, can_edit_messages=False, can_delete_messages=False, can_restrict_members=False, can_invite_users=False,", "usero = get_user(message, asplit)[0] if not usero: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Demote\")", "BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Un-mute\", e)) return um = f\"**#Un_Muted**", "Chat!\", \"example\": \"{ch}adminlist\"}, ) async def midhunadmin(client, message): engine =", "member.user.is_deleted: try: await 
client.kick_chat_member(message.chat.id, member.user.id) s += 1 except: f", "lol = await is_admin_or_owner(message, me.id) if not lol: await pablo.edit(engine.get_string(\"NOT_ADMIN\"))", "100: await client.delete_messages( chat_id=message.chat.id, message_ids=message_ids, revoke=True ) message_ids.clear() if message_ids:", "}, ) async def magic_grps(client, message): engine = message.Engine msg_", "}, ) async def ujwal_unmute(client, message): engine = message.Engine pablo", "user.id, can_change_info=me_.can_change_info, can_delete_messages=me_.can_delete_messages, can_restrict_members=me_.can_restrict_members, can_invite_users=me_.can_invite_users, can_pin_messages=me_.can_pin_messages, can_promote_members=me_.can_promote_members, ) except BaseException", "asyncio import sleep from pyrogram.types import ChatPermissions import pyrogram from", "from pyrogram.types import ChatPermissions import pyrogram from main_start.core.decorators import speedo_on_cmd", "Res = \"Admeme\" if userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Promote\")) return", "pablo.edit(text) else: await pablo.edit(engine.get_string(\"NO_ZOMBIES\")) return sgname = message.text.split(None, 1)[1] if", "dm = 0 da = 0 dc = 0 async", "message.delete() @speedo_on_cmd( [\"setgrppic\", \"gpic\"], cmd_help={ \"help\": \"Set Custom Group Pic,", "client.get_chat_members(starky, filter=\"bots\") except BaseException as e: await pablo.edit(engine.get_string(\"CANT_FETCH_ADMIN\").format(\"Bots\", e)) return", "Copyright (C) 2020-2021 by TeamSpeed<EMAIL>, < https://github.com/TeamSpeedo >. 
# #", "\"example\": \"{ch}Unmute (reply to user message OR provide his ID)\",", "to message)\", }, ) async def purge(client, message): engine =", "as e: await pablo.edit(engine.get_string(\"CANT_FETCH_ADMIN\").format(\"Bots\", e)) return for nos, ujwal in", "userz == me_m.id: await unbun.edit(engine.get_string(\"TF_DO_IT\").format(\"Un-Ban\")) return try: await client.unban_chat_member(message.chat.id, int(user_.id))", "msg_.edit(f\"`Unable To Set Group Photo! TraceBack : {e}\") return await", "Title :** __{ujwal.title}__ \\n\" msg += f\"**Users Online :** __{online_.onlines}__", "X = await client.get_chat_members(starky, filter=\"administrators\") ujwal = await client.get_chat(starky) except", "get_text(message) usero = get_user(message, asplit)[0] if not usero: await pablo.edit(", "[{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`\" await pablo.edit(um) log = LogIt(message) await", "his ID!\", \"example\": \"{ch}promote (reply to user message OR provide", "his ID!\", \"example\": \"{ch}unban (reply to user message OR Provide", "client.pin_chat_message( message.chat.id, message.reply_to_message.message_id, disable_notification=True, ) except BaseException as e: await", "\"example\": \"{ch}ban (reply to user message OR provide his ID)\",", "if userz == me_m.id: await bun.edit(engine.get_string(\"TF_DO_IT\").format(\"Ban\")) return try: user_ =", "dpins(client, message): engine = message.Engine await client.unpin_all_chat_messages(message.chat.id) await edit_or_reply(message, engine.get_string(\"UNPINNED\"))", "await bun.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user_.id if not reason: reason", "return try: user = await client.get_users(userf) except BaseException as e:", "pyrogram from main_start.core.decorators import speedo_on_cmd from main_start.helper_func.basic_helpers import ( edit_or_reply,", "pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Demote\")) return try: await client.promote_chat_member( 
message.chat.id, user.id, is_anonymous=False, can_change_info=False, can_post_messages=False,", "\"example\": \"{ch}mute (reply to user message OR provide his ID)\",", "as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Mute\", e)) return m = f\"**#Muted** \\n**User", "async def dpins(client, message): engine = message.Engine await client.unpin_all_chat_messages(message.chat.id) await", "+= f\"**Chat Members Count :** __{ujwal.members_count}__ \\n\" if ujwal.photo: kek", "\"bun\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Ban Replied User or provide", "project, # and is released under the \"GNU v3.0 License", "= ujwal.username or ujwal.id messag = f\"\"\" <b>Admins in {ujwal.title}", "me_ = await message.chat.get_member(int(me_m.id)) if not me_.can_restrict_members: await bun.edit(engine.get_string(\"NOT_ADMIN\")) return", "Info** \\n\\n\" msg += f\"**Chat-ID :** __{ujwal.id}__ \\n\" msg +=", "return b = f\"**#Banned** \\n**User :** [{user_.first_name}](tg://user?id={user_.id}) \\n**Chat :** `{message.chat.title}`", "\\n\\n\" msg += f\"**Chat-ID :** __{ujwal.id}__ \\n\" msg += f\"**Verified", "edit_or_reply(message, engine.get_string(\"PROCESSING\")) if len(message.text.split()) == 1: dm = 0 da", "message)\", }, ) async def purge(client, message): engine = message.Engine", "not midhun.user.is_deleted: link = f'✱ <a href=\"tg://user?id={midhun.user.id}\">{midhun.user.first_name}</a>' userid = f\"<code>{midhun.user.id}</code>\"", "ujwal.photo: msg += f\"**Chat DC :** __{ujwal.dc_id}__ \\n\" if ujwal.username:", "int(user_.id)) except BaseException as e: await bun.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Ban\", e)) return b", "ujwal_mute(client, message): engine = message.Engine pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\"))", "\"help\": \"Demote Replied user or provide his ID!\", \"example\": \"{ch}demote", "if not midhun.user.is_deleted: link = f'✱ <a 
href=\"tg://user?id={midhun.user.id}\">{midhun.user.first_name}</a>' userid =", "[\"demote\", \"demute\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Demote Replied user or", "\"{ch}adminlist\"}, ) async def midhunadmin(client, message): engine = message.Engine mentions", "message.chat.type in [\"supergroup\", \"channel\"]: me_ = await message.chat.get_member(int(me_m.id)) if not", "(reply to user message OR provide his ID)\", }, )", "to message)\", }, ) async def delmsgs(client, message): engine =", "buts = \"**Bot List** \\n\\n\" starky = get_text(message) or message.chat.id", "= f\"**#Un_Muted** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`\" await pablo.edit(um)", "return asplit = get_text(message) userl, Res = get_user(message, asplit) if", "\\n\" msg += f\"**Chat Members Count :** __{ujwal.members_count}__ \\n\" if", "message.chat.id, user.id, ChatPermissions(can_send_messages=True) ) except BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Un-mute\",", "await message.chat.get_member(int(me_m.id)) if not me_.can_restrict_members: await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return asplit =", "@speedo_on_cmd( [\"botlist\", \"bot\"], group_only=True, cmd_help={\"help\": \"Get List Of Bots In", "}, ) async def delmsgs(client, message): engine = message.Engine if", "href=\"tg://user?id={midhun.user.id}\">{midhun.user.first_name}</a>' userid = f\"<code>{midhun.user.id}</code>\" mentions += f\"\\n{link} {userid}\" holy =", "return d = f\"**#Demote** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`\"", "await event.edit( engine.get_string(\"PURGE_\").format(purge_len, u_time) ) await asyncio.sleep(3) await event.delete() @speedo_on_cmd(", ") async def ban_world(client, message): engine = message.Engine bun =", "if not userl: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Promote\") ) return try: user", 
"if msg.message_id != message.message_id: purge_len += 1 message_ids.append(msg.message_id) if len(message_ids)", "engine.get_string(\"REPLY_TO_PIN\")) try: await client.pin_chat_message( message.chat.id, message.reply_to_message.message_id ) except BaseException as", "user.id if userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"un-mute\")) return try: await", "\"delusers\"], cmd_help={ \"help\": \"Remove Deleted Accounts In The Group/Channel!\", \"example\":", "}, ) async def ujwalzombie(client, message): engine = message.Engine pablo", "\"{ch}purge (reply to message)\", }, ) async def purge(client, message):", "\\n**Chat :** `{message.chat.title}` \\n**Reason :** `{reason}`\" await unbun.edit(ub) log =", "}, ) async def ujwal_demote(client, message): engine = message.Engine pablo", "filter=\"bots\") except BaseException as e: await pablo.edit(engine.get_string(\"CANT_FETCH_ADMIN\").format(\"Bots\", e)) return for", "\\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}` \\n**Title :** `{Res}`\" await", "+= f\"**Verified :** __{ujwal.is_verified}__ \\n\" msg += f\"**Is Scam :**", "To Members!\", \"example\": \"{ch}silentpin (reply to message)\", }, ) async", "from main_start.core.decorators import speedo_on_cmd from main_start.helper_func.basic_helpers import ( edit_or_reply, edit_or_send_as_file,", "Description :** __{ujwal.description}__ \\n\" msg += f\"**Chat Members Count :**", "= \"\" if s > 0: text += engine.get_string(\"REMOVED_ZOMBIES\").format(s) if", "cool = await convert_to_image(message, client) if not cool: await msg_.edit(engine.get_string(\"NEEDS_REPLY\").format(\"a", "BaseException as e: await bun.edit(engine.get_string(\"USER_MISSING\").format(e)) return try: await client.kick_chat_member(message.chat.id, int(user_.id))", "[{ujwal.user.first_name}](tg://user?id={ujwal.user.id}) \\n\" await pablo.edit(buts) @speedo_on_cmd( [\"zombies\", \"delusers\"], cmd_help={ \"help\": \"Remove", 
"\"grpinfo\"], group_only=True, cmd_help={\"help\": \"Get Info Of The Chat!\", \"example\": \"{ch}chatinfo\"},", "\"clean\": me = client.me lol = await is_admin_or_owner(message, me.id) if", "\"Get List Of Bots In Chat!\", \"example\": \"{ch}botlist\"}, ) async", "= f\"**#Muted** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`\" await pablo.edit(m)", "is_anonymous=False, can_change_info=False, can_post_messages=False, can_edit_messages=False, can_delete_messages=False, can_restrict_members=False, can_invite_users=False, can_pin_messages=False, can_promote_members=False, )", "message): engine = message.Engine if not message.reply_to_message: await message.delete() return", "spin(client, message): engine = message.Engine if not message.reply_to_message: await edit_or_reply(message,", "engine.get_string(\"PURGE_\").format(purge_len, u_time) ) await asyncio.sleep(3) await event.delete() @speedo_on_cmd( [\"del\"], cmd_help={", "Replied User or provide his ID!\", \"example\": \"{ch}ban (reply to", "me_.can_delete_messages: await event.edit(engine.get_string(\"NOT_ADMIN\")) return if not message.reply_to_message: await event.edit(engine.get_string(\"NEEDS_REPLY\").format(\"Message To", "ujwal_unmute(client, message): engine = message.Engine pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\"))", "\"{ch}chatinfo\"}, ) async def owo_chat_info(client, message): engine = message.Engine s", "\"help\": \"Promote Replied user or provide his ID!\", \"example\": \"{ch}promote", "message.reply_to_message.message_id ) except BaseException as e: await edit_or_reply( message, engine.get_string(\"UNABLE_TO_PIN\").format(e)", "= get_user(message, text_) if not userm: await unbun.edit( engine.get_string(\"TO_DO\").format(\"Un-Ban\") )", "engine = message.Engine pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) if len(message.text.split())", "(C) 2020-2021 by TeamSpeed<EMAIL>, < 
https://github.com/TeamSpeedo >. # # This", "\"Pin Message Without Sending Notification To Members!\", \"example\": \"{ch}silentpin (reply", "0: text += engine.get_string(\"TOTAL_ZOMBIES_ADMINS\").format(da) if dc > 0: text +=", "\"admin-lookup-result\", \"html\", ) @speedo_on_cmd( [\"botlist\", \"bot\"], group_only=True, cmd_help={\"help\": \"Get List", "log = LogIt(message) await log.log_msg(client, ub) @speedo_on_cmd( [\"promote\", \"prumote\"], only_if_admin=True,", "can_invite_users=False, can_pin_messages=False, can_promote_members=False, ) except BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Demote\",", "= user.id if userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"un-mute\")) return try:", "\"UnBan Replied User or provide his ID!\", \"example\": \"{ch}unban (reply", "f\"**#Muted** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`\" await pablo.edit(m) log", "client.send_photo(message.chat.id, photo=kek, caption=msg) await s.delete() else: await s.edit(msg) @speedo_on_cmd( [\"purge\"],", "if not reason: reason = \"Not Specified!\" if userz ==", "userf: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Mute\") ) return try: user = await", "client.kick_chat_member(message.chat.id, member.user.id) s += 1 except: f += 1 text", "message OR provide his ID)\", }, ) async def ujwal_mote(client,", "[] purge_len = 0 event = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m", "= await message.chat.get_member(int(me_m.id)) if not me_.can_promote_members: await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return asplit", "messag, pablo, client, f\"`AdminList Of {holy}!`\", \"admin-lookup-result\", \"html\", ) @speedo_on_cmd(", "client.get_users(userl) except BaseException as e: await pablo.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz =", "Group/Channel!\", \"example\": \"{ch}zombies\", }, ) async def 
ujwalzombie(client, message): engine", "@speedo_on_cmd( [\"setgrppic\", \"gpic\"], cmd_help={ \"help\": \"Set Custom Group Pic, For", "not userk: await bun.edit(engine.get_string(\"TO_DO\").format(\"Ban\")) return try: user_ = await client.get_users(userk)", "[\"chatinfo\", \"grpinfo\"], group_only=True, cmd_help={\"help\": \"Get Info Of The Chat!\", \"example\":", "e)) return m = f\"**#Muted** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :**", "BaseException as e: await unbun.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user_.id if", "um = f\"**#Un_Muted** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`\" await", "cmd_help={ \"help\": \"Ban Replied User or provide his ID!\", \"example\":", "\\n**Chat :** `{message.chat.title}`\" await pablo.edit(m) log = LogIt(message) await log.log_msg(client,", "import ChatPermissions import pyrogram from main_start.core.decorators import speedo_on_cmd from main_start.helper_func.basic_helpers", "if not lol: await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return s = 0 f", ") @speedo_on_cmd( [\"botlist\", \"bot\"], group_only=True, cmd_help={\"help\": \"Get List Of Bots", "try: await client.pin_chat_message( message.chat.id, message.reply_to_message.message_id ) except BaseException as e:", "\"example\": \"{ch}pin (reply to messages)\", }, ) async def lpin(client,", "except BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Un-mute\", e)) return um =", "await message.delete() @speedo_on_cmd( [\"setgrppic\", \"gpic\"], cmd_help={ \"help\": \"Set Custom Group", "e)) return p = f\"**#Promote** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :**", "__{ujwal.is_verified}__ \\n\" msg += f\"**Is Scam :** __{ujwal.is_scam}__ \\n\" msg", "userz = user.id if userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"un-mute\")) return", "[\"ban\", \"bun\"], 
only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Ban Replied User or", "not userl: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Promote\") ) return try: user =", ") except BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Mute\", e)) return m", "def ujwal_mute(client, message): engine = message.Engine pablo = await edit_or_reply(message,", ") end_time = time.time() u_time = round(end_time - start_time) await", "message, engine.get_string(\"UNABLE_TO_PIN\").format(e) ) return await edit_or_reply(message, engine.get_string(\"PINNED\")) @speedo_on_cmd( [\"unpin\", \"rmpins\"],", "os import time from asyncio import sleep from pyrogram.types import", "message.Engine await client.unpin_all_chat_messages(message.chat.id) await edit_or_reply(message, engine.get_string(\"UNPINNED\")) @speedo_on_cmd( [\"adminlist\", \"admins\"], cmd_help={\"help\":", "pablo.edit( engine.get_string(\"TO_DO\").format(\"Demote\") ) return try: user = await client.get_users(usero) except", "if member.status == \"member\": dm += 1 elif member.status ==", "= \"**Bot List** \\n\\n\" starky = get_text(message) or message.chat.id pablo", "Replied user or provide his ID!\", \"example\": \"{ch}mute (reply to", ") async def lpin(client, message): engine = message.Engine if not", "f\"**#UnBanned** \\n**User :** [{user_.first_name}](tg://user?id={user_.id}) \\n**Chat :** `{message.chat.title}` \\n**Reason :** `{reason}`\"", "in {ujwal.title} | {holy}</b> {mentions} \"\"\" await edit_or_send_as_file( messag, pablo,", "ID!\", \"example\": \"{ch}Unmute (reply to user message OR provide his", "only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Unmute Replied user or provide his", "if dc > 0: text += engine.get_string(\"GRP_OWNER_IS_ZOMBIE\") d = dm", "async def lpin(client, message): engine = message.Engine if not message.reply_to_message:", "ID)\", }, ) async def ujwal_mote(client, message): engine = message.Engine", "Replied user or 
provide his ID!\", \"example\": \"{ch}demote (reply to", "= await client.download_media(ujwal.photo.big_file_id) await client.send_photo(message.chat.id, photo=kek, caption=msg) await s.delete() else:", "try: await client.kick_chat_member(message.chat.id, int(user_.id)) except BaseException as e: await bun.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Ban\",", "purge_len = 0 event = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m =", "engine = message.Engine start_time = time.time() message_ids = [] purge_len", "main_start.helper_func.plugin_helpers import ( convert_to_image, convert_vid_to_vidnote, generate_meme, ) @speedo_on_cmd( [\"silentpin\"], only_if_admin=True,", "in client.iter_chat_members(message.chat.id): if member.user.is_deleted: await sleep(1) if member.status == \"member\":", "revoke=True ) message_ids.clear() if message_ids: await client.delete_messages( chat_id=message.chat.id, message_ids=message_ids, revoke=True", "edit_or_reply(message, engine.get_string(\"PROCESSING\")) try: X = await client.get_chat_members(starky, filter=\"administrators\") ujwal =", "not usero: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Demote\") ) return try: user =", "== 1: dm = 0 da = 0 dc =", "[{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}` \\n**Title :** `{Res}`\" await pablo.edit(p) log", "+= f\"**Chat Title :** __{ujwal.title}__ \\n\" msg += f\"**Users Online", "engine = message.Engine s = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) ujwal =", ") async def delmsgs(client, message): engine = message.Engine if not", "or provide his ID!\", \"example\": \"{ch}demote (reply to user message", "engine.get_string(\"PROCESSING\")) ujwal = await client.get_chat(message.chat.id) peer = await client.resolve_peer(message.chat.id) online_", "link = f'✱ <a href=\"tg://user?id={midhun.user.id}\">{midhun.user.first_name}</a>' userid = f\"<code>{midhun.user.id}</code>\" mentions +=", "if 
not usero: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Demote\") ) return try: user", "e: await pablo.edit(engine.get_string(\"CANT_FETCH_ADMIN\").format(\"Admins\", e)) return for midhun in X: if", "Sending Notification To Members!\", \"example\": \"{ch}silentpin (reply to message)\", },", "= await edit_or_reply(message, engine.get_string(\"PROCESSING\")) ujwal = await client.get_chat(message.chat.id) peer =", "message.reply_to_message: await edit_or_reply(message, engine.get_string(\"REPLY_TO_PIN\")) try: await client.pin_chat_message( message.chat.id, message.reply_to_message.message_id, disable_notification=True,", "The Group/Channel!\", \"example\": \"{ch}zombies\", }, ) async def ujwalzombie(client, message):", "BaseException as e: await msg_.edit(f\"`Unable To Set Group Photo! TraceBack", "not userf: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Un-Mute\") ) return try: user =", "message.Engine msg_ = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) if not message.reply_to_message: await", "me_.can_restrict_members: await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return asplit = get_text(message) userf = get_user(message,", "Messages!\", \"example\": \"{ch}rmpins\"}, ) async def dpins(client, message): engine =", "time from asyncio import sleep from pyrogram.types import ChatPermissions import", "\"\" if s > 0: text += engine.get_string(\"REMOVED_ZOMBIES\").format(s) if f", "client.restrict_chat_member( message.chat.id, user.id, ChatPermissions(can_send_messages=True) ) except BaseException as e: await", "cmd_help={ \"help\": \"Pin Message With Sending Notification To Members!\", \"example\":", "message): engine = message.Engine await client.unpin_all_chat_messages(message.chat.id) await edit_or_reply(message, engine.get_string(\"UNPINNED\")) @speedo_on_cmd(", "}, ) async def lpin(client, message): engine = message.Engine if", "This file is part of < https://github.com/TeamSpeedo/FridayUserBot > project, #", "await 
pablo.edit( engine.get_string(\"TO_DO\").format(\"Un-Mute\") ) return try: user = await client.get_users(userf)", "__{ujwal.is_scam}__ \\n\" msg += f\"**Chat Title :** __{ujwal.title}__ \\n\" msg", "= LogIt(message) await log.log_msg(client, um) @speedo_on_cmd( [\"chatinfo\", \"grpinfo\"], group_only=True, cmd_help={\"help\":", "group_only=True, cmd_help={ \"help\": \"Demote Replied user or provide his ID!\",", "= 0 async for member in client.iter_chat_members(message.chat.id): if member.user.is_deleted: await", "Pic, For Lazy Peoples!\", \"example\": \"{ch}setgrppic (reply to image)\", },", "message.reply_to_message.message_id, disable_notification=True, ) except BaseException as e: await edit_or_reply( message,", "message.chat.get_member(int(me_m.id)) if not me_.can_promote_members: await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return asplit = get_text(message)", "file is part of < https://github.com/TeamSpeedo/FridayUserBot > project, # and", "client.unban_chat_member(message.chat.id, int(user_.id)) except BaseException as e: await unbun.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Un-Ban\", e)) ub", "get_user, is_admin_or_owner, ) from main_start.helper_func.logger_s import LogIt from main_start.helper_func.plugin_helpers import", "text += engine.get_string(\"REMOVED_ZOMBIES\").format(s) if f > 0: text += (engine.get_string(\"FAILED_ZOMBIES\").format(f))", "start_time = time.time() message_ids = [] purge_len = 0 event", "\"Purge All Messages Till Replied Message!\", \"example\": \"{ch}purge (reply to", "except BaseException as e: await unbun.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user_.id", "ChatPermissions(can_send_messages=False) ) except BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Mute\", e)) return", "ujwal_mote(client, message): engine = message.Engine pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\"))", "msg.message_id != message.message_id: 
purge_len += 1 message_ids.append(msg.message_id) if len(message_ids) >=", "= message.Engine if not message.reply_to_message: await edit_or_reply(message, engine.get_string(\"REPLY_TO_PIN\")) try: await", "== me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"un-mute\")) return try: await client.restrict_chat_member( message.chat.id, user.id,", "+= (engine.get_string(\"WIPE_THEM\")) await pablo.edit(text) else: await pablo.edit(engine.get_string(\"NO_ZOMBIES\")) return sgname =", "Till Replied Message!\", \"example\": \"{ch}purge (reply to message)\", }, )", "await convert_to_image(message, client) if not cool: await msg_.edit(engine.get_string(\"NEEDS_REPLY\").format(\"a valid media\"))", "to message)\", }, ) async def spin(client, message): engine =", "\"Mute Replied user or provide his ID!\", \"example\": \"{ch}mute (reply", "return try: await client.set_chat_photo(message.chat.id, photo=cool) except BaseException as e: await", "message.chat.id, user.id, can_change_info=me_.can_change_info, can_delete_messages=me_.can_delete_messages, can_restrict_members=me_.can_restrict_members, can_invite_users=me_.can_invite_users, can_pin_messages=me_.can_pin_messages, can_promote_members=me_.can_promote_members, ) except", "await client.get_chat(starky) except BaseException as e: await pablo.edit(engine.get_string(\"CANT_FETCH_ADMIN\").format(\"Admins\", e)) return", "For Lazy Peoples!\", \"example\": \"{ch}setgrppic (reply to image)\", }, )", "client.me me_ = await message.chat.get_member(int(me_m.id)) if not me_.can_restrict_members: await unbun.edit(engine.get_string(\"NOT_ADMIN\"))", "message): engine = message.Engine start_time = time.time() message_ids = []", "not os.path.exists(cool): await msg_.edit(engine.get_string(\"INVALID_MEDIA\")) return try: await client.set_chat_photo(message.chat.id, photo=cool) except", "await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Mute\")) return try: await client.restrict_chat_member( message.chat.id, 
user.id, ChatPermissions(can_send_messages=False) )", "not me_.can_change_info: await msg_.edit(engine.get_string(\"NOT_ADMIN\")) return cool = await convert_to_image(message, client)", "1 except: f += 1 text = \"\" if s", "await edit_or_reply(message, engine.get_string(\"PROCESSING\")) ujwal = await client.get_chat(message.chat.id) peer = await", "for member in client.iter_chat_members(message.chat.id): if member.user.is_deleted: await sleep(1) if member.status", "lpin(client, message): engine = message.Engine if not message.reply_to_message: await edit_or_reply(message,", "await client.restrict_chat_member( message.chat.id, user.id, ChatPermissions(can_send_messages=False) ) except BaseException as e:", "pablo.edit(d) log = LogIt(message) await log.log_msg(client, d) @speedo_on_cmd( [\"mute\"], only_if_admin=True,", "m) @speedo_on_cmd( [\"unmute\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Unmute Replied user", "= f\"**#Promote** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}` \\n**Title :**", "await bun.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Ban\", e)) return b = f\"**#Banned** \\n**User :** [{user_.first_name}](tg://user?id={user_.id})", "pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Demote\", e)) return d = f\"**#Demote** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat", "import pyrogram from main_start.core.decorators import speedo_on_cmd from main_start.helper_func.basic_helpers import (", "= f\"\"\" <b>Admins in {ujwal.title} | {holy}</b> {mentions} \"\"\" await", "int(user_.id)) except BaseException as e: await unbun.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Un-Ban\", e)) ub =", "for nos, ujwal in enumerate(bots, start=1): buts += f\"{nos}〉 [{ujwal.user.first_name}](tg://user?id={ujwal.user.id})", "\"administrator\": da += 1 elif member.status == \"creator\": dc +=", "log = LogIt(message) await log.log_msg(client, b) 
@speedo_on_cmd( [\"unban\", \"unbun\"], only_if_admin=True,", "\\n\" msg += f\"**Chat Title :** __{ujwal.title}__ \\n\" msg +=", "[\"unpin\", \"rmpins\"], only_if_admin=True, cmd_help={\"help\": \"Unpin All Pinned Messages!\", \"example\": \"{ch}rmpins\"},", "LogIt(message) await log.log_msg(client, ub) @speedo_on_cmd( [\"promote\", \"prumote\"], only_if_admin=True, group_only=True, cmd_help={", "def purge(client, message): engine = message.Engine start_time = time.time() message_ids", "engine.get_string(\"TO_DO\").format(\"Un-Ban\") ) return try: user_ = await client.get_users(userm) except BaseException", "== me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Mute\")) return try: await client.restrict_chat_member( message.chat.id, user.id,", "not message.reply_to_message: await msg_.edit(engine.get_string(\"NEEDS_REPLY\").format(\"image\")) return me_ = await message.chat.get_member(int(client.me.id)) if", "return async for msg in client.iter_history( chat_id=message.chat.id, offset_id=message.reply_to_message.message_id, reverse=True, ):", "buts += f\"{nos}〉 [{ujwal.user.first_name}](tg://user?id={ujwal.user.id}) \\n\" await pablo.edit(buts) @speedo_on_cmd( [\"zombies\", \"delusers\"],", "generate_meme, ) @speedo_on_cmd( [\"silentpin\"], only_if_admin=True, cmd_help={ \"help\": \"Pin Message Without", "await pablo.edit( engine.get_string(\"TO_DO\").format(\"Promote\") ) return try: user = await client.get_users(userl)", "return try: user = await client.get_users(usero) except BaseException as e:", "\\n**Chat :** `{message.chat.title}`\" await pablo.edit(um) log = LogIt(message) await log.log_msg(client,", "else: await pablo.edit(engine.get_string(\"NO_ZOMBIES\")) return sgname = message.text.split(None, 1)[1] if sgname.lower().strip()", "user_ = await client.get_users(userz) except BaseException as e: await bun.edit(engine.get_string(\"USER_MISSING\").format(e))", "await unbun.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Un-Ban\", e)) 
ub = f\"**#UnBanned** \\n**User :** [{user_.first_name}](tg://user?id={user_.id}) \\n**Chat", "0 dc = 0 async for member in client.iter_chat_members(message.chat.id): if", "try: if Res: await client.set_administrator_title(message.chat.id, user.id, Res) except: pass @speedo_on_cmd(", "\"unbun\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"UnBan Replied User or provide", "me_m = client.me me_ = await message.chat.get_member(int(me_m.id)) if not me_.can_restrict_members:", "engine.get_string(\"PROCESSING\")) if len(message.text.split()) == 1: dm = 0 da =", "await pablo.edit(um) log = LogIt(message) await log.log_msg(client, um) @speedo_on_cmd( [\"chatinfo\",", "await client.kick_chat_member(message.chat.id, member.user.id) s += 1 except: f += 1", "\"Unmute Replied user or provide his ID!\", \"example\": \"{ch}Unmute (reply", "To Set Group Photo! TraceBack : {e}\") return await msg_.edit(engine.get_string(\"DONE_\"))", ":** `{message.chat.title}` \\n**Reason :** `{reason}`\" await unbun.edit(ub) log = LogIt(message)", "await message.chat.get_member(int(client.me.id)) if not me_.can_change_info: await msg_.edit(engine.get_string(\"NOT_ADMIN\")) return cool =", "\"example\": \"{ch}silentpin (reply to message)\", }, ) async def spin(client,", ") from main_start.helper_func.logger_s import LogIt from main_start.helper_func.plugin_helpers import ( convert_to_image,", "await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return asplit = get_text(message) userf = get_user(message, asplit)[0]", "e: await pablo.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user.id if userz ==", "<a href=\"tg://user?id={midhun.user.id}\">{midhun.user.first_name}</a>' userid = f\"<code>{midhun.user.id}</code>\" mentions += f\"\\n{link} {userid}\" holy", "provide his ID)\", }, ) async def ban_world(client, message): engine", "await message.chat.get_member(int(me_m.id)) if not me_.can_restrict_members: await bun.edit(engine.get_string(\"NOT_ADMIN\")) return 
text_ =", "\"Demote Replied user or provide his ID!\", \"example\": \"{ch}demote (reply", "f\"\"\" <b>Admins in {ujwal.title} | {holy}</b> {mentions} \"\"\" await edit_or_send_as_file(", "pass @speedo_on_cmd( [\"demote\", \"demute\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Demote Replied", "pablo.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user.id if userz == me_m.id: await", "message_ids = [] purge_len = 0 event = await edit_or_reply(message,", "Please see < https://github.com/TeamSpeedo/blob/master/LICENSE > # # All rights reserved.", "engine.get_string(\"GRP_OWNER_IS_ZOMBIE\") d = dm + da + dc if d", "\"Remove Deleted Accounts In The Group/Channel!\", \"example\": \"{ch}zombies\", }, )", "can_pin_messages=me_.can_pin_messages, can_promote_members=me_.can_promote_members, ) except BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Promote\", e))", "try: await client.kick_chat_member(message.chat.id, member.user.id) s += 1 except: f +=", "0: text += engine.get_string(\"REMOVED_ZOMBIES\").format(s) if f > 0: text +=", "or provide his ID!\", \"example\": \"{ch}promote (reply to user message", "cmd_help={ \"help\": \"Demote Replied user or provide his ID!\", \"example\":", "= get_user(message, asplit)[0] if not userf: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Un-Mute\") )", "member.user.is_deleted: await sleep(1) if member.status == \"member\": dm += 1", "await edit_or_reply(message, engine.get_string(\"REPLY_TO_PIN\")) try: await client.pin_chat_message( message.chat.id, message.reply_to_message.message_id, disable_notification=True, )", "or provide his ID!\", \"example\": \"{ch}unban (reply to user message", "caption=msg) await s.delete() else: await s.edit(msg) @speedo_on_cmd( [\"purge\"], only_if_admin=True, cmd_help={", "}, ) async def purge(client, message): engine = message.Engine start_time", "return for nos, ujwal in enumerate(bots, start=1): buts += f\"{nos}〉", 
"bun.edit(engine.get_string(\"NOT_ADMIN\")) return text_ = get_text(message) userk, reason = get_user(message, text_)", "msg += f\"**Chat Username :** __{ujwal.username}__ \\n\" if ujwal.description: msg", "== \"clean\": me = client.me lol = await is_admin_or_owner(message, me.id)", "await client.restrict_chat_member( message.chat.id, user.id, ChatPermissions(can_send_messages=True) ) except BaseException as e:", "unbun.edit( engine.get_string(\"TO_DO\").format(\"Un-Ban\") ) return try: user_ = await client.get_users(userm) except", ") async def ujwal_mote(client, message): engine = message.Engine pablo =", "p = f\"**#Promote** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}` \\n**Title", "await bun.edit(engine.get_string(\"NOT_ADMIN\")) return text_ = get_text(message) userk, reason = get_user(message,", "(reply to message)\", }, ) async def purge(client, message): engine", "{holy}</b> {mentions} \"\"\" await edit_or_send_as_file( messag, pablo, client, f\"`AdminList Of", "= \"**Zombies Report!** \\n\\n\" if dm > 0: text +=", "from asyncio import sleep from pyrogram.types import ChatPermissions import pyrogram", "import time from asyncio import sleep from pyrogram.types import ChatPermissions", "e: await unbun.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Un-Ban\", e)) ub = f\"**#UnBanned** \\n**User :** [{user_.first_name}](tg://user?id={user_.id})", "userk, reason = get_user(message, text_) if not userk: await bun.edit(engine.get_string(\"TO_DO\").format(\"Ban\"))", "reason = \"Not Specified!\" if userz == me_m.id: await bun.edit(engine.get_string(\"TF_DO_IT\").format(\"Ban\"))", "get_user(message, asplit) if not userl: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Promote\") ) return", "ID!\", \"example\": \"{ch}ban (reply to user message OR provide his", "if userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Promote\")) return try: await client.promote_chat_member(", 
"f\"**Users Online :** __{online_.onlines}__ \\n\" if ujwal.photo: msg += f\"**Chat", "def delmsgs(client, message): engine = message.Engine if not message.reply_to_message: await", "message OR provide his ID)\", }, ) async def ban_world(client,", "return if not message.reply_to_message: await event.edit(engine.get_string(\"NEEDS_REPLY\").format(\"Message To Purge.\")) return async", "return m = f\"**#Muted** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`\"", "Accounts In The Group/Channel!\", \"example\": \"{ch}zombies\", }, ) async def", "s > 0: text += engine.get_string(\"REMOVED_ZOMBIES\").format(s) if f > 0:", "def ujwalzombie(client, message): engine = message.Engine pablo = await edit_or_reply(message,", "except BaseException as e: await edit_or_reply( message, engine.get_string(\"UNABLE_TO_PIN\").format(e) ) return", "purge_len += 1 message_ids.append(msg.message_id) if len(message_ids) >= 100: await client.delete_messages(", "+= f\"**Chat Username :** __{ujwal.username}__ \\n\" if ujwal.description: msg +=", "= get_user(message, text_) if not userk: await bun.edit(engine.get_string(\"TO_DO\").format(\"Ban\")) return try:", "await message.chat.get_member(int(me_m.id)) if not me_.can_restrict_members: await unbun.edit(engine.get_string(\"NOT_ADMIN\")) return text_ =", "\"help\": \"Delete Replied Message!\", \"example\": \"{ch}del (reply to message)\", },", "user.id if userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Demote\")) return try: await", "can_restrict_members=False, can_invite_users=False, can_pin_messages=False, can_promote_members=False, ) except BaseException as e: await", "except BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Demote\", e)) return d =", "All Pinned Messages!\", \"example\": \"{ch}rmpins\"}, ) async def dpins(client, message):", "pablo.edit(m) log = LogIt(message) await log.log_msg(client, m) @speedo_on_cmd( 
[\"unmute\"], only_if_admin=True,", "me_ = await message.chat.get_member(int(me_m.id)) if not me_.can_restrict_members: await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return", "unbun.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Un-Ban\", e)) ub = f\"**#UnBanned** \\n**User :** [{user_.first_name}](tg://user?id={user_.id}) \\n**Chat :**", "= await client.get_chat(message.chat.id) peer = await client.resolve_peer(message.chat.id) online_ = await", "f\"**#Demote** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`\" await pablo.edit(d) log", "Count :** __{ujwal.members_count}__ \\n\" if ujwal.photo: kek = await client.download_media(ujwal.photo.big_file_id)", "p) try: if Res: await client.set_administrator_title(message.chat.id, user.id, Res) except: pass", "message.Engine unbun = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me me_", "await msg_.edit(engine.get_string(\"INVALID_MEDIA\")) return try: await client.set_chat_photo(message.chat.id, photo=cool) except BaseException as", "\"Pin Message With Sending Notification To Members!\", \"example\": \"{ch}pin (reply", "message_ids.append(msg.message_id) if len(message_ids) >= 100: await client.delete_messages( chat_id=message.chat.id, message_ids=message_ids, revoke=True", "+= 1 text = \"\" if s > 0: text", "{ujwal.title} | {holy}</b> {mentions} \"\"\" await edit_or_send_as_file( messag, pablo, client,", "`{message.chat.title}` \\n**Reason :** `{reason}`\" await unbun.edit(ub) log = LogIt(message) await", "\"Admeme\" if userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Promote\")) return try: await", "import speedo_on_cmd from main_start.helper_func.basic_helpers import ( edit_or_reply, edit_or_send_as_file, get_text, get_user,", "kek = await client.download_media(ujwal.photo.big_file_id) await client.send_photo(message.chat.id, photo=kek, caption=msg) await s.delete()", "= await edit_or_reply(message, 
engine.get_string(\"PROCESSING\")) try: X = await client.get_chat_members(starky, filter=\"administrators\")", "await bun.edit(engine.get_string(\"TF_DO_IT\").format(\"Ban\")) return try: user_ = await client.get_users(userz) except BaseException", "await edit_or_send_as_file( messag, pablo, client, f\"`AdminList Of {holy}!`\", \"admin-lookup-result\", \"html\",", "asplit = get_text(message) userf = get_user(message, asplit)[0] if not userf:", "0: text += engine.get_string(\"TOTAL_ZOMBIES_USERS\").format(dm) if da > 0: text +=", "pablo.edit(text) @speedo_on_cmd( [\"ban\", \"bun\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Ban Replied", "is released under the \"GNU v3.0 License Agreement\". # Please", "bun.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user_.id if not reason: reason =", "provide his ID)\", }, ) async def ujwal_demote(client, message): engine", "= message.Engine pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me", "await client.delete_messages( chat_id=message.chat.id, message_ids=[message.reply_to_message.message_id], revoke=True, ) await message.delete() @speedo_on_cmd( [\"setgrppic\",", "> # # All rights reserved. 
import asyncio import os", "return try: user_ = await client.get_users(userm) except BaseException as e:", "msg_.edit(engine.get_string(\"NOT_ADMIN\")) return cool = await convert_to_image(message, client) if not cool:", "message_ids: await client.delete_messages( chat_id=message.chat.id, message_ids=message_ids, revoke=True ) end_time = time.time()", ":** __{ujwal.dc_id}__ \\n\" if ujwal.username: msg += f\"**Chat Username :**", "engine.get_string(\"PROCESSING\")) me_m = client.me me_ = await message.chat.get_member(int(me_m.id)) if not", "Of The Chat!\", \"example\": \"{ch}chatinfo\"}, ) async def owo_chat_info(client, message):", ":** `{message.chat.title}` \\n**Title :** `{Res}`\" await pablo.edit(p) log = LogIt(message)", "group_only=True, cmd_help={ \"help\": \"Ban Replied User or provide his ID!\",", "starky = get_text(message) or message.chat.id pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\"))", "}, ) async def ujwal_mute(client, message): engine = message.Engine pablo", "try: bots = await client.get_chat_members(starky, filter=\"bots\") except BaseException as e:", "edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me if message.chat.type in [\"supergroup\", \"channel\"]:", "message.message_id: purge_len += 1 message_ids.append(msg.message_id) if len(message_ids) >= 100: await", "return for midhun in X: if not midhun.user.is_deleted: link =", "[\"setgrppic\", \"gpic\"], cmd_help={ \"help\": \"Set Custom Group Pic, For Lazy", "= message.text.split(None, 1)[1] if sgname.lower().strip() == \"clean\": me = client.me", "ujwal.username: msg += f\"**Chat Username :** __{ujwal.username}__ \\n\" if ujwal.description:", "try: await client.set_chat_photo(message.chat.id, photo=cool) except BaseException as e: await msg_.edit(f\"`Unable", "userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Mute\")) return try: await client.restrict_chat_member( message.chat.id,", "= message.Engine await 
client.unpin_all_chat_messages(message.chat.id) await edit_or_reply(message, engine.get_string(\"UNPINNED\")) @speedo_on_cmd( [\"adminlist\", \"admins\"],", "msg_.edit(engine.get_string(\"NEEDS_REPLY\").format(\"a valid media\")) return if not os.path.exists(cool): await msg_.edit(engine.get_string(\"INVALID_MEDIA\")) return", "Members!\", \"example\": \"{ch}pin (reply to messages)\", }, ) async def", "msg += f\"**Verified :** __{ujwal.is_verified}__ \\n\" msg += f\"**Is Scam", "s = 0 f = 0 async for member in", "b = f\"**#Banned** \\n**User :** [{user_.first_name}](tg://user?id={user_.id}) \\n**Chat :** `{message.chat.title}` \\n**Reason", "if not message.reply_to_message: await msg_.edit(engine.get_string(\"NEEDS_REPLY\").format(\"image\")) return me_ = await message.chat.get_member(int(client.me.id))", "messages)\", }, ) async def lpin(client, message): engine = message.Engine", "= await client.get_users(userl) except BaseException as e: await pablo.edit(engine.get_string(\"USER_MISSING\").format(e)) return", "text = \"**Zombies Report!** \\n\\n\" if dm > 0: text", "(engine.get_string(\"FAILED_ZOMBIES\").format(f)) await pablo.edit(text) @speedo_on_cmd( [\"ban\", \"bun\"], only_if_admin=True, group_only=True, cmd_help={ \"help\":", "(reply to message)\", }, ) async def delmsgs(client, message): engine", "me_.can_restrict_members: await unbun.edit(engine.get_string(\"NOT_ADMIN\")) return text_ = get_text(message) userm, reason =", "= \"Admeme\" if userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Promote\")) return try:", "engine.get_string(\"PINNED\")) @speedo_on_cmd( [\"unpin\", \"rmpins\"], only_if_admin=True, cmd_help={\"help\": \"Unpin All Pinned Messages!\",", "pablo.edit( engine.get_string(\"TO_DO\").format(\"Un-Mute\") ) return try: user = await client.get_users(userf) except", "The Chat!\", \"example\": \"{ch}chatinfo\"}, ) async def owo_chat_info(client, message): engine", "edit_or_send_as_file( messag, pablo, client, 
f\"`AdminList Of {holy}!`\", \"admin-lookup-result\", \"html\", )", "await client.get_users(userm) except BaseException as e: await unbun.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz", "userl, Res = get_user(message, asplit) if not userl: await pablo.edit(", "def ujwal_unmute(client, message): engine = message.Engine pablo = await edit_or_reply(message,", "message OR provide his ID)\", }, ) async def ujwal_demote(client,", "Specified!\" if userz == me_m.id: await unbun.edit(engine.get_string(\"TF_DO_IT\").format(\"Un-Ban\")) return try: await", "can_delete_messages=me_.can_delete_messages, can_restrict_members=me_.can_restrict_members, can_invite_users=me_.can_invite_users, can_pin_messages=me_.can_pin_messages, can_promote_members=me_.can_promote_members, ) except BaseException as e:", "# This file is part of < https://github.com/TeamSpeedo/FridayUserBot > project,", "client.delete_messages( chat_id=message.chat.id, message_ids=[message.reply_to_message.message_id], revoke=True, ) await message.delete() @speedo_on_cmd( [\"setgrppic\", \"gpic\"],", ") except BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Promote\", e)) return p", ") except BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Un-mute\", e)) return um", "@speedo_on_cmd( [\"zombies\", \"delusers\"], cmd_help={ \"help\": \"Remove Deleted Accounts In The", "return try: await client.promote_chat_member( message.chat.id, user.id, is_anonymous=False, can_change_info=False, can_post_messages=False, can_edit_messages=False,", "sleep from pyrogram.types import ChatPermissions import pyrogram from main_start.core.decorators import", "await unbun.edit( engine.get_string(\"TO_DO\").format(\"Un-Ban\") ) return try: user_ = await client.get_users(userm)", "filter=\"administrators\") ujwal = await client.get_chat(starky) except BaseException as e: await", "not reason: reason = \"Not Specified!\" if userz == me_m.id:", 
"bots = await client.get_chat_members(starky, filter=\"bots\") except BaseException as e: await", "is part of < https://github.com/TeamSpeedo/FridayUserBot > project, # and is", "b) @speedo_on_cmd( [\"unban\", \"unbun\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"UnBan Replied", "message)\", }, ) async def delmsgs(client, message): engine = message.Engine", "message.chat.get_member(int(me_m.id)) if not me_.can_restrict_members: await unbun.edit(engine.get_string(\"NOT_ADMIN\")) return text_ = get_text(message)", "Res: Res = \"Admeme\" if userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Promote\"))", "= user_.id if not reason: reason = \"Not Specified!\" if", "TeamSpeed<EMAIL>, < https://github.com/TeamSpeedo >. # # This file is part", "time.time() u_time = round(end_time - start_time) await event.edit( engine.get_string(\"PURGE_\").format(purge_len, u_time)", "f\"**Chat DC :** __{ujwal.dc_id}__ \\n\" if ujwal.username: msg += f\"**Chat", "get_text(message) userk, reason = get_user(message, text_) if not userk: await", "@speedo_on_cmd( [\"ban\", \"bun\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Ban Replied User", "- start_time) await event.edit( engine.get_string(\"PURGE_\").format(purge_len, u_time) ) await asyncio.sleep(3) await", "for member in client.iter_chat_members(message.chat.id): if member.user.is_deleted: try: await client.kick_chat_member(message.chat.id, member.user.id)", "\"channel\"]: me_ = await message.chat.get_member(int(me_m.id)) if not me_.can_delete_messages: await event.edit(engine.get_string(\"NOT_ADMIN\"))", "reason = \"Not Specified!\" if userz == me_m.id: await unbun.edit(engine.get_string(\"TF_DO_IT\").format(\"Un-Ban\"))", "get_text(message) userl, Res = get_user(message, asplit) if not userl: await", "pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me me_ =", "ujwal_demote(client, message): engine = message.Engine pablo = await 
edit_or_reply(message, engine.get_string(\"PROCESSING\"))", "user or provide his ID!\", \"example\": \"{ch}demote (reply to user", "can_promote_members=me_.can_promote_members, ) except BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Promote\", e)) return", "Report!** \\n\\n\" if dm > 0: text += engine.get_string(\"TOTAL_ZOMBIES_USERS\").format(dm) if", "holy = ujwal.username or ujwal.id messag = f\"\"\" <b>Admins in", "engine.get_string(\"UNABLE_TO_PIN\").format(e) ) return await edit_or_reply(message, engine.get_string(\"PINNED\")) @speedo_on_cmd( [\"pinloud\", \"pin\"], only_if_admin=True,", "async def ujwalzombie(client, message): engine = message.Engine pablo = await", "if not message.reply_to_message: await message.delete() return await client.delete_messages( chat_id=message.chat.id, message_ids=[message.reply_to_message.message_id],", "ujwal.id messag = f\"\"\" <b>Admins in {ujwal.title} | {holy}</b> {mentions}", "provide his ID!\", \"example\": \"{ch}promote (reply to user message OR", "or provide his ID!\", \"example\": \"{ch}Unmute (reply to user message", "dc if d > 0: text += (engine.get_string(\"WIPE_THEM\")) await pablo.edit(text)", "Replied user or provide his ID!\", \"example\": \"{ch}promote (reply to", "}, ) async def spin(client, message): engine = message.Engine if", ":** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`\" await pablo.edit(m) log = LogIt(message)", "\"{ch}ban (reply to user message OR provide his ID)\", },", ") async def ujwalzombie(client, message): engine = message.Engine pablo =", "edit_or_reply( message, engine.get_string(\"UNABLE_TO_PIN\").format(e) ) return await edit_or_reply(message, engine.get_string(\"PINNED\")) @speedo_on_cmd( [\"pinloud\",", ":** `{reason}`\" await bun.edit(b) log = LogIt(message) await log.log_msg(client, b)", "edit_or_reply(message, engine.get_string(\"PINNED\")) @speedo_on_cmd( [\"unpin\", \"rmpins\"], only_if_admin=True, 
cmd_help={\"help\": \"Unpin All Pinned", "await message.delete() return await client.delete_messages( chat_id=message.chat.id, message_ids=[message.reply_to_message.message_id], revoke=True, ) await", "await event.delete() @speedo_on_cmd( [\"del\"], cmd_help={ \"help\": \"Delete Replied Message!\", \"example\":", "import os import time from asyncio import sleep from pyrogram.types", "await client.delete_messages( chat_id=message.chat.id, message_ids=message_ids, revoke=True ) end_time = time.time() u_time", "chat_id=message.chat.id, message_ids=message_ids, revoke=True ) end_time = time.time() u_time = round(end_time", "Group Pic, For Lazy Peoples!\", \"example\": \"{ch}setgrppic (reply to image)\",", "elif member.status == \"administrator\": da += 1 elif member.status ==", "client.restrict_chat_member( message.chat.id, user.id, ChatPermissions(can_send_messages=False) ) except BaseException as e: await", "msg_.edit(engine.get_string(\"NEEDS_REPLY\").format(\"image\")) return me_ = await message.chat.get_member(int(client.me.id)) if not me_.can_change_info: await", "as e: await bun.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Ban\", e)) return b = f\"**#Banned** \\n**User", "await client.promote_chat_member( message.chat.id, user.id, is_anonymous=False, can_change_info=False, can_post_messages=False, can_edit_messages=False, can_delete_messages=False, can_restrict_members=False,", "pablo.edit(engine.get_string(\"NOT_ADMIN\")) return s = 0 f = 0 async for", "e: await pablo.edit(engine.get_string(\"CANT_FETCH_ADMIN\").format(\"Bots\", e)) return for nos, ujwal in enumerate(bots,", "await s.edit(msg) @speedo_on_cmd( [\"purge\"], only_if_admin=True, cmd_help={ \"help\": \"Purge All Messages", "[\"adminlist\", \"admins\"], cmd_help={\"help\": \"Get Adminlist Of Chat!\", \"example\": \"{ch}adminlist\"}, )", "= get_user(message, asplit)[0] if not usero: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Demote\") )", "await pablo.edit(d) log = 
LogIt(message) await log.log_msg(client, d) @speedo_on_cmd( [\"mute\"],", "except BaseException as e: await pablo.edit(engine.get_string(\"CANT_FETCH_ADMIN\").format(\"Admins\", e)) return for midhun", "\"{ch}Unmute (reply to user message OR provide his ID)\", },", "| {holy}</b> {mentions} \"\"\" await edit_or_send_as_file( messag, pablo, client, f\"`AdminList", "return try: await client.unban_chat_member(message.chat.id, int(user_.id)) except BaseException as e: await", "\"example\": \"{ch}setgrppic (reply to image)\", }, ) async def magic_grps(client,", "async def midhunadmin(client, message): engine = message.Engine mentions = \"\"", "import LogIt from main_start.helper_func.plugin_helpers import ( convert_to_image, convert_vid_to_vidnote, generate_meme, )", "\"{ch}mute (reply to user message OR provide his ID)\", },", "me_.can_change_info: await msg_.edit(engine.get_string(\"NOT_ADMIN\")) return cool = await convert_to_image(message, client) if", "Custom Group Pic, For Lazy Peoples!\", \"example\": \"{ch}setgrppic (reply to", "pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) try: X = await client.get_chat_members(starky,", "message.chat.get_member(int(client.me.id)) if not me_.can_change_info: await msg_.edit(engine.get_string(\"NOT_ADMIN\")) return cool = await", "text_) if not userm: await unbun.edit( engine.get_string(\"TO_DO\").format(\"Un-Ban\") ) return try:", "await message.chat.get_member(int(me_m.id)) asplit = get_text(message) usero = get_user(message, asplit)[0] if", "0 f = 0 async for member in client.iter_chat_members(message.chat.id): if", "if userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Mute\")) return try: await client.restrict_chat_member(", "await client.get_users(userk) except BaseException as e: await bun.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz", "await client.set_administrator_title(message.chat.id, user.id, Res) except: pass @speedo_on_cmd( [\"demote\", 
\"demute\"], only_if_admin=True,", "dm + da + dc if d > 0: text", "Peoples!\", \"example\": \"{ch}setgrppic (reply to image)\", }, ) async def", "import ( edit_or_reply, edit_or_send_as_file, get_text, get_user, is_admin_or_owner, ) from main_start.helper_func.logger_s", "await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Demote\", e)) return d = f\"**#Demote** \\n**User :** [{user.first_name}](tg://user?id={user.id})", "\"example\": \"{ch}del (reply to message)\", }, ) async def delmsgs(client,", "client.set_chat_photo(message.chat.id, photo=cool) except BaseException as e: await msg_.edit(f\"`Unable To Set", "try: await client.promote_chat_member( message.chat.id, user.id, is_anonymous=False, can_change_info=False, can_post_messages=False, can_edit_messages=False, can_delete_messages=False,", "pablo.edit(buts) @speedo_on_cmd( [\"zombies\", \"delusers\"], cmd_help={ \"help\": \"Remove Deleted Accounts In", "is_admin_or_owner(message, me.id) if not lol: await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return s =", "= get_text(message) userl, Res = get_user(message, asplit) if not userl:", "user_.id if not reason: reason = \"Not Specified!\" if userz", "message.Engine if not message.reply_to_message: await message.delete() return await client.delete_messages( chat_id=message.chat.id,", "\"html\", ) @speedo_on_cmd( [\"botlist\", \"bot\"], group_only=True, cmd_help={\"help\": \"Get List Of", "f\"{nos}〉 [{ujwal.user.first_name}](tg://user?id={ujwal.user.id}) \\n\" await pablo.edit(buts) @speedo_on_cmd( [\"zombies\", \"delusers\"], cmd_help={ \"help\":", "\"{ch}botlist\"}, ) async def bothub(client, message): engine = message.Engine buts", "asplit) if not userl: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Promote\") ) return try:", ") async def ujwal_mute(client, message): engine = message.Engine pablo =", "if len(message.text.split()) == 1: dm = 0 da = 0", "ban_world(client, message): engine = message.Engine bun = await 
edit_or_reply(message, engine.get_string(\"PROCESSING\"))", "\"example\": \"{ch}purge (reply to message)\", }, ) async def purge(client,", "await client.get_chat_members(starky, filter=\"bots\") except BaseException as e: await pablo.edit(engine.get_string(\"CANT_FETCH_ADMIN\").format(\"Bots\", e))", "def spin(client, message): engine = message.Engine if not message.reply_to_message: await", ":** `{reason}`\" await unbun.edit(ub) log = LogIt(message) await log.log_msg(client, ub)", "pablo.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user.id if not Res: Res =", "Notification To Members!\", \"example\": \"{ch}pin (reply to messages)\", }, )", "if len(message_ids) >= 100: await client.delete_messages( chat_id=message.chat.id, message_ids=message_ids, revoke=True )", "`{message.chat.title}`\" await pablo.edit(m) log = LogIt(message) await log.log_msg(client, m) @speedo_on_cmd(", ":** [{user_.first_name}](tg://user?id={user_.id}) \\n**Chat :** `{message.chat.title}` \\n**Reason :** `{reason}`\" await bun.edit(b)", "# # This file is part of < https://github.com/TeamSpeedo/FridayUserBot >", "da + dc if d > 0: text += (engine.get_string(\"WIPE_THEM\"))", "await unbun.edit(ub) log = LogIt(message) await log.log_msg(client, ub) @speedo_on_cmd( [\"promote\",", "text += engine.get_string(\"GRP_OWNER_IS_ZOMBIE\") d = dm + da + dc", "engine.get_string(\"PINNED\")) @speedo_on_cmd( [\"pinloud\", \"pin\"], only_if_admin=True, cmd_help={ \"help\": \"Pin Message With", "engine.get_string(\"TO_DO\").format(\"Promote\") ) return try: user = await client.get_users(userl) except BaseException", "\\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`\" await pablo.edit(um) log =", "return userz = user.id if userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Mute\"))", "return asplit = get_text(message) userf = get_user(message, asplit)[0] if not", "2020-2021 by TeamSpeed<EMAIL>, < 
https://github.com/TeamSpeedo >. # # This file", "<b>Admins in {ujwal.title} | {holy}</b> {mentions} \"\"\" await edit_or_send_as_file( messag,", "to messages)\", }, ) async def lpin(client, message): engine =", "@speedo_on_cmd( [\"del\"], cmd_help={ \"help\": \"Delete Replied Message!\", \"example\": \"{ch}del (reply", "message.chat.id pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) try: bots = await", "return cool = await convert_to_image(message, client) if not cool: await", "(engine.get_string(\"WIPE_THEM\")) await pablo.edit(text) else: await pablo.edit(engine.get_string(\"NO_ZOMBIES\")) return sgname = message.text.split(None,", "message.Engine s = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) ujwal = await client.get_chat(message.chat.id)", "= await message.chat.get_member(int(client.me.id)) if not me_.can_change_info: await msg_.edit(engine.get_string(\"NOT_ADMIN\")) return cool", "cmd_help={ \"help\": \"Promote Replied user or provide his ID!\", \"example\":", "msg += f\"**Chat DC :** __{ujwal.dc_id}__ \\n\" if ujwal.username: msg", "msg = \"**Chat Info** \\n\\n\" msg += f\"**Chat-ID :** __{ujwal.id}__", "event = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me if message.chat.type", "cmd_help={ \"help\": \"Set Custom Group Pic, For Lazy Peoples!\", \"example\":", "if s > 0: text += engine.get_string(\"REMOVED_ZOMBIES\").format(s) if f >", "asyncio.sleep(3) await event.delete() @speedo_on_cmd( [\"del\"], cmd_help={ \"help\": \"Delete Replied Message!\",", "await edit_or_reply(message, engine.get_string(\"PROCESSING\")) if len(message.text.split()) == 1: dm = 0", "his ID)\", }, ) async def ujwal_demote(client, message): engine =", "not Res: Res = \"Admeme\" if userz == me_m.id: await", "purge(client, message): engine = message.Engine start_time = time.time() message_ids =", "f\"**#Un_Muted** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** 
`{message.chat.title}`\" await pablo.edit(um) log", "0: text += (engine.get_string(\"WIPE_THEM\")) await pablo.edit(text) else: await pablo.edit(engine.get_string(\"NO_ZOMBIES\")) return", "+= 1 except: f += 1 text = \"\" if", "= message.Engine mentions = \"\" starky = get_text(message) or message.chat.id", "message.Engine start_time = time.time() message_ids = [] purge_len = 0", "Of Chat!\", \"example\": \"{ch}adminlist\"}, ) async def midhunadmin(client, message): engine", "see < https://github.com/TeamSpeedo/blob/master/LICENSE > # # All rights reserved. import", "engine.get_string(\"UNPINNED\")) @speedo_on_cmd( [\"adminlist\", \"admins\"], cmd_help={\"help\": \"Get Adminlist Of Chat!\", \"example\":", "only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Promote Replied user or provide his", "\"Not Specified!\" if userz == me_m.id: await bun.edit(engine.get_string(\"TF_DO_IT\").format(\"Ban\")) return try:", "await unbun.edit(engine.get_string(\"TF_DO_IT\").format(\"Un-Ban\")) return try: await client.unban_chat_member(message.chat.id, int(user_.id)) except BaseException as", "not userf: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Mute\") ) return try: user =", "client.get_users(userk) except BaseException as e: await bun.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz =", "message.Engine buts = \"**Bot List** \\n\\n\" starky = get_text(message) or", "client.me lol = await is_admin_or_owner(message, me.id) if not lol: await", "+= f\"**Users Online :** __{online_.onlines}__ \\n\" if ujwal.photo: msg +=", "edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me me_ = await message.chat.get_member(int(me_m.id)) if", "Deleted Accounts In The Group/Channel!\", \"example\": \"{ch}zombies\", }, ) async", "License Agreement\". 
# Please see < https://github.com/TeamSpeedo/blob/master/LICENSE > # #", "if userz == me_m.id: await unbun.edit(engine.get_string(\"TF_DO_IT\").format(\"Un-Ban\")) return try: await client.unban_chat_member(message.chat.id,", "e)) return um = f\"**#Un_Muted** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :**", "user.id, Res) except: pass @speedo_on_cmd( [\"demote\", \"demute\"], only_if_admin=True, group_only=True, cmd_help={", "dc > 0: text += engine.get_string(\"GRP_OWNER_IS_ZOMBIE\") d = dm +", "dc += 1 text = \"**Zombies Report!** \\n\\n\" if dm", "message): engine = message.Engine pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m", "log.log_msg(client, m) @speedo_on_cmd( [\"unmute\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Unmute Replied", "as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Promote\", e)) return p = f\"**#Promote** \\n**User", "0: text += (engine.get_string(\"FAILED_ZOMBIES\").format(f)) await pablo.edit(text) @speedo_on_cmd( [\"ban\", \"bun\"], only_if_admin=True,", "client.get_chat_members(starky, filter=\"administrators\") ujwal = await client.get_chat(starky) except BaseException as e:", "Username :** __{ujwal.username}__ \\n\" if ujwal.description: msg += f\"**Chat Description", "if not userf: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Mute\") ) return try: user", "da = 0 dc = 0 async for member in", "try: user = await client.get_users(userf) except BaseException as e: await", "await log.log_msg(client, ub) @speedo_on_cmd( [\"promote\", \"prumote\"], only_if_admin=True, group_only=True, cmd_help={ \"help\":", "if not message.reply_to_message: await edit_or_reply(message, engine.get_string(\"REPLY_TO_PIN\")) try: await client.pin_chat_message( message.chat.id,", "text += engine.get_string(\"TOTAL_ZOMBIES_USERS\").format(dm) if da > 0: text += engine.get_string(\"TOTAL_ZOMBIES_ADMINS\").format(da)", "1: dm = 0 da = 0 dc = 0", 
"convert_to_image(message, client) if not cool: await msg_.edit(engine.get_string(\"NEEDS_REPLY\").format(\"a valid media\")) return", "in enumerate(bots, start=1): buts += f\"{nos}〉 [{ujwal.user.first_name}](tg://user?id={ujwal.user.id}) \\n\" await pablo.edit(buts)", "Pinned Messages!\", \"example\": \"{ch}rmpins\"}, ) async def dpins(client, message): engine", "return text_ = get_text(message) userk, reason = get_user(message, text_) if", "BaseException as e: await pablo.edit(engine.get_string(\"CANT_FETCH_ADMIN\").format(\"Bots\", e)) return for nos, ujwal", "dm > 0: text += engine.get_string(\"TOTAL_ZOMBIES_USERS\").format(dm) if da > 0:", "if not userk: await bun.edit(engine.get_string(\"TO_DO\").format(\"Ban\")) return try: user_ = await", "e: await unbun.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user_.id if not reason:", "asplit = get_text(message) usero = get_user(message, asplit)[0] if not usero:", "DC :** __{ujwal.dc_id}__ \\n\" if ujwal.username: msg += f\"**Chat Username", "pablo.edit(um) log = LogIt(message) await log.log_msg(client, um) @speedo_on_cmd( [\"chatinfo\", \"grpinfo\"],", "messag = f\"\"\" <b>Admins in {ujwal.title} | {holy}</b> {mentions} \"\"\"", "cmd_help={ \"help\": \"Pin Message Without Sending Notification To Members!\", \"example\":", "\"help\": \"Ban Replied User or provide his ID!\", \"example\": \"{ch}ban", "event.delete() @speedo_on_cmd( [\"del\"], cmd_help={ \"help\": \"Delete Replied Message!\", \"example\": \"{ch}del", "e)) return b = f\"**#Banned** \\n**User :** [{user_.first_name}](tg://user?id={user_.id}) \\n**Chat :**", "= client.me await message.chat.get_member(int(me_m.id)) asplit = get_text(message) usero = get_user(message,", "if f > 0: text += (engine.get_string(\"FAILED_ZOMBIES\").format(f)) await pablo.edit(text) @speedo_on_cmd(", "user or provide his ID!\", \"example\": \"{ch}mute (reply to user", "lol: await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return s = 0 f = 0", "me_m = 
client.me if message.chat.type in [\"supergroup\", \"channel\"]: me_ =", ":** __{online_.onlines}__ \\n\" if ujwal.photo: msg += f\"**Chat DC :**", "> 0: text += engine.get_string(\"REMOVED_ZOMBIES\").format(s) if f > 0: text", "d > 0: text += (engine.get_string(\"WIPE_THEM\")) await pablo.edit(text) else: await", "\"help\": \"Pin Message Without Sending Notification To Members!\", \"example\": \"{ch}silentpin", "message): engine = message.Engine s = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) ujwal", "= await edit_or_reply(message, engine.get_string(\"PROCESSING\")) if not message.reply_to_message: await msg_.edit(engine.get_string(\"NEEDS_REPLY\").format(\"image\")) return", "BaseException as e: await bun.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user_.id if", ":** `{message.chat.title}`\" await pablo.edit(um) log = LogIt(message) await log.log_msg(client, um)", "reason = get_user(message, text_) if not userm: await unbun.edit( engine.get_string(\"TO_DO\").format(\"Un-Ban\")", "ID)\", }, ) async def ujwal_mute(client, message): engine = message.Engine", "= message.Engine msg_ = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) if not message.reply_to_message:", "e: await bun.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Ban\", e)) return b = f\"**#Banned** \\n**User :**", "pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"un-mute\")) return try: await client.restrict_chat_member( message.chat.id, user.id, ChatPermissions(can_send_messages=True) ) except", "await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me if message.chat.type in [\"supergroup\",", "except BaseException as e: await bun.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user_.id", "OR Provide his id)\", }, ) async def unban_world(client, message):", "await client.promote_chat_member( message.chat.id, user.id, can_change_info=me_.can_change_info, 
can_delete_messages=me_.can_delete_messages, can_restrict_members=me_.can_restrict_members, can_invite_users=me_.can_invite_users, can_pin_messages=me_.can_pin_messages, can_promote_members=me_.can_promote_members,", "= \"\" starky = get_text(message) or message.chat.id pablo = await", "await pablo.edit( engine.get_string(\"TO_DO\").format(\"Demote\") ) return try: user = await client.get_users(usero)", "or ujwal.id messag = f\"\"\" <b>Admins in {ujwal.title} | {holy}</b>", "get_text(message) userm, reason = get_user(message, text_) if not userm: await", "await client.delete_messages( chat_id=message.chat.id, message_ids=message_ids, revoke=True ) message_ids.clear() if message_ids: await", "his ID!\", \"example\": \"{ch}Unmute (reply to user message OR provide", "\"Delete Replied Message!\", \"example\": \"{ch}del (reply to message)\", }, )", "return if not os.path.exists(cool): await msg_.edit(engine.get_string(\"INVALID_MEDIA\")) return try: await client.set_chat_photo(message.chat.id,", "await client.resolve_peer(message.chat.id) online_ = await client.send(pyrogram.raw.functions.messages.GetOnlines(peer=peer)) msg = \"**Chat Info**", "midhunadmin(client, message): engine = message.Engine mentions = \"\" starky =", "client.me await message.chat.get_member(int(me_m.id)) asplit = get_text(message) usero = get_user(message, asplit)[0]", "e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Un-mute\", e)) return um = f\"**#Un_Muted** \\n**User :**", "asplit = get_text(message) userl, Res = get_user(message, asplit) if not", "await pablo.edit(engine.get_string(\"CANT_FETCH_ADMIN\").format(\"Admins\", e)) return for midhun in X: if not", "OR provide his ID)\", }, ) async def ban_world(client, message):", "ID)\", }, ) async def ujwal_unmute(client, message): engine = message.Engine", "can_delete_messages=False, can_restrict_members=False, can_invite_users=False, can_pin_messages=False, can_promote_members=False, ) except BaseException as e:", 
"end_time = time.time() u_time = round(end_time - start_time) await event.edit(", "await sleep(1) if member.status == \"member\": dm += 1 elif", "= await client.get_users(userk) except BaseException as e: await bun.edit(engine.get_string(\"USER_MISSING\").format(e)) return", "group_only=True, cmd_help={ \"help\": \"Mute Replied user or provide his ID!\",", "client, f\"`AdminList Of {holy}!`\", \"admin-lookup-result\", \"html\", ) @speedo_on_cmd( [\"botlist\", \"bot\"],", "= get_text(message) userf = get_user(message, asplit)[0] if not userf: await", "\"admins\"], cmd_help={\"help\": \"Get Adminlist Of Chat!\", \"example\": \"{ch}adminlist\"}, ) async", "message.reply_to_message: await event.edit(engine.get_string(\"NEEDS_REPLY\").format(\"Message To Purge.\")) return async for msg in", "me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Mute\")) return try: await client.restrict_chat_member( message.chat.id, user.id, ChatPermissions(can_send_messages=False)", "\\n\\n\" if dm > 0: text += engine.get_string(\"TOTAL_ZOMBIES_USERS\").format(dm) if da", "< https://github.com/TeamSpeedo >. 
# # This file is part of", "get_text(message) userf = get_user(message, asplit)[0] if not userf: await pablo.edit(", "mentions = \"\" starky = get_text(message) or message.chat.id pablo =", "media\")) return if not os.path.exists(cool): await msg_.edit(engine.get_string(\"INVALID_MEDIA\")) return try: await", "cmd_help={ \"help\": \"Delete Replied Message!\", \"example\": \"{ch}del (reply to message)\",", "}, ) async def ujwal_mote(client, message): engine = message.Engine pablo", "pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Un-mute\", e)) return um = f\"**#Un_Muted** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat", "LogIt(message) await log.log_msg(client, p) try: if Res: await client.set_administrator_title(message.chat.id, user.id,", "await s.delete() else: await s.edit(msg) @speedo_on_cmd( [\"purge\"], only_if_admin=True, cmd_help={ \"help\":", "engine.get_string(\"REMOVED_ZOMBIES\").format(s) if f > 0: text += (engine.get_string(\"FAILED_ZOMBIES\").format(f)) await pablo.edit(text)", "await pablo.edit(engine.get_string(\"NO_ZOMBIES\")) return sgname = message.text.split(None, 1)[1] if sgname.lower().strip() ==", "disable_notification=True, ) except BaseException as e: await edit_or_reply( message, engine.get_string(\"UNABLE_TO_PIN\").format(e)", "try: user_ = await client.get_users(userz) except BaseException as e: await", "Replied user or provide his ID!\", \"example\": \"{ch}Unmute (reply to", "= await convert_to_image(message, client) if not cool: await msg_.edit(engine.get_string(\"NEEDS_REPLY\").format(\"a valid", "e)) return d = f\"**#Demote** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :**", "+= f\"{nos}〉 [{ujwal.user.first_name}](tg://user?id={ujwal.user.id}) \\n\" await pablo.edit(buts) @speedo_on_cmd( [\"zombies\", \"delusers\"], cmd_help={", "engine.get_string(\"UNABLE_TO_PIN\").format(e) ) return await edit_or_reply(message, engine.get_string(\"PINNED\")) @speedo_on_cmd( [\"unpin\", 
\"rmpins\"], only_if_admin=True,", "= client.me lol = await is_admin_or_owner(message, me.id) if not lol:", "not cool: await msg_.edit(engine.get_string(\"NEEDS_REPLY\").format(\"a valid media\")) return if not os.path.exists(cool):", "provide his ID!\", \"example\": \"{ch}mute (reply to user message OR", "@speedo_on_cmd( [\"unpin\", \"rmpins\"], only_if_admin=True, cmd_help={\"help\": \"Unpin All Pinned Messages!\", \"example\":", "0 da = 0 dc = 0 async for member", "me_m.id: await unbun.edit(engine.get_string(\"TF_DO_IT\").format(\"Un-Ban\")) return try: await client.unban_chat_member(message.chat.id, int(user_.id)) except BaseException", "\"**Chat Info** \\n\\n\" msg += f\"**Chat-ID :** __{ujwal.id}__ \\n\" msg", "client.unpin_all_chat_messages(message.chat.id) await edit_or_reply(message, engine.get_string(\"UNPINNED\")) @speedo_on_cmd( [\"adminlist\", \"admins\"], cmd_help={\"help\": \"Get Adminlist", "= await client.get_users(userf) except BaseException as e: await pablo.edit(engine.get_string(\"USER_MISSING\").format(e)) return", "async def delmsgs(client, message): engine = message.Engine if not message.reply_to_message:", "return await edit_or_reply(message, engine.get_string(\"PINNED\")) @speedo_on_cmd( [\"unpin\", \"rmpins\"], only_if_admin=True, cmd_help={\"help\": \"Unpin", "cmd_help={\"help\": \"Get List Of Bots In Chat!\", \"example\": \"{ch}botlist\"}, )", "only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Ban Replied User or provide his", "> 0: text += engine.get_string(\"GRP_OWNER_IS_ZOMBIE\") d = dm + da", "v3.0 License Agreement\". 
# Please see < https://github.com/TeamSpeedo/blob/master/LICENSE > #", "pablo.edit(engine.get_string(\"CANT_FETCH_ADMIN\").format(\"Admins\", e)) return for midhun in X: if not midhun.user.is_deleted:", "userl: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Promote\") ) return try: user = await", "= f'✱ <a href=\"tg://user?id={midhun.user.id}\">{midhun.user.first_name}</a>' userid = f\"<code>{midhun.user.id}</code>\" mentions += f\"\\n{link}", "message OR Provide his id)\", }, ) async def unban_world(client,", "return try: user_ = await client.get_users(userz) except BaseException as e:", "Res: await client.set_administrator_title(message.chat.id, user.id, Res) except: pass @speedo_on_cmd( [\"demote\", \"demute\"],", "if message_ids: await client.delete_messages( chat_id=message.chat.id, message_ids=message_ids, revoke=True ) end_time =", "engine.get_string(\"PROCESSING\")) try: X = await client.get_chat_members(starky, filter=\"administrators\") ujwal = await", "photo=cool) except BaseException as e: await msg_.edit(f\"`Unable To Set Group", "can_change_info=False, can_post_messages=False, can_edit_messages=False, can_delete_messages=False, can_restrict_members=False, can_invite_users=False, can_pin_messages=False, can_promote_members=False, ) except", "await unbun.edit(engine.get_string(\"NOT_ADMIN\")) return text_ = get_text(message) userm, reason = get_user(message,", "await edit_or_reply(message, engine.get_string(\"PROCESSING\")) try: X = await client.get_chat_members(starky, filter=\"administrators\") ujwal", "ID!\", \"example\": \"{ch}demote (reply to user message OR provide his", "me_m = client.me await message.chat.get_member(int(me_m.id)) asplit = get_text(message) usero =", "as e: await msg_.edit(f\"`Unable To Set Group Photo! 
TraceBack :", "text += engine.get_string(\"TOTAL_ZOMBIES_ADMINS\").format(da) if dc > 0: text += engine.get_string(\"GRP_OWNER_IS_ZOMBIE\")", "userz = user.id if userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Mute\")) return", "= LogIt(message) await log.log_msg(client, ub) @speedo_on_cmd( [\"promote\", \"prumote\"], only_if_admin=True, group_only=True,", "ID!\", \"example\": \"{ch}mute (reply to user message OR provide his", "await client.send_photo(message.chat.id, photo=kek, caption=msg) await s.delete() else: await s.edit(msg) @speedo_on_cmd(", "All rights reserved. import asyncio import os import time from", "if not me_.can_restrict_members: await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return asplit = get_text(message) userf", "= await message.chat.get_member(int(me_m.id)) if not me_.can_delete_messages: await event.edit(engine.get_string(\"NOT_ADMIN\")) return if", "online_ = await client.send(pyrogram.raw.functions.messages.GetOnlines(peer=peer)) msg = \"**Chat Info** \\n\\n\" msg", "BaseException as e: await unbun.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Un-Ban\", e)) ub = f\"**#UnBanned** \\n**User", "msg_ = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) if not message.reply_to_message: await msg_.edit(engine.get_string(\"NEEDS_REPLY\").format(\"image\"))", "= await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me if message.chat.type in", "+= f\"\\n{link} {userid}\" holy = ujwal.username or ujwal.id messag =", "message.reply_to_message: await edit_or_reply(message, engine.get_string(\"REPLY_TO_PIN\")) try: await client.pin_chat_message( message.chat.id, message.reply_to_message.message_id )", "as e: await pablo.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user.id if userz", "return try: await client.restrict_chat_member( message.chat.id, user.id, ChatPermissions(can_send_messages=False) ) except BaseException", 
"engine.get_string(\"TO_DO\").format(\"Un-Mute\") ) return try: user = await client.get_users(userf) except BaseException", "unban_world(client, message): engine = message.Engine unbun = await edit_or_reply(message, engine.get_string(\"PROCESSING\"))", "for msg in client.iter_history( chat_id=message.chat.id, offset_id=message.reply_to_message.message_id, reverse=True, ): if msg.message_id", "cmd_help={\"help\": \"Unpin All Pinned Messages!\", \"example\": \"{ch}rmpins\"}, ) async def", "def lpin(client, message): engine = message.Engine if not message.reply_to_message: await", "asplit)[0] if not usero: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Demote\") ) return try:", "dm += 1 elif member.status == \"administrator\": da += 1", "for midhun in X: if not midhun.user.is_deleted: link = f'✱", "userm, reason = get_user(message, text_) if not userm: await unbun.edit(", "reason: reason = \"Not Specified!\" if userz == me_m.id: await", "@speedo_on_cmd( [\"demote\", \"demute\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Demote Replied user", "client.kick_chat_member(message.chat.id, int(user_.id)) except BaseException as e: await bun.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Ban\", e)) return", "user or provide his ID!\", \"example\": \"{ch}Unmute (reply to user", "edit_or_reply(message, engine.get_string(\"UNPINNED\")) @speedo_on_cmd( [\"adminlist\", \"admins\"], cmd_help={\"help\": \"Get Adminlist Of Chat!\",", "userz = user_.id if not reason: reason = \"Not Specified!\"", "try: user_ = await client.get_users(userk) except BaseException as e: await", "userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Demote\")) return try: await client.promote_chat_member( message.chat.id,", "\\n\" msg += f\"**Verified :** __{ujwal.is_verified}__ \\n\" msg += f\"**Is", "return userz = user_.id if not reason: reason = \"Not", "Of {holy}!`\", \"admin-lookup-result\", \"html\", ) @speedo_on_cmd( [\"botlist\", \"bot\"], 
group_only=True, cmd_help={\"help\":", "not userm: await unbun.edit( engine.get_string(\"TO_DO\").format(\"Un-Ban\") ) return try: user_ =", "return sgname = message.text.split(None, 1)[1] if sgname.lower().strip() == \"clean\": me", "Purge.\")) return async for msg in client.iter_history( chat_id=message.chat.id, offset_id=message.reply_to_message.message_id, reverse=True,", "only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Demote Replied user or provide his", "engine = message.Engine unbun = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m =", "@speedo_on_cmd( [\"pinloud\", \"pin\"], only_if_admin=True, cmd_help={ \"help\": \"Pin Message With Sending", "msg += f\"**Chat Members Count :** __{ujwal.members_count}__ \\n\" if ujwal.photo:", "can_post_messages=False, can_edit_messages=False, can_delete_messages=False, can_restrict_members=False, can_invite_users=False, can_pin_messages=False, can_promote_members=False, ) except BaseException", "me_ = await message.chat.get_member(int(client.me.id)) if not me_.can_change_info: await msg_.edit(engine.get_string(\"NOT_ADMIN\")) return", "+= 1 message_ids.append(msg.message_id) if len(message_ids) >= 100: await client.delete_messages( chat_id=message.chat.id,", "`{reason}`\" await bun.edit(b) log = LogIt(message) await log.log_msg(client, b) @speedo_on_cmd(", "Res) except: pass @speedo_on_cmd( [\"demote\", \"demute\"], only_if_admin=True, group_only=True, cmd_help={ \"help\":", "client.delete_messages( chat_id=message.chat.id, message_ids=message_ids, revoke=True ) message_ids.clear() if message_ids: await client.delete_messages(", "f\"**Chat Description :** __{ujwal.description}__ \\n\" msg += f\"**Chat Members Count", "me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"un-mute\")) return try: await client.restrict_chat_member( message.chat.id, user.id, ChatPermissions(can_send_messages=True)", ") async def magic_grps(client, message): engine = message.Engine msg_ =", 
"\"**Bot List** \\n\\n\" starky = get_text(message) or message.chat.id pablo =", "if d > 0: text += (engine.get_string(\"WIPE_THEM\")) await pablo.edit(text) else:", "ujwal = await client.get_chat(starky) except BaseException as e: await pablo.edit(engine.get_string(\"CANT_FETCH_ADMIN\").format(\"Admins\",", "= await edit_or_reply(message, engine.get_string(\"PROCESSING\")) try: bots = await client.get_chat_members(starky, filter=\"bots\")", "engine.get_string(\"REPLY_TO_PIN\")) try: await client.pin_chat_message( message.chat.id, message.reply_to_message.message_id, disable_notification=True, ) except BaseException", "\\n\" if ujwal.description: msg += f\"**Chat Description :** __{ujwal.description}__ \\n\"", "ujwal.photo: kek = await client.download_media(ujwal.photo.big_file_id) await client.send_photo(message.chat.id, photo=kek, caption=msg) await", "https://github.com/TeamSpeedo >. # # This file is part of <", "(reply to messages)\", }, ) async def lpin(client, message): engine", ":** `{message.chat.title}`\" await pablo.edit(m) log = LogIt(message) await log.log_msg(client, m)", "= get_text(message) or message.chat.id pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) try:", "__{ujwal.id}__ \\n\" msg += f\"**Verified :** __{ujwal.is_verified}__ \\n\" msg +=", "me_ = await message.chat.get_member(int(me_m.id)) if not me_.can_delete_messages: await event.edit(engine.get_string(\"NOT_ADMIN\")) return", "f\"**Verified :** __{ujwal.is_verified}__ \\n\" msg += f\"**Is Scam :** __{ujwal.is_scam}__", "= user.id if not Res: Res = \"Admeme\" if userz", "== \"member\": dm += 1 elif member.status == \"administrator\": da", "return userz = user.id if not Res: Res = \"Admeme\"", "client.me if message.chat.type in [\"supergroup\", \"channel\"]: me_ = await message.chat.get_member(int(me_m.id))", "engine.get_string(\"PROCESSING\")) try: bots = await client.get_chat_members(starky, filter=\"bots\") except BaseException as", "len(message.text.split()) == 
1: dm = 0 da = 0 dc", "message.chat.id, message.reply_to_message.message_id ) except BaseException as e: await edit_or_reply( message,", "await pablo.edit(m) log = LogIt(message) await log.log_msg(client, m) @speedo_on_cmd( [\"unmute\"],", "[\"unban\", \"unbun\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"UnBan Replied User or", "from main_start.helper_func.basic_helpers import ( edit_or_reply, edit_or_send_as_file, get_text, get_user, is_admin_or_owner, )", "# and is released under the \"GNU v3.0 License Agreement\".", "client.iter_chat_members(message.chat.id): if member.user.is_deleted: try: await client.kick_chat_member(message.chat.id, member.user.id) s += 1", "userid = f\"<code>{midhun.user.id}</code>\" mentions += f\"\\n{link} {userid}\" holy = ujwal.username", "in client.iter_chat_members(message.chat.id): if member.user.is_deleted: try: await client.kick_chat_member(message.chat.id, member.user.id) s +=", "unbun.edit(ub) log = LogIt(message) await log.log_msg(client, ub) @speedo_on_cmd( [\"promote\", \"prumote\"],", "[\"pinloud\", \"pin\"], only_if_admin=True, cmd_help={ \"help\": \"Pin Message With Sending Notification", "+= (engine.get_string(\"FAILED_ZOMBIES\").format(f)) await pablo.edit(text) @speedo_on_cmd( [\"ban\", \"bun\"], only_if_admin=True, group_only=True, cmd_help={", "message): engine = message.Engine buts = \"**Bot List** \\n\\n\" starky", ") except BaseException as e: await edit_or_reply( message, engine.get_string(\"UNABLE_TO_PIN\").format(e) )", "me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Demote\")) return try: await client.promote_chat_member( message.chat.id, user.id, is_anonymous=False,", "photo=kek, caption=msg) await s.delete() else: await s.edit(msg) @speedo_on_cmd( [\"purge\"], only_if_admin=True,", "text += (engine.get_string(\"WIPE_THEM\")) await pablo.edit(text) else: await pablo.edit(engine.get_string(\"NO_ZOMBIES\")) return sgname", "user.id, ChatPermissions(can_send_messages=False) ) 
except BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Mute\", e))", "userf = get_user(message, asplit)[0] if not userf: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Un-Mute\")", "else: await s.edit(msg) @speedo_on_cmd( [\"purge\"], only_if_admin=True, cmd_help={ \"help\": \"Purge All", "`{message.chat.title}` \\n**Reason :** `{reason}`\" await bun.edit(b) log = LogIt(message) await", ") async def purge(client, message): engine = message.Engine start_time =", ") return try: user_ = await client.get_users(userm) except BaseException as", "in client.iter_history( chat_id=message.chat.id, offset_id=message.reply_to_message.message_id, reverse=True, ): if msg.message_id != message.message_id:", "\"example\": \"{ch}chatinfo\"}, ) async def owo_chat_info(client, message): engine = message.Engine", "\"example\": \"{ch}botlist\"}, ) async def bothub(client, message): engine = message.Engine", "async def ujwal_demote(client, message): engine = message.Engine pablo = await", "== \"administrator\": da += 1 elif member.status == \"creator\": dc", "try: user = await client.get_users(userl) except BaseException as e: await", "message.Engine mentions = \"\" starky = get_text(message) or message.chat.id pablo", "= await client.get_users(userz) except BaseException as e: await bun.edit(engine.get_string(\"USER_MISSING\").format(e)) return", ") async def unban_world(client, message): engine = message.Engine unbun =", "\"{ch}unban (reply to user message OR Provide his id)\", },", "Replied Message!\", \"example\": \"{ch}purge (reply to message)\", }, ) async", "text = \"\" if s > 0: text += engine.get_string(\"REMOVED_ZOMBIES\").format(s)", "@speedo_on_cmd( [\"silentpin\"], only_if_admin=True, cmd_help={ \"help\": \"Pin Message Without Sending Notification", "\"help\": \"Purge All Messages Till Replied Message!\", \"example\": \"{ch}purge (reply", "client.me me_ = await message.chat.get_member(int(me_m.id)) if not 
me_.can_promote_members: await pablo.edit(engine.get_string(\"NOT_ADMIN\"))", "start=1): buts += f\"{nos}〉 [{ujwal.user.first_name}](tg://user?id={ujwal.user.id}) \\n\" await pablo.edit(buts) @speedo_on_cmd( [\"zombies\",", "except BaseException as e: await pablo.edit(engine.get_string(\"CANT_FETCH_ADMIN\").format(\"Bots\", e)) return for nos,", "cmd_help={\"help\": \"Get Adminlist Of Chat!\", \"example\": \"{ch}adminlist\"}, ) async def", "reserved. import asyncio import os import time from asyncio import", "def owo_chat_info(client, message): engine = message.Engine s = await edit_or_reply(message,", "speedo_on_cmd from main_start.helper_func.basic_helpers import ( edit_or_reply, edit_or_send_as_file, get_text, get_user, is_admin_or_owner,", "BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Mute\", e)) return m = f\"**#Muted**", "@speedo_on_cmd( [\"adminlist\", \"admins\"], cmd_help={\"help\": \"Get Adminlist Of Chat!\", \"example\": \"{ch}adminlist\"},", "engine = message.Engine buts = \"**Bot List** \\n\\n\" starky =", "not me_.can_restrict_members: await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return asplit = get_text(message) userf =", ":** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`\" await pablo.edit(um) log = LogIt(message)", "= message.Engine pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) if len(message.text.split()) ==", "async for member in client.iter_chat_members(message.chat.id): if member.user.is_deleted: await sleep(1) if", "message OR provide his ID)\", }, ) async def ujwal_unmute(client,", "Message Without Sending Notification To Members!\", \"example\": \"{ch}silentpin (reply to", "engine.get_string(\"PROCESSING\")) if not message.reply_to_message: await msg_.edit(engine.get_string(\"NEEDS_REPLY\").format(\"image\")) return me_ = await", "await client.unpin_all_chat_messages(message.chat.id) await edit_or_reply(message, 
engine.get_string(\"UNPINNED\")) @speedo_on_cmd( [\"adminlist\", \"admins\"], cmd_help={\"help\": \"Get", "member in client.iter_chat_members(message.chat.id): if member.user.is_deleted: await sleep(1) if member.status ==", "= 0 da = 0 dc = 0 async for", "\\n**Title :** `{Res}`\" await pablo.edit(p) log = LogIt(message) await log.log_msg(client,", "provide his ID!\", \"example\": \"{ch}demote (reply to user message OR", "sgname = message.text.split(None, 1)[1] if sgname.lower().strip() == \"clean\": me =", "except BaseException as e: await bun.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Ban\", e)) return b =", "Messages Till Replied Message!\", \"example\": \"{ch}purge (reply to message)\", },", "await event.edit(engine.get_string(\"NEEDS_REPLY\").format(\"Message To Purge.\")) return async for msg in client.iter_history(", "await msg_.edit(f\"`Unable To Set Group Photo! TraceBack : {e}\") return", "await msg_.edit(engine.get_string(\"NEEDS_REPLY\").format(\"a valid media\")) return if not os.path.exists(cool): await msg_.edit(engine.get_string(\"INVALID_MEDIA\"))", "not me_.can_restrict_members: await unbun.edit(engine.get_string(\"NOT_ADMIN\")) return text_ = get_text(message) userm, reason", "get_user(message, asplit)[0] if not userf: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Mute\") ) return", "= await message.chat.get_member(int(me_m.id)) if not me_.can_restrict_members: await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return asplit", "= \"Not Specified!\" if userz == me_m.id: await bun.edit(engine.get_string(\"TF_DO_IT\").format(\"Ban\")) return", "return um = f\"**#Un_Muted** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`\"", "pablo.edit(engine.get_string(\"NOT_ADMIN\")) return asplit = get_text(message) userl, Res = get_user(message, asplit)", "= f\"**#Demote** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`\" await pablo.edit(d)", 
"message.Engine if not message.reply_to_message: await edit_or_reply(message, engine.get_string(\"REPLY_TO_PIN\")) try: await client.pin_chat_message(", "if dm > 0: text += engine.get_string(\"TOTAL_ZOMBIES_USERS\").format(dm) if da >", "if userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Demote\")) return try: await client.promote_chat_member(", "engine.get_string(\"TO_DO\").format(\"Mute\") ) return try: user = await client.get_users(userf) except BaseException", "client.iter_chat_members(message.chat.id): if member.user.is_deleted: await sleep(1) if member.status == \"member\": dm", ":** __{ujwal.members_count}__ \\n\" if ujwal.photo: kek = await client.download_media(ujwal.photo.big_file_id) await", "await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Un-mute\", e)) return um = f\"**#Un_Muted** \\n**User :** [{user.first_name}](tg://user?id={user.id})", "= \"**Chat Info** \\n\\n\" msg += f\"**Chat-ID :** __{ujwal.id}__ \\n\"", "if not userm: await unbun.edit( engine.get_string(\"TO_DO\").format(\"Un-Ban\") ) return try: user_", "client.send(pyrogram.raw.functions.messages.GetOnlines(peer=peer)) msg = \"**Chat Info** \\n\\n\" msg += f\"**Chat-ID :**", "userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Promote\")) return try: await client.promote_chat_member( message.chat.id,", "message): engine = message.Engine msg_ = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) if", "= round(end_time - start_time) await event.edit( engine.get_string(\"PURGE_\").format(purge_len, u_time) ) await", "+= 1 elif member.status == \"creator\": dc += 1 text", ") return await edit_or_reply(message, engine.get_string(\"PINNED\")) @speedo_on_cmd( [\"pinloud\", \"pin\"], only_if_admin=True, cmd_help={", "msg += f\"**Users Online :** __{online_.onlines}__ \\n\" if ujwal.photo: msg", "message.text.split(None, 1)[1] if sgname.lower().strip() == \"clean\": me = client.me lol", "bothub(client, message): 
engine = message.Engine buts = \"**Bot List** \\n\\n\"", "= message.Engine s = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) ujwal = await", "ujwal in enumerate(bots, start=1): buts += f\"{nos}〉 [{ujwal.user.first_name}](tg://user?id={ujwal.user.id}) \\n\" await", "under the \"GNU v3.0 License Agreement\". # Please see <", "\"{ch}setgrppic (reply to image)\", }, ) async def magic_grps(client, message):", "await log.log_msg(client, d) @speedo_on_cmd( [\"mute\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Mute", "https://github.com/TeamSpeedo/blob/master/LICENSE > # # All rights reserved. import asyncio import", "start_time) await event.edit( engine.get_string(\"PURGE_\").format(purge_len, u_time) ) await asyncio.sleep(3) await event.delete()", "`{Res}`\" await pablo.edit(p) log = LogIt(message) await log.log_msg(client, p) try:", "userz = user.id if not Res: Res = \"Admeme\" if", "+= f\"**Chat DC :** __{ujwal.dc_id}__ \\n\" if ujwal.username: msg +=", "if message.chat.type in [\"supergroup\", \"channel\"]: me_ = await message.chat.get_member(int(me_m.id)) if", "if not message.reply_to_message: await event.edit(engine.get_string(\"NEEDS_REPLY\").format(\"Message To Purge.\")) return async for", "= time.time() message_ids = [] purge_len = 0 event =", "message): engine = message.Engine mentions = \"\" starky = get_text(message)", "elif member.status == \"creator\": dc += 1 text = \"**Zombies", "get_user(message, text_) if not userk: await bun.edit(engine.get_string(\"TO_DO\").format(\"Ban\")) return try: user_", "OR provide his ID)\", }, ) async def ujwal_unmute(client, message):", "await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"un-mute\")) return try: await client.restrict_chat_member( message.chat.id, user.id, ChatPermissions(can_send_messages=True) )", "try: user_ = await client.get_users(userm) except BaseException as e: await", "me_.can_promote_members: await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return asplit = 
get_text(message) userl, Res =", "if not me_.can_change_info: await msg_.edit(engine.get_string(\"NOT_ADMIN\")) return cool = await convert_to_image(message,", "if not me_.can_restrict_members: await unbun.edit(engine.get_string(\"NOT_ADMIN\")) return text_ = get_text(message) userm,", ":** [{user_.first_name}](tg://user?id={user_.id}) \\n**Chat :** `{message.chat.title}` \\n**Reason :** `{reason}`\" await unbun.edit(ub)", "await client.kick_chat_member(message.chat.id, int(user_.id)) except BaseException as e: await bun.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Ban\", e))", "msg_.edit(engine.get_string(\"INVALID_MEDIA\")) return try: await client.set_chat_photo(message.chat.id, photo=cool) except BaseException as e:", "await pablo.edit(buts) @speedo_on_cmd( [\"zombies\", \"delusers\"], cmd_help={ \"help\": \"Remove Deleted Accounts", "u_time) ) await asyncio.sleep(3) await event.delete() @speedo_on_cmd( [\"del\"], cmd_help={ \"help\":", "if not me_.can_promote_members: await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return asplit = get_text(message) userl,", "edit_or_reply(message, engine.get_string(\"PROCESSING\")) try: bots = await client.get_chat_members(starky, filter=\"bots\") except BaseException", "= await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me await message.chat.get_member(int(me_m.id)) asplit", "text += (engine.get_string(\"FAILED_ZOMBIES\").format(f)) await pablo.edit(text) @speedo_on_cmd( [\"ban\", \"bun\"], only_if_admin=True, group_only=True,", "chat_id=message.chat.id, offset_id=message.reply_to_message.message_id, reverse=True, ): if msg.message_id != message.message_id: purge_len +=", ") async def bothub(client, message): engine = message.Engine buts =", "convert_vid_to_vidnote, generate_meme, ) @speedo_on_cmd( [\"silentpin\"], only_if_admin=True, cmd_help={ \"help\": \"Pin Message", "message.Engine bun = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me me_", 
"os.path.exists(cool): await msg_.edit(engine.get_string(\"INVALID_MEDIA\")) return try: await client.set_chat_photo(message.chat.id, photo=cool) except BaseException", "(reply to message)\", }, ) async def spin(client, message): engine", "e: await pablo.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user.id if not Res:", "Bots In Chat!\", \"example\": \"{ch}botlist\"}, ) async def bothub(client, message):", "\"\" starky = get_text(message) or message.chat.id pablo = await edit_or_reply(message,", "pablo, client, f\"`AdminList Of {holy}!`\", \"admin-lookup-result\", \"html\", ) @speedo_on_cmd( [\"botlist\",", ":** __{ujwal.description}__ \\n\" msg += f\"**Chat Members Count :** __{ujwal.members_count}__", "1 text = \"\" if s > 0: text +=", "\\n**Chat :** `{message.chat.title}` \\n**Reason :** `{reason}`\" await bun.edit(b) log =", "`{reason}`\" await unbun.edit(ub) log = LogIt(message) await log.log_msg(client, ub) @speedo_on_cmd(", "\"{ch}rmpins\"}, ) async def dpins(client, message): engine = message.Engine await", "= 0 f = 0 async for member in client.iter_chat_members(message.chat.id):", "def ban_world(client, message): engine = message.Engine bun = await edit_or_reply(message,", "member.user.id) s += 1 except: f += 1 text =", "Res = get_user(message, asplit) if not userl: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Promote\")", "+= engine.get_string(\"GRP_OWNER_IS_ZOMBIE\") d = dm + da + dc if", "magic_grps(client, message): engine = message.Engine msg_ = await edit_or_reply(message, engine.get_string(\"PROCESSING\"))", "= await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me me_ = await", "message): engine = message.Engine bun = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m", "cmd_help={ \"help\": \"UnBan Replied User or provide his ID!\", \"example\":", "= await message.chat.get_member(int(me_m.id)) if not me_.can_restrict_members: await 
bun.edit(engine.get_string(\"NOT_ADMIN\")) return text_", "except BaseException as e: await unbun.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Un-Ban\", e)) ub = f\"**#UnBanned**", "await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return asplit = get_text(message) userl, Res = get_user(message,", "ChatPermissions(can_send_messages=True) ) except BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Un-mute\", e)) return", "if ujwal.photo: kek = await client.download_media(ujwal.photo.big_file_id) await client.send_photo(message.chat.id, photo=kek, caption=msg)", "engine.get_string(\"PROCESSING\")) me_m = client.me if message.chat.type in [\"supergroup\", \"channel\"]: me_", "await client.set_chat_photo(message.chat.id, photo=cool) except BaseException as e: await msg_.edit(f\"`Unable To", "message)\", }, ) async def spin(client, message): engine = message.Engine", "await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return s = 0 f = 0 async", "pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Promote\", e)) return p = f\"**#Promote** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat", "__{ujwal.members_count}__ \\n\" if ujwal.photo: kek = await client.download_media(ujwal.photo.big_file_id) await client.send_photo(message.chat.id,", "await bun.edit(engine.get_string(\"TO_DO\").format(\"Ban\")) return try: user_ = await client.get_users(userk) except BaseException", "\"\"\" await edit_or_send_as_file( messag, pablo, client, f\"`AdminList Of {holy}!`\", \"admin-lookup-result\",", "msg += f\"**Chat Title :** __{ujwal.title}__ \\n\" msg += f\"**Users", "message.chat.id, message.reply_to_message.message_id, disable_notification=True, ) except BaseException as e: await edit_or_reply(", "provide his ID!\", \"example\": \"{ch}unban (reply to user message OR", "import ( convert_to_image, convert_vid_to_vidnote, generate_meme, ) @speedo_on_cmd( [\"silentpin\"], only_if_admin=True, cmd_help={", 
"[\"silentpin\"], only_if_admin=True, cmd_help={ \"help\": \"Pin Message Without Sending Notification To", "@speedo_on_cmd( [\"unban\", \"unbun\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"UnBan Replied User", ") return try: user = await client.get_users(userf) except BaseException as", "ub = f\"**#UnBanned** \\n**User :** [{user_.first_name}](tg://user?id={user_.id}) \\n**Chat :** `{message.chat.title}` \\n**Reason", "can_edit_messages=False, can_delete_messages=False, can_restrict_members=False, can_invite_users=False, can_pin_messages=False, can_promote_members=False, ) except BaseException as", "usero: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Demote\") ) return try: user = await", "edit_or_reply(message, engine.get_string(\"PROCESSING\")) if not message.reply_to_message: await msg_.edit(engine.get_string(\"NEEDS_REPLY\").format(\"image\")) return me_ =", "{holy}!`\", \"admin-lookup-result\", \"html\", ) @speedo_on_cmd( [\"botlist\", \"bot\"], group_only=True, cmd_help={\"help\": \"Get", "pablo.edit(engine.get_string(\"CANT_FETCH_ADMIN\").format(\"Bots\", e)) return for nos, ujwal in enumerate(bots, start=1): buts", "f\"**Chat Members Count :** __{ujwal.members_count}__ \\n\" if ujwal.photo: kek =", "message.reply_to_message: await msg_.edit(engine.get_string(\"NEEDS_REPLY\").format(\"image\")) return me_ = await message.chat.get_member(int(client.me.id)) if not", "e)) return for midhun in X: if not midhun.user.is_deleted: link", "group_only=True, cmd_help={ \"help\": \"UnBan Replied User or provide his ID!\",", "= get_text(message) userk, reason = get_user(message, text_) if not userk:", "\"pin\"], only_if_admin=True, cmd_help={ \"help\": \"Pin Message With Sending Notification To", "edit_or_reply(message, engine.get_string(\"PINNED\")) @speedo_on_cmd( [\"pinloud\", \"pin\"], only_if_admin=True, cmd_help={ \"help\": \"Pin Message", "round(end_time - start_time) await event.edit( engine.get_string(\"PURGE_\").format(purge_len, 
u_time) ) await asyncio.sleep(3)", "revoke=True ) end_time = time.time() u_time = round(end_time - start_time)", "\\n**Reason :** `{reason}`\" await unbun.edit(ub) log = LogIt(message) await log.log_msg(client,", "async def bothub(client, message): engine = message.Engine buts = \"**Bot", "his ID!\", \"example\": \"{ch}ban (reply to user message OR provide", ") async def owo_chat_info(client, message): engine = message.Engine s =", "= get_user(message, asplit) if not userl: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Promote\") )", "event.edit(engine.get_string(\"NOT_ADMIN\")) return if not message.reply_to_message: await event.edit(engine.get_string(\"NEEDS_REPLY\").format(\"Message To Purge.\")) return", "pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) if len(message.text.split()) == 1: dm", "and is released under the \"GNU v3.0 License Agreement\". #", "try: await client.restrict_chat_member( message.chat.id, user.id, ChatPermissions(can_send_messages=False) ) except BaseException as", "pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Mute\")) return try: await client.restrict_chat_member( message.chat.id, user.id, ChatPermissions(can_send_messages=False) ) except", "log = LogIt(message) await log.log_msg(client, um) @speedo_on_cmd( [\"chatinfo\", \"grpinfo\"], group_only=True,", ":** __{ujwal.is_verified}__ \\n\" msg += f\"**Is Scam :** __{ujwal.is_scam}__ \\n\"", "def ujwal_demote(client, message): engine = message.Engine pablo = await edit_or_reply(message,", "his ID)\", }, ) async def ujwal_unmute(client, message): engine =", "return userz = user.id if userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"un-mute\"))", "await client.pin_chat_message( message.chat.id, message.reply_to_message.message_id ) except BaseException as e: await", "\"help\": \"Remove Deleted Accounts In The Group/Channel!\", \"example\": \"{ch}zombies\", },", "# Please see < https://github.com/TeamSpeedo/blob/master/LICENSE 
> # # All rights", "message.chat.get_member(int(me_m.id)) if not me_.can_restrict_members: await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return asplit = get_text(message)", "[\"unmute\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Unmute Replied user or provide", "e: await bun.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user_.id if not reason:", "engine.get_string(\"PROCESSING\")) me_m = client.me await message.chat.get_member(int(me_m.id)) asplit = get_text(message) usero", "return await client.delete_messages( chat_id=message.chat.id, message_ids=[message.reply_to_message.message_id], revoke=True, ) await message.delete() @speedo_on_cmd(", "edit_or_reply, edit_or_send_as_file, get_text, get_user, is_admin_or_owner, ) from main_start.helper_func.logger_s import LogIt", "me.id) if not lol: await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return s = 0", "\"**Zombies Report!** \\n\\n\" if dm > 0: text += engine.get_string(\"TOTAL_ZOMBIES_USERS\").format(dm)", "Members!\", \"example\": \"{ch}silentpin (reply to message)\", }, ) async def", "pablo.edit(engine.get_string(\"NO_ZOMBIES\")) return sgname = message.text.split(None, 1)[1] if sgname.lower().strip() == \"clean\":", "\\n**Reason :** `{reason}`\" await bun.edit(b) log = LogIt(message) await log.log_msg(client,", "= LogIt(message) await log.log_msg(client, p) try: if Res: await client.set_administrator_title(message.chat.id,", "= client.me if message.chat.type in [\"supergroup\", \"channel\"]: me_ = await", "\"help\": \"Mute Replied user or provide his ID!\", \"example\": \"{ch}mute", "Adminlist Of Chat!\", \"example\": \"{ch}adminlist\"}, ) async def midhunadmin(client, message):", ") async def midhunadmin(client, message): engine = message.Engine mentions =", ":** __{ujwal.id}__ \\n\" msg += f\"**Verified :** __{ujwal.is_verified}__ \\n\" msg", "ujwalzombie(client, message): engine = message.Engine pablo = await edit_or_reply(message, 
engine.get_string(\"PROCESSING\"))", "client.set_administrator_title(message.chat.id, user.id, Res) except: pass @speedo_on_cmd( [\"demote\", \"demute\"], only_if_admin=True, group_only=True,", "[\"mute\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Mute Replied user or provide", "msg += f\"**Chat Description :** __{ujwal.description}__ \\n\" msg += f\"**Chat", "await bun.edit(engine.get_string(\"USER_MISSING\").format(e)) return try: await client.kick_chat_member(message.chat.id, int(user_.id)) except BaseException as", "With Sending Notification To Members!\", \"example\": \"{ch}pin (reply to messages)\",", "await pablo.edit(text) else: await pablo.edit(engine.get_string(\"NO_ZOMBIES\")) return sgname = message.text.split(None, 1)[1]", "def midhunadmin(client, message): engine = message.Engine mentions = \"\" starky", "userk: await bun.edit(engine.get_string(\"TO_DO\").format(\"Ban\")) return try: user_ = await client.get_users(userk) except", "return s = 0 f = 0 async for member", "\\n\\n\" starky = get_text(message) or message.chat.id pablo = await edit_or_reply(message,", "get_user(message, asplit)[0] if not usero: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Demote\") ) return", ":** __{ujwal.is_scam}__ \\n\" msg += f\"**Chat Title :** __{ujwal.title}__ \\n\"", "cmd_help={ \"help\": \"Mute Replied user or provide his ID!\", \"example\":", "engine = message.Engine if not message.reply_to_message: await message.delete() return await", "\\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`\" await pablo.edit(d) log =", "= [] purge_len = 0 event = await edit_or_reply(message, engine.get_string(\"PROCESSING\"))", "\"Ban Replied User or provide his ID!\", \"example\": \"{ch}ban (reply", ") @speedo_on_cmd( [\"silentpin\"], only_if_admin=True, cmd_help={ \"help\": \"Pin Message Without Sending", "client.get_chat(starky) except BaseException as e: await 
pablo.edit(engine.get_string(\"CANT_FETCH_ADMIN\").format(\"Admins\", e)) return for", "f'✱ <a href=\"tg://user?id={midhun.user.id}\">{midhun.user.first_name}</a>' userid = f\"<code>{midhun.user.id}</code>\" mentions += f\"\\n{link} {userid}\"", "event.edit( engine.get_string(\"PURGE_\").format(purge_len, u_time) ) await asyncio.sleep(3) await event.delete() @speedo_on_cmd( [\"del\"],", "await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Promote\", e)) return p = f\"**#Promote** \\n**User :** [{user.first_name}](tg://user?id={user.id})", "await message.chat.get_member(int(me_m.id)) if not me_.can_promote_members: await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return asplit =", "message_ids=[message.reply_to_message.message_id], revoke=True, ) await message.delete() @speedo_on_cmd( [\"setgrppic\", \"gpic\"], cmd_help={ \"help\":", "asplit)[0] if not userf: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Mute\") ) return try:", "= client.me me_ = await message.chat.get_member(int(me_m.id)) if not me_.can_promote_members: await", "await msg_.edit(engine.get_string(\"NEEDS_REPLY\").format(\"image\")) return me_ = await message.chat.get_member(int(client.me.id)) if not me_.can_change_info:", "text_ = get_text(message) userk, reason = get_user(message, text_) if not", "User or provide his ID!\", \"example\": \"{ch}unban (reply to user", "time.time() message_ids = [] purge_len = 0 event = await", "can_promote_members=False, ) except BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Demote\", e)) return", "= LogIt(message) await log.log_msg(client, d) @speedo_on_cmd( [\"mute\"], only_if_admin=True, group_only=True, cmd_help={", "msg += f\"**Is Scam :** __{ujwal.is_scam}__ \\n\" msg += f\"**Chat", "his ID)\", }, ) async def ujwal_mute(client, message): engine =", "message.chat.id, user.id, is_anonymous=False, can_change_info=False, can_post_messages=False, can_edit_messages=False, 
can_delete_messages=False, can_restrict_members=False, can_invite_users=False, can_pin_messages=False,", "message.chat.id, user.id, ChatPermissions(can_send_messages=False) ) except BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Mute\",", "await pablo.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user.id if userz == me_m.id:", "client.resolve_peer(message.chat.id) online_ = await client.send(pyrogram.raw.functions.messages.GetOnlines(peer=peer)) msg = \"**Chat Info** \\n\\n\"", "not message.reply_to_message: await event.edit(engine.get_string(\"NEEDS_REPLY\").format(\"Message To Purge.\")) return async for msg", "main_start.core.decorators import speedo_on_cmd from main_start.helper_func.basic_helpers import ( edit_or_reply, edit_or_send_as_file, get_text,", "== me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Promote\")) return try: await client.promote_chat_member( message.chat.id, user.id,", "= await client.get_users(usero) except BaseException as e: await pablo.edit(engine.get_string(\"USER_MISSING\").format(e)) return", "d = f\"**#Demote** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`\" await", "can_invite_users=me_.can_invite_users, can_pin_messages=me_.can_pin_messages, can_promote_members=me_.can_promote_members, ) except BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Promote\",", "0 event = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me if", "`{message.chat.title}`\" await pablo.edit(d) log = LogIt(message) await log.log_msg(client, d) @speedo_on_cmd(", "userz == me_m.id: await bun.edit(engine.get_string(\"TF_DO_IT\").format(\"Ban\")) return try: user_ = await", "# # All rights reserved. 
import asyncio import os import", "await client.get_users(userz) except BaseException as e: await bun.edit(engine.get_string(\"USER_MISSING\").format(e)) return try:", "try: await client.pin_chat_message( message.chat.id, message.reply_to_message.message_id, disable_notification=True, ) except BaseException as", "f\"**Is Scam :** __{ujwal.is_scam}__ \\n\" msg += f\"**Chat Title :**", "LogIt(message) await log.log_msg(client, b) @speedo_on_cmd( [\"unban\", \"unbun\"], only_if_admin=True, group_only=True, cmd_help={", "= message.Engine buts = \"**Bot List** \\n\\n\" starky = get_text(message)", "0 async for member in client.iter_chat_members(message.chat.id): if member.user.is_deleted: await sleep(1)", "def unban_world(client, message): engine = message.Engine unbun = await edit_or_reply(message,", "client.me me_ = await message.chat.get_member(int(me_m.id)) if not me_.can_restrict_members: await bun.edit(engine.get_string(\"NOT_ADMIN\"))", "message): engine = message.Engine if not message.reply_to_message: await edit_or_reply(message, engine.get_string(\"REPLY_TO_PIN\"))", "= \"Not Specified!\" if userz == me_m.id: await unbun.edit(engine.get_string(\"TF_DO_IT\").format(\"Un-Ban\")) return", "um) @speedo_on_cmd( [\"chatinfo\", \"grpinfo\"], group_only=True, cmd_help={\"help\": \"Get Info Of The", "message OR provide his ID)\", }, ) async def ujwal_mute(client,", "s = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) ujwal = await client.get_chat(message.chat.id) peer", "len(message_ids) >= 100: await client.delete_messages( chat_id=message.chat.id, message_ids=message_ids, revoke=True ) message_ids.clear()", "await client.unban_chat_member(message.chat.id, int(user_.id)) except BaseException as e: await unbun.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Un-Ban\", e))", "return try: await client.kick_chat_member(message.chat.id, int(user_.id)) except BaseException as e: await", "if da > 0: text += 
engine.get_string(\"TOTAL_ZOMBIES_ADMINS\").format(da) if dc >", ") await message.delete() @speedo_on_cmd( [\"setgrppic\", \"gpic\"], cmd_help={ \"help\": \"Set Custom", "if not os.path.exists(cool): await msg_.edit(engine.get_string(\"INVALID_MEDIA\")) return try: await client.set_chat_photo(message.chat.id, photo=cool)", "s.delete() else: await s.edit(msg) @speedo_on_cmd( [\"purge\"], only_if_admin=True, cmd_help={ \"help\": \"Purge", "if not cool: await msg_.edit(engine.get_string(\"NEEDS_REPLY\").format(\"a valid media\")) return if not", "owo_chat_info(client, message): engine = message.Engine s = await edit_or_reply(message, engine.get_string(\"PROCESSING\"))", "+= 1 text = \"**Zombies Report!** \\n\\n\" if dm >", ") await asyncio.sleep(3) await event.delete() @speedo_on_cmd( [\"del\"], cmd_help={ \"help\": \"Delete", "Of Bots In Chat!\", \"example\": \"{ch}botlist\"}, ) async def bothub(client,", "u_time = round(end_time - start_time) await event.edit( engine.get_string(\"PURGE_\").format(purge_len, u_time) )", "his ID)\", }, ) async def ujwal_mote(client, message): engine =", "return text_ = get_text(message) userm, reason = get_user(message, text_) if", "\\n**Chat :** `{message.chat.title}` \\n**Title :** `{Res}`\" await pablo.edit(p) log =", "as e: await bun.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user_.id if not", "try: await client.restrict_chat_member( message.chat.id, user.id, ChatPermissions(can_send_messages=True) ) except BaseException as", "except BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Promote\", e)) return p =", "client.me me_ = await message.chat.get_member(int(me_m.id)) if not me_.can_restrict_members: await pablo.edit(engine.get_string(\"NOT_ADMIN\"))", "\"{ch}del (reply to message)\", }, ) async def delmsgs(client, message):", "+= 1 elif member.status == \"administrator\": da += 1 elif", "user_ = await client.get_users(userk) except BaseException as e: await 
bun.edit(engine.get_string(\"USER_MISSING\").format(e))", "provide his ID!\", \"example\": \"{ch}Unmute (reply to user message OR", "= f\"**#UnBanned** \\n**User :** [{user_.first_name}](tg://user?id={user_.id}) \\n**Chat :** `{message.chat.title}` \\n**Reason :**", ") except BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Demote\", e)) return d", "@speedo_on_cmd( [\"promote\", \"prumote\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Promote Replied user", "user = await client.get_users(usero) except BaseException as e: await pablo.edit(engine.get_string(\"USER_MISSING\").format(e))", "ID)\", }, ) async def ujwal_demote(client, message): engine = message.Engine", "user = await client.get_users(userf) except BaseException as e: await pablo.edit(engine.get_string(\"USER_MISSING\").format(e))", "to image)\", }, ) async def magic_grps(client, message): engine =", "\"bot\"], group_only=True, cmd_help={\"help\": \"Get List Of Bots In Chat!\", \"example\":", "if not userf: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Un-Mute\") ) return try: user", "!= message.message_id: purge_len += 1 message_ids.append(msg.message_id) if len(message_ids) >= 100:", "group_only=True, cmd_help={\"help\": \"Get List Of Bots In Chat!\", \"example\": \"{ch}botlist\"},", "async def ujwal_mute(client, message): engine = message.Engine pablo = await", "msg in client.iter_history( chat_id=message.chat.id, offset_id=message.reply_to_message.message_id, reverse=True, ): if msg.message_id !=", "BaseException as e: await edit_or_reply( message, engine.get_string(\"UNABLE_TO_PIN\").format(e) ) return await", "d = dm + da + dc if d >", "\"example\": \"{ch}demote (reply to user message OR provide his ID)\",", "= 0 event = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me", "chat_id=message.chat.id, message_ids=message_ids, revoke=True ) message_ids.clear() if message_ids: await client.delete_messages( 
chat_id=message.chat.id,", "await log.log_msg(client, p) try: if Res: await client.set_administrator_title(message.chat.id, user.id, Res)", "= await client.get_users(userm) except BaseException as e: await unbun.edit(engine.get_string(\"USER_MISSING\").format(e)) return", "> 0: text += engine.get_string(\"TOTAL_ZOMBIES_USERS\").format(dm) if da > 0: text", "log.log_msg(client, ub) @speedo_on_cmd( [\"promote\", \"prumote\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Promote", "Chat!\", \"example\": \"{ch}chatinfo\"}, ) async def owo_chat_info(client, message): engine =", "asplit)[0] if not userf: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Un-Mute\") ) return try:", "in [\"supergroup\", \"channel\"]: me_ = await message.chat.get_member(int(me_m.id)) if not me_.can_delete_messages:", "only_if_admin=True, cmd_help={ \"help\": \"Purge All Messages Till Replied Message!\", \"example\":", "LogIt from main_start.helper_func.plugin_helpers import ( convert_to_image, convert_vid_to_vidnote, generate_meme, ) @speedo_on_cmd(", "log = LogIt(message) await log.log_msg(client, m) @speedo_on_cmd( [\"unmute\"], only_if_admin=True, group_only=True,", "client) if not cool: await msg_.edit(engine.get_string(\"NEEDS_REPLY\").format(\"a valid media\")) return if", "main_start.helper_func.logger_s import LogIt from main_start.helper_func.plugin_helpers import ( convert_to_image, convert_vid_to_vidnote, generate_meme,", "All Messages Till Replied Message!\", \"example\": \"{ch}purge (reply to message)\",", "== me_m.id: await bun.edit(engine.get_string(\"TF_DO_IT\").format(\"Ban\")) return try: user_ = await client.get_users(userz)", "Scam :** __{ujwal.is_scam}__ \\n\" msg += f\"**Chat Title :** __{ujwal.title}__", "log = LogIt(message) await log.log_msg(client, d) @speedo_on_cmd( [\"mute\"], only_if_admin=True, group_only=True,", "\"{ch}zombies\", }, ) async def ujwalzombie(client, message): engine = message.Engine", "e)) return for nos, ujwal in 
enumerate(bots, start=1): buts +=", "To Purge.\")) return async for msg in client.iter_history( chat_id=message.chat.id, offset_id=message.reply_to_message.message_id,", "get_text, get_user, is_admin_or_owner, ) from main_start.helper_func.logger_s import LogIt from main_start.helper_func.plugin_helpers", "BaseException as e: await pablo.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user.id if", "reverse=True, ): if msg.message_id != message.message_id: purge_len += 1 message_ids.append(msg.message_id)", "engine = message.Engine bun = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m =", "from main_start.helper_func.logger_s import LogIt from main_start.helper_func.plugin_helpers import ( convert_to_image, convert_vid_to_vidnote,", "= get_text(message) usero = get_user(message, asplit)[0] if not usero: await", "OR provide his ID)\", }, ) async def ujwal_mute(client, message):", "pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me await message.chat.get_member(int(me_m.id))", "0 async for member in client.iter_chat_members(message.chat.id): if member.user.is_deleted: try: await", "me = client.me lol = await is_admin_or_owner(message, me.id) if not", "bun.edit(b) log = LogIt(message) await log.log_msg(client, b) @speedo_on_cmd( [\"unban\", \"unbun\"],", "Members Count :** __{ujwal.members_count}__ \\n\" if ujwal.photo: kek = await", "OR provide his ID)\", }, ) async def ujwal_mote(client, message):", "> 0: text += (engine.get_string(\"WIPE_THEM\")) await pablo.edit(text) else: await pablo.edit(engine.get_string(\"NO_ZOMBIES\"))", "def ujwal_mote(client, message): engine = message.Engine pablo = await edit_or_reply(message,", "Lazy Peoples!\", \"example\": \"{ch}setgrppic (reply to image)\", }, ) async", ":** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`\" await pablo.edit(d) log = LogIt(message)", "BaseException as e: await 
pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Promote\", e)) return p = f\"**#Promote**", "await client.send(pyrogram.raw.functions.messages.GetOnlines(peer=peer)) msg = \"**Chat Info** \\n\\n\" msg += f\"**Chat-ID", "Message!\", \"example\": \"{ch}del (reply to message)\", }, ) async def", "`{message.chat.title}`\" await pablo.edit(um) log = LogIt(message) await log.log_msg(client, um) @speedo_on_cmd(", "= user.id if userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Demote\")) return try:", "get_user(message, text_) if not userm: await unbun.edit( engine.get_string(\"TO_DO\").format(\"Un-Ban\") ) return", "async def purge(client, message): engine = message.Engine start_time = time.time()", "event.edit(engine.get_string(\"NEEDS_REPLY\").format(\"Message To Purge.\")) return async for msg in client.iter_history( chat_id=message.chat.id,", "In Chat!\", \"example\": \"{ch}botlist\"}, ) async def bothub(client, message): engine", "client.delete_messages( chat_id=message.chat.id, message_ids=message_ids, revoke=True ) end_time = time.time() u_time =", ") message_ids.clear() if message_ids: await client.delete_messages( chat_id=message.chat.id, message_ids=message_ids, revoke=True )", "engine = message.Engine await client.unpin_all_chat_messages(message.chat.id) await edit_or_reply(message, engine.get_string(\"UNPINNED\")) @speedo_on_cmd( [\"adminlist\",", "\"example\": \"{ch}adminlist\"}, ) async def midhunadmin(client, message): engine = message.Engine", "Agreement\". # Please see < https://github.com/TeamSpeedo/blob/master/LICENSE > # # All", "\"help\": \"UnBan Replied User or provide his ID!\", \"example\": \"{ch}unban", "<filename>plugins/grouputils.py # Copyright (C) 2020-2021 by TeamSpeed<EMAIL>, < https://github.com/TeamSpeedo >.", "message.Engine pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me me_", "rights reserved. 
import asyncio import os import time from asyncio", "async def spin(client, message): engine = message.Engine if not message.reply_to_message:", "msg += f\"**Chat-ID :** __{ujwal.id}__ \\n\" msg += f\"**Verified :**", "if ujwal.description: msg += f\"**Chat Description :** __{ujwal.description}__ \\n\" msg", "\\n\" msg += f\"**Users Online :** __{online_.onlines}__ \\n\" if ujwal.photo:", "= get_user(message, asplit)[0] if not userf: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Mute\") )", "edit_or_reply(message, engine.get_string(\"PROCESSING\")) ujwal = await client.get_chat(message.chat.id) peer = await client.resolve_peer(message.chat.id)", "(reply to image)\", }, ) async def magic_grps(client, message): engine", "@speedo_on_cmd( [\"purge\"], only_if_admin=True, cmd_help={ \"help\": \"Purge All Messages Till Replied", "his id)\", }, ) async def unban_world(client, message): engine =", "await unbun.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user_.id if not reason: reason", "f\"<code>{midhun.user.id}</code>\" mentions += f\"\\n{link} {userid}\" holy = ujwal.username or ujwal.id", "message.reply_to_message: await message.delete() return await client.delete_messages( chat_id=message.chat.id, message_ids=[message.reply_to_message.message_id], revoke=True, )", "await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Promote\")) return try: await client.promote_chat_member( message.chat.id, user.id, can_change_info=me_.can_change_info, can_delete_messages=me_.can_delete_messages,", "BaseException as e: await pablo.edit(engine.get_string(\"CANT_FETCH_ADMIN\").format(\"Admins\", e)) return for midhun in", "async def owo_chat_info(client, message): engine = message.Engine s = await", "provide his ID)\", }, ) async def ujwal_mote(client, message): engine", "if sgname.lower().strip() == \"clean\": me = client.me lol = await", ":** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}` \\n**Title :** `{Res}`\" 
await pablo.edit(p)", "pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Mute\", e)) return m = f\"**#Muted** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat", "engine = message.Engine if not message.reply_to_message: await edit_or_reply(message, engine.get_string(\"REPLY_TO_PIN\")) try:", "LogIt(message) await log.log_msg(client, m) @speedo_on_cmd( [\"unmute\"], only_if_admin=True, group_only=True, cmd_help={ \"help\":", "[\"zombies\", \"delusers\"], cmd_help={ \"help\": \"Remove Deleted Accounts In The Group/Channel!\",", "client.pin_chat_message( message.chat.id, message.reply_to_message.message_id ) except BaseException as e: await edit_or_reply(", "await client.get_chat_members(starky, filter=\"administrators\") ujwal = await client.get_chat(starky) except BaseException as", "cmd_help={ \"help\": \"Remove Deleted Accounts In The Group/Channel!\", \"example\": \"{ch}zombies\",", "= message.Engine if not message.reply_to_message: await message.delete() return await client.delete_messages(", "Chat!\", \"example\": \"{ch}botlist\"}, ) async def bothub(client, message): engine =", "userf: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Un-Mute\") ) return try: user = await", "await bun.edit(b) log = LogIt(message) await log.log_msg(client, b) @speedo_on_cmd( [\"unban\",", "= LogIt(message) await log.log_msg(client, b) @speedo_on_cmd( [\"unban\", \"unbun\"], only_if_admin=True, group_only=True,", "of < https://github.com/TeamSpeedo/FridayUserBot > project, # and is released under", "Info Of The Chat!\", \"example\": \"{ch}chatinfo\"}, ) async def owo_chat_info(client,", "peer = await client.resolve_peer(message.chat.id) online_ = await client.send(pyrogram.raw.functions.messages.GetOnlines(peer=peer)) msg =", "OR provide his ID)\", }, ) async def ujwal_demote(client, message):", "cmd_help={ \"help\": \"Purge All Messages Till Replied Message!\", \"example\": \"{ch}purge", "\"creator\": dc += 1 text = \"**Zombies Report!** 
\\n\\n\" if", "\"demute\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Demote Replied user or provide", "log.log_msg(client, p) try: if Res: await client.set_administrator_title(message.chat.id, user.id, Res) except:", "nos, ujwal in enumerate(bots, start=1): buts += f\"{nos}〉 [{ujwal.user.first_name}](tg://user?id={ujwal.user.id}) \\n\"", "await client.get_users(userl) except BaseException as e: await pablo.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz", "+= engine.get_string(\"TOTAL_ZOMBIES_ADMINS\").format(da) if dc > 0: text += engine.get_string(\"GRP_OWNER_IS_ZOMBIE\") d", "1 elif member.status == \"administrator\": da += 1 elif member.status", "= get_text(message) userm, reason = get_user(message, text_) if not userm:", "ChatPermissions import pyrogram from main_start.core.decorators import speedo_on_cmd from main_start.helper_func.basic_helpers import", "only_if_admin=True, cmd_help={ \"help\": \"Pin Message With Sending Notification To Members!\",", "not message.reply_to_message: await message.delete() return await client.delete_messages( chat_id=message.chat.id, message_ids=[message.reply_to_message.message_id], revoke=True,", "engine.get_string(\"TOTAL_ZOMBIES_USERS\").format(dm) if da > 0: text += engine.get_string(\"TOTAL_ZOMBIES_ADMINS\").format(da) if dc", "ID!\", \"example\": \"{ch}unban (reply to user message OR Provide his", "log = LogIt(message) await log.log_msg(client, p) try: if Res: await", "can_change_info=me_.can_change_info, can_delete_messages=me_.can_delete_messages, can_restrict_members=me_.can_restrict_members, can_invite_users=me_.can_invite_users, can_pin_messages=me_.can_pin_messages, can_promote_members=me_.can_promote_members, ) except BaseException as", "client.get_users(usero) except BaseException as e: await pablo.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz =", "Online :** __{online_.onlines}__ \\n\" if ujwal.photo: msg += f\"**Chat DC", 
"client.download_media(ujwal.photo.big_file_id) await client.send_photo(message.chat.id, photo=kek, caption=msg) await s.delete() else: await s.edit(msg)", "part of < https://github.com/TeamSpeedo/FridayUserBot > project, # and is released", "= await edit_or_reply(message, engine.get_string(\"PROCESSING\")) if len(message.text.split()) == 1: dm =", "\"Set Custom Group Pic, For Lazy Peoples!\", \"example\": \"{ch}setgrppic (reply", "message.Engine pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) if len(message.text.split()) == 1:", "released under the \"GNU v3.0 License Agreement\". # Please see", "edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me await message.chat.get_member(int(me_m.id)) asplit = get_text(message)", ") return await edit_or_reply(message, engine.get_string(\"PINNED\")) @speedo_on_cmd( [\"unpin\", \"rmpins\"], only_if_admin=True, cmd_help={\"help\":", "return await edit_or_reply(message, engine.get_string(\"PINNED\")) @speedo_on_cmd( [\"pinloud\", \"pin\"], only_if_admin=True, cmd_help={ \"help\":", "ujwal.description: msg += f\"**Chat Description :** __{ujwal.description}__ \\n\" msg +=", "provide his ID)\", }, ) async def ujwal_unmute(client, message): engine", "cmd_help={\"help\": \"Get Info Of The Chat!\", \"example\": \"{ch}chatinfo\"}, ) async", "BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Demote\", e)) return d = f\"**#Demote**", "List Of Bots In Chat!\", \"example\": \"{ch}botlist\"}, ) async def", "except: f += 1 text = \"\" if s >", "not me_.can_promote_members: await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return asplit = get_text(message) userl, Res", "await pablo.edit(engine.get_string(\"CANT_FETCH_ADMIN\").format(\"Bots\", e)) return for nos, ujwal in enumerate(bots, start=1):", "# All rights reserved. 
import asyncio import os import time", "as e: await edit_or_reply( message, engine.get_string(\"UNABLE_TO_PIN\").format(e) ) return await edit_or_reply(message,", "f\"`AdminList Of {holy}!`\", \"admin-lookup-result\", \"html\", ) @speedo_on_cmd( [\"botlist\", \"bot\"], group_only=True,", "1 elif member.status == \"creator\": dc += 1 text =", "== \"creator\": dc += 1 text = \"**Zombies Report!** \\n\\n\"", "or message.chat.id pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) try: bots =", "chat_id=message.chat.id, message_ids=[message.reply_to_message.message_id], revoke=True, ) await message.delete() @speedo_on_cmd( [\"setgrppic\", \"gpic\"], cmd_help={", "d) @speedo_on_cmd( [\"mute\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Mute Replied user", "Replied User or provide his ID!\", \"example\": \"{ch}unban (reply to", "ub) @speedo_on_cmd( [\"promote\", \"prumote\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Promote Replied", "( edit_or_reply, edit_or_send_as_file, get_text, get_user, is_admin_or_owner, ) from main_start.helper_func.logger_s import", "message_ids.clear() if message_ids: await client.delete_messages( chat_id=message.chat.id, message_ids=message_ids, revoke=True ) end_time", "pablo.edit( engine.get_string(\"TO_DO\").format(\"Mute\") ) return try: user = await client.get_users(userf) except", "except BaseException as e: await bun.edit(engine.get_string(\"USER_MISSING\").format(e)) return try: await client.kick_chat_member(message.chat.id,", "pablo.edit(engine.get_string(\"NOT_ADMIN\")) return asplit = get_text(message) userf = get_user(message, asplit)[0] if", "return p = f\"**#Promote** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}`", "client.get_chat(message.chat.id) peer = await client.resolve_peer(message.chat.id) online_ = await client.send(pyrogram.raw.functions.messages.GetOnlines(peer=peer)) msg", "+ da + dc if d > 0: text +=", 
"message_ids=message_ids, revoke=True ) end_time = time.time() u_time = round(end_time -", "group_only=True, cmd_help={\"help\": \"Get Info Of The Chat!\", \"example\": \"{ch}chatinfo\"}, )", "await log.log_msg(client, m) @speedo_on_cmd( [\"unmute\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Unmute", "\"help\": \"Set Custom Group Pic, For Lazy Peoples!\", \"example\": \"{ch}setgrppic", "log.log_msg(client, b) @speedo_on_cmd( [\"unban\", \"unbun\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"UnBan", "\"example\": \"{ch}promote (reply to user message OR provide his ID)\",", "pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) try: bots = await client.get_chat_members(starky,", "@speedo_on_cmd( [\"mute\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Mute Replied user or", "member.status == \"administrator\": da += 1 elif member.status == \"creator\":", "the \"GNU v3.0 License Agreement\". # Please see < https://github.com/TeamSpeedo/blob/master/LICENSE", "\\n\" if ujwal.photo: kek = await client.download_media(ujwal.photo.big_file_id) await client.send_photo(message.chat.id, photo=kek,", "\"{ch}pin (reply to messages)\", }, ) async def lpin(client, message):", "user.id, ChatPermissions(can_send_messages=True) ) except BaseException as e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Un-mute\", e))", "get_text(message) or message.chat.id pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) try: bots", "\"example\": \"{ch}unban (reply to user message OR Provide his id)\",", "= await is_admin_or_owner(message, me.id) if not lol: await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return", "e: await edit_or_reply( message, engine.get_string(\"UNABLE_TO_PIN\").format(e) ) return await edit_or_reply(message, engine.get_string(\"PINNED\"))", "if ujwal.photo: msg += f\"**Chat DC :** __{ujwal.dc_id}__ \\n\" if", "await 
pablo.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user.id if not Res: Res", "async for member in client.iter_chat_members(message.chat.id): if member.user.is_deleted: try: await client.kick_chat_member(message.chat.id,", "log.log_msg(client, um) @speedo_on_cmd( [\"chatinfo\", \"grpinfo\"], group_only=True, cmd_help={\"help\": \"Get Info Of", "e: await bun.edit(engine.get_string(\"USER_MISSING\").format(e)) return try: await client.kick_chat_member(message.chat.id, int(user_.id)) except BaseException", "= dm + da + dc if d > 0:", "try: X = await client.get_chat_members(starky, filter=\"administrators\") ujwal = await client.get_chat(starky)", "client.get_users(userz) except BaseException as e: await bun.edit(engine.get_string(\"USER_MISSING\").format(e)) return try: await", "user message OR Provide his id)\", }, ) async def", "bun.edit(engine.get_string(\"TO_DO\").format(\"Ban\")) return try: user_ = await client.get_users(userk) except BaseException as", "await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me me_ = await message.chat.get_member(int(me_m.id))", "Provide his id)\", }, ) async def unban_world(client, message): engine", "Without Sending Notification To Members!\", \"example\": \"{ch}silentpin (reply to message)\",", "await client.get_users(userf) except BaseException as e: await pablo.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz", "message.chat.get_member(int(me_m.id)) asplit = get_text(message) usero = get_user(message, asplit)[0] if not", "[{user_.first_name}](tg://user?id={user_.id}) \\n**Chat :** `{message.chat.title}` \\n**Reason :** `{reason}`\" await unbun.edit(ub) log", "LogIt(message) await log.log_msg(client, um) @speedo_on_cmd( [\"chatinfo\", \"grpinfo\"], group_only=True, cmd_help={\"help\": \"Get", "def dpins(client, message): engine = message.Engine await client.unpin_all_chat_messages(message.chat.id) await edit_or_reply(message,", "message.delete() return await 
client.delete_messages( chat_id=message.chat.id, message_ids=[message.reply_to_message.message_id], revoke=True, ) await message.delete()", "await asyncio.sleep(3) await event.delete() @speedo_on_cmd( [\"del\"], cmd_help={ \"help\": \"Delete Replied", "async def magic_grps(client, message): engine = message.Engine msg_ = await", "da += 1 elif member.status == \"creator\": dc += 1", "valid media\")) return if not os.path.exists(cool): await msg_.edit(engine.get_string(\"INVALID_MEDIA\")) return try:", "> 0: text += engine.get_string(\"TOTAL_ZOMBIES_ADMINS\").format(da) if dc > 0: text", "e: await pablo.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Promote\", e)) return p = f\"**#Promote** \\n**User :**", "get_user(message, asplit)[0] if not userf: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Un-Mute\") ) return", "midhun.user.is_deleted: link = f'✱ <a href=\"tg://user?id={midhun.user.id}\">{midhun.user.first_name}</a>' userid = f\"<code>{midhun.user.id}</code>\" mentions", "in X: if not midhun.user.is_deleted: link = f'✱ <a href=\"tg://user?id={midhun.user.id}\">{midhun.user.first_name}</a>'", "asyncio import os import time from asyncio import sleep from", "message, engine.get_string(\"UNABLE_TO_PIN\").format(e) ) return await edit_or_reply(message, engine.get_string(\"PINNED\")) @speedo_on_cmd( [\"pinloud\", \"pin\"],", "edit_or_reply(message, engine.get_string(\"REPLY_TO_PIN\")) try: await client.pin_chat_message( message.chat.id, message.reply_to_message.message_id ) except BaseException", "return try: user_ = await client.get_users(userk) except BaseException as e:", "his ID)\", }, ) async def ban_world(client, message): engine =", "e: await msg_.edit(f\"`Unable To Set Group Photo! 
TraceBack : {e}\")", "reason = get_user(message, text_) if not userk: await bun.edit(engine.get_string(\"TO_DO\").format(\"Ban\")) return", "await edit_or_reply(message, engine.get_string(\"PINNED\")) @speedo_on_cmd( [\"unpin\", \"rmpins\"], only_if_admin=True, cmd_help={\"help\": \"Unpin All", "await edit_or_reply(message, engine.get_string(\"UNPINNED\")) @speedo_on_cmd( [\"adminlist\", \"admins\"], cmd_help={\"help\": \"Get Adminlist Of", "bun = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me me_ =", "user.id if userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Mute\")) return try: await", "is_admin_or_owner, ) from main_start.helper_func.logger_s import LogIt from main_start.helper_func.plugin_helpers import (", "unbun.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz = user_.id if not reason: reason =", "not message.reply_to_message: await edit_or_reply(message, engine.get_string(\"REPLY_TO_PIN\")) try: await client.pin_chat_message( message.chat.id, message.reply_to_message.message_id,", ":** `{message.chat.title}`\" await pablo.edit(d) log = LogIt(message) await log.log_msg(client, d)", "await client.get_users(usero) except BaseException as e: await pablo.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz", "or provide his ID!\", \"example\": \"{ch}ban (reply to user message", "def magic_grps(client, message): engine = message.Engine msg_ = await edit_or_reply(message,", "\"example\": \"{ch}rmpins\"}, ) async def dpins(client, message): engine = message.Engine", "pablo.edit( engine.get_string(\"TO_DO\").format(\"Promote\") ) return try: user = await client.get_users(userl) except", "[\"promote\", \"prumote\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Promote Replied user or", "+= engine.get_string(\"REMOVED_ZOMBIES\").format(s) if f > 0: text += (engine.get_string(\"FAILED_ZOMBIES\").format(f)) await", "to user message OR provide his ID)\", }, ) async", 
"bun.edit(engine.get_string(\"TF_DO_IT\").format(\"Ban\")) return try: user_ = await client.get_users(userz) except BaseException as", "(reply to user message OR Provide his id)\", }, )", "me_m = client.me me_ = await message.chat.get_member(int(me_m.id)) if not me_.can_promote_members:", "edit_or_send_as_file, get_text, get_user, is_admin_or_owner, ) from main_start.helper_func.logger_s import LogIt from", "import sleep from pyrogram.types import ChatPermissions import pyrogram from main_start.core.decorators", "not me_.can_delete_messages: await event.edit(engine.get_string(\"NOT_ADMIN\")) return if not message.reply_to_message: await event.edit(engine.get_string(\"NEEDS_REPLY\").format(\"Message", "not me_.can_restrict_members: await bun.edit(engine.get_string(\"NOT_ADMIN\")) return text_ = get_text(message) userk, reason", "offset_id=message.reply_to_message.message_id, reverse=True, ): if msg.message_id != message.message_id: purge_len += 1", "X: if not midhun.user.is_deleted: link = f'✱ <a href=\"tg://user?id={midhun.user.id}\">{midhun.user.first_name}</a>' userid", "f\"**#Promote** \\n**User :** [{user.first_name}](tg://user?id={user.id}) \\n**Chat :** `{message.chat.title}` \\n**Title :** `{Res}`\"", "message.Engine pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me await", "[\"del\"], cmd_help={ \"help\": \"Delete Replied Message!\", \"example\": \"{ch}del (reply to", "except: pass @speedo_on_cmd( [\"demote\", \"demute\"], only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Demote", "= LogIt(message) await log.log_msg(client, m) @speedo_on_cmd( [\"unmute\"], only_if_admin=True, group_only=True, cmd_help={", "\\n\" if ujwal.photo: msg += f\"**Chat DC :** __{ujwal.dc_id}__ \\n\"", ":** __{ujwal.title}__ \\n\" msg += f\"**Users Online :** __{online_.onlines}__ \\n\"", "1)[1] if sgname.lower().strip() == \"clean\": me = client.me lol =", "# Copyright (C) 2020-2021 by TeamSpeed<EMAIL>, < 
https://github.com/TeamSpeedo >. #", ") async def ujwal_demote(client, message): engine = message.Engine pablo =", "f\"**#Banned** \\n**User :** [{user_.first_name}](tg://user?id={user_.id}) \\n**Chat :** `{message.chat.title}` \\n**Reason :** `{reason}`\"", "not lol: await pablo.edit(engine.get_string(\"NOT_ADMIN\")) return s = 0 f =", "message): engine = message.Engine pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) if", "midhun in X: if not midhun.user.is_deleted: link = f'✱ <a", "as e: await unbun.edit(engine.get_string(\"FAILED_ADMIN_ACTION\").format(\"Un-Ban\", e)) ub = f\"**#UnBanned** \\n**User :**", "bun.edit(engine.get_string(\"USER_MISSING\").format(e)) return try: await client.kick_chat_member(message.chat.id, int(user_.id)) except BaseException as e:", "f\"**Chat Title :** __{ujwal.title}__ \\n\" msg += f\"**Users Online :**", "if not me_.can_delete_messages: await event.edit(engine.get_string(\"NOT_ADMIN\")) return if not message.reply_to_message: await", "message): engine = message.Engine unbun = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m", "get_text(message) or message.chat.id pablo = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) try: X", "text_) if not userk: await bun.edit(engine.get_string(\"TO_DO\").format(\"Ban\")) return try: user_ =", "userf = get_user(message, asplit)[0] if not userf: await pablo.edit( engine.get_string(\"TO_DO\").format(\"Mute\")", "if ujwal.username: msg += f\"**Chat Username :** __{ujwal.username}__ \\n\" if", "e)) ub = f\"**#UnBanned** \\n**User :** [{user_.first_name}](tg://user?id={user_.id}) \\n**Chat :** `{message.chat.title}`", "In The Group/Channel!\", \"example\": \"{ch}zombies\", }, ) async def ujwalzombie(client,", "Message With Sending Notification To Members!\", \"example\": \"{ch}pin (reply to", "__{ujwal.title}__ \\n\" msg += f\"**Users Online :** __{online_.onlines}__ \\n\" if", "main_start.helper_func.basic_helpers import ( edit_or_reply, 
edit_or_send_as_file, get_text, get_user, is_admin_or_owner, ) from", "1 text = \"**Zombies Report!** \\n\\n\" if dm > 0:", "userz = user.id if userz == me_m.id: await pablo.edit(engine.get_string(\"TF_DO_IT\").format(\"Demote\")) return", "\"Get Info Of The Chat!\", \"example\": \"{ch}chatinfo\"}, ) async def", "= await client.send(pyrogram.raw.functions.messages.GetOnlines(peer=peer)) msg = \"**Chat Info** \\n\\n\" msg +=", "only_if_admin=True, cmd_help={ \"help\": \"Pin Message Without Sending Notification To Members!\",", "only_if_admin=True, group_only=True, cmd_help={ \"help\": \"Mute Replied user or provide his", "= message.Engine bun = await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me", "await edit_or_reply(message, engine.get_string(\"PINNED\")) @speedo_on_cmd( [\"pinloud\", \"pin\"], only_if_admin=True, cmd_help={ \"help\": \"Pin", "{userid}\" holy = ujwal.username or ujwal.id messag = f\"\"\" <b>Admins", "s += 1 except: f += 1 text = \"\"", "+ dc if d > 0: text += (engine.get_string(\"WIPE_THEM\")) await", "client.get_users(userf) except BaseException as e: await pablo.edit(engine.get_string(\"USER_MISSING\").format(e)) return userz =", "+= f\"**Is Scam :** __{ujwal.is_scam}__ \\n\" msg += f\"**Chat Title", "await edit_or_reply(message, engine.get_string(\"PROCESSING\")) me_m = client.me await message.chat.get_member(int(me_m.id)) asplit =", "if not me_.can_restrict_members: await bun.edit(engine.get_string(\"NOT_ADMIN\")) return text_ = get_text(message) userk,", "\"member\": dm += 1 elif member.status == \"administrator\": da +=", "\"help\": \"Unmute Replied user or provide his ID!\", \"example\": \"{ch}Unmute", "= message.Engine start_time = time.time() message_ids = [] purge_len =", "await client.download_media(ujwal.photo.big_file_id) await client.send_photo(message.chat.id, photo=kek, caption=msg) await s.delete() else: await", "await is_admin_or_owner(message, me.id) if not lol: await 
pablo.edit(engine.get_string(\"NOT_ADMIN\")) return s" ]
[ "now! Come watch!\", embed=discord.Embed.from_dict( { \"title\": \"The stream has ended.\",", "\"upcoming\" and not announced: # This premiere is upcoming and", "announced = known_premieres[item.yt_videoid][1] if item.yt_videoid in known_premieres.keys() else None if", "import CronTrigger from discord.ext import commands from carberretta import Config", "@commands.is_owner() async def command_feed_vod(self, ctx: commands.Context) -> None: last_vod =", "\"#VOD\" not in item.summary: # This isnt a VOD await", "to see you there!\", embed=discord.Embed.from_dict( { \"title\": item.title, \"description\": desc", "tuple: data = await self.call_twitch_api() if data: live_now = await", "if last_vod else \"No new VODs.\") @group_feed.command(name=\"premiere\") @commands.is_owner() async def", "True @commands.group(name=\"feed\", invoke_without_command=True) @commands.is_owner() async def group_feed(self, ctx: commands.Context) ->", "?\", \"video\") for item in await self.call_feed(): data = await", "async def command_feed_stream(self, ctx: commands.Context) -> None: if not (last_stream", "== \"upcoming\" and not announced: # This premiere is upcoming", "= bot async def call_feed(self) -> dict: url = f\"https://www.youtube.com/feeds/videos.xml?channel_id={Config.YOUTUBE_CHANNEL_ID}&{dt.datetime.utcnow()}\"", "self.bot = bot async def call_feed(self) -> dict: url =", "= data[\"snippet\"][\"thumbnails\"] duration = data[\"contentDetails\"][\"duration\"] live_content = data[\"snippet\"][\"liveBroadcastContent\"] upcoming =", "? WHERE ID = 1\", 0, dt.datetime.utcnow() ) start, stream_message,", "await self.videos_channel.send( f\"Hey {self.streams_role.mention}, I'm live on Twitch now! 
Come", "self.bot.session.get(url) as response: if not 200 <= response.status <= 299:", "invoke_without_command=True) @commands.is_owner() async def group_feed(self, ctx: commands.Context) -> None: pass", "premieres (VideoID, Upcoming, Announced) VALUES (?, ?, ?)\", item.yt_videoid, 1,", "dict: url = f\"https://www.youtube.com/feeds/videos.xml?channel_id={Config.YOUTUBE_CHANNEL_ID}&{dt.datetime.utcnow()}\" async with self.bot.session.get(url) as response: if", "def command_feed_stream(self, ctx: commands.Context) -> None: if not (last_stream :=", "await self.videos_channel.send( f\"Hey {self.videos_role.mention}, a new premiere is scheduled for", "500 else f\"{desc[:500]}...\", \"color\": DEFAULT_EMBED_COLOUR, \"url\": item.link, \"author\": {\"name\": \"<NAME>\"},", "elif live_content == \"live\" and not upcoming and not announced:", "await self.bot.db.field(\"SELECT StreamLive FROM streams WHERE ID = 1\") if", "last_premiere[1] else f\"Announced upcoming premiere: {last_premiere[0]}.\" ) @group_feed.command(name=\"stream\") @commands.is_owner() async", "command_feed_stream(self, ctx: commands.Context) -> None: if not (last_stream := await", "-> None: self.bot = bot async def call_feed(self) -> dict:", "<= 299: return [] if not (data := await response.json()):", "current_vid = await self.bot.db.field(\"SELECT ContentValue FROM videos WHERE ContentType =", "duration != \"P0D\": # We have not seen this premiere", "is scheduled for {chron.long_date_and_time(scheduled_time)} UTC! 
Hope to see you there!\",", ":= await self.get_new_streams()): await ctx.send(\"No new streams.\") else: await ctx.send(", "Config from carberretta.utils import DEFAULT_EMBED_COLOUR, chron LIVE_EMBED_COLOUR = 0x9146FF VOD_EMBED_COLOUR", "@commands.Cog.listener() async def on_ready(self) -> None: if not self.bot.ready.booted: self.videos_channel", "last_stream[1] else f\"Announced stream: {last_stream[0]}.\" ) def setup(bot: commands.Bot) ->", "us!\", embed=discord.Embed.from_dict( { \"title\": item.title, \"description\": desc if len(desc :=", "chron.from_iso(start) try: message = await self.videos_channel.fetch_message(stream_message) except (discord.NotFound, discord.Forbidden, discord.HTTPException):", "response.status <= 299: return [] if not (data := await", ") return item.yt_videoid async def get_new_videos(self) -> str: current_vid =", "(VideoID, Upcoming, Announced) VALUES (?, ?, ?)\", item.yt_videoid, 1, 0,", "-> None: if not (last_stream := await self.get_new_streams()): await ctx.send(\"No", "ID = 1\", 0, dt.datetime.utcnow() ) start, stream_message, end =", "Feeds(commands.Cog): def __init__(self, bot: commands.Bot) -> None: self.bot = bot", "discord.HTTPException): return else: await message.edit( content=f\"Hey {self.streams_role.mention}, I'm live on", "= f\"https://id.twitch.tv/oauth2/token?client_id={Config.TWITCH_CLIENT_ID}&client_secret={Config.TWITCH_CLIENT_SECRET}&grant_type=client_credentials\" async with self.bot.session.post(url=oauthurl) as response: if not 200", "return elif \"#VOD\" in item.summary: # This is a vod", "f\"Stream ended: {last_stream[0]}.\" if last_stream[1] else f\"Announced stream: {last_stream[0]}.\" )", "= await self.call_yt_api(item.yt_videoid) thumbnails = data[\"snippet\"][\"thumbnails\"] duration = data[\"contentDetails\"][\"duration\"] live_content", "ContentType = ?\", item.yt_videoid, \"video\" ) return item.yt_videoid async def", "videos SET ContentValue = ? 
WHERE ContentType = ?\", item.yt_videoid,", "ctx.send( f\"Announced live premiere: {last_premiere[0]}.\" if last_premiere[1] else f\"Announced upcoming", "self.videos_channel.send( f\"Hey {self.videos_role.mention}, a new premiere is scheduled for {chron.long_date_and_time(scheduled_time)}", "from carberretta import Config from carberretta.utils import DEFAULT_EMBED_COLOUR, chron LIVE_EMBED_COLOUR", "False elif live_content == \"live\" and not upcoming and not", "UTC! Come and join us!\", embed=discord.Embed.from_dict( { \"title\": item.title, \"description\":", "return [] if not (data := await response.json()): return []", "await self.bot.db.execute( \"REPLACE INTO premieres (VideoID, Upcoming, Announced) VALUES (?,", "chron.from_iso(end) - chron.from_iso(start) try: message = await self.videos_channel.fetch_message(stream_message) except (discord.NotFound,", "\"footer\": {\"text\": f\"Runtime: {self.youtube.get_duration(duration, long=True)}\"}, } ), ) await self.bot.db.execute(", "live on Twitch now! Come watch!\", embed=discord.Embed.from_dict( { \"title\": \"The", "headers=headers) as response: if not 200 <= response.status <= 299:", "async def command_feed_vod(self, ctx: commands.Context) -> None: last_vod = await", "FEEDS Handles YouTube and Twitch feed notifications. 
\"\"\" import datetime", "[] if not (data := await response.json()): return [] return", "\"#VOD\" in item.summary: # This is a vod we havent", "\"title\": \"The stream has ended.\", \"description\": \"**Catch you in the", "item.title, \"description\": desc if len(desc := item.summary) <= 500 else", "StreamEnd FROM streams WHERE ID = 1\" ) duration =", "= chron.from_iso(data[\"started_at\"].strip(\"Z\")) message = await self.videos_channel.send( f\"Hey {self.streams_role.mention}, I'm live", "not upcoming and duration != \"P0D\": # We have not", "import discord import feedparser from apscheduler.triggers.cron import CronTrigger from discord.ext", "await self.bot.db.record( \"SELECT StreamStart, StreamMessage, StreamEnd FROM streams WHERE ID", "return else: await message.edit( content=f\"Hey {self.streams_role.mention}, I'm live on Twitch", "* FROM premieres\") } for item in await self.call_feed(): data", "else: await ctx.send( f\"Announced live premiere: {last_premiere[0]}.\" if last_premiere[1] else", "Come watch!\", embed=discord.Embed.from_dict( { \"title\": \"The stream has ended.\", \"description\":", "data[\"contentDetails\"][\"duration\"] if item.yt_videoid == current_vid: # This is a video", "embed=discord.Embed.from_dict( { \"title\": item.title, \"description\": desc if len(desc := item.summary)", "else f\"Announced stream: {last_stream[0]}.\" ) def setup(bot: commands.Bot) -> None:", "?, ?)\", item.yt_videoid, 1, 1, ) return item.yt_videoid, True elif", "= await self.call_yt_api(item.yt_videoid) thumbnails = data[\"snippet\"][\"thumbnails\"] duration = data[\"contentDetails\"][\"duration\"] if", "seen this premiere before if live_content == \"upcoming\" and not", "not announced: # This premiere is upcoming and not live", "async def call_feed(self) -> dict: url = f\"https://www.youtube.com/feeds/videos.xml?channel_id={Config.YOUTUBE_CHANNEL_ID}&{dt.datetime.utcnow()}\" async with", "= 1\", 0, dt.datetime.utcnow() ) start, stream_message, end = await", 
"\"UPDATE streams SET StreamLive = ?, StreamStart = ?, StreamMessage=", "I'm live on Twitch now! Come watch!\", embed=discord.Embed.from_dict( { \"title\":", "streams WHERE ID = 1\" ) duration = chron.from_iso(end) -", "?, StreamEnd = ? WHERE ID = 1\", 0, dt.datetime.utcnow()", "dt import discord import feedparser from apscheduler.triggers.cron import CronTrigger from", "if (await self.bot.application_info()).id == 696804435321552906: self.bot.scheduler.add_job(self.get_new_videos, CronTrigger(minute=\"*/3\", second=0)) self.bot.scheduler.add_job(self.get_new_vods, CronTrigger(minute=\"*/3\",", "None announced = known_premieres[item.yt_videoid][1] if item.yt_videoid in known_premieres.keys() else None", "f\"https://www.youtube.com/feeds/videos.xml?channel_id={Config.YOUTUBE_CHANNEL_ID}&{dt.datetime.utcnow()}\" async with self.bot.session.get(url) as response: if not 200 <=", "\"image\": {\"url\": thumbnails[\"maxres\"][\"url\"]}, \"footer\": {\"text\": f\"Runtime: {self.youtube.get_duration(duration, long=True)}\"}, } ),", "self.call_yt_api(item.yt_videoid) thumbnails = data[\"snippet\"][\"thumbnails\"] duration = data[\"contentDetails\"][\"duration\"] live_content = data[\"snippet\"][\"liveBroadcastContent\"]", "-> dict: url = f\"https://www.googleapis.com/youtube/v3/videos?part=contentDetails%2CliveStreamingDetails%2Csnippet&id={video_id}&key={Config.YOUTUBE_API_KEY}\" async with self.bot.session.get(url) as response:", "f\"https://api.twitch.tv/helix/search/channels?query=carberratutorials\" oauthurl = f\"https://id.twitch.tv/oauth2/token?client_id={Config.TWITCH_CLIENT_ID}&client_secret={Config.TWITCH_CLIENT_SECRET}&grant_type=client_credentials\" async with self.bot.session.post(url=oauthurl) as response: if", "it out!\", embed=discord.Embed.from_dict( { \"title\": item.title, \"description\": desc if len(desc", "= await self.get_new_vods() await ctx.send(f\"Announced VOD: {last_vod}.\" if last_vod else", "last_video = await self.get_new_videos() await 
ctx.send(f\"Announced video: {last_video}.\" if last_video", "command_feed_vod(self, ctx: commands.Context) -> None: last_vod = await self.get_new_vods() await", "upcoming and duration != \"P0D\": # We have not seen", "self.bot.guild.get_role(Config.VODS_ROLE_ID) self.streams_role = self.bot.guild.get_role(Config.STREAMS_ROLE_ID) self.youtube = self.bot.get_cog(\"YouTube\") if (await self.bot.application_info()).id", "not (last_stream := await self.get_new_streams()): await ctx.send(\"No new streams.\") else:", "started on {chron.long_date_and_time(scheduled_time)} UTC! Come and join us!\", embed=discord.Embed.from_dict( {", "def on_ready(self) -> None: if not self.bot.ready.booted: self.videos_channel = self.bot.get_channel(Config.VIDEOS_ID)", "= ?, StreamEnd = ? WHERE ID = 1\", 0,", "This is a video we already announced return elif \"liveStreamingDetails\"", "response.json())[\"access_token\"]): return [] headers = { \"client-id\": f\"{Config.TWITCH_CLIENT_ID}\", \"Authorization\": f\"Bearer", "?)\", item.yt_videoid, 1, 1, ) return item.yt_videoid, True elif not", "data.keys(): # A new video is live and its was", "and duration != \"P0D\": # We have not seen this", "This premiere is upcoming and not live await self.videos_channel.send( f\"Hey", "was never upcoming is now live await self.videos_channel.send( f\"Hey {self.videos_role.mention},", "else f\"{desc[:500]}...\", \"color\": DEFAULT_EMBED_COLOUR, \"url\": item.link, \"author\": {\"name\": \"<NAME>\"}, \"image\":", "data[\"thumbnail_url\"]}, \"footer\": {\"text\": f\"Runtime: {chron.long_delta(duration)}\"}, } ), ) return data[\"title\"],", "for _id, _upcoming, _announced in await self.bot.db.records(\"SELECT * FROM premieres\")", "we already announced return elif \"liveStreamingDetails\" not in data.keys(): #", "discord.Forbidden, discord.HTTPException): return else: await message.edit( content=f\"Hey {self.streams_role.mention}, I'm live", "None: if not self.bot.ready.booted: self.videos_channel = 
self.bot.get_channel(Config.VIDEOS_ID) self.videos_role = self.bot.guild.get_role(Config.VIDEOS_ROLE_ID)", "already announced return elif \"liveStreamingDetails\" not in data.keys(): # A", "this premiere before if live_content == \"upcoming\" and not announced:", "StreamMessage= ? WHERE ID = 1\", 1, start, message.id, )", "command_feed_video(self, ctx: commands.Context) -> None: last_video = await self.get_new_videos() await", "None: if not (last_stream := await self.get_new_streams()): await ctx.send(\"No new", "start = chron.from_iso(data[\"started_at\"].strip(\"Z\")) message = await self.videos_channel.send( f\"Hey {self.streams_role.mention}, I'm", "WHERE ID = 1\", 1, start, message.id, ) return data[\"title\"],", "upcoming and not announced: # The premiere was never upcoming", "SET StreamLive = ?, StreamEnd = ? WHERE ID =", "def get_new_vods(self) -> str: current_vod = await self.bot.db.field(\"SELECT ContentValue FROM", "\"video\" ) return item.yt_videoid async def get_new_premieres(self) -> tuple: known_premieres", "item.yt_videoid, 1, 0, ) return item.yt_videoid, False elif live_content ==", "return [] return data[\"items\"][0] async def call_twitch_api(self) -> dict: url", "if \"liveStreamingDetails\" in data.keys(): start_time = data[\"liveStreamingDetails\"][\"scheduledStartTime\"].strip(\"Z\") scheduled_time = chron.from_iso(start_time)", "import datetime as dt import discord import feedparser from apscheduler.triggers.cron", "not seen this premiere before if live_content == \"upcoming\" and", "not in data.keys(): # A new video is live and", "url = f\"https://www.googleapis.com/youtube/v3/videos?part=contentDetails%2CliveStreamingDetails%2Csnippet&id={video_id}&key={Config.YOUTUBE_API_KEY}\" async with self.bot.session.get(url) as response: if not", "async def call_twitch_api(self) -> dict: url = f\"https://api.twitch.tv/helix/search/channels?query=carberratutorials\" oauthurl =", "def group_feed(self, ctx: commands.Context) -> None: pass 
@group_feed.command(name=\"video\") @commands.is_owner() async", "with self.bot.session.get(url) as response: if not 200 <= response.status <=", "announced return elif \"liveStreamingDetails\" not in data.keys(): # A new", "and its was not a premiere if \"#VOD\" not in", "return data[\"items\"][0] async def call_twitch_api(self) -> dict: url = f\"https://api.twitch.tv/helix/search/channels?query=carberratutorials\"", "else \"No new videos.\") @group_feed.command(name=\"vod\") @commands.is_owner() async def command_feed_vod(self, ctx:", "await ctx.send( f\"Stream ended: {last_stream[0]}.\" if last_stream[1] else f\"Announced stream:", "{self.vods_role.mention}, a new VOD just went live! Catch up on", "CronTrigger(minute=\"*/3\", second=15)) self.bot.scheduler.add_job(self.get_new_premieres, CronTrigger(minute=\"*/3\", second=30)) self.bot.scheduler.add_job(self.get_new_streams, CronTrigger(minute=\"*/3\", second=45)) self.bot.ready.up(self) async", "return [] return data async def call_yt_api(self, video_id: str) ->", "= self.bot.guild.get_role(Config.VODS_ROLE_ID) self.streams_role = self.bot.guild.get_role(Config.STREAMS_ROLE_ID) self.youtube = self.bot.get_cog(\"YouTube\") if (await", "async def call_yt_api(self, video_id: str) -> dict: url = f\"https://www.googleapis.com/youtube/v3/videos?part=contentDetails%2CliveStreamingDetails%2Csnippet&id={video_id}&key={Config.YOUTUBE_API_KEY}\"", "async def get_new_premieres(self) -> tuple: known_premieres = { _id: [_upcoming,", "and is now live await self.videos_channel.send( f\"Hey {self.videos_role.mention}, a new", "live_now: # The stream is live and we havent announced", "self.videos_channel = self.bot.get_channel(Config.VIDEOS_ID) self.videos_role = self.bot.guild.get_role(Config.VIDEOS_ROLE_ID) self.vods_role = self.bot.guild.get_role(Config.VODS_ROLE_ID) self.streams_role", "in item.summary: # This is a vod we havent announced", "@commands.is_owner() async def command_feed_stream(self, ctx: commands.Context) -> 
None: if not", "await self.call_yt_api(item.yt_videoid) thumbnails = data[\"snippet\"][\"thumbnails\"] duration = data[\"contentDetails\"][\"duration\"] if item.yt_videoid", "self.bot.scheduler.add_job(self.get_new_videos, CronTrigger(minute=\"*/3\", second=0)) self.bot.scheduler.add_job(self.get_new_vods, CronTrigger(minute=\"*/3\", second=15)) self.bot.scheduler.add_job(self.get_new_premieres, CronTrigger(minute=\"*/3\", second=30)) self.bot.scheduler.add_job(self.get_new_streams,", "-> tuple: data = await self.call_twitch_api() if data: live_now =", "SET ContentValue = ? WHERE ContentType = ?\", item.yt_videoid, \"vod\"", "new video just went live! Come check it out!\", embed=discord.Embed.from_dict(", "= data[\"contentDetails\"][\"duration\"] if current_vod == item.yt_videoid: # We announced this", "Upcoming, Announced) VALUES (?, ?, ?)\", item.yt_videoid, 1, 0, )", "carberretta import Config from carberretta.utils import DEFAULT_EMBED_COLOUR, chron LIVE_EMBED_COLOUR =", "data[\"is_live\"] and not live_now: # The stream is live and", "? 
WHERE ID = 1\", 1, start, message.id, ) return", "is now live await self.videos_channel.send( f\"Hey {self.videos_role.mention}, a new premiere", "for item in await self.call_feed(): data = await self.call_yt_api(item.yt_videoid) thumbnails", "data[\"data\"][0] @commands.Cog.listener() async def on_ready(self) -> None: if not self.bot.ready.booted:", "self.bot.get_cog(\"YouTube\") if (await self.bot.application_info()).id == 696804435321552906: self.bot.scheduler.add_job(self.get_new_videos, CronTrigger(minute=\"*/3\", second=0)) self.bot.scheduler.add_job(self.get_new_vods,", "[] return data async def call_yt_api(self, video_id: str) -> dict:", "elif \"#VOD\" in item.summary: # This is a vod we", "\"author\": {\"name\": \"<NAME>\"}, \"thumbnail\": {\"url\": data[\"thumbnail_url\"]}, \"footer\": {\"text\": f\"Started: {chron.long_date_and_time(start)}", "start, stream_message, end = await self.bot.db.record( \"SELECT StreamStart, StreamMessage, StreamEnd", "videos.\") @group_feed.command(name=\"vod\") @commands.is_owner() async def command_feed_vod(self, ctx: commands.Context) -> None:", "\"REPLACE INTO premieres (VideoID, Upcoming, Announced) VALUES (?, ?, ?)\",", "{last_premiere[0]}.\" if last_premiere[1] else f\"Announced upcoming premiere: {last_premiere[0]}.\" ) @group_feed.command(name=\"stream\")", "= self.bot.get_cog(\"YouTube\") if (await self.bot.application_info()).id == 696804435321552906: self.bot.scheduler.add_job(self.get_new_videos, CronTrigger(minute=\"*/3\", second=0))", "known_premieres = { _id: [_upcoming, _announced] for _id, _upcoming, _announced", "if not (data := await response.json()): return [] return data[\"data\"][0]", "and not announced: # The premiere was never upcoming is", "vod we havent announced await self.videos_channel.send( f\"Hey {self.vods_role.mention}, a new", "UTC! 
Hope to see you there!\", embed=discord.Embed.from_dict( { \"title\": item.title,", "Announced) VALUES (?, ?, ?)\", item.yt_videoid, 1, 1, ) return", "YouTube and Twitch feed notifications. \"\"\" import datetime as dt", "= self.bot.guild.get_role(Config.STREAMS_ROLE_ID) self.youtube = self.bot.get_cog(\"YouTube\") if (await self.bot.application_info()).id == 696804435321552906:", "second=15)) self.bot.scheduler.add_job(self.get_new_premieres, CronTrigger(minute=\"*/3\", second=30)) self.bot.scheduler.add_job(self.get_new_streams, CronTrigger(minute=\"*/3\", second=45)) self.bot.ready.up(self) async def", "thumbnails = data[\"snippet\"][\"thumbnails\"] duration = data[\"contentDetails\"][\"duration\"] if current_vod == item.yt_videoid:", "chron.from_iso(data[\"started_at\"].strip(\"Z\")) message = await self.videos_channel.send( f\"Hey {self.streams_role.mention}, I'm live on", "live on Twitch now! Come watch!\", embed=discord.Embed.from_dict( { \"title\": data[\"title\"],", "if last_premiere[1] else f\"Announced upcoming premiere: {last_premiere[0]}.\" ) @group_feed.command(name=\"stream\") @commands.is_owner()", "== item.yt_videoid: # We announced this vod already return elif", "self.bot.ready.booted: self.videos_channel = self.bot.get_channel(Config.VIDEOS_ID) self.videos_role = self.bot.guild.get_role(Config.VIDEOS_ROLE_ID) self.vods_role = self.bot.guild.get_role(Config.VODS_ROLE_ID)", "await self.call_yt_api(item.yt_videoid) thumbnails = data[\"snippet\"][\"thumbnails\"] duration = data[\"contentDetails\"][\"duration\"] if current_vod", "\"Authorization\": f\"Bearer {twitch_tok}\", } async with self.bot.session.get(url=url, headers=headers) as response:", "in the next one!**\", \"color\": LIVE_EMBED_COLOUR, \"url\": \"https://www.twitch.tv/carberratutorials\", \"author\": {\"name\":", "isnt a VOD await self.videos_channel.send( f\"Hey {self.videos_role.mention}, a new video", "= data[\"contentDetails\"][\"duration\"] live_content = 
data[\"snippet\"][\"liveBroadcastContent\"] upcoming = known_premieres[item.yt_videoid][0] if item.yt_videoid", "new premiere is scheduled for {chron.long_date_and_time(scheduled_time)} UTC! Hope to see", ") return item.yt_videoid, False elif live_content == \"live\" and not", "= ?, StreamMessage= ? WHERE ID = 1\", 1, start,", "len(desc := item.summary) <= 500 else f\"{desc[:500]}...\", \"color\": DEFAULT_EMBED_COLOUR, \"url\":", "VOD_EMBED_COLOUR = 0x3498DB class Feeds(commands.Cog): def __init__(self, bot: commands.Bot) ->", "live await self.videos_channel.send( f\"Hey {self.videos_role.mention}, a new premiere is scheduled", "= await self.get_new_videos() await ctx.send(f\"Announced video: {last_video}.\" if last_video else", "? WHERE ContentType = ?\", item.yt_videoid, \"video\" ) return item.yt_videoid", "} for item in await self.call_feed(): data = await self.call_yt_api(item.yt_videoid)", "await self.get_new_vods() await ctx.send(f\"Announced VOD: {last_vod}.\" if last_vod else \"No", "premiere started on {chron.long_date_and_time(scheduled_time)} UTC! Come and join us!\", embed=discord.Embed.from_dict(", "await message.edit( content=f\"Hey {self.streams_role.mention}, I'm live on Twitch now! Come", "?, ?)\", item.yt_videoid, 1, 1 ) return item.yt_videoid, True async", "VOD just went live! 
Catch up on anything you missed", "embed=discord.Embed.from_dict( { \"title\": \"The stream has ended.\", \"description\": \"**Catch you", "\"title\": data[\"title\"], \"description\": f\"**Category: {data['game_name']}**\", \"color\": LIVE_EMBED_COLOUR, \"url\": \"https://www.twitch.tv/carberratutorials\", \"author\":", "<= 299: return [] if not (data := feedparser.parse(await response.text()).entries):", "\"video\") for item in await self.call_feed(): data = await self.call_yt_api(item.yt_videoid)", "commands.Context) -> None: if not (last_stream := await self.get_new_streams()): await", "is a video we already announced return elif \"liveStreamingDetails\" not", "self.videos_channel.send( f\"Hey {self.streams_role.mention}, I'm live on Twitch now! Come watch!\",", "self.videos_channel.fetch_message(stream_message) except (discord.NotFound, discord.Forbidden, discord.HTTPException): return else: await message.edit( content=f\"Hey", "was upcoming, and is now live await self.videos_channel.send( f\"Hey {self.videos_role.mention},", "is live and its was not a premiere if \"#VOD\"", "you missed from the last stream!\", embed=discord.Embed.from_dict( { \"title\": item.title,", "= chron.from_iso(end) - chron.from_iso(start) try: message = await self.videos_channel.fetch_message(stream_message) except", "join us!\", embed=discord.Embed.from_dict( { \"title\": item.title, \"description\": desc if len(desc", "= ?, StreamStart = ?, StreamMessage= ? WHERE ID =", "def command_feed_premiere(self, ctx: commands.Context) -> None: if not (last_premiere :=", "self.bot.get_channel(Config.VIDEOS_ID) self.videos_role = self.bot.guild.get_role(Config.VIDEOS_ROLE_ID) self.vods_role = self.bot.guild.get_role(Config.VODS_ROLE_ID) self.streams_role = self.bot.guild.get_role(Config.STREAMS_ROLE_ID)", "async def group_feed(self, ctx: commands.Context) -> None: pass @group_feed.command(name=\"video\") @commands.is_owner()", "scheduled for {chron.long_date_and_time(scheduled_time)} UTC! 
Hope to see you there!\", embed=discord.Embed.from_dict(", "data[\"title\"], True @commands.group(name=\"feed\", invoke_without_command=True) @commands.is_owner() async def group_feed(self, ctx: commands.Context)", "premiere is scheduled for {chron.long_date_and_time(scheduled_time)} UTC! Hope to see you", "{\"name\": \"<NAME>\"}, \"image\": {\"url\": thumbnails[\"maxres\"][\"url\"]}, \"footer\": {\"text\": f\"Runtime: {self.youtube.get_duration(duration, long=True)}\"},", "last_vod else \"No new VODs.\") @group_feed.command(name=\"premiere\") @commands.is_owner() async def command_feed_premiere(self,", "The stream is live and we havent announced it yet", "as response: if not 200 <= response.status <= 299: return", "a VOD await self.videos_channel.send( f\"Hey {self.videos_role.mention}, a new video just", "A premiere was upcoming, and is now live await self.videos_channel.send(", "= 0x9146FF VOD_EMBED_COLOUR = 0x3498DB class Feeds(commands.Cog): def __init__(self, bot:", "self.bot.session.get(url=url, headers=headers) as response: if not 200 <= response.status <=", "), ) await self.bot.db.execute( \"UPDATE streams SET StreamLive = ?,", "SET ContentValue = ? 
WHERE ContentType = ?\", item.yt_videoid, \"video\"", "299: return [] if not (twitch_tok := (await response.json())[\"access_token\"]): return", "url = f\"https://api.twitch.tv/helix/search/channels?query=carberratutorials\" oauthurl = f\"https://id.twitch.tv/oauth2/token?client_id={Config.TWITCH_CLIENT_ID}&client_secret={Config.TWITCH_CLIENT_SECRET}&grant_type=client_credentials\" async with self.bot.session.post(url=oauthurl) as", "not (data := await response.json()): return [] return data[\"data\"][0] @commands.Cog.listener()", "await self.videos_channel.send( f\"Hey {self.videos_role.mention}, a new premiere started on {chron.long_date_and_time(scheduled_time)}", "premiere if \"#VOD\" not in item.summary: # This isnt a", "VALUES (?, ?, ?)\", item.yt_videoid, 1, 1 ) return item.yt_videoid,", "_upcoming, _announced in await self.bot.db.records(\"SELECT * FROM premieres\") } for", "else: await message.edit( content=f\"Hey {self.streams_role.mention}, I'm live on Twitch now!", "discord.ext import commands from carberretta import Config from carberretta.utils import", "await self.bot.db.execute( \"UPDATE streams SET StreamLive = ?, StreamStart =", "= ?\", item.yt_videoid, \"video\" ) return item.yt_videoid async def get_new_premieres(self)", "if item.yt_videoid in known_premieres.keys() else None if \"liveStreamingDetails\" in data.keys():", "Upcoming, Announced) VALUES (?, ?, ?)\", item.yt_videoid, 1, 1 )", "[] return data[\"items\"][0] async def call_twitch_api(self) -> dict: url =", "ContentValue FROM videos WHERE ContentType = ?\", \"vod\") for item", "await self.bot.db.execute( \"UPDATE streams SET StreamLive = ?, StreamEnd =", "def call_yt_api(self, video_id: str) -> dict: url = f\"https://www.googleapis.com/youtube/v3/videos?part=contentDetails%2CliveStreamingDetails%2Csnippet&id={video_id}&key={Config.YOUTUBE_API_KEY}\" async", "# A premiere was upcoming, and is now live await", "discord import feedparser from apscheduler.triggers.cron import CronTrigger 
from discord.ext import", "\"<NAME>\"}, \"thumbnail\": {\"url\": data[\"thumbnail_url\"]}, \"footer\": {\"text\": f\"Started: {chron.long_date_and_time(start)} UTC\"}, }", "from discord.ext import commands from carberretta import Config from carberretta.utils", "\"description\": desc if len(desc := item.summary) <= 500 else f\"{desc[:500]}...\",", "announced it yet start = chron.from_iso(data[\"started_at\"].strip(\"Z\")) message = await self.videos_channel.send(", "from apscheduler.triggers.cron import CronTrigger from discord.ext import commands from carberretta", "await self.call_feed(): data = await self.call_yt_api(item.yt_videoid) thumbnails = data[\"snippet\"][\"thumbnails\"] duration", "its was not a premiere if \"#VOD\" not in item.summary:", "?, ?)\", item.yt_videoid, 1, 0, ) return item.yt_videoid, False elif", "new premiere started on {chron.long_date_and_time(scheduled_time)} UTC! Come and join us!\",", "(?, ?, ?)\", item.yt_videoid, 1, 1 ) return item.yt_videoid, True", "LIVE_EMBED_COLOUR, \"url\": \"https://www.twitch.tv/carberratutorials\", \"author\": {\"name\": \"<NAME>\"}, \"thumbnail\": {\"url\": data[\"thumbnail_url\"]}, \"footer\":", "ctx.send( f\"Stream ended: {last_stream[0]}.\" if last_stream[1] else f\"Announced stream: {last_stream[0]}.\"", "call_twitch_api(self) -> dict: url = f\"https://api.twitch.tv/helix/search/channels?query=carberratutorials\" oauthurl = f\"https://id.twitch.tv/oauth2/token?client_id={Config.TWITCH_CLIENT_ID}&client_secret={Config.TWITCH_CLIENT_SECRET}&grant_type=client_credentials\" async", "async def command_feed_video(self, ctx: commands.Context) -> None: last_video = await", "{last_stream[0]}.\" if last_stream[1] else f\"Announced stream: {last_stream[0]}.\" ) def setup(bot:", "f\"{desc[:500]}...\", \"color\": DEFAULT_EMBED_COLOUR, \"url\": item.link, \"author\": {\"name\": \"<NAME>\"}, \"image\": {\"url\":", "data: live_now = await self.bot.db.field(\"SELECT StreamLive FROM streams WHERE ID", "-> None: 
last_video = await self.get_new_videos() await ctx.send(f\"Announced video: {last_video}.\"", "= await self.bot.db.field(\"SELECT StreamLive FROM streams WHERE ID = 1\")", "live! Come check it out!\", embed=discord.Embed.from_dict( { \"title\": item.title, \"description\":", "in await self.bot.db.records(\"SELECT * FROM premieres\") } for item in", "= 1\", 1, start, message.id, ) return data[\"title\"], False elif", "a new VOD just went live! Catch up on anything", "# This is a video we already announced return elif", "stream!\", embed=discord.Embed.from_dict( { \"title\": item.title, \"description\": desc if len(desc :=", "\"liveStreamingDetails\" in data.keys(): start_time = data[\"liveStreamingDetails\"][\"scheduledStartTime\"].strip(\"Z\") scheduled_time = chron.from_iso(start_time) if", "= known_premieres[item.yt_videoid][0] if item.yt_videoid in known_premieres.keys() else None announced =", "FROM streams WHERE ID = 1\") if data[\"is_live\"] and not", "await ctx.send(f\"Announced VOD: {last_vod}.\" if last_vod else \"No new VODs.\")", "-> str: current_vod = await self.bot.db.field(\"SELECT ContentValue FROM videos WHERE", "1\") if data[\"is_live\"] and not live_now: # The stream is", "return [] headers = { \"client-id\": f\"{Config.TWITCH_CLIENT_ID}\", \"Authorization\": f\"Bearer {twitch_tok}\",", "# This premiere is upcoming and not live await self.videos_channel.send(", "\"footer\": {\"text\": f\"Runtime: {chron.long_delta(duration)}\"}, } ), ) return data[\"title\"], True", "data[\"snippet\"][\"thumbnails\"] duration = data[\"contentDetails\"][\"duration\"] if current_vod == item.yt_videoid: # We", "return item.yt_videoid, True elif not announced: # A premiere was", "= data[\"snippet\"][\"liveBroadcastContent\"] upcoming = known_premieres[item.yt_videoid][0] if item.yt_videoid in known_premieres.keys() else", "\"live\" and not upcoming and not announced: # The premiere", "[] if not (twitch_tok := (await response.json())[\"access_token\"]): return [] 
headers", "= 1\") if data[\"is_live\"] and not live_now: # The stream", "in known_premieres.keys() else None if \"liveStreamingDetails\" in data.keys(): start_time =", "} ), ) await self.bot.db.execute( \"UPDATE videos SET ContentValue =", "commands.Context) -> None: last_vod = await self.get_new_vods() await ctx.send(f\"Announced VOD:", "\"author\": {\"name\": \"<NAME>\"}, \"image\": {\"url\": thumbnails[\"maxres\"][\"url\"]}, \"footer\": {\"text\": f\"Runtime: {self.youtube.get_duration(duration,", "item.yt_videoid, \"video\" ) return item.yt_videoid async def get_new_premieres(self) -> tuple:", "missed from the last stream!\", embed=discord.Embed.from_dict( { \"title\": item.title, \"description\":", "if not (data := await response.json()): return [] return data[\"items\"][0]", "{ \"title\": item.title, \"description\": desc if len(desc := item.summary) <=", "commands from carberretta import Config from carberretta.utils import DEFAULT_EMBED_COLOUR, chron", "f\"Hey {self.videos_role.mention}, a new video just went live! 
Come check", "= await self.videos_channel.fetch_message(stream_message) except (discord.NotFound, discord.Forbidden, discord.HTTPException): return else: await", "1, start, message.id, ) return data[\"title\"], False elif not data[\"is_live\"]", "len(desc := item.summary) <= 500 else f\"{desc[:500]}...\", \"color\": VOD_EMBED_COLOUR, \"url\":", "and not upcoming and not announced: # The premiere was", "thumbnails[\"maxres\"][\"url\"]}, \"footer\": {\"text\": f\"Runtime: {self.youtube.get_duration(duration, long=True)}\"}, } ), ) await", "FROM videos WHERE ContentType = ?\", \"vod\") for item in", "item.yt_videoid in known_premieres.keys() else None if \"liveStreamingDetails\" in data.keys(): start_time", "return item.yt_videoid, True async def get_new_streams(self) -> tuple: data =", "VOD: {last_vod}.\" if last_vod else \"No new VODs.\") @group_feed.command(name=\"premiere\") @commands.is_owner()", "= await self.bot.db.field(\"SELECT ContentValue FROM videos WHERE ContentType = ?\",", "\"\"\" FEEDS Handles YouTube and Twitch feed notifications. 
\"\"\" import", "data.keys(): start_time = data[\"liveStreamingDetails\"][\"scheduledStartTime\"].strip(\"Z\") scheduled_time = chron.from_iso(start_time) if not upcoming", "Catch up on anything you missed from the last stream!\",", "live_content == \"upcoming\" and not announced: # This premiere is", "await ctx.send(f\"Announced video: {last_video}.\" if last_video else \"No new videos.\")", "\"author\": {\"name\": \"<NAME>\"}, \"thumbnail\": {\"url\": data[\"thumbnail_url\"]}, \"footer\": {\"text\": f\"Runtime: {chron.long_delta(duration)}\"},", "self.call_yt_api(item.yt_videoid) thumbnails = data[\"snippet\"][\"thumbnails\"] duration = data[\"contentDetails\"][\"duration\"] if item.yt_videoid ==", "self.bot.ready.up(self) async def get_new_vods(self) -> str: current_vod = await self.bot.db.field(\"SELECT", "async with self.bot.session.post(url=oauthurl) as response: if not 200 <= response.status", "[] headers = { \"client-id\": f\"{Config.TWITCH_CLIENT_ID}\", \"Authorization\": f\"Bearer {twitch_tok}\", }", "chron.from_iso(start_time) if not upcoming and duration != \"P0D\": # We", "{ \"title\": data[\"title\"], \"description\": f\"**Category: {data['game_name']}**\", \"color\": LIVE_EMBED_COLOUR, \"url\": \"https://www.twitch.tv/carberratutorials\",", ") return data[\"title\"], False elif not data[\"is_live\"] and live_now: #", "1\" ) duration = chron.from_iso(end) - chron.from_iso(start) try: message =", "-> None: if not self.bot.ready.booted: self.videos_channel = self.bot.get_channel(Config.VIDEOS_ID) self.videos_role =", "has ended.\", \"description\": \"**Catch you in the next one!**\", \"color\":", "await response.json()): return [] return data[\"data\"][0] @commands.Cog.listener() async def on_ready(self)", "a new premiere started on {chron.long_date_and_time(scheduled_time)} UTC! 
Come and join", "None: last_vod = await self.get_new_vods() await ctx.send(f\"Announced VOD: {last_vod}.\" if", "commands.Context) -> None: if not (last_premiere := await self.get_new_premieres()): await", "ctx.send(\"No new streams.\") else: await ctx.send( f\"Stream ended: {last_stream[0]}.\" if", "current_vod = await self.bot.db.field(\"SELECT ContentValue FROM videos WHERE ContentType =", "await self.get_new_premieres()): await ctx.send(\"No new premieres.\") else: await ctx.send( f\"Announced", "self.bot.db.execute( \"UPDATE streams SET StreamLive = ?, StreamEnd = ?", "(?, ?, ?)\", item.yt_videoid, 1, 1, ) return item.yt_videoid, True", "response.json()): return [] return data[\"data\"][0] @commands.Cog.listener() async def on_ready(self) ->", "item.link, \"author\": {\"name\": \"<NAME>\"}, \"image\": {\"url\": thumbnails[\"maxres\"][\"url\"]}, \"footer\": {\"text\": f\"Runtime:", "1 ) return item.yt_videoid, True async def get_new_streams(self) -> tuple:", "LIVE_EMBED_COLOUR = 0x9146FF VOD_EMBED_COLOUR = 0x3498DB class Feeds(commands.Cog): def __init__(self,", ") await self.bot.db.execute( \"UPDATE videos SET ContentValue = ? WHERE", "item.yt_videoid == current_vid: # This is a video we already", "check it out!\", embed=discord.Embed.from_dict( { \"title\": item.title, \"description\": desc if", "= await self.call_twitch_api() if data: live_now = await self.bot.db.field(\"SELECT StreamLive", "<= response.status <= 299: return [] if not (twitch_tok :=", "new VOD just went live! 
Catch up on anything you", "__init__(self, bot: commands.Bot) -> None: self.bot = bot async def", "live_content = data[\"snippet\"][\"liveBroadcastContent\"] upcoming = known_premieres[item.yt_videoid][0] if item.yt_videoid in known_premieres.keys()", "is not live and last we checked it was (stream", "self.get_new_videos() await ctx.send(f\"Announced video: {last_video}.\" if last_video else \"No new", "async def command_feed_premiere(self, ctx: commands.Context) -> None: if not (last_premiere", "async def get_new_streams(self) -> tuple: data = await self.call_twitch_api() if", "{last_premiere[0]}.\" ) @group_feed.command(name=\"stream\") @commands.is_owner() async def command_feed_stream(self, ctx: commands.Context) ->", "response.status <= 299: return [] if not (data := feedparser.parse(await", "vod already return elif \"#VOD\" in item.summary: # This is", "new VODs.\") @group_feed.command(name=\"premiere\") @commands.is_owner() async def command_feed_premiere(self, ctx: commands.Context) ->", "= ? WHERE ID = 1\", 0, dt.datetime.utcnow() ) start,", "get_new_vods(self) -> str: current_vod = await self.bot.db.field(\"SELECT ContentValue FROM videos", "return item.yt_videoid, False elif live_content == \"live\" and not upcoming", "now! Come watch!\", embed=discord.Embed.from_dict( { \"title\": data[\"title\"], \"description\": f\"**Category: {data['game_name']}**\",", "a new video just went live! Come check it out!\",", "StreamEnd = ? WHERE ID = 1\", 0, dt.datetime.utcnow() )", "item.yt_videoid: # We announced this vod already return elif \"#VOD\"", "WHERE ID = 1\") if data[\"is_live\"] and not live_now: #", "self.bot.db.field(\"SELECT StreamLive FROM streams WHERE ID = 1\") if data[\"is_live\"]", "before if live_content == \"upcoming\" and not announced: # This", ":= await response.json()): return [] return data[\"data\"][0] @commands.Cog.listener() async def", "video just went live! 
Come check it out!\", embed=discord.Embed.from_dict( {", "\"https://www.twitch.tv/carberratutorials\", \"author\": {\"name\": \"<NAME>\"}, \"thumbnail\": {\"url\": data[\"thumbnail_url\"]}, \"footer\": {\"text\": f\"Runtime:", "dt.datetime.utcnow() ) start, stream_message, end = await self.bot.db.record( \"SELECT StreamStart,", "now live await self.videos_channel.send( f\"Hey {self.videos_role.mention}, a new premiere started", "premiere was upcoming, and is now live await self.videos_channel.send( f\"Hey", "it yet start = chron.from_iso(data[\"started_at\"].strip(\"Z\")) message = await self.videos_channel.send( f\"Hey", "video is live and its was not a premiere if", "streams SET StreamLive = ?, StreamEnd = ? WHERE ID", "(twitch_tok := (await response.json())[\"access_token\"]): return [] headers = { \"client-id\":", "{twitch_tok}\", } async with self.bot.session.get(url=url, headers=headers) as response: if not", "item.yt_videoid async def get_new_premieres(self) -> tuple: known_premieres = { _id:", "start, message.id, ) return data[\"title\"], False elif not data[\"is_live\"] and", "= await self.bot.db.record( \"SELECT StreamStart, StreamMessage, StreamEnd FROM streams WHERE", "and Twitch feed notifications. \"\"\" import datetime as dt import", "we havent announced await self.videos_channel.send( f\"Hey {self.vods_role.mention}, a new VOD", ") return item.yt_videoid, True elif not announced: # A premiere", "current_vid: # This is a video we already announced return", "(VideoID, Upcoming, Announced) VALUES (?, ?, ?)\", item.yt_videoid, 1, 1,", "just went live! 
Come check it out!\", embed=discord.Embed.from_dict( { \"title\":", "await ctx.send( f\"Announced live premiere: {last_premiere[0]}.\" if last_premiere[1] else f\"Announced", "FROM premieres\") } for item in await self.call_feed(): data =", "StreamMessage, StreamEnd FROM streams WHERE ID = 1\" ) duration", "item.summary) <= 500 else f\"{desc[:500]}...\", \"color\": DEFAULT_EMBED_COLOUR, \"url\": item.link, \"author\":", "UTC\"}, } ), ) await self.bot.db.execute( \"UPDATE streams SET StreamLive", "see you there!\", embed=discord.Embed.from_dict( { \"title\": item.title, \"description\": desc if", "{self.videos_role.mention}, a new premiere is scheduled for {chron.long_date_and_time(scheduled_time)} UTC! Hope", "if not (twitch_tok := (await response.json())[\"access_token\"]): return [] headers =", "!= \"P0D\": # We have not seen this premiere before", "ContentType = ?\", item.yt_videoid, \"vod\" ) return item.yt_videoid async def", "= f\"https://api.twitch.tv/helix/search/channels?query=carberratutorials\" oauthurl = f\"https://id.twitch.tv/oauth2/token?client_id={Config.TWITCH_CLIENT_ID}&client_secret={Config.TWITCH_CLIENT_SECRET}&grant_type=client_credentials\" async with self.bot.session.post(url=oauthurl) as response:", "<filename>carberretta/bot/cogs/feeds.py \"\"\" FEEDS Handles YouTube and Twitch feed notifications. \"\"\"", "second=45)) self.bot.ready.up(self) async def get_new_vods(self) -> str: current_vod = await", "went live! Catch up on anything you missed from the", "call_feed(self) -> dict: url = f\"https://www.youtube.com/feeds/videos.xml?channel_id={Config.YOUTUBE_CHANNEL_ID}&{dt.datetime.utcnow()}\" async with self.bot.session.get(url) as", "duration = data[\"contentDetails\"][\"duration\"] if item.yt_videoid == current_vid: # This is", "_announced] for _id, _upcoming, _announced in await self.bot.db.records(\"SELECT * FROM", "await self.bot.db.execute( \"UPDATE videos SET ContentValue = ? 
WHERE ContentType", "not (twitch_tok := (await response.json())[\"access_token\"]): return [] headers = {", "f\"Bearer {twitch_tok}\", } async with self.bot.session.get(url=url, headers=headers) as response: if", "with self.bot.session.post(url=oauthurl) as response: if not 200 <= response.status <=", "we checked it was (stream is over) await self.bot.db.execute( \"UPDATE", "data = await self.call_twitch_api() if data: live_now = await self.bot.db.field(\"SELECT", "self.videos_role = self.bot.guild.get_role(Config.VIDEOS_ROLE_ID) self.vods_role = self.bot.guild.get_role(Config.VODS_ROLE_ID) self.streams_role = self.bot.guild.get_role(Config.STREAMS_ROLE_ID) self.youtube", "= ?\", item.yt_videoid, \"vod\" ) return item.yt_videoid async def get_new_videos(self)", "f\"Hey {self.vods_role.mention}, a new VOD just went live! Catch up", "f\"**Category: {data['game_name']}**\", \"color\": LIVE_EMBED_COLOUR, \"url\": \"https://www.twitch.tv/carberratutorials\", \"author\": {\"name\": \"<NAME>\"}, \"thumbnail\":", "= { _id: [_upcoming, _announced] for _id, _upcoming, _announced in", "1, 1, ) return item.yt_videoid, True elif not announced: #", "upcoming is now live await self.videos_channel.send( f\"Hey {self.videos_role.mention}, a new", "@commands.is_owner() async def command_feed_premiere(self, ctx: commands.Context) -> None: if not", "if item.yt_videoid in known_premieres.keys() else None announced = known_premieres[item.yt_videoid][1] if", "ID = 1\", 1, start, message.id, ) return data[\"title\"], False", "{last_video}.\" if last_video else \"No new videos.\") @group_feed.command(name=\"vod\") @commands.is_owner() async", "= data[\"snippet\"][\"thumbnails\"] duration = data[\"contentDetails\"][\"duration\"] if current_vod == item.yt_videoid: #", "was (stream is over) await self.bot.db.execute( \"UPDATE streams SET StreamLive", "duration = data[\"contentDetails\"][\"duration\"] if current_vod == item.yt_videoid: # We announced", "if data: live_now = await 
self.bot.db.field(\"SELECT StreamLive FROM streams WHERE", "self.bot.application_info()).id == 696804435321552906: self.bot.scheduler.add_job(self.get_new_videos, CronTrigger(minute=\"*/3\", second=0)) self.bot.scheduler.add_job(self.get_new_vods, CronTrigger(minute=\"*/3\", second=15)) self.bot.scheduler.add_job(self.get_new_premieres,", "We announced this vod already return elif \"#VOD\" in item.summary:", "premieres.\") else: await ctx.send( f\"Announced live premiere: {last_premiere[0]}.\" if last_premiere[1]", "f\"Started: {chron.long_date_and_time(start)} UTC\"}, } ), ) await self.bot.db.execute( \"UPDATE streams", "await self.videos_channel.fetch_message(stream_message) except (discord.NotFound, discord.Forbidden, discord.HTTPException): return else: await message.edit(", "ctx: commands.Context) -> None: if not (last_stream := await self.get_new_streams()):", "duration = data[\"contentDetails\"][\"duration\"] live_content = data[\"snippet\"][\"liveBroadcastContent\"] upcoming = known_premieres[item.yt_videoid][0] if", "apscheduler.triggers.cron import CronTrigger from discord.ext import commands from carberretta import", "videos WHERE ContentType = ?\", \"video\") for item in await", "item.yt_videoid, 1, 1, ) return item.yt_videoid, True elif not announced:", "_id, _upcoming, _announced in await self.bot.db.records(\"SELECT * FROM premieres\") }", "carberretta.utils import DEFAULT_EMBED_COLOUR, chron LIVE_EMBED_COLOUR = 0x9146FF VOD_EMBED_COLOUR = 0x3498DB", "\"client-id\": f\"{Config.TWITCH_CLIENT_ID}\", \"Authorization\": f\"Bearer {twitch_tok}\", } async with self.bot.session.get(url=url, headers=headers)", "{\"url\": thumbnails[\"maxres\"][\"url\"]}, \"footer\": {\"text\": f\"Runtime: {self.youtube.get_duration(duration, long=True)}\"}, } ), )", "f\"Announced live premiere: {last_premiere[0]}.\" if last_premiere[1] else f\"Announced upcoming premiere:", "data[\"snippet\"][\"thumbnails\"] duration = data[\"contentDetails\"][\"duration\"] live_content = 
data[\"snippet\"][\"liveBroadcastContent\"] upcoming = known_premieres[item.yt_videoid][0]", "if not self.bot.ready.booted: self.videos_channel = self.bot.get_channel(Config.VIDEOS_ID) self.videos_role = self.bot.guild.get_role(Config.VIDEOS_ROLE_ID) self.vods_role", "f\"Hey {self.videos_role.mention}, a new premiere is scheduled for {chron.long_date_and_time(scheduled_time)} UTC!", "299: return [] if not (data := await response.json()): return", "self.bot.session.post(url=oauthurl) as response: if not 200 <= response.status <= 299:", "), ) await self.bot.db.execute( \"REPLACE INTO premieres (VideoID, Upcoming, Announced)", "import feedparser from apscheduler.triggers.cron import CronTrigger from discord.ext import commands", "= f\"https://www.youtube.com/feeds/videos.xml?channel_id={Config.YOUTUBE_CHANNEL_ID}&{dt.datetime.utcnow()}\" async with self.bot.session.get(url) as response: if not 200", ") return item.yt_videoid async def get_new_premieres(self) -> tuple: known_premieres =", "f\"Hey {self.videos_role.mention}, a new premiere started on {chron.long_date_and_time(scheduled_time)} UTC! Come", "1, ) return item.yt_videoid, True elif not announced: # A", "\"The stream has ended.\", \"description\": \"**Catch you in the next", "await ctx.send(\"No new premieres.\") else: await ctx.send( f\"Announced live premiere:", "WHERE ContentType = ?\", item.yt_videoid, \"vod\" ) return item.yt_videoid async", "{self.videos_role.mention}, a new premiere started on {chron.long_date_and_time(scheduled_time)} UTC! Come and", "FROM streams WHERE ID = 1\" ) duration = chron.from_iso(end)", "self.bot.db.records(\"SELECT * FROM premieres\") } for item in await self.call_feed():", "= await self.videos_channel.send( f\"Hey {self.streams_role.mention}, I'm live on Twitch now!", "content=f\"Hey {self.streams_role.mention}, I'm live on Twitch now! 
Come watch!\", embed=discord.Embed.from_dict(", "self.vods_role = self.bot.guild.get_role(Config.VODS_ROLE_ID) self.streams_role = self.bot.guild.get_role(Config.STREAMS_ROLE_ID) self.youtube = self.bot.get_cog(\"YouTube\") if", "{chron.long_delta(duration)}\"}, } ), ) return data[\"title\"], True @commands.group(name=\"feed\", invoke_without_command=True) @commands.is_owner()", "a vod we havent announced await self.videos_channel.send( f\"Hey {self.vods_role.mention}, a", "tuple: known_premieres = { _id: [_upcoming, _announced] for _id, _upcoming,", "url = f\"https://www.youtube.com/feeds/videos.xml?channel_id={Config.YOUTUBE_CHANNEL_ID}&{dt.datetime.utcnow()}\" async with self.bot.session.get(url) as response: if not", "not (data := feedparser.parse(await response.text()).entries): return [] return data async", "{\"url\": data[\"thumbnail_url\"]}, \"footer\": {\"text\": f\"Started: {chron.long_date_and_time(start)} UTC\"}, } ), )", "None: self.bot = bot async def call_feed(self) -> dict: url", ") await self.bot.db.execute( \"REPLACE INTO premieres (VideoID, Upcoming, Announced) VALUES", "StreamStart, StreamMessage, StreamEnd FROM streams WHERE ID = 1\" )", "live_content == \"live\" and not upcoming and not announced: #", "upcoming and not live await self.videos_channel.send( f\"Hey {self.videos_role.mention}, a new", "announced: # This premiere is upcoming and not live await", "it was (stream is over) await self.bot.db.execute( \"UPDATE streams SET", "0, dt.datetime.utcnow() ) start, stream_message, end = await self.bot.db.record( \"SELECT", "and join us!\", embed=discord.Embed.from_dict( { \"title\": item.title, \"description\": desc if", "200 <= response.status <= 299: return [] if not (data", "not (last_premiere := await self.get_new_premieres()): await ctx.send(\"No new premieres.\") else:", "streams WHERE ID = 1\") if data[\"is_live\"] and not live_now:", "(await self.bot.application_info()).id == 696804435321552906: 
self.bot.scheduler.add_job(self.get_new_videos, CronTrigger(minute=\"*/3\", second=0)) self.bot.scheduler.add_job(self.get_new_vods, CronTrigger(minute=\"*/3\", second=15))", "(data := feedparser.parse(await response.text()).entries): return [] return data async def", "<= 500 else f\"{desc[:500]}...\", \"color\": VOD_EMBED_COLOUR, \"url\": item.link, \"author\": {\"name\":", "f\"Hey {self.streams_role.mention}, I'm live on Twitch now! Come watch!\", embed=discord.Embed.from_dict(", "if live_content == \"upcoming\" and not announced: # This premiere", "on Twitch now! Come watch!\", embed=discord.Embed.from_dict( { \"title\": \"The stream", "on Twitch now! Come watch!\", embed=discord.Embed.from_dict( { \"title\": data[\"title\"], \"description\":", "self.bot.guild.get_role(Config.STREAMS_ROLE_ID) self.youtube = self.bot.get_cog(\"YouTube\") if (await self.bot.application_info()).id == 696804435321552906: self.bot.scheduler.add_job(self.get_new_videos,", "stream is live and we havent announced it yet start", "= ? 
WHERE ContentType = ?\", item.yt_videoid, \"vod\" ) return", "self.youtube = self.bot.get_cog(\"YouTube\") if (await self.bot.application_info()).id == 696804435321552906: self.bot.scheduler.add_job(self.get_new_videos, CronTrigger(minute=\"*/3\",", "commands.Context) -> None: last_video = await self.get_new_videos() await ctx.send(f\"Announced video:", "not in item.summary: # This isnt a VOD await self.videos_channel.send(", "_announced in await self.bot.db.records(\"SELECT * FROM premieres\") } for item", "# This isnt a VOD await self.videos_channel.send( f\"Hey {self.videos_role.mention}, a", "return data[\"data\"][0] @commands.Cog.listener() async def on_ready(self) -> None: if not", "async def get_new_videos(self) -> str: current_vid = await self.bot.db.field(\"SELECT ContentValue", "{\"text\": f\"Runtime: {chron.long_delta(duration)}\"}, } ), ) return data[\"title\"], True @commands.group(name=\"feed\",", "{\"text\": f\"Runtime: {self.youtube.get_duration(duration, long=True)}\"}, } ), ) await self.bot.db.execute( \"UPDATE", "have not seen this premiere before if live_content == \"upcoming\"", "async def on_ready(self) -> None: if not self.bot.ready.booted: self.videos_channel =", "upcoming = known_premieres[item.yt_videoid][0] if item.yt_videoid in known_premieres.keys() else None announced", "-> str: current_vid = await self.bot.db.field(\"SELECT ContentValue FROM videos WHERE", "not live_now: # The stream is live and we havent", "\"https://www.twitch.tv/carberratutorials\", \"author\": {\"name\": \"<NAME>\"}, \"thumbnail\": {\"url\": data[\"thumbnail_url\"]}, \"footer\": {\"text\": f\"Started:", "try: message = await self.videos_channel.fetch_message(stream_message) except (discord.NotFound, discord.Forbidden, discord.HTTPException): return", "f\"Runtime: {chron.long_delta(duration)}\"}, } ), ) return data[\"title\"], True @commands.group(name=\"feed\", invoke_without_command=True)", "not upcoming and not announced: # The premiere was never", 
"data[\"liveStreamingDetails\"][\"scheduledStartTime\"].strip(\"Z\") scheduled_time = chron.from_iso(start_time) if not upcoming and duration !=", "WHERE ContentType = ?\", \"vod\") for item in await self.call_feed():", "return data[\"title\"], True @commands.group(name=\"feed\", invoke_without_command=True) @commands.is_owner() async def group_feed(self, ctx:", "StreamLive FROM streams WHERE ID = 1\") if data[\"is_live\"] and", "(last_premiere := await self.get_new_premieres()): await ctx.send(\"No new premieres.\") else: await", "WHERE ID = 1\", 0, dt.datetime.utcnow() ) start, stream_message, end", "A new video is live and its was not a", "{self.youtube.get_duration(duration, long=True)}\"}, } ), ) await self.bot.db.execute( \"REPLACE INTO premieres", "chron LIVE_EMBED_COLOUR = 0x9146FF VOD_EMBED_COLOUR = 0x3498DB class Feeds(commands.Cog): def", "-> dict: url = f\"https://www.youtube.com/feeds/videos.xml?channel_id={Config.YOUTUBE_CHANNEL_ID}&{dt.datetime.utcnow()}\" async with self.bot.session.get(url) as response:", "StreamStart = ?, StreamMessage= ? 
WHERE ID = 1\", 1,", "await self.call_yt_api(item.yt_videoid) thumbnails = data[\"snippet\"][\"thumbnails\"] duration = data[\"contentDetails\"][\"duration\"] live_content =", "scheduled_time = chron.from_iso(start_time) if not upcoming and duration != \"P0D\":", "we havent announced it yet start = chron.from_iso(data[\"started_at\"].strip(\"Z\")) message =", "Announced) VALUES (?, ?, ?)\", item.yt_videoid, 1, 0, ) return", "not announced: # The premiere was never upcoming is now", "datetime as dt import discord import feedparser from apscheduler.triggers.cron import", "if len(desc := item.summary) <= 500 else f\"{desc[:500]}...\", \"color\": VOD_EMBED_COLOUR,", "await self.videos_channel.send( f\"Hey {self.videos_role.mention}, a new video just went live!", "second=30)) self.bot.scheduler.add_job(self.get_new_streams, CronTrigger(minute=\"*/3\", second=45)) self.bot.ready.up(self) async def get_new_vods(self) -> str:", "def call_twitch_api(self) -> dict: url = f\"https://api.twitch.tv/helix/search/channels?query=carberratutorials\" oauthurl = f\"https://id.twitch.tv/oauth2/token?client_id={Config.TWITCH_CLIENT_ID}&client_secret={Config.TWITCH_CLIENT_SECRET}&grant_type=client_credentials\"", "oauthurl = f\"https://id.twitch.tv/oauth2/token?client_id={Config.TWITCH_CLIENT_ID}&client_secret={Config.TWITCH_CLIENT_SECRET}&grant_type=client_credentials\" async with self.bot.session.post(url=oauthurl) as response: if not", "CronTrigger(minute=\"*/3\", second=30)) self.bot.scheduler.add_job(self.get_new_streams, CronTrigger(minute=\"*/3\", second=45)) self.bot.ready.up(self) async def get_new_vods(self) ->", "= ? 
WHERE ContentType = ?\", item.yt_videoid, \"video\" ) return", "await ctx.send(\"No new streams.\") else: await ctx.send( f\"Stream ended: {last_stream[0]}.\"", "Hope to see you there!\", embed=discord.Embed.from_dict( { \"title\": item.title, \"description\":", "= self.bot.get_channel(Config.VIDEOS_ID) self.videos_role = self.bot.guild.get_role(Config.VIDEOS_ROLE_ID) self.vods_role = self.bot.guild.get_role(Config.VODS_ROLE_ID) self.streams_role =", "item.yt_videoid in known_premieres.keys() else None announced = known_premieres[item.yt_videoid][1] if item.yt_videoid", "in item.summary: # This isnt a VOD await self.videos_channel.send( f\"Hey", "in known_premieres.keys() else None announced = known_premieres[item.yt_videoid][1] if item.yt_videoid in", "\"vod\") for item in await self.call_feed(): data = await self.call_yt_api(item.yt_videoid)", "data[\"contentDetails\"][\"duration\"] live_content = data[\"snippet\"][\"liveBroadcastContent\"] upcoming = known_premieres[item.yt_videoid][0] if item.yt_videoid in", "not live and last we checked it was (stream is", "checked it was (stream is over) await self.bot.db.execute( \"UPDATE streams", "-> dict: url = f\"https://api.twitch.tv/helix/search/channels?query=carberratutorials\" oauthurl = f\"https://id.twitch.tv/oauth2/token?client_id={Config.TWITCH_CLIENT_ID}&client_secret={Config.TWITCH_CLIENT_SECRET}&grant_type=client_credentials\" async with", "else f\"Announced upcoming premiere: {last_premiere[0]}.\" ) @group_feed.command(name=\"stream\") @commands.is_owner() async def", "await self.get_new_videos() await ctx.send(f\"Announced video: {last_video}.\" if last_video else \"No", "new video is live and its was not a premiere", "await self.bot.db.field(\"SELECT ContentValue FROM videos WHERE ContentType = ?\", \"vod\")", "(VideoID, Upcoming, Announced) VALUES (?, ?, ?)\", item.yt_videoid, 1, 1", "if not (data := feedparser.parse(await response.text()).entries): return [] return data", "), ) await self.bot.db.execute( 
\"UPDATE videos SET ContentValue = ?", "else \"No new VODs.\") @group_feed.command(name=\"premiere\") @commands.is_owner() async def command_feed_premiere(self, ctx:", "?)\", item.yt_videoid, 1, 1 ) return item.yt_videoid, True async def", "f\"Announced stream: {last_stream[0]}.\" ) def setup(bot: commands.Bot) -> None: bot.add_cog(Feeds(bot))", "0x3498DB class Feeds(commands.Cog): def __init__(self, bot: commands.Bot) -> None: self.bot", "await response.json()): return [] return data[\"items\"][0] async def call_twitch_api(self) ->", "(?, ?, ?)\", item.yt_videoid, 1, 0, ) return item.yt_videoid, False", ") start, stream_message, end = await self.bot.db.record( \"SELECT StreamStart, StreamMessage,", "VALUES (?, ?, ?)\", item.yt_videoid, 1, 1, ) return item.yt_videoid,", "response.text()).entries): return [] return data async def call_yt_api(self, video_id: str)", "= ?\", \"vod\") for item in await self.call_feed(): data =", "Twitch now! Come watch!\", embed=discord.Embed.from_dict( { \"title\": \"The stream has", "you there!\", embed=discord.Embed.from_dict( { \"title\": item.title, \"description\": desc if len(desc", "-> None: pass @group_feed.command(name=\"video\") @commands.is_owner() async def command_feed_video(self, ctx: commands.Context)", "not a premiere if \"#VOD\" not in item.summary: # This", "f\"{desc[:500]}...\", \"color\": VOD_EMBED_COLOUR, \"url\": item.link, \"author\": {\"name\": \"<NAME>\"}, \"image\": {\"url\":", ":= feedparser.parse(await response.text()).entries): return [] return data async def call_yt_api(self,", "= 0x3498DB class Feeds(commands.Cog): def __init__(self, bot: commands.Bot) -> None:", "(await response.json())[\"access_token\"]): return [] headers = { \"client-id\": f\"{Config.TWITCH_CLIENT_ID}\", \"Authorization\":", "known_premieres[item.yt_videoid][1] if item.yt_videoid in known_premieres.keys() else None if \"liveStreamingDetails\" in", "{chron.long_date_and_time(start)} UTC\"}, } ), ) await self.bot.db.execute( \"UPDATE 
streams SET", "return data[\"title\"], False elif not data[\"is_live\"] and live_now: # The", "@group_feed.command(name=\"stream\") @commands.is_owner() async def command_feed_stream(self, ctx: commands.Context) -> None: if", "f\"{Config.TWITCH_CLIENT_ID}\", \"Authorization\": f\"Bearer {twitch_tok}\", } async with self.bot.session.get(url=url, headers=headers) as", "else None if \"liveStreamingDetails\" in data.keys(): start_time = data[\"liveStreamingDetails\"][\"scheduledStartTime\"].strip(\"Z\") scheduled_time", "Come check it out!\", embed=discord.Embed.from_dict( { \"title\": item.title, \"description\": desc", "self.get_new_premieres()): await ctx.send(\"No new premieres.\") else: await ctx.send( f\"Announced live", "== \"live\" and not upcoming and not announced: # The", "else: await ctx.send( f\"Stream ended: {last_stream[0]}.\" if last_stream[1] else f\"Announced", "} async with self.bot.session.get(url=url, headers=headers) as response: if not 200", "<= 500 else f\"{desc[:500]}...\", \"color\": DEFAULT_EMBED_COLOUR, \"url\": item.link, \"author\": {\"name\":", "current_vod == item.yt_videoid: # We announced this vod already return", "200 <= response.status <= 299: return [] if not (twitch_tok", "up on anything you missed from the last stream!\", embed=discord.Embed.from_dict(", "the next one!**\", \"color\": LIVE_EMBED_COLOUR, \"url\": \"https://www.twitch.tv/carberratutorials\", \"author\": {\"name\": \"<NAME>\"},", "@group_feed.command(name=\"premiere\") @commands.is_owner() async def command_feed_premiere(self, ctx: commands.Context) -> None: if", "response: if not 200 <= response.status <= 299: return []", "command_feed_premiere(self, ctx: commands.Context) -> None: if not (last_premiere := await", "} ), ) await self.bot.db.execute( \"UPDATE streams SET StreamLive =", "live and its was not a premiere if \"#VOD\" not", "return item.yt_videoid async def get_new_premieres(self) -> tuple: known_premieres = {", "def command_feed_video(self, ctx: 
commands.Context) -> None: last_video = await self.get_new_videos()", "696804435321552906: self.bot.scheduler.add_job(self.get_new_videos, CronTrigger(minute=\"*/3\", second=0)) self.bot.scheduler.add_job(self.get_new_vods, CronTrigger(minute=\"*/3\", second=15)) self.bot.scheduler.add_job(self.get_new_premieres, CronTrigger(minute=\"*/3\", second=30))", ") return item.yt_videoid, True async def get_new_streams(self) -> tuple: data", "return elif \"liveStreamingDetails\" not in data.keys(): # A new video", "premiere was never upcoming is now live await self.videos_channel.send( f\"Hey", "data[\"thumbnail_url\"]}, \"footer\": {\"text\": f\"Started: {chron.long_date_and_time(start)} UTC\"}, } ), ) await", "{ \"title\": \"The stream has ended.\", \"description\": \"**Catch you in", "<= 299: return [] if not (twitch_tok := (await response.json())[\"access_token\"]):", "bot async def call_feed(self) -> dict: url = f\"https://www.youtube.com/feeds/videos.xml?channel_id={Config.YOUTUBE_CHANNEL_ID}&{dt.datetime.utcnow()}\" async", "notifications. \"\"\" import datetime as dt import discord import feedparser", "- chron.from_iso(start) try: message = await self.videos_channel.fetch_message(stream_message) except (discord.NotFound, discord.Forbidden,", "\"thumbnail\": {\"url\": data[\"thumbnail_url\"]}, \"footer\": {\"text\": f\"Started: {chron.long_date_and_time(start)} UTC\"}, } ),", "\"UPDATE videos SET ContentValue = ? 
WHERE ContentType = ?\",", ") duration = chron.from_iso(end) - chron.from_iso(start) try: message = await", "\"description\": \"**Catch you in the next one!**\", \"color\": LIVE_EMBED_COLOUR, \"url\":", "not live await self.videos_channel.send( f\"Hey {self.videos_role.mention}, a new premiere is", "live and we havent announced it yet start = chron.from_iso(data[\"started_at\"].strip(\"Z\"))", "The stream is not live and last we checked it", "long=True)}\"}, } ), ) await self.bot.db.execute( \"UPDATE videos SET ContentValue", "self.videos_channel.send( f\"Hey {self.videos_role.mention}, a new premiere started on {chron.long_date_and_time(scheduled_time)} UTC!", "premieres\") } for item in await self.call_feed(): data = await", "299: return [] if not (data := feedparser.parse(await response.text()).entries): return", "# The stream is live and we havent announced it", "@commands.is_owner() async def group_feed(self, ctx: commands.Context) -> None: pass @group_feed.command(name=\"video\")", "CronTrigger from discord.ext import commands from carberretta import Config from", "new videos.\") @group_feed.command(name=\"vod\") @commands.is_owner() async def command_feed_vod(self, ctx: commands.Context) ->", "DEFAULT_EMBED_COLOUR, \"url\": item.link, \"author\": {\"name\": \"<NAME>\"}, \"image\": {\"url\": thumbnails[\"maxres\"][\"url\"]}, \"footer\":", "never upcoming is now live await self.videos_channel.send( f\"Hey {self.videos_role.mention}, a", "self.streams_role = self.bot.guild.get_role(Config.STREAMS_ROLE_ID) self.youtube = self.bot.get_cog(\"YouTube\") if (await self.bot.application_info()).id ==", "f\"Runtime: {self.youtube.get_duration(duration, long=True)}\"}, } ), ) await self.bot.db.execute( \"REPLACE INTO", "} ), ) return data[\"title\"], True @commands.group(name=\"feed\", invoke_without_command=True) @commands.is_owner() async", "feedparser from apscheduler.triggers.cron import CronTrigger from discord.ext import commands from", "start_time = 
data[\"liveStreamingDetails\"][\"scheduledStartTime\"].strip(\"Z\") scheduled_time = chron.from_iso(start_time) if not upcoming and", "on_ready(self) -> None: if not self.bot.ready.booted: self.videos_channel = self.bot.get_channel(Config.VIDEOS_ID) self.videos_role", "\"thumbnail\": {\"url\": data[\"thumbnail_url\"]}, \"footer\": {\"text\": f\"Runtime: {chron.long_delta(duration)}\"}, } ), )", "await self.call_twitch_api() if data: live_now = await self.bot.db.field(\"SELECT StreamLive FROM", "Twitch feed notifications. \"\"\" import datetime as dt import discord", "None if \"liveStreamingDetails\" in data.keys(): start_time = data[\"liveStreamingDetails\"][\"scheduledStartTime\"].strip(\"Z\") scheduled_time =", "-> None: last_vod = await self.get_new_vods() await ctx.send(f\"Announced VOD: {last_vod}.\"", "\"liveStreamingDetails\" not in data.keys(): # A new video is live", "# The premiere was never upcoming is now live await", "watch!\", embed=discord.Embed.from_dict( { \"title\": \"The stream has ended.\", \"description\": \"**Catch", "(data := await response.json()): return [] return data[\"items\"][0] async def", "= chron.from_iso(start_time) if not upcoming and duration != \"P0D\": #", "(data := await response.json()): return [] return data[\"data\"][0] @commands.Cog.listener() async", "0, ) return item.yt_videoid, False elif live_content == \"live\" and", "None: if not (last_premiere := await self.get_new_premieres()): await ctx.send(\"No new", "= self.bot.guild.get_role(Config.VIDEOS_ROLE_ID) self.vods_role = self.bot.guild.get_role(Config.VODS_ROLE_ID) self.streams_role = self.bot.guild.get_role(Config.STREAMS_ROLE_ID) self.youtube =", "VODs.\") @group_feed.command(name=\"premiere\") @commands.is_owner() async def command_feed_premiere(self, ctx: commands.Context) -> None:", "elif not announced: # A premiere was upcoming, and is", "videos WHERE ContentType = ?\", \"vod\") for item in await", "self.videos_channel.send( f\"Hey {self.vods_role.mention}, a 
new VOD just went live! Catch", "self.bot.db.field(\"SELECT ContentValue FROM videos WHERE ContentType = ?\", \"video\") for", "return [] if not (data := feedparser.parse(await response.text()).entries): return []", "data[\"items\"][0] async def call_twitch_api(self) -> dict: url = f\"https://api.twitch.tv/helix/search/channels?query=carberratutorials\" oauthurl", "a video we already announced return elif \"liveStreamingDetails\" not in", "self.videos_channel.send( f\"Hey {self.videos_role.mention}, a new video just went live! Come", "{self.streams_role.mention}, I'm live on Twitch now! Come watch!\", embed=discord.Embed.from_dict( {", "{\"text\": f\"Runtime: {self.youtube.get_duration(duration, long=True)}\"}, } ), ) await self.bot.db.execute( \"REPLACE", "item.yt_videoid, 1, 1 ) return item.yt_videoid, True async def get_new_streams(self)", "last_vod = await self.get_new_vods() await ctx.send(f\"Announced VOD: {last_vod}.\" if last_vod", "live premiere: {last_premiere[0]}.\" if last_premiere[1] else f\"Announced upcoming premiere: {last_premiere[0]}.\"", "ContentValue = ? 
WHERE ContentType = ?\", item.yt_videoid, \"video\" )", "), ) return data[\"title\"], True @commands.group(name=\"feed\", invoke_without_command=True) @commands.is_owner() async def", "str: current_vid = await self.bot.db.field(\"SELECT ContentValue FROM videos WHERE ContentType", "video_id: str) -> dict: url = f\"https://www.googleapis.com/youtube/v3/videos?part=contentDetails%2CliveStreamingDetails%2Csnippet&id={video_id}&key={Config.YOUTUBE_API_KEY}\" async with self.bot.session.get(url)", "ctx.send(f\"Announced VOD: {last_vod}.\" if last_vod else \"No new VODs.\") @group_feed.command(name=\"premiere\")", "[] if not (data := feedparser.parse(await response.text()).entries): return [] return", "async with self.bot.session.get(url) as response: if not 200 <= response.status", "long=True)}\"}, } ), ) await self.bot.db.execute( \"REPLACE INTO premieres (VideoID,", "Announced) VALUES (?, ?, ?)\", item.yt_videoid, 1, 1 ) return", "item.summary: # This isnt a VOD await self.videos_channel.send( f\"Hey {self.videos_role.mention},", "the last stream!\", embed=discord.Embed.from_dict( { \"title\": item.title, \"description\": desc if", "self.bot.scheduler.add_job(self.get_new_vods, CronTrigger(minute=\"*/3\", second=15)) self.bot.scheduler.add_job(self.get_new_premieres, CronTrigger(minute=\"*/3\", second=30)) self.bot.scheduler.add_job(self.get_new_streams, CronTrigger(minute=\"*/3\", second=45)) self.bot.ready.up(self)", "VALUES (?, ?, ?)\", item.yt_videoid, 1, 0, ) return item.yt_videoid,", "\"description\": f\"**Category: {data['game_name']}**\", \"color\": LIVE_EMBED_COLOUR, \"url\": \"https://www.twitch.tv/carberratutorials\", \"author\": {\"name\": \"<NAME>\"},", "{\"name\": \"<NAME>\"}, \"thumbnail\": {\"url\": data[\"thumbnail_url\"]}, \"footer\": {\"text\": f\"Started: {chron.long_date_and_time(start)} UTC\"},", "1\", 1, start, message.id, ) return data[\"title\"], False elif not", "[] return data[\"data\"][0] @commands.Cog.listener() async def on_ready(self) -> 
None: if", ") return data[\"title\"], True @commands.group(name=\"feed\", invoke_without_command=True) @commands.is_owner() async def group_feed(self,", "return item.yt_videoid async def get_new_videos(self) -> str: current_vid = await", "last_video else \"No new videos.\") @group_feed.command(name=\"vod\") @commands.is_owner() async def command_feed_vod(self,", "self.call_yt_api(item.yt_videoid) thumbnails = data[\"snippet\"][\"thumbnails\"] duration = data[\"contentDetails\"][\"duration\"] if current_vod ==", "if not upcoming and duration != \"P0D\": # We have", "if last_stream[1] else f\"Announced stream: {last_stream[0]}.\" ) def setup(bot: commands.Bot)", "# The stream is not live and last we checked", "@group_feed.command(name=\"vod\") @commands.is_owner() async def command_feed_vod(self, ctx: commands.Context) -> None: last_vod", "\"url\": item.link, \"author\": {\"name\": \"<NAME>\"}, \"image\": {\"url\": thumbnails[\"maxres\"][\"url\"]}, \"footer\": {\"text\":", "get_new_streams(self) -> tuple: data = await self.call_twitch_api() if data: live_now", "ctx: commands.Context) -> None: last_video = await self.get_new_videos() await ctx.send(f\"Announced", "Handles YouTube and Twitch feed notifications. \"\"\" import datetime as", "with self.bot.session.get(url=url, headers=headers) as response: if not 200 <= response.status", "this vod already return elif \"#VOD\" in item.summary: # This", "class Feeds(commands.Cog): def __init__(self, bot: commands.Bot) -> None: self.bot =", "False elif not data[\"is_live\"] and live_now: # The stream is", "{chron.long_date_and_time(scheduled_time)} UTC! 
Come and join us!\", embed=discord.Embed.from_dict( { \"title\": item.title,", "None: pass @group_feed.command(name=\"video\") @commands.is_owner() async def command_feed_video(self, ctx: commands.Context) ->", "# A new video is live and its was not", "anything you missed from the last stream!\", embed=discord.Embed.from_dict( { \"title\":", "= ?\", \"video\") for item in await self.call_feed(): data =", "elif not data[\"is_live\"] and live_now: # The stream is not", "stream_message, end = await self.bot.db.record( \"SELECT StreamStart, StreamMessage, StreamEnd FROM", "\"color\": DEFAULT_EMBED_COLOUR, \"url\": item.link, \"author\": {\"name\": \"<NAME>\"}, \"image\": {\"url\": thumbnails[\"maxres\"][\"url\"]},", "?\", item.yt_videoid, \"video\" ) return item.yt_videoid async def get_new_premieres(self) ->", "FROM videos WHERE ContentType = ?\", \"video\") for item in", "\"SELECT StreamStart, StreamMessage, StreamEnd FROM streams WHERE ID = 1\"", "headers = { \"client-id\": f\"{Config.TWITCH_CLIENT_ID}\", \"Authorization\": f\"Bearer {twitch_tok}\", } async", "?)\", item.yt_videoid, 1, 0, ) return item.yt_videoid, False elif live_content", "you in the next one!**\", \"color\": LIVE_EMBED_COLOUR, \"url\": \"https://www.twitch.tv/carberratutorials\", \"author\":", "stream is not live and last we checked it was", "WHERE ContentType = ?\", \"video\") for item in await self.call_feed():", "\"No new videos.\") @group_feed.command(name=\"vod\") @commands.is_owner() async def command_feed_vod(self, ctx: commands.Context)", "Twitch now! Come watch!\", embed=discord.Embed.from_dict( { \"title\": data[\"title\"], \"description\": f\"**Category:", "just went live! 
Catch up on anything you missed from", "dict: url = f\"https://api.twitch.tv/helix/search/channels?query=carberratutorials\" oauthurl = f\"https://id.twitch.tv/oauth2/token?client_id={Config.TWITCH_CLIENT_ID}&client_secret={Config.TWITCH_CLIENT_SECRET}&grant_type=client_credentials\" async with self.bot.session.post(url=oauthurl)", "\"<NAME>\"}, \"image\": {\"url\": thumbnails[\"maxres\"][\"url\"]}, \"footer\": {\"text\": f\"Runtime: {self.youtube.get_duration(duration, long=True)}\"}, }", "over) await self.bot.db.execute( \"UPDATE streams SET StreamLive = ?, StreamEnd", "and last we checked it was (stream is over) await", "str: current_vod = await self.bot.db.field(\"SELECT ContentValue FROM videos WHERE ContentType", "data[\"is_live\"] and live_now: # The stream is not live and", "ctx: commands.Context) -> None: last_vod = await self.get_new_vods() await ctx.send(f\"Announced", "commands.Bot) -> None: self.bot = bot async def call_feed(self) ->", "return data async def call_yt_api(self, video_id: str) -> dict: url", "{self.youtube.get_duration(duration, long=True)}\"}, } ), ) await self.bot.db.execute( \"UPDATE videos SET", "= 1\" ) duration = chron.from_iso(end) - chron.from_iso(start) try: message", "@group_feed.command(name=\"video\") @commands.is_owner() async def command_feed_video(self, ctx: commands.Context) -> None: last_video", "data async def call_yt_api(self, video_id: str) -> dict: url =", ":= item.summary) <= 500 else f\"{desc[:500]}...\", \"color\": DEFAULT_EMBED_COLOUR, \"url\": item.link,", "ended.\", \"description\": \"**Catch you in the next one!**\", \"color\": LIVE_EMBED_COLOUR,", "def command_feed_vod(self, ctx: commands.Context) -> None: last_vod = await self.get_new_vods()", "\"color\": VOD_EMBED_COLOUR, \"url\": item.link, \"author\": {\"name\": \"<NAME>\"}, \"image\": {\"url\": thumbnails[\"maxres\"][\"url\"]},", "self.bot.db.execute( \"UPDATE streams SET StreamLive = ?, StreamStart = ?,", "last we checked it was (stream is over) await 
self.bot.db.execute(", "live! Catch up on anything you missed from the last", "self.get_new_streams()): await ctx.send(\"No new streams.\") else: await ctx.send( f\"Stream ended:", "self.get_new_vods() await ctx.send(f\"Announced VOD: {last_vod}.\" if last_vod else \"No new", "else None announced = known_premieres[item.yt_videoid][1] if item.yt_videoid in known_premieres.keys() else", "item.yt_videoid, True elif not announced: # A premiere was upcoming,", "self.bot.db.field(\"SELECT ContentValue FROM videos WHERE ContentType = ?\", \"vod\") for", "is a vod we havent announced await self.videos_channel.send( f\"Hey {self.vods_role.mention},", "message = await self.videos_channel.fetch_message(stream_message) except (discord.NotFound, discord.Forbidden, discord.HTTPException): return else:", "stream has ended.\", \"description\": \"**Catch you in the next one!**\",", "item.summary) <= 500 else f\"{desc[:500]}...\", \"color\": VOD_EMBED_COLOUR, \"url\": item.link, \"author\":", "havent announced await self.videos_channel.send( f\"Hey {self.vods_role.mention}, a new VOD just", "a new premiere is scheduled for {chron.long_date_and_time(scheduled_time)} UTC! 
Hope to", "new streams.\") else: await ctx.send( f\"Stream ended: {last_stream[0]}.\" if last_stream[1]", "Come and join us!\", embed=discord.Embed.from_dict( { \"title\": item.title, \"description\": desc", "last stream!\", embed=discord.Embed.from_dict( { \"title\": item.title, \"description\": desc if len(desc", "and not announced: # This premiere is upcoming and not", "announced: # A premiere was upcoming, and is now live", "as dt import discord import feedparser from apscheduler.triggers.cron import CronTrigger", "self.bot.guild.get_role(Config.VIDEOS_ROLE_ID) self.vods_role = self.bot.guild.get_role(Config.VODS_ROLE_ID) self.streams_role = self.bot.guild.get_role(Config.STREAMS_ROLE_ID) self.youtube = self.bot.get_cog(\"YouTube\")", "video: {last_video}.\" if last_video else \"No new videos.\") @group_feed.command(name=\"vod\") @commands.is_owner()", "# This is a vod we havent announced await self.videos_channel.send(", "= data[\"contentDetails\"][\"duration\"] if item.yt_videoid == current_vid: # This is a", "not (data := await response.json()): return [] return data[\"items\"][0] async", "f\"https://id.twitch.tv/oauth2/token?client_id={Config.TWITCH_CLIENT_ID}&client_secret={Config.TWITCH_CLIENT_SECRET}&grant_type=client_credentials\" async with self.bot.session.post(url=oauthurl) as response: if not 200 <=", "response.status <= 299: return [] if not (twitch_tok := (await", "return [] return data[\"data\"][0] @commands.Cog.listener() async def on_ready(self) -> None:", "premiere: {last_premiere[0]}.\" if last_premiere[1] else f\"Announced upcoming premiere: {last_premiere[0]}.\" )", "watch!\", embed=discord.Embed.from_dict( { \"title\": data[\"title\"], \"description\": f\"**Category: {data['game_name']}**\", \"color\": LIVE_EMBED_COLOUR,", "item.yt_videoid, False elif live_content == \"live\" and not upcoming and", "str) -> dict: url = 
f\"https://www.googleapis.com/youtube/v3/videos?part=contentDetails%2CliveStreamingDetails%2Csnippet&id={video_id}&key={Config.YOUTUBE_API_KEY}\" async with self.bot.session.get(url) as", "not 200 <= response.status <= 299: return [] if not", "500 else f\"{desc[:500]}...\", \"color\": VOD_EMBED_COLOUR, \"url\": item.link, \"author\": {\"name\": \"<NAME>\"},", "= known_premieres[item.yt_videoid][1] if item.yt_videoid in known_premieres.keys() else None if \"liveStreamingDetails\"", "second=0)) self.bot.scheduler.add_job(self.get_new_vods, CronTrigger(minute=\"*/3\", second=15)) self.bot.scheduler.add_job(self.get_new_premieres, CronTrigger(minute=\"*/3\", second=30)) self.bot.scheduler.add_job(self.get_new_streams, CronTrigger(minute=\"*/3\", second=45))", "CronTrigger(minute=\"*/3\", second=45)) self.bot.ready.up(self) async def get_new_vods(self) -> str: current_vod =", "item.yt_videoid async def get_new_videos(self) -> str: current_vid = await self.bot.db.field(\"SELECT", "await self.bot.db.field(\"SELECT ContentValue FROM videos WHERE ContentType = ?\", \"video\")", "(stream is over) await self.bot.db.execute( \"UPDATE streams SET StreamLive =", "f\"https://www.googleapis.com/youtube/v3/videos?part=contentDetails%2CliveStreamingDetails%2Csnippet&id={video_id}&key={Config.YOUTUBE_API_KEY}\" async with self.bot.session.get(url) as response: if not 200 <=", "except (discord.NotFound, discord.Forbidden, discord.HTTPException): return else: await message.edit( content=f\"Hey {self.streams_role.mention},", "Come watch!\", embed=discord.Embed.from_dict( { \"title\": data[\"title\"], \"description\": f\"**Category: {data['game_name']}**\", \"color\":", "f\"Announced upcoming premiere: {last_premiere[0]}.\" ) @group_feed.command(name=\"stream\") @commands.is_owner() async def command_feed_stream(self,", "bot: commands.Bot) -> None: self.bot = bot async def call_feed(self)", "data = await self.call_yt_api(item.yt_videoid) thumbnails = data[\"snippet\"][\"thumbnails\"] 
duration = data[\"contentDetails\"][\"duration\"]", "commands.Context) -> None: pass @group_feed.command(name=\"video\") @commands.is_owner() async def command_feed_video(self, ctx:", "import DEFAULT_EMBED_COLOUR, chron LIVE_EMBED_COLOUR = 0x9146FF VOD_EMBED_COLOUR = 0x3498DB class", "self.bot.scheduler.add_job(self.get_new_streams, CronTrigger(minute=\"*/3\", second=45)) self.bot.ready.up(self) async def get_new_vods(self) -> str: current_vod", "CronTrigger(minute=\"*/3\", second=0)) self.bot.scheduler.add_job(self.get_new_vods, CronTrigger(minute=\"*/3\", second=15)) self.bot.scheduler.add_job(self.get_new_premieres, CronTrigger(minute=\"*/3\", second=30)) self.bot.scheduler.add_job(self.get_new_streams, CronTrigger(minute=\"*/3\",", "\"P0D\": # We have not seen this premiere before if", "pass @group_feed.command(name=\"video\") @commands.is_owner() async def command_feed_video(self, ctx: commands.Context) -> None:", "data[\"snippet\"][\"thumbnails\"] duration = data[\"contentDetails\"][\"duration\"] if item.yt_videoid == current_vid: # This", "self.call_feed(): data = await self.call_yt_api(item.yt_videoid) thumbnails = data[\"snippet\"][\"thumbnails\"] duration =", "item.yt_videoid, True async def get_new_streams(self) -> tuple: data = await", "if item.yt_videoid == current_vid: # This is a video we", "_id: [_upcoming, _announced] for _id, _upcoming, _announced in await self.bot.db.records(\"SELECT", "This is a vod we havent announced await self.videos_channel.send( f\"Hey", "data[\"snippet\"][\"liveBroadcastContent\"] upcoming = known_premieres[item.yt_videoid][0] if item.yt_videoid in known_premieres.keys() else None", "VOD_EMBED_COLOUR, \"url\": item.link, \"author\": {\"name\": \"<NAME>\"}, \"image\": {\"url\": thumbnails[\"maxres\"][\"url\"]}, \"footer\":", "= data[\"snippet\"][\"thumbnails\"] duration = data[\"contentDetails\"][\"duration\"] if item.yt_videoid == current_vid: #", "[_upcoming, _announced] for _id, _upcoming, _announced in await 
self.bot.db.records(\"SELECT *", "@commands.is_owner() async def command_feed_video(self, ctx: commands.Context) -> None: last_video =", "{data['game_name']}**\", \"color\": LIVE_EMBED_COLOUR, \"url\": \"https://www.twitch.tv/carberratutorials\", \"author\": {\"name\": \"<NAME>\"}, \"thumbnail\": {\"url\":", "def __init__(self, bot: commands.Bot) -> None: self.bot = bot async", "data[\"title\"], False elif not data[\"is_live\"] and live_now: # The stream", "premiere: {last_premiere[0]}.\" ) @group_feed.command(name=\"stream\") @commands.is_owner() async def command_feed_stream(self, ctx: commands.Context)", "} ), ) await self.bot.db.execute( \"REPLACE INTO premieres (VideoID, Upcoming,", "thumbnails = data[\"snippet\"][\"thumbnails\"] duration = data[\"contentDetails\"][\"duration\"] live_content = data[\"snippet\"][\"liveBroadcastContent\"] upcoming", "on anything you missed from the last stream!\", embed=discord.Embed.from_dict( {", "= data[\"liveStreamingDetails\"][\"scheduledStartTime\"].strip(\"Z\") scheduled_time = chron.from_iso(start_time) if not upcoming and duration", "premiere is upcoming and not live await self.videos_channel.send( f\"Hey {self.videos_role.mention},", "havent announced it yet start = chron.from_iso(data[\"started_at\"].strip(\"Z\")) message = await", "embed=discord.Embed.from_dict( { \"title\": data[\"title\"], \"description\": f\"**Category: {data['game_name']}**\", \"color\": LIVE_EMBED_COLOUR, \"url\":", "1, 1 ) return item.yt_videoid, True async def get_new_streams(self) ->", "await self.get_new_streams()): await ctx.send(\"No new streams.\") else: await ctx.send( f\"Stream", "video we already announced return elif \"liveStreamingDetails\" not in data.keys():", "SET StreamLive = ?, StreamStart = ?, StreamMessage= ? 
WHERE", "ContentValue FROM videos WHERE ContentType = ?\", \"video\") for item", "?\", item.yt_videoid, \"vod\" ) return item.yt_videoid async def get_new_videos(self) ->", "if not (last_premiere := await self.get_new_premieres()): await ctx.send(\"No new premieres.\")", "live_now: # The stream is not live and last we", "? WHERE ContentType = ?\", item.yt_videoid, \"vod\" ) return item.yt_videoid", "return [] if not (twitch_tok := (await response.json())[\"access_token\"]): return []", "feedparser.parse(await response.text()).entries): return [] return data async def call_yt_api(self, video_id:", "ID = 1\") if data[\"is_live\"] and not live_now: # The", "= { \"client-id\": f\"{Config.TWITCH_CLIENT_ID}\", \"Authorization\": f\"Bearer {twitch_tok}\", } async with", "= f\"https://www.googleapis.com/youtube/v3/videos?part=contentDetails%2CliveStreamingDetails%2Csnippet&id={video_id}&key={Config.YOUTUBE_API_KEY}\" async with self.bot.session.get(url) as response: if not 200", ":= await self.get_new_premieres()): await ctx.send(\"No new premieres.\") else: await ctx.send(", "<= response.status <= 299: return [] if not (data :=", "if data[\"is_live\"] and not live_now: # The stream is live", "ctx: commands.Context) -> None: pass @group_feed.command(name=\"video\") @commands.is_owner() async def command_feed_video(self,", "async def get_new_vods(self) -> str: current_vod = await self.bot.db.field(\"SELECT ContentValue", "{ _id: [_upcoming, _announced] for _id, _upcoming, _announced in await", "def call_feed(self) -> dict: url = f\"https://www.youtube.com/feeds/videos.xml?channel_id={Config.YOUTUBE_CHANNEL_ID}&{dt.datetime.utcnow()}\" async with self.bot.session.get(url)", "1\", 0, dt.datetime.utcnow() ) start, stream_message, end = await self.bot.db.record(", "if not 200 <= response.status <= 299: return [] if", "in data.keys(): # A new video is live and its", "This isnt a VOD await self.videos_channel.send( f\"Hey {self.videos_role.mention}, a new", "StreamLive = ?, 
StreamEnd = ? WHERE ID = 1\",", "message = await self.videos_channel.send( f\"Hey {self.streams_role.mention}, I'm live on Twitch", "await self.videos_channel.send( f\"Hey {self.vods_role.mention}, a new VOD just went live!", "Upcoming, Announced) VALUES (?, ?, ?)\", item.yt_videoid, 1, 1, )", "known_premieres.keys() else None if \"liveStreamingDetails\" in data.keys(): start_time = data[\"liveStreamingDetails\"][\"scheduledStartTime\"].strip(\"Z\")", "== 696804435321552906: self.bot.scheduler.add_job(self.get_new_videos, CronTrigger(minute=\"*/3\", second=0)) self.bot.scheduler.add_job(self.get_new_vods, CronTrigger(minute=\"*/3\", second=15)) self.bot.scheduler.add_job(self.get_new_premieres, CronTrigger(minute=\"*/3\",", "\"<NAME>\"}, \"thumbnail\": {\"url\": data[\"thumbnail_url\"]}, \"footer\": {\"text\": f\"Runtime: {chron.long_delta(duration)}\"}, } ),", "a premiere if \"#VOD\" not in item.summary: # This isnt", "StreamLive = ?, StreamStart = ?, StreamMessage= ? WHERE ID", "data[\"contentDetails\"][\"duration\"] if current_vod == item.yt_videoid: # We announced this vod", "VOD await self.videos_channel.send( f\"Hey {self.videos_role.mention}, a new video just went", "# We have not seen this premiere before if live_content", "announced: # The premiere was never upcoming is now live", "# We announced this vod already return elif \"#VOD\" in", "get_new_premieres(self) -> tuple: known_premieres = { _id: [_upcoming, _announced] for", "message.edit( content=f\"Hey {self.streams_role.mention}, I'm live on Twitch now! 
Come watch!\",", "{\"text\": f\"Started: {chron.long_date_and_time(start)} UTC\"}, } ), ) await self.bot.db.execute( \"UPDATE", "is live and we havent announced it yet start =", "@commands.group(name=\"feed\", invoke_without_command=True) @commands.is_owner() async def group_feed(self, ctx: commands.Context) -> None:", "\"\"\" import datetime as dt import discord import feedparser from", ") await self.bot.db.execute( \"UPDATE streams SET StreamLive = ?, StreamStart", "?, StreamStart = ?, StreamMessage= ? WHERE ID = 1\",", "ctx.send(\"No new premieres.\") else: await ctx.send( f\"Announced live premiere: {last_premiere[0]}.\"", "self.call_twitch_api() if data: live_now = await self.bot.db.field(\"SELECT StreamLive FROM streams", "if \"#VOD\" not in item.summary: # This isnt a VOD", "{\"url\": data[\"thumbnail_url\"]}, \"footer\": {\"text\": f\"Runtime: {chron.long_delta(duration)}\"}, } ), ) return", "from carberretta.utils import DEFAULT_EMBED_COLOUR, chron LIVE_EMBED_COLOUR = 0x9146FF VOD_EMBED_COLOUR =", "dict: url = f\"https://www.googleapis.com/youtube/v3/videos?part=contentDetails%2CliveStreamingDetails%2Csnippet&id={video_id}&key={Config.YOUTUBE_API_KEY}\" async with self.bot.session.get(url) as response: if", "import Config from carberretta.utils import DEFAULT_EMBED_COLOUR, chron LIVE_EMBED_COLOUR = 0x9146FF", "ctx.send(f\"Announced video: {last_video}.\" if last_video else \"No new videos.\") @group_feed.command(name=\"vod\")", "there!\", embed=discord.Embed.from_dict( { \"title\": item.title, \"description\": desc if len(desc :=", "known_premieres.keys() else None announced = known_premieres[item.yt_videoid][1] if item.yt_videoid in known_premieres.keys()", "True elif not announced: # A premiere was upcoming, and", "and we havent announced it yet start = chron.from_iso(data[\"started_at\"].strip(\"Z\")) message", "(discord.NotFound, discord.Forbidden, discord.HTTPException): return else: await message.edit( content=f\"Hey {self.streams_role.mention}, 
I'm", "from the last stream!\", embed=discord.Embed.from_dict( { \"title\": item.title, \"description\": desc", "async with self.bot.session.get(url=url, headers=headers) as response: if not 200 <=", "We have not seen this premiere before if live_content ==", "response.json()): return [] return data[\"items\"][0] async def call_twitch_api(self) -> dict:", "for {chron.long_date_and_time(scheduled_time)} UTC! Hope to see you there!\", embed=discord.Embed.from_dict( {", "on {chron.long_date_and_time(scheduled_time)} UTC! Come and join us!\", embed=discord.Embed.from_dict( { \"title\":", "True async def get_new_streams(self) -> tuple: data = await self.call_twitch_api()", "elif \"liveStreamingDetails\" not in data.keys(): # A new video is", "not self.bot.ready.booted: self.videos_channel = self.bot.get_channel(Config.VIDEOS_ID) self.videos_role = self.bot.guild.get_role(Config.VIDEOS_ROLE_ID) self.vods_role =", "self.bot.db.execute( \"UPDATE videos SET ContentValue = ? WHERE ContentType =", "\"vod\" ) return item.yt_videoid async def get_new_videos(self) -> str: current_vid", "known_premieres[item.yt_videoid][0] if item.yt_videoid in known_premieres.keys() else None announced = known_premieres[item.yt_videoid][1]", "self.bot.scheduler.add_job(self.get_new_premieres, CronTrigger(minute=\"*/3\", second=30)) self.bot.scheduler.add_job(self.get_new_streams, CronTrigger(minute=\"*/3\", second=45)) self.bot.ready.up(self) async def get_new_vods(self)", "desc if len(desc := item.summary) <= 500 else f\"{desc[:500]}...\", \"color\":", "f\"Runtime: {self.youtube.get_duration(duration, long=True)}\"}, } ), ) await self.bot.db.execute( \"UPDATE videos", "call_yt_api(self, video_id: str) -> dict: url = f\"https://www.googleapis.com/youtube/v3/videos?part=contentDetails%2CliveStreamingDetails%2Csnippet&id={video_id}&key={Config.YOUTUBE_API_KEY}\" async with", "?, StreamMessage= ? 
WHERE ID = 1\", 1, start, message.id,", "0x9146FF VOD_EMBED_COLOUR = 0x3498DB class Feeds(commands.Cog): def __init__(self, bot: commands.Bot)", "WHERE ID = 1\" ) duration = chron.from_iso(end) - chron.from_iso(start)", "upcoming, and is now live await self.videos_channel.send( f\"Hey {self.videos_role.mention}, a", "{last_vod}.\" if last_vod else \"No new VODs.\") @group_feed.command(name=\"premiere\") @commands.is_owner() async", "live_now = await self.bot.db.field(\"SELECT StreamLive FROM streams WHERE ID =", "ctx: commands.Context) -> None: if not (last_premiere := await self.get_new_premieres()):", ":= await response.json()): return [] return data[\"items\"][0] async def call_twitch_api(self)", "out!\", embed=discord.Embed.from_dict( { \"title\": item.title, \"description\": desc if len(desc :=", ") @group_feed.command(name=\"stream\") @commands.is_owner() async def command_feed_stream(self, ctx: commands.Context) -> None:", "went live! Come check it out!\", embed=discord.Embed.from_dict( { \"title\": item.title,", "live and last we checked it was (stream is over)", "not data[\"is_live\"] and live_now: # The stream is not live", "(last_stream := await self.get_new_streams()): await ctx.send(\"No new streams.\") else: await", "if len(desc := item.summary) <= 500 else f\"{desc[:500]}...\", \"color\": DEFAULT_EMBED_COLOUR,", "if current_vod == item.yt_videoid: # We announced this vod already", "not announced: # A premiere was upcoming, and is now", "\"title\": item.title, \"description\": desc if len(desc := item.summary) <= 500", "?\", \"vod\") for item in await self.call_feed(): data = await", "item.yt_videoid, \"vod\" ) return item.yt_videoid async def get_new_videos(self) -> str:", ":= item.summary) <= 500 else f\"{desc[:500]}...\", \"color\": VOD_EMBED_COLOUR, \"url\": item.link,", "if last_video else \"No new videos.\") @group_feed.command(name=\"vod\") @commands.is_owner() async def", "import commands from carberretta import Config from carberretta.utils 
import DEFAULT_EMBED_COLOUR,", "announced await self.videos_channel.send( f\"Hey {self.vods_role.mention}, a new VOD just went", "end = await self.bot.db.record( \"SELECT StreamStart, StreamMessage, StreamEnd FROM streams", "\"url\": \"https://www.twitch.tv/carberratutorials\", \"author\": {\"name\": \"<NAME>\"}, \"thumbnail\": {\"url\": data[\"thumbnail_url\"]}, \"footer\": {\"text\":", "The premiere was never upcoming is now live await self.videos_channel.send(", "ended: {last_stream[0]}.\" if last_stream[1] else f\"Announced stream: {last_stream[0]}.\" ) def", "{ \"client-id\": f\"{Config.TWITCH_CLIENT_ID}\", \"Authorization\": f\"Bearer {twitch_tok}\", } async with self.bot.session.get(url=url,", "is upcoming and not live await self.videos_channel.send( f\"Hey {self.videos_role.mention}, a", "premiere before if live_content == \"upcoming\" and not announced: #", "in data.keys(): start_time = data[\"liveStreamingDetails\"][\"scheduledStartTime\"].strip(\"Z\") scheduled_time = chron.from_iso(start_time) if not", "\"**Catch you in the next one!**\", \"color\": LIVE_EMBED_COLOUR, \"url\": \"https://www.twitch.tv/carberratutorials\",", "already return elif \"#VOD\" in item.summary: # This is a", "if not (last_stream := await self.get_new_streams()): await ctx.send(\"No new streams.\")", "get_new_videos(self) -> str: current_vid = await self.bot.db.field(\"SELECT ContentValue FROM videos", "1, 0, ) return item.yt_videoid, False elif live_content == \"live\"", "item in await self.call_feed(): data = await self.call_yt_api(item.yt_videoid) thumbnails =", "yet start = chron.from_iso(data[\"started_at\"].strip(\"Z\")) message = await self.videos_channel.send( f\"Hey {self.streams_role.mention},", "message.id, ) return data[\"title\"], False elif not data[\"is_live\"] and live_now:", "live await self.videos_channel.send( f\"Hey {self.videos_role.mention}, a new premiere started on", "ContentValue = ? 
WHERE ContentType = ?\", item.yt_videoid, \"vod\" )", "one!**\", \"color\": LIVE_EMBED_COLOUR, \"url\": \"https://www.twitch.tv/carberratutorials\", \"author\": {\"name\": \"<NAME>\"}, \"thumbnail\": {\"url\":", "DEFAULT_EMBED_COLOUR, chron LIVE_EMBED_COLOUR = 0x9146FF VOD_EMBED_COLOUR = 0x3498DB class Feeds(commands.Cog):", "next one!**\", \"color\": LIVE_EMBED_COLOUR, \"url\": \"https://www.twitch.tv/carberratutorials\", \"author\": {\"name\": \"<NAME>\"}, \"thumbnail\":", "group_feed(self, ctx: commands.Context) -> None: pass @group_feed.command(name=\"video\") @commands.is_owner() async def", "feed notifications. \"\"\" import datetime as dt import discord import", "None: last_video = await self.get_new_videos() await ctx.send(f\"Announced video: {last_video}.\" if", "else f\"{desc[:500]}...\", \"color\": VOD_EMBED_COLOUR, \"url\": item.link, \"author\": {\"name\": \"<NAME>\"}, \"image\":", "{self.videos_role.mention}, a new video just went live! Come check it", "== current_vid: # This is a video we already announced", "is over) await self.bot.db.execute( \"UPDATE streams SET StreamLive = ?,", "\"UPDATE streams SET StreamLive = ?, StreamEnd = ? 
WHERE", "and not live await self.videos_channel.send( f\"Hey {self.videos_role.mention}, a new premiere", "{\"name\": \"<NAME>\"}, \"thumbnail\": {\"url\": data[\"thumbnail_url\"]}, \"footer\": {\"text\": f\"Runtime: {chron.long_delta(duration)}\"}, }", "thumbnails = data[\"snippet\"][\"thumbnails\"] duration = data[\"contentDetails\"][\"duration\"] if item.yt_videoid == current_vid:", "\"footer\": {\"text\": f\"Started: {chron.long_date_and_time(start)} UTC\"}, } ), ) await self.bot.db.execute(", "ID = 1\" ) duration = chron.from_iso(end) - chron.from_iso(start) try:", "\"No new VODs.\") @group_feed.command(name=\"premiere\") @commands.is_owner() async def command_feed_premiere(self, ctx: commands.Context)", "streams.\") else: await ctx.send( f\"Stream ended: {last_stream[0]}.\" if last_stream[1] else", "def get_new_videos(self) -> str: current_vid = await self.bot.db.field(\"SELECT ContentValue FROM", "self.bot.db.record( \"SELECT StreamStart, StreamMessage, StreamEnd FROM streams WHERE ID =", "\"color\": LIVE_EMBED_COLOUR, \"url\": \"https://www.twitch.tv/carberratutorials\", \"author\": {\"name\": \"<NAME>\"}, \"thumbnail\": {\"url\": data[\"thumbnail_url\"]},", "ContentType = ?\", \"vod\") for item in await self.call_feed(): data", "in await self.call_feed(): data = await self.call_yt_api(item.yt_videoid) thumbnails = data[\"snippet\"][\"thumbnails\"]", "ContentType = ?\", \"video\") for item in await self.call_feed(): data", "-> tuple: known_premieres = { _id: [_upcoming, _announced] for _id,", ":= (await response.json())[\"access_token\"]): return [] headers = { \"client-id\": f\"{Config.TWITCH_CLIENT_ID}\",", "item.summary: # This is a vod we havent announced await", "INTO premieres (VideoID, Upcoming, Announced) VALUES (?, ?, ?)\", item.yt_videoid,", "WHERE ContentType = ?\", item.yt_videoid, \"video\" ) return item.yt_videoid async", "new premieres.\") else: await ctx.send( f\"Announced live premiere: {last_premiere[0]}.\" if", "data[\"title\"], 
\"description\": f\"**Category: {data['game_name']}**\", \"color\": LIVE_EMBED_COLOUR, \"url\": \"https://www.twitch.tv/carberratutorials\", \"author\": {\"name\":", "announced this vod already return elif \"#VOD\" in item.summary: #", "and live_now: # The stream is not live and last", "and not live_now: # The stream is live and we", "await self.bot.db.records(\"SELECT * FROM premieres\") } for item in await", "streams SET StreamLive = ?, StreamStart = ?, StreamMessage= ?", "upcoming premiere: {last_premiere[0]}.\" ) @group_feed.command(name=\"stream\") @commands.is_owner() async def command_feed_stream(self, ctx:", "{chron.long_date_and_time(scheduled_time)} UTC! Hope to see you there!\", embed=discord.Embed.from_dict( { \"title\":", "-> None: if not (last_premiere := await self.get_new_premieres()): await ctx.send(\"No", "def get_new_streams(self) -> tuple: data = await self.call_twitch_api() if data:", "duration = chron.from_iso(end) - chron.from_iso(start) try: message = await self.videos_channel.fetch_message(stream_message)", "self.bot.db.execute( \"REPLACE INTO premieres (VideoID, Upcoming, Announced) VALUES (?, ?,", "def get_new_premieres(self) -> tuple: known_premieres = { _id: [_upcoming, _announced]", "was not a premiere if \"#VOD\" not in item.summary: #" ]
[ "data) bytes_consumed = self._log_rsp_bytes(f\"{self.gdb_color}GDB :\", self._gdb_read_buffer) if bytes_consumed: self._gdb_read_buffer =", "self.log_acks: logger.info(f\"{log_prefix} <<ack>>\") total_bytes_consumed += 1 buffer = buffer[1:] continue", "0 pkt = GDBPacket() buffer_len = len(buffer) while total_bytes_consumed <", "self._target_read_buffer: bytearray = bytearray() def set_connection(self, sock, addr): super().set_connection(sock, addr)", "from __future__ import annotations import logging import socket from typing", "just add it to the buffer and let it be", "method # before adding to the read buffer to simplify", "return self._target = ip_transport.IPTransport(self._on_target_bytes_read, f\"Target@{self.target_addr}\") self._target.set_connection(target_sock, self.target_addr) self._add_sub_connection(self._target) def _on_gdb_bytes_read(self,", "buffer.append(unescaped) data = data[escape_char_index + 2 :] buffer.extend(data) def _log_rsp_bytes(self,", "Process any left over escapes. if buffer and buffer[-1] ==", "proxy.\"\"\" def __init__(self, target_addr: Tuple[str, int], colorize: bool = False):", "https://sourceware.org/gdb/onlinedocs/gdb/Remote-Protocol.html See https://www.embecosm.com/appnotes/ean4/embecosm-howto-rsp-server-ean4-issue-2.html \"\"\" from __future__ import annotations import logging", "except ConnectionRefusedError: logger.error(f\"{self.target_color}Connection to Target@{self.target_addr} refused.\") self.close() return self._target =", "import annotations import logging import socket from typing import Optional", "no more characters after the escape char, just add it", "it be # processed when more data is received. break", "escape_char_index == len(data): # If there are no more characters", "is received. break if escape_char_index: buffer.extend(data[: escape_char_index - 1]) unescaped", "more data is received. 
break if escape_char_index: buffer.extend(data[: escape_char_index -", "self._append_target_read_buffer(buffer) self._write_buffer.extend(buffer) def _append_gdb_read_buffer(self, data: bytes): self._unescape_and_append(self._gdb_read_buffer, data) bytes_consumed =", "GDBPacket.RSP_ESCAPE_CHAR: buffer[-1] = data[0] ^ 0x20 data = data[1:] escape_char_index", "_on_target_bytes_read(self, _ignored): buffer = self._target.read_buffer self._target.shift_read_buffer(len(buffer)) self._append_target_read_buffer(buffer) self._write_buffer.extend(buffer) def _append_gdb_read_buffer(self,", "self._gdb_read_buffer) if bytes_consumed: self._gdb_read_buffer = bytearray(self._gdb_read_buffer[bytes_consumed:]) def _append_target_read_buffer(self, data: bytes):", "log_prefix: str, buffer: bytearray) -> int: total_bytes_consumed = 0 pkt", "socket from typing import Optional from typing import Tuple from", "= data[escape_char_index + 2 :] buffer.extend(data) def _log_rsp_bytes(self, log_prefix: str,", "<<Interrupt request>>\") total_bytes_consumed += 1 buffer = buffer[1:] continue leader", "= data[escape_char_index + 1] ^ 0x20 buffer.append(unescaped) data = data[escape_char_index", "<<ack>>\") total_bytes_consumed += 1 buffer = buffer[1:] continue if buffer[0]", "ConnectionRefusedError: logger.error(f\"{self.target_color}Connection to Target@{self.target_addr} refused.\") self.close() return self._target = ip_transport.IPTransport(self._on_target_bytes_read,", "target at {self.target_addr}\") try: target_sock = socket.create_connection(self.target_addr) except ConnectionRefusedError: logger.error(f\"{self.target_color}Connection", "= data[0] ^ 0x20 data = data[1:] escape_char_index = data.find(GDBPacket.RSP_ESCAPE_CHAR)", "packet {pkt}\") else: logger.info(f\"{log_prefix} Received empty packet\") if len(buffer): logger.debug(", "Skipping {leader} non-leader bytes {buffer[:total_bytes_consumed + leader]}\" ) buffer =", "^ 0x20 data = data[1:] escape_char_index = 
data.find(GDBPacket.RSP_ESCAPE_CHAR) while escape_char_index", "False): super().__init__(process_callback=self._on_gdb_bytes_read) self.log_acks = False self.target_addr = target_addr self._target: Optional[ip_transport.IPTransport]", "the buffer and let it be # processed when more", "buffer = buffer[bytes_consumed:] if not bytes_consumed: break total_bytes_consumed += bytes_consumed", "\"\"\"GDB Remote Serial Protocol proxy.\"\"\" def __init__(self, target_addr: Tuple[str, int],", "read buffer to simplify parsing. if not data: return #", "if self.log_acks: logger.info(f\"{log_prefix} <<ack>>\") total_bytes_consumed += 1 buffer = buffer[1:]", "if escape_char_index == len(data): # If there are no more", ") buffer = buffer[leader:] bytes_consumed = pkt.parse(buffer) buffer = buffer[bytes_consumed:]", "class GDBProxy(ip_transport.IPTransport): \"\"\"GDB Remote Serial Protocol proxy.\"\"\" def __init__(self, target_addr:", "<gh_stars>1-10 \"\"\"Provides a GDB logging proxy. See https://sourceware.org/gdb/onlinedocs/gdb/Remote-Protocol.html See https://www.embecosm.com/appnotes/ean4/embecosm-howto-rsp-server-ean4-issue-2.html", "= buffer[1:] continue leader = buffer.find(GDBPacket.PACKET_LEADER) if leader > 0:", "if buffer[0] == ord(\"+\"): if self.log_acks: logger.info(f\"{log_prefix} <<ack>>\") total_bytes_consumed +=", "+= 1 buffer = buffer[1:] continue if buffer[0] == ord(\"-\"):", "in this method # before adding to the read buffer", "from .packet import GDBPacket from net import ip_transport logger =", "bytearray() self._target_read_buffer: bytearray = bytearray() def set_connection(self, sock, addr): super().set_connection(sock,", "bytes): self._unescape_and_append(self._gdb_read_buffer, data) bytes_consumed = self._log_rsp_bytes(f\"{self.gdb_color}GDB :\", self._gdb_read_buffer) if bytes_consumed:", "Serial Protocol proxy.\"\"\" def __init__(self, target_addr: Tuple[str, int], colorize: bool", "len(data): # If there are no more characters after the", "Received 
packet {pkt}\") else: logger.info(f\"{log_prefix} Received empty packet\") if len(buffer):", "non-leader bytes {buffer[:total_bytes_consumed + leader]}\" ) buffer = buffer[leader:] bytes_consumed", "buffer[0] == ord(\"-\"): if self.log_acks: logger.info(f\"{log_prefix} <<nack>>\") total_bytes_consumed += 1", "typing import Tuple from .packet import GDBPacket from net import", "buffer to simplify parsing. if not data: return # Process", "if bytes_consumed: self._target_read_buffer = bytearray(self._target_read_buffer[bytes_consumed:]) @staticmethod def _unescape_and_append(buffer: bytearray, data:", "return # Process any left over escapes. if buffer and", "len(buffer) while total_bytes_consumed < buffer_len: if buffer[0] == ord(\"+\"): if", "^ 0x20 buffer.append(unescaped) data = data[escape_char_index + 2 :] buffer.extend(data)", "import Tuple from .packet import GDBPacket from net import ip_transport", "logger.info(f\"{log_prefix} Received empty packet\") if len(buffer): logger.debug( f\"{log_prefix} After processing:", "bytes_consumed = self._log_rsp_bytes(f\"{self.gdb_color}GDB :\", self._gdb_read_buffer) if bytes_consumed: self._gdb_read_buffer = bytearray(self._gdb_read_buffer[bytes_consumed:])", "self.target_color = \"\\x1b[34m\\x1b[47m\" self.gdb_color = \"\\x1b[30m\\x1b[47m\" else: self.target_color = \"\"", "escape char, just add it to the buffer and let", "else: self.target_color = \"\" self.gdb_color = \"\" self._gdb_read_buffer: bytearray =", "RSP uses '}' as an escape character. Escapes are processed", "left over escapes. 
if buffer and buffer[-1] == GDBPacket.RSP_ESCAPE_CHAR: buffer[-1]", "escape_char_index >= 0: if escape_char_index == len(data): # If there", "bytes_consumed = pkt.parse(buffer) buffer = buffer[bytes_consumed:] if not bytes_consumed: break", "None if colorize: self.target_color = \"\\x1b[34m\\x1b[47m\" self.gdb_color = \"\\x1b[30m\\x1b[47m\" else:", "- 1]) unescaped = data[escape_char_index + 1] ^ 0x20 buffer.append(unescaped)", "let it be # processed when more data is received.", "colorize: self.target_color = \"\\x1b[34m\\x1b[47m\" self.gdb_color = \"\\x1b[30m\\x1b[47m\" else: self.target_color =", "buffer[leader:] bytes_consumed = pkt.parse(buffer) buffer = buffer[bytes_consumed:] if not bytes_consumed:", "\"\" self._gdb_read_buffer: bytearray = bytearray() self._target_read_buffer: bytearray = bytearray() def", "bytearray, data: bytes): # RSP uses '}' as an escape", "from typing import Optional from typing import Tuple from .packet", "Tuple from .packet import GDBPacket from net import ip_transport logger", "logging.getLogger(__name__) class GDBProxy(ip_transport.IPTransport): \"\"\"GDB Remote Serial Protocol proxy.\"\"\" def __init__(self,", "net import ip_transport logger = logging.getLogger(__name__) class GDBProxy(ip_transport.IPTransport): \"\"\"GDB Remote", "bytearray) -> int: total_bytes_consumed = 0 pkt = GDBPacket() buffer_len", "= len(buffer) while total_bytes_consumed < buffer_len: if buffer[0] == ord(\"+\"):", "0x03: logger.info(f\"{log_prefix} <<Interrupt request>>\") total_bytes_consumed += 1 buffer = buffer[1:]", "ord(\"-\"): if self.log_acks: logger.info(f\"{log_prefix} <<nack>>\") total_bytes_consumed += 1 buffer =", "total_bytes_consumed < buffer_len: if buffer[0] == ord(\"+\"): if self.log_acks: logger.info(f\"{log_prefix}", "def _on_gdb_bytes_read(self, _ignored): buffer = self._read_buffer self.shift_read_buffer(len(buffer)) self._append_gdb_read_buffer(buffer) self._target._write_buffer.extend(buffer) def", "= 
self._log_rsp_bytes(f\"{self.target_color}TARGET :\", self._target_read_buffer) if bytes_consumed: self._target_read_buffer = bytearray(self._target_read_buffer[bytes_consumed:]) @staticmethod", "buffer[1:] continue if buffer[0] == 0x03: logger.info(f\"{log_prefix} <<Interrupt request>>\") total_bytes_consumed", "\"\" self.gdb_color = \"\" self._gdb_read_buffer: bytearray = bytearray() self._target_read_buffer: bytearray", "self._log_rsp_bytes(f\"{self.target_color}TARGET :\", self._target_read_buffer) if bytes_consumed: self._target_read_buffer = bytearray(self._target_read_buffer[bytes_consumed:]) @staticmethod def", "_log_rsp_bytes(self, log_prefix: str, buffer: bytearray) -> int: total_bytes_consumed = 0", "to Target@{self.target_addr} refused.\") self.close() return self._target = ip_transport.IPTransport(self._on_target_bytes_read, f\"Target@{self.target_addr}\") self._target.set_connection(target_sock,", "== ord(\"+\"): if self.log_acks: logger.info(f\"{log_prefix} <<ack>>\") total_bytes_consumed += 1 buffer", "self._target._write_buffer.extend(buffer) def _on_target_bytes_read(self, _ignored): buffer = self._target.read_buffer self._target.shift_read_buffer(len(buffer)) self._append_target_read_buffer(buffer) self._write_buffer.extend(buffer)", "= bytearray(self._target_read_buffer[bytes_consumed:]) @staticmethod def _unescape_and_append(buffer: bytearray, data: bytes): # RSP", "self._target: Optional[ip_transport.IPTransport] = None if colorize: self.target_color = \"\\x1b[34m\\x1b[47m\" self.gdb_color", "{pkt}\") else: logger.info(f\"{log_prefix} Received empty packet\") if len(buffer): logger.debug( f\"{log_prefix}", "buffer = self._target.read_buffer self._target.shift_read_buffer(len(buffer)) self._append_target_read_buffer(buffer) self._write_buffer.extend(buffer) def _append_gdb_read_buffer(self, data: bytes):", "Optional from typing import Tuple from .packet import GDBPacket from", "= self._target.read_buffer 
self._target.shift_read_buffer(len(buffer)) self._append_target_read_buffer(buffer) self._write_buffer.extend(buffer) def _append_gdb_read_buffer(self, data: bytes): self._unescape_and_append(self._gdb_read_buffer,", "ord(\"+\"): if self.log_acks: logger.info(f\"{log_prefix} <<ack>>\") total_bytes_consumed += 1 buffer =", "== ord(\"-\"): if self.log_acks: logger.info(f\"{log_prefix} <<nack>>\") total_bytes_consumed += 1 buffer", "logger.info(f\"{log_prefix} <<ack>>\") total_bytes_consumed += 1 buffer = buffer[1:] continue if", "= \"\\x1b[34m\\x1b[47m\" self.gdb_color = \"\\x1b[30m\\x1b[47m\" else: self.target_color = \"\" self.gdb_color", "socket.create_connection(self.target_addr) except ConnectionRefusedError: logger.error(f\"{self.target_color}Connection to Target@{self.target_addr} refused.\") self.close() return self._target", "proxy. See https://sourceware.org/gdb/onlinedocs/gdb/Remote-Protocol.html See https://www.embecosm.com/appnotes/ean4/embecosm-howto-rsp-server-ean4-issue-2.html \"\"\" from __future__ import annotations", "self._gdb_read_buffer: bytearray = bytearray() self._target_read_buffer: bytearray = bytearray() def set_connection(self,", "== len(data): # If there are no more characters after", "set_connection(self, sock, addr): super().set_connection(sock, addr) logger.debug(f\"{self.target_color}Connecting to target at {self.target_addr}\")", "_unescape_and_append(buffer: bytearray, data: bytes): # RSP uses '}' as an", "= None if colorize: self.target_color = \"\\x1b[34m\\x1b[47m\" self.gdb_color = \"\\x1b[30m\\x1b[47m\"", "= bytearray() def set_connection(self, sock, addr): super().set_connection(sock, addr) logger.debug(f\"{self.target_color}Connecting to", "packet\") if len(buffer): logger.debug( f\"{log_prefix} After processing: [{len(buffer)}] {buffer}\" )", "total_bytes_consumed = 0 pkt = GDBPacket() buffer_len = len(buffer) while", "a GDB logging proxy. 
See https://sourceware.org/gdb/onlinedocs/gdb/Remote-Protocol.html See https://www.embecosm.com/appnotes/ean4/embecosm-howto-rsp-server-ean4-issue-2.html \"\"\" from", "_ignored): buffer = self._target.read_buffer self._target.shift_read_buffer(len(buffer)) self._append_target_read_buffer(buffer) self._write_buffer.extend(buffer) def _append_gdb_read_buffer(self, data:", "when more data is received. break if escape_char_index: buffer.extend(data[: escape_char_index", "buffer.extend(data) def _log_rsp_bytes(self, log_prefix: str, buffer: bytearray) -> int: total_bytes_consumed", "-> int: total_bytes_consumed = 0 pkt = GDBPacket() buffer_len =", "self._write_buffer.extend(buffer) def _append_gdb_read_buffer(self, data: bytes): self._unescape_and_append(self._gdb_read_buffer, data) bytes_consumed = self._log_rsp_bytes(f\"{self.gdb_color}GDB", "bytearray = bytearray() self._target_read_buffer: bytearray = bytearray() def set_connection(self, sock,", "buffer_len: if buffer[0] == ord(\"+\"): if self.log_acks: logger.info(f\"{log_prefix} <<ack>>\") total_bytes_consumed", "self._gdb_read_buffer = bytearray(self._gdb_read_buffer[bytes_consumed:]) def _append_target_read_buffer(self, data: bytes): self._unescape_and_append(self._target_read_buffer, data) bytes_consumed", "continue if buffer[0] == ord(\"-\"): if self.log_acks: logger.info(f\"{log_prefix} <<nack>>\") total_bytes_consumed", "= \"\" self.gdb_color = \"\" self._gdb_read_buffer: bytearray = bytearray() self._target_read_buffer:", "data = data[escape_char_index + 2 :] buffer.extend(data) def _log_rsp_bytes(self, log_prefix:", "sock, addr): super().set_connection(sock, addr) logger.debug(f\"{self.target_color}Connecting to target at {self.target_addr}\") try:", "any left over escapes. 
if buffer and buffer[-1] == GDBPacket.RSP_ESCAPE_CHAR:", "this method # before adding to the read buffer to", "_append_gdb_read_buffer(self, data: bytes): self._unescape_and_append(self._gdb_read_buffer, data) bytes_consumed = self._log_rsp_bytes(f\"{self.gdb_color}GDB :\", self._gdb_read_buffer)", "break if escape_char_index: buffer.extend(data[: escape_char_index - 1]) unescaped = data[escape_char_index", "== 0x03: logger.info(f\"{log_prefix} <<Interrupt request>>\") total_bytes_consumed += 1 buffer =", "to the read buffer to simplify parsing. if not data:", "and let it be # processed when more data is", "> 0: logger.warning( f\"{log_prefix} Skipping {leader} non-leader bytes {buffer[:total_bytes_consumed +", "if not bytes_consumed: break total_bytes_consumed += bytes_consumed if pkt.data: logger.info(f\"{log_prefix}", "target_addr self._target: Optional[ip_transport.IPTransport] = None if colorize: self.target_color = \"\\x1b[34m\\x1b[47m\"", "self.target_addr) self._add_sub_connection(self._target) def _on_gdb_bytes_read(self, _ignored): buffer = self._read_buffer self.shift_read_buffer(len(buffer)) self._append_gdb_read_buffer(buffer)", "from typing import Tuple from .packet import GDBPacket from net", "= self._read_buffer self.shift_read_buffer(len(buffer)) self._append_gdb_read_buffer(buffer) self._target._write_buffer.extend(buffer) def _on_target_bytes_read(self, _ignored): buffer =", "@staticmethod def _unescape_and_append(buffer: bytearray, data: bytes): # RSP uses '}'", "+ 2 :] buffer.extend(data) def _log_rsp_bytes(self, log_prefix: str, buffer: bytearray)", "buffer = buffer[1:] continue if buffer[0] == ord(\"-\"): if self.log_acks:", "= self._log_rsp_bytes(f\"{self.gdb_color}GDB :\", self._gdb_read_buffer) if bytes_consumed: self._gdb_read_buffer = bytearray(self._gdb_read_buffer[bytes_consumed:]) def", "bytes): self._unescape_and_append(self._target_read_buffer, data) bytes_consumed = self._log_rsp_bytes(f\"{self.target_color}TARGET :\", 
self._target_read_buffer) if bytes_consumed:", "character. Escapes are processed in this method # before adding", "bytes_consumed: self._target_read_buffer = bytearray(self._target_read_buffer[bytes_consumed:]) @staticmethod def _unescape_and_append(buffer: bytearray, data: bytes):", "= False self.target_addr = target_addr self._target: Optional[ip_transport.IPTransport] = None if", "total_bytes_consumed += 1 buffer = buffer[1:] continue if buffer[0] ==", "== GDBPacket.RSP_ESCAPE_CHAR: buffer[-1] = data[0] ^ 0x20 data = data[1:]", "{buffer[:total_bytes_consumed + leader]}\" ) buffer = buffer[leader:] bytes_consumed = pkt.parse(buffer)", "add it to the buffer and let it be #", "# Process any left over escapes. if buffer and buffer[-1]", "bytes {buffer[:total_bytes_consumed + leader]}\" ) buffer = buffer[leader:] bytes_consumed =", "self.gdb_color = \"\" self._gdb_read_buffer: bytearray = bytearray() self._target_read_buffer: bytearray =", "= GDBPacket() buffer_len = len(buffer) while total_bytes_consumed < buffer_len: if", "an escape character. Escapes are processed in this method #", "simplify parsing. if not data: return # Process any left", ":\", self._gdb_read_buffer) if bytes_consumed: self._gdb_read_buffer = bytearray(self._gdb_read_buffer[bytes_consumed:]) def _append_target_read_buffer(self, data:", "0x20 data = data[1:] escape_char_index = data.find(GDBPacket.RSP_ESCAPE_CHAR) while escape_char_index >=", "logging proxy. 
See https://sourceware.org/gdb/onlinedocs/gdb/Remote-Protocol.html See https://www.embecosm.com/appnotes/ean4/embecosm-howto-rsp-server-ean4-issue-2.html \"\"\" from __future__ import", "self._target_read_buffer = bytearray(self._target_read_buffer[bytes_consumed:]) @staticmethod def _unescape_and_append(buffer: bytearray, data: bytes): #", "= bytearray(self._gdb_read_buffer[bytes_consumed:]) def _append_target_read_buffer(self, data: bytes): self._unescape_and_append(self._target_read_buffer, data) bytes_consumed =", "request>>\") total_bytes_consumed += 1 buffer = buffer[1:] continue leader =", "typing import Optional from typing import Tuple from .packet import", "logger = logging.getLogger(__name__) class GDBProxy(ip_transport.IPTransport): \"\"\"GDB Remote Serial Protocol proxy.\"\"\"", "self.shift_read_buffer(len(buffer)) self._append_gdb_read_buffer(buffer) self._target._write_buffer.extend(buffer) def _on_target_bytes_read(self, _ignored): buffer = self._target.read_buffer self._target.shift_read_buffer(len(buffer))", "Tuple[str, int], colorize: bool = False): super().__init__(process_callback=self._on_gdb_bytes_read) self.log_acks = False", "= False): super().__init__(process_callback=self._on_gdb_bytes_read) self.log_acks = False self.target_addr = target_addr self._target:", "from net import ip_transport logger = logging.getLogger(__name__) class GDBProxy(ip_transport.IPTransport): \"\"\"GDB", "data: bytes): self._unescape_and_append(self._gdb_read_buffer, data) bytes_consumed = self._log_rsp_bytes(f\"{self.gdb_color}GDB :\", self._gdb_read_buffer) if", "if not data: return # Process any left over escapes.", "buffer = buffer[leader:] bytes_consumed = pkt.parse(buffer) buffer = buffer[bytes_consumed:] if", "data is received. 
break if escape_char_index: buffer.extend(data[: escape_char_index - 1])", "_ignored): buffer = self._read_buffer self.shift_read_buffer(len(buffer)) self._append_gdb_read_buffer(buffer) self._target._write_buffer.extend(buffer) def _on_target_bytes_read(self, _ignored):", "def __init__(self, target_addr: Tuple[str, int], colorize: bool = False): super().__init__(process_callback=self._on_gdb_bytes_read)", "False self.target_addr = target_addr self._target: Optional[ip_transport.IPTransport] = None if colorize:", "bytearray() def set_connection(self, sock, addr): super().set_connection(sock, addr) logger.debug(f\"{self.target_color}Connecting to target", "self._target = ip_transport.IPTransport(self._on_target_bytes_read, f\"Target@{self.target_addr}\") self._target.set_connection(target_sock, self.target_addr) self._add_sub_connection(self._target) def _on_gdb_bytes_read(self, _ignored):", "self._target.shift_read_buffer(len(buffer)) self._append_target_read_buffer(buffer) self._write_buffer.extend(buffer) def _append_gdb_read_buffer(self, data: bytes): self._unescape_and_append(self._gdb_read_buffer, data) bytes_consumed", "\"\\x1b[34m\\x1b[47m\" self.gdb_color = \"\\x1b[30m\\x1b[47m\" else: self.target_color = \"\" self.gdb_color =", "ip_transport logger = logging.getLogger(__name__) class GDBProxy(ip_transport.IPTransport): \"\"\"GDB Remote Serial Protocol", "<<nack>>\") total_bytes_consumed += 1 buffer = buffer[1:] continue if buffer[0]", "processed when more data is received. 
break if escape_char_index: buffer.extend(data[:", "len(buffer): logger.debug( f\"{log_prefix} After processing: [{len(buffer)}] {buffer}\" ) return total_bytes_consumed", "buffer[-1] = data[0] ^ 0x20 data = data[1:] escape_char_index =", ":\", self._target_read_buffer) if bytes_consumed: self._target_read_buffer = bytearray(self._target_read_buffer[bytes_consumed:]) @staticmethod def _unescape_and_append(buffer:", "import ip_transport logger = logging.getLogger(__name__) class GDBProxy(ip_transport.IPTransport): \"\"\"GDB Remote Serial", "buffer = buffer[1:] continue if buffer[0] == 0x03: logger.info(f\"{log_prefix} <<Interrupt", "self._target.read_buffer self._target.shift_read_buffer(len(buffer)) self._append_target_read_buffer(buffer) self._write_buffer.extend(buffer) def _append_gdb_read_buffer(self, data: bytes): self._unescape_and_append(self._gdb_read_buffer, data)", "be # processed when more data is received. break if", "else: logger.info(f\"{log_prefix} Received empty packet\") if len(buffer): logger.debug( f\"{log_prefix} After", "1 buffer = buffer[1:] continue leader = buffer.find(GDBPacket.PACKET_LEADER) if leader", "colorize: bool = False): super().__init__(process_callback=self._on_gdb_bytes_read) self.log_acks = False self.target_addr =", "total_bytes_consumed += 1 buffer = buffer[1:] continue leader = buffer.find(GDBPacket.PACKET_LEADER)", "escape character. Escapes are processed in this method # before", "adding to the read buffer to simplify parsing. 
if not", "target_addr: Tuple[str, int], colorize: bool = False): super().__init__(process_callback=self._on_gdb_bytes_read) self.log_acks =", "logger.info(f\"{log_prefix} <<Interrupt request>>\") total_bytes_consumed += 1 buffer = buffer[1:] continue", "bool = False): super().__init__(process_callback=self._on_gdb_bytes_read) self.log_acks = False self.target_addr = target_addr", "leader > 0: logger.warning( f\"{log_prefix} Skipping {leader} non-leader bytes {buffer[:total_bytes_consumed", ".packet import GDBPacket from net import ip_transport logger = logging.getLogger(__name__)", "_on_gdb_bytes_read(self, _ignored): buffer = self._read_buffer self.shift_read_buffer(len(buffer)) self._append_gdb_read_buffer(buffer) self._target._write_buffer.extend(buffer) def _on_target_bytes_read(self,", "parsing. if not data: return # Process any left over", "if buffer and buffer[-1] == GDBPacket.RSP_ESCAPE_CHAR: buffer[-1] = data[0] ^", "if self.log_acks: logger.info(f\"{log_prefix} <<nack>>\") total_bytes_consumed += 1 buffer = buffer[1:]", "bytearray(self._target_read_buffer[bytes_consumed:]) @staticmethod def _unescape_and_append(buffer: bytearray, data: bytes): # RSP uses", "= buffer[bytes_consumed:] if not bytes_consumed: break total_bytes_consumed += bytes_consumed if", "= ip_transport.IPTransport(self._on_target_bytes_read, f\"Target@{self.target_addr}\") self._target.set_connection(target_sock, self.target_addr) self._add_sub_connection(self._target) def _on_gdb_bytes_read(self, _ignored): buffer", "characters after the escape char, just add it to the", "if buffer[0] == 0x03: logger.info(f\"{log_prefix} <<Interrupt request>>\") total_bytes_consumed += 1", "Remote Serial Protocol proxy.\"\"\" def __init__(self, target_addr: Tuple[str, int], colorize:", "1 buffer = buffer[1:] continue if buffer[0] == 0x03: logger.info(f\"{log_prefix}", "Escapes are processed in this method # before adding to", "received. 
break if escape_char_index: buffer.extend(data[: escape_char_index - 1]) unescaped =", "+ 1] ^ 0x20 buffer.append(unescaped) data = data[escape_char_index + 2", "def _on_target_bytes_read(self, _ignored): buffer = self._target.read_buffer self._target.shift_read_buffer(len(buffer)) self._append_target_read_buffer(buffer) self._write_buffer.extend(buffer) def", "1]) unescaped = data[escape_char_index + 1] ^ 0x20 buffer.append(unescaped) data", "+ leader]}\" ) buffer = buffer[leader:] bytes_consumed = pkt.parse(buffer) buffer", "= \"\" self._gdb_read_buffer: bytearray = bytearray() self._target_read_buffer: bytearray = bytearray()", "bytes_consumed: break total_bytes_consumed += bytes_consumed if pkt.data: logger.info(f\"{log_prefix} Received packet", "data[escape_char_index + 1] ^ 0x20 buffer.append(unescaped) data = data[escape_char_index +", "logger.error(f\"{self.target_color}Connection to Target@{self.target_addr} refused.\") self.close() return self._target = ip_transport.IPTransport(self._on_target_bytes_read, f\"Target@{self.target_addr}\")", "total_bytes_consumed += bytes_consumed if pkt.data: logger.info(f\"{log_prefix} Received packet {pkt}\") else:", "logger.info(f\"{log_prefix} Received packet {pkt}\") else: logger.info(f\"{log_prefix} Received empty packet\") if", "buffer: bytearray) -> int: total_bytes_consumed = 0 pkt = GDBPacket()", "1 buffer = buffer[1:] continue if buffer[0] == ord(\"-\"): if", "if bytes_consumed: self._gdb_read_buffer = bytearray(self._gdb_read_buffer[bytes_consumed:]) def _append_target_read_buffer(self, data: bytes): self._unescape_and_append(self._target_read_buffer,", "data) bytes_consumed = self._log_rsp_bytes(f\"{self.target_color}TARGET :\", self._target_read_buffer) if bytes_consumed: self._target_read_buffer =", "data.find(GDBPacket.RSP_ESCAPE_CHAR) while escape_char_index >= 0: if escape_char_index == len(data): #", "import GDBPacket from net import ip_transport logger = logging.getLogger(__name__) class", "data = 
data[1:] escape_char_index = data.find(GDBPacket.RSP_ESCAPE_CHAR) while escape_char_index >= 0:", "self._log_rsp_bytes(f\"{self.gdb_color}GDB :\", self._gdb_read_buffer) if bytes_consumed: self._gdb_read_buffer = bytearray(self._gdb_read_buffer[bytes_consumed:]) def _append_target_read_buffer(self,", "refused.\") self.close() return self._target = ip_transport.IPTransport(self._on_target_bytes_read, f\"Target@{self.target_addr}\") self._target.set_connection(target_sock, self.target_addr) self._add_sub_connection(self._target)", "buffer[1:] continue if buffer[0] == ord(\"-\"): if self.log_acks: logger.info(f\"{log_prefix} <<nack>>\")", "__init__(self, target_addr: Tuple[str, int], colorize: bool = False): super().__init__(process_callback=self._on_gdb_bytes_read) self.log_acks", "ip_transport.IPTransport(self._on_target_bytes_read, f\"Target@{self.target_addr}\") self._target.set_connection(target_sock, self.target_addr) self._add_sub_connection(self._target) def _on_gdb_bytes_read(self, _ignored): buffer =", "data: bytes): self._unescape_and_append(self._target_read_buffer, data) bytes_consumed = self._log_rsp_bytes(f\"{self.target_color}TARGET :\", self._target_read_buffer) if", "the read buffer to simplify parsing. if not data: return", "0: if escape_char_index == len(data): # If there are no", "escape_char_index - 1]) unescaped = data[escape_char_index + 1] ^ 0x20", "+= 1 buffer = buffer[1:] continue leader = buffer.find(GDBPacket.PACKET_LEADER) if", "self._target.set_connection(target_sock, self.target_addr) self._add_sub_connection(self._target) def _on_gdb_bytes_read(self, _ignored): buffer = self._read_buffer self.shift_read_buffer(len(buffer))", "it to the buffer and let it be # processed", "buffer[0] == 0x03: logger.info(f\"{log_prefix} <<Interrupt request>>\") total_bytes_consumed += 1 buffer", "annotations import logging import socket from typing import Optional from", "uses '}' as an escape character. 
Escapes are processed in", "buffer[1:] continue leader = buffer.find(GDBPacket.PACKET_LEADER) if leader > 0: logger.warning(", "{leader} non-leader bytes {buffer[:total_bytes_consumed + leader]}\" ) buffer = buffer[leader:]", "there are no more characters after the escape char, just", "def _log_rsp_bytes(self, log_prefix: str, buffer: bytearray) -> int: total_bytes_consumed =", "while total_bytes_consumed < buffer_len: if buffer[0] == ord(\"+\"): if self.log_acks:", "__future__ import annotations import logging import socket from typing import", "# RSP uses '}' as an escape character. Escapes are", "= socket.create_connection(self.target_addr) except ConnectionRefusedError: logger.error(f\"{self.target_color}Connection to Target@{self.target_addr} refused.\") self.close() return", "break total_bytes_consumed += bytes_consumed if pkt.data: logger.info(f\"{log_prefix} Received packet {pkt}\")", "= pkt.parse(buffer) buffer = buffer[bytes_consumed:] if not bytes_consumed: break total_bytes_consumed", "pkt = GDBPacket() buffer_len = len(buffer) while total_bytes_consumed < buffer_len:", "leader]}\" ) buffer = buffer[leader:] bytes_consumed = pkt.parse(buffer) buffer =", "after the escape char, just add it to the buffer", "addr): super().set_connection(sock, addr) logger.debug(f\"{self.target_color}Connecting to target at {self.target_addr}\") try: target_sock", "def set_connection(self, sock, addr): super().set_connection(sock, addr) logger.debug(f\"{self.target_color}Connecting to target at", "to simplify parsing. 
if not data: return # Process any", "buffer = self._read_buffer self.shift_read_buffer(len(buffer)) self._append_gdb_read_buffer(buffer) self._target._write_buffer.extend(buffer) def _on_target_bytes_read(self, _ignored): buffer", "= bytearray() self._target_read_buffer: bytearray = bytearray() def set_connection(self, sock, addr):", "self._target_read_buffer) if bytes_consumed: self._target_read_buffer = bytearray(self._target_read_buffer[bytes_consumed:]) @staticmethod def _unescape_and_append(buffer: bytearray,", "Protocol proxy.\"\"\" def __init__(self, target_addr: Tuple[str, int], colorize: bool =", ">= 0: if escape_char_index == len(data): # If there are", "= buffer[1:] continue if buffer[0] == 0x03: logger.info(f\"{log_prefix} <<Interrupt request>>\")", "while escape_char_index >= 0: if escape_char_index == len(data): # If", "buffer and let it be # processed when more data", "logger.debug(f\"{self.target_color}Connecting to target at {self.target_addr}\") try: target_sock = socket.create_connection(self.target_addr) except", "1] ^ 0x20 buffer.append(unescaped) data = data[escape_char_index + 2 :]", "self.gdb_color = \"\\x1b[30m\\x1b[47m\" else: self.target_color = \"\" self.gdb_color = \"\"", "= target_addr self._target: Optional[ip_transport.IPTransport] = None if colorize: self.target_color =", "self._read_buffer self.shift_read_buffer(len(buffer)) self._append_gdb_read_buffer(buffer) self._target._write_buffer.extend(buffer) def _on_target_bytes_read(self, _ignored): buffer = self._target.read_buffer", "bytes_consumed: self._gdb_read_buffer = bytearray(self._gdb_read_buffer[bytes_consumed:]) def _append_target_read_buffer(self, data: bytes): self._unescape_and_append(self._target_read_buffer, data)", "data[escape_char_index + 2 :] buffer.extend(data) def _log_rsp_bytes(self, log_prefix: str, buffer:", "super().__init__(process_callback=self._on_gdb_bytes_read) self.log_acks = False self.target_addr = target_addr self._target: 
Optional[ip_transport.IPTransport] =", "str, buffer: bytearray) -> int: total_bytes_consumed = 0 pkt =", "data[0] ^ 0x20 data = data[1:] escape_char_index = data.find(GDBPacket.RSP_ESCAPE_CHAR) while", "char, just add it to the buffer and let it", "data: bytes): # RSP uses '}' as an escape character.", "processed in this method # before adding to the read", "if buffer[0] == ord(\"-\"): if self.log_acks: logger.info(f\"{log_prefix} <<nack>>\") total_bytes_consumed +=", "before adding to the read buffer to simplify parsing. if", "data: return # Process any left over escapes. if buffer", "if len(buffer): logger.debug( f\"{log_prefix} After processing: [{len(buffer)}] {buffer}\" ) return", "import logging import socket from typing import Optional from typing", "self._add_sub_connection(self._target) def _on_gdb_bytes_read(self, _ignored): buffer = self._read_buffer self.shift_read_buffer(len(buffer)) self._append_gdb_read_buffer(buffer) self._target._write_buffer.extend(buffer)", "self._append_gdb_read_buffer(buffer) self._target._write_buffer.extend(buffer) def _on_target_bytes_read(self, _ignored): buffer = self._target.read_buffer self._target.shift_read_buffer(len(buffer)) self._append_target_read_buffer(buffer)", "= 0 pkt = GDBPacket() buffer_len = len(buffer) while total_bytes_consumed", "not bytes_consumed: break total_bytes_consumed += bytes_consumed if pkt.data: logger.info(f\"{log_prefix} Received", "\"\"\" from __future__ import annotations import logging import socket from", "See https://www.embecosm.com/appnotes/ean4/embecosm-howto-rsp-server-ean4-issue-2.html \"\"\" from __future__ import annotations import logging import", "buffer and buffer[-1] == GDBPacket.RSP_ESCAPE_CHAR: buffer[-1] = data[0] ^ 0x20", "empty packet\") if len(buffer): logger.debug( f\"{log_prefix} After processing: [{len(buffer)}] {buffer}\"", "bytes): # RSP uses '}' as an escape character. 
Escapes", "pkt.data: logger.info(f\"{log_prefix} Received packet {pkt}\") else: logger.info(f\"{log_prefix} Received empty packet\")", "def _append_target_read_buffer(self, data: bytes): self._unescape_and_append(self._target_read_buffer, data) bytes_consumed = self._log_rsp_bytes(f\"{self.target_color}TARGET :\",", "self._unescape_and_append(self._gdb_read_buffer, data) bytes_consumed = self._log_rsp_bytes(f\"{self.gdb_color}GDB :\", self._gdb_read_buffer) if bytes_consumed: self._gdb_read_buffer", "at {self.target_addr}\") try: target_sock = socket.create_connection(self.target_addr) except ConnectionRefusedError: logger.error(f\"{self.target_color}Connection to", "GDBPacket() buffer_len = len(buffer) while total_bytes_consumed < buffer_len: if buffer[0]", "_append_target_read_buffer(self, data: bytes): self._unescape_and_append(self._target_read_buffer, data) bytes_consumed = self._log_rsp_bytes(f\"{self.target_color}TARGET :\", self._target_read_buffer)", "try: target_sock = socket.create_connection(self.target_addr) except ConnectionRefusedError: logger.error(f\"{self.target_color}Connection to Target@{self.target_addr} refused.\")", "f\"Target@{self.target_addr}\") self._target.set_connection(target_sock, self.target_addr) self._add_sub_connection(self._target) def _on_gdb_bytes_read(self, _ignored): buffer = self._read_buffer", "self.target_color = \"\" self.gdb_color = \"\" self._gdb_read_buffer: bytearray = bytearray()", "buffer = buffer[1:] continue leader = buffer.find(GDBPacket.PACKET_LEADER) if leader >", "import socket from typing import Optional from typing import Tuple", "+= 1 buffer = buffer[1:] continue if buffer[0] == 0x03:", "as an escape character. Escapes are processed in this method", "# If there are no more characters after the escape", "# processed when more data is received. 
break if escape_char_index:", "= \"\\x1b[30m\\x1b[47m\" else: self.target_color = \"\" self.gdb_color = \"\" self._gdb_read_buffer:", "Optional[ip_transport.IPTransport] = None if colorize: self.target_color = \"\\x1b[34m\\x1b[47m\" self.gdb_color =", "if pkt.data: logger.info(f\"{log_prefix} Received packet {pkt}\") else: logger.info(f\"{log_prefix} Received empty", "import Optional from typing import Tuple from .packet import GDBPacket", "= buffer[1:] continue if buffer[0] == ord(\"-\"): if self.log_acks: logger.info(f\"{log_prefix}", "f\"{log_prefix} Skipping {leader} non-leader bytes {buffer[:total_bytes_consumed + leader]}\" ) buffer", "2 :] buffer.extend(data) def _log_rsp_bytes(self, log_prefix: str, buffer: bytearray) ->", "+= bytes_consumed if pkt.data: logger.info(f\"{log_prefix} Received packet {pkt}\") else: logger.info(f\"{log_prefix}", "bytes_consumed if pkt.data: logger.info(f\"{log_prefix} Received packet {pkt}\") else: logger.info(f\"{log_prefix} Received", "buffer_len = len(buffer) while total_bytes_consumed < buffer_len: if buffer[0] ==", "buffer[bytes_consumed:] if not bytes_consumed: break total_bytes_consumed += bytes_consumed if pkt.data:", "https://www.embecosm.com/appnotes/ean4/embecosm-howto-rsp-server-ean4-issue-2.html \"\"\" from __future__ import annotations import logging import socket", "0: logger.warning( f\"{log_prefix} Skipping {leader} non-leader bytes {buffer[:total_bytes_consumed + leader]}\"", "buffer[-1] == GDBPacket.RSP_ESCAPE_CHAR: buffer[-1] = data[0] ^ 0x20 data =", "buffer.find(GDBPacket.PACKET_LEADER) if leader > 0: logger.warning( f\"{log_prefix} Skipping {leader} non-leader", "self.log_acks: logger.info(f\"{log_prefix} <<nack>>\") total_bytes_consumed += 1 buffer = buffer[1:] continue", "logger.warning( f\"{log_prefix} Skipping {leader} non-leader bytes {buffer[:total_bytes_consumed + leader]}\" )", "def _unescape_and_append(buffer: bytearray, data: bytes): # RSP uses '}' as", "the escape char, just add it to the 
buffer and", "more characters after the escape char, just add it to", "0x20 buffer.append(unescaped) data = data[escape_char_index + 2 :] buffer.extend(data) def", "are no more characters after the escape char, just add", "unescaped = data[escape_char_index + 1] ^ 0x20 buffer.append(unescaped) data =", "escapes. if buffer and buffer[-1] == GDBPacket.RSP_ESCAPE_CHAR: buffer[-1] = data[0]", "logging import socket from typing import Optional from typing import", "target_sock = socket.create_connection(self.target_addr) except ConnectionRefusedError: logger.error(f\"{self.target_color}Connection to Target@{self.target_addr} refused.\") self.close()", "Received empty packet\") if len(buffer): logger.debug( f\"{log_prefix} After processing: [{len(buffer)}]", "self.close() return self._target = ip_transport.IPTransport(self._on_target_bytes_read, f\"Target@{self.target_addr}\") self._target.set_connection(target_sock, self.target_addr) self._add_sub_connection(self._target) def", "super().set_connection(sock, addr) logger.debug(f\"{self.target_color}Connecting to target at {self.target_addr}\") try: target_sock =", "not data: return # Process any left over escapes. 
if", "If there are no more characters after the escape char,", "self.log_acks = False self.target_addr = target_addr self._target: Optional[ip_transport.IPTransport] = None", "escape_char_index: buffer.extend(data[: escape_char_index - 1]) unescaped = data[escape_char_index + 1]", "buffer.extend(data[: escape_char_index - 1]) unescaped = data[escape_char_index + 1] ^", "if colorize: self.target_color = \"\\x1b[34m\\x1b[47m\" self.gdb_color = \"\\x1b[30m\\x1b[47m\" else: self.target_color", "self._unescape_and_append(self._target_read_buffer, data) bytes_consumed = self._log_rsp_bytes(f\"{self.target_color}TARGET :\", self._target_read_buffer) if bytes_consumed: self._target_read_buffer", "and buffer[-1] == GDBPacket.RSP_ESCAPE_CHAR: buffer[-1] = data[0] ^ 0x20 data", "logger.info(f\"{log_prefix} <<nack>>\") total_bytes_consumed += 1 buffer = buffer[1:] continue if", "\"\\x1b[30m\\x1b[47m\" else: self.target_color = \"\" self.gdb_color = \"\" self._gdb_read_buffer: bytearray", "= data.find(GDBPacket.RSP_ESCAPE_CHAR) while escape_char_index >= 0: if escape_char_index == len(data):", "leader = buffer.find(GDBPacket.PACKET_LEADER) if leader > 0: logger.warning( f\"{log_prefix} Skipping", "if leader > 0: logger.warning( f\"{log_prefix} Skipping {leader} non-leader bytes", "Target@{self.target_addr} refused.\") self.close() return self._target = ip_transport.IPTransport(self._on_target_bytes_read, f\"Target@{self.target_addr}\") self._target.set_connection(target_sock, self.target_addr)", "data[1:] escape_char_index = data.find(GDBPacket.RSP_ESCAPE_CHAR) while escape_char_index >= 0: if escape_char_index", "bytearray = bytearray() def set_connection(self, sock, addr): super().set_connection(sock, addr) logger.debug(f\"{self.target_color}Connecting", "'}' as an escape character. 
Escapes are processed in this", "to the buffer and let it be # processed when", "if escape_char_index: buffer.extend(data[: escape_char_index - 1]) unescaped = data[escape_char_index +", "pkt.parse(buffer) buffer = buffer[bytes_consumed:] if not bytes_consumed: break total_bytes_consumed +=", "continue if buffer[0] == 0x03: logger.info(f\"{log_prefix} <<Interrupt request>>\") total_bytes_consumed +=", "GDBPacket from net import ip_transport logger = logging.getLogger(__name__) class GDBProxy(ip_transport.IPTransport):", "continue leader = buffer.find(GDBPacket.PACKET_LEADER) if leader > 0: logger.warning( f\"{log_prefix}", "are processed in this method # before adding to the", ":] buffer.extend(data) def _log_rsp_bytes(self, log_prefix: str, buffer: bytearray) -> int:", "# before adding to the read buffer to simplify parsing.", "escape_char_index = data.find(GDBPacket.RSP_ESCAPE_CHAR) while escape_char_index >= 0: if escape_char_index ==", "= buffer[leader:] bytes_consumed = pkt.parse(buffer) buffer = buffer[bytes_consumed:] if not", "= data[1:] escape_char_index = data.find(GDBPacket.RSP_ESCAPE_CHAR) while escape_char_index >= 0: if", "See https://sourceware.org/gdb/onlinedocs/gdb/Remote-Protocol.html See https://www.embecosm.com/appnotes/ean4/embecosm-howto-rsp-server-ean4-issue-2.html \"\"\" from __future__ import annotations import", "= logging.getLogger(__name__) class GDBProxy(ip_transport.IPTransport): \"\"\"GDB Remote Serial Protocol proxy.\"\"\" def", "self.target_addr = target_addr self._target: Optional[ip_transport.IPTransport] = None if colorize: self.target_color", "= buffer.find(GDBPacket.PACKET_LEADER) if leader > 0: logger.warning( f\"{log_prefix} Skipping {leader}", "int: total_bytes_consumed = 0 pkt = GDBPacket() buffer_len = len(buffer)", "buffer[0] == ord(\"+\"): if self.log_acks: logger.info(f\"{log_prefix} <<ack>>\") total_bytes_consumed += 1", "addr) logger.debug(f\"{self.target_color}Connecting to target at {self.target_addr}\") try: 
target_sock = socket.create_connection(self.target_addr)", "GDBProxy(ip_transport.IPTransport): \"\"\"GDB Remote Serial Protocol proxy.\"\"\" def __init__(self, target_addr: Tuple[str,", "def _append_gdb_read_buffer(self, data: bytes): self._unescape_and_append(self._gdb_read_buffer, data) bytes_consumed = self._log_rsp_bytes(f\"{self.gdb_color}GDB :\",", "\"\"\"Provides a GDB logging proxy. See https://sourceware.org/gdb/onlinedocs/gdb/Remote-Protocol.html See https://www.embecosm.com/appnotes/ean4/embecosm-howto-rsp-server-ean4-issue-2.html \"\"\"", "to target at {self.target_addr}\") try: target_sock = socket.create_connection(self.target_addr) except ConnectionRefusedError:", "int], colorize: bool = False): super().__init__(process_callback=self._on_gdb_bytes_read) self.log_acks = False self.target_addr", "over escapes. if buffer and buffer[-1] == GDBPacket.RSP_ESCAPE_CHAR: buffer[-1] =", "bytes_consumed = self._log_rsp_bytes(f\"{self.target_color}TARGET :\", self._target_read_buffer) if bytes_consumed: self._target_read_buffer = bytearray(self._target_read_buffer[bytes_consumed:])", "GDB logging proxy. See https://sourceware.org/gdb/onlinedocs/gdb/Remote-Protocol.html See https://www.embecosm.com/appnotes/ean4/embecosm-howto-rsp-server-ean4-issue-2.html \"\"\" from __future__", "bytearray(self._gdb_read_buffer[bytes_consumed:]) def _append_target_read_buffer(self, data: bytes): self._unescape_and_append(self._target_read_buffer, data) bytes_consumed = self._log_rsp_bytes(f\"{self.target_color}TARGET", "{self.target_addr}\") try: target_sock = socket.create_connection(self.target_addr) except ConnectionRefusedError: logger.error(f\"{self.target_color}Connection to Target@{self.target_addr}", "< buffer_len: if buffer[0] == ord(\"+\"): if self.log_acks: logger.info(f\"{log_prefix} <<ack>>\")" ]
[ "= Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0) #s1 = Sphere(P=[0,0,0],R=100,Pr=50) #dump = DumpBox() #C.add(gnd) #C.add(patch)", "S.save(filename='RectWaveguide.xml') #gnd = Matter('gnd') #sphere = Matter('sphere') #patch = Matter('patch')", "Sphere(P=[0,0,0],R=100,Pr=50) #dump = DumpBox() #C.add(gnd) #C.add(patch) #C.add(substrate) #C.add(sphere) #C.add(cdgsht) #C.add(exc)", "simulation # # FDTD Simulation Setting # F = FDTD()", "# # CSX (Geometry setting) # C = CSX() #", "#dump = DumpBox() #C.add(gnd) #C.add(patch) #C.add(substrate) #C.add(sphere) #C.add(cdgsht) #C.add(exc) #C.add(dump)", "A simple simulation # # FDTD Simulation Setting # F", "C.add(Excitation('excitation'),p=Box(P1=[-10,-10,0],P2=[10,10,0],Pr=0)) C.add(DumpBox('Et'),p=Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0)) C.add(RectilinearGrid(np.arange(-10,11,1),np.arange(-10,11,1),np.arange(-10,11,1))) C.add(Polyhedron()) S = OpenEMS(F,C) S.save(filename='RectWaveguide.xml') #gnd =", "#patch = Matter('patch') #substrate = Matter('substrate',typ='Ma',Epsilon=\"3.38\",Kappa=\"0.00046\") #cdgsht = Matter('copper',typ='Cs',conductivity=\"56e6\",thickness=\"40e-6\") #b1", "# CSX (Geometry setting) # C = CSX() # The", "import * # A simple simulation # # FDTD Simulation", "C = CSX() # The Box is added as a", "= Matter('gnd') #sphere = Matter('sphere') #patch = Matter('patch') #substrate =", "# # FDTD Simulation Setting # F = FDTD() F.add(Exc(typ='Sinus',f0=100000))", "#C.add(cdgsht) #C.add(exc) #C.add(dump) #C.set('gnd',b1) #C.set('gnd',b2) #C.set('sphere',s1) #C.set('copper',b1) #C.set('copper',b2) #C.set('Et',b4) #C.save(filename='structure.xml')", "#C.set('gnd',b1) #C.set('gnd',b2) #C.set('sphere',s1) #C.set('copper',b1) #C.set('copper',b2) #C.set('Et',b4) #C.save(filename='structure.xml') ##C.AddBox(prop='ConductingSheet',name='copper',P1=[0,-50,200],P2=[1000,50,200],Pri=10) ##C.AddCylinder(prop='Metal',name='cyl0',P1=[0,0,0],P2=[0,0,100],Rad=50,Pri=10) #", "F.add(Exc(typ='Sinus',f0=100000)) 
F.add(BoundaryCond(['PMC','PMC','PEC','PEC','MUR','MUR'])) # # CSX (Geometry setting) # C =", "#gnd = Matter('gnd') #sphere = Matter('sphere') #patch = Matter('patch') #substrate", "is added as a property C.add(Excitation('excitation'),p=Box(P1=[-10,-10,0],P2=[10,10,0],Pr=0)) C.add(DumpBox('Et'),p=Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0)) C.add(RectilinearGrid(np.arange(-10,11,1),np.arange(-10,11,1),np.arange(-10,11,1))) C.add(Polyhedron()) S", "Box(P1=[0,0,0],P2=[10,20,30],Pr=10) #b4 = Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0) #s1 = Sphere(P=[0,0,0],R=100,Pr=50) #dump = DumpBox()", "#b1 = Box(P1=[0,0,0],P2=[100,100,200],Pr=0) #b2 = Box(P1=[0,0,0],P2=[10,20,30],Pr=10) #b4 = Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0) #s1", "= OpenEMS(F,C) S.save(filename='RectWaveguide.xml') #gnd = Matter('gnd') #sphere = Matter('sphere') #patch", "#sphere = Matter('sphere') #patch = Matter('patch') #substrate = Matter('substrate',typ='Ma',Epsilon=\"3.38\",Kappa=\"0.00046\") #cdgsht", "#s1 = Sphere(P=[0,0,0],R=100,Pr=50) #dump = DumpBox() #C.add(gnd) #C.add(patch) #C.add(substrate) #C.add(sphere)", "Setting # F = FDTD() F.add(Exc(typ='Sinus',f0=100000)) F.add(BoundaryCond(['PMC','PMC','PEC','PEC','MUR','MUR'])) # # CSX", "setting) # C = CSX() # The Box is added", "OpenEMS(F,C) S.save(filename='RectWaveguide.xml') #gnd = Matter('gnd') #sphere = Matter('sphere') #patch =", "= FDTD() F.add(Exc(typ='Sinus',f0=100000)) F.add(BoundaryCond(['PMC','PMC','PEC','PEC','MUR','MUR'])) # # CSX (Geometry setting) #", "# FDTD Simulation Setting # F = FDTD() F.add(Exc(typ='Sinus',f0=100000)) F.add(BoundaryCond(['PMC','PMC','PEC','PEC','MUR','MUR']))", "= Box(P1=[0,0,0],P2=[100,100,200],Pr=0) #b2 = Box(P1=[0,0,0],P2=[10,20,30],Pr=10) #b4 = Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0) #s1 =", "FDTD Simulation Setting # F = FDTD() F.add(Exc(typ='Sinus',f0=100000)) F.add(BoundaryCond(['PMC','PMC','PEC','PEC','MUR','MUR'])) #", "#C.add(dump) #C.set('gnd',b1) #C.set('gnd',b2) #C.set('sphere',s1) #C.set('copper',b1) 
#C.set('copper',b2) #C.set('Et',b4) #C.save(filename='structure.xml') ##C.AddBox(prop='ConductingSheet',name='copper',P1=[0,-50,200],P2=[1000,50,200],Pri=10) ##C.AddCylinder(prop='Metal',name='cyl0',P1=[0,0,0],P2=[0,0,100],Rad=50,Pri=10)", "a property C.add(Excitation('excitation'),p=Box(P1=[-10,-10,0],P2=[10,10,0],Pr=0)) C.add(DumpBox('Et'),p=Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0)) C.add(RectilinearGrid(np.arange(-10,11,1),np.arange(-10,11,1),np.arange(-10,11,1))) C.add(Polyhedron()) S = OpenEMS(F,C) S.save(filename='RectWaveguide.xml')", "#C.add(gnd) #C.add(patch) #C.add(substrate) #C.add(sphere) #C.add(cdgsht) #C.add(exc) #C.add(dump) #C.set('gnd',b1) #C.set('gnd',b2) #C.set('sphere',s1)", "#b4 = Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0) #s1 = Sphere(P=[0,0,0],R=100,Pr=50) #dump = DumpBox() #C.add(gnd)", "CSX() # The Box is added as a property C.add(Excitation('excitation'),p=Box(P1=[-10,-10,0],P2=[10,10,0],Pr=0))", "# C = CSX() # The Box is added as", "= Matter('substrate',typ='Ma',Epsilon=\"3.38\",Kappa=\"0.00046\") #cdgsht = Matter('copper',typ='Cs',conductivity=\"56e6\",thickness=\"40e-6\") #b1 = Box(P1=[0,0,0],P2=[100,100,200],Pr=0) #b2 =", "The Box is added as a property C.add(Excitation('excitation'),p=Box(P1=[-10,-10,0],P2=[10,10,0],Pr=0)) C.add(DumpBox('Et'),p=Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0)) C.add(RectilinearGrid(np.arange(-10,11,1),np.arange(-10,11,1),np.arange(-10,11,1)))", "F = FDTD() F.add(Exc(typ='Sinus',f0=100000)) F.add(BoundaryCond(['PMC','PMC','PEC','PEC','MUR','MUR'])) # # CSX (Geometry setting)", "= Matter('patch') #substrate = Matter('substrate',typ='Ma',Epsilon=\"3.38\",Kappa=\"0.00046\") #cdgsht = Matter('copper',typ='Cs',conductivity=\"56e6\",thickness=\"40e-6\") #b1 =", "Matter('sphere') #patch = Matter('patch') #substrate = Matter('substrate',typ='Ma',Epsilon=\"3.38\",Kappa=\"0.00046\") #cdgsht = Matter('copper',typ='Cs',conductivity=\"56e6\",thickness=\"40e-6\")", "Matter('gnd') #sphere = Matter('sphere') #patch = Matter('patch') 
#substrate = Matter('substrate',typ='Ma',Epsilon=\"3.38\",Kappa=\"0.00046\")", "#b2 = Box(P1=[0,0,0],P2=[10,20,30],Pr=10) #b4 = Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0) #s1 = Sphere(P=[0,0,0],R=100,Pr=50) #dump", "#substrate = Matter('substrate',typ='Ma',Epsilon=\"3.38\",Kappa=\"0.00046\") #cdgsht = Matter('copper',typ='Cs',conductivity=\"56e6\",thickness=\"40e-6\") #b1 = Box(P1=[0,0,0],P2=[100,100,200],Pr=0) #b2", "#cdgsht = Matter('copper',typ='Cs',conductivity=\"56e6\",thickness=\"40e-6\") #b1 = Box(P1=[0,0,0],P2=[100,100,200],Pr=0) #b2 = Box(P1=[0,0,0],P2=[10,20,30],Pr=10) #b4", "DumpBox() #C.add(gnd) #C.add(patch) #C.add(substrate) #C.add(sphere) #C.add(cdgsht) #C.add(exc) #C.add(dump) #C.set('gnd',b1) #C.set('gnd',b2)", "# F = FDTD() F.add(Exc(typ='Sinus',f0=100000)) F.add(BoundaryCond(['PMC','PMC','PEC','PEC','MUR','MUR'])) # # CSX (Geometry", "= DumpBox() #C.add(gnd) #C.add(patch) #C.add(substrate) #C.add(sphere) #C.add(cdgsht) #C.add(exc) #C.add(dump) #C.set('gnd',b1)", "<filename>pylayers/em/openems/test/Rect_Waveguide.py<gh_stars>100-1000 from openems.openems import * # A simple simulation #", "CSX (Geometry setting) # C = CSX() # The Box", "#C.add(sphere) #C.add(cdgsht) #C.add(exc) #C.add(dump) #C.set('gnd',b1) #C.set('gnd',b2) #C.set('sphere',s1) #C.set('copper',b1) #C.set('copper',b2) #C.set('Et',b4)", "F.add(BoundaryCond(['PMC','PMC','PEC','PEC','MUR','MUR'])) # # CSX (Geometry setting) # C = CSX()", "# The Box is added as a property C.add(Excitation('excitation'),p=Box(P1=[-10,-10,0],P2=[10,10,0],Pr=0)) C.add(DumpBox('Et'),p=Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0))", "C.add(RectilinearGrid(np.arange(-10,11,1),np.arange(-10,11,1),np.arange(-10,11,1))) C.add(Polyhedron()) S = OpenEMS(F,C) S.save(filename='RectWaveguide.xml') #gnd = Matter('gnd') #sphere", "* # A simple simulation # # FDTD Simulation Setting", "openems.openems import * # A simple simulation # # FDTD", "= Sphere(P=[0,0,0],R=100,Pr=50) #dump = DumpBox() #C.add(gnd) #C.add(patch) 
#C.add(substrate) #C.add(sphere) #C.add(cdgsht)", "C.add(Polyhedron()) S = OpenEMS(F,C) S.save(filename='RectWaveguide.xml') #gnd = Matter('gnd') #sphere =", "Simulation Setting # F = FDTD() F.add(Exc(typ='Sinus',f0=100000)) F.add(BoundaryCond(['PMC','PMC','PEC','PEC','MUR','MUR'])) # #", "Box(P1=[0,0,0],P2=[100,100,200],Pr=0) #b2 = Box(P1=[0,0,0],P2=[10,20,30],Pr=10) #b4 = Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0) #s1 = Sphere(P=[0,0,0],R=100,Pr=50)", "= Box(P1=[0,0,0],P2=[10,20,30],Pr=10) #b4 = Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0) #s1 = Sphere(P=[0,0,0],R=100,Pr=50) #dump =", "Matter('copper',typ='Cs',conductivity=\"56e6\",thickness=\"40e-6\") #b1 = Box(P1=[0,0,0],P2=[100,100,200],Pr=0) #b2 = Box(P1=[0,0,0],P2=[10,20,30],Pr=10) #b4 = Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0)", "= Matter('copper',typ='Cs',conductivity=\"56e6\",thickness=\"40e-6\") #b1 = Box(P1=[0,0,0],P2=[100,100,200],Pr=0) #b2 = Box(P1=[0,0,0],P2=[10,20,30],Pr=10) #b4 =", "Box is added as a property C.add(Excitation('excitation'),p=Box(P1=[-10,-10,0],P2=[10,10,0],Pr=0)) C.add(DumpBox('Et'),p=Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0)) C.add(RectilinearGrid(np.arange(-10,11,1),np.arange(-10,11,1),np.arange(-10,11,1))) C.add(Polyhedron())", "Matter('patch') #substrate = Matter('substrate',typ='Ma',Epsilon=\"3.38\",Kappa=\"0.00046\") #cdgsht = Matter('copper',typ='Cs',conductivity=\"56e6\",thickness=\"40e-6\") #b1 = Box(P1=[0,0,0],P2=[100,100,200],Pr=0)", "from openems.openems import * # A simple simulation # #", "as a property C.add(Excitation('excitation'),p=Box(P1=[-10,-10,0],P2=[10,10,0],Pr=0)) C.add(DumpBox('Et'),p=Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0)) C.add(RectilinearGrid(np.arange(-10,11,1),np.arange(-10,11,1),np.arange(-10,11,1))) C.add(Polyhedron()) S = OpenEMS(F,C)", "S = OpenEMS(F,C) S.save(filename='RectWaveguide.xml') #gnd = Matter('gnd') #sphere = Matter('sphere')", "# A simple simulation # # FDTD Simulation Setting #", "FDTD() F.add(Exc(typ='Sinus',f0=100000)) 
F.add(BoundaryCond(['PMC','PMC','PEC','PEC','MUR','MUR'])) # # CSX (Geometry setting) # C", "= Matter('sphere') #patch = Matter('patch') #substrate = Matter('substrate',typ='Ma',Epsilon=\"3.38\",Kappa=\"0.00046\") #cdgsht =", "(Geometry setting) # C = CSX() # The Box is", "added as a property C.add(Excitation('excitation'),p=Box(P1=[-10,-10,0],P2=[10,10,0],Pr=0)) C.add(DumpBox('Et'),p=Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0)) C.add(RectilinearGrid(np.arange(-10,11,1),np.arange(-10,11,1),np.arange(-10,11,1))) C.add(Polyhedron()) S =", "= CSX() # The Box is added as a property", "property C.add(Excitation('excitation'),p=Box(P1=[-10,-10,0],P2=[10,10,0],Pr=0)) C.add(DumpBox('Et'),p=Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0)) C.add(RectilinearGrid(np.arange(-10,11,1),np.arange(-10,11,1),np.arange(-10,11,1))) C.add(Polyhedron()) S = OpenEMS(F,C) S.save(filename='RectWaveguide.xml') #gnd", "C.add(DumpBox('Et'),p=Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0)) C.add(RectilinearGrid(np.arange(-10,11,1),np.arange(-10,11,1),np.arange(-10,11,1))) C.add(Polyhedron()) S = OpenEMS(F,C) S.save(filename='RectWaveguide.xml') #gnd = Matter('gnd')", "Matter('substrate',typ='Ma',Epsilon=\"3.38\",Kappa=\"0.00046\") #cdgsht = Matter('copper',typ='Cs',conductivity=\"56e6\",thickness=\"40e-6\") #b1 = Box(P1=[0,0,0],P2=[100,100,200],Pr=0) #b2 = Box(P1=[0,0,0],P2=[10,20,30],Pr=10)", "#C.add(exc) #C.add(dump) #C.set('gnd',b1) #C.set('gnd',b2) #C.set('sphere',s1) #C.set('copper',b1) #C.set('copper',b2) #C.set('Et',b4) #C.save(filename='structure.xml') ##C.AddBox(prop='ConductingSheet',name='copper',P1=[0,-50,200],P2=[1000,50,200],Pri=10)", "#C.add(patch) #C.add(substrate) #C.add(sphere) #C.add(cdgsht) #C.add(exc) #C.add(dump) #C.set('gnd',b1) #C.set('gnd',b2) #C.set('sphere',s1) #C.set('copper',b1)", "Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0) #s1 = Sphere(P=[0,0,0],R=100,Pr=50) #dump = DumpBox() #C.add(gnd) #C.add(patch) #C.add(substrate)", "simple simulation # # FDTD Simulation Setting # F =", "#C.add(substrate) 
#C.add(sphere) #C.add(cdgsht) #C.add(exc) #C.add(dump) #C.set('gnd',b1) #C.set('gnd',b2) #C.set('sphere',s1) #C.set('copper',b1) #C.set('copper',b2)" ]
[ "= Y.values.reshape(-1) Y_quantile = np.quantile(Y, Y_quantile, axis = 0) bigger_mask", "= (Y > Y_quantile).copy() smaller_mask = (Y <= Y_quantile).copy() Y[bigger_mask]", "params.get(\"Y_segments\") Y_quantile = params.get(\"Y_quantile\") print(\"segmenting Y\") Y = Y.values.reshape(-1) Y_quantile", "0) bigger_mask = (Y > Y_quantile).copy() smaller_mask = (Y <=", "= params.get(\"Y_segments\") Y_quantile = params.get(\"Y_quantile\") print(\"segmenting Y\") Y = Y.values.reshape(-1)", "(Y <= Y_quantile).copy() Y[bigger_mask] = 1 Y[smaller_mask] = 0 Y", "np def segment_Y(Y, **params): Y_segments = params.get(\"Y_segments\") Y_quantile = params.get(\"Y_quantile\")", "> Y_quantile).copy() smaller_mask = (Y <= Y_quantile).copy() Y[bigger_mask] = 1", "axis = 0) bigger_mask = (Y > Y_quantile).copy() smaller_mask =", "= 0) bigger_mask = (Y > Y_quantile).copy() smaller_mask = (Y", "Y_quantile, axis = 0) bigger_mask = (Y > Y_quantile).copy() smaller_mask", "**params): Y_segments = params.get(\"Y_segments\") Y_quantile = params.get(\"Y_quantile\") print(\"segmenting Y\") Y", "smaller_mask = (Y <= Y_quantile).copy() Y[bigger_mask] = 1 Y[smaller_mask] =", "Y_quantile).copy() smaller_mask = (Y <= Y_quantile).copy() Y[bigger_mask] = 1 Y[smaller_mask]", "def segment_Y(Y, **params): Y_segments = params.get(\"Y_segments\") Y_quantile = params.get(\"Y_quantile\") print(\"segmenting", "Y\") Y = Y.values.reshape(-1) Y_quantile = np.quantile(Y, Y_quantile, axis =", "Y.values.reshape(-1) Y_quantile = np.quantile(Y, Y_quantile, axis = 0) bigger_mask =", "np.quantile(Y, Y_quantile, axis = 0) bigger_mask = (Y > Y_quantile).copy()", "segment_Y(Y, **params): Y_segments = params.get(\"Y_segments\") Y_quantile = params.get(\"Y_quantile\") print(\"segmenting Y\")", "= np.quantile(Y, Y_quantile, axis = 0) bigger_mask = (Y >", "= (Y <= Y_quantile).copy() Y[bigger_mask] = 1 Y[smaller_mask] = 0", "Y_segments = params.get(\"Y_segments\") Y_quantile = params.get(\"Y_quantile\") print(\"segmenting 
Y\") Y =", "= params.get(\"Y_quantile\") print(\"segmenting Y\") Y = Y.values.reshape(-1) Y_quantile = np.quantile(Y,", "print(\"segmenting Y\") Y = Y.values.reshape(-1) Y_quantile = np.quantile(Y, Y_quantile, axis", "bigger_mask = (Y > Y_quantile).copy() smaller_mask = (Y <= Y_quantile).copy()", "Y = Y.values.reshape(-1) Y_quantile = np.quantile(Y, Y_quantile, axis = 0)", "params.get(\"Y_quantile\") print(\"segmenting Y\") Y = Y.values.reshape(-1) Y_quantile = np.quantile(Y, Y_quantile,", "import numpy as np def segment_Y(Y, **params): Y_segments = params.get(\"Y_segments\")", "Y_quantile).copy() Y[bigger_mask] = 1 Y[smaller_mask] = 0 Y = Y.astype(int)", "<= Y_quantile).copy() Y[bigger_mask] = 1 Y[smaller_mask] = 0 Y =", "as np def segment_Y(Y, **params): Y_segments = params.get(\"Y_segments\") Y_quantile =", "numpy as np def segment_Y(Y, **params): Y_segments = params.get(\"Y_segments\") Y_quantile", "Y_quantile = params.get(\"Y_quantile\") print(\"segmenting Y\") Y = Y.values.reshape(-1) Y_quantile =", "= 1 Y[smaller_mask] = 0 Y = Y.astype(int) return Y", "Y_quantile = np.quantile(Y, Y_quantile, axis = 0) bigger_mask = (Y", "(Y > Y_quantile).copy() smaller_mask = (Y <= Y_quantile).copy() Y[bigger_mask] =", "Y[bigger_mask] = 1 Y[smaller_mask] = 0 Y = Y.astype(int) return" ]
[ "print(\"PROPOSITION 4 : \", proposition4) print(\"REPONSE : \", reponse) else:", "\") print(\"*******************************************************************************\") print(\"ENONCE : \", result[0]) print(\"PROPOSITION 1 : \",", "cursor = conn.cursor() #EXECUTER LA REQUETE AVEC LA BDD query", "rows = cursor.fetchall() if rows: nb_rows = len(rows) num_question =", "= line[2] proposition2 = line[3] proposition3 = line[4] proposition4 =", "\".format(i) num_prop = ''.join(num_prop) line = ''.join(question[i]) line = num_prop", "line = ''.join(question[i]) line = num_prop + line synthetize_voice(line) delete_wav()", "\", result[4]) print(\"REPONSE : \", result[5]) #complete_question = ''.join(complete_question) #Convert", "while(counter <= 5): questionAI(1) if (__name__ == '__main__'): result =", "line[2] proposition2 = line[3] proposition3 = line[4] proposition4 = line[5]", "AVEC LA BDD query = (\"SELECT * FROM Question INNER", "DES INFORMATIONS rows = cursor.fetchall() if rows: nb_rows = len(rows)", "else: print(\"Ce thème ne contient pas de questions\") def questionAI(id_theme):", "the query results #RECUPERATION DES TUPLES result.append(question[1]) result.append(question[2]) result.append(question[3]) result.append(question[4])", "contient pas de questions\") def questionAI(id_theme): i = 0 #CONNEXION", "print(\"*******************************************************************************\") print(\" QUESTION \",i,\" \") print(\"*******************************************************************************\") print(\"ENONCE : \", enonce)", "\", result[2]) print(\"PROPOSITION 3 : \", result[3]) print(\"PROPOSITION 4 :", "= (\"SELECT * FROM Question INNER JOIN themes_questions ON Question.ID_QUESTION", "result.append(question[3]) result.append(question[4]) result.append(question[5]) result.append(question[5]) #This last one is the answer", "proposition4) print(\"REPONSE : \", reponse) else: print(\"Ce thème ne contient", "REQUETE AVEC LA BDD query = 
(\"SELECT * FROM Question", "\", result[3]) print(\"PROPOSITION 4 : \", result[4]) print(\"REPONSE : \",", "{} \".format(i) num_prop = ''.join(num_prop) line = ''.join(question[i]) line =", "\", enonce) print(\"PROPOSITION 1 : \", proposition1) print(\"PROPOSITION 2 :", "i in range(1,5) : num_prop = \"Proposition {} \".format(i) num_prop", "delete_wav() def quiz(): counter = 1 while(counter <= 5): questionAI(1)", "cursor.fetchall() if rows: nb_rows = len(rows) num_question = random.randint(1, nb_rows)", "BDD conn = mysql.connector.connect(host=\"localhost\", user=\"phpmyadmin\", password=\"<PASSWORD>\", database=\"Puzzlebox\") cursor = conn.cursor()", "de questions\") def tell_question(question): synthetize_voice(question[0]) for i in range(1,5) :", "password=\"<PASSWORD>\", database=\"Puzzlebox\") cursor = conn.cursor() #EXECUTER LA REQUETE AVEC LA", "5): questionAI(1) if (__name__ == '__main__'): result = questionAI(1) tell_question(result)", ": \", proposition2) print(\"PROPOSITION 3 : \", proposition3) print(\"PROPOSITION 4", "print(\"PROPOSITION 4 : \", result[4]) print(\"REPONSE : \", result[5]) #complete_question", "line[5] reponse = line[5] print(\"*******************************************************************************\") print(\" QUESTION \",i,\" \") print(\"*******************************************************************************\")", ": \", proposition4) print(\"REPONSE : \", reponse) else: print(\"Ce thème", "= [] #Tab which stores the query results #RECUPERATION DES", "reponse = line[5] print(\"*******************************************************************************\") print(\" QUESTION \",i,\" \") print(\"*******************************************************************************\") print(\"ENONCE", "print(\"Ce thème ne contient pas de questions\") def questionAI(id_theme): i", "= random.randint(1, nb_rows) #L'index de la liste commence à zéro,", "il faut donc décaler d'un le numéro num_question = num_question", 
"themes_questions ON Question.ID_QUESTION = themes_questions.ID_QUESTION WHERE ID_THEME=%s\") cursor.execute(query, (id_theme, ))", "rows[num_question] result = [] #Tab which stores the query results", "i = 0 #CONNEXION A LA BDD conn = mysql.connector.connect(host=\"localhost\",", "2 : \", proposition2) print(\"PROPOSITION 3 : \", proposition3) print(\"PROPOSITION", "result = [] #Tab which stores the query results #RECUPERATION", "= ''.join(complete_question) #Convert tuple into string return result else: print(\"Ce", "counter = 1 while(counter <= 5): questionAI(1) if (__name__ ==", "in rows: i += 1 enonce = line[1] proposition1 =", "is the answer print(\"*******************************************************************************\") print(\" QUESTION \",num_question+1,\" \") print(\"*******************************************************************************\") print(\"ENONCE", "thème ne contient pas de questions\") def questionAI(id_theme): i =", ": \", result[0]) print(\"PROPOSITION 1 : \", result[1]) print(\"PROPOSITION 2", "- 1 question = rows[num_question] result = [] #Tab which", ")) #RECUPERATION DES INFORMATIONS rows = cursor.fetchall() if rows: nb_rows", "user=\"phpmyadmin\", password=\"<PASSWORD>\", database=\"Puzzlebox\") cursor = conn.cursor() #EXECUTER LA REQUETE AVEC", "print(\"REPONSE : \", result[5]) #complete_question = ''.join(complete_question) #Convert tuple into", "in range(1,5) : num_prop = \"Proposition {} \".format(i) num_prop =", "import synthetize_voice, delete_wav def AllQuestionAI(id_theme): i = 0 #CONNEXION A", "proposition1 = line[2] proposition2 = line[3] proposition3 = line[4] proposition4", "enonce) print(\"PROPOSITION 1 : \", proposition1) print(\"PROPOSITION 2 : \",", "into string return result else: print(\"Ce thème ne contient pas", "cursor.execute(query, (id_theme, )) #RECUPERATION DES INFORMATIONS rows = cursor.fetchall() if", "1 while(counter <= 5): questionAI(1) if (__name__ == '__main__'): result", "= line[5] 
print(\"*******************************************************************************\") print(\" QUESTION \",i,\" \") print(\"*******************************************************************************\") print(\"ENONCE :", "\", result[5]) #complete_question = ''.join(complete_question) #Convert tuple into string return", "INNER JOIN themes_questions ON Question.ID_QUESTION = themes_questions.ID_QUESTION WHERE ID_THEME=%s\") cursor.execute(query,", "questionAI(id_theme): i = 0 #CONNEXION A LA BDD conn =", "BDD query = (\"SELECT * FROM Question INNER JOIN themes_questions", "INFORMATIONS rows = cursor.fetchall() if rows: for line in rows:", "proposition3) print(\"PROPOSITION 4 : \", proposition4) print(\"REPONSE : \", reponse)", "4 : \", proposition4) print(\"REPONSE : \", reponse) else: print(\"Ce", "#L'index de la liste commence à zéro, il faut donc", "question = rows[num_question] result = [] #Tab which stores the", "\",i,\" \") print(\"*******************************************************************************\") print(\"ENONCE : \", enonce) print(\"PROPOSITION 1 :", "proposition2 = line[3] proposition3 = line[4] proposition4 = line[5] reponse", "result[2]) print(\"PROPOSITION 3 : \", result[3]) print(\"PROPOSITION 4 : \",", "= rows[num_question] result = [] #Tab which stores the query", "tell_question(question): synthetize_voice(question[0]) for i in range(1,5) : num_prop = \"Proposition", "de questions\") def questionAI(id_theme): i = 0 #CONNEXION A LA", "synthetize_voice(question[0]) for i in range(1,5) : num_prop = \"Proposition {}", "result.append(question[4]) result.append(question[5]) result.append(question[5]) #This last one is the answer print(\"*******************************************************************************\")", "result[1]) print(\"PROPOSITION 2 : \", result[2]) print(\"PROPOSITION 3 : \",", "results #RECUPERATION DES TUPLES result.append(question[1]) result.append(question[2]) result.append(question[3]) 
result.append(question[4]) result.append(question[5]) result.append(question[5])", "= cursor.fetchall() if rows: nb_rows = len(rows) num_question = random.randint(1,", "print(\"PROPOSITION 2 : \", result[2]) print(\"PROPOSITION 3 : \", result[3])", "contient pas de questions\") def tell_question(question): synthetize_voice(question[0]) for i in", "ID_THEME=%s\") cursor.execute(query, (id_theme, )) #RECUPERATION DES INFORMATIONS rows = cursor.fetchall()", ": \", reponse) else: print(\"Ce thème ne contient pas de", "result.append(question[5]) #This last one is the answer print(\"*******************************************************************************\") print(\" QUESTION", ": \", result[3]) print(\"PROPOSITION 4 : \", result[4]) print(\"REPONSE :", "database=\"Puzzlebox\") cursor = conn.cursor() #EXECUTER LA REQUETE AVEC LA BDD", "random from voice import synthetize_voice, delete_wav def AllQuestionAI(id_theme): i =", "result.append(question[5]) result.append(question[5]) #This last one is the answer print(\"*******************************************************************************\") print(\"", "print(\" QUESTION \",i,\" \") print(\"*******************************************************************************\") print(\"ENONCE : \", enonce) print(\"PROPOSITION", "line synthetize_voice(line) delete_wav() def quiz(): counter = 1 while(counter <=", "LA BDD conn = mysql.connector.connect(host=\"localhost\", user=\"phpmyadmin\", password=\"<PASSWORD>\", database=\"Puzzlebox\") cursor =", "LA BDD query = (\"SELECT * FROM Question INNER JOIN", "* FROM Question INNER JOIN themes_questions ON Question.ID_QUESTION = themes_questions.ID_QUESTION", "conn.cursor() #EXECUTER LA REQUETE AVEC LA BDD query = (\"SELECT", "d'un le numéro num_question = num_question - 1 question =", "3 : \", result[3]) print(\"PROPOSITION 4 : \", result[4]) print(\"REPONSE", ": \", result[5]) #complete_question = ''.join(complete_question) #Convert tuple into string", "#RECUPERATION DES 
TUPLES result.append(question[1]) result.append(question[2]) result.append(question[3]) result.append(question[4]) result.append(question[5]) result.append(question[5]) #This", "\", result[1]) print(\"PROPOSITION 2 : \", result[2]) print(\"PROPOSITION 3 :", "print(\"PROPOSITION 2 : \", proposition2) print(\"PROPOSITION 3 : \", proposition3)", ": \", result[1]) print(\"PROPOSITION 2 : \", result[2]) print(\"PROPOSITION 3", "range(1,5) : num_prop = \"Proposition {} \".format(i) num_prop = ''.join(num_prop)", "1 enonce = line[1] proposition1 = line[2] proposition2 = line[3]", "= num_question - 1 question = rows[num_question] result = []", "themes_questions.ID_QUESTION WHERE ID_THEME=%s\") cursor.execute(query, (id_theme, )) #RECUPERATION DES INFORMATIONS rows", "num_question - 1 question = rows[num_question] result = [] #Tab", ": num_prop = \"Proposition {} \".format(i) num_prop = ''.join(num_prop) line", "from voice import synthetize_voice, delete_wav def AllQuestionAI(id_theme): i = 0", "line[1] proposition1 = line[2] proposition2 = line[3] proposition3 = line[4]", "\", proposition2) print(\"PROPOSITION 3 : \", proposition3) print(\"PROPOSITION 4 :", "#complete_question = ''.join(complete_question) #Convert tuple into string return result else:", "line in rows: i += 1 enonce = line[1] proposition1", "line = num_prop + line synthetize_voice(line) delete_wav() def quiz(): counter", "1 : \", result[1]) print(\"PROPOSITION 2 : \", result[2]) print(\"PROPOSITION", "''.join(complete_question) #Convert tuple into string return result else: print(\"Ce thème", "la liste commence à zéro, il faut donc décaler d'un", "liste commence à zéro, il faut donc décaler d'un le", "random.randint(1, nb_rows) #L'index de la liste commence à zéro, il", "WHERE ID_THEME=%s\") cursor.execute(query, (id_theme, )) #RECUPERATION DES INFORMATIONS rows =", "def questionAI(id_theme): i = 0 #CONNEXION A LA BDD conn", ": \", result[2]) print(\"PROPOSITION 3 : \", result[3]) print(\"PROPOSITION 4", 
"''.join(question[i]) line = num_prop + line synthetize_voice(line) delete_wav() def quiz():", "len(rows) num_question = random.randint(1, nb_rows) #L'index de la liste commence", "INFORMATIONS rows = cursor.fetchall() if rows: nb_rows = len(rows) num_question", "décaler d'un le numéro num_question = num_question - 1 question", "TUPLES result.append(question[1]) result.append(question[2]) result.append(question[3]) result.append(question[4]) result.append(question[5]) result.append(question[5]) #This last one", "ne contient pas de questions\") def tell_question(question): synthetize_voice(question[0]) for i", "result[4]) print(\"REPONSE : \", result[5]) #complete_question = ''.join(complete_question) #Convert tuple", "print(\"PROPOSITION 1 : \", proposition1) print(\"PROPOSITION 2 : \", proposition2)", "one is the answer print(\"*******************************************************************************\") print(\" QUESTION \",num_question+1,\" \") print(\"*******************************************************************************\")", "questions\") def tell_question(question): synthetize_voice(question[0]) for i in range(1,5) : num_prop", "AllQuestionAI(id_theme): i = 0 #CONNEXION A LA BDD conn =", "tuple into string return result else: print(\"Ce thème ne contient", "= ''.join(question[i]) line = num_prop + line synthetize_voice(line) delete_wav() def", "LA REQUETE AVEC LA BDD query = (\"SELECT * FROM", "print(\"*******************************************************************************\") print(\" QUESTION \",num_question+1,\" \") print(\"*******************************************************************************\") print(\"ENONCE : \", result[0])", "delete_wav def AllQuestionAI(id_theme): i = 0 #CONNEXION A LA BDD", "proposition3 = line[4] proposition4 = line[5] reponse = line[5] print(\"*******************************************************************************\")", "= 0 #CONNEXION A LA BDD conn = mysql.connector.connect(host=\"localhost\", 
user=\"phpmyadmin\",", "numéro num_question = num_question - 1 question = rows[num_question] result", "(\"SELECT * FROM Question INNER JOIN themes_questions ON Question.ID_QUESTION =", "voice import synthetize_voice, delete_wav def AllQuestionAI(id_theme): i = 0 #CONNEXION", "= ''.join(num_prop) line = ''.join(question[i]) line = num_prop + line", "= line[4] proposition4 = line[5] reponse = line[5] print(\"*******************************************************************************\") print(\"", "= len(rows) num_question = random.randint(1, nb_rows) #L'index de la liste", "query = (\"SELECT * FROM Question INNER JOIN themes_questions ON", "enonce = line[1] proposition1 = line[2] proposition2 = line[3] proposition3", "\") print(\"*******************************************************************************\") print(\"ENONCE : \", enonce) print(\"PROPOSITION 1 : \",", "#This last one is the answer print(\"*******************************************************************************\") print(\" QUESTION \",num_question+1,\"", "\",num_question+1,\" \") print(\"*******************************************************************************\") print(\"ENONCE : \", result[0]) print(\"PROPOSITION 1 :", "for i in range(1,5) : num_prop = \"Proposition {} \".format(i)", "nb_rows) #L'index de la liste commence à zéro, il faut", "= line[3] proposition3 = line[4] proposition4 = line[5] reponse =", "\", proposition4) print(\"REPONSE : \", reponse) else: print(\"Ce thème ne", "result.append(question[2]) result.append(question[3]) result.append(question[4]) result.append(question[5]) result.append(question[5]) #This last one is the", ": \", proposition1) print(\"PROPOSITION 2 : \", proposition2) print(\"PROPOSITION 3", "= conn.cursor() #EXECUTER LA REQUETE AVEC LA BDD query =", "string return result else: print(\"Ce thème ne contient pas de", "def AllQuestionAI(id_theme): i = 0 #CONNEXION A LA BDD conn", "def tell_question(question): synthetize_voice(question[0]) for i in 
range(1,5) : num_prop =", "ON Question.ID_QUESTION = themes_questions.ID_QUESTION WHERE ID_THEME=%s\") cursor.execute(query, (id_theme, )) #RECUPERATION", "print(\"PROPOSITION 3 : \", proposition3) print(\"PROPOSITION 4 : \", proposition4)", ": \", enonce) print(\"PROPOSITION 1 : \", proposition1) print(\"PROPOSITION 2", "= line[5] reponse = line[5] print(\"*******************************************************************************\") print(\" QUESTION \",i,\" \")", "A LA BDD conn = mysql.connector.connect(host=\"localhost\", user=\"phpmyadmin\", password=\"<PASSWORD>\", database=\"Puzzlebox\") cursor", "line[5] print(\"*******************************************************************************\") print(\" QUESTION \",i,\" \") print(\"*******************************************************************************\") print(\"ENONCE : \",", "1 question = rows[num_question] result = [] #Tab which stores", "Question.ID_QUESTION = themes_questions.ID_QUESTION WHERE ID_THEME=%s\") cursor.execute(query, (id_theme, )) #RECUPERATION DES", "cursor.fetchall() if rows: for line in rows: i += 1", "<= 5): questionAI(1) if (__name__ == '__main__'): result = questionAI(1)", "num_question = random.randint(1, nb_rows) #L'index de la liste commence à", "= themes_questions.ID_QUESTION WHERE ID_THEME=%s\") cursor.execute(query, (id_theme, )) #RECUPERATION DES INFORMATIONS", "thème ne contient pas de questions\") def tell_question(question): synthetize_voice(question[0]) for", "à zéro, il faut donc décaler d'un le numéro num_question", "import mysql.connector import random from voice import synthetize_voice, delete_wav def", "which stores the query results #RECUPERATION DES TUPLES result.append(question[1]) result.append(question[2])", "\", proposition3) print(\"PROPOSITION 4 : \", proposition4) print(\"REPONSE : \",", "''.join(num_prop) line = ''.join(question[i]) line = num_prop + line synthetize_voice(line)", "result[5]) #complete_question = ''.join(complete_question) 
#Convert tuple into string return result", "result.append(question[1]) result.append(question[2]) result.append(question[3]) result.append(question[4]) result.append(question[5]) result.append(question[5]) #This last one is", "nb_rows = len(rows) num_question = random.randint(1, nb_rows) #L'index de la", "Question INNER JOIN themes_questions ON Question.ID_QUESTION = themes_questions.ID_QUESTION WHERE ID_THEME=%s\")", "proposition1) print(\"PROPOSITION 2 : \", proposition2) print(\"PROPOSITION 3 : \",", "num_question = num_question - 1 question = rows[num_question] result =", "DES INFORMATIONS rows = cursor.fetchall() if rows: for line in", "proposition2) print(\"PROPOSITION 3 : \", proposition3) print(\"PROPOSITION 4 : \",", "rows: nb_rows = len(rows) num_question = random.randint(1, nb_rows) #L'index de", "print(\"PROPOSITION 1 : \", result[1]) print(\"PROPOSITION 2 : \", result[2])", ": \", result[4]) print(\"REPONSE : \", result[5]) #complete_question = ''.join(complete_question)", "+ line synthetize_voice(line) delete_wav() def quiz(): counter = 1 while(counter", "synthetize_voice(line) delete_wav() def quiz(): counter = 1 while(counter <= 5):", "quiz(): counter = 1 while(counter <= 5): questionAI(1) if (__name__", "pas de questions\") def tell_question(question): synthetize_voice(question[0]) for i in range(1,5)", "print(\"*******************************************************************************\") print(\"ENONCE : \", enonce) print(\"PROPOSITION 1 : \", proposition1)", "print(\"Ce thème ne contient pas de questions\") def tell_question(question): synthetize_voice(question[0])", "synthetize_voice, delete_wav def AllQuestionAI(id_theme): i = 0 #CONNEXION A LA", "2 : \", result[2]) print(\"PROPOSITION 3 : \", result[3]) print(\"PROPOSITION", "num_prop = \"Proposition {} \".format(i) num_prop = ''.join(num_prop) line =", "#CONNEXION A LA BDD conn = mysql.connector.connect(host=\"localhost\", user=\"phpmyadmin\", password=\"<PASSWORD>\", 
database=\"Puzzlebox\")", "the answer print(\"*******************************************************************************\") print(\" QUESTION \",num_question+1,\" \") print(\"*******************************************************************************\") print(\"ENONCE :", "#RECUPERATION DES INFORMATIONS rows = cursor.fetchall() if rows: nb_rows =", "mysql.connector.connect(host=\"localhost\", user=\"phpmyadmin\", password=\"<PASSWORD>\", database=\"Puzzlebox\") cursor = conn.cursor() #EXECUTER LA REQUETE", "num_prop + line synthetize_voice(line) delete_wav() def quiz(): counter = 1", "print(\" QUESTION \",num_question+1,\" \") print(\"*******************************************************************************\") print(\"ENONCE : \", result[0]) print(\"PROPOSITION", "def quiz(): counter = 1 while(counter <= 5): questionAI(1) if", "= mysql.connector.connect(host=\"localhost\", user=\"phpmyadmin\", password=\"<PASSWORD>\", database=\"Puzzlebox\") cursor = conn.cursor() #EXECUTER LA", "commence à zéro, il faut donc décaler d'un le numéro", "= 1 while(counter <= 5): questionAI(1) if (__name__ == '__main__'):", "print(\"*******************************************************************************\") print(\"ENONCE : \", result[0]) print(\"PROPOSITION 1 : \", result[1])", "ne contient pas de questions\") def questionAI(id_theme): i = 0", "conn = mysql.connector.connect(host=\"localhost\", user=\"phpmyadmin\", password=\"<PASSWORD>\", database=\"Puzzlebox\") cursor = conn.cursor() #EXECUTER", "query results #RECUPERATION DES TUPLES result.append(question[1]) result.append(question[2]) result.append(question[3]) result.append(question[4]) result.append(question[5])", "proposition4 = line[5] reponse = line[5] print(\"*******************************************************************************\") print(\" QUESTION \",i,\"", "pas de questions\") def questionAI(id_theme): i = 0 #CONNEXION A", "if rows: for line in rows: i += 1 enonce", "zéro, il faut donc 
décaler d'un le numéro num_question =", "4 : \", result[4]) print(\"REPONSE : \", result[5]) #complete_question =", "import random from voice import synthetize_voice, delete_wav def AllQuestionAI(id_theme): i", "= num_prop + line synthetize_voice(line) delete_wav() def quiz(): counter =", "#RECUPERATION DES INFORMATIONS rows = cursor.fetchall() if rows: for line", "mysql.connector import random from voice import synthetize_voice, delete_wav def AllQuestionAI(id_theme):", "#EXECUTER LA REQUETE AVEC LA BDD query = (\"SELECT *", "for line in rows: i += 1 enonce = line[1]", "= cursor.fetchall() if rows: for line in rows: i +=", "JOIN themes_questions ON Question.ID_QUESTION = themes_questions.ID_QUESTION WHERE ID_THEME=%s\") cursor.execute(query, (id_theme,", "if rows: nb_rows = len(rows) num_question = random.randint(1, nb_rows) #L'index", "print(\"ENONCE : \", enonce) print(\"PROPOSITION 1 : \", proposition1) print(\"PROPOSITION", "+= 1 enonce = line[1] proposition1 = line[2] proposition2 =", "\", reponse) else: print(\"Ce thème ne contient pas de questions\")", "de la liste commence à zéro, il faut donc décaler", "#Tab which stores the query results #RECUPERATION DES TUPLES result.append(question[1])", "rows = cursor.fetchall() if rows: for line in rows: i", "rows: for line in rows: i += 1 enonce =", "faut donc décaler d'un le numéro num_question = num_question -", "last one is the answer print(\"*******************************************************************************\") print(\" QUESTION \",num_question+1,\" \")", "result[0]) print(\"PROPOSITION 1 : \", result[1]) print(\"PROPOSITION 2 : \",", "\"Proposition {} \".format(i) num_prop = ''.join(num_prop) line = ''.join(question[i]) line", "#Convert tuple into string return result else: print(\"Ce thème ne", "stores the query results #RECUPERATION DES TUPLES result.append(question[1]) result.append(question[2]) result.append(question[3])", "print(\"REPONSE : \", reponse) else: print(\"Ce thème ne contient pas", 
"DES TUPLES result.append(question[1]) result.append(question[2]) result.append(question[3]) result.append(question[4]) result.append(question[5]) result.append(question[5]) #This last", "questions\") def questionAI(id_theme): i = 0 #CONNEXION A LA BDD", "\", proposition1) print(\"PROPOSITION 2 : \", proposition2) print(\"PROPOSITION 3 :", "3 : \", proposition3) print(\"PROPOSITION 4 : \", proposition4) print(\"REPONSE", "QUESTION \",i,\" \") print(\"*******************************************************************************\") print(\"ENONCE : \", enonce) print(\"PROPOSITION 1", "result[3]) print(\"PROPOSITION 4 : \", result[4]) print(\"REPONSE : \", result[5])", "return result else: print(\"Ce thème ne contient pas de questions\")", "donc décaler d'un le numéro num_question = num_question - 1", "FROM Question INNER JOIN themes_questions ON Question.ID_QUESTION = themes_questions.ID_QUESTION WHERE", "answer print(\"*******************************************************************************\") print(\" QUESTION \",num_question+1,\" \") print(\"*******************************************************************************\") print(\"ENONCE : \",", "QUESTION \",num_question+1,\" \") print(\"*******************************************************************************\") print(\"ENONCE : \", result[0]) print(\"PROPOSITION 1", "[] #Tab which stores the query results #RECUPERATION DES TUPLES", "rows: i += 1 enonce = line[1] proposition1 = line[2]", "1 : \", proposition1) print(\"PROPOSITION 2 : \", proposition2) print(\"PROPOSITION", "print(\"ENONCE : \", result[0]) print(\"PROPOSITION 1 : \", result[1]) print(\"PROPOSITION", "line[4] proposition4 = line[5] reponse = line[5] print(\"*******************************************************************************\") print(\" QUESTION", "(id_theme, )) #RECUPERATION DES INFORMATIONS rows = cursor.fetchall() if rows:", "\", result[0]) print(\"PROPOSITION 1 : \", result[1]) print(\"PROPOSITION 2 :", "result else: 
print(\"Ce thème ne contient pas de questions\") def", ": \", proposition3) print(\"PROPOSITION 4 : \", proposition4) print(\"REPONSE :", "i += 1 enonce = line[1] proposition1 = line[2] proposition2", "else: print(\"Ce thème ne contient pas de questions\") def tell_question(question):", ")) #RECUPERATION DES INFORMATIONS rows = cursor.fetchall() if rows: for", "= line[1] proposition1 = line[2] proposition2 = line[3] proposition3 =", "print(\"PROPOSITION 3 : \", result[3]) print(\"PROPOSITION 4 : \", result[4])", "reponse) else: print(\"Ce thème ne contient pas de questions\") def", "num_prop = ''.join(num_prop) line = ''.join(question[i]) line = num_prop +", "le numéro num_question = num_question - 1 question = rows[num_question]", "0 #CONNEXION A LA BDD conn = mysql.connector.connect(host=\"localhost\", user=\"phpmyadmin\", password=\"<PASSWORD>\",", "line[3] proposition3 = line[4] proposition4 = line[5] reponse = line[5]", "= \"Proposition {} \".format(i) num_prop = ''.join(num_prop) line = ''.join(question[i])" ]
[ "patch): flux = numpy.zeros_like(cons_minus) flux[:, 1:-1] = simulation.model.riemann_problem_flux(cons_plus [:, 0:-2],", "simulation, tl): alpha = tl.grid.dx / tl.dt flux = numpy.zeros_like(cons_minus)", "simulation.model.cons2all(cons_minus, tl.prim) prim_plus, aux_plus = simulation.model.cons2all(cons_plus , tl.prim) f_minus =", "prim_minus, aux_minus) f_plus = simulation.model.flux(cons_plus, prim_plus, aux_plus ) flux[:, 1:-1]", ") flux[:, 1:-1] = 0.5 * ( (f_plus[:,0:-2] + f_minus[:,1:-1])", "= simulation.model.flux(cons_minus, prim_minus, aux_minus) f_plus = simulation.model.flux(cons_plus, prim_plus, aux_plus )", "aux_minus) f_plus = simulation.model.flux(cons_plus, prim_plus, aux_plus ) flux[:, 1:-1] =", "= numpy.zeros_like(cons_minus) flux[:, 1:-1] = simulation.model.riemann_problem_flux(cons_plus [:, 0:-2], cons_minus[:, 1:-1])", "simulation.model.flux(cons_plus, prim_plus, aux_plus ) flux[:, 1:-1] = 0.5 * (", "upwind(cons_minus, cons_plus, simulation, patch): flux = numpy.zeros_like(cons_minus) flux[:, 1:-1] =", "f_plus = simulation.model.flux(cons_plus, prim_plus, aux_plus ) flux[:, 1:-1] = 0.5", "aux_plus = simulation.model.cons2all(cons_plus , tl.prim) f_minus = simulation.model.flux(cons_minus, prim_minus, aux_minus)", "f_minus = simulation.model.flux(cons_minus, prim_minus, aux_minus) f_plus = simulation.model.flux(cons_plus, prim_plus, aux_plus", "flux def upwind(cons_minus, cons_plus, simulation, patch): flux = numpy.zeros_like(cons_minus) flux[:,", "alpha * (cons_plus[:,0:-2] - cons_minus[:,1:-1]) ) return flux def upwind(cons_minus,", "flux[:, 1:-1] = simulation.model.riemann_problem_flux(cons_plus [:, 0:-2], cons_minus[:, 1:-1]) return flux", "prim_plus, aux_plus ) flux[:, 1:-1] = 0.5 * ( (f_plus[:,0:-2]", "return flux def upwind(cons_minus, cons_plus, simulation, patch): flux = numpy.zeros_like(cons_minus)", "\\ alpha * (cons_plus[:,0:-2] - cons_minus[:,1:-1]) ) return flux def", "import numpy def lax_friedrichs(cons_minus, cons_plus, 
simulation, tl): alpha = tl.grid.dx", "cons_plus, simulation, tl): alpha = tl.grid.dx / tl.dt flux =", "tl.grid.dx / tl.dt flux = numpy.zeros_like(cons_minus) prim_minus, aux_minus = simulation.model.cons2all(cons_minus,", "tl.dt flux = numpy.zeros_like(cons_minus) prim_minus, aux_minus = simulation.model.cons2all(cons_minus, tl.prim) prim_plus,", "aux_minus = simulation.model.cons2all(cons_minus, tl.prim) prim_plus, aux_plus = simulation.model.cons2all(cons_plus , tl.prim)", "f_minus[:,1:-1]) + \\ alpha * (cons_plus[:,0:-2] - cons_minus[:,1:-1]) ) return", "+ f_minus[:,1:-1]) + \\ alpha * (cons_plus[:,0:-2] - cons_minus[:,1:-1]) )", "= numpy.zeros_like(cons_minus) prim_minus, aux_minus = simulation.model.cons2all(cons_minus, tl.prim) prim_plus, aux_plus =", "= simulation.model.cons2all(cons_minus, tl.prim) prim_plus, aux_plus = simulation.model.cons2all(cons_plus , tl.prim) f_minus", "numpy def lax_friedrichs(cons_minus, cons_plus, simulation, tl): alpha = tl.grid.dx /", "= simulation.model.flux(cons_plus, prim_plus, aux_plus ) flux[:, 1:-1] = 0.5 *", "aux_plus ) flux[:, 1:-1] = 0.5 * ( (f_plus[:,0:-2] +", "0.5 * ( (f_plus[:,0:-2] + f_minus[:,1:-1]) + \\ alpha *", "+ \\ alpha * (cons_plus[:,0:-2] - cons_minus[:,1:-1]) ) return flux", "tl.prim) f_minus = simulation.model.flux(cons_minus, prim_minus, aux_minus) f_plus = simulation.model.flux(cons_plus, prim_plus,", "prim_minus, aux_minus = simulation.model.cons2all(cons_minus, tl.prim) prim_plus, aux_plus = simulation.model.cons2all(cons_plus ,", "cons_minus[:,1:-1]) ) return flux def upwind(cons_minus, cons_plus, simulation, patch): flux", "prim_plus, aux_plus = simulation.model.cons2all(cons_plus , tl.prim) f_minus = simulation.model.flux(cons_minus, prim_minus,", "flux = numpy.zeros_like(cons_minus) flux[:, 1:-1] = simulation.model.riemann_problem_flux(cons_plus [:, 0:-2], cons_minus[:,", "numpy.zeros_like(cons_minus) prim_minus, aux_minus = simulation.model.cons2all(cons_minus, tl.prim) prim_plus, aux_plus = 
simulation.model.cons2all(cons_plus", "def lax_friedrichs(cons_minus, cons_plus, simulation, tl): alpha = tl.grid.dx / tl.dt", "flux = numpy.zeros_like(cons_minus) prim_minus, aux_minus = simulation.model.cons2all(cons_minus, tl.prim) prim_plus, aux_plus", "numpy.zeros_like(cons_minus) flux[:, 1:-1] = simulation.model.riemann_problem_flux(cons_plus [:, 0:-2], cons_minus[:, 1:-1]) return", "lax_friedrichs(cons_minus, cons_plus, simulation, tl): alpha = tl.grid.dx / tl.dt flux", "alpha = tl.grid.dx / tl.dt flux = numpy.zeros_like(cons_minus) prim_minus, aux_minus", "* ( (f_plus[:,0:-2] + f_minus[:,1:-1]) + \\ alpha * (cons_plus[:,0:-2]", "simulation.model.cons2all(cons_plus , tl.prim) f_minus = simulation.model.flux(cons_minus, prim_minus, aux_minus) f_plus =", "simulation, patch): flux = numpy.zeros_like(cons_minus) flux[:, 1:-1] = simulation.model.riemann_problem_flux(cons_plus [:,", "(f_plus[:,0:-2] + f_minus[:,1:-1]) + \\ alpha * (cons_plus[:,0:-2] - cons_minus[:,1:-1])", "cons_plus, simulation, patch): flux = numpy.zeros_like(cons_minus) flux[:, 1:-1] = simulation.model.riemann_problem_flux(cons_plus", "def upwind(cons_minus, cons_plus, simulation, patch): flux = numpy.zeros_like(cons_minus) flux[:, 1:-1]", "tl.prim) prim_plus, aux_plus = simulation.model.cons2all(cons_plus , tl.prim) f_minus = simulation.model.flux(cons_minus,", "( (f_plus[:,0:-2] + f_minus[:,1:-1]) + \\ alpha * (cons_plus[:,0:-2] -", "* (cons_plus[:,0:-2] - cons_minus[:,1:-1]) ) return flux def upwind(cons_minus, cons_plus,", ", tl.prim) f_minus = simulation.model.flux(cons_minus, prim_minus, aux_minus) f_plus = simulation.model.flux(cons_plus,", "flux[:, 1:-1] = 0.5 * ( (f_plus[:,0:-2] + f_minus[:,1:-1]) +", ") return flux def upwind(cons_minus, cons_plus, simulation, patch): flux =", "(cons_plus[:,0:-2] - cons_minus[:,1:-1]) ) return flux def upwind(cons_minus, cons_plus, simulation,", "= simulation.model.cons2all(cons_plus , tl.prim) f_minus = simulation.model.flux(cons_minus, prim_minus, 
aux_minus) f_plus", "= tl.grid.dx / tl.dt flux = numpy.zeros_like(cons_minus) prim_minus, aux_minus =", "tl): alpha = tl.grid.dx / tl.dt flux = numpy.zeros_like(cons_minus) prim_minus,", "= 0.5 * ( (f_plus[:,0:-2] + f_minus[:,1:-1]) + \\ alpha", "- cons_minus[:,1:-1]) ) return flux def upwind(cons_minus, cons_plus, simulation, patch):", "1:-1] = 0.5 * ( (f_plus[:,0:-2] + f_minus[:,1:-1]) + \\", "simulation.model.flux(cons_minus, prim_minus, aux_minus) f_plus = simulation.model.flux(cons_plus, prim_plus, aux_plus ) flux[:,", "/ tl.dt flux = numpy.zeros_like(cons_minus) prim_minus, aux_minus = simulation.model.cons2all(cons_minus, tl.prim)" ]
[ "None: return _decode_auth(auth_data, server) creds_store = config.get('credsStore') if creds_store is", "data = json.loads(stdout) return { 'Username': data['Username'], 'Password': data['<PASSWORD>'], 'ServerAddress':", "'Username': data['Username'], 'Password': data['<PASSWORD>'], 'ServerAddress': server, } def _decode_auth(auth_data, server):", "path = os.path.expanduser('~/.docker/config.json') if not os.path.exists(path): return {} with codecs.open(path,", "is None: return None server_auth = config_auths.get(server) if server_auth is", "not name: return 'docker.io' else: return registry def encode_header(auth): json_data", "asyncio import subprocess _PREFIX = 'docker-credential-' def read_config(): path =", "import asyncio import subprocess _PREFIX = 'docker-credential-' def read_config(): path", "return _decode_auth(auth_data, server) creds_store = config.get('credsStore') if creds_store is not", "def read_config(): path = os.path.expanduser('~/.docker/config.json') if not os.path.exists(path): return {}", "f.read() return json.loads(json_data) async def _read_creds(creds_store, server): if not re.match(r'^\\w+$',", "0: return None else: data = json.loads(stdout) return { 'Username':", "json_data = f.read() return json.loads(json_data) async def _read_creds(creds_store, server): if", "os.path import asyncio import subprocess _PREFIX = 'docker-credential-' def read_config():", "auth_data_decoded.partition(':') return { 'Username': username, 'Password': password, 'ServerAddress': server, }", "def _decode_auth(auth_data, server): auth_data_decoded = base64.b64decode(auth_data).decode('utf-8') username, _, password =", "stdout, stderr = await proc.communicate(server.encode('ascii')) if proc.returncode != 0: return", "server): config_auths = config.get('auths') if config_auths is None: return None", "'docker-credential-' def read_config(): path = os.path.expanduser('~/.docker/config.json') if not os.path.exists(path): return", "return { 'Username': 
data['Username'], 'Password': data['<PASSWORD>'], 'ServerAddress': server, } def", "config.get('auths') if config_auths is None: return None server_auth = config_auths.get(server)", "} async def resolve_auth(config, server): config_auths = config.get('auths') if config_auths", "return {} with codecs.open(path, encoding='utf-8') as f: json_data = f.read()", "read_config(): path = os.path.expanduser('~/.docker/config.json') if not os.path.exists(path): return {} with", "server_auth = config_auths.get(server) if server_auth is not None: auth_data =", "_decode_auth(auth_data, server) creds_store = config.get('credsStore') if creds_store is not None:", "data['Username'], 'Password': data['<PASSWORD>'], 'ServerAddress': server, } def _decode_auth(auth_data, server): auth_data_decoded", "None server_auth = config_auths.get(server) if server_auth is not None: auth_data", "return None server_auth = config_auths.get(server) if server_auth is not None:", "else: return registry def encode_header(auth): json_data = json.dumps(auth) return base64.urlsafe_b64encode(json_data.encode('ascii'))", "os.path.expanduser('~/.docker/config.json') if not os.path.exists(path): return {} with codecs.open(path, encoding='utf-8') as", "if proc.returncode != 0: return None else: data = json.loads(stdout)", "if auth_data is not None: return _decode_auth(auth_data, server) creds_store =", "= config.get('auths') if config_auths is None: return None server_auth =", "if not re.match(r'^\\w+$', creds_store, re.ASCII): raise ValueError('Invalid credsStore: {!r}'.format(creds_store)) proc", "registry, _, name = image_name.partition('/') if not name: return 'docker.io'", "import codecs import os.path import asyncio import subprocess _PREFIX =", "codecs import os.path import asyncio import subprocess _PREFIX = 'docker-credential-'", "json import base64 import codecs import os.path import asyncio import", "if not os.path.exists(path): return {} with codecs.open(path, encoding='utf-8') as f:", "await 
_read_creds(creds_store, server) return None def server_name(image_name): registry, _, name", "= image_name.partition('/') if not name: return 'docker.io' else: return registry", "creds_store is not None: return await _read_creds(creds_store, server) return None", "proc = await asyncio.create_subprocess_exec( _PREFIX + creds_store, 'get', stdin=subprocess.PIPE, stdout=subprocess.PIPE,", "config_auths is None: return None server_auth = config_auths.get(server) if server_auth", "return None else: data = json.loads(stdout) return { 'Username': data['Username'],", "stderr=subprocess.PIPE, ) stdout, stderr = await proc.communicate(server.encode('ascii')) if proc.returncode !=", "{} with codecs.open(path, encoding='utf-8') as f: json_data = f.read() return", "username, _, password = auth_data_decoded.partition(':') return { 'Username': username, 'Password':", "return await _read_creds(creds_store, server) return None def server_name(image_name): registry, _,", "_, password = auth_data_decoded.partition(':') return { 'Username': username, 'Password': password,", "= config_auths.get(server) if server_auth is not None: auth_data = server_auth.get('auth')", "with codecs.open(path, encoding='utf-8') as f: json_data = f.read() return json.loads(json_data)", "else: data = json.loads(stdout) return { 'Username': data['Username'], 'Password': data['<PASSWORD>'],", "not None: return _decode_auth(auth_data, server) creds_store = config.get('credsStore') if creds_store", "not None: auth_data = server_auth.get('auth') if auth_data is not None:", "if not name: return 'docker.io' else: return registry def encode_header(auth):", "if creds_store is not None: return await _read_creds(creds_store, server) return", "async def _read_creds(creds_store, server): if not re.match(r'^\\w+$', creds_store, re.ASCII): raise", "ValueError('Invalid credsStore: {!r}'.format(creds_store)) proc = await asyncio.create_subprocess_exec( _PREFIX + creds_store,", "import os.path import asyncio import 
subprocess _PREFIX = 'docker-credential-' def", "is not None: return _decode_auth(auth_data, server) creds_store = config.get('credsStore') if", "is not None: return await _read_creds(creds_store, server) return None def", "subprocess _PREFIX = 'docker-credential-' def read_config(): path = os.path.expanduser('~/.docker/config.json') if", "{ 'Username': username, 'Password': password, 'ServerAddress': server, } async def", "stderr = await proc.communicate(server.encode('ascii')) if proc.returncode != 0: return None", "= await proc.communicate(server.encode('ascii')) if proc.returncode != 0: return None else:", "base64.b64decode(auth_data).decode('utf-8') username, _, password = auth_data_decoded.partition(':') return { 'Username': username,", "_read_creds(creds_store, server): if not re.match(r'^\\w+$', creds_store, re.ASCII): raise ValueError('Invalid credsStore:", "<gh_stars>1-10 import re import json import base64 import codecs import", "'Username': username, 'Password': password, 'ServerAddress': server, } async def resolve_auth(config,", "creds_store = config.get('credsStore') if creds_store is not None: return await", "async def resolve_auth(config, server): config_auths = config.get('auths') if config_auths is", "{ 'Username': data['Username'], 'Password': data['<PASSWORD>'], 'ServerAddress': server, } def _decode_auth(auth_data,", "password = auth_data_decoded.partition(':') return { 'Username': username, 'Password': password, 'ServerAddress':", "None else: data = json.loads(stdout) return { 'Username': data['Username'], 'Password':", "return 'docker.io' else: return registry def encode_header(auth): json_data = json.dumps(auth)", "as f: json_data = f.read() return json.loads(json_data) async def _read_creds(creds_store,", "None: auth_data = server_auth.get('auth') if auth_data is not None: return", "json.loads(json_data) async def _read_creds(creds_store, server): if not re.match(r'^\\w+$', creds_store, re.ASCII):", "= 'docker-credential-' def 
read_config(): path = os.path.expanduser('~/.docker/config.json') if not os.path.exists(path):", "'ServerAddress': server, } async def resolve_auth(config, server): config_auths = config.get('auths')", "server) return None def server_name(image_name): registry, _, name = image_name.partition('/')", "import subprocess _PREFIX = 'docker-credential-' def read_config(): path = os.path.expanduser('~/.docker/config.json')", "creds_store, 'get', stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) stdout, stderr = await", "f: json_data = f.read() return json.loads(json_data) async def _read_creds(creds_store, server):", "} def _decode_auth(auth_data, server): auth_data_decoded = base64.b64decode(auth_data).decode('utf-8') username, _, password", "config_auths = config.get('auths') if config_auths is None: return None server_auth", "not None: return await _read_creds(creds_store, server) return None def server_name(image_name):", "= f.read() return json.loads(json_data) async def _read_creds(creds_store, server): if not", ") stdout, stderr = await proc.communicate(server.encode('ascii')) if proc.returncode != 0:", "name = image_name.partition('/') if not name: return 'docker.io' else: return", "= json.loads(stdout) return { 'Username': data['Username'], 'Password': data['<PASSWORD>'], 'ServerAddress': server,", "encoding='utf-8') as f: json_data = f.read() return json.loads(json_data) async def", "= server_auth.get('auth') if auth_data is not None: return _decode_auth(auth_data, server)", "if config_auths is None: return None server_auth = config_auths.get(server) if", "_decode_auth(auth_data, server): auth_data_decoded = base64.b64decode(auth_data).decode('utf-8') username, _, password = auth_data_decoded.partition(':')", "return None def server_name(image_name): registry, _, name = image_name.partition('/') if", "auth_data is not None: return _decode_auth(auth_data, server) creds_store = config.get('credsStore')", "asyncio.create_subprocess_exec( 
_PREFIX + creds_store, 'get', stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) stdout,", "None: return None server_auth = config_auths.get(server) if server_auth is not", "server_auth is not None: auth_data = server_auth.get('auth') if auth_data is", "import json import base64 import codecs import os.path import asyncio", "base64 import codecs import os.path import asyncio import subprocess _PREFIX", "server, } async def resolve_auth(config, server): config_auths = config.get('auths') if", "proc.communicate(server.encode('ascii')) if proc.returncode != 0: return None else: data =", "re import json import base64 import codecs import os.path import", "username, 'Password': password, 'ServerAddress': server, } async def resolve_auth(config, server):", "raise ValueError('Invalid credsStore: {!r}'.format(creds_store)) proc = await asyncio.create_subprocess_exec( _PREFIX +", "_PREFIX + creds_store, 'get', stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) stdout, stderr", "os.path.exists(path): return {} with codecs.open(path, encoding='utf-8') as f: json_data =", "= base64.b64decode(auth_data).decode('utf-8') username, _, password = auth_data_decoded.partition(':') return { 'Username':", "server): if not re.match(r'^\\w+$', creds_store, re.ASCII): raise ValueError('Invalid credsStore: {!r}'.format(creds_store))", "codecs.open(path, encoding='utf-8') as f: json_data = f.read() return json.loads(json_data) async", "json.loads(stdout) return { 'Username': data['Username'], 'Password': data['<PASSWORD>'], 'ServerAddress': server, }", "if server_auth is not None: auth_data = server_auth.get('auth') if auth_data", "return json.loads(json_data) async def _read_creds(creds_store, server): if not re.match(r'^\\w+$', creds_store,", "is not None: auth_data = server_auth.get('auth') if auth_data is not", "import base64 import codecs import os.path import asyncio import subprocess", "= os.path.expanduser('~/.docker/config.json') if not 
os.path.exists(path): return {} with codecs.open(path, encoding='utf-8')", "= config.get('credsStore') if creds_store is not None: return await _read_creds(creds_store,", "{!r}'.format(creds_store)) proc = await asyncio.create_subprocess_exec( _PREFIX + creds_store, 'get', stdin=subprocess.PIPE,", "import re import json import base64 import codecs import os.path", "'Password': password, 'ServerAddress': server, } async def resolve_auth(config, server): config_auths", "+ creds_store, 'get', stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) stdout, stderr =", "return { 'Username': username, 'Password': password, 'ServerAddress': server, } async", "resolve_auth(config, server): config_auths = config.get('auths') if config_auths is None: return", "!= 0: return None else: data = json.loads(stdout) return {", "config.get('credsStore') if creds_store is not None: return await _read_creds(creds_store, server)", "server): auth_data_decoded = base64.b64decode(auth_data).decode('utf-8') username, _, password = auth_data_decoded.partition(':') return", "credsStore: {!r}'.format(creds_store)) proc = await asyncio.create_subprocess_exec( _PREFIX + creds_store, 'get',", "name: return 'docker.io' else: return registry def encode_header(auth): json_data =", "re.ASCII): raise ValueError('Invalid credsStore: {!r}'.format(creds_store)) proc = await asyncio.create_subprocess_exec( _PREFIX", "not re.match(r'^\\w+$', creds_store, re.ASCII): raise ValueError('Invalid credsStore: {!r}'.format(creds_store)) proc =", "= auth_data_decoded.partition(':') return { 'Username': username, 'Password': password, 'ServerAddress': server,", "await asyncio.create_subprocess_exec( _PREFIX + creds_store, 'get', stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, )", "_, name = image_name.partition('/') if not name: return 'docker.io' else:", "await proc.communicate(server.encode('ascii')) if proc.returncode != 0: return None else: data", "password, 
'ServerAddress': server, } async def resolve_auth(config, server): config_auths =", "def server_name(image_name): registry, _, name = image_name.partition('/') if not name:", "image_name.partition('/') if not name: return 'docker.io' else: return registry def", "auth_data_decoded = base64.b64decode(auth_data).decode('utf-8') username, _, password = auth_data_decoded.partition(':') return {", "data['<PASSWORD>'], 'ServerAddress': server, } def _decode_auth(auth_data, server): auth_data_decoded = base64.b64decode(auth_data).decode('utf-8')", "None: return await _read_creds(creds_store, server) return None def server_name(image_name): registry,", "config_auths.get(server) if server_auth is not None: auth_data = server_auth.get('auth') if", "def _read_creds(creds_store, server): if not re.match(r'^\\w+$', creds_store, re.ASCII): raise ValueError('Invalid", "proc.returncode != 0: return None else: data = json.loads(stdout) return", "'get', stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) stdout, stderr = await proc.communicate(server.encode('ascii'))", "re.match(r'^\\w+$', creds_store, re.ASCII): raise ValueError('Invalid credsStore: {!r}'.format(creds_store)) proc = await", "server_auth.get('auth') if auth_data is not None: return _decode_auth(auth_data, server) creds_store", "def resolve_auth(config, server): config_auths = config.get('auths') if config_auths is None:", "'docker.io' else: return registry def encode_header(auth): json_data = json.dumps(auth) return", "_PREFIX = 'docker-credential-' def read_config(): path = os.path.expanduser('~/.docker/config.json') if not", "None def server_name(image_name): registry, _, name = image_name.partition('/') if not", "server, } def _decode_auth(auth_data, server): auth_data_decoded = base64.b64decode(auth_data).decode('utf-8') username, _,", "'Password': data['<PASSWORD>'], 'ServerAddress': server, } def _decode_auth(auth_data, server): auth_data_decoded =", "auth_data = server_auth.get('auth') if 
auth_data is not None: return _decode_auth(auth_data,", "stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) stdout, stderr = await proc.communicate(server.encode('ascii')) if proc.returncode", "'ServerAddress': server, } def _decode_auth(auth_data, server): auth_data_decoded = base64.b64decode(auth_data).decode('utf-8') username,", "server) creds_store = config.get('credsStore') if creds_store is not None: return", "server_name(image_name): registry, _, name = image_name.partition('/') if not name: return", "_read_creds(creds_store, server) return None def server_name(image_name): registry, _, name =", "stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) stdout, stderr = await proc.communicate(server.encode('ascii')) if", "creds_store, re.ASCII): raise ValueError('Invalid credsStore: {!r}'.format(creds_store)) proc = await asyncio.create_subprocess_exec(", "= await asyncio.create_subprocess_exec( _PREFIX + creds_store, 'get', stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,", "not os.path.exists(path): return {} with codecs.open(path, encoding='utf-8') as f: json_data" ]
[ "changes': 3, 'unknown': 999 } map_wl_well_chars = mapping_factory(WL_WELL_CHARS) def to_flag(flag):", "map_qw_well_chars(ml_data['qw_well_chars']) mapped_data['QW_WELL_PURPOSE'] = map_well_purpose(ml_data['qw_well_purpose']) mapped_data['QW_SYS_NAME'] = ml_data['qw_network_name'] mapped_data['WL_SN_FLAG'] = to_flag(ml_data['wl_sn_flag'])", "ml_data['agency']['agency_nm'] mapped_data['AGENCY_MED'] = ml_data['agency']['agency_med'] mapped_data['SITE_NO'] = ml_data['site_no'] mapped_data['SITE_NAME'] = ml_data['site_name']", "= ml_data['well_depth'] mapped_data['LINK'] = ml_data['link'] mapped_data['INSERT_DATE'] = ml_data['insert_date'] mapped_data['UPDATE_DATE'] =", "mapped_data['LOCAL_AQUIFER_NAME'] = ml_data['local_aquifer_name'] mapped_data['AQFR_CHAR'] = ml_data['aqfr_type'] mapped_data['QW_SN_FLAG'] = to_flag(ml_data['qw_sn_flag']) mapped_data['QW_BASELINE_FLAG']", "{ 'dedicated monitoring/observation': 1, 'other': 2 } map_well_purpose = mapping_factory(WELL_PURPOSE)", "if re.match(r\".*:\\d\\dZ$\", mapped_data['INSERT_DATE']): mapped_data['INSERT_DATE'] = mapped_data['INSERT_DATE'][:-1] + \".0Z\" if re.match(r\".*:\\d\\dZ$\",", "ml_data['link'] mapped_data['INSERT_DATE'] = ml_data['insert_date'] mapped_data['UPDATE_DATE'] = ml_data['update_date'] mapped_data['WL_WELL_PURPOSE_NOTES'] = ml_data['wl_well_purpose_notes']", "= to_flag(ml_data['wl_sn_flag']) mapped_data['WL_BASELINE_FLAG'] = to_flag(ml_data['wl_baseline_flag']) mapped_data['WL_WELL_CHARS'] = map_wl_well_chars(ml_data['wl_well_chars']) mapped_data['WL_WELL_PURPOSE'] =", "= ml_data['qw_well_purpose_notes'] mapped_data['INSERT_USER_ID'] = ml_data['insert_user'] mapped_data['UPDATE_USER_ID'] = ml_data['update_user'] mapped_data['WL_WELL_TYPE'] =", "(AttributeError, KeyError, TypeError): mapped_data['COUNTY_CD'] = None try: mapped_data['COUNTRY_CD'] = ml_data['country']['country_cd']", "to_flag(ml_data['wl_sn_flag']) mapped_data['WL_BASELINE_FLAG'] = to_flag(ml_data['wl_baseline_flag']) 
mapped_data['WL_WELL_CHARS'] = map_wl_well_chars(ml_data['wl_well_chars']) mapped_data['WL_WELL_PURPOSE'] = map_well_purpose(ml_data['wl_well_purpose'])", "if ml_data['altitude_units'] else None mapped_data['SITE_TYPE'] = ml_data['site_type'] mapped_data['HORZ_METHOD'] = ml_data['horz_method']", "ml_data['state']['state_cd'] except (AttributeError, KeyError, TypeError): mapped_data['STATE_CD'] = None try: mapped_data['COUNTY_CD']", "with the WELL_REGISTRY_STG table. \"\"\" import re def mapping_factory(mapping): def", "key values. \"\"\" mapped_data = dict() mapped_data['AGENCY_CD'] = ml_data['agency']['agency_cd'] mapped_data['AGENCY_NM']", "{ 'surveillance': 1, 'trend': 2, 'special': 3, } map_well_type =", "mapped_data['NAT_AQFR_DESC'] = ml_data['nat_aqfr']['nat_aqfr_desc'] except (AttributeError, KeyError, TypeError): mapped_data['NAT_AQUIFER_CD'] = None", "} map_well_purpose = mapping_factory(WELL_PURPOSE) QW_WELL_CHARS = { 'background': 1, 'suspected/anticipated", "if flag else '0' def transform_mon_loc_data(ml_data): \"\"\" Map the fields", "= None return ora_val return map_func WELL_TYPES = { 'surveillance':", "dict() mapped_data['AGENCY_CD'] = ml_data['agency']['agency_cd'] mapped_data['AGENCY_NM'] = ml_data['agency']['agency_nm'] mapped_data['AGENCY_MED'] = ml_data['agency']['agency_med']", "= ml_data['altitude_units']['unit_id'] if ml_data['altitude_units'] else None mapped_data['SITE_TYPE'] = ml_data['site_type'] mapped_data['HORZ_METHOD']", "mapped_data['DEC_LONG_VA'] = ml_data['dec_long_va'] mapped_data['HORZ_DATUM'] = ml_data['horizontal_datum'] mapped_data['ALT_VA'] = ml_data['alt_va'] mapped_data['ALT_DATUM_CD']", "mapped_data['SITE_NO'] = ml_data['site_no'] mapped_data['SITE_NAME'] = ml_data['site_name'] mapped_data['DEC_LAT_VA'] = ml_data['dec_lat_va'] mapped_data['DEC_LONG_VA']", "999 } map_wl_well_chars = mapping_factory(WL_WELL_CHARS) def to_flag(flag): return '1' if", "= None try: mapped_data['COUNTY_CD'] = ml_data['county']['county_cd'] 
except (AttributeError, KeyError, TypeError):", "= { 'dedicated monitoring/observation': 1, 'other': 2 } map_well_purpose =", "= ml_data['dec_long_va'] mapped_data['HORZ_DATUM'] = ml_data['horizontal_datum'] mapped_data['ALT_VA'] = ml_data['alt_va'] mapped_data['ALT_DATUM_CD'] =", "= ml_data['link'] mapped_data['INSERT_DATE'] = ml_data['insert_date'] mapped_data['UPDATE_DATE'] = ml_data['update_date'] mapped_data['WL_WELL_PURPOSE_NOTES'] =", "= { 'surveillance': 1, 'trend': 2, 'special': 3, } map_well_type", "= ml_data['alt_va'] mapped_data['ALT_DATUM_CD'] = ml_data['altitude_datum'] try: mapped_data['NAT_AQUIFER_CD'] = ml_data['nat_aqfr']['nat_aqfr_cd'] mapped_data['NAT_AQFR_DESC']", "= None mapped_data['QW_DATA_PROVIDER'] = None mapped_data['LITH_DATA_PROVIDER'] = None mapped_data['CONST_DATA_PROVIDER'] =", "changes': 3 } map_qw_well_chars = mapping_factory(QW_WELL_CHARS) WL_WELL_CHARS = { 'background':", "3 } map_qw_well_chars = mapping_factory(QW_WELL_CHARS) WL_WELL_CHARS = { 'background': 1,", "ml_data['local_aquifer_name'] mapped_data['AQFR_CHAR'] = ml_data['aqfr_type'] mapped_data['QW_SN_FLAG'] = to_flag(ml_data['qw_sn_flag']) mapped_data['QW_BASELINE_FLAG'] = to_flag(ml_data['qw_baseline_flag'])", "= ml_data['wl_network_name'] mapped_data['DATA_PROVIDER'] = None mapped_data['DISPLAY_FLAG'] = to_flag(ml_data['display_flag']) mapped_data['WL_DATA_PROVIDER'] =", "None mapped_data['QW_DATA_PROVIDER'] = None mapped_data['LITH_DATA_PROVIDER'] = None mapped_data['CONST_DATA_PROVIDER'] = None", "except (AttributeError, KeyError, TypeError): mapped_data['NAT_AQUIFER_CD'] = None mapped_data['NAT_AQFR_DESC'] = None", "# fix missing fractions of a second if re.match(r\".*:\\d\\dZ$\", mapped_data['INSERT_DATE']):", "not None: ora_val = mapping.get(key.lower()) else: ora_val = None return", "into a form that works with the WELL_REGISTRY_STG table. 
\"\"\"", "ml_data['qw_network_name'] mapped_data['WL_SN_FLAG'] = to_flag(ml_data['wl_sn_flag']) mapped_data['WL_BASELINE_FLAG'] = to_flag(ml_data['wl_baseline_flag']) mapped_data['WL_WELL_CHARS'] = map_wl_well_chars(ml_data['wl_well_chars'])", "= ml_data['country']['country_cd'] except (AttributeError, KeyError, TypeError): mapped_data['COUNTRY_CD'] = None mapped_data['WELL_DEPTH_UNITS']", "= ml_data['agency']['agency_med'] mapped_data['SITE_NO'] = ml_data['site_no'] mapped_data['SITE_NAME'] = ml_data['site_name'] mapped_data['DEC_LAT_VA'] =", "= None mapped_data['WELL_DEPTH_UNITS'] = ml_data['well_depth_units']['unit_id'] if ml_data['well_depth_units'] else None mapped_data['ALT_UNITS']", "None mapped_data['NAT_AQFR_DESC'] = None mapped_data['LOCAL_AQUIFER_NAME'] = ml_data['local_aquifer_name'] mapped_data['AQFR_CHAR'] = ml_data['aqfr_type']", "mapped_data['SITE_NAME'] = ml_data['site_name'] mapped_data['DEC_LAT_VA'] = ml_data['dec_lat_va'] mapped_data['DEC_LONG_VA'] = ml_data['dec_long_va'] mapped_data['HORZ_DATUM']", "QW_WELL_CHARS = { 'background': 1, 'suspected/anticipated changes': 2, 'known changes':", "mapped_data['QW_SN_FLAG'] = to_flag(ml_data['qw_sn_flag']) mapped_data['QW_BASELINE_FLAG'] = to_flag(ml_data['qw_baseline_flag']) mapped_data['QW_WELL_CHARS'] = map_qw_well_chars(ml_data['qw_well_chars']) mapped_data['QW_WELL_PURPOSE']", "\"\"\" mapped_data = dict() mapped_data['AGENCY_CD'] = ml_data['agency']['agency_cd'] mapped_data['AGENCY_NM'] = ml_data['agency']['agency_nm']", "= None mapped_data['CONST_DATA_PROVIDER'] = None mapped_data['WELL_DEPTH'] = ml_data['well_depth'] mapped_data['LINK'] =", "map_well_purpose(ml_data['wl_well_purpose']) mapped_data['WL_SYS_NAME'] = ml_data['wl_network_name'] mapped_data['DATA_PROVIDER'] = None mapped_data['DISPLAY_FLAG'] = to_flag(ml_data['display_flag'])", "'suspected/anticipated changes': 2, 'known changes': 3, 'unknown': 999 } map_wl_well_chars", "foreign key values. 
\"\"\" mapped_data = dict() mapped_data['AGENCY_CD'] = ml_data['agency']['agency_cd']", "ml_data['alt_acy'] return mapped_data def date_format(mapped_data): # fix missing fractions of", "table with appropriate foreign key values. \"\"\" mapped_data = dict()", "None mapped_data['WELL_DEPTH'] = ml_data['well_depth'] mapped_data['LINK'] = ml_data['link'] mapped_data['INSERT_DATE'] = ml_data['insert_date']", "mapping_factory(QW_WELL_CHARS) WL_WELL_CHARS = { 'background': 1, 'suspected/anticipated changes': 2, 'known", "fractions of a second if re.match(r\".*:\\d\\dZ$\", mapped_data['INSERT_DATE']): mapped_data['INSERT_DATE'] = mapped_data['INSERT_DATE'][:-1]", "mapped_data['AGENCY_MED'] = ml_data['agency']['agency_med'] mapped_data['SITE_NO'] = ml_data['site_no'] mapped_data['SITE_NAME'] = ml_data['site_name'] mapped_data['DEC_LAT_VA']", "(AttributeError, KeyError, TypeError): mapped_data['NAT_AQUIFER_CD'] = None mapped_data['NAT_AQFR_DESC'] = None mapped_data['LOCAL_AQUIFER_NAME']", "None return ora_val return map_func WELL_TYPES = { 'surveillance': 1,", "Transform the data into a form that works with the", "ora_val = mapping.get(key.lower()) else: ora_val = None return ora_val return", "WELL_REGISTRY_STG table. 
\"\"\" import re def mapping_factory(mapping): def map_func(key): if", "mapped_data['QW_WELL_PURPOSE'] = map_well_purpose(ml_data['qw_well_purpose']) mapped_data['QW_SYS_NAME'] = ml_data['qw_network_name'] mapped_data['WL_SN_FLAG'] = to_flag(ml_data['wl_sn_flag']) mapped_data['WL_BASELINE_FLAG']", "date_format(mapped_data): # fix missing fractions of a second if re.match(r\".*:\\d\\dZ$\",", "the API JSON response to the fields in the WELL_REGISTRY_STG", "fields in the WELL_REGISTRY_STG table with appropriate foreign key values.", "+ \".0Z\" if re.match(r\".*:\\d\\dZ$\", mapped_data['UPDATE_DATE']): mapped_data['UPDATE_DATE'] = mapped_data['UPDATE_DATE'][:-1] + \".0Z\"", "mapped_data['INSERT_DATE']): mapped_data['INSERT_DATE'] = mapped_data['INSERT_DATE'][:-1] + \".0Z\" if re.match(r\".*:\\d\\dZ$\", mapped_data['UPDATE_DATE']): mapped_data['UPDATE_DATE']", "Map the fields from the API JSON response to the", "a form that works with the WELL_REGISTRY_STG table. \"\"\" import", "if key is not None: ora_val = mapping.get(key.lower()) else: ora_val", "ml_data['aqfr_type'] mapped_data['QW_SN_FLAG'] = to_flag(ml_data['qw_sn_flag']) mapped_data['QW_BASELINE_FLAG'] = to_flag(ml_data['qw_baseline_flag']) mapped_data['QW_WELL_CHARS'] = map_qw_well_chars(ml_data['qw_well_chars'])", "ml_data['insert_user'] mapped_data['UPDATE_USER_ID'] = ml_data['update_user'] mapped_data['WL_WELL_TYPE'] = map_well_type(ml_data['wl_well_type']) mapped_data['QW_WELL_TYPE'] = map_well_type(ml_data['qw_well_type'])", "'known changes': 3 } map_qw_well_chars = mapping_factory(QW_WELL_CHARS) WL_WELL_CHARS = {", "mapped_data['WL_SYS_NAME'] = ml_data['wl_network_name'] mapped_data['DATA_PROVIDER'] = None mapped_data['DISPLAY_FLAG'] = to_flag(ml_data['display_flag']) mapped_data['WL_DATA_PROVIDER']", "try: mapped_data['STATE_CD'] = ml_data['state']['state_cd'] except (AttributeError, KeyError, TypeError): mapped_data['STATE_CD'] =", "= mapping_factory(WELL_TYPES) WELL_PURPOSE = { 'dedicated 
# Oracle foreign-key codes for the well "type" domain.
WELL_TYPES = {
    'surveillance': 1,
    'trend': 2,
    'special': 3,
}
map_well_type = mapping_factory(WELL_TYPES)

# Oracle foreign-key codes for the well "purpose" domain.
WELL_PURPOSE = {
    'dedicated monitoring/observation': 1,
    'other': 2,
}
map_well_purpose = mapping_factory(WELL_PURPOSE)

# Oracle foreign-key codes for water-quality well characteristics.
QW_WELL_CHARS = {
    'background': 1,
    'suspected/anticipated changes': 2,
    'known changes': 3,
}
map_qw_well_chars = mapping_factory(QW_WELL_CHARS)

# Oracle foreign-key codes for water-level well characteristics;
# unlike the QW table this domain has an explicit 'unknown' code.
WL_WELL_CHARS = {
    'background': 1,
    'suspected/anticipated changes': 2,
    'known changes': 3,
    'unknown': 999,
}
map_wl_well_chars = mapping_factory(WL_WELL_CHARS)
def to_flag(flag):
    """Render any truthy value as the character flag '1', falsy as '0'."""
    if flag:
        return '1'
    return '0'
from the", "of a second if re.match(r\".*:\\d\\dZ$\", mapped_data['INSERT_DATE']): mapped_data['INSERT_DATE'] = mapped_data['INSERT_DATE'][:-1] +", "ml_data['site_type'] mapped_data['HORZ_METHOD'] = ml_data['horz_method'] mapped_data['HORZ_ACY'] = ml_data['horz_acy'] mapped_data['ALT_METHOD'] = ml_data['alt_method']", "= ml_data['horizontal_datum'] mapped_data['ALT_VA'] = ml_data['alt_va'] mapped_data['ALT_DATUM_CD'] = ml_data['altitude_datum'] try: mapped_data['NAT_AQUIFER_CD']", "mapped_data['INSERT_DATE'] = ml_data['insert_date'] mapped_data['UPDATE_DATE'] = ml_data['update_date'] mapped_data['WL_WELL_PURPOSE_NOTES'] = ml_data['wl_well_purpose_notes'] mapped_data['QW_WELL_PURPOSE_NOTES']", "try: mapped_data['COUNTY_CD'] = ml_data['county']['county_cd'] except (AttributeError, KeyError, TypeError): mapped_data['COUNTY_CD'] =", "= ml_data['site_type'] mapped_data['HORZ_METHOD'] = ml_data['horz_method'] mapped_data['HORZ_ACY'] = ml_data['horz_acy'] mapped_data['ALT_METHOD'] =", "map_well_purpose = mapping_factory(WELL_PURPOSE) QW_WELL_CHARS = { 'background': 1, 'suspected/anticipated changes':", "{ 'background': 1, 'suspected/anticipated changes': 2, 'known changes': 3, 'unknown':", "'surveillance': 1, 'trend': 2, 'special': 3, } map_well_type = mapping_factory(WELL_TYPES)", "{ 'background': 1, 'suspected/anticipated changes': 2, 'known changes': 3 }", "= to_flag(ml_data['qw_sn_flag']) mapped_data['QW_BASELINE_FLAG'] = to_flag(ml_data['qw_baseline_flag']) mapped_data['QW_WELL_CHARS'] = map_qw_well_chars(ml_data['qw_well_chars']) mapped_data['QW_WELL_PURPOSE'] =", "transform_mon_loc_data(ml_data): \"\"\" Map the fields from the API JSON response", "'suspected/anticipated changes': 2, 'known changes': 3 } map_qw_well_chars = mapping_factory(QW_WELL_CHARS)", "1, 'trend': 2, 'special': 3, } map_well_type = mapping_factory(WELL_TYPES) WELL_PURPOSE", "= ml_data['state']['state_cd'] except (AttributeError, KeyError, TypeError): mapped_data['STATE_CD'] = None try:", 
"map_well_type = mapping_factory(WELL_TYPES) WELL_PURPOSE = { 'dedicated monitoring/observation': 1, 'other':", "} map_qw_well_chars = mapping_factory(QW_WELL_CHARS) WL_WELL_CHARS = { 'background': 1, 'suspected/anticipated", "ml_data['altitude_units']['unit_id'] if ml_data['altitude_units'] else None mapped_data['SITE_TYPE'] = ml_data['site_type'] mapped_data['HORZ_METHOD'] =", "map_wl_well_chars = mapping_factory(WL_WELL_CHARS) def to_flag(flag): return '1' if flag else", "in the WELL_REGISTRY_STG table with appropriate foreign key values. \"\"\"", "JSON response to the fields in the WELL_REGISTRY_STG table with", "WL_WELL_CHARS = { 'background': 1, 'suspected/anticipated changes': 2, 'known changes':", "= to_flag(ml_data['qw_baseline_flag']) mapped_data['QW_WELL_CHARS'] = map_qw_well_chars(ml_data['qw_well_chars']) mapped_data['QW_WELL_PURPOSE'] = map_well_purpose(ml_data['qw_well_purpose']) mapped_data['QW_SYS_NAME'] =", "'background': 1, 'suspected/anticipated changes': 2, 'known changes': 3 } map_qw_well_chars", "'trend': 2, 'special': 3, } map_well_type = mapping_factory(WELL_TYPES) WELL_PURPOSE =", "the WELL_REGISTRY_STG table with appropriate foreign key values. 
\"\"\" mapped_data", "monitoring/observation': 1, 'other': 2 } map_well_purpose = mapping_factory(WELL_PURPOSE) QW_WELL_CHARS =", "= ml_data['insert_date'] mapped_data['UPDATE_DATE'] = ml_data['update_date'] mapped_data['WL_WELL_PURPOSE_NOTES'] = ml_data['wl_well_purpose_notes'] mapped_data['QW_WELL_PURPOSE_NOTES'] =", "\"\"\" import re def mapping_factory(mapping): def map_func(key): if key is", "= map_well_purpose(ml_data['qw_well_purpose']) mapped_data['QW_SYS_NAME'] = ml_data['qw_network_name'] mapped_data['WL_SN_FLAG'] = to_flag(ml_data['wl_sn_flag']) mapped_data['WL_BASELINE_FLAG'] =", "= dict() mapped_data['AGENCY_CD'] = ml_data['agency']['agency_cd'] mapped_data['AGENCY_NM'] = ml_data['agency']['agency_nm'] mapped_data['AGENCY_MED'] =", "mapped_data['QW_WELL_CHARS'] = map_qw_well_chars(ml_data['qw_well_chars']) mapped_data['QW_WELL_PURPOSE'] = map_well_purpose(ml_data['qw_well_purpose']) mapped_data['QW_SYS_NAME'] = ml_data['qw_network_name'] mapped_data['WL_SN_FLAG']", "ml_data['horz_method'] mapped_data['HORZ_ACY'] = ml_data['horz_acy'] mapped_data['ALT_METHOD'] = ml_data['alt_method'] mapped_data['ALT_ACY'] = ml_data['alt_acy']", "= to_flag(ml_data['wl_baseline_flag']) mapped_data['WL_WELL_CHARS'] = map_wl_well_chars(ml_data['wl_well_chars']) mapped_data['WL_WELL_PURPOSE'] = map_well_purpose(ml_data['wl_well_purpose']) mapped_data['WL_SYS_NAME'] =", "'0' def transform_mon_loc_data(ml_data): \"\"\" Map the fields from the API", "works with the WELL_REGISTRY_STG table. 
\"\"\" import re def mapping_factory(mapping):", "to the fields in the WELL_REGISTRY_STG table with appropriate foreign", "ml_data['alt_method'] mapped_data['ALT_ACY'] = ml_data['alt_acy'] return mapped_data def date_format(mapped_data): # fix", "mapped_data['CONST_DATA_PROVIDER'] = None mapped_data['WELL_DEPTH'] = ml_data['well_depth'] mapped_data['LINK'] = ml_data['link'] mapped_data['INSERT_DATE']", "def transform_mon_loc_data(ml_data): \"\"\" Map the fields from the API JSON", "mapped_data['COUNTRY_CD'] = None mapped_data['WELL_DEPTH_UNITS'] = ml_data['well_depth_units']['unit_id'] if ml_data['well_depth_units'] else None", "= map_wl_well_chars(ml_data['wl_well_chars']) mapped_data['WL_WELL_PURPOSE'] = map_well_purpose(ml_data['wl_well_purpose']) mapped_data['WL_SYS_NAME'] = ml_data['wl_network_name'] mapped_data['DATA_PROVIDER'] =", "None try: mapped_data['COUNTY_CD'] = ml_data['county']['county_cd'] except (AttributeError, KeyError, TypeError): mapped_data['COUNTY_CD']", "= mapping.get(key.lower()) else: ora_val = None return ora_val return map_func", "return ora_val return map_func WELL_TYPES = { 'surveillance': 1, 'trend':", "None mapped_data['ALT_UNITS'] = ml_data['altitude_units']['unit_id'] if ml_data['altitude_units'] else None mapped_data['SITE_TYPE'] =", "ml_data['well_depth'] mapped_data['LINK'] = ml_data['link'] mapped_data['INSERT_DATE'] = ml_data['insert_date'] mapped_data['UPDATE_DATE'] = ml_data['update_date']", "except (AttributeError, KeyError, TypeError): mapped_data['COUNTRY_CD'] = None mapped_data['WELL_DEPTH_UNITS'] = ml_data['well_depth_units']['unit_id']", "= { 'background': 1, 'suspected/anticipated changes': 2, 'known changes': 3,", "'dedicated monitoring/observation': 1, 'other': 2 } map_well_purpose = mapping_factory(WELL_PURPOSE) QW_WELL_CHARS", "ora_val = None return ora_val return map_func WELL_TYPES = {", "mapped_data['COUNTY_CD'] = ml_data['county']['county_cd'] except (AttributeError, KeyError, TypeError): 
mapped_data['COUNTY_CD'] = None", "= ml_data['horz_method'] mapped_data['HORZ_ACY'] = ml_data['horz_acy'] mapped_data['ALT_METHOD'] = ml_data['alt_method'] mapped_data['ALT_ACY'] =", "ml_data['nat_aqfr']['nat_aqfr_desc'] except (AttributeError, KeyError, TypeError): mapped_data['NAT_AQUIFER_CD'] = None mapped_data['NAT_AQFR_DESC'] =", "None mapped_data['SITE_TYPE'] = ml_data['site_type'] mapped_data['HORZ_METHOD'] = ml_data['horz_method'] mapped_data['HORZ_ACY'] = ml_data['horz_acy']", "mapped_data['WELL_DEPTH_UNITS'] = ml_data['well_depth_units']['unit_id'] if ml_data['well_depth_units'] else None mapped_data['ALT_UNITS'] = ml_data['altitude_units']['unit_id']", "mapped_data = dict() mapped_data['AGENCY_CD'] = ml_data['agency']['agency_cd'] mapped_data['AGENCY_NM'] = ml_data['agency']['agency_nm'] mapped_data['AGENCY_MED']", "ml_data['site_name'] mapped_data['DEC_LAT_VA'] = ml_data['dec_lat_va'] mapped_data['DEC_LONG_VA'] = ml_data['dec_long_va'] mapped_data['HORZ_DATUM'] = ml_data['horizontal_datum']", "mapped_data['WL_SN_FLAG'] = to_flag(ml_data['wl_sn_flag']) mapped_data['WL_BASELINE_FLAG'] = to_flag(ml_data['wl_baseline_flag']) mapped_data['WL_WELL_CHARS'] = map_wl_well_chars(ml_data['wl_well_chars']) mapped_data['WL_WELL_PURPOSE']", "TypeError): mapped_data['COUNTY_CD'] = None try: mapped_data['COUNTRY_CD'] = ml_data['country']['country_cd'] except (AttributeError,", "changes': 2, 'known changes': 3, 'unknown': 999 } map_wl_well_chars =", "None mapped_data['REVIEW_FLAG'] = None try: mapped_data['STATE_CD'] = ml_data['state']['state_cd'] except (AttributeError,", "mapped_data['COUNTRY_CD'] = ml_data['country']['country_cd'] except (AttributeError, KeyError, TypeError): mapped_data['COUNTRY_CD'] = None", "the data into a form that works with the WELL_REGISTRY_STG", "= ml_data['update_user'] mapped_data['WL_WELL_TYPE'] = map_well_type(ml_data['wl_well_type']) mapped_data['QW_WELL_TYPE'] = map_well_type(ml_data['qw_well_type']) mapped_data['LOCAL_AQUIFER_CD'] =", 
"map_wl_well_chars(ml_data['wl_well_chars']) mapped_data['WL_WELL_PURPOSE'] = map_well_purpose(ml_data['wl_well_purpose']) mapped_data['WL_SYS_NAME'] = ml_data['wl_network_name'] mapped_data['DATA_PROVIDER'] = None", "mapped_data['DEC_LAT_VA'] = ml_data['dec_lat_va'] mapped_data['DEC_LONG_VA'] = ml_data['dec_long_va'] mapped_data['HORZ_DATUM'] = ml_data['horizontal_datum'] mapped_data['ALT_VA']", "try: mapped_data['NAT_AQUIFER_CD'] = ml_data['nat_aqfr']['nat_aqfr_cd'] mapped_data['NAT_AQFR_DESC'] = ml_data['nat_aqfr']['nat_aqfr_desc'] except (AttributeError, KeyError,", "= ml_data['nat_aqfr']['nat_aqfr_cd'] mapped_data['NAT_AQFR_DESC'] = ml_data['nat_aqfr']['nat_aqfr_desc'] except (AttributeError, KeyError, TypeError): mapped_data['NAT_AQUIFER_CD']", "None mapped_data['WELL_DEPTH_UNITS'] = ml_data['well_depth_units']['unit_id'] if ml_data['well_depth_units'] else None mapped_data['ALT_UNITS'] =", "mapped_data['QW_BASELINE_FLAG'] = to_flag(ml_data['qw_baseline_flag']) mapped_data['QW_WELL_CHARS'] = map_qw_well_chars(ml_data['qw_well_chars']) mapped_data['QW_WELL_PURPOSE'] = map_well_purpose(ml_data['qw_well_purpose']) mapped_data['QW_SYS_NAME']", "\"\"\" Transform the data into a form that works with", "mapped_data['REVIEW_FLAG'] = None try: mapped_data['STATE_CD'] = ml_data['state']['state_cd'] except (AttributeError, KeyError,", "= ml_data['well_depth_units']['unit_id'] if ml_data['well_depth_units'] else None mapped_data['ALT_UNITS'] = ml_data['altitude_units']['unit_id'] if", "'other': 2 } map_well_purpose = mapping_factory(WELL_PURPOSE) QW_WELL_CHARS = { 'background':", "TypeError): mapped_data['NAT_AQUIFER_CD'] = None mapped_data['NAT_AQFR_DESC'] = None mapped_data['LOCAL_AQUIFER_NAME'] = ml_data['local_aquifer_name']", "= None mapped_data['LITH_DATA_PROVIDER'] = None mapped_data['CONST_DATA_PROVIDER'] = None mapped_data['WELL_DEPTH'] =", "mapped_data['QW_WELL_PURPOSE_NOTES'] = ml_data['qw_well_purpose_notes'] mapped_data['INSERT_USER_ID'] = 
ml_data['insert_user'] mapped_data['UPDATE_USER_ID'] = ml_data['update_user'] mapped_data['WL_WELL_TYPE']", "mapped_data['UPDATE_DATE'] = ml_data['update_date'] mapped_data['WL_WELL_PURPOSE_NOTES'] = ml_data['wl_well_purpose_notes'] mapped_data['QW_WELL_PURPOSE_NOTES'] = ml_data['qw_well_purpose_notes'] mapped_data['INSERT_USER_ID']", "= ml_data['update_date'] mapped_data['WL_WELL_PURPOSE_NOTES'] = ml_data['wl_well_purpose_notes'] mapped_data['QW_WELL_PURPOSE_NOTES'] = ml_data['qw_well_purpose_notes'] mapped_data['INSERT_USER_ID'] =", "mapped_data['NAT_AQFR_DESC'] = None mapped_data['LOCAL_AQUIFER_NAME'] = ml_data['local_aquifer_name'] mapped_data['AQFR_CHAR'] = ml_data['aqfr_type'] mapped_data['QW_SN_FLAG']", "ml_data['horizontal_datum'] mapped_data['ALT_VA'] = ml_data['alt_va'] mapped_data['ALT_DATUM_CD'] = ml_data['altitude_datum'] try: mapped_data['NAT_AQUIFER_CD'] =", "mapped_data['ALT_METHOD'] = ml_data['alt_method'] mapped_data['ALT_ACY'] = ml_data['alt_acy'] return mapped_data def date_format(mapped_data):", "= map_well_purpose(ml_data['wl_well_purpose']) mapped_data['WL_SYS_NAME'] = ml_data['wl_network_name'] mapped_data['DATA_PROVIDER'] = None mapped_data['DISPLAY_FLAG'] =", "mapped_data['SITE_TYPE'] = ml_data['site_type'] mapped_data['HORZ_METHOD'] = ml_data['horz_method'] mapped_data['HORZ_ACY'] = ml_data['horz_acy'] mapped_data['ALT_METHOD']", "ml_data['county']['county_cd'] except (AttributeError, KeyError, TypeError): mapped_data['COUNTY_CD'] = None try: mapped_data['COUNTRY_CD']", "else: ora_val = None return ora_val return map_func WELL_TYPES =", "mapped_data['WELL_DEPTH'] = ml_data['well_depth'] mapped_data['LINK'] = ml_data['link'] mapped_data['INSERT_DATE'] = ml_data['insert_date'] mapped_data['UPDATE_DATE']", "KeyError, TypeError): mapped_data['STATE_CD'] = None try: mapped_data['COUNTY_CD'] = ml_data['county']['county_cd'] except", "ml_data['dec_long_va'] mapped_data['HORZ_DATUM'] = ml_data['horizontal_datum'] mapped_data['ALT_VA'] = 
ml_data['alt_va'] mapped_data['ALT_DATUM_CD'] = ml_data['altitude_datum']", "3, } map_well_type = mapping_factory(WELL_TYPES) WELL_PURPOSE = { 'dedicated monitoring/observation':", "re.match(r\".*:\\d\\dZ$\", mapped_data['INSERT_DATE']): mapped_data['INSERT_DATE'] = mapped_data['INSERT_DATE'][:-1] + \".0Z\" if re.match(r\".*:\\d\\dZ$\", mapped_data['UPDATE_DATE']):", "to_flag(flag): return '1' if flag else '0' def transform_mon_loc_data(ml_data): \"\"\"", "except (AttributeError, KeyError, TypeError): mapped_data['COUNTY_CD'] = None try: mapped_data['COUNTRY_CD'] =", "ml_data['insert_date'] mapped_data['UPDATE_DATE'] = ml_data['update_date'] mapped_data['WL_WELL_PURPOSE_NOTES'] = ml_data['wl_well_purpose_notes'] mapped_data['QW_WELL_PURPOSE_NOTES'] = ml_data['qw_well_purpose_notes']", "= mapped_data['INSERT_DATE'][:-1] + \".0Z\" if re.match(r\".*:\\d\\dZ$\", mapped_data['UPDATE_DATE']): mapped_data['UPDATE_DATE'] = mapped_data['UPDATE_DATE'][:-1]", "= map_qw_well_chars(ml_data['qw_well_chars']) mapped_data['QW_WELL_PURPOSE'] = map_well_purpose(ml_data['qw_well_purpose']) mapped_data['QW_SYS_NAME'] = ml_data['qw_network_name'] mapped_data['WL_SN_FLAG'] =", "= to_flag(ml_data['display_flag']) mapped_data['WL_DATA_PROVIDER'] = None mapped_data['QW_DATA_PROVIDER'] = None mapped_data['LITH_DATA_PROVIDER'] =", "mapping.get(key.lower()) else: ora_val = None return ora_val return map_func WELL_TYPES", "ml_data['wl_network_name'] mapped_data['DATA_PROVIDER'] = None mapped_data['DISPLAY_FLAG'] = to_flag(ml_data['display_flag']) mapped_data['WL_DATA_PROVIDER'] = None", "= ml_data['horz_acy'] mapped_data['ALT_METHOD'] = ml_data['alt_method'] mapped_data['ALT_ACY'] = ml_data['alt_acy'] return mapped_data", "response to the fields in the WELL_REGISTRY_STG table with appropriate", "ml_data['dec_lat_va'] mapped_data['DEC_LONG_VA'] = ml_data['dec_long_va'] mapped_data['HORZ_DATUM'] = ml_data['horizontal_datum'] mapped_data['ALT_VA'] = ml_data['alt_va']", "flag else '0' def 
transform_mon_loc_data(ml_data): \"\"\" Map the fields from", "mapped_data['HORZ_METHOD'] = ml_data['horz_method'] mapped_data['HORZ_ACY'] = ml_data['horz_acy'] mapped_data['ALT_METHOD'] = ml_data['alt_method'] mapped_data['ALT_ACY']", "from the API JSON response to the fields in the", "mapped_data['WL_WELL_PURPOSE_NOTES'] = ml_data['wl_well_purpose_notes'] mapped_data['QW_WELL_PURPOSE_NOTES'] = ml_data['qw_well_purpose_notes'] mapped_data['INSERT_USER_ID'] = ml_data['insert_user'] mapped_data['UPDATE_USER_ID']", "= ml_data['alt_method'] mapped_data['ALT_ACY'] = ml_data['alt_acy'] return mapped_data def date_format(mapped_data): #", "= None mapped_data['LOCAL_AQUIFER_NAME'] = ml_data['local_aquifer_name'] mapped_data['AQFR_CHAR'] = ml_data['aqfr_type'] mapped_data['QW_SN_FLAG'] =", "def map_func(key): if key is not None: ora_val = mapping.get(key.lower())", "2, 'known changes': 3, 'unknown': 999 } map_wl_well_chars = mapping_factory(WL_WELL_CHARS)", "mapped_data['DISPLAY_FLAG'] = to_flag(ml_data['display_flag']) mapped_data['WL_DATA_PROVIDER'] = None mapped_data['QW_DATA_PROVIDER'] = None mapped_data['LITH_DATA_PROVIDER']", "mapped_data['WL_DATA_PROVIDER'] = None mapped_data['QW_DATA_PROVIDER'] = None mapped_data['LITH_DATA_PROVIDER'] = None mapped_data['CONST_DATA_PROVIDER']", "= ml_data['aqfr_type'] mapped_data['QW_SN_FLAG'] = to_flag(ml_data['qw_sn_flag']) mapped_data['QW_BASELINE_FLAG'] = to_flag(ml_data['qw_baseline_flag']) mapped_data['QW_WELL_CHARS'] =", "appropriate foreign key values. 
\"\"\" mapped_data = dict() mapped_data['AGENCY_CD'] =", "key is not None: ora_val = mapping.get(key.lower()) else: ora_val =", "'background': 1, 'suspected/anticipated changes': 2, 'known changes': 3, 'unknown': 999", "a second if re.match(r\".*:\\d\\dZ$\", mapped_data['INSERT_DATE']): mapped_data['INSERT_DATE'] = mapped_data['INSERT_DATE'][:-1] + \".0Z\"", "= ml_data['alt_acy'] return mapped_data def date_format(mapped_data): # fix missing fractions", "= mapping_factory(WL_WELL_CHARS) def to_flag(flag): return '1' if flag else '0'", "None: ora_val = mapping.get(key.lower()) else: ora_val = None return ora_val", "ml_data['site_no'] mapped_data['SITE_NAME'] = ml_data['site_name'] mapped_data['DEC_LAT_VA'] = ml_data['dec_lat_va'] mapped_data['DEC_LONG_VA'] = ml_data['dec_long_va']", "TypeError): mapped_data['COUNTRY_CD'] = None mapped_data['WELL_DEPTH_UNITS'] = ml_data['well_depth_units']['unit_id'] if ml_data['well_depth_units'] else", "= ml_data['agency']['agency_cd'] mapped_data['AGENCY_NM'] = ml_data['agency']['agency_nm'] mapped_data['AGENCY_MED'] = ml_data['agency']['agency_med'] mapped_data['SITE_NO'] =", "try: mapped_data['COUNTRY_CD'] = ml_data['country']['country_cd'] except (AttributeError, KeyError, TypeError): mapped_data['COUNTRY_CD'] =", "ml_data['well_depth_units']['unit_id'] if ml_data['well_depth_units'] else None mapped_data['ALT_UNITS'] = ml_data['altitude_units']['unit_id'] if ml_data['altitude_units']", "missing fractions of a second if re.match(r\".*:\\d\\dZ$\", mapped_data['INSERT_DATE']): mapped_data['INSERT_DATE'] =", "TypeError): mapped_data['STATE_CD'] = None try: mapped_data['COUNTY_CD'] = ml_data['county']['county_cd'] except (AttributeError,", "ml_data['update_user'] mapped_data['WL_WELL_TYPE'] = map_well_type(ml_data['wl_well_type']) mapped_data['QW_WELL_TYPE'] = map_well_type(ml_data['qw_well_type']) mapped_data['LOCAL_AQUIFER_CD'] = None", "= ml_data['insert_user'] mapped_data['UPDATE_USER_ID'] = ml_data['update_user'] 
mapped_data['WL_WELL_TYPE'] = map_well_type(ml_data['wl_well_type']) mapped_data['QW_WELL_TYPE'] =", "mapped_data['HORZ_DATUM'] = ml_data['horizontal_datum'] mapped_data['ALT_VA'] = ml_data['alt_va'] mapped_data['ALT_DATUM_CD'] = ml_data['altitude_datum'] try:", "= None mapped_data['REVIEW_FLAG'] = None try: mapped_data['STATE_CD'] = ml_data['state']['state_cd'] except", "WELL_REGISTRY_STG table with appropriate foreign key values. \"\"\" mapped_data =", "} map_wl_well_chars = mapping_factory(WL_WELL_CHARS) def to_flag(flag): return '1' if flag", "'unknown': 999 } map_wl_well_chars = mapping_factory(WL_WELL_CHARS) def to_flag(flag): return '1'", "None mapped_data['LITH_DATA_PROVIDER'] = None mapped_data['CONST_DATA_PROVIDER'] = None mapped_data['WELL_DEPTH'] = ml_data['well_depth']", "mapped_data['STATE_CD'] = None try: mapped_data['COUNTY_CD'] = ml_data['county']['county_cd'] except (AttributeError, KeyError,", "ml_data['agency']['agency_med'] mapped_data['SITE_NO'] = ml_data['site_no'] mapped_data['SITE_NAME'] = ml_data['site_name'] mapped_data['DEC_LAT_VA'] = ml_data['dec_lat_va']", "mapped_data['QW_SYS_NAME'] = ml_data['qw_network_name'] mapped_data['WL_SN_FLAG'] = to_flag(ml_data['wl_sn_flag']) mapped_data['WL_BASELINE_FLAG'] = to_flag(ml_data['wl_baseline_flag']) mapped_data['WL_WELL_CHARS']", "(AttributeError, KeyError, TypeError): mapped_data['COUNTRY_CD'] = None mapped_data['WELL_DEPTH_UNITS'] = ml_data['well_depth_units']['unit_id'] if", "} map_well_type = mapping_factory(WELL_TYPES) WELL_PURPOSE = { 'dedicated monitoring/observation': 1,", "= mapping_factory(WELL_PURPOSE) QW_WELL_CHARS = { 'background': 1, 'suspected/anticipated changes': 2,", "1, 'suspected/anticipated changes': 2, 'known changes': 3, 'unknown': 999 }", "= None mapped_data['NAT_AQFR_DESC'] = None mapped_data['LOCAL_AQUIFER_NAME'] = ml_data['local_aquifer_name'] mapped_data['AQFR_CHAR'] =", "None mapped_data['CONST_DATA_PROVIDER'] = None mapped_data['WELL_DEPTH'] = ml_data['well_depth'] 
mapped_data['LINK'] = ml_data['link']", "2, 'special': 3, } map_well_type = mapping_factory(WELL_TYPES) WELL_PURPOSE = {", "2 } map_well_purpose = mapping_factory(WELL_PURPOSE) QW_WELL_CHARS = { 'background': 1,", "mapped_data['AGENCY_NM'] = ml_data['agency']['agency_nm'] mapped_data['AGENCY_MED'] = ml_data['agency']['agency_med'] mapped_data['SITE_NO'] = ml_data['site_no'] mapped_data['SITE_NAME']", "import re def mapping_factory(mapping): def map_func(key): if key is not", "to_flag(ml_data['display_flag']) mapped_data['WL_DATA_PROVIDER'] = None mapped_data['QW_DATA_PROVIDER'] = None mapped_data['LITH_DATA_PROVIDER'] = None", "= { 'background': 1, 'suspected/anticipated changes': 2, 'known changes': 3", "WELL_PURPOSE = { 'dedicated monitoring/observation': 1, 'other': 2 } map_well_purpose", "= map_well_type(ml_data['qw_well_type']) mapped_data['LOCAL_AQUIFER_CD'] = None mapped_data['REVIEW_FLAG'] = None try: mapped_data['STATE_CD']", "second if re.match(r\".*:\\d\\dZ$\", mapped_data['INSERT_DATE']): mapped_data['INSERT_DATE'] = mapped_data['INSERT_DATE'][:-1] + \".0Z\" if", "mapped_data['LINK'] = ml_data['link'] mapped_data['INSERT_DATE'] = ml_data['insert_date'] mapped_data['UPDATE_DATE'] = ml_data['update_date'] mapped_data['WL_WELL_PURPOSE_NOTES']", "3, 'unknown': 999 } map_wl_well_chars = mapping_factory(WL_WELL_CHARS) def to_flag(flag): return", "def mapping_factory(mapping): def map_func(key): if key is not None: ora_val", "= ml_data['site_no'] mapped_data['SITE_NAME'] = ml_data['site_name'] mapped_data['DEC_LAT_VA'] = ml_data['dec_lat_va'] mapped_data['DEC_LONG_VA'] =", "ml_data['update_date'] mapped_data['WL_WELL_PURPOSE_NOTES'] = ml_data['wl_well_purpose_notes'] mapped_data['QW_WELL_PURPOSE_NOTES'] = ml_data['qw_well_purpose_notes'] mapped_data['INSERT_USER_ID'] = ml_data['insert_user']", "KeyError, TypeError): mapped_data['COUNTY_CD'] = None try: mapped_data['COUNTRY_CD'] = ml_data['country']['country_cd'] except", "else None mapped_data['SITE_TYPE'] = 
ml_data['site_type'] mapped_data['HORZ_METHOD'] = ml_data['horz_method'] mapped_data['HORZ_ACY'] =", "ml_data['nat_aqfr']['nat_aqfr_cd'] mapped_data['NAT_AQFR_DESC'] = ml_data['nat_aqfr']['nat_aqfr_desc'] except (AttributeError, KeyError, TypeError): mapped_data['NAT_AQUIFER_CD'] =", "mapped_data['ALT_UNITS'] = ml_data['altitude_units']['unit_id'] if ml_data['altitude_units'] else None mapped_data['SITE_TYPE'] = ml_data['site_type']", "the fields from the API JSON response to the fields", "ml_data['agency']['agency_cd'] mapped_data['AGENCY_NM'] = ml_data['agency']['agency_nm'] mapped_data['AGENCY_MED'] = ml_data['agency']['agency_med'] mapped_data['SITE_NO'] = ml_data['site_no']", "mapped_data['LITH_DATA_PROVIDER'] = None mapped_data['CONST_DATA_PROVIDER'] = None mapped_data['WELL_DEPTH'] = ml_data['well_depth'] mapped_data['LINK']", "= None try: mapped_data['COUNTRY_CD'] = ml_data['country']['country_cd'] except (AttributeError, KeyError, TypeError):", "return '1' if flag else '0' def transform_mon_loc_data(ml_data): \"\"\" Map", "1, 'suspected/anticipated changes': 2, 'known changes': 3 } map_qw_well_chars =", "= ml_data['wl_well_purpose_notes'] mapped_data['QW_WELL_PURPOSE_NOTES'] = ml_data['qw_well_purpose_notes'] mapped_data['INSERT_USER_ID'] = ml_data['insert_user'] mapped_data['UPDATE_USER_ID'] =", "= ml_data['nat_aqfr']['nat_aqfr_desc'] except (AttributeError, KeyError, TypeError): mapped_data['NAT_AQUIFER_CD'] = None mapped_data['NAT_AQFR_DESC']", "= None mapped_data['WELL_DEPTH'] = ml_data['well_depth'] mapped_data['LINK'] = ml_data['link'] mapped_data['INSERT_DATE'] =", "None mapped_data['LOCAL_AQUIFER_NAME'] = ml_data['local_aquifer_name'] mapped_data['AQFR_CHAR'] = ml_data['aqfr_type'] mapped_data['QW_SN_FLAG'] = to_flag(ml_data['qw_sn_flag'])", "mapped_data['COUNTY_CD'] = None try: mapped_data['COUNTRY_CD'] = ml_data['country']['country_cd'] except (AttributeError, KeyError,", "mapped_data['INSERT_DATE'][:-1] + \".0Z\" if re.match(r\".*:\\d\\dZ$\", 
mapped_data['UPDATE_DATE']): mapped_data['UPDATE_DATE'] = mapped_data['UPDATE_DATE'][:-1] +", "mapped_data['INSERT_DATE'] = mapped_data['INSERT_DATE'][:-1] + \".0Z\" if re.match(r\".*:\\d\\dZ$\", mapped_data['UPDATE_DATE']): mapped_data['UPDATE_DATE'] =", "ora_val return map_func WELL_TYPES = { 'surveillance': 1, 'trend': 2,", "= ml_data['local_aquifer_name'] mapped_data['AQFR_CHAR'] = ml_data['aqfr_type'] mapped_data['QW_SN_FLAG'] = to_flag(ml_data['qw_sn_flag']) mapped_data['QW_BASELINE_FLAG'] =", "ml_data['altitude_units'] else None mapped_data['SITE_TYPE'] = ml_data['site_type'] mapped_data['HORZ_METHOD'] = ml_data['horz_method'] mapped_data['HORZ_ACY']", "def to_flag(flag): return '1' if flag else '0' def transform_mon_loc_data(ml_data):", "<filename>etl/transform.py \"\"\" Transform the data into a form that works", "'1' if flag else '0' def transform_mon_loc_data(ml_data): \"\"\" Map the", "mapped_data['ALT_ACY'] = ml_data['alt_acy'] return mapped_data def date_format(mapped_data): # fix missing", "ml_data['well_depth_units'] else None mapped_data['ALT_UNITS'] = ml_data['altitude_units']['unit_id'] if ml_data['altitude_units'] else None" ]
[ "<reponame>shamilison/django-reporter-pro # Created by shamilsakib at 04/10/20 BASE_MODEL = None" ]
[ "2019 Tealab@SBU\" __license__ = \"MIT\" __version__ = \"1.0.0\" __maintainer__ =", "update_iter(x_block, x_block, n, I_, J_, I_) def funcX_iter(block_info, u_block_info, n):", "K_*x_block.shape[0]+k for j in range(x_block.shape[0]-1, -1, -1): J = J_*x_block.shape[0]+j", "update_iter(u_block, x_block, n, I_, J_, K_): return _update_iter(np.ascontiguousarray(u_block), np.ascontiguousarray(x_block), n,", "i in range(x_block.shape[0]-1, -1, -1): I = I_*x_block.shape[0]+i min1 =", "J = J_*x_block.shape[0]+j for i in range(x_block.shape[0]-1, -1, -1): I", "<= min1) and (J >= I+1) and (I <= min2)):", "K_) @nb.jit(nopython=True) def _update_iter(u_block, x_block, n, I_, J_, K_): #", "than passing f_matrix_broadcast, we call this function def f_matrix(i, j):", "= \"<EMAIL>\" __status__ = \"Development\" import numpy as np import", "3) and (J <= min1) and (J >= I+1) and", "n-4) if ((K < n) and (K >= 3) and", ">= 3) and (J <= min1) and (J >= I+1)", "funcX_iter(block_info, u_block_info, n): ((I_, J_), x_block) = block_info ((UI_, UJ_),", "n-3) min2 = min(J-1, n-4) if ((K < n) and", "min2 = min(J-1, n-4) if ((K < n) and (K", "-1, -1): I = I_*x_block.shape[0]+i min1 = min(K-2, n-3) min2", "<filename>DPSparkImplementations/paf_kernels.py __author__ = \"<NAME>, <NAME>\" __copyright__ = \"Copyright (c) 2019", "def _update_iter(u_block, x_block, n, I_, J_, K_): # For testing", "''' Iterative kernels ''' def update_iter(u_block, x_block, n, I_, J_,", "J_, K_): return _update_iter(np.ascontiguousarray(u_block), np.ascontiguousarray(x_block), n, I_, J_, K_) @nb.jit(nopython=True)", "numba as nb ''' Iterative kernels ''' def update_iter(u_block, x_block,", "= max(x_block[i, j], u_block[j+1, k] + f_matrix(J+1, min(K, 2*J-I+1))) return", "import numpy as np import numba as nb ''' Iterative", "j], u_block[j+1, k] + f_matrix(J+1, min(K, 2*J-I+1))) return x_block def", "min2)): x_block[i, j] = max(x_block[i, j], u_block[j+1, k] + f_matrix(J+1,", "n, I_, J_, K_): return 
_update_iter(np.ascontiguousarray(u_block), np.ascontiguousarray(x_block), n, I_, J_,", "we call this function def f_matrix(i, j): return float(i+j) for", "min(K, 2*J-I+1))) return x_block def funcA_iter(block_info, n): ((I_, J_), x_block)", "I_, J_, K_) @nb.jit(nopython=True) def _update_iter(u_block, x_block, n, I_, J_,", "(K >= 3) and (J <= min1) and (J >=", "<NAME>\" __copyright__ = \"Copyright (c) 2019 Tealab@SBU\" __license__ = \"MIT\"", "-1): I = I_*x_block.shape[0]+i min1 = min(K-2, n-3) min2 =", "return float(i+j) for k in range(x_block.shape[0]-1, -1, -1): K =", "u_block[j+1, k] + f_matrix(J+1, min(K, 2*J-I+1))) return x_block def funcA_iter(block_info,", "np import numba as nb ''' Iterative kernels ''' def", "return update_iter(x_block, x_block, n, I_, J_, I_) def funcX_iter(block_info, u_block_info,", "max(x_block[i, j], u_block[j+1, k] + f_matrix(J+1, min(K, 2*J-I+1))) return x_block", "I_) def funcX_iter(block_info, u_block_info, n): ((I_, J_), x_block) = block_info", "__version__ = \"1.0.0\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__", "((I_, J_), x_block) = block_info return update_iter(x_block, x_block, n, I_,", "J_), x_block) = block_info return update_iter(x_block, x_block, n, I_, J_,", "-1, -1): J = J_*x_block.shape[0]+j for i in range(x_block.shape[0]-1, -1,", "u_block_info, n): ((I_, J_), x_block) = block_info ((UI_, UJ_), u_block)", "and (K >= 3) and (J <= min1) and (J", "f_matrix(i, j): return float(i+j) for k in range(x_block.shape[0]-1, -1, -1):", "__email__ = \"<EMAIL>\" __status__ = \"Development\" import numpy as np", "x_block) = block_info return update_iter(x_block, x_block, n, I_, J_, I_)", "\"MIT\" __version__ = \"1.0.0\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\"", "2*J-I+1))) return x_block def funcA_iter(block_info, n): ((I_, J_), x_block) =", "x_block[i, j] = max(x_block[i, j], u_block[j+1, k] + f_matrix(J+1, min(K,", "funcA_iter(block_info, n): ((I_, J_), x_block) = block_info return 
update_iter(x_block, x_block,", "def update_iter(u_block, x_block, n, I_, J_, K_): return _update_iter(np.ascontiguousarray(u_block), np.ascontiguousarray(x_block),", "passing f_matrix_broadcast, we call this function def f_matrix(i, j): return", "n, I_, J_, I_) def funcX_iter(block_info, u_block_info, n): ((I_, J_),", "x_block def funcA_iter(block_info, n): ((I_, J_), x_block) = block_info return", "x_block, n, I_, J_, I_) def funcX_iter(block_info, u_block_info, n): ((I_,", "numpy as np import numba as nb ''' Iterative kernels", "range(x_block.shape[0]-1, -1, -1): I = I_*x_block.shape[0]+i min1 = min(K-2, n-3)", "= \"Copyright (c) 2019 Tealab@SBU\" __license__ = \"MIT\" __version__ =", "as nb ''' Iterative kernels ''' def update_iter(u_block, x_block, n,", "I = I_*x_block.shape[0]+i min1 = min(K-2, n-3) min2 = min(J-1,", "and (J <= min1) and (J >= I+1) and (I", "(I <= min2)): x_block[i, j] = max(x_block[i, j], u_block[j+1, k]", "n): ((I_, J_), x_block) = block_info ((UI_, UJ_), u_block) =", "(J <= min1) and (J >= I+1) and (I <=", "for i in range(x_block.shape[0]-1, -1, -1): I = I_*x_block.shape[0]+i min1", "= I_*x_block.shape[0]+i min1 = min(K-2, n-3) min2 = min(J-1, n-4)", "\"<NAME>, <NAME>\" __copyright__ = \"Copyright (c) 2019 Tealab@SBU\" __license__ =", "and (J >= I+1) and (I <= min2)): x_block[i, j]", "range(x_block.shape[0]-1, -1, -1): J = J_*x_block.shape[0]+j for i in range(x_block.shape[0]-1,", "\"<EMAIL>\" __status__ = \"Development\" import numpy as np import numba", "_update_iter(np.ascontiguousarray(u_block), np.ascontiguousarray(x_block), n, I_, J_, K_) @nb.jit(nopython=True) def _update_iter(u_block, x_block,", "= block_info return update_iter(x_block, x_block, n, I_, J_, I_) def", "''' def update_iter(u_block, x_block, n, I_, J_, K_): return _update_iter(np.ascontiguousarray(u_block),", "block_info ((UI_, UJ_), u_block) = u_block_info return update_iter(u_block, x_block, n,", "K = K_*x_block.shape[0]+k for j in range(x_block.shape[0]-1, -1, -1): 
J", "-1): J = J_*x_block.shape[0]+j for i in range(x_block.shape[0]-1, -1, -1):", "x_block, n, I_, J_, K_): # For testing purposes, rather", "J_, K_) @nb.jit(nopython=True) def _update_iter(u_block, x_block, n, I_, J_, K_):", "__copyright__ = \"Copyright (c) 2019 Tealab@SBU\" __license__ = \"MIT\" __version__", "((K < n) and (K >= 3) and (J <=", "u_block) = u_block_info return update_iter(u_block, x_block, n, I_, J_, UJ_)", "I_, J_, K_): # For testing purposes, rather than passing", "(c) 2019 Tealab@SBU\" __license__ = \"MIT\" __version__ = \"1.0.0\" __maintainer__", "min(J-1, n-4) if ((K < n) and (K >= 3)", "I_, J_, K_): return _update_iter(np.ascontiguousarray(u_block), np.ascontiguousarray(x_block), n, I_, J_, K_)", "as np import numba as nb ''' Iterative kernels '''", "n) and (K >= 3) and (J <= min1) and", "def funcX_iter(block_info, u_block_info, n): ((I_, J_), x_block) = block_info ((UI_,", "block_info return update_iter(x_block, x_block, n, I_, J_, I_) def funcX_iter(block_info,", "\"1.0.0\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Development\"", "f_matrix(J+1, min(K, 2*J-I+1))) return x_block def funcA_iter(block_info, n): ((I_, J_),", "j] = max(x_block[i, j], u_block[j+1, k] + f_matrix(J+1, min(K, 2*J-I+1)))", "For testing purposes, rather than passing f_matrix_broadcast, we call this", "in range(x_block.shape[0]-1, -1, -1): I = I_*x_block.shape[0]+i min1 = min(K-2,", "\"Copyright (c) 2019 Tealab@SBU\" __license__ = \"MIT\" __version__ = \"1.0.0\"", "k] + f_matrix(J+1, min(K, 2*J-I+1))) return x_block def funcA_iter(block_info, n):", "j): return float(i+j) for k in range(x_block.shape[0]-1, -1, -1): K", "K_): return _update_iter(np.ascontiguousarray(u_block), np.ascontiguousarray(x_block), n, I_, J_, K_) @nb.jit(nopython=True) def", "I_, J_, I_) def funcX_iter(block_info, u_block_info, n): ((I_, J_), x_block)", "_update_iter(u_block, x_block, n, I_, J_, K_): # For testing purposes,", "-1): K = K_*x_block.shape[0]+k for j in 
range(x_block.shape[0]-1, -1, -1):", "return _update_iter(np.ascontiguousarray(u_block), np.ascontiguousarray(x_block), n, I_, J_, K_) @nb.jit(nopython=True) def _update_iter(u_block,", "import numba as nb ''' Iterative kernels ''' def update_iter(u_block,", "-1, -1): K = K_*x_block.shape[0]+k for j in range(x_block.shape[0]-1, -1,", "f_matrix_broadcast, we call this function def f_matrix(i, j): return float(i+j)", "J_*x_block.shape[0]+j for i in range(x_block.shape[0]-1, -1, -1): I = I_*x_block.shape[0]+i", "function def f_matrix(i, j): return float(i+j) for k in range(x_block.shape[0]-1,", "kernels ''' def update_iter(u_block, x_block, n, I_, J_, K_): return", "n, I_, J_, K_) @nb.jit(nopython=True) def _update_iter(u_block, x_block, n, I_,", "__license__ = \"MIT\" __version__ = \"1.0.0\" __maintainer__ = \"<NAME>\" __email__", "__author__ = \"<NAME>, <NAME>\" __copyright__ = \"Copyright (c) 2019 Tealab@SBU\"", "= J_*x_block.shape[0]+j for i in range(x_block.shape[0]-1, -1, -1): I =", "(J >= I+1) and (I <= min2)): x_block[i, j] =", "\"Development\" import numpy as np import numba as nb '''", "I+1) and (I <= min2)): x_block[i, j] = max(x_block[i, j],", "x_block) = block_info ((UI_, UJ_), u_block) = u_block_info return update_iter(u_block,", "= min(K-2, n-3) min2 = min(J-1, n-4) if ((K <", "def funcA_iter(block_info, n): ((I_, J_), x_block) = block_info return update_iter(x_block,", "in range(x_block.shape[0]-1, -1, -1): K = K_*x_block.shape[0]+k for j in", "((UI_, UJ_), u_block) = u_block_info return update_iter(u_block, x_block, n, I_,", "= \"MIT\" __version__ = \"1.0.0\" __maintainer__ = \"<NAME>\" __email__ =", "@nb.jit(nopython=True) def _update_iter(u_block, x_block, n, I_, J_, K_): # For", "n): ((I_, J_), x_block) = block_info return update_iter(x_block, x_block, n,", "UJ_), u_block) = u_block_info return update_iter(u_block, x_block, n, I_, J_,", "= K_*x_block.shape[0]+k for j in range(x_block.shape[0]-1, -1, -1): J =", "nb ''' Iterative kernels ''' def 
update_iter(u_block, x_block, n, I_,", "x_block, n, I_, J_, K_): return _update_iter(np.ascontiguousarray(u_block), np.ascontiguousarray(x_block), n, I_,", "I_*x_block.shape[0]+i min1 = min(K-2, n-3) min2 = min(J-1, n-4) if", "for k in range(x_block.shape[0]-1, -1, -1): K = K_*x_block.shape[0]+k for", "n, I_, J_, K_): # For testing purposes, rather than", "k in range(x_block.shape[0]-1, -1, -1): K = K_*x_block.shape[0]+k for j", "\"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Development\" import numpy as", "= \"1.0.0\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__ =", "= \"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Development\" import numpy", "j in range(x_block.shape[0]-1, -1, -1): J = J_*x_block.shape[0]+j for i", "this function def f_matrix(i, j): return float(i+j) for k in", "Tealab@SBU\" __license__ = \"MIT\" __version__ = \"1.0.0\" __maintainer__ = \"<NAME>\"", "K_): # For testing purposes, rather than passing f_matrix_broadcast, we", "__status__ = \"Development\" import numpy as np import numba as", "if ((K < n) and (K >= 3) and (J", "= min(J-1, n-4) if ((K < n) and (K >=", "Iterative kernels ''' def update_iter(u_block, x_block, n, I_, J_, K_):", "min1) and (J >= I+1) and (I <= min2)): x_block[i,", "min1 = min(K-2, n-3) min2 = min(J-1, n-4) if ((K", "np.ascontiguousarray(x_block), n, I_, J_, K_) @nb.jit(nopython=True) def _update_iter(u_block, x_block, n,", "J_, K_): # For testing purposes, rather than passing f_matrix_broadcast,", "= \"Development\" import numpy as np import numba as nb", "+ f_matrix(J+1, min(K, 2*J-I+1))) return x_block def funcA_iter(block_info, n): ((I_,", "purposes, rather than passing f_matrix_broadcast, we call this function def", "return x_block def funcA_iter(block_info, n): ((I_, J_), x_block) = block_info", "range(x_block.shape[0]-1, -1, -1): K = K_*x_block.shape[0]+k for j in range(x_block.shape[0]-1,", "and (I <= min2)): x_block[i, j] = max(x_block[i, j], u_block[j+1,", "<= min2)): x_block[i, j] = 
max(x_block[i, j], u_block[j+1, k] +", "< n) and (K >= 3) and (J <= min1)", "= \"<NAME>, <NAME>\" __copyright__ = \"Copyright (c) 2019 Tealab@SBU\" __license__", "J_, I_) def funcX_iter(block_info, u_block_info, n): ((I_, J_), x_block) =", "testing purposes, rather than passing f_matrix_broadcast, we call this function", "for j in range(x_block.shape[0]-1, -1, -1): J = J_*x_block.shape[0]+j for", "((I_, J_), x_block) = block_info ((UI_, UJ_), u_block) = u_block_info", "min(K-2, n-3) min2 = min(J-1, n-4) if ((K < n)", "J_), x_block) = block_info ((UI_, UJ_), u_block) = u_block_info return", "# For testing purposes, rather than passing f_matrix_broadcast, we call", "in range(x_block.shape[0]-1, -1, -1): J = J_*x_block.shape[0]+j for i in", "= block_info ((UI_, UJ_), u_block) = u_block_info return update_iter(u_block, x_block,", "def f_matrix(i, j): return float(i+j) for k in range(x_block.shape[0]-1, -1,", ">= I+1) and (I <= min2)): x_block[i, j] = max(x_block[i,", "call this function def f_matrix(i, j): return float(i+j) for k", "rather than passing f_matrix_broadcast, we call this function def f_matrix(i,", "__maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Development\" import", "float(i+j) for k in range(x_block.shape[0]-1, -1, -1): K = K_*x_block.shape[0]+k" ]
[ "\"simulation\", \"offer_asset\", \"return_amount\") query_msg = { desc: { action: {", "= { desc: { action: { \"amount\": str(amount), \"info\": {\"token\":", "\"contract_addr\": token_contract } } } } } try: result =", "inverse price. \"\"\" desc, action, result_key = (\"reverse_simulation\", \"ask_asset\", \"offer_amount\")", "\"\"\" Returns the price for `amount` of the token `pair`", "rates. \"\"\" def __init__(self, client: ClientContainer): self.client = client def", "recent rates. \"\"\" def __init__(self, client: ClientContainer): self.client = client", "is included in pair). Set `reverse` to true to get", "} } } } try: result = self.client.lcd_client.wasm.contract_query(pair, query_msg) return", "str(amount), \"info\": {\"token\": { \"contract_addr\": token_contract } } } }", "{ \"amount\": str(amount), \"info\": {\"token\": { \"contract_addr\": token_contract } }", "str, amount: int = 1000000, reverse: bool = False): \"\"\"", "amount: int = 1000000, reverse: bool = False): \"\"\" Returns", "terrakg import logger # Logging from terrakg.client import ClientContainer logger", "{ desc: { action: { \"amount\": str(amount), \"info\": {\"token\": {", "logger # Logging from terrakg.client import ClientContainer logger = logger.get_logger(__name__)", "Returns the price for `amount` of the token `pair` (exchange", "= False): \"\"\" Returns the price for `amount` of the", "`reverse` to true to get the inverse price. \"\"\" desc,", "__init__(self, client: ClientContainer): self.client = client def get_token_quote_and_fees(self, token_contract: str,", "class Rates: \"\"\" Access the most recent rates. \"\"\" def", "for `amount` of the token `pair` (exchange is included in", "import logger # Logging from terrakg.client import ClientContainer logger =", "most recent rates. 
\"\"\" def __init__(self, client: ClientContainer): self.client =", "def get_token_quote_and_fees(self, token_contract: str, pair: str, amount: int = 1000000,", "result_key = (\"reverse_simulation\", \"ask_asset\", \"offer_amount\") if reverse else ( \"simulation\",", "terrakg.client import ClientContainer logger = logger.get_logger(__name__) class Rates: \"\"\" Access", "import LCDResponseError from terrakg import logger # Logging from terrakg.client", "from terra_sdk.exceptions import LCDResponseError from terrakg import logger # Logging", "query_msg = { desc: { action: { \"amount\": str(amount), \"info\":", "} } } try: result = self.client.lcd_client.wasm.contract_query(pair, query_msg) return result[result_key],", "in pair). Set `reverse` to true to get the inverse", "{\"token\": { \"contract_addr\": token_contract } } } } } try:", "logger = logger.get_logger(__name__) class Rates: \"\"\" Access the most recent", "= logger.get_logger(__name__) class Rates: \"\"\" Access the most recent rates.", "def __init__(self, client: ClientContainer): self.client = client def get_token_quote_and_fees(self, token_contract:", "to get the inverse price. \"\"\" desc, action, result_key =", "the most recent rates. \"\"\" def __init__(self, client: ClientContainer): self.client", "the token `pair` (exchange is included in pair). Set `reverse`", "token `pair` (exchange is included in pair). Set `reverse` to", "included in pair). Set `reverse` to true to get the", "try: result = self.client.lcd_client.wasm.contract_query(pair, query_msg) return result[result_key], result['commission_amount'] except LCDResponseError", "\"\"\" desc, action, result_key = (\"reverse_simulation\", \"ask_asset\", \"offer_amount\") if reverse", "{ \"contract_addr\": token_contract } } } } } try: result", "logger.get_logger(__name__) class Rates: \"\"\" Access the most recent rates. 
\"\"\"", "reverse: bool = False): \"\"\" Returns the price for `amount`", "bool = False): \"\"\" Returns the price for `amount` of", "desc: { action: { \"amount\": str(amount), \"info\": {\"token\": { \"contract_addr\":", "except LCDResponseError as e: logger.warning(f\"Issue with price query: {e}\") return", "} try: result = self.client.lcd_client.wasm.contract_query(pair, query_msg) return result[result_key], result['commission_amount'] except", "token_contract: str, pair: str, amount: int = 1000000, reverse: bool", "self.client.lcd_client.wasm.contract_query(pair, query_msg) return result[result_key], result['commission_amount'] except LCDResponseError as e: logger.warning(f\"Issue", "(exchange is included in pair). Set `reverse` to true to", "of the token `pair` (exchange is included in pair). Set", "str, pair: str, amount: int = 1000000, reverse: bool =", "get the inverse price. \"\"\" desc, action, result_key = (\"reverse_simulation\",", "\"amount\": str(amount), \"info\": {\"token\": { \"contract_addr\": token_contract } } }", "to true to get the inverse price. 
\"\"\" desc, action,", "} } try: result = self.client.lcd_client.wasm.contract_query(pair, query_msg) return result[result_key], result['commission_amount']", "result = self.client.lcd_client.wasm.contract_query(pair, query_msg) return result[result_key], result['commission_amount'] except LCDResponseError as", "client def get_token_quote_and_fees(self, token_contract: str, pair: str, amount: int =", "pair: str, amount: int = 1000000, reverse: bool = False):", "= client def get_token_quote_and_fees(self, token_contract: str, pair: str, amount: int", "`amount` of the token `pair` (exchange is included in pair).", "action: { \"amount\": str(amount), \"info\": {\"token\": { \"contract_addr\": token_contract }", "{ action: { \"amount\": str(amount), \"info\": {\"token\": { \"contract_addr\": token_contract", "\"return_amount\") query_msg = { desc: { action: { \"amount\": str(amount),", "LCDResponseError from terrakg import logger # Logging from terrakg.client import", "from terrakg.client import ClientContainer logger = logger.get_logger(__name__) class Rates: \"\"\"", "pair). Set `reverse` to true to get the inverse price.", "= 1000000, reverse: bool = False): \"\"\" Returns the price", "self.client = client def get_token_quote_and_fees(self, token_contract: str, pair: str, amount:", "desc, action, result_key = (\"reverse_simulation\", \"ask_asset\", \"offer_amount\") if reverse else", "\"\"\" Access the most recent rates. \"\"\" def __init__(self, client:", "ClientContainer): self.client = client def get_token_quote_and_fees(self, token_contract: str, pair: str,", "Rates: \"\"\" Access the most recent rates. \"\"\" def __init__(self,", "Set `reverse` to true to get the inverse price. 
\"\"\"", "1000000, reverse: bool = False): \"\"\" Returns the price for", "action, result_key = (\"reverse_simulation\", \"ask_asset\", \"offer_amount\") if reverse else (", "False): \"\"\" Returns the price for `amount` of the token", "LCDResponseError as e: logger.warning(f\"Issue with price query: {e}\") return None", "else ( \"simulation\", \"offer_asset\", \"return_amount\") query_msg = { desc: {", "# Logging from terrakg.client import ClientContainer logger = logger.get_logger(__name__) class", "\"ask_asset\", \"offer_amount\") if reverse else ( \"simulation\", \"offer_asset\", \"return_amount\") query_msg", "(\"reverse_simulation\", \"ask_asset\", \"offer_amount\") if reverse else ( \"simulation\", \"offer_asset\", \"return_amount\")", "} } } } } try: result = self.client.lcd_client.wasm.contract_query(pair, query_msg)", "int = 1000000, reverse: bool = False): \"\"\" Returns the", "price for `amount` of the token `pair` (exchange is included", "token_contract } } } } } try: result = self.client.lcd_client.wasm.contract_query(pair,", "client: ClientContainer): self.client = client def get_token_quote_and_fees(self, token_contract: str, pair:", "get_token_quote_and_fees(self, token_contract: str, pair: str, amount: int = 1000000, reverse:", "\"offer_amount\") if reverse else ( \"simulation\", \"offer_asset\", \"return_amount\") query_msg =", "price. \"\"\" desc, action, result_key = (\"reverse_simulation\", \"ask_asset\", \"offer_amount\") if", "return result[result_key], result['commission_amount'] except LCDResponseError as e: logger.warning(f\"Issue with price", "the price for `amount` of the token `pair` (exchange is", "( \"simulation\", \"offer_asset\", \"return_amount\") query_msg = { desc: { action:", "Logging from terrakg.client import ClientContainer logger = logger.get_logger(__name__) class Rates:", "true to get the inverse price. 
\"\"\" desc, action, result_key", "result['commission_amount'] except LCDResponseError as e: logger.warning(f\"Issue with price query: {e}\")", "= (\"reverse_simulation\", \"ask_asset\", \"offer_amount\") if reverse else ( \"simulation\", \"offer_asset\",", "from terrakg import logger # Logging from terrakg.client import ClientContainer", "ClientContainer logger = logger.get_logger(__name__) class Rates: \"\"\" Access the most", "result[result_key], result['commission_amount'] except LCDResponseError as e: logger.warning(f\"Issue with price query:", "import ClientContainer logger = logger.get_logger(__name__) class Rates: \"\"\" Access the", "terra_sdk.exceptions import LCDResponseError from terrakg import logger # Logging from", "\"info\": {\"token\": { \"contract_addr\": token_contract } } } } }", "query_msg) return result[result_key], result['commission_amount'] except LCDResponseError as e: logger.warning(f\"Issue with", "if reverse else ( \"simulation\", \"offer_asset\", \"return_amount\") query_msg = {", "`pair` (exchange is included in pair). Set `reverse` to true", "\"\"\" def __init__(self, client: ClientContainer): self.client = client def get_token_quote_and_fees(self,", "the inverse price. \"\"\" desc, action, result_key = (\"reverse_simulation\", \"ask_asset\",", "\"offer_asset\", \"return_amount\") query_msg = { desc: { action: { \"amount\":", "reverse else ( \"simulation\", \"offer_asset\", \"return_amount\") query_msg = { desc:", "= self.client.lcd_client.wasm.contract_query(pair, query_msg) return result[result_key], result['commission_amount'] except LCDResponseError as e:", "Access the most recent rates. \"\"\" def __init__(self, client: ClientContainer):" ]
[ "clip_name = os.path.splitext(clip.name)[0] tracker_name = context.scene.tracking_local.tracker_name output_path = os.path.join(keying_module.get_abs_output_path(context),clip_name) keying_module.create_directory(output_path)", "FOUND\") file.close() #---------------------------------------- # PROPERTIES #---------------------------------------- class TrackingSceneProps(bpy.types.PropertyGroup): tracker_name: bpy.props.StringProperty", "context.scene.tracking_local.tracking_multiplier tracker = clip.tracking.tracks.get(tracker_name) if tracker is not None: prev", "self.layout scene = context.scene box = layout.box() box.row().label(text = \"Tracking", "= os.path.splitext(clip.name)[0] tracker_name = context.scene.tracking_local.tracker_name output_path = os.path.join(keying_module.get_abs_output_path(context),clip_name) keying_module.create_directory(output_path) file", "= \"Tracking export\") box.row().prop(scene.tracking_local, \"tracker_name\") box.row().prop(scene.tracking_local, \"tracking_multiplier\") box.row().operator(\"tracking.export_data\") class TrackingExportDataOp(bpy.types.Operator):", ") tracking_multiplier: bpy.props.FloatProperty \\ ( name = \"Distance multiplier\", description", "the chosen tracker\" def execute(self, context): export_tracking_data(self, context) return {\"FINISHED\"}", "prev = tracker.markers[0].co[0] for m in tracker.markers: writer.writerow([(m.co[0] - prev)", "self.report({\"ERROR\"},\"TRACKER NOT FOUND\") file.close() #---------------------------------------- # PROPERTIES #---------------------------------------- class TrackingSceneProps(bpy.types.PropertyGroup):", "bpy import os, glob from pathlib import Path from enum", "\"Name of the tracker for data export\", ) tracking_multiplier: bpy.props.FloatProperty", "= 1, min = 0.0001 ) class TrackingPanel(bpy.types.Panel): bl_label =", "SUCESSFULLY EXPORTED\") else: self.report({\"ERROR\"},\"TRACKER NOT FOUND\") file.close() #---------------------------------------- # 
PROPERTIES", "bl_label = \"Export Data\" bl_description = \"Export the tracking data", "import csv from . import keying_module def export_tracking_data(self, context): clip", "tracker.markers: writer.writerow([(m.co[0] - prev) * multiplier]) prev = m.co[0] self.report({\"INFO\"},\"TRACKER", "tracker.markers[0].co[0] for m in tracker.markers: writer.writerow([(m.co[0] - prev) * multiplier])", "= \"Distance multiplier\", description = \"The exported tracking distance gets", "name = \"Distance multiplier\", description = \"The exported tracking distance", "Data\" bl_description = \"Export the tracking data of the chosen", "( name = \"Track name\", description = \"Name of the", "TrackingExportDataOp, TrackingPanel, TrackingSceneProps ) def register(): for cls in classes:", "export\", ) tracking_multiplier: bpy.props.FloatProperty \\ ( name = \"Distance multiplier\",", "= \"The exported tracking distance gets multiplied by this value\",", "tracking distance gets multiplied by this value\", default = 1,", "description = \"Name of the tracker for data export\", )", "delimiter=',') multiplier = context.scene.tracking_local.tracking_multiplier tracker = clip.tracking.tracks.get(tracker_name) if tracker is", "writer = csv.writer(file, delimiter=',') multiplier = context.scene.tracking_local.tracking_multiplier tracker = clip.tracking.tracks.get(tracker_name)", "= \"Name of the tracker for data export\", ) tracking_multiplier:", "keying_module.create_directory(output_path) file = open(os.path.join(output_path,clip_name+\".csv\"), \"w\", newline='') writer = csv.writer(file, delimiter=',')", "\"Tracking Panel\" bl_idname = \"SCENE_PT_tracking_rendering\" bl_space_type = \"CLIP_EDITOR\" bl_region_type =", "Panel\" bl_idname = \"SCENE_PT_tracking_rendering\" bl_space_type = \"CLIP_EDITOR\" bl_region_type = \"UI\"", "= ( TrackingExportDataOp, TrackingPanel, TrackingSceneProps ) def register(): for cls", "clip = context.space_data.clip clip_name = os.path.splitext(clip.name)[0] 
tracker_name = context.scene.tracking_local.tracker_name output_path", "by this value\", default = 1, min = 0.0001 )", "abstractmethod import csv from . import keying_module def export_tracking_data(self, context):", "distance gets multiplied by this value\", default = 1, min", "os, glob from pathlib import Path from enum import Enum", "class TrackingPanel(bpy.types.Panel): bl_label = \"Tracking Panel\" bl_idname = \"SCENE_PT_tracking_rendering\" bl_space_type", "= \"Export the tracking data of the chosen tracker\" def", "export_tracking_data(self, context) return {\"FINISHED\"} classes = ( TrackingExportDataOp, TrackingPanel, TrackingSceneProps", "= context.space_data.clip clip_name = os.path.splitext(clip.name)[0] tracker_name = context.scene.tracking_local.tracker_name output_path =", "chosen tracker\" def execute(self, context): export_tracking_data(self, context) return {\"FINISHED\"} classes", "0.0001 ) class TrackingPanel(bpy.types.Panel): bl_label = \"Tracking Panel\" bl_idname =", "tracker_name: bpy.props.StringProperty \\ ( name = \"Track name\", description =", "\"CLIP_EDITOR\" bl_region_type = \"UI\" bl_context = \"render\" def draw(self, context):", "this value\", default = 1, min = 0.0001 ) class", "file.close() #---------------------------------------- # PROPERTIES #---------------------------------------- class TrackingSceneProps(bpy.types.PropertyGroup): tracker_name: bpy.props.StringProperty \\", "PROPERTIES #---------------------------------------- class TrackingSceneProps(bpy.types.PropertyGroup): tracker_name: bpy.props.StringProperty \\ ( name =", "Path from enum import Enum from abc import ABC, abstractmethod", "the tracker for data export\", ) tracking_multiplier: bpy.props.FloatProperty \\ (", "scene = context.scene box = layout.box() box.row().label(text = \"Tracking export\")", "import bpy import os, glob from pathlib import Path from", "{\"FINISHED\"} classes = ( TrackingExportDataOp, TrackingPanel, TrackingSceneProps ) def register():", 
"m in tracker.markers: writer.writerow([(m.co[0] - prev) * multiplier]) prev =", "box.row().label(text = \"Tracking export\") box.row().prop(scene.tracking_local, \"tracker_name\") box.row().prop(scene.tracking_local, \"tracking_multiplier\") box.row().operator(\"tracking.export_data\") class", "= bpy.props.PointerProperty(type=TrackingSceneProps) def unregister(): for cls in reversed(classes): bpy.utils.unregister_class(cls) del", "\"The exported tracking distance gets multiplied by this value\", default", "\"render\" def draw(self, context): layout = self.layout scene = context.scene", "TrackingPanel, TrackingSceneProps ) def register(): for cls in classes: bpy.utils.register_class(cls)", "import Path from enum import Enum from abc import ABC,", "clip.tracking.tracks.get(tracker_name) if tracker is not None: prev = tracker.markers[0].co[0] for", "box.row().operator(\"tracking.export_data\") class TrackingExportDataOp(bpy.types.Operator): bl_idname = \"tracking.export_data\" bl_label = \"Export Data\"", "= m.co[0] self.report({\"INFO\"},\"TRACKER SUCESSFULLY EXPORTED\") else: self.report({\"ERROR\"},\"TRACKER NOT FOUND\") file.close()", "abc import ABC, abstractmethod import csv from . 
import keying_module", "pathlib import Path from enum import Enum from abc import", "import Enum from abc import ABC, abstractmethod import csv from", "\"Distance multiplier\", description = \"The exported tracking distance gets multiplied", ") class TrackingPanel(bpy.types.Panel): bl_label = \"Tracking Panel\" bl_idname = \"SCENE_PT_tracking_rendering\"", "classes = ( TrackingExportDataOp, TrackingPanel, TrackingSceneProps ) def register(): for", "\"Tracking export\") box.row().prop(scene.tracking_local, \"tracker_name\") box.row().prop(scene.tracking_local, \"tracking_multiplier\") box.row().operator(\"tracking.export_data\") class TrackingExportDataOp(bpy.types.Operator): bl_idname", "bpy.props.FloatProperty \\ ( name = \"Distance multiplier\", description = \"The", "= context.scene.tracking_local.tracking_multiplier tracker = clip.tracking.tracks.get(tracker_name) if tracker is not None:", "( name = \"Distance multiplier\", description = \"The exported tracking", "for data export\", ) tracking_multiplier: bpy.props.FloatProperty \\ ( name =", "os.path.splitext(clip.name)[0] tracker_name = context.scene.tracking_local.tracker_name output_path = os.path.join(keying_module.get_abs_output_path(context),clip_name) keying_module.create_directory(output_path) file =", "= \"SCENE_PT_tracking_rendering\" bl_space_type = \"CLIP_EDITOR\" bl_region_type = \"UI\" bl_context =", "tracker is not None: prev = tracker.markers[0].co[0] for m in", "import ABC, abstractmethod import csv from . 
import keying_module def", "\"SCENE_PT_tracking_rendering\" bl_space_type = \"CLIP_EDITOR\" bl_region_type = \"UI\" bl_context = \"render\"", "os.path.join(keying_module.get_abs_output_path(context),clip_name) keying_module.create_directory(output_path) file = open(os.path.join(output_path,clip_name+\".csv\"), \"w\", newline='') writer = csv.writer(file,", "context.scene box = layout.box() box.row().label(text = \"Tracking export\") box.row().prop(scene.tracking_local, \"tracker_name\")", "keying_module def export_tracking_data(self, context): clip = context.space_data.clip clip_name = os.path.splitext(clip.name)[0]", "return {\"FINISHED\"} classes = ( TrackingExportDataOp, TrackingPanel, TrackingSceneProps ) def", "= os.path.join(keying_module.get_abs_output_path(context),clip_name) keying_module.create_directory(output_path) file = open(os.path.join(output_path,clip_name+\".csv\"), \"w\", newline='') writer =", "from abc import ABC, abstractmethod import csv from . import", "box.row().prop(scene.tracking_local, \"tracking_multiplier\") box.row().operator(\"tracking.export_data\") class TrackingExportDataOp(bpy.types.Operator): bl_idname = \"tracking.export_data\" bl_label =", "class TrackingExportDataOp(bpy.types.Operator): bl_idname = \"tracking.export_data\" bl_label = \"Export Data\" bl_description", "def register(): for cls in classes: bpy.utils.register_class(cls) bpy.types.Scene.tracking_local = bpy.props.PointerProperty(type=TrackingSceneProps)", "NOT FOUND\") file.close() #---------------------------------------- # PROPERTIES #---------------------------------------- class TrackingSceneProps(bpy.types.PropertyGroup): tracker_name:", "name = \"Track name\", description = \"Name of the tracker", "multiplier]) prev = m.co[0] self.report({\"INFO\"},\"TRACKER SUCESSFULLY EXPORTED\") else: self.report({\"ERROR\"},\"TRACKER NOT", "of the tracker for data export\", ) tracking_multiplier: bpy.props.FloatProperty \\", "in classes: bpy.utils.register_class(cls) 
bpy.types.Scene.tracking_local = bpy.props.PointerProperty(type=TrackingSceneProps) def unregister(): for cls", "if tracker is not None: prev = tracker.markers[0].co[0] for m", "box = layout.box() box.row().label(text = \"Tracking export\") box.row().prop(scene.tracking_local, \"tracker_name\") box.row().prop(scene.tracking_local,", "\\ ( name = \"Track name\", description = \"Name of", "else: self.report({\"ERROR\"},\"TRACKER NOT FOUND\") file.close() #---------------------------------------- # PROPERTIES #---------------------------------------- class", "- prev) * multiplier]) prev = m.co[0] self.report({\"INFO\"},\"TRACKER SUCESSFULLY EXPORTED\")", "= \"tracking.export_data\" bl_label = \"Export Data\" bl_description = \"Export the", "description = \"The exported tracking distance gets multiplied by this", "layout = self.layout scene = context.scene box = layout.box() box.row().label(text", "#---------------------------------------- class TrackingSceneProps(bpy.types.PropertyGroup): tracker_name: bpy.props.StringProperty \\ ( name = \"Track", "default = 1, min = 0.0001 ) class TrackingPanel(bpy.types.Panel): bl_label", "TrackingSceneProps ) def register(): for cls in classes: bpy.utils.register_class(cls) bpy.types.Scene.tracking_local", "def draw(self, context): layout = self.layout scene = context.scene box", "import os, glob from pathlib import Path from enum import", "context): export_tracking_data(self, context) return {\"FINISHED\"} classes = ( TrackingExportDataOp, TrackingPanel,", "\"tracker_name\") box.row().prop(scene.tracking_local, \"tracking_multiplier\") box.row().operator(\"tracking.export_data\") class TrackingExportDataOp(bpy.types.Operator): bl_idname = \"tracking.export_data\" bl_label", "= 0.0001 ) class TrackingPanel(bpy.types.Panel): bl_label = \"Tracking Panel\" bl_idname", "def export_tracking_data(self, context): clip = context.space_data.clip clip_name = os.path.splitext(clip.name)[0] tracker_name", "self.report({\"INFO\"},\"TRACKER 
SUCESSFULLY EXPORTED\") else: self.report({\"ERROR\"},\"TRACKER NOT FOUND\") file.close() #---------------------------------------- #", "= tracker.markers[0].co[0] for m in tracker.markers: writer.writerow([(m.co[0] - prev) *", "from pathlib import Path from enum import Enum from abc", "csv.writer(file, delimiter=',') multiplier = context.scene.tracking_local.tracking_multiplier tracker = clip.tracking.tracks.get(tracker_name) if tracker", "for m in tracker.markers: writer.writerow([(m.co[0] - prev) * multiplier]) prev", "execute(self, context): export_tracking_data(self, context) return {\"FINISHED\"} classes = ( TrackingExportDataOp,", "\"tracking_multiplier\") box.row().operator(\"tracking.export_data\") class TrackingExportDataOp(bpy.types.Operator): bl_idname = \"tracking.export_data\" bl_label = \"Export", "tracker\" def execute(self, context): export_tracking_data(self, context) return {\"FINISHED\"} classes =", "\\ ( name = \"Distance multiplier\", description = \"The exported", "= \"render\" def draw(self, context): layout = self.layout scene =", "m.co[0] self.report({\"INFO\"},\"TRACKER SUCESSFULLY EXPORTED\") else: self.report({\"ERROR\"},\"TRACKER NOT FOUND\") file.close() #----------------------------------------", "TrackingSceneProps(bpy.types.PropertyGroup): tracker_name: bpy.props.StringProperty \\ ( name = \"Track name\", description", "= \"Track name\", description = \"Name of the tracker for", "#---------------------------------------- # PROPERTIES #---------------------------------------- class TrackingSceneProps(bpy.types.PropertyGroup): tracker_name: bpy.props.StringProperty \\ (", "= context.scene.tracking_local.tracker_name output_path = os.path.join(keying_module.get_abs_output_path(context),clip_name) keying_module.create_directory(output_path) file = open(os.path.join(output_path,clip_name+\".csv\"), \"w\",", "\"Track name\", description = \"Name of the tracker for data", "output_path = 
os.path.join(keying_module.get_abs_output_path(context),clip_name) keying_module.create_directory(output_path) file = open(os.path.join(output_path,clip_name+\".csv\"), \"w\", newline='') writer", "draw(self, context): layout = self.layout scene = context.scene box =", ") def register(): for cls in classes: bpy.utils.register_class(cls) bpy.types.Scene.tracking_local =", "prev = m.co[0] self.report({\"INFO\"},\"TRACKER SUCESSFULLY EXPORTED\") else: self.report({\"ERROR\"},\"TRACKER NOT FOUND\")", ". import keying_module def export_tracking_data(self, context): clip = context.space_data.clip clip_name", "def execute(self, context): export_tracking_data(self, context) return {\"FINISHED\"} classes = (", "box.row().prop(scene.tracking_local, \"tracker_name\") box.row().prop(scene.tracking_local, \"tracking_multiplier\") box.row().operator(\"tracking.export_data\") class TrackingExportDataOp(bpy.types.Operator): bl_idname = \"tracking.export_data\"", "# PROPERTIES #---------------------------------------- class TrackingSceneProps(bpy.types.PropertyGroup): tracker_name: bpy.props.StringProperty \\ ( name", "min = 0.0001 ) class TrackingPanel(bpy.types.Panel): bl_label = \"Tracking Panel\"", "the tracking data of the chosen tracker\" def execute(self, context):", "tracker_name = context.scene.tracking_local.tracker_name output_path = os.path.join(keying_module.get_abs_output_path(context),clip_name) keying_module.create_directory(output_path) file = open(os.path.join(output_path,clip_name+\".csv\"),", "from enum import Enum from abc import ABC, abstractmethod import", "gets multiplied by this value\", default = 1, min =", "EXPORTED\") else: self.report({\"ERROR\"},\"TRACKER NOT FOUND\") file.close() #---------------------------------------- # PROPERTIES #----------------------------------------", "bpy.types.Scene.tracking_local = bpy.props.PointerProperty(type=TrackingSceneProps) def unregister(): for cls in reversed(classes): bpy.utils.unregister_class(cls)", "\"UI\" 
bl_context = \"render\" def draw(self, context): layout = self.layout", "= \"Tracking Panel\" bl_idname = \"SCENE_PT_tracking_rendering\" bl_space_type = \"CLIP_EDITOR\" bl_region_type", "export_tracking_data(self, context): clip = context.space_data.clip clip_name = os.path.splitext(clip.name)[0] tracker_name =", "class TrackingSceneProps(bpy.types.PropertyGroup): tracker_name: bpy.props.StringProperty \\ ( name = \"Track name\",", "= \"CLIP_EDITOR\" bl_region_type = \"UI\" bl_context = \"render\" def draw(self,", "cls in classes: bpy.utils.register_class(cls) bpy.types.Scene.tracking_local = bpy.props.PointerProperty(type=TrackingSceneProps) def unregister(): for", "multiplier = context.scene.tracking_local.tracking_multiplier tracker = clip.tracking.tracks.get(tracker_name) if tracker is not", "= self.layout scene = context.scene box = layout.box() box.row().label(text =", "data export\", ) tracking_multiplier: bpy.props.FloatProperty \\ ( name = \"Distance", "open(os.path.join(output_path,clip_name+\".csv\"), \"w\", newline='') writer = csv.writer(file, delimiter=',') multiplier = context.scene.tracking_local.tracking_multiplier", "tracking_multiplier: bpy.props.FloatProperty \\ ( name = \"Distance multiplier\", description =", "bpy.utils.register_class(cls) bpy.types.Scene.tracking_local = bpy.props.PointerProperty(type=TrackingSceneProps) def unregister(): for cls in reversed(classes):", "prev) * multiplier]) prev = m.co[0] self.report({\"INFO\"},\"TRACKER SUCESSFULLY EXPORTED\") else:", "1, min = 0.0001 ) class TrackingPanel(bpy.types.Panel): bl_label = \"Tracking", "bl_idname = \"tracking.export_data\" bl_label = \"Export Data\" bl_description = \"Export", "multiplied by this value\", default = 1, min = 0.0001", "multiplier\", description = \"The exported tracking distance gets multiplied by", "bpy.props.PointerProperty(type=TrackingSceneProps) def unregister(): for cls in reversed(classes): bpy.utils.unregister_class(cls) del 
bpy.types.Scene.tracking_local", "bl_region_type = \"UI\" bl_context = \"render\" def draw(self, context): layout", "classes: bpy.utils.register_class(cls) bpy.types.Scene.tracking_local = bpy.props.PointerProperty(type=TrackingSceneProps) def unregister(): for cls in", "TrackingExportDataOp(bpy.types.Operator): bl_idname = \"tracking.export_data\" bl_label = \"Export Data\" bl_description =", "exported tracking distance gets multiplied by this value\", default =", "bpy.props.StringProperty \\ ( name = \"Track name\", description = \"Name", "\"Export Data\" bl_description = \"Export the tracking data of the", "of the chosen tracker\" def execute(self, context): export_tracking_data(self, context) return", "import keying_module def export_tracking_data(self, context): clip = context.space_data.clip clip_name =", "context): clip = context.space_data.clip clip_name = os.path.splitext(clip.name)[0] tracker_name = context.scene.tracking_local.tracker_name", "name\", description = \"Name of the tracker for data export\",", "from . import keying_module def export_tracking_data(self, context): clip = context.space_data.clip", "context.scene.tracking_local.tracker_name output_path = os.path.join(keying_module.get_abs_output_path(context),clip_name) keying_module.create_directory(output_path) file = open(os.path.join(output_path,clip_name+\".csv\"), \"w\", newline='')", "= clip.tracking.tracks.get(tracker_name) if tracker is not None: prev = tracker.markers[0].co[0]", "= context.scene box = layout.box() box.row().label(text = \"Tracking export\") box.row().prop(scene.tracking_local,", "context.space_data.clip clip_name = os.path.splitext(clip.name)[0] tracker_name = context.scene.tracking_local.tracker_name output_path = os.path.join(keying_module.get_abs_output_path(context),clip_name)", "csv from . 
import keying_module def export_tracking_data(self, context): clip =", "tracker for data export\", ) tracking_multiplier: bpy.props.FloatProperty \\ ( name", "= layout.box() box.row().label(text = \"Tracking export\") box.row().prop(scene.tracking_local, \"tracker_name\") box.row().prop(scene.tracking_local, \"tracking_multiplier\")", "register(): for cls in classes: bpy.utils.register_class(cls) bpy.types.Scene.tracking_local = bpy.props.PointerProperty(type=TrackingSceneProps) def", "export\") box.row().prop(scene.tracking_local, \"tracker_name\") box.row().prop(scene.tracking_local, \"tracking_multiplier\") box.row().operator(\"tracking.export_data\") class TrackingExportDataOp(bpy.types.Operator): bl_idname =", "bl_label = \"Tracking Panel\" bl_idname = \"SCENE_PT_tracking_rendering\" bl_space_type = \"CLIP_EDITOR\"", "layout.box() box.row().label(text = \"Tracking export\") box.row().prop(scene.tracking_local, \"tracker_name\") box.row().prop(scene.tracking_local, \"tracking_multiplier\") box.row().operator(\"tracking.export_data\")", "writer.writerow([(m.co[0] - prev) * multiplier]) prev = m.co[0] self.report({\"INFO\"},\"TRACKER SUCESSFULLY", "= \"Export Data\" bl_description = \"Export the tracking data of", "bl_description = \"Export the tracking data of the chosen tracker\"", "in tracker.markers: writer.writerow([(m.co[0] - prev) * multiplier]) prev = m.co[0]", "context): layout = self.layout scene = context.scene box = layout.box()", "newline='') writer = csv.writer(file, delimiter=',') multiplier = context.scene.tracking_local.tracking_multiplier tracker =", "data of the chosen tracker\" def execute(self, context): export_tracking_data(self, context)", "\"tracking.export_data\" bl_label = \"Export Data\" bl_description = \"Export the tracking", "None: prev = tracker.markers[0].co[0] for m in tracker.markers: writer.writerow([(m.co[0] -", "value\", default = 1, min = 0.0001 ) class TrackingPanel(bpy.types.Panel):", "enum import Enum from abc import ABC, 
abstractmethod import csv", "= \"UI\" bl_context = \"render\" def draw(self, context): layout =", "file = open(os.path.join(output_path,clip_name+\".csv\"), \"w\", newline='') writer = csv.writer(file, delimiter=',') multiplier", "glob from pathlib import Path from enum import Enum from", "tracker = clip.tracking.tracks.get(tracker_name) if tracker is not None: prev =", "bl_idname = \"SCENE_PT_tracking_rendering\" bl_space_type = \"CLIP_EDITOR\" bl_region_type = \"UI\" bl_context", "\"Export the tracking data of the chosen tracker\" def execute(self,", "= csv.writer(file, delimiter=',') multiplier = context.scene.tracking_local.tracking_multiplier tracker = clip.tracking.tracks.get(tracker_name) if", "not None: prev = tracker.markers[0].co[0] for m in tracker.markers: writer.writerow([(m.co[0]", "TrackingPanel(bpy.types.Panel): bl_label = \"Tracking Panel\" bl_idname = \"SCENE_PT_tracking_rendering\" bl_space_type =", "( TrackingExportDataOp, TrackingPanel, TrackingSceneProps ) def register(): for cls in", "bl_space_type = \"CLIP_EDITOR\" bl_region_type = \"UI\" bl_context = \"render\" def", "context) return {\"FINISHED\"} classes = ( TrackingExportDataOp, TrackingPanel, TrackingSceneProps )", "* multiplier]) prev = m.co[0] self.report({\"INFO\"},\"TRACKER SUCESSFULLY EXPORTED\") else: self.report({\"ERROR\"},\"TRACKER", "Enum from abc import ABC, abstractmethod import csv from .", "is not None: prev = tracker.markers[0].co[0] for m in tracker.markers:", "ABC, abstractmethod import csv from . 
import keying_module def export_tracking_data(self,", "= open(os.path.join(output_path,clip_name+\".csv\"), \"w\", newline='') writer = csv.writer(file, delimiter=',') multiplier =", "bl_context = \"render\" def draw(self, context): layout = self.layout scene", "\"w\", newline='') writer = csv.writer(file, delimiter=',') multiplier = context.scene.tracking_local.tracking_multiplier tracker", "tracking data of the chosen tracker\" def execute(self, context): export_tracking_data(self,", "for cls in classes: bpy.utils.register_class(cls) bpy.types.Scene.tracking_local = bpy.props.PointerProperty(type=TrackingSceneProps) def unregister():" ]
[ "= UnwrapElement(IN[0]) if isinstance(IN[0], list): OUT = [GetViewTemplate(x) for x", "from Autodesk.Revit.DB import * def GetViewTemplate(view): if not view: return", "hasattr(view, \"ViewTemplateId\"): if view.ViewTemplateId.IntegerValue == -1: return None else: return", "import clr clr.AddReference('RevitAPI') from Autodesk.Revit.DB import * def GetViewTemplate(view): if", "list): OUT = [GetViewTemplate(x) for x in views] else: OUT", "views = UnwrapElement(IN[0]) if isinstance(IN[0], list): OUT = [GetViewTemplate(x) for", "clr.AddReference('RevitAPI') from Autodesk.Revit.DB import * def GetViewTemplate(view): if not view:", "not view: return None elif hasattr(view, \"ViewTemplateId\"): if view.ViewTemplateId.IntegerValue ==", "None views = UnwrapElement(IN[0]) if isinstance(IN[0], list): OUT = [GetViewTemplate(x)", "if not view: return None elif hasattr(view, \"ViewTemplateId\"): if view.ViewTemplateId.IntegerValue", "if view.ViewTemplateId.IntegerValue == -1: return None else: return view.Document.GetElement(view.ViewTemplateId) else:", "elif hasattr(view, \"ViewTemplateId\"): if view.ViewTemplateId.IntegerValue == -1: return None else:", "GetViewTemplate(view): if not view: return None elif hasattr(view, \"ViewTemplateId\"): if", "None elif hasattr(view, \"ViewTemplateId\"): if view.ViewTemplateId.IntegerValue == -1: return None", "def GetViewTemplate(view): if not view: return None elif hasattr(view, \"ViewTemplateId\"):", "return view.Document.GetElement(view.ViewTemplateId) else: return None views = UnwrapElement(IN[0]) if isinstance(IN[0],", "view: return None elif hasattr(view, \"ViewTemplateId\"): if view.ViewTemplateId.IntegerValue == -1:", "else: return None views = UnwrapElement(IN[0]) if isinstance(IN[0], list): OUT", "else: return view.Document.GetElement(view.ViewTemplateId) else: return None views = UnwrapElement(IN[0]) if", "import * def GetViewTemplate(view): if not view: return None elif", "= [GetViewTemplate(x) for x in views] else: OUT = 
GetViewTemplate(views)", "-1: return None else: return view.Document.GetElement(view.ViewTemplateId) else: return None views", "isinstance(IN[0], list): OUT = [GetViewTemplate(x) for x in views] else:", "* def GetViewTemplate(view): if not view: return None elif hasattr(view,", "view.ViewTemplateId.IntegerValue == -1: return None else: return view.Document.GetElement(view.ViewTemplateId) else: return", "return None else: return view.Document.GetElement(view.ViewTemplateId) else: return None views =", "return None elif hasattr(view, \"ViewTemplateId\"): if view.ViewTemplateId.IntegerValue == -1: return", "== -1: return None else: return view.Document.GetElement(view.ViewTemplateId) else: return None", "clr clr.AddReference('RevitAPI') from Autodesk.Revit.DB import * def GetViewTemplate(view): if not", "Autodesk.Revit.DB import * def GetViewTemplate(view): if not view: return None", "return None views = UnwrapElement(IN[0]) if isinstance(IN[0], list): OUT =", "None else: return view.Document.GetElement(view.ViewTemplateId) else: return None views = UnwrapElement(IN[0])", "UnwrapElement(IN[0]) if isinstance(IN[0], list): OUT = [GetViewTemplate(x) for x in", "view.Document.GetElement(view.ViewTemplateId) else: return None views = UnwrapElement(IN[0]) if isinstance(IN[0], list):", "OUT = [GetViewTemplate(x) for x in views] else: OUT =", "if isinstance(IN[0], list): OUT = [GetViewTemplate(x) for x in views]", "\"ViewTemplateId\"): if view.ViewTemplateId.IntegerValue == -1: return None else: return view.Document.GetElement(view.ViewTemplateId)" ]
[ "OF ANY # KIND, either express or implied. See the", "from dlab.meta_lib import * from dlab.actions_lib import * import sys", "more contributor license agreements. See the NOTICE file # distributed", "'{0}:{1}\\n{0}:{2}'.format(dataproc_conf['dlab_ssh_user'], ssh_user_pubkey, ssh_admin_pubkey) dataproc_cluster['config']['gceClusterConfig']['tags'][0] = dataproc_conf['cluster_tag'] try: logging.info('[Creating Dataproc Cluster]')", "service_account_email dataproc_cluster['config']['gceClusterConfig']['zoneUri'] = dataproc_conf['zone'] dataproc_cluster['config']['gceClusterConfig']['subnetworkUri'] = dataproc_conf['subnet'] dataproc_cluster['config']['masterConfig']['machineTypeUri'] = os.environ['dataproc_master_instance_type']", "params)) except: traceback.print_exc() raise Exception keyfile_name = \"/root/keys/{}.pem\".format(dataproc_conf['key_name']) local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))", "\"/root/keys/{}.pem\".format(dataproc_conf['key_name']) local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])) except Exception as err: print('Error: {0}'.format(err)) append_result(\"Failed", "Apache Software Foundation (ASF) under one # or more contributor", "WARRANTIES OR CONDITIONS OF ANY # KIND, either express or", "and limitations # under the License. 
# # ****************************************************************************** import", "os.environ['gcp_region'] dataproc_conf['zone'] = os.environ['gcp_zone'] dataproc_conf['subnet'] = '{0}-{1}-subnet'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['cluster_name'] =", "following: \".format(json.dumps(dataproc_conf, sort_keys=True, indent=4, separators=(',', ': ')))) logging.info(json.dumps(dataproc_conf)) local('touch /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))", "= os.environ['gcp_project_id'] dataproc_cluster['clusterName'] = dataproc_conf['cluster_name'] dataproc_cluster['labels'] = dataproc_conf['cluster_labels'] dataproc_cluster['config']['configBucket'] =", "2.0 (the # \"License\"); you may not use this file", "limitations # under the License. # # ****************************************************************************** import json", "# under the License. # # ****************************************************************************** import json import", "level=logging.INFO, filename=local_log_filepath) try: os.environ['exploratory_name'] except: os.environ['exploratory_name'] = '' if os.path.exists('/response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])):", "edge_status != 'RUNNING': logging.info('ERROR: Edge node is unavailable! 
Aborting...') print('ERROR:", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "specific language governing permissions and limitations # under the License.", "'{0}-{1}-subnet'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'], dataproc_conf['exploratory_name'], dataproc_conf['computational_name']) dataproc_conf['cluster_tag'] =", "'-') dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-') dataproc_conf['key_name'] = os.environ['conf_key_name'] dataproc_conf['key_path'] =", "under the License is distributed on an # \"AS IS\"", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either", "# ***************************************************************************** # # Licensed to the Apache Software Foundation", "if int(os.environ['dataproc_preemptible_count']) != 0: dataproc_cluster['config']['secondaryWorkerConfig']['numInstances'] = int(os.environ['dataproc_preemptible_count']) else: del dataproc_cluster['config']['secondaryWorkerConfig']", "= '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) service_account_email = <EMAIL>\".format(dataproc_conf['dataproc_service_account_name'], os.environ['gcp_project_id']) dataproc_conf['edge_instance_hostname'] = '{0}-{1}-edge'.format(dataproc_conf['service_base_name'],", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #", "int(os.environ['dataproc_preemptible_count']) != 0: dataproc_cluster['config']['secondaryWorkerConfig']['numInstances'] = int(os.environ['dataproc_preemptible_count']) else: del dataproc_cluster['config']['secondaryWorkerConfig'] dataproc_cluster['config']['softwareConfig']['imageVersion']", "import time from fabric.api import * from dlab.fab import *", "params = \"--region {0} --bucket {1} --params 
'{2}'\".format(dataproc_conf['region'], dataproc_conf['bucket_name'], json.dumps(dataproc_cluster))", "os import uuid import logging from Crypto.PublicKey import RSA if", "# ****************************************************************************** import json import time from fabric.api import *", "= dataproc_conf['cluster_tag'] try: logging.info('[Creating Dataproc Cluster]') print('[Creating Dataproc Cluster]') params", "distributed with this work for additional information # regarding copyright", "dataproc_conf['cluster_name'] dataproc_cluster['labels'] = dataproc_conf['cluster_labels'] dataproc_cluster['config']['configBucket'] = dataproc_conf['bucket_name'] dataproc_cluster['config']['gceClusterConfig']['serviceAccount'] = service_account_email", "(os.environ['edge_user_name']).lower().replace('_', '-') dataproc_conf['key_name'] = os.environ['conf_key_name'] dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name']) dataproc_conf['region']", "os.environ['gcp_project_id']) dataproc_conf['edge_instance_hostname'] = '{0}-{1}-edge'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user'] edge_status =", "dataproc_conf['bucket_name'] = '{}-{}-bucket'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['release_label'] = os.environ['dataproc_version'] dataproc_conf['cluster_labels'] = {", "json.loads(open('/root/templates/dataengine-service_cluster.json').read().decode('utf-8-sig')) dataproc_cluster['projectId'] = os.environ['gcp_project_id'] dataproc_cluster['clusterName'] = dataproc_conf['cluster_name'] dataproc_cluster['labels'] = dataproc_conf['cluster_labels']", "int(os.environ['dataproc_preemptible_count']) else: del dataproc_cluster['config']['secondaryWorkerConfig'] dataproc_cluster['config']['softwareConfig']['imageVersion'] = dataproc_conf['release_label'] ssh_user_pubkey = 
open(os.environ['conf_key_dir']", "= (os.environ['edge_user_name']).lower().replace('_', '-') dataproc_conf['key_name'] = os.environ['conf_key_name'] dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])", "sys.exit(1) print(\"Will create exploratory environment with edge node as access", "for the # specific language governing permissions and limitations #", "<EMAIL>\".format(dataproc_conf['dataproc_service_account_name'], os.environ['gcp_project_id']) dataproc_conf['edge_instance_hostname'] = '{0}-{1}-edge'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user'] edge_status", "= (os.environ['conf_service_base_name']).lower().replace('_', '-') dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-') dataproc_conf['key_name'] = os.environ['conf_key_name']", "'{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name']) dataproc_conf['region'] = os.environ['gcp_region'] dataproc_conf['zone'] = os.environ['gcp_zone'] dataproc_conf['subnet'] =", "See the License for the # specific language governing permissions", "to in writing, # software distributed under the License is", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "\"product\": \"dlab\", \"computational_name\": dataproc_conf['computational_name'] } dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) service_account_email", "file # distributed with this work for additional information #", "logging.info('ERROR: Edge node is unavailable! 
Aborting...') print('ERROR: Edge node is", "sleep 10\") dataproc_cluster = json.loads(open('/root/templates/dataengine-service_cluster.json').read().decode('utf-8-sig')) dataproc_cluster['projectId'] = os.environ['gcp_project_id'] dataproc_cluster['clusterName'] =", "err: print('Error: {0}'.format(err)) append_result(\"Failed to create Dataproc Cluster.\", str(err)) local('rm", "Edge node is unavailable! Aborting...') print('ERROR: Edge node is unavailable!", "* from dlab.fab import * from dlab.meta_lib import * from", "dataproc_cluster['config']['softwareConfig']['imageVersion'] = dataproc_conf['release_label'] ssh_user_pubkey = open(os.environ['conf_key_dir'] + os.environ['edge_user_name'] + '.pub').read()", "= \"/logs/\" + os.environ['conf_resource'] + \"/\" + local_log_filename logging.basicConfig(format='%(levelname)-8s [%(asctime)s]", "time.sleep(30) print('Generating infrastructure names and tags') dataproc_conf = dict() try:", "dataproc_conf['edge_user_name']) dataproc_conf['release_label'] = os.environ['dataproc_version'] dataproc_conf['cluster_labels'] = { os.environ['notebook_instance_name']: \"not-configured\", \"name\":", "from Crypto.PublicKey import RSA if __name__ == \"__main__\": local_log_filename =", "dataproc_cluster['config']['gceClusterConfig']['metadata']['ssh-keys'] = '{0}:{1}\\n{0}:{2}'.format(dataproc_conf['dlab_ssh_user'], ssh_user_pubkey, ssh_admin_pubkey) dataproc_cluster['config']['gceClusterConfig']['tags'][0] = dataproc_conf['cluster_tag'] try: logging.info('[Creating", "dataproc_conf['cluster_tag'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['bucket_name'] = '{}-{}-bucket'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['release_label'] =", "implied. 
See the License for the # specific language governing", "')))) logging.info(json.dumps(dataproc_conf)) local('touch /response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])) local(\"echo Waiting for changes to propagate;", "to you under the Apache License, Version 2.0 (the #", "local_log_filepath = \"/logs/\" + os.environ['conf_resource'] + \"/\" + local_log_filename logging.basicConfig(format='%(levelname)-8s", "os.environ['exploratory_name'] except: os.environ['exploratory_name'] = '' if os.path.exists('/response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])): time.sleep(30) print('Generating infrastructure", "= GCPMeta().get_private_ip_address(dataproc_conf['service_base_name'] + '-ssn') put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'], ssn_hostname) append_result(\"Edge", "is unavailable! Aborting...') ssn_hostname = GCPMeta().get_private_ip_address(dataproc_conf['service_base_name'] + '-ssn') put_resource_status('edge', 'Unavailable',", "0: dataproc_cluster['config']['secondaryWorkerConfig']['numInstances'] = int(os.environ['dataproc_preemptible_count']) else: del dataproc_cluster['config']['secondaryWorkerConfig'] dataproc_cluster['config']['softwareConfig']['imageVersion'] = dataproc_conf['release_label']", "may not use this file except in compliance # with", "dlab.meta_lib import * from dlab.actions_lib import * import sys import", "{ os.environ['notebook_instance_name']: \"not-configured\", \"name\": dataproc_conf['cluster_name'], \"sbn\": dataproc_conf['service_base_name'], \"user\": dataproc_conf['edge_user_name'], \"notebook_name\":", "License, Version 2.0 (the # \"License\"); you may not use", "either express or implied. 
See the License for the #", "\"user\": dataproc_conf['edge_user_name'], \"notebook_name\": os.environ['notebook_instance_name'], \"product\": \"dlab\", \"computational_name\": dataproc_conf['computational_name'] } dataproc_conf['dataproc_service_account_name']", "except Exception as err: print('Error: {0}'.format(err)) append_result(\"Failed to create Dataproc", "= key.publickey().exportKey(\"OpenSSH\") dataproc_cluster['config']['gceClusterConfig']['metadata']['ssh-keys'] = '{0}:{1}\\n{0}:{2}'.format(dataproc_conf['dlab_ssh_user'], ssh_user_pubkey, ssh_admin_pubkey) dataproc_cluster['config']['gceClusterConfig']['tags'][0] = dataproc_conf['cluster_tag']", "additional information # regarding copyright ownership. The ASF licenses this", "dlab.actions_lib import * import sys import os import uuid import", "\"__main__\": local_log_filename = \"{}_{}_{}.log\".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id']) local_log_filepath = \"/logs/\" +", "dlab.fab import * from dlab.meta_lib import * from dlab.actions_lib import", "See the NOTICE file # distributed with this work for", "except: dataproc_conf['computational_name'] = '' dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-') dataproc_conf['edge_user_name'] =", "key = RSA.importKey(open(dataproc_conf['key_path'], 'rb').read()) ssh_admin_pubkey = key.publickey().exportKey(\"OpenSSH\") dataproc_cluster['config']['gceClusterConfig']['metadata']['ssh-keys'] = '{0}:{1}\\n{0}:{2}'.format(dataproc_conf['dlab_ssh_user'],", "as access point as following: \".format(json.dumps(dataproc_conf, sort_keys=True, indent=4, separators=(',', ':", "GCPMeta().get_instance_status(dataproc_conf['edge_instance_hostname']) if edge_status != 'RUNNING': logging.info('ERROR: Edge node is unavailable!", "import json import time from fabric.api import * from dlab.fab", "is unavailable! Aborting...') print('ERROR: Edge node is unavailable! 
Aborting...') ssn_hostname", "= dataproc_conf['release_label'] ssh_user_pubkey = open(os.environ['conf_key_dir'] + os.environ['edge_user_name'] + '.pub').read() key", "dataproc_conf['exploratory_name'], dataproc_conf['computational_name']) dataproc_conf['cluster_tag'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['bucket_name'] = '{}-{}-bucket'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'])", "Apache License, Version 2.0 (the # \"License\"); you may not", "except: os.environ['exploratory_name'] = '' if os.path.exists('/response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])): time.sleep(30) print('Generating infrastructure names", "\"sbn\": dataproc_conf['service_base_name'], \"user\": dataproc_conf['edge_user_name'], \"notebook_name\": os.environ['notebook_instance_name'], \"product\": \"dlab\", \"computational_name\": dataproc_conf['computational_name']", "'{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['bucket_name'] = '{}-{}-bucket'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['release_label'] = os.environ['dataproc_version'] dataproc_conf['cluster_labels']", "dataproc_conf['edge_user_name']) dataproc_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'], dataproc_conf['exploratory_name'], dataproc_conf['computational_name']) dataproc_conf['cluster_tag'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'],", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "dataproc_cluster['config']['workerConfig']['numInstances'] = int(os.environ['dataproc_slave_count']) if int(os.environ['dataproc_preemptible_count']) != 0: dataproc_cluster['config']['secondaryWorkerConfig']['numInstances'] = int(os.environ['dataproc_preemptible_count'])", "file except in compliance # 
with the License. You may", "{}\".format('dataengine-service_create', params)) except: traceback.print_exc() raise Exception keyfile_name = \"/root/keys/{}.pem\".format(dataproc_conf['key_name']) local('rm", "# specific language governing permissions and limitations # under the", "dataproc_conf['computational_name']) dataproc_conf['cluster_tag'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['bucket_name'] = '{}-{}-bucket'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['release_label']", "= json.loads(open('/root/templates/dataengine-service_cluster.json').read().decode('utf-8-sig')) dataproc_cluster['projectId'] = os.environ['gcp_project_id'] dataproc_cluster['clusterName'] = dataproc_conf['cluster_name'] dataproc_cluster['labels'] =", "RSA if __name__ == \"__main__\": local_log_filename = \"{}_{}_{}.log\".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id'])", "you may not use this file except in compliance #", "* from dlab.actions_lib import * import sys import os import", "os.environ['conf_os_user'], ssn_hostname) append_result(\"Edge node is unavailable\") sys.exit(1) print(\"Will create exploratory", "= \"--region {0} --bucket {1} --params '{2}'\".format(dataproc_conf['region'], dataproc_conf['bucket_name'], json.dumps(dataproc_cluster)) try:", "os.environ['notebook_instance_name']: \"not-configured\", \"name\": dataproc_conf['cluster_name'], \"sbn\": dataproc_conf['service_base_name'], \"user\": dataproc_conf['edge_user_name'], \"notebook_name\": os.environ['notebook_instance_name'],", "logging.info(json.dumps(dataproc_conf)) local('touch /response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])) local(\"echo Waiting for changes to propagate; sleep", "\"--region {0} --bucket {1} --params '{2}'\".format(dataproc_conf['region'], dataproc_conf['bucket_name'], json.dumps(dataproc_cluster)) try: 
local(\"~/scripts/{}.py", "use this file except in compliance # with the License.", "\"name\": dataproc_conf['cluster_name'], \"sbn\": dataproc_conf['service_base_name'], \"user\": dataproc_conf['edge_user_name'], \"notebook_name\": os.environ['notebook_instance_name'], \"product\": \"dlab\",", "dataproc_conf['bucket_name'], json.dumps(dataproc_cluster)) try: local(\"~/scripts/{}.py {}\".format('dataengine-service_create', params)) except: traceback.print_exc() raise Exception", "contributor license agreements. See the NOTICE file # distributed with", "with edge node as access point as following: \".format(json.dumps(dataproc_conf, sort_keys=True,", "dataproc_conf['region'] = os.environ['gcp_region'] dataproc_conf['zone'] = os.environ['gcp_zone'] dataproc_conf['subnet'] = '{0}-{1}-subnet'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'])", "'' dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-') dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-') dataproc_conf['key_name']", "dataproc_cluster['labels'] = dataproc_conf['cluster_labels'] dataproc_cluster['config']['configBucket'] = dataproc_conf['bucket_name'] dataproc_cluster['config']['gceClusterConfig']['serviceAccount'] = service_account_email dataproc_cluster['config']['gceClusterConfig']['zoneUri']", "os.environ['dataproc_slave_instance_type'] dataproc_cluster['config']['masterConfig']['numInstances'] = int(os.environ['dataproc_master_count']) dataproc_cluster['config']['workerConfig']['numInstances'] = int(os.environ['dataproc_slave_count']) if int(os.environ['dataproc_preemptible_count']) !=", "dataproc_conf['release_label'] ssh_user_pubkey = open(os.environ['conf_key_dir'] + os.environ['edge_user_name'] + '.pub').read() key =", "except: traceback.print_exc() raise Exception keyfile_name = \"/root/keys/{}.pem\".format(dataproc_conf['key_name']) local('rm 
/response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])) except", "an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "dataproc_cluster['config']['configBucket'] = dataproc_conf['bucket_name'] dataproc_cluster['config']['gceClusterConfig']['serviceAccount'] = service_account_email dataproc_cluster['config']['gceClusterConfig']['zoneUri'] = dataproc_conf['zone'] dataproc_cluster['config']['gceClusterConfig']['subnetworkUri']", "WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express", "ssh_admin_pubkey) dataproc_cluster['config']['gceClusterConfig']['tags'][0] = dataproc_conf['cluster_tag'] try: logging.info('[Creating Dataproc Cluster]') print('[Creating Dataproc", "time from fabric.api import * from dlab.fab import * from", "with this work for additional information # regarding copyright ownership.", "dataproc_conf['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-') except: dataproc_conf['exploratory_name'] = '' try: dataproc_conf['computational_name']", "keyfile_name = \"/root/keys/{}.pem\".format(dataproc_conf['key_name']) local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])) except Exception as err: print('Error:", "os.environ['exploratory_name'] = '' if os.path.exists('/response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])): time.sleep(30) print('Generating infrastructure names and", "== \"__main__\": local_log_filename = \"{}_{}_{}.log\".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id']) local_log_filepath = \"/logs/\"", "work for additional information # regarding copyright ownership. 
The ASF", "dataproc_conf['computational_name'] = '' dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-') dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_',", "[%(asctime)s] %(message)s', level=logging.INFO, filename=local_log_filepath) try: os.environ['exploratory_name'] except: os.environ['exploratory_name'] = ''", "distributed under the License is distributed on an # \"AS", "edge_status = GCPMeta().get_instance_status(dataproc_conf['edge_instance_hostname']) if edge_status != 'RUNNING': logging.info('ERROR: Edge node", "dataproc_cluster['config']['gceClusterConfig']['subnetworkUri'] = dataproc_conf['subnet'] dataproc_cluster['config']['masterConfig']['machineTypeUri'] = os.environ['dataproc_master_instance_type'] dataproc_cluster['config']['workerConfig']['machineTypeUri'] = os.environ['dataproc_slave_instance_type'] dataproc_cluster['config']['masterConfig']['numInstances']", "dataproc_conf['edge_user_name']) service_account_email = <EMAIL>\".format(dataproc_conf['dataproc_service_account_name'], os.environ['gcp_project_id']) dataproc_conf['edge_instance_hostname'] = '{0}-{1}-edge'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['dlab_ssh_user']", "# software distributed under the License is distributed on an", "dataproc_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'], dataproc_conf['exploratory_name'], dataproc_conf['computational_name']) dataproc_conf['cluster_tag'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'])", "sys import os import uuid import logging from Crypto.PublicKey import", "dataproc_conf['bucket_name'] dataproc_cluster['config']['gceClusterConfig']['serviceAccount'] = service_account_email dataproc_cluster['config']['gceClusterConfig']['zoneUri'] = dataproc_conf['zone'] 
dataproc_cluster['config']['gceClusterConfig']['subnetworkUri'] = dataproc_conf['subnet']", "the License. You may obtain a copy of the License", "dataproc_cluster['config']['secondaryWorkerConfig']['numInstances'] = int(os.environ['dataproc_preemptible_count']) else: del dataproc_cluster['config']['secondaryWorkerConfig'] dataproc_cluster['config']['softwareConfig']['imageVersion'] = dataproc_conf['release_label'] ssh_user_pubkey", "names and tags') dataproc_conf = dict() try: dataproc_conf['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_',", "= dataproc_conf['cluster_name'] dataproc_cluster['labels'] = dataproc_conf['cluster_labels'] dataproc_cluster['config']['configBucket'] = dataproc_conf['bucket_name'] dataproc_cluster['config']['gceClusterConfig']['serviceAccount'] =", "dataproc_conf = dict() try: dataproc_conf['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-') except: dataproc_conf['exploratory_name']", "under the Apache License, Version 2.0 (the # \"License\"); you", "= '' if os.path.exists('/response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])): time.sleep(30) print('Generating infrastructure names and tags')", "distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "regarding copyright ownership. The ASF licenses this file # to", "unavailable! 
Aborting...') ssn_hostname = GCPMeta().get_private_ip_address(dataproc_conf['service_base_name'] + '-ssn') put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'],", "or agreed to in writing, # software distributed under the", "= GCPMeta().get_instance_status(dataproc_conf['edge_instance_hostname']) if edge_status != 'RUNNING': logging.info('ERROR: Edge node is", "json.dumps(dataproc_cluster)) try: local(\"~/scripts/{}.py {}\".format('dataengine-service_create', params)) except: traceback.print_exc() raise Exception keyfile_name", "print('Error: {0}'.format(err)) append_result(\"Failed to create Dataproc Cluster.\", str(err)) local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))", "'-') except: dataproc_conf['computational_name'] = '' dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-') dataproc_conf['edge_user_name']", "as err: print('Error: {0}'.format(err)) append_result(\"Failed to create Dataproc Cluster.\", str(err))", "= (os.environ['exploratory_name']).lower().replace('_', '-') except: dataproc_conf['exploratory_name'] = '' try: dataproc_conf['computational_name'] =", "to propagate; sleep 10\") dataproc_cluster = json.loads(open('/root/templates/dataengine-service_cluster.json').read().decode('utf-8-sig')) dataproc_cluster['projectId'] = os.environ['gcp_project_id']", "= dataproc_conf['zone'] dataproc_cluster['config']['gceClusterConfig']['subnetworkUri'] = dataproc_conf['subnet'] dataproc_cluster['config']['masterConfig']['machineTypeUri'] = os.environ['dataproc_master_instance_type'] dataproc_cluster['config']['workerConfig']['machineTypeUri'] =", "= int(os.environ['dataproc_slave_count']) if int(os.environ['dataproc_preemptible_count']) != 0: dataproc_cluster['config']['secondaryWorkerConfig']['numInstances'] = int(os.environ['dataproc_preemptible_count']) else:", "= os.environ['dataproc_version'] dataproc_conf['cluster_labels'] = { 
os.environ['notebook_instance_name']: \"not-configured\", \"name\": dataproc_conf['cluster_name'], \"sbn\":", "= service_account_email dataproc_cluster['config']['gceClusterConfig']['zoneUri'] = dataproc_conf['zone'] dataproc_cluster['config']['gceClusterConfig']['subnetworkUri'] = dataproc_conf['subnet'] dataproc_cluster['config']['masterConfig']['machineTypeUri'] =", "if os.path.exists('/response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])): time.sleep(30) print('Generating infrastructure names and tags') dataproc_conf =", "or more contributor license agreements. See the NOTICE file #", "= open(os.environ['conf_key_dir'] + os.environ['edge_user_name'] + '.pub').read() key = RSA.importKey(open(dataproc_conf['key_path'], 'rb').read())", "= '{0}-{1}-des-{2}-{3}'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'], dataproc_conf['exploratory_name'], dataproc_conf['computational_name']) dataproc_conf['cluster_tag'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['bucket_name']", "this work for additional information # regarding copyright ownership. The", "Exception keyfile_name = \"/root/keys/{}.pem\".format(dataproc_conf['key_name']) local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])) except Exception as err:", "the NOTICE file # distributed with this work for additional", "License. 
# # ****************************************************************************** import json import time from fabric.api", "del dataproc_cluster['config']['secondaryWorkerConfig'] dataproc_cluster['config']['softwareConfig']['imageVersion'] = dataproc_conf['release_label'] ssh_user_pubkey = open(os.environ['conf_key_dir'] + os.environ['edge_user_name']", "open(os.environ['conf_key_dir'] + os.environ['edge_user_name'] + '.pub').read() key = RSA.importKey(open(dataproc_conf['key_path'], 'rb').read()) ssh_admin_pubkey", "= '' try: dataproc_conf['computational_name'] = (os.environ['computational_name']).lower().replace('_', '-') except: dataproc_conf['computational_name'] =", "create exploratory environment with edge node as access point as", "Dataproc Cluster]') print('[Creating Dataproc Cluster]') params = \"--region {0} --bucket", "{1} --params '{2}'\".format(dataproc_conf['region'], dataproc_conf['bucket_name'], json.dumps(dataproc_cluster)) try: local(\"~/scripts/{}.py {}\".format('dataengine-service_create', params)) except:", "unavailable! Aborting...') print('ERROR: Edge node is unavailable! 
Aborting...') ssn_hostname =", "\"computational_name\": dataproc_conf['computational_name'] } dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) service_account_email = <EMAIL>\".format(dataproc_conf['dataproc_service_account_name'],", "changes to propagate; sleep 10\") dataproc_cluster = json.loads(open('/root/templates/dataengine-service_cluster.json').read().decode('utf-8-sig')) dataproc_cluster['projectId'] =", "****************************************************************************** import json import time from fabric.api import * from", "= os.environ['dataproc_slave_instance_type'] dataproc_cluster['config']['masterConfig']['numInstances'] = int(os.environ['dataproc_master_count']) dataproc_cluster['config']['workerConfig']['numInstances'] = int(os.environ['dataproc_slave_count']) if int(os.environ['dataproc_preemptible_count'])", "= { os.environ['notebook_instance_name']: \"not-configured\", \"name\": dataproc_conf['cluster_name'], \"sbn\": dataproc_conf['service_base_name'], \"user\": dataproc_conf['edge_user_name'],", "= '{}-{}-bucket'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['release_label'] = os.environ['dataproc_version'] dataproc_conf['cluster_labels'] = { os.environ['notebook_instance_name']:", "= dataproc_conf['cluster_labels'] dataproc_cluster['config']['configBucket'] = dataproc_conf['bucket_name'] dataproc_cluster['config']['gceClusterConfig']['serviceAccount'] = service_account_email dataproc_cluster['config']['gceClusterConfig']['zoneUri'] =", "__name__ == \"__main__\": local_log_filename = \"{}_{}_{}.log\".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id']) local_log_filepath =", "local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])) except Exception as err: print('Error: {0}'.format(err)) append_result(\"Failed to", "KIND, either 
express or implied. See the License for the", "print(\"Will create exploratory environment with edge node as access point", "int(os.environ['dataproc_master_count']) dataproc_cluster['config']['workerConfig']['numInstances'] = int(os.environ['dataproc_slave_count']) if int(os.environ['dataproc_preemptible_count']) != 0: dataproc_cluster['config']['secondaryWorkerConfig']['numInstances'] =", "'{2}'\".format(dataproc_conf['region'], dataproc_conf['bucket_name'], json.dumps(dataproc_cluster)) try: local(\"~/scripts/{}.py {}\".format('dataengine-service_create', params)) except: traceback.print_exc() raise", "\"dlab\", \"computational_name\": dataproc_conf['computational_name'] } dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) service_account_email =", "Exception as err: print('Error: {0}'.format(err)) append_result(\"Failed to create Dataproc Cluster.\",", "dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name']) dataproc_conf['region'] = os.environ['gcp_region'] dataproc_conf['zone'] = os.environ['gcp_zone']", "local(\"echo Waiting for changes to propagate; sleep 10\") dataproc_cluster =", "Edge node is unavailable! Aborting...') ssn_hostname = GCPMeta().get_private_ip_address(dataproc_conf['service_base_name'] + '-ssn')", "\"/\" + local_log_filename logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s', level=logging.INFO, filename=local_log_filepath) try: os.environ['exploratory_name']", "or implied. See the License for the # specific language", "express or implied. 
See the License for the # specific", "= (os.environ['computational_name']).lower().replace('_', '-') except: dataproc_conf['computational_name'] = '' dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_',", "dataproc_conf['subnet'] dataproc_cluster['config']['masterConfig']['machineTypeUri'] = os.environ['dataproc_master_instance_type'] dataproc_cluster['config']['workerConfig']['machineTypeUri'] = os.environ['dataproc_slave_instance_type'] dataproc_cluster['config']['masterConfig']['numInstances'] = int(os.environ['dataproc_master_count'])", "dataproc_cluster['config']['masterConfig']['machineTypeUri'] = os.environ['dataproc_master_instance_type'] dataproc_cluster['config']['workerConfig']['machineTypeUri'] = os.environ['dataproc_slave_instance_type'] dataproc_cluster['config']['masterConfig']['numInstances'] = int(os.environ['dataproc_master_count']) dataproc_cluster['config']['workerConfig']['numInstances']", "dataproc_conf['edge_user_name']) dataproc_conf['bucket_name'] = '{}-{}-bucket'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['release_label'] = os.environ['dataproc_version'] dataproc_conf['cluster_labels'] =", "= os.environ['gcp_zone'] dataproc_conf['subnet'] = '{0}-{1}-subnet'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'],", "dataproc_conf['edge_user_name'], dataproc_conf['exploratory_name'], dataproc_conf['computational_name']) dataproc_conf['cluster_tag'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['bucket_name'] = '{}-{}-bucket'.format(dataproc_conf['service_base_name'],", "!= 'RUNNING': logging.info('ERROR: Edge node is unavailable! 
Aborting...') print('ERROR: Edge", "= dataproc_conf['bucket_name'] dataproc_cluster['config']['gceClusterConfig']['serviceAccount'] = service_account_email dataproc_cluster['config']['gceClusterConfig']['zoneUri'] = dataproc_conf['zone'] dataproc_cluster['config']['gceClusterConfig']['subnetworkUri'] =", "the # specific language governing permissions and limitations # under", "= int(os.environ['dataproc_preemptible_count']) else: del dataproc_cluster['config']['secondaryWorkerConfig'] dataproc_cluster['config']['softwareConfig']['imageVersion'] = dataproc_conf['release_label'] ssh_user_pubkey =", "* import sys import os import uuid import logging from", "= <EMAIL>\".format(dataproc_conf['dataproc_service_account_name'], os.environ['gcp_project_id']) dataproc_conf['edge_instance_hostname'] = '{0}-{1}-edge'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user']", "= os.environ['dataproc_master_instance_type'] dataproc_cluster['config']['workerConfig']['machineTypeUri'] = os.environ['dataproc_slave_instance_type'] dataproc_cluster['config']['masterConfig']['numInstances'] = int(os.environ['dataproc_master_count']) dataproc_cluster['config']['workerConfig']['numInstances'] =", "dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) service_account_email = <EMAIL>\".format(dataproc_conf['dataproc_service_account_name'], os.environ['gcp_project_id']) dataproc_conf['edge_instance_hostname'] =", "key.publickey().exportKey(\"OpenSSH\") dataproc_cluster['config']['gceClusterConfig']['metadata']['ssh-keys'] = '{0}:{1}\\n{0}:{2}'.format(dataproc_conf['dlab_ssh_user'], ssh_user_pubkey, ssh_admin_pubkey) dataproc_cluster['config']['gceClusterConfig']['tags'][0] = dataproc_conf['cluster_tag'] try:", "may obtain a copy of the License at # #", "(os.environ['exploratory_name']).lower().replace('_', '-') except: 
dataproc_conf['exploratory_name'] = '' try: dataproc_conf['computational_name'] = (os.environ['computational_name']).lower().replace('_',", "os.environ['dataproc_version'] dataproc_conf['cluster_labels'] = { os.environ['notebook_instance_name']: \"not-configured\", \"name\": dataproc_conf['cluster_name'], \"sbn\": dataproc_conf['service_base_name'],", "Cluster]') print('[Creating Dataproc Cluster]') params = \"--region {0} --bucket {1}", "The ASF licenses this file # to you under the", "import RSA if __name__ == \"__main__\": local_log_filename = \"{}_{}_{}.log\".format(os.environ['conf_resource'], os.environ['edge_user_name'],", "ssh_user_pubkey, ssh_admin_pubkey) dataproc_cluster['config']['gceClusterConfig']['tags'][0] = dataproc_conf['cluster_tag'] try: logging.info('[Creating Dataproc Cluster]') print('[Creating", "%(message)s', level=logging.INFO, filename=local_log_filepath) try: os.environ['exploratory_name'] except: os.environ['exploratory_name'] = '' if", "# Licensed to the Apache Software Foundation (ASF) under one", "exploratory environment with edge node as access point as following:", "Crypto.PublicKey import RSA if __name__ == \"__main__\": local_log_filename = \"{}_{}_{}.log\".format(os.environ['conf_resource'],", "/response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])) local(\"echo Waiting for changes to propagate; sleep 10\") dataproc_cluster", "node is unavailable! 
Aborting...') ssn_hostname = GCPMeta().get_private_ip_address(dataproc_conf['service_base_name'] + '-ssn') put_resource_status('edge',", "10\") dataproc_cluster = json.loads(open('/root/templates/dataengine-service_cluster.json').read().decode('utf-8-sig')) dataproc_cluster['projectId'] = os.environ['gcp_project_id'] dataproc_cluster['clusterName'] = dataproc_conf['cluster_name']", "dataproc_cluster['config']['workerConfig']['machineTypeUri'] = os.environ['dataproc_slave_instance_type'] dataproc_cluster['config']['masterConfig']['numInstances'] = int(os.environ['dataproc_master_count']) dataproc_cluster['config']['workerConfig']['numInstances'] = int(os.environ['dataproc_slave_count']) if", "law or agreed to in writing, # software distributed under", "Foundation (ASF) under one # or more contributor license agreements.", "'' try: dataproc_conf['computational_name'] = (os.environ['computational_name']).lower().replace('_', '-') except: dataproc_conf['computational_name'] = ''", "node is unavailable! Aborting...') print('ERROR: Edge node is unavailable! Aborting...')", "dataproc_conf['subnet'] = '{0}-{1}-subnet'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'], dataproc_conf['exploratory_name'], dataproc_conf['computational_name'])", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "Software Foundation (ASF) under one # or more contributor license", "is unavailable\") sys.exit(1) print(\"Will create exploratory environment with edge node", "point as following: \".format(json.dumps(dataproc_conf, sort_keys=True, indent=4, separators=(',', ': ')))) logging.info(json.dumps(dataproc_conf))", "# regarding copyright ownership. 
The ASF licenses this file #", "'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'], ssn_hostname) append_result(\"Edge node is unavailable\") sys.exit(1) print(\"Will", "dataproc_cluster['config']['gceClusterConfig']['tags'][0] = dataproc_conf['cluster_tag'] try: logging.info('[Creating Dataproc Cluster]') print('[Creating Dataproc Cluster]')", "in compliance # with the License. You may obtain a", "# to you under the Apache License, Version 2.0 (the", "License for the # specific language governing permissions and limitations", "edge node as access point as following: \".format(json.dumps(dataproc_conf, sort_keys=True, indent=4,", "as following: \".format(json.dumps(dataproc_conf, sort_keys=True, indent=4, separators=(',', ': ')))) logging.info(json.dumps(dataproc_conf)) local('touch", "Aborting...') ssn_hostname = GCPMeta().get_private_ip_address(dataproc_conf['service_base_name'] + '-ssn') put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],", "OR CONDITIONS OF ANY # KIND, either express or implied.", "fabric.api import * from dlab.fab import * from dlab.meta_lib import", "append_result(\"Edge node is unavailable\") sys.exit(1) print(\"Will create exploratory environment with", "import os import uuid import logging from Crypto.PublicKey import RSA", "Aborting...') print('ERROR: Edge node is unavailable! 
Aborting...') ssn_hostname = GCPMeta().get_private_ip_address(dataproc_conf['service_base_name']", "unavailable\") sys.exit(1) print(\"Will create exploratory environment with edge node as", "= '' dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-') dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')", "except: dataproc_conf['exploratory_name'] = '' try: dataproc_conf['computational_name'] = (os.environ['computational_name']).lower().replace('_', '-') except:", "this file # to you under the Apache License, Version", "{0} --bucket {1} --params '{2}'\".format(dataproc_conf['region'], dataproc_conf['bucket_name'], json.dumps(dataproc_cluster)) try: local(\"~/scripts/{}.py {}\".format('dataengine-service_create',", "copyright ownership. The ASF licenses this file # to you", "\"{}_{}_{}.log\".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id']) local_log_filepath = \"/logs/\" + os.environ['conf_resource'] + \"/\"", "dataproc_conf['edge_user_name'], \"notebook_name\": os.environ['notebook_instance_name'], \"product\": \"dlab\", \"computational_name\": dataproc_conf['computational_name'] } dataproc_conf['dataproc_service_account_name'] =", "in writing, # software distributed under the License is distributed", "try: dataproc_conf['computational_name'] = (os.environ['computational_name']).lower().replace('_', '-') except: dataproc_conf['computational_name'] = '' dataproc_conf['service_base_name']", "try: logging.info('[Creating Dataproc Cluster]') print('[Creating Dataproc Cluster]') params = \"--region", "under the License. 
# # ****************************************************************************** import json import time", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "os.environ['conf_key_name'] dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name']) dataproc_conf['region'] = os.environ['gcp_region'] dataproc_conf['zone'] =", "License is distributed on an # \"AS IS\" BASIS, WITHOUT", "json import time from fabric.api import * from dlab.fab import", "dataproc_cluster['projectId'] = os.environ['gcp_project_id'] dataproc_cluster['clusterName'] = dataproc_conf['cluster_name'] dataproc_cluster['labels'] = dataproc_conf['cluster_labels'] dataproc_cluster['config']['configBucket']", "'rb').read()) ssh_admin_pubkey = key.publickey().exportKey(\"OpenSSH\") dataproc_cluster['config']['gceClusterConfig']['metadata']['ssh-keys'] = '{0}:{1}\\n{0}:{2}'.format(dataproc_conf['dlab_ssh_user'], ssh_user_pubkey, ssh_admin_pubkey) dataproc_cluster['config']['gceClusterConfig']['tags'][0]", "# \"License\"); you may not use this file except in", "from fabric.api import * from dlab.fab import * from dlab.meta_lib", "separators=(',', ': ')))) logging.info(json.dumps(dataproc_conf)) local('touch /response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])) local(\"echo Waiting for changes", "# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "service_account_email = <EMAIL>\".format(dataproc_conf['dataproc_service_account_name'], os.environ['gcp_project_id']) dataproc_conf['edge_instance_hostname'] = '{0}-{1}-edge'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['dlab_ssh_user'] =", "to the Apache Software Foundation (ASF) under one # or", "\"License\"); you may not use this file except in compliance", "dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-') dataproc_conf['key_name'] = 
os.environ['conf_key_name'] dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'],", "= '{0}-{1}-subnet'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'], dataproc_conf['exploratory_name'], dataproc_conf['computational_name']) dataproc_conf['cluster_tag']", "dataproc_conf['service_base_name'], \"user\": dataproc_conf['edge_user_name'], \"notebook_name\": os.environ['notebook_instance_name'], \"product\": \"dlab\", \"computational_name\": dataproc_conf['computational_name'] }", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "import sys import os import uuid import logging from Crypto.PublicKey", "'-') except: dataproc_conf['exploratory_name'] = '' try: dataproc_conf['computational_name'] = (os.environ['computational_name']).lower().replace('_', '-')", "node as access point as following: \".format(json.dumps(dataproc_conf, sort_keys=True, indent=4, separators=(',',", "***************************************************************************** # # Licensed to the Apache Software Foundation (ASF)", "# distributed with this work for additional information # regarding", "writing, # software distributed under the License is distributed on", "tags') dataproc_conf = dict() try: dataproc_conf['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-') except:", "dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user'] edge_status = GCPMeta().get_instance_status(dataproc_conf['edge_instance_hostname']) if edge_status != 'RUNNING':", "= '{0}:{1}\\n{0}:{2}'.format(dataproc_conf['dlab_ssh_user'], ssh_user_pubkey, ssh_admin_pubkey) dataproc_cluster['config']['gceClusterConfig']['tags'][0] = dataproc_conf['cluster_tag'] try: logging.info('[Creating Dataproc", "dataproc_conf['zone'] = os.environ['gcp_zone'] dataproc_conf['subnet'] = 
'{0}-{1}-subnet'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}'.format(dataproc_conf['service_base_name'],", "and tags') dataproc_conf = dict() try: dataproc_conf['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')", "'{0}-{1}-des-{2}-{3}'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'], dataproc_conf['exploratory_name'], dataproc_conf['computational_name']) dataproc_conf['cluster_tag'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['bucket_name'] =", "= '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name']) dataproc_conf['region'] = os.environ['gcp_region'] dataproc_conf['zone'] = os.environ['gcp_zone'] dataproc_conf['subnet']", "CONDITIONS OF ANY # KIND, either express or implied. See", "'-') dataproc_conf['key_name'] = os.environ['conf_key_name'] dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name']) dataproc_conf['region'] =", "os.environ['notebook_instance_name'], \"product\": \"dlab\", \"computational_name\": dataproc_conf['computational_name'] } dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'])", "for additional information # regarding copyright ownership. 
The ASF licenses", "indent=4, separators=(',', ': ')))) logging.info(json.dumps(dataproc_conf)) local('touch /response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])) local(\"echo Waiting for", "the Apache Software Foundation (ASF) under one # or more", "': ')))) logging.info(json.dumps(dataproc_conf)) local('touch /response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])) local(\"echo Waiting for changes to", "print('[Creating Dataproc Cluster]') params = \"--region {0} --bucket {1} --params", "# # Unless required by applicable law or agreed to", "Version 2.0 (the # \"License\"); you may not use this", "one # or more contributor license agreements. See the NOTICE", "(os.environ['conf_service_base_name']).lower().replace('_', '-') dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-') dataproc_conf['key_name'] = os.environ['conf_key_name'] dataproc_conf['key_path']", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "dataproc_conf['cluster_labels'] dataproc_cluster['config']['configBucket'] = dataproc_conf['bucket_name'] dataproc_cluster['config']['gceClusterConfig']['serviceAccount'] = service_account_email dataproc_cluster['config']['gceClusterConfig']['zoneUri'] = dataproc_conf['zone']", "local('touch /response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])) local(\"echo Waiting for changes to propagate; sleep 10\")", "local(\"~/scripts/{}.py {}\".format('dataengine-service_create', params)) except: traceback.print_exc() raise Exception keyfile_name = \"/root/keys/{}.pem\".format(dataproc_conf['key_name'])", "dataproc_cluster['config']['gceClusterConfig']['zoneUri'] = dataproc_conf['zone'] dataproc_cluster['config']['gceClusterConfig']['subnetworkUri'] = dataproc_conf['subnet'] dataproc_cluster['config']['masterConfig']['machineTypeUri'] = os.environ['dataproc_master_instance_type'] dataproc_cluster['config']['workerConfig']['machineTypeUri']", 
"dataproc_conf['exploratory_name'] = '' try: dataproc_conf['computational_name'] = (os.environ['computational_name']).lower().replace('_', '-') except: dataproc_conf['computational_name']", "except in compliance # with the License. You may obtain", "logging.info('[Creating Dataproc Cluster]') print('[Creating Dataproc Cluster]') params = \"--region {0}", "int(os.environ['dataproc_slave_count']) if int(os.environ['dataproc_preemptible_count']) != 0: dataproc_cluster['config']['secondaryWorkerConfig']['numInstances'] = int(os.environ['dataproc_preemptible_count']) else: del", "<filename>infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py #!/usr/bin/python # ***************************************************************************** # # Licensed to the Apache", "logging from Crypto.PublicKey import RSA if __name__ == \"__main__\": local_log_filename", "NOTICE file # distributed with this work for additional information", "dataproc_conf['cluster_tag'] try: logging.info('[Creating Dataproc Cluster]') print('[Creating Dataproc Cluster]') params =", "Waiting for changes to propagate; sleep 10\") dataproc_cluster = json.loads(open('/root/templates/dataengine-service_cluster.json').read().decode('utf-8-sig'))", "this file except in compliance # with the License. 
You", "+ \"/\" + local_log_filename logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s', level=logging.INFO, filename=local_log_filepath) try:", "os.path.exists('/response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])): time.sleep(30) print('Generating infrastructure names and tags') dataproc_conf = dict()", "dataproc_conf['zone'] dataproc_cluster['config']['gceClusterConfig']['subnetworkUri'] = dataproc_conf['subnet'] dataproc_cluster['config']['masterConfig']['machineTypeUri'] = os.environ['dataproc_master_instance_type'] dataproc_cluster['config']['workerConfig']['machineTypeUri'] = os.environ['dataproc_slave_instance_type']", "try: local(\"~/scripts/{}.py {}\".format('dataengine-service_create', params)) except: traceback.print_exc() raise Exception keyfile_name =", "license agreements. See the NOTICE file # distributed with this", "= '{0}-{1}-edge'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user'] edge_status = GCPMeta().get_instance_status(dataproc_conf['edge_instance_hostname']) if", "os.environ['conf_os_user'] edge_status = GCPMeta().get_instance_status(dataproc_conf['edge_instance_hostname']) if edge_status != 'RUNNING': logging.info('ERROR: Edge", "required by applicable law or agreed to in writing, #", "'RUNNING': logging.info('ERROR: Edge node is unavailable! 
Aborting...') print('ERROR: Edge node", "dataproc_cluster['clusterName'] = dataproc_conf['cluster_name'] dataproc_cluster['labels'] = dataproc_conf['cluster_labels'] dataproc_cluster['config']['configBucket'] = dataproc_conf['bucket_name'] dataproc_cluster['config']['gceClusterConfig']['serviceAccount']", "Dataproc Cluster]') params = \"--region {0} --bucket {1} --params '{2}'\".format(dataproc_conf['region'],", "os.environ['edge_user_name'] + '.pub').read() key = RSA.importKey(open(dataproc_conf['key_path'], 'rb').read()) ssh_admin_pubkey = key.publickey().exportKey(\"OpenSSH\")", "put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'], ssn_hostname) append_result(\"Edge node is unavailable\") sys.exit(1)", "dataproc_conf['cluster_labels'] = { os.environ['notebook_instance_name']: \"not-configured\", \"name\": dataproc_conf['cluster_name'], \"sbn\": dataproc_conf['service_base_name'], \"user\":", "os.environ['conf_resource'] + \"/\" + local_log_filename logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s', level=logging.INFO, filename=local_log_filepath)", "filename=local_log_filepath) try: os.environ['exploratory_name'] except: os.environ['exploratory_name'] = '' if os.path.exists('/response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])): time.sleep(30)", "os.environ['gcp_zone'] dataproc_conf['subnet'] = '{0}-{1}-subnet'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'], dataproc_conf['exploratory_name'],", "the License for the # specific language governing permissions and", "dataproc_cluster = json.loads(open('/root/templates/dataengine-service_cluster.json').read().decode('utf-8-sig')) dataproc_cluster['projectId'] = os.environ['gcp_project_id'] dataproc_cluster['clusterName'] = dataproc_conf['cluster_name'] 
dataproc_cluster['labels']", "raise Exception keyfile_name = \"/root/keys/{}.pem\".format(dataproc_conf['key_name']) local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])) except Exception as", "ANY # KIND, either express or implied. See the License", "the License is distributed on an # \"AS IS\" BASIS,", "} dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) service_account_email = <EMAIL>\".format(dataproc_conf['dataproc_service_account_name'], os.environ['gcp_project_id']) dataproc_conf['edge_instance_hostname']", "from dlab.fab import * from dlab.meta_lib import * from dlab.actions_lib", "= int(os.environ['dataproc_master_count']) dataproc_cluster['config']['workerConfig']['numInstances'] = int(os.environ['dataproc_slave_count']) if int(os.environ['dataproc_preemptible_count']) != 0: dataproc_cluster['config']['secondaryWorkerConfig']['numInstances']", "local_log_filename = \"{}_{}_{}.log\".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id']) local_log_filepath = \"/logs/\" + os.environ['conf_resource']", "# # Licensed to the Apache Software Foundation (ASF) under", "'-ssn') put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'], ssn_hostname) append_result(\"Edge node is unavailable\")", "{0}'.format(err)) append_result(\"Failed to create Dataproc Cluster.\", str(err)) local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])) sys.exit(1)", "= \"{}_{}_{}.log\".format(os.environ['conf_resource'], os.environ['edge_user_name'], os.environ['request_id']) local_log_filepath = \"/logs/\" + os.environ['conf_resource'] +", "ssh_admin_pubkey = key.publickey().exportKey(\"OpenSSH\") dataproc_cluster['config']['gceClusterConfig']['metadata']['ssh-keys'] = '{0}:{1}\\n{0}:{2}'.format(dataproc_conf['dlab_ssh_user'], ssh_user_pubkey, ssh_admin_pubkey) 
dataproc_cluster['config']['gceClusterConfig']['tags'][0] =", "governing permissions and limitations # under the License. # #", "dataproc_conf['cluster_name'], \"sbn\": dataproc_conf['service_base_name'], \"user\": dataproc_conf['edge_user_name'], \"notebook_name\": os.environ['notebook_instance_name'], \"product\": \"dlab\", \"computational_name\":", "\"notebook_name\": os.environ['notebook_instance_name'], \"product\": \"dlab\", \"computational_name\": dataproc_conf['computational_name'] } dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'],", "not use this file except in compliance # with the", "\"not-configured\", \"name\": dataproc_conf['cluster_name'], \"sbn\": dataproc_conf['service_base_name'], \"user\": dataproc_conf['edge_user_name'], \"notebook_name\": os.environ['notebook_instance_name'], \"product\":", "import uuid import logging from Crypto.PublicKey import RSA if __name__", "try: os.environ['exploratory_name'] except: os.environ['exploratory_name'] = '' if os.path.exists('/response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])): time.sleep(30) print('Generating", "'' if os.path.exists('/response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])): time.sleep(30) print('Generating infrastructure names and tags') dataproc_conf", "dict() try: dataproc_conf['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-') except: dataproc_conf['exploratory_name'] = ''", "Unless required by applicable law or agreed to in writing,", "'{0}-{1}-edge'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user'] edge_status = GCPMeta().get_instance_status(dataproc_conf['edge_instance_hostname']) if edge_status", "(ASF) under one # or more contributor license agreements. 
See", "dataproc_conf['key_name'] = os.environ['conf_key_name'] dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name']) dataproc_conf['region'] = os.environ['gcp_region']", "# or more contributor license agreements. See the NOTICE file", "agreed to in writing, # software distributed under the License", "= '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['bucket_name'] = '{}-{}-bucket'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['release_label'] = os.environ['dataproc_version']", "dataproc_conf['release_label'] = os.environ['dataproc_version'] dataproc_conf['cluster_labels'] = { os.environ['notebook_instance_name']: \"not-configured\", \"name\": dataproc_conf['cluster_name'],", "from dlab.actions_lib import * import sys import os import uuid", "* from dlab.meta_lib import * from dlab.actions_lib import * import", "= dict() try: dataproc_conf['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-') except: dataproc_conf['exploratory_name'] =", "= dataproc_conf['subnet'] dataproc_cluster['config']['masterConfig']['machineTypeUri'] = os.environ['dataproc_master_instance_type'] dataproc_cluster['config']['workerConfig']['machineTypeUri'] = os.environ['dataproc_slave_instance_type'] dataproc_cluster['config']['masterConfig']['numInstances'] =", "os.environ['dataproc_master_instance_type'] dataproc_cluster['config']['workerConfig']['machineTypeUri'] = os.environ['dataproc_slave_instance_type'] dataproc_cluster['config']['masterConfig']['numInstances'] = int(os.environ['dataproc_master_count']) dataproc_cluster['config']['workerConfig']['numInstances'] = int(os.environ['dataproc_slave_count'])", "(the # \"License\"); you may not use this file except", "if __name__ == \"__main__\": local_log_filename = \"{}_{}_{}.log\".format(os.environ['conf_resource'], os.environ['edge_user_name'], 
os.environ['request_id']) local_log_filepath", "\".format(json.dumps(dataproc_conf, sort_keys=True, indent=4, separators=(',', ': ')))) logging.info(json.dumps(dataproc_conf)) local('touch /response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])) local(\"echo", "!= 0: dataproc_cluster['config']['secondaryWorkerConfig']['numInstances'] = int(os.environ['dataproc_preemptible_count']) else: del dataproc_cluster['config']['secondaryWorkerConfig'] dataproc_cluster['config']['softwareConfig']['imageVersion'] =", "ASF licenses this file # to you under the Apache", "os.environ['conf_key_name']) dataproc_conf['region'] = os.environ['gcp_region'] dataproc_conf['zone'] = os.environ['gcp_zone'] dataproc_conf['subnet'] = '{0}-{1}-subnet'.format(dataproc_conf['service_base_name'],", "on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "try: dataproc_conf['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-') except: dataproc_conf['exploratory_name'] = '' try:", "+ '-ssn') put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'], ssn_hostname) append_result(\"Edge node is", "= os.environ['conf_key_name'] dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name']) dataproc_conf['region'] = os.environ['gcp_region'] dataproc_conf['zone']", "ownership. 
The ASF licenses this file # to you under", "os.environ['request_id']) local_log_filepath = \"/logs/\" + os.environ['conf_resource'] + \"/\" + local_log_filename", "= os.environ['gcp_region'] dataproc_conf['zone'] = os.environ['gcp_zone'] dataproc_conf['subnet'] = '{0}-{1}-subnet'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['cluster_name']", "import logging from Crypto.PublicKey import RSA if __name__ == \"__main__\":", "'{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) service_account_email = <EMAIL>\".format(dataproc_conf['dataproc_service_account_name'], os.environ['gcp_project_id']) dataproc_conf['edge_instance_hostname'] = '{0}-{1}-edge'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'])", "# # ****************************************************************************** import json import time from fabric.api import", "ssn_hostname) append_result(\"Edge node is unavailable\") sys.exit(1) print(\"Will create exploratory environment", "(os.environ['computational_name']).lower().replace('_', '-') except: dataproc_conf['computational_name'] = '' dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "#!/usr/bin/python # ***************************************************************************** # # Licensed to the Apache Software", "logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s', level=logging.INFO, filename=local_log_filepath) try: os.environ['exploratory_name'] except: os.environ['exploratory_name'] =", "with the License. 
You may obtain a copy of the", "= RSA.importKey(open(dataproc_conf['key_path'], 'rb').read()) ssh_admin_pubkey = key.publickey().exportKey(\"OpenSSH\") dataproc_cluster['config']['gceClusterConfig']['metadata']['ssh-keys'] = '{0}:{1}\\n{0}:{2}'.format(dataproc_conf['dlab_ssh_user'], ssh_user_pubkey,", "import * from dlab.meta_lib import * from dlab.actions_lib import *", "applicable law or agreed to in writing, # software distributed", "print('Generating infrastructure names and tags') dataproc_conf = dict() try: dataproc_conf['exploratory_name']", "+ os.environ['edge_user_name'] + '.pub').read() key = RSA.importKey(open(dataproc_conf['key_path'], 'rb').read()) ssh_admin_pubkey =", "os.environ['ssn_dlab_path'], os.environ['conf_os_user'], ssn_hostname) append_result(\"Edge node is unavailable\") sys.exit(1) print(\"Will create", "is distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES", "file # to you under the Apache License, Version 2.0", "\"/logs/\" + os.environ['conf_resource'] + \"/\" + local_log_filename logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',", "import * import sys import os import uuid import logging", "# with the License. You may obtain a copy of", "os.environ['gcp_project_id'] dataproc_cluster['clusterName'] = dataproc_conf['cluster_name'] dataproc_cluster['labels'] = dataproc_conf['cluster_labels'] dataproc_cluster['config']['configBucket'] = dataproc_conf['bucket_name']", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "--params '{2}'\".format(dataproc_conf['region'], dataproc_conf['bucket_name'], json.dumps(dataproc_cluster)) try: local(\"~/scripts/{}.py {}\".format('dataengine-service_create', params)) except: traceback.print_exc()", "language governing permissions and limitations # under the License. #", "permissions and limitations # under the License. 
# # ******************************************************************************", "'.pub').read() key = RSA.importKey(open(dataproc_conf['key_path'], 'rb').read()) ssh_admin_pubkey = key.publickey().exportKey(\"OpenSSH\") dataproc_cluster['config']['gceClusterConfig']['metadata']['ssh-keys'] =", "software distributed under the License is distributed on an #", "uuid import logging from Crypto.PublicKey import RSA if __name__ ==", "Licensed to the Apache Software Foundation (ASF) under one #", "= os.environ['conf_os_user'] edge_status = GCPMeta().get_instance_status(dataproc_conf['edge_instance_hostname']) if edge_status != 'RUNNING': logging.info('ERROR:", "os.environ['edge_user_name'], os.environ['request_id']) local_log_filepath = \"/logs/\" + os.environ['conf_resource'] + \"/\" +", "ssn_hostname = GCPMeta().get_private_ip_address(dataproc_conf['service_base_name'] + '-ssn') put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'], ssn_hostname)", "local_log_filename logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s', level=logging.INFO, filename=local_log_filepath) try: os.environ['exploratory_name'] except: os.environ['exploratory_name']", "under one # or more contributor license agreements. See the", "if edge_status != 'RUNNING': logging.info('ERROR: Edge node is unavailable! Aborting...')", "the License. # # ****************************************************************************** import json import time from", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "--bucket {1} --params '{2}'\".format(dataproc_conf['region'], dataproc_conf['bucket_name'], json.dumps(dataproc_cluster)) try: local(\"~/scripts/{}.py {}\".format('dataengine-service_create', params))", "Cluster]') params = \"--region {0} --bucket {1} --params '{2}'\".format(dataproc_conf['region'], dataproc_conf['bucket_name'],", "information # regarding copyright ownership. 
The ASF licenses this file", "for changes to propagate; sleep 10\") dataproc_cluster = json.loads(open('/root/templates/dataengine-service_cluster.json').read().decode('utf-8-sig')) dataproc_cluster['projectId']", "the Apache License, Version 2.0 (the # \"License\"); you may", "GCPMeta().get_private_ip_address(dataproc_conf['service_base_name'] + '-ssn') put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'], ssn_hostname) append_result(\"Edge node", "infrastructure names and tags') dataproc_conf = dict() try: dataproc_conf['exploratory_name'] =", "dataproc_conf['computational_name'] = (os.environ['computational_name']).lower().replace('_', '-') except: dataproc_conf['computational_name'] = '' dataproc_conf['service_base_name'] =", "ssh_user_pubkey = open(os.environ['conf_key_dir'] + os.environ['edge_user_name'] + '.pub').read() key = RSA.importKey(open(dataproc_conf['key_path'],", "else: del dataproc_cluster['config']['secondaryWorkerConfig'] dataproc_cluster['config']['softwareConfig']['imageVersion'] = dataproc_conf['release_label'] ssh_user_pubkey = open(os.environ['conf_key_dir'] +", "access point as following: \".format(json.dumps(dataproc_conf, sort_keys=True, indent=4, separators=(',', ': '))))", "dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-') dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-') dataproc_conf['key_name'] =", "you under the Apache License, Version 2.0 (the # \"License\");", "# KIND, either express or implied. See the License for", "print('ERROR: Edge node is unavailable! Aborting...') ssn_hostname = GCPMeta().get_private_ip_address(dataproc_conf['service_base_name'] +", "agreements. 
See the NOTICE file # distributed with this work", "licenses this file # to you under the Apache License,", "= \"/root/keys/{}.pem\".format(dataproc_conf['key_name']) local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])) except Exception as err: print('Error: {0}'.format(err))", "+ os.environ['conf_resource'] + \"/\" + local_log_filename logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s', level=logging.INFO,", "by applicable law or agreed to in writing, # software", "import * from dlab.fab import * from dlab.meta_lib import *", "# Unless required by applicable law or agreed to in", "dataproc_cluster['config']['gceClusterConfig']['serviceAccount'] = service_account_email dataproc_cluster['config']['gceClusterConfig']['zoneUri'] = dataproc_conf['zone'] dataproc_cluster['config']['gceClusterConfig']['subnetworkUri'] = dataproc_conf['subnet'] dataproc_cluster['config']['masterConfig']['machineTypeUri']", "RSA.importKey(open(dataproc_conf['key_path'], 'rb').read()) ssh_admin_pubkey = key.publickey().exportKey(\"OpenSSH\") dataproc_cluster['config']['gceClusterConfig']['metadata']['ssh-keys'] = '{0}:{1}\\n{0}:{2}'.format(dataproc_conf['dlab_ssh_user'], ssh_user_pubkey, ssh_admin_pubkey)", "traceback.print_exc() raise Exception keyfile_name = \"/root/keys/{}.pem\".format(dataproc_conf['key_name']) local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])) except Exception", "propagate; sleep 10\") dataproc_cluster = json.loads(open('/root/templates/dataengine-service_cluster.json').read().decode('utf-8-sig')) dataproc_cluster['projectId'] = os.environ['gcp_project_id'] dataproc_cluster['clusterName']", "/response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])) except Exception as err: print('Error: {0}'.format(err)) append_result(\"Failed to create", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND,", "'{}-{}-bucket'.format(dataproc_conf['service_base_name'], 
dataproc_conf['edge_user_name']) dataproc_conf['release_label'] = os.environ['dataproc_version'] dataproc_conf['cluster_labels'] = { os.environ['notebook_instance_name']: \"not-configured\",", "dataproc_cluster['config']['masterConfig']['numInstances'] = int(os.environ['dataproc_master_count']) dataproc_cluster['config']['workerConfig']['numInstances'] = int(os.environ['dataproc_slave_count']) if int(os.environ['dataproc_preemptible_count']) != 0:", "License. You may obtain a copy of the License at", "You may obtain a copy of the License at #", "dataproc_conf['edge_user_name']) dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user'] edge_status = GCPMeta().get_instance_status(dataproc_conf['edge_instance_hostname']) if edge_status !=", "dataproc_conf['edge_instance_hostname'] = '{0}-{1}-edge'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user'] edge_status = GCPMeta().get_instance_status(dataproc_conf['edge_instance_hostname'])", "environment with edge node as access point as following: \".format(json.dumps(dataproc_conf,", "dataproc_conf['computational_name'] } dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name']) service_account_email = <EMAIL>\".format(dataproc_conf['dataproc_service_account_name'], os.environ['gcp_project_id'])", "sort_keys=True, indent=4, separators=(',', ': ')))) logging.info(json.dumps(dataproc_conf)) local('touch /response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])) local(\"echo Waiting", "compliance # with the License. 
You may obtain a copy", "import * from dlab.actions_lib import * import sys import os", "+ local_log_filename logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s', level=logging.INFO, filename=local_log_filepath) try: os.environ['exploratory_name'] except:", "+ '.pub').read() key = RSA.importKey(open(dataproc_conf['key_path'], 'rb').read()) ssh_admin_pubkey = key.publickey().exportKey(\"OpenSSH\") dataproc_cluster['config']['gceClusterConfig']['metadata']['ssh-keys']", "node is unavailable\") sys.exit(1) print(\"Will create exploratory environment with edge", "dataproc_cluster['config']['secondaryWorkerConfig'] dataproc_cluster['config']['softwareConfig']['imageVersion'] = dataproc_conf['release_label'] ssh_user_pubkey = open(os.environ['conf_key_dir'] + os.environ['edge_user_name'] +" ]
[ "print(direction, delta) if direction == 'forward': h += delta d", "2 \"\"\" def part1(lines): h = 0 d = 0", "if direction == 'forward': h += delta elif direction ==", "lines: direction, delta = line.split() delta = int(delta) print(direction, delta)", "elif direction == 'down': a += delta elif direction ==", "+= delta elif direction == 'up': d -= delta print(h*d)", "'up': a -= delta print(h*d) if __name__ == '__main__': part1(test.splitlines())", "test = \"\"\"forward 5 down 5 forward 8 up 3", "direction, delta = line.split() delta = int(delta) print(direction, delta) if", "d += (delta * a) elif direction == 'down': a", "forward 2 \"\"\" def part1(lines): h = 0 d =", "delta print(h*d) def part2(lines): h = 0 d = 0", "direction == 'forward': h += delta d += (delta *", "0 d = 0 a = 0 for line in", "== 'forward': h += delta elif direction == 'down': d", "= 0 a = 0 for line in lines: direction,", "-= delta print(h*d) if __name__ == '__main__': part1(test.splitlines()) part1(open('in02.txt').readlines()) part2(test.splitlines())", "if direction == 'forward': h += delta d += (delta", "print(h*d) def part2(lines): h = 0 d = 0 a", "= \"\"\"forward 5 down 5 forward 8 up 3 down", "== 'down': a += delta elif direction == 'up': a", "= line.split() delta = int(delta) print(direction, delta) if direction ==", "part1(lines): h = 0 d = 0 for line in", "'forward': h += delta elif direction == 'down': d +=", "int(delta) print(direction, delta) if direction == 'forward': h += delta", "5 down 5 forward 8 up 3 down 8 forward", "== 'up': a -= delta print(h*d) if __name__ == '__main__':", "delta d += (delta * a) elif direction == 'down':", "= 0 d = 0 for line in lines: direction,", "part2(lines): h = 0 d = 0 a = 0", "line.split() delta = int(delta) print(direction, delta) if direction == 'forward':", "delta elif direction == 'up': d -= delta print(h*d) def", "\"\"\" def part1(lines): h = 0 d = 0 for", "def part1(lines): h = 0 d = 0 for line", "delta elif direction == 'up': a -= delta 
print(h*d) if", "5 forward 8 up 3 down 8 forward 2 \"\"\"", "delta = int(delta) print(direction, delta) if direction == 'forward': h", "= line.split() delta = int(delta) if direction == 'forward': h", "direction == 'up': a -= delta print(h*d) if __name__ ==", "== 'up': d -= delta print(h*d) def part2(lines): h =", "elif direction == 'up': d -= delta print(h*d) def part2(lines):", "forward 8 up 3 down 8 forward 2 \"\"\" def", "direction == 'down': a += delta elif direction == 'up':", "'down': d += delta elif direction == 'up': d -=", "delta = line.split() delta = int(delta) if direction == 'forward':", "= int(delta) if direction == 'forward': h += delta elif", "delta = line.split() delta = int(delta) print(direction, delta) if direction", "in lines: direction, delta = line.split() delta = int(delta) print(direction,", "elif direction == 'up': a -= delta print(h*d) if __name__", "direction, delta = line.split() delta = int(delta) if direction ==", "d = 0 a = 0 for line in lines:", "a = 0 for line in lines: direction, delta =", "== 'forward': h += delta d += (delta * a)", "elif direction == 'down': d += delta elif direction ==", "+= delta elif direction == 'up': a -= delta print(h*d)", "delta) if direction == 'forward': h += delta d +=", "h = 0 d = 0 a = 0 for", "in lines: direction, delta = line.split() delta = int(delta) if", "+= delta d += (delta * a) elif direction ==", "0 a = 0 for line in lines: direction, delta", "up 3 down 8 forward 2 \"\"\" def part1(lines): h", "8 up 3 down 8 forward 2 \"\"\" def part1(lines):", "line.split() delta = int(delta) if direction == 'forward': h +=", "8 forward 2 \"\"\" def part1(lines): h = 0 d", "= 0 d = 0 a = 0 for line", "int(delta) if direction == 'forward': h += delta elif direction", "h += delta d += (delta * a) elif direction", "0 d = 0 for line in lines: direction, delta", "== 'down': d += delta elif direction == 'up': d", "a) elif direction == 'down': a += delta elif direction", "d += delta elif direction == 'up': d -= 
delta", "for line in lines: direction, delta = line.split() delta =", "line in lines: direction, delta = line.split() delta = int(delta)", "\"\"\"forward 5 down 5 forward 8 up 3 down 8", "def part2(lines): h = 0 d = 0 a =", "0 for line in lines: direction, delta = line.split() delta", "'forward': h += delta d += (delta * a) elif", "+= (delta * a) elif direction == 'down': a +=", "direction == 'forward': h += delta elif direction == 'down':", "+= delta elif direction == 'down': d += delta elif", "d -= delta print(h*d) def part2(lines): h = 0 d", "down 5 forward 8 up 3 down 8 forward 2", "h += delta elif direction == 'down': d += delta", "a += delta elif direction == 'up': a -= delta", "direction == 'down': d += delta elif direction == 'up':", "(delta * a) elif direction == 'down': a += delta", "lines: direction, delta = line.split() delta = int(delta) if direction", "3 down 8 forward 2 \"\"\" def part1(lines): h =", "'up': d -= delta print(h*d) def part2(lines): h = 0", "* a) elif direction == 'down': a += delta elif", "a -= delta print(h*d) if __name__ == '__main__': part1(test.splitlines()) part1(open('in02.txt').readlines())", "down 8 forward 2 \"\"\" def part1(lines): h = 0", "-= delta print(h*d) def part2(lines): h = 0 d =", "d = 0 for line in lines: direction, delta =", "h = 0 d = 0 for line in lines:", "delta print(h*d) if __name__ == '__main__': part1(test.splitlines()) part1(open('in02.txt').readlines()) part2(test.splitlines()) part2(open('in02.txt').readlines())", "delta = int(delta) if direction == 'forward': h += delta", "= 0 for line in lines: direction, delta = line.split()", "'down': a += delta elif direction == 'up': a -=", "= int(delta) print(direction, delta) if direction == 'forward': h +=", "direction == 'up': d -= delta print(h*d) def part2(lines): h", "delta elif direction == 'down': d += delta elif direction" ]
[ "[ migrations.CreateModel( name='Associations', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('association_name',", "dependencies = [ ] operations = [ migrations.CreateModel( name='Associations', fields=[", "operations = [ migrations.CreateModel( name='Associations', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "migrations, models class Migration(migrations.Migration): initial = True dependencies = [", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('association_name', models.CharField(max_length=100)), ('incharge', models.CharField(max_length=100)), ('about',", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('association_name', models.CharField(max_length=100)), ('incharge', models.CharField(max_length=100)), ('about', models.CharField(max_length=500)),", "initial = True dependencies = [ ] operations = [", "08:56 from django.db import migrations, models class Migration(migrations.Migration): initial =", "models.CharField(max_length=100)), ('incharge', models.CharField(max_length=100)), ('about', models.CharField(max_length=500)), ('contacts', models.CharField(max_length=300)), ], ), ]", "class Migration(migrations.Migration): initial = True dependencies = [ ] operations", "Generated by Django 3.1.3 on 2020-11-09 08:56 from django.db import", "name='Associations', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('association_name', models.CharField(max_length=100)), ('incharge',", "('association_name', models.CharField(max_length=100)), ('incharge', models.CharField(max_length=100)), ('about', models.CharField(max_length=500)), ('contacts', models.CharField(max_length=300)), ], ),", "# Generated by Django 3.1.3 on 2020-11-09 08:56 from django.db", "] operations = [ migrations.CreateModel( 
name='Associations', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "verbose_name='ID')), ('association_name', models.CharField(max_length=100)), ('incharge', models.CharField(max_length=100)), ('about', models.CharField(max_length=500)), ('contacts', models.CharField(max_length=300)), ],", "primary_key=True, serialize=False, verbose_name='ID')), ('association_name', models.CharField(max_length=100)), ('incharge', models.CharField(max_length=100)), ('about', models.CharField(max_length=500)), ('contacts',", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('association_name', models.CharField(max_length=100)), ('incharge', models.CharField(max_length=100)),", "Django 3.1.3 on 2020-11-09 08:56 from django.db import migrations, models", "migrations.CreateModel( name='Associations', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('association_name', models.CharField(max_length=100)),", "= [ ] operations = [ migrations.CreateModel( name='Associations', fields=[ ('id',", "[ ] operations = [ migrations.CreateModel( name='Associations', fields=[ ('id', models.AutoField(auto_created=True,", "= [ migrations.CreateModel( name='Associations', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "from django.db import migrations, models class Migration(migrations.Migration): initial = True", "on 2020-11-09 08:56 from django.db import migrations, models class Migration(migrations.Migration):", "2020-11-09 08:56 from django.db import migrations, models class Migration(migrations.Migration): initial", "3.1.3 on 2020-11-09 08:56 from django.db import migrations, models class", "django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies", "by Django 3.1.3 on 2020-11-09 08:56 from django.db import migrations,", "True dependencies = [ ] 
operations = [ migrations.CreateModel( name='Associations',", "models class Migration(migrations.Migration): initial = True dependencies = [ ]", "serialize=False, verbose_name='ID')), ('association_name', models.CharField(max_length=100)), ('incharge', models.CharField(max_length=100)), ('about', models.CharField(max_length=500)), ('contacts', models.CharField(max_length=300)),", "<reponame>ollc-code/django-back<gh_stars>0 # Generated by Django 3.1.3 on 2020-11-09 08:56 from", "= True dependencies = [ ] operations = [ migrations.CreateModel(", "Migration(migrations.Migration): initial = True dependencies = [ ] operations =", "import migrations, models class Migration(migrations.Migration): initial = True dependencies =" ]
[ "== '2007-05-17 00:00:00+00:00' assert str(t1) == '2007-06-30 23:59:59+00:00' t0, t1", "PDS times modules.\"\"\" from datetime import datetime as dt from", "str(dt_date('Feb 14, 2005', eod=True)) == '2005-02-14 23:59:59+00:00' assert str(dt_date('to Feb", "assert str(t0) == '2007-05-17 00:00:00+00:00' assert str(t1) == '2007-06-30 23:59:59+00:00'", "raises(ValueError): _ = cassini_time('v123_1') with raises(ValueError): _ = cassini_time(123) def", "def test_dt_doy(): \"\"\"Test parsing DOY time pattern.\"\"\" assert str(dt_doy('2005-045T18:02:29.123')) ==", "\\ '2005-015T17:58:55 2005-016T18:42:33' def test_dyear(): \"\"\"Test decimal year.\"\"\" assert dyear('2005-01-01')", "March 12, 2006') assert len(times) == 2 assert str(times[0]) ==", "'2011-10-01 00:02:04.244000+00:00' assert str(t1) == '2011-12-31 12:28:45.128000+00:00' t0, t1 =", "cassini_time(1483230358.172) == 1483230358.172 with raises(ValueError): _ = cassini_time('v123_1') with raises(ValueError):", "to 2005-02-14T18:03') assert len(times) == 2 assert str(times[0]) == '2005-02-14", "t0, t1 = pds_time('… 2011-10-01T00:02:04.244 through 2011-12-31T12:28:45.128') assert str(t0) ==", "== '2005-01-16 18:42:33+00:00' with raises(ValueError): _ = pds_time('No data available')", "raises(ValueError): _ = dt_doy('2005-02-14') def test_dt_date(): \"\"\"Test date pattern.\"\"\" assert", "== '2005-02-14 18:02:29+00:00' assert str(dt_doy('2005-045:18:02')) == '2005-02-14 18:02:00+00:00' assert str(dt_doy('2005-045'))", "t0, t1 = pds_time('… May 17, 2007 through Jun 30,", "== approx(1487096932.068, abs=1e-3) times = utc2cassini('May 17, 2007 through Jun", "dt_date, dt_doy, dt_iso, dyear, pds_folder, pds_time, utc2cassini) from pytest import", "assert times[1] == approx(1561941262.879, abs=1e-3) def test_pds_folder(): \"\"\"Test convert PDS", "23:59:59+00:00' t0, t1 = pds_time('… 2010-274T00:00:00 through 2010-365T23:59:59') assert str(t0)", "pds_time, utc2cassini) from pytest import approx, raises def 
test_dt_iso(): \"\"\"Test", "through Jun 30, 2007') assert str(t0) == '2007-05-17 00:00:00+00:00' assert", "'2005-01-01 00:00:00' def test_utc2cassini(): \"\"\"Test UTC to Cassini time converter.\"\"\"", "str(times[0]) == '2005-02-14 18:02:29+00:00' assert str(times[1]) == '2005-02-14 18:03:00+00:00' with", "str(t1) == '2011-12-31 12:28:45.128000+00:00' t0, t1 = pds_time('2005015T175855_2005016T184233/') assert str(t0)", "assert str(dt_doy('2005-045 18:02:29')) == '2005-02-14 18:02:29+00:00' assert str(dt_doy('2005-045:18:02')) == '2005-02-14", "2005')) == '2005-02-14 23:59:59+00:00' times = dt_date('from Feb 14, 2005", "18:42:33+00:00' with raises(ValueError): _ = pds_time('No data available') def test_cassini_time():", "pds_folder('2005015T175855') == '2005-015T17:58:55' assert pds_folder('2005015T175855_2005016T184233/') == \\ '2005-015T17:58:55 2005-016T18:42:33' def", "str(cassini2utc(1483230358.172)) == '2005-01-01 00:00:00' def test_utc2cassini(): \"\"\"Test UTC to Cassini", "Cassini time converter.\"\"\" assert utc2cassini('2005-02-14T18:02:29') == approx(1487096932.068, abs=1e-3) times =", "test_dt_doy(): \"\"\"Test parsing DOY time pattern.\"\"\" assert str(dt_doy('2005-045T18:02:29.123')) == '2005-02-14", "cassini_time, dt_date, dt_doy, dt_iso, dyear, pds_folder, pds_time, utc2cassini) from pytest", "pds_time('… 2010-274T00:00:00 through 2010-365T23:59:59') assert str(t0) == '2010-10-01 00:00:00+00:00' assert", "30, 2007') assert len(times) == 2 assert times[0] == approx(1558053238.602,", "\"\"\"Test convert PDS folder as string.\"\"\" assert pds_folder('2005015T175855') == '2005-015T17:58:55'", "2006') assert len(times) == 2 assert str(times[0]) == '2005-02-14 00:00:00+00:00'", "utc2cassini) from pytest import approx, raises def test_dt_iso(): \"\"\"Test parsing", "with raises(ValueError): _ = pds_time('No data available') def test_cassini_time(): \"\"\"Test", "14, 2005')) == '2005-02-14 00:00:00+00:00' assert str(dt_date('Febr 14, 2005')) ==", 
"dt_date('2005-02-14') def test_pds_time(): \"\"\"Test PDS time parsing.\"\"\" assert str(pds_time('May 17,", "t1 = pds_time('2005015T175855_2005016T184233/') assert str(t0) == '2005-01-15 17:58:55+00:00' assert str(t1)", "assert len(times) == 2 assert str(times[0]) == '2005-02-14 18:02:29+00:00' assert", "12:28:45.128000+00:00' t0, t1 = pds_time('2005015T175855_2005016T184233/') assert str(t0) == '2005-01-15 17:58:55+00:00'", "'2005-015T17:58:55' assert pds_folder('2005015T175855_2005016T184233/') == \\ '2005-015T17:58:55 2005-016T18:42:33' def test_dyear(): \"\"\"Test", "== '2005-02-14 18:02:29+00:00' assert str(dt_iso('2005-02-14:18:02')) == '2005-02-14 18:02:00+00:00' assert str(dt_iso('2005-02-14'))", "2 assert times[0] == approx(1558053238.602, abs=1e-3) assert times[1] == approx(1561941262.879,", "abs=1e-3) assert times[1] == approx(1561941262.879, abs=1e-3) def test_pds_folder(): \"\"\"Test convert", "assert str(dt_doy('2005-045T18:02:29.123')) == '2005-02-14 18:02:29.123000+00:00' assert str(dt_doy('2005-045 18:02:29')) == '2005-02-14", "str(dt_iso('2005-02-14T18:02:29.123')) == '2005-02-14 18:02:29.123000+00:00' assert str(dt_iso('2005-02-14 18:02:29')) == '2005-02-14 18:02:29+00:00'", "str(times[1]) == '2005-02-14 18:03:00+00:00' with raises(ValueError): _ = dt_iso('2005-045') def", "== '2005-015T17:58:55' assert pds_folder('2005015T175855_2005016T184233/') == \\ '2005-015T17:58:55 2005-016T18:42:33' def test_dyear():", "dt_iso('2005-045') def test_dt_doy(): \"\"\"Test parsing DOY time pattern.\"\"\" assert str(dt_doy('2005-045T18:02:29.123'))", "== '2007-05-17 00:00:00+00:00' assert str(pds_time('2010-274T00:00:00')) == '2010-10-01 00:00:00+00:00' assert str(pds_time('2011-10-01T00:02:04.244'))", "2007')) == '2007-05-17 00:00:00+00:00' assert str(pds_time('2010-274T00:00:00')) == '2010-10-01 00:00:00+00:00' assert", "'2005-02-14 00:00:00+00:00' assert str(times[1]) == '2006-03-12 23:59:59+00:00' with raises(ValueError): _", "str(dt_iso('2005-02-14')) == 
'2005-02-14 00:00:00+00:00' times = dt_iso('from 2005-02-14T18:02:29 to 2005-02-14T18:03')", "through March 12, 2006') assert len(times) == 2 assert str(times[0])", "decimal year.\"\"\" assert dyear('2005-01-01') == 2005.0 assert dyear('2005-12-31') == 2005.9973", "date pattern.\"\"\" assert str(dt_date('Feb 14, 2005')) == '2005-02-14 00:00:00+00:00' assert", "time parsing.\"\"\" assert str(pds_time('May 17, 2007')) == '2007-05-17 00:00:00+00:00' assert", "assert times[0] == approx(1558053238.602, abs=1e-3) assert times[1] == approx(1561941262.879, abs=1e-3)", "string.\"\"\" assert pds_folder('2005015T175855') == '2005-015T17:58:55' assert pds_folder('2005015T175855_2005016T184233/') == \\ '2005-015T17:58:55", "cassini_time(123) def test_cassini2utc(): \"\"\"Test Cassini time to UTC converter.\"\"\" assert", "00:00:00+00:00' assert str(dt_date('Febr 14, 2005')) == '2005-02-14 00:00:00+00:00' assert str(dt_date('Feb", "14, 2005')) == '2005-02-14 23:59:59+00:00' times = dt_date('from Feb 14,", "'2011-12-31 12:28:45.128000+00:00' t0, t1 = pds_time('2005015T175855_2005016T184233/') assert str(t0) == '2005-01-15", "import (cassini2utc, cassini_time, dt_date, dt_doy, dt_iso, dyear, pds_folder, pds_time, utc2cassini)", "str(dt_doy('2005-045:18:02')) == '2005-02-14 18:02:00+00:00' assert str(dt_doy('2005-045')) == '2005-02-14 00:00:00+00:00' times", "abs=1e-3) def test_pds_folder(): \"\"\"Test convert PDS folder as string.\"\"\" assert", "== 1487096932.0 assert cassini_time(1483230358.172) == 1483230358.172 with raises(ValueError): _ =", "year.\"\"\" assert dyear('2005-01-01') == 2005.0 assert dyear('2005-12-31') == 2005.9973 assert", "= utc2cassini('May 17, 2007 through Jun 30, 2007') assert len(times)", "def test_pds_time(): \"\"\"Test PDS time parsing.\"\"\" assert str(pds_time('May 17, 2007'))", "== '2010-10-01 00:00:00+00:00' assert str(t1) == '2010-12-31 23:59:59+00:00' t0, t1", "assert str(dt_doy('2005-045')) == '2005-02-14 00:00:00+00:00' times = dt_doy('from 
2005-045T18:02:29 to", "= cassini_time('v123_1') with raises(ValueError): _ = cassini_time(123) def test_cassini2utc(): \"\"\"Test", "2004.9973 assert dyear(dt(2005, 1, 1)) == 2005.0 assert dyear(dt(2005, 12,", "00:00:00+00:00' assert str(t1) == '2010-12-31 23:59:59+00:00' t0, t1 = pds_time('…", "== 2 assert str(times[0]) == '2005-02-14 00:00:00+00:00' assert str(times[1]) ==", "== '2010-10-01 00:00:00+00:00' assert str(pds_time('2011-10-01T00:02:04.244')) == '2011-10-01 00:02:04.244000+00:00' t0, t1", "as string.\"\"\" assert pds_folder('2005015T175855') == '2005-015T17:58:55' assert pds_folder('2005015T175855_2005016T184233/') == \\", "2011-10-01T00:02:04.244 through 2011-12-31T12:28:45.128') assert str(t0) == '2011-10-01 00:02:04.244000+00:00' assert str(t1)", "_ = cassini_time(123) def test_cassini2utc(): \"\"\"Test Cassini time to UTC", "assert str(dt_date('to Feb 14, 2005')) == '2005-02-14 23:59:59+00:00' times =", "\"\"\"Test decimal year.\"\"\" assert dyear('2005-01-01') == 2005.0 assert dyear('2005-12-31') ==", "_ = pds_time('No data available') def test_cassini_time(): \"\"\"Test Cassini time", "with raises(ValueError): _ = dt_iso('2005-045') def test_dt_doy(): \"\"\"Test parsing DOY", "def test_cassini_time(): \"\"\"Test Cassini time parsing.\"\"\" assert cassini_time('v1487096932_1.qub') == 1487096932.0", "assert dyear(dt(2005, 12, 31)) == 2005.9973 assert dyear(dt(2004, 12, 31))", "== '2005-02-14 18:02:29.123000+00:00' assert str(dt_iso('2005-02-14 18:02:29')) == '2005-02-14 18:02:29+00:00' assert", "test_cassini_time(): \"\"\"Test Cassini time parsing.\"\"\" assert cassini_time('v1487096932_1.qub') == 1487096932.0 assert", "datetime import datetime as dt from pyvims.pds.times import (cassini2utc, cassini_time,", "time converter.\"\"\" assert utc2cassini('2005-02-14T18:02:29') == approx(1487096932.068, abs=1e-3) times = utc2cassini('May", "through 2010-365T23:59:59') assert str(t0) == '2010-10-01 00:00:00+00:00' assert str(t1) ==", "18:02:29+00:00' 
assert str(dt_doy('2005-045:18:02')) == '2005-02-14 18:02:00+00:00' assert str(dt_doy('2005-045')) == '2005-02-14", "Feb 14, 2005')) == '2005-02-14 23:59:59+00:00' times = dt_date('from Feb", "'2005-02-14 00:00:00+00:00' times = dt_iso('from 2005-02-14T18:02:29 to 2005-02-14T18:03') assert len(times)", "raises(ValueError): _ = cassini_time(123) def test_cassini2utc(): \"\"\"Test Cassini time to", "with raises(ValueError): _ = cassini_time(123) def test_cassini2utc(): \"\"\"Test Cassini time", "assert str(dt_iso('2005-02-14T18:02:29.123')) == '2005-02-14 18:02:29.123000+00:00' assert str(dt_iso('2005-02-14 18:02:29')) == '2005-02-14", "May 17, 2007 through Jun 30, 2007') assert str(t0) ==", "str(cassini2utc('v1487096932_1')) == '2005-02-14 18:02:29' assert str(cassini2utc(1483230358.172)) == '2005-01-01 00:00:00' def", "00:00:00+00:00' times = dt_iso('from 2005-02-14T18:02:29 to 2005-02-14T18:03') assert len(times) ==", "2011-12-31T12:28:45.128') assert str(t0) == '2011-10-01 00:02:04.244000+00:00' assert str(t1) == '2011-12-31", "== approx(1561941262.879, abs=1e-3) def test_pds_folder(): \"\"\"Test convert PDS folder as", "'2011-10-01 00:02:04.244000+00:00' t0, t1 = pds_time('… May 17, 2007 through", "pds_time('No data available') def test_cassini_time(): \"\"\"Test Cassini time parsing.\"\"\" assert", "== 2005.0 assert dyear('2005-12-31') == 2005.9973 assert dyear('2004-12-31') == 2004.9973", "'2005-02-14 18:02:29' assert str(cassini2utc(1483230358.172)) == '2005-01-01 00:00:00' def test_utc2cassini(): \"\"\"Test", "str(times[0]) == '2005-02-14 00:00:00+00:00' assert str(times[1]) == '2006-03-12 23:59:59+00:00' with", "assert str(dt_iso('2005-02-14 18:02:29')) == '2005-02-14 18:02:29+00:00' assert str(dt_iso('2005-02-14:18:02')) == '2005-02-14", "parsing DOY time pattern.\"\"\" assert str(dt_doy('2005-045T18:02:29.123')) == '2005-02-14 18:02:29.123000+00:00' assert", "str(t1) == '2007-06-30 23:59:59+00:00' t0, t1 = pds_time('… 2010-274T00:00:00 through", "2007') 
assert str(t0) == '2007-05-17 00:00:00+00:00' assert str(t1) == '2007-06-30", "'2005-01-15 17:58:55+00:00' assert str(t1) == '2005-01-16 18:42:33+00:00' with raises(ValueError): _", "18:02:29')) == '2005-02-14 18:02:29+00:00' assert str(dt_iso('2005-02-14:18:02')) == '2005-02-14 18:02:00+00:00' assert", "\"\"\"Test date pattern.\"\"\" assert str(dt_date('Feb 14, 2005')) == '2005-02-14 00:00:00+00:00'", "dyear, pds_folder, pds_time, utc2cassini) from pytest import approx, raises def", "2 assert str(times[0]) == '2005-02-14 00:00:00+00:00' assert str(times[1]) == '2006-03-12", "'2005-02-14 18:02:00+00:00' assert str(dt_doy('2005-045')) == '2005-02-14 00:00:00+00:00' times = dt_doy('from", "available') def test_cassini_time(): \"\"\"Test Cassini time parsing.\"\"\" assert cassini_time('v1487096932_1.qub') ==", "assert str(t0) == '2011-10-01 00:02:04.244000+00:00' assert str(t1) == '2011-12-31 12:28:45.128000+00:00'", "dt_doy('2005-02-14') def test_dt_date(): \"\"\"Test date pattern.\"\"\" assert str(dt_date('Feb 14, 2005'))", "2 assert str(times[0]) == '2005-02-14 18:02:29+00:00' assert str(times[1]) == '2005-02-14", "2005-045T18:03') assert len(times) == 2 assert str(times[0]) == '2005-02-14 18:02:29+00:00'", "str(pds_time('2010-274T00:00:00')) == '2010-10-01 00:00:00+00:00' assert str(pds_time('2011-10-01T00:02:04.244')) == '2011-10-01 00:02:04.244000+00:00' t0,", "== \\ '2005-015T17:58:55 2005-016T18:42:33' def test_dyear(): \"\"\"Test decimal year.\"\"\" assert", "Jun 30, 2007') assert str(t0) == '2007-05-17 00:00:00+00:00' assert str(t1)", "18:02:29')) == '2005-02-14 18:02:29+00:00' assert str(dt_doy('2005-045:18:02')) == '2005-02-14 18:02:00+00:00' assert", "def test_pds_folder(): \"\"\"Test convert PDS folder as string.\"\"\" assert pds_folder('2005015T175855')", "18:02:29+00:00' assert str(times[1]) == '2005-02-14 18:03:00+00:00' with raises(ValueError): _ =", "= pds_time('… May 17, 2007 through Jun 30, 2007') assert", "assert str(times[0]) == '2005-02-14 
00:00:00+00:00' assert str(times[1]) == '2006-03-12 23:59:59+00:00'", "utc2cassini('May 17, 2007 through Jun 30, 2007') assert len(times) ==", "assert str(dt_iso('2005-02-14:18:02')) == '2005-02-14 18:02:00+00:00' assert str(dt_iso('2005-02-14')) == '2005-02-14 00:00:00+00:00'", "= pds_time('No data available') def test_cassini_time(): \"\"\"Test Cassini time parsing.\"\"\"", "<reponame>seignovert/pyvims \"\"\"Test PDS times modules.\"\"\" from datetime import datetime as", "18:03:00+00:00' with raises(ValueError): _ = dt_iso('2005-045') def test_dt_doy(): \"\"\"Test parsing", "DOY time pattern.\"\"\" assert str(dt_doy('2005-045T18:02:29.123')) == '2005-02-14 18:02:29.123000+00:00' assert str(dt_doy('2005-045", "= cassini_time(123) def test_cassini2utc(): \"\"\"Test Cassini time to UTC converter.\"\"\"", "== '2005-02-14 23:59:59+00:00' times = dt_date('from Feb 14, 2005 through", "= dt_date('from Feb 14, 2005 through March 12, 2006') assert", "== '2011-10-01 00:02:04.244000+00:00' t0, t1 = pds_time('… May 17, 2007", "assert str(t1) == '2011-12-31 12:28:45.128000+00:00' t0, t1 = pds_time('2005015T175855_2005016T184233/') assert", "cassini_time('v123_1') with raises(ValueError): _ = cassini_time(123) def test_cassini2utc(): \"\"\"Test Cassini", "str(dt_doy('2005-045T18:02:29.123')) == '2005-02-14 18:02:29.123000+00:00' assert str(dt_doy('2005-045 18:02:29')) == '2005-02-14 18:02:29+00:00'", "str(dt_iso('2005-02-14:18:02')) == '2005-02-14 18:02:00+00:00' assert str(dt_iso('2005-02-14')) == '2005-02-14 00:00:00+00:00' times", "assert str(times[0]) == '2005-02-14 18:02:29+00:00' assert str(times[1]) == '2005-02-14 18:03:00+00:00'", "23:59:59+00:00' assert str(dt_date('to Feb 14, 2005')) == '2005-02-14 23:59:59+00:00' times", "def test_dt_iso(): \"\"\"Test parsing ISO time pattern.\"\"\" assert str(dt_iso('2005-02-14T18:02:29.123')) ==", "== '2005-02-14 00:00:00+00:00' assert str(dt_date('Feb 14, 2005', eod=True)) == '2005-02-14", "t0, t1 = pds_time('… 2010-274T00:00:00 
through 2010-365T23:59:59') assert str(t0) ==", "times = dt_iso('from 2005-02-14T18:02:29 to 2005-02-14T18:03') assert len(times) == 2", "'2005-02-14 18:02:29+00:00' assert str(dt_doy('2005-045:18:02')) == '2005-02-14 18:02:00+00:00' assert str(dt_doy('2005-045')) ==", "2005-02-14T18:02:29 to 2005-02-14T18:03') assert len(times) == 2 assert str(times[0]) ==", "assert str(times[1]) == '2005-02-14 18:03:00+00:00' with raises(ValueError): _ = dt_iso('2005-045')", "'2007-05-17 00:00:00+00:00' assert str(pds_time('2010-274T00:00:00')) == '2010-10-01 00:00:00+00:00' assert str(pds_time('2011-10-01T00:02:04.244')) ==", "assert dyear(dt(2005, 1, 1)) == 2005.0 assert dyear(dt(2005, 12, 31))", "\"\"\"Test Cassini time to UTC converter.\"\"\" assert str(cassini2utc('v1487096932_1')) == '2005-02-14", "14, 2005', eod=True)) == '2005-02-14 23:59:59+00:00' assert str(dt_date('to Feb 14,", "raises(ValueError): _ = dt_date('2005-02-14') def test_pds_time(): \"\"\"Test PDS time parsing.\"\"\"", "pds_folder, pds_time, utc2cassini) from pytest import approx, raises def test_dt_iso():", "time to UTC converter.\"\"\" assert str(cassini2utc('v1487096932_1')) == '2005-02-14 18:02:29' assert", "23:59:59+00:00' t0, t1 = pds_time('… 2011-10-01T00:02:04.244 through 2011-12-31T12:28:45.128') assert str(t0)", "assert pds_folder('2005015T175855_2005016T184233/') == \\ '2005-015T17:58:55 2005-016T18:42:33' def test_dyear(): \"\"\"Test decimal", "'2005-02-14 00:00:00+00:00' assert str(dt_date('Feb 14, 2005', eod=True)) == '2005-02-14 23:59:59+00:00'", "def test_cassini2utc(): \"\"\"Test Cassini time to UTC converter.\"\"\" assert str(cassini2utc('v1487096932_1'))", "str(pds_time('2011-10-01T00:02:04.244')) == '2011-10-01 00:02:04.244000+00:00' t0, t1 = pds_time('… May 17,", "test_utc2cassini(): \"\"\"Test UTC to Cassini time converter.\"\"\" assert utc2cassini('2005-02-14T18:02:29') ==", "'2010-10-01 00:00:00+00:00' assert str(pds_time('2011-10-01T00:02:04.244')) == '2011-10-01 
00:02:04.244000+00:00' t0, t1 =", "2005')) == '2005-02-14 00:00:00+00:00' assert str(dt_date('Febr 14, 2005')) == '2005-02-14", "pattern.\"\"\" assert str(dt_iso('2005-02-14T18:02:29.123')) == '2005-02-14 18:02:29.123000+00:00' assert str(dt_iso('2005-02-14 18:02:29')) ==", "str(t0) == '2011-10-01 00:02:04.244000+00:00' assert str(t1) == '2011-12-31 12:28:45.128000+00:00' t0,", "00:02:04.244000+00:00' assert str(t1) == '2011-12-31 12:28:45.128000+00:00' t0, t1 = pds_time('2005015T175855_2005016T184233/')", "Feb 14, 2005 through March 12, 2006') assert len(times) ==", "through Jun 30, 2007') assert len(times) == 2 assert times[0]", "00:00:00+00:00' times = dt_doy('from 2005-045T18:02:29 to 2005-045T18:03') assert len(times) ==", "assert str(cassini2utc('v1487096932_1')) == '2005-02-14 18:02:29' assert str(cassini2utc(1483230358.172)) == '2005-01-01 00:00:00'", "test_dt_iso(): \"\"\"Test parsing ISO time pattern.\"\"\" assert str(dt_iso('2005-02-14T18:02:29.123')) == '2005-02-14", "dt_iso('from 2005-02-14T18:02:29 to 2005-02-14T18:03') assert len(times) == 2 assert str(times[0])", "test_pds_time(): \"\"\"Test PDS time parsing.\"\"\" assert str(pds_time('May 17, 2007')) ==", "23:59:59+00:00' times = dt_date('from Feb 14, 2005 through March 12,", "assert pds_folder('2005015T175855') == '2005-015T17:58:55' assert pds_folder('2005015T175855_2005016T184233/') == \\ '2005-015T17:58:55 2005-016T18:42:33'", "'2005-02-14 18:03:00+00:00' with raises(ValueError): _ = dt_iso('2005-045') def test_dt_doy(): \"\"\"Test", "== 2005.9973 assert dyear('2004-12-31') == 2004.9973 assert dyear(dt(2005, 1, 1))", "(cassini2utc, cassini_time, dt_date, dt_doy, dt_iso, dyear, pds_folder, pds_time, utc2cassini) from", "2005 through March 12, 2006') assert len(times) == 2 assert", "assert cassini_time(1483230358.172) == 1483230358.172 with raises(ValueError): _ = cassini_time('v123_1') with", "23:59:59+00:00' with raises(ValueError): _ = dt_date('2005-02-14') def test_pds_time(): \"\"\"Test 
PDS", "assert str(cassini2utc(1483230358.172)) == '2005-01-01 00:00:00' def test_utc2cassini(): \"\"\"Test UTC to", "abs=1e-3) times = utc2cassini('May 17, 2007 through Jun 30, 2007')", "== '2005-02-14 00:00:00+00:00' assert str(dt_date('Febr 14, 2005')) == '2005-02-14 00:00:00+00:00'", "time parsing.\"\"\" assert cassini_time('v1487096932_1.qub') == 1487096932.0 assert cassini_time(1483230358.172) == 1483230358.172", "_ = dt_date('2005-02-14') def test_pds_time(): \"\"\"Test PDS time parsing.\"\"\" assert", "2005', eod=True)) == '2005-02-14 23:59:59+00:00' assert str(dt_date('to Feb 14, 2005'))", "00:00:00+00:00' assert str(t1) == '2007-06-30 23:59:59+00:00' t0, t1 = pds_time('…", "def test_dyear(): \"\"\"Test decimal year.\"\"\" assert dyear('2005-01-01') == 2005.0 assert", "time pattern.\"\"\" assert str(dt_doy('2005-045T18:02:29.123')) == '2005-02-14 18:02:29.123000+00:00' assert str(dt_doy('2005-045 18:02:29'))", "times = dt_date('from Feb 14, 2005 through March 12, 2006')", "str(dt_doy('2005-045 18:02:29')) == '2005-02-14 18:02:29+00:00' assert str(dt_doy('2005-045:18:02')) == '2005-02-14 18:02:00+00:00'", "2007') assert len(times) == 2 assert times[0] == approx(1558053238.602, abs=1e-3)", "== '2005-02-14 18:02:29' assert str(cassini2utc(1483230358.172)) == '2005-01-01 00:00:00' def test_utc2cassini():", "\"\"\"Test PDS times modules.\"\"\" from datetime import datetime as dt", "eod=True)) == '2005-02-14 23:59:59+00:00' assert str(dt_date('to Feb 14, 2005')) ==", "_ = cassini_time('v123_1') with raises(ValueError): _ = cassini_time(123) def test_cassini2utc():", "t1 = pds_time('… 2010-274T00:00:00 through 2010-365T23:59:59') assert str(t0) == '2010-10-01", "== '2006-03-12 23:59:59+00:00' with raises(ValueError): _ = dt_date('2005-02-14') def test_pds_time():", "2010-274T00:00:00 through 2010-365T23:59:59') assert str(t0) == '2010-10-01 00:00:00+00:00' assert str(t1)", "dt_doy, dt_iso, dyear, pds_folder, pds_time, utc2cassini) from pytest import approx,", 
"times[1] == approx(1561941262.879, abs=1e-3) def test_pds_folder(): \"\"\"Test convert PDS folder", "== '2005-02-14 18:03:00+00:00' with raises(ValueError): _ = dt_doy('2005-02-14') def test_dt_date():", "len(times) == 2 assert str(times[0]) == '2005-02-14 00:00:00+00:00' assert str(times[1])", "str(dt_iso('2005-02-14 18:02:29')) == '2005-02-14 18:02:29+00:00' assert str(dt_iso('2005-02-14:18:02')) == '2005-02-14 18:02:00+00:00'", "datetime as dt from pyvims.pds.times import (cassini2utc, cassini_time, dt_date, dt_doy,", "= dt_iso('from 2005-02-14T18:02:29 to 2005-02-14T18:03') assert len(times) == 2 assert", "14, 2005')) == '2005-02-14 00:00:00+00:00' assert str(dt_date('Feb 14, 2005', eod=True))", "== '2011-12-31 12:28:45.128000+00:00' t0, t1 = pds_time('2005015T175855_2005016T184233/') assert str(t0) ==", "2005-016T18:42:33' def test_dyear(): \"\"\"Test decimal year.\"\"\" assert dyear('2005-01-01') == 2005.0", "== '2005-02-14 23:59:59+00:00' assert str(dt_date('to Feb 14, 2005')) == '2005-02-14", "2007 through Jun 30, 2007') assert len(times) == 2 assert", "approx(1558053238.602, abs=1e-3) assert times[1] == approx(1561941262.879, abs=1e-3) def test_pds_folder(): \"\"\"Test", "assert cassini_time('v1487096932_1.qub') == 1487096932.0 assert cassini_time(1483230358.172) == 1483230358.172 with raises(ValueError):", "== '2005-02-14 00:00:00+00:00' times = dt_iso('from 2005-02-14T18:02:29 to 2005-02-14T18:03') assert", "assert str(t0) == '2010-10-01 00:00:00+00:00' assert str(t1) == '2010-12-31 23:59:59+00:00'", "dt_doy('from 2005-045T18:02:29 to 2005-045T18:03') assert len(times) == 2 assert str(times[0])", "= dt_date('2005-02-14') def test_pds_time(): \"\"\"Test PDS time parsing.\"\"\" assert str(pds_time('May", "= pds_time('2005015T175855_2005016T184233/') assert str(t0) == '2005-01-15 17:58:55+00:00' assert str(t1) ==", "== '2005-02-14 18:02:00+00:00' assert str(dt_doy('2005-045')) == '2005-02-14 00:00:00+00:00' times =", "'2005-02-14 23:59:59+00:00' times = 
dt_date('from Feb 14, 2005 through March", "dyear('2004-12-31') == 2004.9973 assert dyear(dt(2005, 1, 1)) == 2005.0 assert", "17, 2007 through Jun 30, 2007') assert len(times) == 2", "pyvims.pds.times import (cassini2utc, cassini_time, dt_date, dt_doy, dt_iso, dyear, pds_folder, pds_time,", "== 2 assert str(times[0]) == '2005-02-14 18:02:29+00:00' assert str(times[1]) ==", "assert str(dt_date('Feb 14, 2005', eod=True)) == '2005-02-14 23:59:59+00:00' assert str(dt_date('to", "data available') def test_cassini_time(): \"\"\"Test Cassini time parsing.\"\"\" assert cassini_time('v1487096932_1.qub')", "times modules.\"\"\" from datetime import datetime as dt from pyvims.pds.times", "time pattern.\"\"\" assert str(dt_iso('2005-02-14T18:02:29.123')) == '2005-02-14 18:02:29.123000+00:00' assert str(dt_iso('2005-02-14 18:02:29'))", "\"\"\"Test UTC to Cassini time converter.\"\"\" assert utc2cassini('2005-02-14T18:02:29') == approx(1487096932.068,", "PDS time parsing.\"\"\" assert str(pds_time('May 17, 2007')) == '2007-05-17 00:00:00+00:00'", "= pds_time('… 2011-10-01T00:02:04.244 through 2011-12-31T12:28:45.128') assert str(t0) == '2011-10-01 00:02:04.244000+00:00'", "17:58:55+00:00' assert str(t1) == '2005-01-16 18:42:33+00:00' with raises(ValueError): _ =", "assert str(pds_time('2010-274T00:00:00')) == '2010-10-01 00:00:00+00:00' assert str(pds_time('2011-10-01T00:02:04.244')) == '2011-10-01 00:02:04.244000+00:00'", "convert PDS folder as string.\"\"\" assert pds_folder('2005015T175855') == '2005-015T17:58:55' assert", "= dt_doy('from 2005-045T18:02:29 to 2005-045T18:03') assert len(times) == 2 assert", "'2005-01-16 18:42:33+00:00' with raises(ValueError): _ = pds_time('No data available') def", "'2010-10-01 00:00:00+00:00' assert str(t1) == '2010-12-31 23:59:59+00:00' t0, t1 =", "== 1483230358.172 with raises(ValueError): _ = cassini_time('v123_1') with raises(ValueError): _", "len(times) == 2 assert str(times[0]) == '2005-02-14 18:02:29+00:00' assert str(times[1])", 
"assert len(times) == 2 assert str(times[0]) == '2005-02-14 00:00:00+00:00' assert", "== '2010-12-31 23:59:59+00:00' t0, t1 = pds_time('… 2011-10-01T00:02:04.244 through 2011-12-31T12:28:45.128')", "assert utc2cassini('2005-02-14T18:02:29') == approx(1487096932.068, abs=1e-3) times = utc2cassini('May 17, 2007", "pds_time('… 2011-10-01T00:02:04.244 through 2011-12-31T12:28:45.128') assert str(t0) == '2011-10-01 00:02:04.244000+00:00' assert", "= dt_iso('2005-045') def test_dt_doy(): \"\"\"Test parsing DOY time pattern.\"\"\" assert", "== '2005-02-14 18:02:29+00:00' assert str(times[1]) == '2005-02-14 18:03:00+00:00' with raises(ValueError):", "\"\"\"Test parsing DOY time pattern.\"\"\" assert str(dt_doy('2005-045T18:02:29.123')) == '2005-02-14 18:02:29.123000+00:00'", "'2005-02-14 00:00:00+00:00' times = dt_doy('from 2005-045T18:02:29 to 2005-045T18:03') assert len(times)", "'2005-02-14 18:02:29+00:00' assert str(dt_iso('2005-02-14:18:02')) == '2005-02-14 18:02:00+00:00' assert str(dt_iso('2005-02-14')) ==", "18:02:29.123000+00:00' assert str(dt_doy('2005-045 18:02:29')) == '2005-02-14 18:02:29+00:00' assert str(dt_doy('2005-045:18:02')) ==", "== '2005-02-14 18:03:00+00:00' with raises(ValueError): _ = dt_iso('2005-045') def test_dt_doy():", "== '2011-10-01 00:02:04.244000+00:00' assert str(t1) == '2011-12-31 12:28:45.128000+00:00' t0, t1", "\"\"\"Test PDS time parsing.\"\"\" assert str(pds_time('May 17, 2007')) == '2007-05-17", "from pyvims.pds.times import (cassini2utc, cassini_time, dt_date, dt_doy, dt_iso, dyear, pds_folder,", "Cassini time to UTC converter.\"\"\" assert str(cassini2utc('v1487096932_1')) == '2005-02-14 18:02:29'", "as dt from pyvims.pds.times import (cassini2utc, cassini_time, dt_date, dt_doy, dt_iso,", "'2005-02-14 18:02:29+00:00' assert str(times[1]) == '2005-02-14 18:03:00+00:00' with raises(ValueError): _", "str(dt_doy('2005-045')) == '2005-02-14 00:00:00+00:00' times = dt_doy('from 2005-045T18:02:29 to 2005-045T18:03')", "str(pds_time('May 
17, 2007')) == '2007-05-17 00:00:00+00:00' assert str(pds_time('2010-274T00:00:00')) == '2010-10-01", "pds_time('2005015T175855_2005016T184233/') assert str(t0) == '2005-01-15 17:58:55+00:00' assert str(t1) == '2005-01-16", "assert str(times[1]) == '2006-03-12 23:59:59+00:00' with raises(ValueError): _ = dt_date('2005-02-14')", "\"\"\"Test parsing ISO time pattern.\"\"\" assert str(dt_iso('2005-02-14T18:02:29.123')) == '2005-02-14 18:02:29.123000+00:00'", "parsing.\"\"\" assert cassini_time('v1487096932_1.qub') == 1487096932.0 assert cassini_time(1483230358.172) == 1483230358.172 with", "test_cassini2utc(): \"\"\"Test Cassini time to UTC converter.\"\"\" assert str(cassini2utc('v1487096932_1')) ==", "dyear(dt(2005, 1, 1)) == 2005.0 assert dyear(dt(2005, 12, 31)) ==", "17, 2007')) == '2007-05-17 00:00:00+00:00' assert str(pds_time('2010-274T00:00:00')) == '2010-10-01 00:00:00+00:00'", "UTC to Cassini time converter.\"\"\" assert utc2cassini('2005-02-14T18:02:29') == approx(1487096932.068, abs=1e-3)", "folder as string.\"\"\" assert pds_folder('2005015T175855') == '2005-015T17:58:55' assert pds_folder('2005015T175855_2005016T184233/') ==", "18:02:29+00:00' assert str(dt_iso('2005-02-14:18:02')) == '2005-02-14 18:02:00+00:00' assert str(dt_iso('2005-02-14')) == '2005-02-14", "pds_folder('2005015T175855_2005016T184233/') == \\ '2005-015T17:58:55 2005-016T18:42:33' def test_dyear(): \"\"\"Test decimal year.\"\"\"", "18:02:00+00:00' assert str(dt_iso('2005-02-14')) == '2005-02-14 00:00:00+00:00' times = dt_iso('from 2005-02-14T18:02:29", "18:02:00+00:00' assert str(dt_doy('2005-045')) == '2005-02-14 00:00:00+00:00' times = dt_doy('from 2005-045T18:02:29", "cassini_time('v1487096932_1.qub') == 1487096932.0 assert cassini_time(1483230358.172) == 1483230358.172 with raises(ValueError): _", "assert str(pds_time('2011-10-01T00:02:04.244')) == '2011-10-01 00:02:04.244000+00:00' t0, t1 = pds_time('… May", "through 2011-12-31T12:28:45.128') assert str(t0) == '2011-10-01 
00:02:04.244000+00:00' assert str(t1) ==", "to Cassini time converter.\"\"\" assert utc2cassini('2005-02-14T18:02:29') == approx(1487096932.068, abs=1e-3) times", "'2005-015T17:58:55 2005-016T18:42:33' def test_dyear(): \"\"\"Test decimal year.\"\"\" assert dyear('2005-01-01') ==", "str(times[1]) == '2005-02-14 18:03:00+00:00' with raises(ValueError): _ = dt_doy('2005-02-14') def", "dyear(dt(2005, 12, 31)) == 2005.9973 assert dyear(dt(2004, 12, 31)) ==", "dyear('2005-01-01') == 2005.0 assert dyear('2005-12-31') == 2005.9973 assert dyear('2004-12-31') ==", "2005.0 assert dyear(dt(2005, 12, 31)) == 2005.9973 assert dyear(dt(2004, 12,", "'2005-02-14 18:03:00+00:00' with raises(ValueError): _ = dt_doy('2005-02-14') def test_dt_date(): \"\"\"Test", "'2007-06-30 23:59:59+00:00' t0, t1 = pds_time('… 2010-274T00:00:00 through 2010-365T23:59:59') assert", "30, 2007') assert str(t0) == '2007-05-17 00:00:00+00:00' assert str(t1) ==", "assert str(t1) == '2007-06-30 23:59:59+00:00' t0, t1 = pds_time('… 2010-274T00:00:00", "18:03:00+00:00' with raises(ValueError): _ = dt_doy('2005-02-14') def test_dt_date(): \"\"\"Test date", "1483230358.172 with raises(ValueError): _ = cassini_time('v123_1') with raises(ValueError): _ =", "14, 2005 through March 12, 2006') assert len(times) == 2", "str(dt_date('to Feb 14, 2005')) == '2005-02-14 23:59:59+00:00' times = dt_date('from", "str(dt_date('Feb 14, 2005')) == '2005-02-14 00:00:00+00:00' assert str(dt_date('Febr 14, 2005'))", "str(t0) == '2007-05-17 00:00:00+00:00' assert str(t1) == '2007-06-30 23:59:59+00:00' t0,", "2005-02-14T18:03') assert len(times) == 2 assert str(times[0]) == '2005-02-14 18:02:29+00:00'", "t1 = pds_time('… May 17, 2007 through Jun 30, 2007')", "times = dt_doy('from 2005-045T18:02:29 to 2005-045T18:03') assert len(times) == 2", "1, 1)) == 2005.0 assert dyear(dt(2005, 12, 31)) == 2005.9973", "def test_utc2cassini(): \"\"\"Test UTC to Cassini time converter.\"\"\" assert utc2cassini('2005-02-14T18:02:29')", "2005.0 
assert dyear('2005-12-31') == 2005.9973 assert dyear('2004-12-31') == 2004.9973 assert", "approx, raises def test_dt_iso(): \"\"\"Test parsing ISO time pattern.\"\"\" assert", "raises(ValueError): _ = dt_iso('2005-045') def test_dt_doy(): \"\"\"Test parsing DOY time", "str(dt_date('Febr 14, 2005')) == '2005-02-14 00:00:00+00:00' assert str(dt_date('Feb 14, 2005',", "\"\"\"Test Cassini time parsing.\"\"\" assert cassini_time('v1487096932_1.qub') == 1487096932.0 assert cassini_time(1483230358.172)", "== 2005.0 assert dyear(dt(2005, 12, 31)) == 2005.9973 assert dyear(dt(2004,", "times[0] == approx(1558053238.602, abs=1e-3) assert times[1] == approx(1561941262.879, abs=1e-3) def", "== '2007-06-30 23:59:59+00:00' t0, t1 = pds_time('… 2010-274T00:00:00 through 2010-365T23:59:59')", "dt_iso, dyear, pds_folder, pds_time, utc2cassini) from pytest import approx, raises", "times = utc2cassini('May 17, 2007 through Jun 30, 2007') assert", "str(t0) == '2010-10-01 00:00:00+00:00' assert str(t1) == '2010-12-31 23:59:59+00:00' t0,", "utc2cassini('2005-02-14T18:02:29') == approx(1487096932.068, abs=1e-3) times = utc2cassini('May 17, 2007 through", "Jun 30, 2007') assert len(times) == 2 assert times[0] ==", "== '2005-01-15 17:58:55+00:00' assert str(t1) == '2005-01-16 18:42:33+00:00' with raises(ValueError):", "assert str(t1) == '2010-12-31 23:59:59+00:00' t0, t1 = pds_time('… 2011-10-01T00:02:04.244", "00:02:04.244000+00:00' t0, t1 = pds_time('… May 17, 2007 through Jun", "to 2005-045T18:03') assert len(times) == 2 assert str(times[0]) == '2005-02-14", "PDS folder as string.\"\"\" assert pds_folder('2005015T175855') == '2005-015T17:58:55' assert pds_folder('2005015T175855_2005016T184233/')", "to UTC converter.\"\"\" assert str(cassini2utc('v1487096932_1')) == '2005-02-14 18:02:29' assert str(cassini2utc(1483230358.172))", "18:02:29.123000+00:00' assert str(dt_iso('2005-02-14 18:02:29')) == '2005-02-14 18:02:29+00:00' assert str(dt_iso('2005-02-14:18:02')) ==", "test_dyear(): 
\"\"\"Test decimal year.\"\"\" assert dyear('2005-01-01') == 2005.0 assert dyear('2005-12-31')", "12, 31)) == 2005.9973 assert dyear(dt(2004, 12, 31)) == 2004.9973", "parsing.\"\"\" assert str(pds_time('May 17, 2007')) == '2007-05-17 00:00:00+00:00' assert str(pds_time('2010-274T00:00:00'))", "assert dyear('2005-01-01') == 2005.0 assert dyear('2005-12-31') == 2005.9973 assert dyear('2004-12-31')", "from datetime import datetime as dt from pyvims.pds.times import (cassini2utc,", "dt from pyvims.pds.times import (cassini2utc, cassini_time, dt_date, dt_doy, dt_iso, dyear,", "assert str(times[1]) == '2005-02-14 18:03:00+00:00' with raises(ValueError): _ = dt_doy('2005-02-14')", "== 2004.9973 assert dyear(dt(2005, 1, 1)) == 2005.0 assert dyear(dt(2005,", "_ = dt_doy('2005-02-14') def test_dt_date(): \"\"\"Test date pattern.\"\"\" assert str(dt_date('Feb", "pattern.\"\"\" assert str(dt_date('Feb 14, 2005')) == '2005-02-14 00:00:00+00:00' assert str(dt_date('Febr", "assert str(pds_time('May 17, 2007')) == '2007-05-17 00:00:00+00:00' assert str(pds_time('2010-274T00:00:00')) ==", "18:02:29' assert str(cassini2utc(1483230358.172)) == '2005-01-01 00:00:00' def test_utc2cassini(): \"\"\"Test UTC", "assert str(t1) == '2005-01-16 18:42:33+00:00' with raises(ValueError): _ = pds_time('No", "'2005-02-14 00:00:00+00:00' assert str(dt_date('Febr 14, 2005')) == '2005-02-14 00:00:00+00:00' assert", "t0, t1 = pds_time('2005015T175855_2005016T184233/') assert str(t0) == '2005-01-15 17:58:55+00:00' assert", "raises def test_dt_iso(): \"\"\"Test parsing ISO time pattern.\"\"\" assert str(dt_iso('2005-02-14T18:02:29.123'))", "== '2005-02-14 18:02:00+00:00' assert str(dt_iso('2005-02-14')) == '2005-02-14 00:00:00+00:00' times =", "assert str(dt_doy('2005-045:18:02')) == '2005-02-14 18:02:00+00:00' assert str(dt_doy('2005-045')) == '2005-02-14 00:00:00+00:00'", "pattern.\"\"\" assert str(dt_doy('2005-045T18:02:29.123')) == '2005-02-14 18:02:29.123000+00:00' assert str(dt_doy('2005-045 
18:02:29')) ==", "UTC converter.\"\"\" assert str(cassini2utc('v1487096932_1')) == '2005-02-14 18:02:29' assert str(cassini2utc(1483230358.172)) ==", "modules.\"\"\" from datetime import datetime as dt from pyvims.pds.times import", "assert str(dt_date('Feb 14, 2005')) == '2005-02-14 00:00:00+00:00' assert str(dt_date('Febr 14,", "1)) == 2005.0 assert dyear(dt(2005, 12, 31)) == 2005.9973 assert", "00:00:00' def test_utc2cassini(): \"\"\"Test UTC to Cassini time converter.\"\"\" assert", "== '2005-01-01 00:00:00' def test_utc2cassini(): \"\"\"Test UTC to Cassini time", "== approx(1558053238.602, abs=1e-3) assert times[1] == approx(1561941262.879, abs=1e-3) def test_pds_folder():", "with raises(ValueError): _ = dt_doy('2005-02-14') def test_dt_date(): \"\"\"Test date pattern.\"\"\"", "== '2005-02-14 00:00:00+00:00' assert str(times[1]) == '2006-03-12 23:59:59+00:00' with raises(ValueError):", "t1 = pds_time('… 2011-10-01T00:02:04.244 through 2011-12-31T12:28:45.128') assert str(t0) == '2011-10-01", "converter.\"\"\" assert utc2cassini('2005-02-14T18:02:29') == approx(1487096932.068, abs=1e-3) times = utc2cassini('May 17,", "== 2 assert times[0] == approx(1558053238.602, abs=1e-3) assert times[1] ==", "from pytest import approx, raises def test_dt_iso(): \"\"\"Test parsing ISO", "ISO time pattern.\"\"\" assert str(dt_iso('2005-02-14T18:02:29.123')) == '2005-02-14 18:02:29.123000+00:00' assert str(dt_iso('2005-02-14", "'2005-02-14 23:59:59+00:00' assert str(dt_date('to Feb 14, 2005')) == '2005-02-14 23:59:59+00:00'", "= pds_time('… 2010-274T00:00:00 through 2010-365T23:59:59') assert str(t0) == '2010-10-01 00:00:00+00:00'", "str(t1) == '2010-12-31 23:59:59+00:00' t0, t1 = pds_time('… 2011-10-01T00:02:04.244 through", "2005-045T18:02:29 to 2005-045T18:03') assert len(times) == 2 assert str(times[0]) ==", "'2006-03-12 23:59:59+00:00' with raises(ValueError): _ = dt_date('2005-02-14') def test_pds_time(): \"\"\"Test", "converter.\"\"\" assert 
str(cassini2utc('v1487096932_1')) == '2005-02-14 18:02:29' assert str(cassini2utc(1483230358.172)) == '2005-01-01", "assert str(t0) == '2005-01-15 17:58:55+00:00' assert str(t1) == '2005-01-16 18:42:33+00:00'", "import approx, raises def test_dt_iso(): \"\"\"Test parsing ISO time pattern.\"\"\"", "== '2005-02-14 18:02:29.123000+00:00' assert str(dt_doy('2005-045 18:02:29')) == '2005-02-14 18:02:29+00:00' assert", "dt_date('from Feb 14, 2005 through March 12, 2006') assert len(times)", "str(times[1]) == '2006-03-12 23:59:59+00:00' with raises(ValueError): _ = dt_date('2005-02-14') def", "'2007-05-17 00:00:00+00:00' assert str(t1) == '2007-06-30 23:59:59+00:00' t0, t1 =", "with raises(ValueError): _ = cassini_time('v123_1') with raises(ValueError): _ = cassini_time(123)", "2005.9973 assert dyear('2004-12-31') == 2004.9973 assert dyear(dt(2005, 1, 1)) ==", "12, 2006') assert len(times) == 2 assert str(times[0]) == '2005-02-14", "Cassini time parsing.\"\"\" assert cassini_time('v1487096932_1.qub') == 1487096932.0 assert cassini_time(1483230358.172) ==", "00:00:00+00:00' assert str(dt_date('Feb 14, 2005', eod=True)) == '2005-02-14 23:59:59+00:00' assert", "'2005-02-14 18:02:29.123000+00:00' assert str(dt_iso('2005-02-14 18:02:29')) == '2005-02-14 18:02:29+00:00' assert str(dt_iso('2005-02-14:18:02'))", "00:00:00+00:00' assert str(times[1]) == '2006-03-12 23:59:59+00:00' with raises(ValueError): _ =", "2010-365T23:59:59') assert str(t0) == '2010-10-01 00:00:00+00:00' assert str(t1) == '2010-12-31", "_ = dt_iso('2005-045') def test_dt_doy(): \"\"\"Test parsing DOY time pattern.\"\"\"", "'2005-02-14 18:02:00+00:00' assert str(dt_iso('2005-02-14')) == '2005-02-14 00:00:00+00:00' times = dt_iso('from", "dyear('2005-12-31') == 2005.9973 assert dyear('2004-12-31') == 2004.9973 assert dyear(dt(2005, 1,", "'2005-02-14 18:02:29.123000+00:00' assert str(dt_doy('2005-045 18:02:29')) == '2005-02-14 18:02:29+00:00' assert str(dt_doy('2005-045:18:02'))", "= dt_doy('2005-02-14') def 
test_dt_date(): \"\"\"Test date pattern.\"\"\" assert str(dt_date('Feb 14,", "approx(1561941262.879, abs=1e-3) def test_pds_folder(): \"\"\"Test convert PDS folder as string.\"\"\"", "2007 through Jun 30, 2007') assert str(t0) == '2007-05-17 00:00:00+00:00'", "str(t1) == '2005-01-16 18:42:33+00:00' with raises(ValueError): _ = pds_time('No data", "00:00:00+00:00' assert str(pds_time('2010-274T00:00:00')) == '2010-10-01 00:00:00+00:00' assert str(pds_time('2011-10-01T00:02:04.244')) == '2011-10-01", "str(t0) == '2005-01-15 17:58:55+00:00' assert str(t1) == '2005-01-16 18:42:33+00:00' with", "len(times) == 2 assert times[0] == approx(1558053238.602, abs=1e-3) assert times[1]", "assert str(dt_iso('2005-02-14')) == '2005-02-14 00:00:00+00:00' times = dt_iso('from 2005-02-14T18:02:29 to", "with raises(ValueError): _ = dt_date('2005-02-14') def test_pds_time(): \"\"\"Test PDS time", "test_pds_folder(): \"\"\"Test convert PDS folder as string.\"\"\" assert pds_folder('2005015T175855') ==", "test_dt_date(): \"\"\"Test date pattern.\"\"\" assert str(dt_date('Feb 14, 2005')) == '2005-02-14", "assert dyear('2005-12-31') == 2005.9973 assert dyear('2004-12-31') == 2004.9973 assert dyear(dt(2005,", "1487096932.0 assert cassini_time(1483230358.172) == 1483230358.172 with raises(ValueError): _ = cassini_time('v123_1')", "17, 2007 through Jun 30, 2007') assert str(t0) == '2007-05-17", "approx(1487096932.068, abs=1e-3) times = utc2cassini('May 17, 2007 through Jun 30,", "assert str(dt_date('Febr 14, 2005')) == '2005-02-14 00:00:00+00:00' assert str(dt_date('Feb 14,", "parsing ISO time pattern.\"\"\" assert str(dt_iso('2005-02-14T18:02:29.123')) == '2005-02-14 18:02:29.123000+00:00' assert", "pds_time('… May 17, 2007 through Jun 30, 2007') assert str(t0)", "import datetime as dt from pyvims.pds.times import (cassini2utc, cassini_time, dt_date,", "== '2005-02-14 00:00:00+00:00' times = dt_doy('from 2005-045T18:02:29 to 2005-045T18:03') assert", "assert dyear('2004-12-31') == 2004.9973 
assert dyear(dt(2005, 1, 1)) == 2005.0", "pytest import approx, raises def test_dt_iso(): \"\"\"Test parsing ISO time", "2005')) == '2005-02-14 00:00:00+00:00' assert str(dt_date('Feb 14, 2005', eod=True)) ==", "'2010-12-31 23:59:59+00:00' t0, t1 = pds_time('… 2011-10-01T00:02:04.244 through 2011-12-31T12:28:45.128') assert", "def test_dt_date(): \"\"\"Test date pattern.\"\"\" assert str(dt_date('Feb 14, 2005')) ==", "assert len(times) == 2 assert times[0] == approx(1558053238.602, abs=1e-3) assert", "raises(ValueError): _ = pds_time('No data available') def test_cassini_time(): \"\"\"Test Cassini", "00:00:00+00:00' assert str(pds_time('2011-10-01T00:02:04.244')) == '2011-10-01 00:02:04.244000+00:00' t0, t1 = pds_time('…" ]
[ "'0108_auto_20171130_1004'), ] operations = [ migrations.AlterModelOptions( name='relaysenderwhitelist', options={'verbose_name': '\\u4e2d\\u7ee7\\u53d1\\u4ef6\\u4eba\\u767d\\u540d\\u5355'}, ),", "class Migration(migrations.Migration): dependencies = [ ('mail', '0108_auto_20171130_1004'), ] operations =", "unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies =", "from __future__ import unicode_literals from django.db import models, migrations class", "operations = [ migrations.AlterModelOptions( name='relaysenderwhitelist', options={'verbose_name': '\\u4e2d\\u7ee7\\u53d1\\u4ef6\\u4eba\\u767d\\u540d\\u5355'}, ), migrations.AlterModelOptions( name='spamrptblacklist',", "Migration(migrations.Migration): dependencies = [ ('mail', '0108_auto_20171130_1004'), ] operations = [", "= [ migrations.AlterModelOptions( name='relaysenderwhitelist', options={'verbose_name': '\\u4e2d\\u7ee7\\u53d1\\u4ef6\\u4eba\\u767d\\u540d\\u5355'}, ), migrations.AlterModelOptions( name='spamrptblacklist', options={'verbose_name':", "migrations class Migration(migrations.Migration): dependencies = [ ('mail', '0108_auto_20171130_1004'), ] operations", "models, migrations class Migration(migrations.Migration): dependencies = [ ('mail', '0108_auto_20171130_1004'), ]", "name='relaysenderwhitelist', options={'verbose_name': '\\u4e2d\\u7ee7\\u53d1\\u4ef6\\u4eba\\u767d\\u540d\\u5355'}, ), migrations.AlterModelOptions( name='spamrptblacklist', options={'verbose_name': '\\u7f51\\u5173\\u9694\\u79bb\\u62a5\\u544a\\u6536\\u4ef6\\u4eba\\u9ed1\\u540d\\u5355'}, ), ]", "dependencies = [ ('mail', '0108_auto_20171130_1004'), ] operations = [ migrations.AlterModelOptions(", "import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies", "coding: utf-8 -*- from __future__ import unicode_literals from django.db import", "] operations = [ migrations.AlterModelOptions( name='relaysenderwhitelist', 
options={'verbose_name': '\\u4e2d\\u7ee7\\u53d1\\u4ef6\\u4eba\\u767d\\u540d\\u5355'}, ), migrations.AlterModelOptions(", "[ migrations.AlterModelOptions( name='relaysenderwhitelist', options={'verbose_name': '\\u4e2d\\u7ee7\\u53d1\\u4ef6\\u4eba\\u767d\\u540d\\u5355'}, ), migrations.AlterModelOptions( name='spamrptblacklist', options={'verbose_name': '\\u7f51\\u5173\\u9694\\u79bb\\u62a5\\u544a\\u6536\\u4ef6\\u4eba\\u9ed1\\u540d\\u5355'},", "django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('mail',", "from django.db import models, migrations class Migration(migrations.Migration): dependencies = [", "-*- from __future__ import unicode_literals from django.db import models, migrations", "__future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration):", "# -*- coding: utf-8 -*- from __future__ import unicode_literals from", "= [ ('mail', '0108_auto_20171130_1004'), ] operations = [ migrations.AlterModelOptions( name='relaysenderwhitelist',", "-*- coding: utf-8 -*- from __future__ import unicode_literals from django.db", "('mail', '0108_auto_20171130_1004'), ] operations = [ migrations.AlterModelOptions( name='relaysenderwhitelist', options={'verbose_name': '\\u4e2d\\u7ee7\\u53d1\\u4ef6\\u4eba\\u767d\\u540d\\u5355'},", "[ ('mail', '0108_auto_20171130_1004'), ] operations = [ migrations.AlterModelOptions( name='relaysenderwhitelist', options={'verbose_name':", "import models, migrations class Migration(migrations.Migration): dependencies = [ ('mail', '0108_auto_20171130_1004'),", "migrations.AlterModelOptions( name='relaysenderwhitelist', options={'verbose_name': '\\u4e2d\\u7ee7\\u53d1\\u4ef6\\u4eba\\u767d\\u540d\\u5355'}, ), migrations.AlterModelOptions( name='spamrptblacklist', options={'verbose_name': '\\u7f51\\u5173\\u9694\\u79bb\\u62a5\\u544a\\u6536\\u4ef6\\u4eba\\u9ed1\\u540d\\u5355'}, ),", "utf-8 -*- from __future__ import unicode_literals from django.db import models," ]
[ "'collection_detail.py') collection_detail_spec = importlib.util.spec_from_file_location('collection_detail', collection_detail_path) collection_detail = importlib.util.module_from_spec(collection_detail_spec) sys.modules['collection_detail'] =", "deprecation removal_version (%r) must be after the ' 'current version", "*list_dict_plugin_routing_schema), ('callback'): Any(None, *list_dict_plugin_routing_schema), ('cliconf'): Any(None, *list_dict_plugin_routing_schema), ('connection'): Any(None, *list_dict_plugin_routing_schema),", "Any(None, *list_dict_plugin_routing_schema), ('lookup'): Any(None, *list_dict_plugin_routing_schema), ('module_utils'): Any(None, *list_dict_plugin_routing_schema), ('modules'): Any(None,", "must not be in the future if version > current_version:", "\"\"\"Validate explicit runtime metadata file\"\"\" try: with open(path, 'r') as", "enabled this test can start failing # at a random", "called '%s'\" % collection_runtime_file))) continue validate_metadata_file( path, is_ansible=path not in", "# pylint: disable=broad-except # We do not care why it", "date string (YYYY-MM-DD), or YAML date' if not isinstance(value, string_types):", "print('%s:%d:%d: YAML load failed: %s' % (path, ex.context_mark.line + 1,", "schema avoid_additional_data = Schema( Any( { Required('removal_version'): any_value, 'warning_text': any_value,", "deprecation, the removal date must be in the future. 
Only", "('callback'): Any(None, *list_dict_plugin_routing_schema), ('cliconf'): Any(None, *list_dict_plugin_routing_schema), ('connection'): Any(None, *list_dict_plugin_routing_schema), ('doc_fragments'):", "('httpapi'): Any(None, *list_dict_plugin_routing_schema), ('inventory'): Any(None, *list_dict_plugin_routing_schema), ('lookup'): Any(None, *list_dict_plugin_routing_schema), ('module_utils'):", "YAML load failed: %s' % (path, 0, 0, re.sub(r'\\s+', '", "Any(tombstoning_schema), ('redirect'): Any(*string_types), }, extra=PREVENT_EXTRA), ) list_dict_plugin_routing_schema = [{str_type: plugin_routing_schema}", "validates the input, and the second makes sure no extra", "from distutils.version import StrictVersion, LooseVersion from functools import partial import", "older versions, # we have to do things manually. if", "continue validate_metadata_file( path, is_ansible=path not in (collection_legacy_file, collection_runtime_file), check_deprecation_dates=check_deprecation_dates) if", "else: # For a deprecation, the removal date must be", "}, { Required('removal_date'): any_value, 'warning_text': any_value, } ), extra=PREVENT_EXTRA )", "a tombstone, the removal version must not be in the", "} ), extra=PREVENT_EXTRA ) deprecation_schema = All( # The first", "(%s)' % (value, current_version)) except ValueError: raise Invalid(msg) return value", "Any(None, *list_dict_plugin_routing_schema), ('strategy'): Any(None, *list_dict_plugin_routing_schema), ('terminal'): Any(None, *list_dict_plugin_routing_schema), ('test'): Any(None,", "string_types): raise Invalid(msg) # From Python 3.7 in, there is", "('action'): Any(None, *list_dict_plugin_routing_schema), ('become'): Any(None, *list_dict_plugin_routing_schema), ('cache'): Any(None, *list_dict_plugin_routing_schema), ('callback'):", "for this test, and (b) make this error optional. 
check_deprecation_dates", "= yaml.safe_load(f_path) except yaml.error.MarkedYAMLError as ex: print('%s:%d:%d: YAML load failed:", "ex.context_mark.column + 1, re.sub(r'\\s+', ' ', str(ex)))) return except Exception", "collection_runtime_file = 'meta/runtime.yml' # This is currently disabled, because if", "future if version <= current_version: raise Invalid('The deprecation removal_version (%r)", "'Expected ISO 8601 date string (YYYY-MM-DD), or YAML date' if", "Schema( Any( { Required('removal_version'): any_value, 'warning_text': any_value, }, { Required('removal_date'):", "get line/column numbers print('%s:%d:%d: %s' % (path, 0, 0, humanize_error(routing,", "if version <= current_version: raise Invalid('The deprecation removal_version (%r) must", "'meta/runtime.yml' # This is currently disabled, because if it is", "sure date is correct today = datetime.date.today() if is_tombstone: #", "get_collection_version(): \"\"\"Return current collection version, or None if it is", "suddenly start to fail. if check_deprecation_date and today > removal_date:", "requires_ansible: In the future we should validate this with SpecifierSet", "removal version must not be in the future if version", "test, and (b) make this error optional. 
check_deprecation_dates = False", "PyBroadException try: result = collection_detail.read_manifest_json('.') or collection_detail.read_galaxy_yml('.') return SemanticVersion(result['version']) except", "should validate this with SpecifierSet ('requires_ansible'): Any(*string_types), ('action_groups'): dict, },", "path == collection_legacy_file: print('%s:%d:%d: %s' % (path, 0, 0, (\"Should", ") list_dict_plugin_routing_schema = [{str_type: plugin_routing_schema} for str_type in string_types] plugin_schema", "sys from distutils.version import StrictVersion, LooseVersion from functools import partial", "line/column numbers print('%s:%d:%d: %s' % (path, 0, 0, humanize_error(routing, error)))", "be in the past if today < removal_date: raise Invalid(", "Any(*string_types), # import_redirect doesn't currently support deprecation }, extra=PREVENT_EXTRA) )", "version must be a semantic version (https://semver.org/)' ) if not", "0): raise Invalid('removal_version (%r) must be a major release, not", "load failed: %s' % (path, ex.context_mark.line + 1, ex.context_mark.column +", "voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA from voluptuous import Required,", "import_redirection_schema = Any( Schema({ ('redirect'): Any(*string_types), # import_redirect doesn't currently", "version > current_version: raise Invalid('The tombstone removal_version (%r) must not", "str(ex)))) return if is_ansible: current_version = get_ansible_version() else: current_version =", "else: # For a deprecation, the removal version must be", "the removal date must be in the past if today", "(%s)' % (removal_date, today)) else: # For a deprecation, the", "0, humanize_error(routing, error))) def main(): \"\"\"Validate runtime metadata\"\"\" paths =", "error optional. 
check_deprecation_dates = False for path in paths: if", "yaml.error.MarkedYAMLError as ex: print('%s:%d:%d: YAML load failed: %s' % (path,", "('filter'): Any(None, *list_dict_plugin_routing_schema), ('httpapi'): Any(None, *list_dict_plugin_routing_schema), ('inventory'): Any(None, *list_dict_plugin_routing_schema), ('lookup'):", "after today (%s)' % (removal_date, today)) return value def removal_version(value,", ")) if current_version is not None: if is_tombstone: # For", "open(path, 'r') as f_path: routing = yaml.safe_load(f_path) except yaml.error.MarkedYAMLError as", "'current version (%s)' % (value, current_version)) else: # For a", "runtime metadata file\"\"\" try: with open(path, 'r') as f_path: routing", "with open(path, 'r') as f_path: routing = yaml.safe_load(f_path) except yaml.error.MarkedYAMLError", "For this to be properly activated, we (a) need to", "> current_version: raise Invalid('The tombstone removal_version (%r) must not be", "import os import re import sys from distutils.version import StrictVersion,", "{ 'removal_version': partial(removal_version, is_ansible=is_ansible, current_version=current_version, is_tombstone=True), 'removal_date': partial(isodate, is_tombstone=True), 'warning_text':", ") list_dict_import_redirection_schema = [{str_type: import_redirection_schema} for str_type in string_types] #", "check_deprecation_date=False, is_tombstone=False): \"\"\"Validate a datetime.date or ISO 8601 date string.\"\"\"", "Required('removal_date'): any_value, 'warning_text': any_value, } ), extra=PREVENT_EXTRA ) deprecation_schema =", "SemanticVersion def isodate(value, check_deprecation_date=False, is_tombstone=False): \"\"\"Validate a datetime.date or ISO", "%s' % (path, 0, 0, (\"Should be called '%s'\" %", "plugin_routing_schema} for str_type in string_types] plugin_schema = Schema({ ('action'): Any(None,", "version, or None if it is not available\"\"\" import importlib.util", "if is_tombstone: # For a tombstone, the removal date must", 
"truish, to avoid checks to suddenly start to fail. if", "keys are specified Schema( { 'removal_version': partial(removal_version, is_ansible=is_ansible, current_version=current_version), 'removal_date':", "Invalid( 'The tombstone removal_date (%s) must not be after today", "schema schema = Schema({ # All of these are optional", "else: version = SemanticVersion() version.parse(value) if version.major != 0 and", "'(see specification at https://semver.org/)' % (value, )) if current_version is", "string_types): raise Invalid(msg) try: if is_ansible: version = StrictVersion() version.parse(value)", "and (version.minor != 0 or version.patch != 0): raise Invalid('removal_version", "('vars'): Any(None, *list_dict_plugin_routing_schema), }, extra=PREVENT_EXTRA) # import_redirection schema import_redirection_schema =", "removal_date: raise Invalid( 'The deprecation removal_date (%s) must be after", "release, not a minor or patch release ' '(see specification", "current_version=current_version, is_tombstone=True), 'removal_date': partial(isodate, is_tombstone=True), 'warning_text': Any(*string_types), } ), avoid_additional_data", "if current_version is not None: if is_tombstone: # For a", "= 'meta/routing.yml' collection_runtime_file = 'meta/runtime.yml' # This is currently disabled,", "schema(routing) except MultipleInvalid as ex: for error in ex.errors: #", "error in ex.errors: # No way to get line/column numbers", "Any(None, *list_dict_plugin_routing_schema), ('doc_fragments'): Any(None, *list_dict_plugin_routing_schema), ('filter'): Any(None, *list_dict_plugin_routing_schema), ('httpapi'): Any(None,", "Any(None, *list_dict_plugin_routing_schema), ('terminal'): Any(None, *list_dict_plugin_routing_schema), ('test'): Any(None, *list_dict_plugin_routing_schema), ('vars'): Any(None,", "re.sub(r'\\s+', ' ', str(ex)))) return except Exception as ex: #", "We do not care why it fails, in case we", "today > removal_date: raise Invalid( 'The deprecation removal_date (%s) must", 
"For a tombstone, the removal version must not be in", "removal date must be in the future. Only test this", "if is_ansible: current_version = get_ansible_version() else: current_version = get_collection_version() #", "str_type in string_types] # top level schema schema = Schema({", "numbers print('%s:%d:%d: %s' % (path, 0, 0, humanize_error(routing, error))) def", "from voluptuous.humanize import humanize_error from ansible.module_utils.six import string_types from ansible.utils.version", "= importlib.util.spec_from_file_location('collection_detail', collection_detail_path) collection_detail = importlib.util.module_from_spec(collection_detail_spec) sys.modules['collection_detail'] = collection_detail collection_detail_spec.loader.exec_module(collection_detail)", "The first schema validates the input, and the second makes", "import StrictVersion, LooseVersion from functools import partial import yaml from", "collection_runtime_file))) continue validate_metadata_file( path, is_ansible=path not in (collection_legacy_file, collection_runtime_file), check_deprecation_dates=check_deprecation_dates)", "tombstone, the removal date must be in the past if", "return LooseVersion('.'.join(__version__.split('.')[:3])) def get_collection_version(): \"\"\"Return current collection version, or None", "False for path in paths: if path == collection_legacy_file: print('%s:%d:%d:", "collection's meta/runtime.yml\"\"\" from __future__ import (absolute_import, division, print_function) __metaclass__ =", "importlib.util.spec_from_file_location('collection_detail', collection_detail_path) collection_detail = importlib.util.module_from_spec(collection_detail_spec) sys.modules['collection_detail'] = collection_detail collection_detail_spec.loader.exec_module(collection_detail) #", "raise Invalid('The tombstone removal_version (%r) must not be after the", "ex.errors: # No way to get line/column numbers print('%s:%d:%d: %s'", "Invalid(msg) # From Python 3.7 in, there is 
datetime.date.fromisoformat(). For", "versions, # we have to do things manually. if not", "the ' 'current version (%s)' % (value, current_version)) except ValueError:", "make sure we have a string msg = 'Expected ISO", "'removal_date': partial(isodate, check_deprecation_date=check_deprecation_dates), 'warning_text': Any(*string_types), } ), avoid_additional_data ) tombstoning_schema", "is_ansible, current_version=None, is_tombstone=False): \"\"\"Validate a removal version string.\"\"\" msg =", "raise Invalid( 'The deprecation removal_date (%s) must be after today", "(%s)' % (value, current_version)) else: # For a deprecation, the", "current_version)) else: # For a deprecation, the removal version must", "= type import datetime import os import re import sys", "a string msg = 'Expected ISO 8601 date string (YYYY-MM-DD),", "a semantic version (https://semver.org/)' ) if not isinstance(value, string_types): raise", "and today > removal_date: raise Invalid( 'The deprecation removal_date (%s)", "partial import yaml from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA", "!= 0 and (version.minor != 0 or version.patch != 0):", "return None to indicate \"we don't know\". 
return None def", "0 and (version.minor != 0 or version.patch != 0): raise", "validate_metadata_file( path, is_ansible=path not in (collection_legacy_file, collection_runtime_file), check_deprecation_dates=check_deprecation_dates) if __name__", "PREVENT_EXTRA from voluptuous import Required, Schema, Invalid from voluptuous.humanize import", "Any(None, *list_dict_plugin_routing_schema), ('shell'): Any(None, *list_dict_plugin_routing_schema), ('strategy'): Any(None, *list_dict_plugin_routing_schema), ('terminal'): Any(None,", "in the future if version > current_version: raise Invalid('The tombstone", "('redirect'): Any(*string_types), }, extra=PREVENT_EXTRA), ) list_dict_plugin_routing_schema = [{str_type: plugin_routing_schema} for", "past if today < removal_date: raise Invalid( 'The tombstone removal_date", "# This is currently disabled, because if it is enabled", "minor or patch release ' '(see specification at https://semver.org/)' %", "For a tombstone, the removal date must be in the", "is_ansible else 'Removal version must be a semantic version (https://semver.org/)'", "raise Invalid( 'The tombstone removal_date (%s) must not be after", "Any(*string_types), ('action_groups'): dict, }, extra=PREVENT_EXTRA) # Ensure schema is valid", "of these are optional ('plugin_routing'): Any(plugin_schema), ('import_redirection'): Any(None, *list_dict_import_redirection_schema), #", "this to be properly activated, we (a) need to be", "partial(isodate, check_deprecation_date=check_deprecation_dates), 'warning_text': Any(*string_types), } ), avoid_additional_data ) tombstoning_schema =", "after today (%s)' % (removal_date, today)) else: # For a", "\"\"\"Return current collection version, or None if it is not", "able to return # codes for this test, and (b)", "raise Invalid('The deprecation removal_version (%r) must be after the '", "if # check_deprecation_date is truish, to avoid checks to suddenly", "not available\"\"\" import importlib.util collection_detail_path = 
os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'collection_detail.py') collection_detail_spec =", "date must be in the past if today < removal_date:", "('cliconf'): Any(None, *list_dict_plugin_routing_schema), ('connection'): Any(None, *list_dict_plugin_routing_schema), ('doc_fragments'): Any(None, *list_dict_plugin_routing_schema), ('filter'):", "except yaml.error.MarkedYAMLError as ex: print('%s:%d:%d: YAML load failed: %s' %", "removal_version (%r) must not be after the ' 'current version", "importlib.util collection_detail_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'collection_detail.py') collection_detail_spec = importlib.util.spec_from_file_location('collection_detail', collection_detail_path) collection_detail", "top level schema schema = Schema({ # All of these", "file\"\"\" try: with open(path, 'r') as f_path: routing = yaml.safe_load(f_path)", "version string.\"\"\" msg = ( 'Removal version must be a", "version (https://semver.org/)' ) if not isinstance(value, string_types): raise Invalid(msg) try:", "removal_date (%s) must be after today (%s)' % (removal_date, today))", "valid try: schema(routing) except MultipleInvalid as ex: for error in", "plugin_routing schema avoid_additional_data = Schema( Any( { Required('removal_version'): any_value, 'warning_text':", "= [{str_type: import_redirection_schema} for str_type in string_types] # top level", "as ex: for error in ex.errors: # No way to", "this error optional. 
check_deprecation_dates = False for path in paths:", "= collection_detail collection_detail_spec.loader.exec_module(collection_detail) # noinspection PyBroadException try: result = collection_detail.read_manifest_json('.')", "No way to get line/column numbers print('%s:%d:%d: %s' % (path,", "= os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'collection_detail.py') collection_detail_spec = importlib.util.spec_from_file_location('collection_detail', collection_detail_path) collection_detail = importlib.util.module_from_spec(collection_detail_spec)", "Invalid('The tombstone removal_version (%r) must not be after the '", "properly activated, we (a) need to be able to return", "= 'meta/runtime.yml' # This is currently disabled, because if it", "deprecation, the removal version must be in the future if", "sure we have a string msg = 'Expected ISO 8601", "datetime.date.today() if is_tombstone: # For a tombstone, the removal date", "in the future if version <= current_version: raise Invalid('The deprecation", "return value def get_ansible_version(): \"\"\"Return current ansible-core version\"\"\" from ansible.release", "collection_detail collection_detail_spec.loader.exec_module(collection_detail) # noinspection PyBroadException try: result = collection_detail.read_manifest_json('.') or", "'warning_text': any_value, } ), extra=PREVENT_EXTRA ) deprecation_schema = All( #", "the input, and the second makes sure no extra keys", "# No way to get line/column numbers print('%s:%d:%d: %s' %", "%s' % (path, 0, 0, humanize_error(routing, error))) def main(): \"\"\"Validate", "% (value, current_version)) else: # For a deprecation, the removal", "any_value, 'warning_text': any_value, }, { Required('removal_date'): any_value, 'warning_text': any_value, }", "print('%s:%d:%d: %s' % (path, 0, 0, (\"Should be called '%s'\"", "things manually. 
if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value): raise Invalid(msg) try: removal_date", "failed: %s' % (path, 0, 0, re.sub(r'\\s+', ' ', str(ex))))", "import yaml from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA from", "check_deprecation_dates=False): \"\"\"Validate explicit runtime metadata file\"\"\" try: with open(path, 'r')", "('plugin_routing'): Any(plugin_schema), ('import_redirection'): Any(None, *list_dict_import_redirection_schema), # requires_ansible: In the future", "Any(None, *list_dict_plugin_routing_schema), ('test'): Any(None, *list_dict_plugin_routing_schema), ('vars'): Any(None, *list_dict_plugin_routing_schema), }, extra=PREVENT_EXTRA)", "else 'Removal version must be a semantic version (https://semver.org/)' )", "('become'): Any(None, *list_dict_plugin_routing_schema), ('cache'): Any(None, *list_dict_plugin_routing_schema), ('callback'): Any(None, *list_dict_plugin_routing_schema), ('cliconf'):", "return except Exception as ex: # pylint: disable=broad-except print('%s:%d:%d: YAML", "(https://semver.org/)' ) if not isinstance(value, string_types): raise Invalid(msg) try: if", "raise Invalid(msg) try: removal_date = datetime.datetime.strptime(value, '%Y-%m-%d').date() except ValueError: raise", "have to do things manually. 
if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value): raise", "routing = yaml.safe_load(f_path) except yaml.error.MarkedYAMLError as ex: print('%s:%d:%d: YAML load", "date string.\"\"\" # datetime.date objects come from YAML dates, these", "ansible.release import __version__ return LooseVersion('.'.join(__version__.split('.')[:3])) def get_collection_version(): \"\"\"Return current collection", "a datetime.date or ISO 8601 date string.\"\"\" # datetime.date objects", "Required('removal_version'): any_value, 'warning_text': any_value, }, { Required('removal_date'): any_value, 'warning_text': any_value,", "these are ok if isinstance(value, datetime.date): removal_date = value else:", "'%s'\" % collection_runtime_file))) continue validate_metadata_file( path, is_ansible=path not in (collection_legacy_file,", "return SemanticVersion(result['version']) except Exception: # pylint: disable=broad-except # We do", "plugin_routing_schema = Any( Schema({ ('deprecation'): Any(deprecation_schema), ('tombstone'): Any(tombstoning_schema), ('redirect'): Any(*string_types),", "no extra keys are specified Schema( { 'removal_version': partial(removal_version, is_ansible=is_ansible,", "random date. 
For this to be properly activated, we (a)", "from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA from voluptuous import", "disabled, because if it is enabled this test can start", "Schema({ ('deprecation'): Any(deprecation_schema), ('tombstone'): Any(tombstoning_schema), ('redirect'): Any(*string_types), }, extra=PREVENT_EXTRA), )", "to get line/column numbers print('%s:%d:%d: %s' % (path, 0, 0,", "return if is_ansible: current_version = get_ansible_version() else: current_version = get_collection_version()", "(path, 0, 0, re.sub(r'\\s+', ' ', str(ex)))) return if is_ansible:", "get_ansible_version() else: current_version = get_collection_version() # Updates to schema MUST", "if check_deprecation_date and today > removal_date: raise Invalid( 'The deprecation", "StrictVersion() version.parse(value) version = LooseVersion(value) # We're storing Ansible's version", "'warning_text': Any(*string_types), } ), avoid_additional_data ) plugin_routing_schema = Any( Schema({", "storing Ansible's version as a LooseVersion else: version = SemanticVersion()", "date' if not isinstance(value, string_types): raise Invalid(msg) # From Python", "must be a semantic version (https://semver.org/)' ) if not isinstance(value,", "specified Schema( { 'removal_version': partial(removal_version, is_ansible=is_ansible, current_version=current_version), 'removal_date': partial(isodate, check_deprecation_date=check_deprecation_dates),", "disable=broad-except # We do not care why it fails, in", "check_deprecation_date and today > removal_date: raise Invalid( 'The deprecation removal_date", "def get_ansible_version(): \"\"\"Return current ansible-core version\"\"\" from ansible.release import __version__", "def main(): \"\"\"Validate runtime metadata\"\"\" paths = sys.argv[1:] or sys.stdin.read().splitlines()", "sys.argv[1:] or sys.stdin.read().splitlines() collection_legacy_file = 'meta/routing.yml' collection_runtime_file = 'meta/runtime.yml' #", "# import_redirection schema 
import_redirection_schema = Any( Schema({ ('redirect'): Any(*string_types), #", "paths: if path == collection_legacy_file: print('%s:%d:%d: %s' % (path, 0,", "string_types] # top level schema schema = Schema({ # All", "*list_dict_plugin_routing_schema), ('cache'): Any(None, *list_dict_plugin_routing_schema), ('callback'): Any(None, *list_dict_plugin_routing_schema), ('cliconf'): Any(None, *list_dict_plugin_routing_schema),", "+ 1, ex.context_mark.column + 1, re.sub(r'\\s+', ' ', str(ex)))) return", "documentation # ~https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html # plugin_routing schema avoid_additional_data = Schema( Any(", "Any( { Required('removal_version'): any_value, 'warning_text': any_value, }, { Required('removal_date'): any_value,", "the documentation # ~https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html # plugin_routing schema avoid_additional_data = Schema(", "try: schema(routing) except MultipleInvalid as ex: for error in ex.errors:", "current_version: raise Invalid('The tombstone removal_version (%r) must not be after", "import importlib.util collection_detail_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'collection_detail.py') collection_detail_spec = importlib.util.spec_from_file_location('collection_detail', collection_detail_path)", "start to fail. if check_deprecation_date and today > removal_date: raise", "future. 
Only test this if # check_deprecation_date is truish, to", "cannot get the version # just return None to indicate", "Invalid( 'The deprecation removal_date (%s) must be after today (%s)'", "sure no extra keys are specified Schema( { 'removal_version': partial(removal_version,", "% (path, 0, 0, (\"Should be called '%s'\" % collection_runtime_file)))", "python \"\"\"Schema validation of ansible-core's ansible_builtin_runtime.yml and collection's meta/runtime.yml\"\"\" from", "input, and the second makes sure no extra keys are", "), extra=PREVENT_EXTRA ) deprecation_schema = All( # The first schema", "value def removal_version(value, is_ansible, current_version=None, is_tombstone=False): \"\"\"Validate a removal version", "in, there is datetime.date.fromisoformat(). For older versions, # we have", "value def get_ansible_version(): \"\"\"Return current ansible-core version\"\"\" from ansible.release import", "Invalid(msg) try: if is_ansible: version = StrictVersion() version.parse(value) version =", "or version.patch != 0): raise Invalid('removal_version (%r) must be a", "*list_dict_plugin_routing_schema), ('doc_fragments'): Any(None, *list_dict_plugin_routing_schema), ('filter'): Any(None, *list_dict_plugin_routing_schema), ('httpapi'): Any(None, *list_dict_plugin_routing_schema),", "Exception as ex: # pylint: disable=broad-except print('%s:%d:%d: YAML load failed:", "this with SpecifierSet ('requires_ansible'): Any(*string_types), ('action_groups'): dict, }, extra=PREVENT_EXTRA) #", "= LooseVersion(value) # We're storing Ansible's version as a LooseVersion", "None def validate_metadata_file(path, is_ansible, check_deprecation_dates=False): \"\"\"Validate explicit runtime metadata file\"\"\"", "try: removal_date = datetime.datetime.strptime(value, '%Y-%m-%d').date() except ValueError: raise Invalid(msg) #", "%s' % (path, ex.context_mark.line + 1, ex.context_mark.column + 1, re.sub(r'\\s+',", "Any(None, *list_dict_import_redirection_schema), # requires_ansible: In the 
future we should validate", "currently support deprecation }, extra=PREVENT_EXTRA) ) list_dict_import_redirection_schema = [{str_type: import_redirection_schema}", "collection_detail = importlib.util.module_from_spec(collection_detail_spec) sys.modules['collection_detail'] = collection_detail collection_detail_spec.loader.exec_module(collection_detail) # noinspection PyBroadException", "\"\"\"Schema validation of ansible-core's ansible_builtin_runtime.yml and collection's meta/runtime.yml\"\"\" from __future__", "), avoid_additional_data ) tombstoning_schema = All( # The first schema", "('connection'): Any(None, *list_dict_plugin_routing_schema), ('doc_fragments'): Any(None, *list_dict_plugin_routing_schema), ('filter'): Any(None, *list_dict_plugin_routing_schema), ('httpapi'):", "not care why it fails, in case we cannot get", "a major release, not a minor or patch release '", "from YAML dates, these are ok if isinstance(value, datetime.date): removal_date", "= datetime.date.today() if is_tombstone: # For a tombstone, the removal", "not be after the ' 'current version (%s)' % (value,", "__metaclass__ = type import datetime import os import re import", "raise Invalid(msg) return value def any_value(value): \"\"\"Accepts anything.\"\"\" return value", "Any(*string_types), } ), avoid_additional_data ) plugin_routing_schema = Any( Schema({ ('deprecation'):", "*list_dict_plugin_routing_schema), }, extra=PREVENT_EXTRA) # import_redirection schema import_redirection_schema = Any( Schema({", "activated, we (a) need to be able to return #", "load failed: %s' % (path, 0, 0, re.sub(r'\\s+', ' ',", "is enabled this test can start failing # at a", "return None def validate_metadata_file(path, is_ansible, check_deprecation_dates=False): \"\"\"Validate explicit runtime metadata", "a random date. 
For this to be properly activated, we", "string.\"\"\" msg = ( 'Removal version must be a string'", "# datetime.date objects come from YAML dates, these are ok", "isodate(value, check_deprecation_date=False, is_tombstone=False): \"\"\"Validate a datetime.date or ISO 8601 date", "is_tombstone=False): \"\"\"Validate a removal version string.\"\"\" msg = ( 'Removal", "), avoid_additional_data ) plugin_routing_schema = Any( Schema({ ('deprecation'): Any(deprecation_schema), ('tombstone'):", "validate this with SpecifierSet ('requires_ansible'): Any(*string_types), ('action_groups'): dict, }, extra=PREVENT_EXTRA)", "removal_date (%s) must not be after today (%s)' % (removal_date,", "8601 date string.\"\"\" # datetime.date objects come from YAML dates,", "SemanticVersion(result['version']) except Exception: # pylint: disable=broad-except # We do not", "noinspection PyBroadException try: result = collection_detail.read_manifest_json('.') or collection_detail.read_galaxy_yml('.') return SemanticVersion(result['version'])", "*list_dict_plugin_routing_schema), ('terminal'): Any(None, *list_dict_plugin_routing_schema), ('test'): Any(None, *list_dict_plugin_routing_schema), ('vars'): Any(None, *list_dict_plugin_routing_schema),", "# ~https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html # plugin_routing schema avoid_additional_data = Schema( Any( {", "MultipleInvalid, PREVENT_EXTRA from voluptuous import Required, Schema, Invalid from voluptuous.humanize", "\"\"\"Validate a datetime.date or ISO 8601 date string.\"\"\" # datetime.date", "any_value(value): \"\"\"Accepts anything.\"\"\" return value def get_ansible_version(): \"\"\"Return current ansible-core", "import string_types from ansible.utils.version import SemanticVersion def isodate(value, check_deprecation_date=False, is_tombstone=False):", "humanize_error from ansible.module_utils.six import string_types from ansible.utils.version import SemanticVersion def", "Any(None, 
*list_dict_plugin_routing_schema), ('become'): Any(None, *list_dict_plugin_routing_schema), ('cache'): Any(None, *list_dict_plugin_routing_schema), ('callback'): Any(None,", "at https://semver.org/)' % (value, )) if current_version is not None:", "version.patch != 0): raise Invalid('removal_version (%r) must be a major", "have a string msg = 'Expected ISO 8601 date string", "{ 'removal_version': partial(removal_version, is_ansible=is_ansible, current_version=current_version), 'removal_date': partial(isodate, check_deprecation_date=check_deprecation_dates), 'warning_text': Any(*string_types),", "for error in ex.errors: # No way to get line/column", "first schema validates the input, and the second makes sure", "(%s) must be after today (%s)' % (removal_date, today)) return", "LooseVersion else: version = SemanticVersion() version.parse(value) if version.major != 0", "3.7 in, there is datetime.date.fromisoformat(). For older versions, # we", "Any(plugin_schema), ('import_redirection'): Any(None, *list_dict_import_redirection_schema), # requires_ansible: In the future we", "check_deprecation_date=check_deprecation_dates), 'warning_text': Any(*string_types), } ), avoid_additional_data ) tombstoning_schema = All(", "current_version is not None: if is_tombstone: # For a tombstone,", "from ansible.utils.version import SemanticVersion def isodate(value, check_deprecation_date=False, is_tombstone=False): \"\"\"Validate a", "humanize_error(routing, error))) def main(): \"\"\"Validate runtime metadata\"\"\" paths = sys.argv[1:]", "currently disabled, because if it is enabled this test can", "can start failing # at a random date. 
For this", "removal_version (%r) must be after the ' 'current version (%s)'", "import Required, Schema, Invalid from voluptuous.humanize import humanize_error from ansible.module_utils.six", "a deprecation, the removal version must be in the future", "= [{str_type: plugin_routing_schema} for str_type in string_types] plugin_schema = Schema({", "import re import sys from distutils.version import StrictVersion, LooseVersion from", "to return # codes for this test, and (b) make", "collection_detail_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'collection_detail.py') collection_detail_spec = importlib.util.spec_from_file_location('collection_detail', collection_detail_path) collection_detail =", "do things manually. if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value): raise Invalid(msg) try:", "indicate \"we don't know\". return None def validate_metadata_file(path, is_ansible, check_deprecation_dates=False):", "ValueError: raise Invalid(msg) return value def any_value(value): \"\"\"Accepts anything.\"\"\" return", "{ Required('removal_version'): any_value, 'warning_text': any_value, }, { Required('removal_date'): any_value, 'warning_text':", "is correct today = datetime.date.today() if is_tombstone: # For a", "collection_detail_spec.loader.exec_module(collection_detail) # noinspection PyBroadException try: result = collection_detail.read_manifest_json('.') or collection_detail.read_galaxy_yml('.')", "for str_type in string_types] plugin_schema = Schema({ ('action'): Any(None, *list_dict_plugin_routing_schema),", "'meta/routing.yml' collection_runtime_file = 'meta/runtime.yml' # This is currently disabled, because", "= datetime.datetime.strptime(value, '%Y-%m-%d').date() except ValueError: raise Invalid(msg) # Make sure", "second makes sure no extra keys are specified Schema( {", "Any(None, *list_dict_plugin_routing_schema), }, extra=PREVENT_EXTRA) # import_redirection schema import_redirection_schema = Any(", "All, Any, 
MultipleInvalid, PREVENT_EXTRA from voluptuous import Required, Schema, Invalid", "% (removal_date, today)) return value def removal_version(value, is_ansible, current_version=None, is_tombstone=False):", "not isinstance(value, string_types): raise Invalid(msg) try: if is_ansible: version =", "(%s) must not be after today (%s)' % (removal_date, today))", ") if not isinstance(value, string_types): raise Invalid(msg) try: if is_ansible:", "*list_dict_plugin_routing_schema), ('inventory'): Any(None, *list_dict_plugin_routing_schema), ('lookup'): Any(None, *list_dict_plugin_routing_schema), ('module_utils'): Any(None, *list_dict_plugin_routing_schema),", "\"\"\"Validate runtime metadata\"\"\" paths = sys.argv[1:] or sys.stdin.read().splitlines() collection_legacy_file =", "correct today = datetime.date.today() if is_tombstone: # For a tombstone,", "(value, )) if current_version is not None: if is_tombstone: #", "= All( # The first schema validates the input, and", "MultipleInvalid as ex: for error in ex.errors: # No way", "collection_detail.read_manifest_json('.') or collection_detail.read_galaxy_yml('.') return SemanticVersion(result['version']) except Exception: # pylint: disable=broad-except", "# For a deprecation, the removal version must be in", "Any, MultipleInvalid, PREVENT_EXTRA from voluptuous import Required, Schema, Invalid from", "> removal_date: raise Invalid( 'The deprecation removal_date (%s) must be", "validate_metadata_file(path, is_ansible, check_deprecation_dates=False): \"\"\"Validate explicit runtime metadata file\"\"\" try: with", "current_version: raise Invalid('The deprecation removal_version (%r) must be after the", "today = datetime.date.today() if is_tombstone: # For a tombstone, the", "*list_dict_plugin_routing_schema), ('shell'): Any(None, *list_dict_plugin_routing_schema), ('strategy'): Any(None, *list_dict_plugin_routing_schema), ('terminal'): Any(None, *list_dict_plugin_routing_schema),", "for path in paths: if path == 
collection_legacy_file: print('%s:%d:%d: %s'", "deprecation removal_date (%s) must be after today (%s)' % (removal_date,", "collection_legacy_file: print('%s:%d:%d: %s' % (path, 0, 0, (\"Should be called", "[{str_type: plugin_routing_schema} for str_type in string_types] plugin_schema = Schema({ ('action'):", "today)) else: # For a deprecation, the removal date must", "version (%s)' % (value, current_version)) except ValueError: raise Invalid(msg) return", "distutils.version import StrictVersion, LooseVersion from functools import partial import yaml", "('tombstone'): Any(tombstoning_schema), ('redirect'): Any(*string_types), }, extra=PREVENT_EXTRA), ) list_dict_plugin_routing_schema = [{str_type:", "!= 0 or version.patch != 0): raise Invalid('removal_version (%r) must", "as f_path: routing = yaml.safe_load(f_path) except yaml.error.MarkedYAMLError as ex: print('%s:%d:%d:", "from functools import partial import yaml from voluptuous import All,", "division, print_function) __metaclass__ = type import datetime import os import", "YAML date' if not isinstance(value, string_types): raise Invalid(msg) # From", "ex: for error in ex.errors: # No way to get", "# at a random date. 
For this to be properly", "# plugin_routing schema avoid_additional_data = Schema( Any( { Required('removal_version'): any_value,", "schema import_redirection_schema = Any( Schema({ ('redirect'): Any(*string_types), # import_redirect doesn't", "in case we cannot get the version # just return", "do not care why it fails, in case we cannot", "# Updates to schema MUST also be reflected in the", "not be after today (%s)' % (removal_date, today)) else: #", "*list_dict_plugin_routing_schema), ('modules'): Any(None, *list_dict_plugin_routing_schema), ('netconf'): Any(None, *list_dict_plugin_routing_schema), ('shell'): Any(None, *list_dict_plugin_routing_schema),", "(removal_date, today)) else: # For a deprecation, the removal date", "isinstance(value, datetime.date): removal_date = value else: # make sure we", "*list_dict_plugin_routing_schema), ('connection'): Any(None, *list_dict_plugin_routing_schema), ('doc_fragments'): Any(None, *list_dict_plugin_routing_schema), ('filter'): Any(None, *list_dict_plugin_routing_schema),", "import (absolute_import, division, print_function) __metaclass__ = type import datetime import", "('doc_fragments'): Any(None, *list_dict_plugin_routing_schema), ('filter'): Any(None, *list_dict_plugin_routing_schema), ('httpapi'): Any(None, *list_dict_plugin_routing_schema), ('inventory'):", "to be able to return # codes for this test,", "if is_ansible else 'Removal version must be a semantic version", "be reflected in the documentation # ~https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html # plugin_routing schema", "partial(removal_version, is_ansible=is_ansible, current_version=current_version, is_tombstone=True), 'removal_date': partial(isodate, is_tombstone=True), 'warning_text': Any(*string_types), }", "%s' % (path, 0, 0, re.sub(r'\\s+', ' ', str(ex)))) return", "SpecifierSet ('requires_ansible'): Any(*string_types), ('action_groups'): dict, }, extra=PREVENT_EXTRA) # Ensure schema", "a minor or patch release ' '(see 
specification at https://semver.org/)'", "*list_dict_plugin_routing_schema), ('httpapi'): Any(None, *list_dict_plugin_routing_schema), ('inventory'): Any(None, *list_dict_plugin_routing_schema), ('lookup'): Any(None, *list_dict_plugin_routing_schema),", "metadata\"\"\" paths = sys.argv[1:] or sys.stdin.read().splitlines() collection_legacy_file = 'meta/routing.yml' collection_runtime_file", "#!/usr/bin/env python \"\"\"Schema validation of ansible-core's ansible_builtin_runtime.yml and collection's meta/runtime.yml\"\"\"", "'The deprecation removal_date (%s) must be after today (%s)' %", "be able to return # codes for this test, and", "ansible_builtin_runtime.yml and collection's meta/runtime.yml\"\"\" from __future__ import (absolute_import, division, print_function)", "(b) make this error optional. check_deprecation_dates = False for path", "in string_types] # top level schema schema = Schema({ #", "a LooseVersion else: version = SemanticVersion() version.parse(value) if version.major !=", "'The tombstone removal_date (%s) must not be after today (%s)'", "We're storing Ansible's version as a LooseVersion else: version =", "patch release ' '(see specification at https://semver.org/)' % (value, ))", "try: with open(path, 'r') as f_path: routing = yaml.safe_load(f_path) except", "or sys.stdin.read().splitlines() collection_legacy_file = 'meta/routing.yml' collection_runtime_file = 'meta/runtime.yml' # This", "path in paths: if path == collection_legacy_file: print('%s:%d:%d: %s' %", "else: # make sure we have a string msg =", "Updates to schema MUST also be reflected in the documentation", "= 'Expected ISO 8601 date string (YYYY-MM-DD), or YAML date'", "the removal version must not be in the future if", "('netconf'): Any(None, *list_dict_plugin_routing_schema), ('shell'): Any(None, *list_dict_plugin_routing_schema), ('strategy'): Any(None, *list_dict_plugin_routing_schema), ('terminal'):", "to indicate \"we don't know\". 
return None def validate_metadata_file(path, is_ansible,", "tombstone, the removal version must not be in the future", "today)) return value def removal_version(value, is_ansible, current_version=None, is_tombstone=False): \"\"\"Validate a", "re.sub(r'\\s+', ' ', str(ex)))) return if is_ansible: current_version = get_ansible_version()", "ValueError: raise Invalid(msg) # Make sure date is correct today", "string' if is_ansible else 'Removal version must be a semantic", "Ensure schema is valid try: schema(routing) except MultipleInvalid as ex:", "('deprecation'): Any(deprecation_schema), ('tombstone'): Any(tombstoning_schema), ('redirect'): Any(*string_types), }, extra=PREVENT_EXTRA), ) list_dict_plugin_routing_schema", "Invalid('The deprecation removal_version (%r) must be after the ' 'current", "# From Python 3.7 in, there is datetime.date.fromisoformat(). For older", "= ( 'Removal version must be a string' if is_ansible", "voluptuous.humanize import humanize_error from ansible.module_utils.six import string_types from ansible.utils.version import", "is_ansible, check_deprecation_dates=False): \"\"\"Validate explicit runtime metadata file\"\"\" try: with open(path,", "Schema({ ('action'): Any(None, *list_dict_plugin_routing_schema), ('become'): Any(None, *list_dict_plugin_routing_schema), ('cache'): Any(None, *list_dict_plugin_routing_schema),", "*list_dict_plugin_routing_schema), ('vars'): Any(None, *list_dict_plugin_routing_schema), }, extra=PREVENT_EXTRA) # import_redirection schema import_redirection_schema", "Exception: # pylint: disable=broad-except # We do not care why", "version as a LooseVersion else: version = SemanticVersion() version.parse(value) if", "case we cannot get the version # just return None", "'warning_text': any_value, }, { Required('removal_date'): any_value, 'warning_text': any_value, } ),", "Schema, Invalid from voluptuous.humanize import humanize_error from ansible.module_utils.six import string_types", "removal_date = 
datetime.datetime.strptime(value, '%Y-%m-%d').date() except ValueError: raise Invalid(msg) # Make", "version must be a string' if is_ansible else 'Removal version", "0, re.sub(r'\\s+', ' ', str(ex)))) return if is_ansible: current_version =", "f_path: routing = yaml.safe_load(f_path) except yaml.error.MarkedYAMLError as ex: print('%s:%d:%d: YAML", "a tombstone, the removal date must be in the past", "'Removal version must be a string' if is_ansible else 'Removal", "Invalid from voluptuous.humanize import humanize_error from ansible.module_utils.six import string_types from", "level schema schema = Schema({ # All of these are", "(\"Should be called '%s'\" % collection_runtime_file))) continue validate_metadata_file( path, is_ansible=path", "# make sure we have a string msg = 'Expected", "partial(isodate, is_tombstone=True), 'warning_text': Any(*string_types), } ), avoid_additional_data ) plugin_routing_schema =", "('inventory'): Any(None, *list_dict_plugin_routing_schema), ('lookup'): Any(None, *list_dict_plugin_routing_schema), ('module_utils'): Any(None, *list_dict_plugin_routing_schema), ('modules'):", "'removal_version': partial(removal_version, is_ansible=is_ansible, current_version=current_version), 'removal_date': partial(isodate, check_deprecation_date=check_deprecation_dates), 'warning_text': Any(*string_types), }", "a removal version string.\"\"\" msg = ( 'Removal version must", "def removal_version(value, is_ansible, current_version=None, is_tombstone=False): \"\"\"Validate a removal version string.\"\"\"", "msg = ( 'Removal version must be a string' if", "'warning_text': Any(*string_types), } ), avoid_additional_data ) tombstoning_schema = All( #", "list_dict_import_redirection_schema = [{str_type: import_redirection_schema} for str_type in string_types] # top", "we should validate this with SpecifierSet ('requires_ansible'): Any(*string_types), ('action_groups'): dict,", "= sys.argv[1:] or sys.stdin.read().splitlines() collection_legacy_file = 
'meta/routing.yml' collection_runtime_file = 'meta/runtime.yml'", "ansible.module_utils.six import string_types from ansible.utils.version import SemanticVersion def isodate(value, check_deprecation_date=False,", "semantic version (https://semver.org/)' ) if not isinstance(value, string_types): raise Invalid(msg)", "('terminal'): Any(None, *list_dict_plugin_routing_schema), ('test'): Any(None, *list_dict_plugin_routing_schema), ('vars'): Any(None, *list_dict_plugin_routing_schema), },", "= importlib.util.module_from_spec(collection_detail_spec) sys.modules['collection_detail'] = collection_detail collection_detail_spec.loader.exec_module(collection_detail) # noinspection PyBroadException try:", "', str(ex)))) return except Exception as ex: # pylint: disable=broad-except", "as ex: # pylint: disable=broad-except print('%s:%d:%d: YAML load failed: %s'", "Any(None, *list_dict_plugin_routing_schema), ('httpapi'): Any(None, *list_dict_plugin_routing_schema), ('inventory'): Any(None, *list_dict_plugin_routing_schema), ('lookup'): Any(None,", "print('%s:%d:%d: %s' % (path, 0, 0, humanize_error(routing, error))) def main():", "be called '%s'\" % collection_runtime_file))) continue validate_metadata_file( path, is_ansible=path not", "collection_detail.read_galaxy_yml('.') return SemanticVersion(result['version']) except Exception: # pylint: disable=broad-except # We", "= SemanticVersion() version.parse(value) if version.major != 0 and (version.minor !=", "collection_detail_path) collection_detail = importlib.util.module_from_spec(collection_detail_spec) sys.modules['collection_detail'] = collection_detail collection_detail_spec.loader.exec_module(collection_detail) # noinspection", "or ISO 8601 date string.\"\"\" # datetime.date objects come from", "% (path, ex.context_mark.line + 1, ex.context_mark.column + 1, re.sub(r'\\s+', '", "# top level schema schema = Schema({ # All of", "way to get line/column numbers print('%s:%d:%d: %s' % (path, 0,", "None to indicate \"we don't 
know\". return None def validate_metadata_file(path,", "is_ansible=path not in (collection_legacy_file, collection_runtime_file), check_deprecation_dates=check_deprecation_dates) if __name__ == '__main__':", "reflected in the documentation # ~https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html # plugin_routing schema avoid_additional_data", "if version.major != 0 and (version.minor != 0 or version.patch", "validation of ansible-core's ansible_builtin_runtime.yml and collection's meta/runtime.yml\"\"\" from __future__ import", "extra=PREVENT_EXTRA ) deprecation_schema = All( # The first schema validates", "is not available\"\"\" import importlib.util collection_detail_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'collection_detail.py') collection_detail_spec", "it is enabled this test can start failing # at", "value): raise Invalid(msg) try: removal_date = datetime.datetime.strptime(value, '%Y-%m-%d').date() except ValueError:", "% collection_runtime_file))) continue validate_metadata_file( path, is_ansible=path not in (collection_legacy_file, collection_runtime_file),", "https://semver.org/)' % (value, )) if current_version is not None: if", "is_tombstone: # For a tombstone, the removal date must be", "care why it fails, in case we cannot get the", "importlib.util.module_from_spec(collection_detail_spec) sys.modules['collection_detail'] = collection_detail collection_detail_spec.loader.exec_module(collection_detail) # noinspection PyBroadException try: result", "LooseVersion from functools import partial import yaml from voluptuous import", "' ', str(ex)))) return except Exception as ex: # pylint:", "disable=broad-except print('%s:%d:%d: YAML load failed: %s' % (path, 0, 0,", "any_value, 'warning_text': any_value, } ), extra=PREVENT_EXTRA ) deprecation_schema = All(", "Schema({ ('redirect'): Any(*string_types), # import_redirect doesn't currently support deprecation },", "import __version__ return 
LooseVersion('.'.join(__version__.split('.')[:3])) def get_collection_version(): \"\"\"Return current collection version,", "% (value, )) if current_version is not None: if is_tombstone:", "we cannot get the version # just return None to", "Any(None, *list_dict_plugin_routing_schema), ('filter'): Any(None, *list_dict_plugin_routing_schema), ('httpapi'): Any(None, *list_dict_plugin_routing_schema), ('inventory'): Any(None,", "__future__ import (absolute_import, division, print_function) __metaclass__ = type import datetime", "must be in the past if today < removal_date: raise", "removal version must be in the future if version <=", "For a deprecation, the removal date must be in the", "current_version = get_ansible_version() else: current_version = get_collection_version() # Updates to", "Any( Schema({ ('redirect'): Any(*string_types), # import_redirect doesn't currently support deprecation", "Any(None, *list_dict_plugin_routing_schema), ('netconf'): Any(None, *list_dict_plugin_routing_schema), ('shell'): Any(None, *list_dict_plugin_routing_schema), ('strategy'): Any(None,", "# requires_ansible: In the future we should validate this with", "except Exception: # pylint: disable=broad-except # We do not care", "= value else: # make sure we have a string", "avoid_additional_data ) tombstoning_schema = All( # The first schema validates", "}, extra=PREVENT_EXTRA) # import_redirection schema import_redirection_schema = Any( Schema({ ('redirect'):", "print_function) __metaclass__ = type import datetime import os import re", "meta/runtime.yml\"\"\" from __future__ import (absolute_import, division, print_function) __metaclass__ = type", "version.major != 0 and (version.minor != 0 or version.patch !=", "From Python 3.7 in, there is datetime.date.fromisoformat(). 
For older versions,", "\"\"\"Accepts anything.\"\"\" return value def get_ansible_version(): \"\"\"Return current ansible-core version\"\"\"", "'%Y-%m-%d').date() except ValueError: raise Invalid(msg) # Make sure date is", "(path, ex.context_mark.line + 1, ex.context_mark.column + 1, re.sub(r'\\s+', ' ',", "if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value): raise Invalid(msg) try: removal_date = datetime.datetime.strptime(value,", "explicit runtime metadata file\"\"\" try: with open(path, 'r') as f_path:", ") plugin_routing_schema = Any( Schema({ ('deprecation'): Any(deprecation_schema), ('tombstone'): Any(tombstoning_schema), ('redirect'):", "*list_dict_plugin_routing_schema), ('test'): Any(None, *list_dict_plugin_routing_schema), ('vars'): Any(None, *list_dict_plugin_routing_schema), }, extra=PREVENT_EXTRA) #", "raise Invalid(msg) # From Python 3.7 in, there is datetime.date.fromisoformat().", "any_value, }, { Required('removal_date'): any_value, 'warning_text': any_value, } ), extra=PREVENT_EXTRA", "version # just return None to indicate \"we don't know\".", "string.\"\"\" # datetime.date objects come from YAML dates, these are", "('test'): Any(None, *list_dict_plugin_routing_schema), ('vars'): Any(None, *list_dict_plugin_routing_schema), }, extra=PREVENT_EXTRA) # import_redirection", "*list_dict_plugin_routing_schema), ('cliconf'): Any(None, *list_dict_plugin_routing_schema), ('connection'): Any(None, *list_dict_plugin_routing_schema), ('doc_fragments'): Any(None, *list_dict_plugin_routing_schema),", "ansible-core version\"\"\" from ansible.release import __version__ return LooseVersion('.'.join(__version__.split('.')[:3])) def get_collection_version():", ") deprecation_schema = All( # The first schema validates the", "current_version)) except ValueError: raise Invalid(msg) return value def any_value(value): \"\"\"Accepts", "except ValueError: raise Invalid(msg) # Make sure date is correct", "if today < removal_date: raise Invalid( 'The tombstone 
removal_date (%s)", "tombstone removal_version (%r) must not be after the ' 'current", "def get_collection_version(): \"\"\"Return current collection version, or None if it", "just return None to indicate \"we don't know\". return None", "extra keys are specified Schema( { 'removal_version': partial(removal_version, is_ansible=is_ansible, current_version=current_version),", "available\"\"\" import importlib.util collection_detail_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'collection_detail.py') collection_detail_spec = importlib.util.spec_from_file_location('collection_detail',", "Any( Schema({ ('deprecation'): Any(deprecation_schema), ('tombstone'): Any(tombstoning_schema), ('redirect'): Any(*string_types), }, extra=PREVENT_EXTRA),", "today (%s)' % (removal_date, today)) else: # For a deprecation,", "are optional ('plugin_routing'): Any(plugin_schema), ('import_redirection'): Any(None, *list_dict_import_redirection_schema), # requires_ansible: In", "import datetime import os import re import sys from distutils.version", "def any_value(value): \"\"\"Accepts anything.\"\"\" return value def get_ansible_version(): \"\"\"Return current", "(path, 0, 0, (\"Should be called '%s'\" % collection_runtime_file))) continue", "( 'Removal version must be a string' if is_ansible else", "version = StrictVersion() version.parse(value) version = LooseVersion(value) # We're storing", "error))) def main(): \"\"\"Validate runtime metadata\"\"\" paths = sys.argv[1:] or", "must be in the future. Only test this if #", "sys.modules['collection_detail'] = collection_detail collection_detail_spec.loader.exec_module(collection_detail) # noinspection PyBroadException try: result =", "import_redirection_schema} for str_type in string_types] # top level schema schema", "or YAML date' if not isinstance(value, string_types): raise Invalid(msg) #", "\"we don't know\". 
return None def validate_metadata_file(path, is_ansible, check_deprecation_dates=False): \"\"\"Validate", "import_redirect doesn't currently support deprecation }, extra=PREVENT_EXTRA) ) list_dict_import_redirection_schema =", "(%r) must be after the ' 'current version (%s)' %", "raise Invalid('removal_version (%r) must be a major release, not a", ") tombstoning_schema = All( # The first schema validates the", "(path, 0, 0, humanize_error(routing, error))) def main(): \"\"\"Validate runtime metadata\"\"\"", "there is datetime.date.fromisoformat(). For older versions, # we have to", "Any(deprecation_schema), ('tombstone'): Any(tombstoning_schema), ('redirect'): Any(*string_types), }, extra=PREVENT_EXTRA), ) list_dict_plugin_routing_schema =", "value else: # make sure we have a string msg", "at a random date. For this to be properly activated,", "('action_groups'): dict, }, extra=PREVENT_EXTRA) # Ensure schema is valid try:", "msg = 'Expected ISO 8601 date string (YYYY-MM-DD), or YAML", "datetime.date): removal_date = value else: # make sure we have", "current_version = get_collection_version() # Updates to schema MUST also be", "Schema({ # All of these are optional ('plugin_routing'): Any(plugin_schema), ('import_redirection'):", "if is_ansible: version = StrictVersion() version.parse(value) version = LooseVersion(value) #", "', str(ex)))) return if is_ansible: current_version = get_ansible_version() else: current_version", "current_version=current_version), 'removal_date': partial(isodate, check_deprecation_date=check_deprecation_dates), 'warning_text': Any(*string_types), } ), avoid_additional_data )", "objects come from YAML dates, these are ok if isinstance(value,", "# For a deprecation, the removal date must be in", "raise Invalid(msg) try: if is_ansible: version = StrictVersion() version.parse(value) version", "# Ensure schema is valid try: schema(routing) except MultipleInvalid as", "start failing # at a random date. 
For this to", "= False for path in paths: if path == collection_legacy_file:", "ansible-core's ansible_builtin_runtime.yml and collection's meta/runtime.yml\"\"\" from __future__ import (absolute_import, division,", "and (b) make this error optional. check_deprecation_dates = False for", "SemanticVersion() version.parse(value) if version.major != 0 and (version.minor != 0", "dict, }, extra=PREVENT_EXTRA) # Ensure schema is valid try: schema(routing)", "\"\"\"Validate a removal version string.\"\"\" msg = ( 'Removal version", "is not None: if is_tombstone: # For a tombstone, the", "why it fails, in case we cannot get the version", "schema is valid try: schema(routing) except MultipleInvalid as ex: for", "deprecation_schema = All( # The first schema validates the input,", "be a major release, not a minor or patch release", "0, 0, re.sub(r'\\s+', ' ', str(ex)))) return if is_ansible: current_version", "' ', str(ex)))) return if is_ansible: current_version = get_ansible_version() else:", "anything.\"\"\" return value def get_ansible_version(): \"\"\"Return current ansible-core version\"\"\" from", "is valid try: schema(routing) except MultipleInvalid as ex: for error", "make this error optional. check_deprecation_dates = False for path in", "with SpecifierSet ('requires_ansible'): Any(*string_types), ('action_groups'): dict, }, extra=PREVENT_EXTRA) # Ensure", "must be in the future if version <= current_version: raise", "deprecation }, extra=PREVENT_EXTRA) ) list_dict_import_redirection_schema = [{str_type: import_redirection_schema} for str_type", "os import re import sys from distutils.version import StrictVersion, LooseVersion", "know\". 
return None def validate_metadata_file(path, is_ansible, check_deprecation_dates=False): \"\"\"Validate explicit runtime", "Invalid('removal_version (%r) must be a major release, not a minor", "not a minor or patch release ' '(see specification at", "('import_redirection'): Any(None, *list_dict_import_redirection_schema), # requires_ansible: In the future we should", "to be properly activated, we (a) need to be able", "be after today (%s)' % (removal_date, today)) else: # For", "= StrictVersion() version.parse(value) version = LooseVersion(value) # We're storing Ansible's", "('modules'): Any(None, *list_dict_plugin_routing_schema), ('netconf'): Any(None, *list_dict_plugin_routing_schema), ('shell'): Any(None, *list_dict_plugin_routing_schema), ('strategy'):", "datetime.date objects come from YAML dates, these are ok if", "pylint: disable=broad-except # We do not care why it fails,", "# check_deprecation_date is truish, to avoid checks to suddenly start", "plugin_schema = Schema({ ('action'): Any(None, *list_dict_plugin_routing_schema), ('become'): Any(None, *list_dict_plugin_routing_schema), ('cache'):", "version\"\"\" from ansible.release import __version__ return LooseVersion('.'.join(__version__.split('.')[:3])) def get_collection_version(): \"\"\"Return", "= collection_detail.read_manifest_json('.') or collection_detail.read_galaxy_yml('.') return SemanticVersion(result['version']) except Exception: # pylint:", "('module_utils'): Any(None, *list_dict_plugin_routing_schema), ('modules'): Any(None, *list_dict_plugin_routing_schema), ('netconf'): Any(None, *list_dict_plugin_routing_schema), ('shell'):", "ex: print('%s:%d:%d: YAML load failed: %s' % (path, ex.context_mark.line +", "because if it is enabled this test can start failing", "# For a tombstone, the removal version must not be", "extra=PREVENT_EXTRA) # import_redirection schema import_redirection_schema = Any( Schema({ ('redirect'): Any(*string_types),", "keys are specified Schema( { 'removal_version': 
partial(removal_version, is_ansible=is_ansible, current_version=current_version, is_tombstone=True),", "get_ansible_version(): \"\"\"Return current ansible-core version\"\"\" from ansible.release import __version__ return", "= Schema({ ('action'): Any(None, *list_dict_plugin_routing_schema), ('become'): Any(None, *list_dict_plugin_routing_schema), ('cache'): Any(None,", "version must not be in the future if version >", "functools import partial import yaml from voluptuous import All, Any,", "*list_dict_plugin_routing_schema), ('filter'): Any(None, *list_dict_plugin_routing_schema), ('httpapi'): Any(None, *list_dict_plugin_routing_schema), ('inventory'): Any(None, *list_dict_plugin_routing_schema),", "datetime.date.fromisoformat(). For older versions, # we have to do things", "in the future. Only test this if # check_deprecation_date is", "Any(*string_types), }, extra=PREVENT_EXTRA), ) list_dict_plugin_routing_schema = [{str_type: plugin_routing_schema} for str_type", "removal version string.\"\"\" msg = ( 'Removal version must be", "any_value, } ), extra=PREVENT_EXTRA ) deprecation_schema = All( # The", "import All, Any, MultipleInvalid, PREVENT_EXTRA from voluptuous import Required, Schema,", "list_dict_plugin_routing_schema = [{str_type: plugin_routing_schema} for str_type in string_types] plugin_schema =", "*list_dict_plugin_routing_schema), ('module_utils'): Any(None, *list_dict_plugin_routing_schema), ('modules'): Any(None, *list_dict_plugin_routing_schema), ('netconf'): Any(None, *list_dict_plugin_routing_schema),", "runtime metadata\"\"\" paths = sys.argv[1:] or sys.stdin.read().splitlines() collection_legacy_file = 'meta/routing.yml'", "re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value): raise Invalid(msg) try: removal_date = datetime.datetime.strptime(value, '%Y-%m-%d').date() except", "must be a major release, not a minor or patch", "is_ansible: current_version = get_ansible_version() else: current_version = get_collection_version() # Updates", 
"removal_date: raise Invalid( 'The tombstone removal_date (%s) must not be", "' '(see specification at https://semver.org/)' % (value, )) if current_version", "(%s)' % (removal_date, today)) return value def removal_version(value, is_ansible, current_version=None,", "voluptuous import Required, Schema, Invalid from voluptuous.humanize import humanize_error from", "makes sure no extra keys are specified Schema( { 'removal_version':", "string (YYYY-MM-DD), or YAML date' if not isinstance(value, string_types): raise", "!= 0): raise Invalid('removal_version (%r) must be a major release,", "try: if is_ansible: version = StrictVersion() version.parse(value) version = LooseVersion(value)", "these are optional ('plugin_routing'): Any(plugin_schema), ('import_redirection'): Any(None, *list_dict_import_redirection_schema), # requires_ansible:", "is_ansible=is_ansible, current_version=current_version, is_tombstone=True), 'removal_date': partial(isodate, is_tombstone=True), 'warning_text': Any(*string_types), } ),", "'removal_date': partial(isodate, is_tombstone=True), 'warning_text': Any(*string_types), } ), avoid_additional_data ) plugin_routing_schema", "in string_types] plugin_schema = Schema({ ('action'): Any(None, *list_dict_plugin_routing_schema), ('become'): Any(None,", "be properly activated, we (a) need to be able to", "must not be after the ' 'current version (%s)' %", "be in the future if version > current_version: raise Invalid('The", "specified Schema( { 'removal_version': partial(removal_version, is_ansible=is_ansible, current_version=current_version, is_tombstone=True), 'removal_date': partial(isodate,", "in the documentation # ~https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html # plugin_routing schema avoid_additional_data =", "not isinstance(value, string_types): raise Invalid(msg) # From Python 3.7 in,", "ISO 8601 date string.\"\"\" # datetime.date objects come from YAML", "# For a tombstone, the removal date must be in", "type 
import datetime import os import re import sys from", "current ansible-core version\"\"\" from ansible.release import __version__ return LooseVersion('.'.join(__version__.split('.')[:3])) def", "Any(None, *list_dict_plugin_routing_schema), ('modules'): Any(None, *list_dict_plugin_routing_schema), ('netconf'): Any(None, *list_dict_plugin_routing_schema), ('shell'): Any(None,", "removal date must be in the past if today <", "Ansible's version as a LooseVersion else: version = SemanticVersion() version.parse(value)", "}, extra=PREVENT_EXTRA), ) list_dict_plugin_routing_schema = [{str_type: plugin_routing_schema} for str_type in", "be a semantic version (https://semver.org/)' ) if not isinstance(value, string_types):", "# we have to do things manually. if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$',", "must be after the ' 'current version (%s)' % (value,", "None if it is not available\"\"\" import importlib.util collection_detail_path =", "future if version > current_version: raise Invalid('The tombstone removal_version (%r)", "str(ex)))) return except Exception as ex: # pylint: disable=broad-except print('%s:%d:%d:", "from voluptuous import Required, Schema, Invalid from voluptuous.humanize import humanize_error", "future we should validate this with SpecifierSet ('requires_ansible'): Any(*string_types), ('action_groups'):", "this if # check_deprecation_date is truish, to avoid checks to", "% (path, 0, 0, re.sub(r'\\s+', ' ', str(ex)))) return if", "import SemanticVersion def isodate(value, check_deprecation_date=False, is_tombstone=False): \"\"\"Validate a datetime.date or", "version = LooseVersion(value) # We're storing Ansible's version as a", "ex: # pylint: disable=broad-except print('%s:%d:%d: YAML load failed: %s' %", "ISO 8601 date string (YYYY-MM-DD), or YAML date' if not", "to suddenly start to fail. 
if check_deprecation_date and today >", "isinstance(value, string_types): raise Invalid(msg) try: if is_ansible: version = StrictVersion()", "this test can start failing # at a random date.", "datetime.datetime.strptime(value, '%Y-%m-%d').date() except ValueError: raise Invalid(msg) # Make sure date", "~https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html # plugin_routing schema avoid_additional_data = Schema( Any( { Required('removal_version'):", "this test, and (b) make this error optional. check_deprecation_dates =", "result = collection_detail.read_manifest_json('.') or collection_detail.read_galaxy_yml('.') return SemanticVersion(result['version']) except Exception: #", "to fail. if check_deprecation_date and today > removal_date: raise Invalid(", "get_collection_version() # Updates to schema MUST also be reflected in", "today < removal_date: raise Invalid( 'The tombstone removal_date (%s) must", "current collection version, or None if it is not available\"\"\"", "collection_legacy_file = 'meta/routing.yml' collection_runtime_file = 'meta/runtime.yml' # This is currently", "ansible.utils.version import SemanticVersion def isodate(value, check_deprecation_date=False, is_tombstone=False): \"\"\"Validate a datetime.date", "and collection's meta/runtime.yml\"\"\" from __future__ import (absolute_import, division, print_function) __metaclass__", "# The first schema validates the input, and the second", "removal_date = value else: # make sure we have a", "fails, in case we cannot get the version # just", "are specified Schema( { 'removal_version': partial(removal_version, is_ansible=is_ansible, current_version=current_version), 'removal_date': partial(isodate,", "is datetime.date.fromisoformat(). 
For older versions, # we have to do", "must be a string' if is_ansible else 'Removal version must", "version (%s)' % (value, current_version)) else: # For a deprecation,", "paths = sys.argv[1:] or sys.stdin.read().splitlines() collection_legacy_file = 'meta/routing.yml' collection_runtime_file =", "except ValueError: raise Invalid(msg) return value def any_value(value): \"\"\"Accepts anything.\"\"\"", "it fails, in case we cannot get the version #", "Required, Schema, Invalid from voluptuous.humanize import humanize_error from ansible.module_utils.six import", "in the past if today < removal_date: raise Invalid( 'The", "Invalid(msg) # Make sure date is correct today = datetime.date.today()", "the future if version <= current_version: raise Invalid('The deprecation removal_version", "the past if today < removal_date: raise Invalid( 'The tombstone", "YAML load failed: %s' % (path, ex.context_mark.line + 1, ex.context_mark.column", "version.parse(value) version = LooseVersion(value) # We're storing Ansible's version as", "extra=PREVENT_EXTRA) ) list_dict_import_redirection_schema = [{str_type: import_redirection_schema} for str_type in string_types]", "from ansible.module_utils.six import string_types from ansible.utils.version import SemanticVersion def isodate(value,", "LooseVersion(value) # We're storing Ansible's version as a LooseVersion else:", "or collection_detail.read_galaxy_yml('.') return SemanticVersion(result['version']) except Exception: # pylint: disable=broad-except #", "__version__ return LooseVersion('.'.join(__version__.split('.')[:3])) def get_collection_version(): \"\"\"Return current collection version, or", "Any(None, *list_dict_plugin_routing_schema), ('connection'): Any(None, *list_dict_plugin_routing_schema), ('doc_fragments'): Any(None, *list_dict_plugin_routing_schema), ('filter'): Any(None,", "import partial import yaml from voluptuous import All, Any, MultipleInvalid,", "[{str_type: import_redirection_schema} for str_type in string_types] 
# top level schema", "optional ('plugin_routing'): Any(plugin_schema), ('import_redirection'): Any(None, *list_dict_import_redirection_schema), # requires_ansible: In the", "we have to do things manually. if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value):", "import_redirection schema import_redirection_schema = Any( Schema({ ('redirect'): Any(*string_types), # import_redirect", "raise Invalid(msg) # Make sure date is correct today =", "Invalid(msg) return value def any_value(value): \"\"\"Accepts anything.\"\"\" return value def", "+ 1, re.sub(r'\\s+', ' ', str(ex)))) return except Exception as", "'current version (%s)' % (value, current_version)) except ValueError: raise Invalid(msg)", "# All of these are optional ('plugin_routing'): Any(plugin_schema), ('import_redirection'): Any(None,", "version = SemanticVersion() version.parse(value) if version.major != 0 and (version.minor", "we (a) need to be able to return # codes", "extra=PREVENT_EXTRA), ) list_dict_plugin_routing_schema = [{str_type: plugin_routing_schema} for str_type in string_types]", "failing # at a random date. For this to be", "MUST also be reflected in the documentation # ~https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html #", "don't know\". return None def validate_metadata_file(path, is_ansible, check_deprecation_dates=False): \"\"\"Validate explicit", "the future. 
Only test this if # check_deprecation_date is truish,", "import sys from distutils.version import StrictVersion, LooseVersion from functools import", "LooseVersion('.'.join(__version__.split('.')[:3])) def get_collection_version(): \"\"\"Return current collection version, or None if", "need to be able to return # codes for this", "(value, current_version)) else: # For a deprecation, the removal version", "All( # The first schema validates the input, and the", "else: current_version = get_collection_version() # Updates to schema MUST also", "extra keys are specified Schema( { 'removal_version': partial(removal_version, is_ansible=is_ansible, current_version=current_version,", "Any(None, *list_dict_plugin_routing_schema), ('cache'): Any(None, *list_dict_plugin_routing_schema), ('callback'): Any(None, *list_dict_plugin_routing_schema), ('cliconf'): Any(None,", "or patch release ' '(see specification at https://semver.org/)' % (value,", "= get_ansible_version() else: current_version = get_collection_version() # Updates to schema", "not None: if is_tombstone: # For a tombstone, the removal", "*list_dict_plugin_routing_schema), ('lookup'): Any(None, *list_dict_plugin_routing_schema), ('module_utils'): Any(None, *list_dict_plugin_routing_schema), ('modules'): Any(None, *list_dict_plugin_routing_schema),", "# Make sure date is correct today = datetime.date.today() if", "Schema( { 'removal_version': partial(removal_version, is_ansible=is_ansible, current_version=current_version, is_tombstone=True), 'removal_date': partial(isodate, is_tombstone=True),", "('strategy'): Any(None, *list_dict_plugin_routing_schema), ('terminal'): Any(None, *list_dict_plugin_routing_schema), ('test'): Any(None, *list_dict_plugin_routing_schema), ('vars'):", "tombstoning_schema = All( # The first schema validates the input,", "date is correct today = datetime.date.today() if is_tombstone: # For", "major release, not a minor or patch release ' '(see", "('redirect'): Any(*string_types), # 
import_redirect doesn't currently support deprecation }, extra=PREVENT_EXTRA)", "and the second makes sure no extra keys are specified", "None: if is_tombstone: # For a tombstone, the removal version", "'Removal version must be a semantic version (https://semver.org/)' ) if", "the future if version > current_version: raise Invalid('The tombstone removal_version", "be a string' if is_ansible else 'Removal version must be", "also be reflected in the documentation # ~https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html # plugin_routing", "failed: %s' % (path, ex.context_mark.line + 1, ex.context_mark.column + 1,", "partial(removal_version, is_ansible=is_ansible, current_version=current_version), 'removal_date': partial(isodate, check_deprecation_date=check_deprecation_dates), 'warning_text': Any(*string_types), } ),", "*list_dict_plugin_routing_schema), ('strategy'): Any(None, *list_dict_plugin_routing_schema), ('terminal'): Any(None, *list_dict_plugin_routing_schema), ('test'): Any(None, *list_dict_plugin_routing_schema),", "sys.stdin.read().splitlines() collection_legacy_file = 'meta/routing.yml' collection_runtime_file = 'meta/runtime.yml' # This is", "if version > current_version: raise Invalid('The tombstone removal_version (%r) must", "support deprecation }, extra=PREVENT_EXTRA) ) list_dict_import_redirection_schema = [{str_type: import_redirection_schema} for", "{ Required('removal_date'): any_value, 'warning_text': any_value, } ), extra=PREVENT_EXTRA ) deprecation_schema", "are specified Schema( { 'removal_version': partial(removal_version, is_ansible=is_ansible, current_version=current_version, is_tombstone=True), 'removal_date':", "0 or version.patch != 0): raise Invalid('removal_version (%r) must be", "avoid_additional_data ) plugin_routing_schema = Any( Schema({ ('deprecation'): Any(deprecation_schema), ('tombstone'): Any(tombstoning_schema),", "% (value, current_version)) except ValueError: raise Invalid(msg) return value def", 
"ex.context_mark.line + 1, ex.context_mark.column + 1, re.sub(r'\\s+', ' ', str(ex))))", "path, is_ansible=path not in (collection_legacy_file, collection_runtime_file), check_deprecation_dates=check_deprecation_dates) if __name__ ==", "('lookup'): Any(None, *list_dict_plugin_routing_schema), ('module_utils'): Any(None, *list_dict_plugin_routing_schema), ('modules'): Any(None, *list_dict_plugin_routing_schema), ('netconf'):", "in ex.errors: # No way to get line/column numbers print('%s:%d:%d:", "def isodate(value, check_deprecation_date=False, is_tombstone=False): \"\"\"Validate a datetime.date or ISO 8601", "1, ex.context_mark.column + 1, re.sub(r'\\s+', ' ', str(ex)))) return except", "} ), avoid_additional_data ) plugin_routing_schema = Any( Schema({ ('deprecation'): Any(deprecation_schema),", "import humanize_error from ansible.module_utils.six import string_types from ansible.utils.version import SemanticVersion", "# We're storing Ansible's version as a LooseVersion else: version", "metadata file\"\"\" try: with open(path, 'r') as f_path: routing =", "Invalid(msg) try: removal_date = datetime.datetime.strptime(value, '%Y-%m-%d').date() except ValueError: raise Invalid(msg)", "(absolute_import, division, print_function) __metaclass__ = type import datetime import os", "ok if isinstance(value, datetime.date): removal_date = value else: # make", "if not isinstance(value, string_types): raise Invalid(msg) # From Python 3.7", "the ' 'current version (%s)' % (value, current_version)) else: #", "if isinstance(value, datetime.date): removal_date = value else: # make sure", "schema validates the input, and the second makes sure no", "release ' '(see specification at https://semver.org/)' % (value, )) if", "the future we should validate this with SpecifierSet ('requires_ansible'): Any(*string_types),", "print('%s:%d:%d: YAML load failed: %s' % (path, 0, 0, re.sub(r'\\s+',", "(a) need to be able to return # codes for", "be in the future. 
Only test this if # check_deprecation_date", "*list_dict_plugin_routing_schema), ('netconf'): Any(None, *list_dict_plugin_routing_schema), ('shell'): Any(None, *list_dict_plugin_routing_schema), ('strategy'): Any(None, *list_dict_plugin_routing_schema),", "1, re.sub(r'\\s+', ' ', str(ex)))) return except Exception as ex:", "}, extra=PREVENT_EXTRA) ) list_dict_import_redirection_schema = [{str_type: import_redirection_schema} for str_type in", "Any(None, *list_dict_plugin_routing_schema), ('module_utils'): Any(None, *list_dict_plugin_routing_schema), ('modules'): Any(None, *list_dict_plugin_routing_schema), ('netconf'): Any(None,", "come from YAML dates, these are ok if isinstance(value, datetime.date):", "not be in the future if version > current_version: raise", "to avoid checks to suddenly start to fail. if check_deprecation_date", "# just return None to indicate \"we don't know\". return", "codes for this test, and (b) make this error optional.", "if it is not available\"\"\" import importlib.util collection_detail_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),", "after the ' 'current version (%s)' % (value, current_version)) except", "is_tombstone=True), 'warning_text': Any(*string_types), } ), avoid_additional_data ) plugin_routing_schema = Any(", "dates, these are ok if isinstance(value, datetime.date): removal_date = value", "current_version=None, is_tombstone=False): \"\"\"Validate a removal version string.\"\"\" msg = (", "string_types from ansible.utils.version import SemanticVersion def isodate(value, check_deprecation_date=False, is_tombstone=False): \"\"\"Validate", "to do things manually. 
if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value): raise Invalid(msg)", "< removal_date: raise Invalid( 'The tombstone removal_date (%s) must not", "is currently disabled, because if it is enabled this test", "are ok if isinstance(value, datetime.date): removal_date = value else: #", "Any(None, *list_dict_plugin_routing_schema), ('inventory'): Any(None, *list_dict_plugin_routing_schema), ('lookup'): Any(None, *list_dict_plugin_routing_schema), ('module_utils'): Any(None,", "from __future__ import (absolute_import, division, print_function) __metaclass__ = type import", "be after the ' 'current version (%s)' % (value, current_version))", "return # codes for this test, and (b) make this", "0, (\"Should be called '%s'\" % collection_runtime_file))) continue validate_metadata_file( path,", "of ansible-core's ansible_builtin_runtime.yml and collection's meta/runtime.yml\"\"\" from __future__ import (absolute_import,", "datetime import os import re import sys from distutils.version import", "be after today (%s)' % (removal_date, today)) return value def", "removal_version(value, is_ansible, current_version=None, is_tombstone=False): \"\"\"Validate a removal version string.\"\"\" msg", "a string' if is_ansible else 'Removal version must be a", "YAML dates, these are ok if isinstance(value, datetime.date): removal_date =", "For a deprecation, the removal version must be in the", "*list_dict_plugin_routing_schema), ('become'): Any(None, *list_dict_plugin_routing_schema), ('cache'): Any(None, *list_dict_plugin_routing_schema), ('callback'): Any(None, *list_dict_plugin_routing_schema),", "is_ansible=is_ansible, current_version=current_version), 'removal_date': partial(isodate, check_deprecation_date=check_deprecation_dates), 'warning_text': Any(*string_types), } ), avoid_additional_data", "}, extra=PREVENT_EXTRA) # Ensure schema is valid try: schema(routing) except", "(YYYY-MM-DD), or YAML date' if not isinstance(value, string_types): raise Invalid(msg)", "must be after 
today (%s)' % (removal_date, today)) return value", "str_type in string_types] plugin_schema = Schema({ ('action'): Any(None, *list_dict_plugin_routing_schema), ('become'):", "0, 0, humanize_error(routing, error))) def main(): \"\"\"Validate runtime metadata\"\"\" paths", "StrictVersion, LooseVersion from functools import partial import yaml from voluptuous", "test can start failing # at a random date. For", "isinstance(value, string_types): raise Invalid(msg) # From Python 3.7 in, there", "pylint: disable=broad-except print('%s:%d:%d: YAML load failed: %s' % (path, 0,", "yaml from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA from voluptuous", "datetime.date or ISO 8601 date string.\"\"\" # datetime.date objects come", "test this if # check_deprecation_date is truish, to avoid checks", "avoid_additional_data = Schema( Any( { Required('removal_version'): any_value, 'warning_text': any_value, },", "('cache'): Any(None, *list_dict_plugin_routing_schema), ('callback'): Any(None, *list_dict_plugin_routing_schema), ('cliconf'): Any(None, *list_dict_plugin_routing_schema), ('connection'):", "if not isinstance(value, string_types): raise Invalid(msg) try: if is_ansible: version", "('requires_ansible'): Any(*string_types), ('action_groups'): dict, }, extra=PREVENT_EXTRA) # Ensure schema is", "version <= current_version: raise Invalid('The deprecation removal_version (%r) must be", "if it is enabled this test can start failing #", "os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'collection_detail.py') collection_detail_spec = importlib.util.spec_from_file_location('collection_detail', collection_detail_path) collection_detail = importlib.util.module_from_spec(collection_detail_spec) sys.modules['collection_detail']", "== collection_legacy_file: print('%s:%d:%d: %s' % (path, 0, 0, (\"Should be", "def validate_metadata_file(path, is_ansible, check_deprecation_dates=False): \"\"\"Validate explicit runtime metadata file\"\"\" try:", "the 
removal date must be in the future. Only test", "' 'current version (%s)' % (value, current_version)) except ValueError: raise", "# codes for this test, and (b) make this error", "try: result = collection_detail.read_manifest_json('.') or collection_detail.read_galaxy_yml('.') return SemanticVersion(result['version']) except Exception:", "0, 0, (\"Should be called '%s'\" % collection_runtime_file))) continue validate_metadata_file(", "to schema MUST also be reflected in the documentation #", "# pylint: disable=broad-except print('%s:%d:%d: YAML load failed: %s' % (path,", "schema = Schema({ # All of these are optional ('plugin_routing'):", "'removal_version': partial(removal_version, is_ansible=is_ansible, current_version=current_version, is_tombstone=True), 'removal_date': partial(isodate, is_tombstone=True), 'warning_text': Any(*string_types),", "be in the future if version <= current_version: raise Invalid('The", "% (path, 0, 0, humanize_error(routing, error))) def main(): \"\"\"Validate runtime", "today (%s)' % (removal_date, today)) return value def removal_version(value, is_ansible,", "specification at https://semver.org/)' % (value, )) if current_version is not", "Any(None, *list_dict_plugin_routing_schema), ('vars'): Any(None, *list_dict_plugin_routing_schema), }, extra=PREVENT_EXTRA) # import_redirection schema", "it is not available\"\"\" import importlib.util collection_detail_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'collection_detail.py')", "after the ' 'current version (%s)' % (value, current_version)) else:", "(removal_date, today)) return value def removal_version(value, is_ansible, current_version=None, is_tombstone=False): \"\"\"Validate", "# import_redirect doesn't currently support deprecation }, extra=PREVENT_EXTRA) ) list_dict_import_redirection_schema", "= Any( Schema({ ('deprecation'): Any(deprecation_schema), ('tombstone'): Any(tombstoning_schema), ('redirect'): Any(*string_types), },", 
"check_deprecation_dates = False for path in paths: if path ==", "is_tombstone=False): \"\"\"Validate a datetime.date or ISO 8601 date string.\"\"\" #", "must not be after today (%s)' % (removal_date, today)) else:", "Only test this if # check_deprecation_date is truish, to avoid", "the version # just return None to indicate \"we don't", "In the future we should validate this with SpecifierSet ('requires_ansible'):", "For older versions, # we have to do things manually.", "is truish, to avoid checks to suddenly start to fail.", "# noinspection PyBroadException try: result = collection_detail.read_manifest_json('.') or collection_detail.read_galaxy_yml('.') return", "Any(*string_types), } ), avoid_additional_data ) tombstoning_schema = All( # The", "checks to suddenly start to fail. if check_deprecation_date and today", "not in (collection_legacy_file, collection_runtime_file), check_deprecation_dates=check_deprecation_dates) if __name__ == '__main__': main()", "collection_detail_spec = importlib.util.spec_from_file_location('collection_detail', collection_detail_path) collection_detail = importlib.util.module_from_spec(collection_detail_spec) sys.modules['collection_detail'] = collection_detail", "main(): \"\"\"Validate runtime metadata\"\"\" paths = sys.argv[1:] or sys.stdin.read().splitlines() collection_legacy_file", "8601 date string (YYYY-MM-DD), or YAML date' if not isinstance(value,", "not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value): raise Invalid(msg) try: removal_date = datetime.datetime.strptime(value, '%Y-%m-%d').date()", "schema MUST also be reflected in the documentation # ~https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html", "get the version # just return None to indicate \"we", "Any(None, *list_dict_plugin_routing_schema), ('cliconf'): Any(None, *list_dict_plugin_routing_schema), ('connection'): Any(None, *list_dict_plugin_routing_schema), ('doc_fragments'): Any(None,", "This is currently disabled, because if it is 
enabled this", "\"\"\"Return current ansible-core version\"\"\" from ansible.release import __version__ return LooseVersion('.'.join(__version__.split('.')[:3]))", "re import sys from distutils.version import StrictVersion, LooseVersion from functools", "(value, current_version)) except ValueError: raise Invalid(msg) return value def any_value(value):", "extra=PREVENT_EXTRA) # Ensure schema is valid try: schema(routing) except MultipleInvalid", "string msg = 'Expected ISO 8601 date string (YYYY-MM-DD), or", "the second makes sure no extra keys are specified Schema(", "Any(None, *list_dict_plugin_routing_schema), ('callback'): Any(None, *list_dict_plugin_routing_schema), ('cliconf'): Any(None, *list_dict_plugin_routing_schema), ('connection'): Any(None,", "collection version, or None if it is not available\"\"\" import", "return value def removal_version(value, is_ansible, current_version=None, is_tombstone=False): \"\"\"Validate a removal", "fail. if check_deprecation_date and today > removal_date: raise Invalid( 'The", "tombstone removal_date (%s) must not be after today (%s)' %", "# We do not care why it fails, in case", "optional. check_deprecation_dates = False for path in paths: if path", "= Schema( Any( { Required('removal_version'): any_value, 'warning_text': any_value, }, {", "is_tombstone=True), 'removal_date': partial(isodate, is_tombstone=True), 'warning_text': Any(*string_types), } ), avoid_additional_data )", "Python 3.7 in, there is datetime.date.fromisoformat(). For older versions, #", "we have a string msg = 'Expected ISO 8601 date", "is_ansible: version = StrictVersion() version.parse(value) version = LooseVersion(value) # We're", "date must be in the future. 
Only test this if", "is_tombstone: # For a tombstone, the removal version must not", "except MultipleInvalid as ex: for error in ex.errors: # No", "(%r) must be a major release, not a minor or", "if path == collection_legacy_file: print('%s:%d:%d: %s' % (path, 0, 0,", "version.parse(value) if version.major != 0 and (version.minor != 0 or", "'r') as f_path: routing = yaml.safe_load(f_path) except yaml.error.MarkedYAMLError as ex:", "as a LooseVersion else: version = SemanticVersion() version.parse(value) if version.major", "% (removal_date, today)) else: # For a deprecation, the removal", "value def any_value(value): \"\"\"Accepts anything.\"\"\" return value def get_ansible_version(): \"\"\"Return", "(version.minor != 0 or version.patch != 0): raise Invalid('removal_version (%r)", "the removal version must be in the future if version", "' 'current version (%s)' % (value, current_version)) else: # For", "Make sure date is correct today = datetime.date.today() if is_tombstone:", "yaml.safe_load(f_path) except yaml.error.MarkedYAMLError as ex: print('%s:%d:%d: YAML load failed: %s'", "for str_type in string_types] # top level schema schema =", "as ex: print('%s:%d:%d: YAML load failed: %s' % (path, ex.context_mark.line", "All of these are optional ('plugin_routing'): Any(plugin_schema), ('import_redirection'): Any(None, *list_dict_import_redirection_schema),", "return value def any_value(value): \"\"\"Accepts anything.\"\"\" return value def get_ansible_version():", "or None if it is not available\"\"\" import importlib.util collection_detail_path", "avoid checks to suddenly start to fail. if check_deprecation_date and", "= get_collection_version() # Updates to schema MUST also be reflected", "= Schema({ # All of these are optional ('plugin_routing'): Any(plugin_schema),", "date. 
For this to be properly activated, we (a) need", "Schema( { 'removal_version': partial(removal_version, is_ansible=is_ansible, current_version=current_version), 'removal_date': partial(isodate, check_deprecation_date=check_deprecation_dates), 'warning_text':", "*list_dict_import_redirection_schema), # requires_ansible: In the future we should validate this", "check_deprecation_date is truish, to avoid checks to suddenly start to", "except Exception as ex: # pylint: disable=broad-except print('%s:%d:%d: YAML load", "a deprecation, the removal date must be in the future.", "from ansible.release import __version__ return LooseVersion('.'.join(__version__.split('.')[:3])) def get_collection_version(): \"\"\"Return current", "in paths: if path == collection_legacy_file: print('%s:%d:%d: %s' % (path,", "doesn't currently support deprecation }, extra=PREVENT_EXTRA) ) list_dict_import_redirection_schema = [{str_type:", "('shell'): Any(None, *list_dict_plugin_routing_schema), ('strategy'): Any(None, *list_dict_plugin_routing_schema), ('terminal'): Any(None, *list_dict_plugin_routing_schema), ('test'):", "<= current_version: raise Invalid('The deprecation removal_version (%r) must be after", "if is_tombstone: # For a tombstone, the removal version must", "manually. if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value): raise Invalid(msg) try: removal_date =", "(%r) must not be after the ' 'current version (%s)'", "version must be in the future if version <= current_version:", "} ), avoid_additional_data ) tombstoning_schema = All( # The first", "string_types] plugin_schema = Schema({ ('action'): Any(None, *list_dict_plugin_routing_schema), ('become'): Any(None, *list_dict_plugin_routing_schema),", "= Any( Schema({ ('redirect'): Any(*string_types), # import_redirect doesn't currently support" ]
[ "AlignBearingHolesGoal() goal.task_name = self._task_name self._success = True try: self._client.send_goal(self._topic, goal)", "EventState, Logger from flexbe_core.proxy import ProxyActionClient # example import of", "= task_name self._success = False def execute(self, userdata): if not", "command:\\n%s' % str(e)) self._success = False def on_exit(self, userdata): if", "completed successfully. <= error AlignBearingHoles failed to execute. ''' def", "required clients as dict (topic: type) self._client = ProxyActionClient( {self._topic:", "flexbe_core.proxy import ProxyActionClient # example import of required action from", "except Exception as e: Logger.logwarn( 'Failed to send the AlignBearingHoles", "from o2ac_msgs.msg import AlignBearingHolesAction, AlignBearingHolesGoal class AlignBearingHolesActionState(EventState): ''' Actionlib for", "AlignBearingHoles failed to execute. ''' def __init__(self, task_name): super( AlignBearingHolesActionState,", "__init__(self, task_name): super( AlignBearingHolesActionState, self).__init__( outcomes=[ 'success', 'error']) self._topic =", "failed to execute. ''' def __init__(self, task_name): super( AlignBearingHolesActionState, self).__init__(", "''' Actionlib for aligning the bearing holes -- task_name string", "''' def __init__(self, task_name): super( AlignBearingHolesActionState, self).__init__( outcomes=[ 'success', 'error'])", "<= success AlignBearingHoles completed successfully. <= error AlignBearingHoles failed to", "<= error AlignBearingHoles failed to execute. ''' def __init__(self, task_name):", "error AlignBearingHoles failed to execute. ''' def __init__(self, task_name): super(", "complete AlignBearingHoles') self._success = False return 'error' else: Logger.logwarn('Succeed! 
completed", "self._success = True try: self._client.send_goal(self._topic, goal) except Exception as e:", "= self._task_name self._success = True try: self._client.send_goal(self._topic, goal) except Exception", "#!/usr/bin/env python from flexbe_core import EventState, Logger from flexbe_core.proxy import", "= True return 'success' def on_enter(self, userdata): goal = AlignBearingHolesGoal()", "completed AlignBearingHoles') self._success = True return 'success' def on_enter(self, userdata):", "aligning the bearing holes -- task_name string Name of the", "def on_enter(self, userdata): goal = AlignBearingHolesGoal() goal.task_name = self._task_name self._success", "task_name string Name of the task <= success AlignBearingHoles completed", "= self._client.get_result(self._topic) Logger.logwarn('result %s' % str(result)) if not result: Logger.logwarn('Fail", "task <= success AlignBearingHoles completed successfully. <= error AlignBearingHoles failed", "self._success = False def on_exit(self, userdata): if not self._client.has_result(self._topic): self._client.cancel(self._topic)", "import AlignBearingHolesAction, AlignBearingHolesGoal class AlignBearingHolesActionState(EventState): ''' Actionlib for aligning the", "super( AlignBearingHolesActionState, self).__init__( outcomes=[ 'success', 'error']) self._topic = 'o2ac_flexbe/align_bearing_holes' #", "self._client.send_goal(self._topic, goal) except Exception as e: Logger.logwarn( 'Failed to send", "return 'success' def on_enter(self, userdata): goal = AlignBearingHolesGoal() goal.task_name =", "# pass required clients as dict (topic: type) self._client =", "AlignBearingHoles completed successfully. <= error AlignBearingHoles failed to execute. 
'''", "False def execute(self, userdata): if not self._success: return 'error' if", "Actionlib for aligning the bearing holes -- task_name string Name", "(topic: type) self._client = ProxyActionClient( {self._topic: AlignBearingHolesAction}) self._task_name = task_name", "task_name): super( AlignBearingHolesActionState, self).__init__( outcomes=[ 'success', 'error']) self._topic = 'o2ac_flexbe/align_bearing_holes'", "'error' else: Logger.logwarn('Succeed! completed AlignBearingHoles') self._success = True return 'success'", "clients as dict (topic: type) self._client = ProxyActionClient( {self._topic: AlignBearingHolesAction})", "{self._topic: AlignBearingHolesAction}) self._task_name = task_name self._success = False def execute(self,", "the task <= success AlignBearingHoles completed successfully. <= error AlignBearingHoles", "'error']) self._topic = 'o2ac_flexbe/align_bearing_holes' # pass required clients as dict", "Logger from flexbe_core.proxy import ProxyActionClient # example import of required", "successfully. <= error AlignBearingHoles failed to execute. ''' def __init__(self,", "pass required clients as dict (topic: type) self._client = ProxyActionClient(", "dict (topic: type) self._client = ProxyActionClient( {self._topic: AlignBearingHolesAction}) self._task_name =", "self._task_name = task_name self._success = False def execute(self, userdata): if", "Logger.logwarn('result %s' % str(result)) if not result: Logger.logwarn('Fail to complete", "import of required action from o2ac_msgs.msg import AlignBearingHolesAction, AlignBearingHolesGoal class", "outcomes=[ 'success', 'error']) self._topic = 'o2ac_flexbe/align_bearing_holes' # pass required clients", "success AlignBearingHoles completed successfully. 
<= error AlignBearingHoles failed to execute.", "to complete AlignBearingHoles') self._success = False return 'error' else: Logger.logwarn('Succeed!", "False def on_exit(self, userdata): if not self._client.has_result(self._topic): self._client.cancel(self._topic) Logger.loginfo('Cancelled active", "result: Logger.logwarn('Fail to complete AlignBearingHoles') self._success = False return 'error'", "%s' % str(result)) if not result: Logger.logwarn('Fail to complete AlignBearingHoles')", "'o2ac_flexbe/align_bearing_holes' # pass required clients as dict (topic: type) self._client", "= ProxyActionClient( {self._topic: AlignBearingHolesAction}) self._task_name = task_name self._success = False", "'success' def on_enter(self, userdata): goal = AlignBearingHolesGoal() goal.task_name = self._task_name", "from flexbe_core.proxy import ProxyActionClient # example import of required action", "'success', 'error']) self._topic = 'o2ac_flexbe/align_bearing_holes' # pass required clients as", "'error' if self._client.has_result(self._topic): result = self._client.get_result(self._topic) Logger.logwarn('result %s' % str(result))", "on_enter(self, userdata): goal = AlignBearingHolesGoal() goal.task_name = self._task_name self._success =", "for aligning the bearing holes -- task_name string Name of", "self._success: return 'error' if self._client.has_result(self._topic): result = self._client.get_result(self._topic) Logger.logwarn('result %s'", "the AlignBearingHoles command:\\n%s' % str(e)) self._success = False def on_exit(self,", "# example import of required action from o2ac_msgs.msg import AlignBearingHolesAction,", "= False def on_exit(self, userdata): if not self._client.has_result(self._topic): self._client.cancel(self._topic) Logger.loginfo('Cancelled", "from flexbe_core import EventState, Logger from flexbe_core.proxy import ProxyActionClient #", "self._client = ProxyActionClient( {self._topic: AlignBearingHolesAction}) self._task_name = task_name self._success =", "= 
False def execute(self, userdata): if not self._success: return 'error'", "self._success = False return 'error' else: Logger.logwarn('Succeed! completed AlignBearingHoles') self._success", "'Failed to send the AlignBearingHoles command:\\n%s' % str(e)) self._success =", "Exception as e: Logger.logwarn( 'Failed to send the AlignBearingHoles command:\\n%s'", "= 'o2ac_flexbe/align_bearing_holes' # pass required clients as dict (topic: type)", "= AlignBearingHolesGoal() goal.task_name = self._task_name self._success = True try: self._client.send_goal(self._topic,", "not self._success: return 'error' if self._client.has_result(self._topic): result = self._client.get_result(self._topic) Logger.logwarn('result", "False return 'error' else: Logger.logwarn('Succeed! completed AlignBearingHoles') self._success = True", "= True try: self._client.send_goal(self._topic, goal) except Exception as e: Logger.logwarn(", "if not result: Logger.logwarn('Fail to complete AlignBearingHoles') self._success = False", "to send the AlignBearingHoles command:\\n%s' % str(e)) self._success = False", "self._topic = 'o2ac_flexbe/align_bearing_holes' # pass required clients as dict (topic:", "of required action from o2ac_msgs.msg import AlignBearingHolesAction, AlignBearingHolesGoal class AlignBearingHolesActionState(EventState):", "bearing holes -- task_name string Name of the task <=", "else: Logger.logwarn('Succeed! 
completed AlignBearingHoles') self._success = True return 'success' def", "flexbe_core import EventState, Logger from flexbe_core.proxy import ProxyActionClient # example", "self._success = True return 'success' def on_enter(self, userdata): goal =", "if not self._success: return 'error' if self._client.has_result(self._topic): result = self._client.get_result(self._topic)", "as e: Logger.logwarn( 'Failed to send the AlignBearingHoles command:\\n%s' %", "self._success = False def execute(self, userdata): if not self._success: return", "% str(e)) self._success = False def on_exit(self, userdata): if not", "def on_exit(self, userdata): if not self._client.has_result(self._topic): self._client.cancel(self._topic) Logger.loginfo('Cancelled active action", "import EventState, Logger from flexbe_core.proxy import ProxyActionClient # example import", "userdata): goal = AlignBearingHolesGoal() goal.task_name = self._task_name self._success = True", "userdata): if not self._success: return 'error' if self._client.has_result(self._topic): result =", "% str(result)) if not result: Logger.logwarn('Fail to complete AlignBearingHoles') self._success", "execute. ''' def __init__(self, task_name): super( AlignBearingHolesActionState, self).__init__( outcomes=[ 'success',", "AlignBearingHoles') self._success = False return 'error' else: Logger.logwarn('Succeed! completed AlignBearingHoles')", "class AlignBearingHolesActionState(EventState): ''' Actionlib for aligning the bearing holes --", "string Name of the task <= success AlignBearingHoles completed successfully.", "not result: Logger.logwarn('Fail to complete AlignBearingHoles') self._success = False return", "python from flexbe_core import EventState, Logger from flexbe_core.proxy import ProxyActionClient", "to execute. 
''' def __init__(self, task_name): super( AlignBearingHolesActionState, self).__init__( outcomes=[", "self._client.get_result(self._topic) Logger.logwarn('result %s' % str(result)) if not result: Logger.logwarn('Fail to", "AlignBearingHoles command:\\n%s' % str(e)) self._success = False def on_exit(self, userdata):", "str(result)) if not result: Logger.logwarn('Fail to complete AlignBearingHoles') self._success =", "task_name self._success = False def execute(self, userdata): if not self._success:", "required action from o2ac_msgs.msg import AlignBearingHolesAction, AlignBearingHolesGoal class AlignBearingHolesActionState(EventState): '''", "o2ac_msgs.msg import AlignBearingHolesAction, AlignBearingHolesGoal class AlignBearingHolesActionState(EventState): ''' Actionlib for aligning", "AlignBearingHolesAction, AlignBearingHolesGoal class AlignBearingHolesActionState(EventState): ''' Actionlib for aligning the bearing", "AlignBearingHolesActionState(EventState): ''' Actionlib for aligning the bearing holes -- task_name", "return 'error' else: Logger.logwarn('Succeed! 
completed AlignBearingHoles') self._success = True return", "holes -- task_name string Name of the task <= success", "True return 'success' def on_enter(self, userdata): goal = AlignBearingHolesGoal() goal.task_name", "str(e)) self._success = False def on_exit(self, userdata): if not self._client.has_result(self._topic):", "as dict (topic: type) self._client = ProxyActionClient( {self._topic: AlignBearingHolesAction}) self._task_name", "def execute(self, userdata): if not self._success: return 'error' if self._client.has_result(self._topic):", "action from o2ac_msgs.msg import AlignBearingHolesAction, AlignBearingHolesGoal class AlignBearingHolesActionState(EventState): ''' Actionlib", "AlignBearingHolesAction}) self._task_name = task_name self._success = False def execute(self, userdata):", "AlignBearingHoles') self._success = True return 'success' def on_enter(self, userdata): goal", "<reponame>mitdo/o2ac-ur #!/usr/bin/env python from flexbe_core import EventState, Logger from flexbe_core.proxy", "AlignBearingHolesActionState, self).__init__( outcomes=[ 'success', 'error']) self._topic = 'o2ac_flexbe/align_bearing_holes' # pass", "result = self._client.get_result(self._topic) Logger.logwarn('result %s' % str(result)) if not result:", "execute(self, userdata): if not self._success: return 'error' if self._client.has_result(self._topic): result", "Logger.logwarn('Succeed! completed AlignBearingHoles') self._success = True return 'success' def on_enter(self,", "import ProxyActionClient # example import of required action from o2ac_msgs.msg", "Name of the task <= success AlignBearingHoles completed successfully. 
<=", "-- task_name string Name of the task <= success AlignBearingHoles", "def __init__(self, task_name): super( AlignBearingHolesActionState, self).__init__( outcomes=[ 'success', 'error']) self._topic", "goal) except Exception as e: Logger.logwarn( 'Failed to send the", "goal = AlignBearingHolesGoal() goal.task_name = self._task_name self._success = True try:", "ProxyActionClient( {self._topic: AlignBearingHolesAction}) self._task_name = task_name self._success = False def", "if self._client.has_result(self._topic): result = self._client.get_result(self._topic) Logger.logwarn('result %s' % str(result)) if", "= False return 'error' else: Logger.logwarn('Succeed! completed AlignBearingHoles') self._success =", "e: Logger.logwarn( 'Failed to send the AlignBearingHoles command:\\n%s' % str(e))", "self._task_name self._success = True try: self._client.send_goal(self._topic, goal) except Exception as", "on_exit(self, userdata): if not self._client.has_result(self._topic): self._client.cancel(self._topic) Logger.loginfo('Cancelled active action goal.')", "example import of required action from o2ac_msgs.msg import AlignBearingHolesAction, AlignBearingHolesGoal", "Logger.logwarn('Fail to complete AlignBearingHoles') self._success = False return 'error' else:", "Logger.logwarn( 'Failed to send the AlignBearingHoles command:\\n%s' % str(e)) self._success", "the bearing holes -- task_name string Name of the task", "self).__init__( outcomes=[ 'success', 'error']) self._topic = 'o2ac_flexbe/align_bearing_holes' # pass required", "return 'error' if self._client.has_result(self._topic): result = self._client.get_result(self._topic) Logger.logwarn('result %s' %", "type) self._client = ProxyActionClient( {self._topic: AlignBearingHolesAction}) self._task_name = task_name self._success", "try: self._client.send_goal(self._topic, goal) except Exception as e: Logger.logwarn( 'Failed to", "AlignBearingHolesGoal class AlignBearingHolesActionState(EventState): ''' Actionlib for 
aligning the bearing holes", "True try: self._client.send_goal(self._topic, goal) except Exception as e: Logger.logwarn( 'Failed", "ProxyActionClient # example import of required action from o2ac_msgs.msg import", "send the AlignBearingHoles command:\\n%s' % str(e)) self._success = False def", "self._client.has_result(self._topic): result = self._client.get_result(self._topic) Logger.logwarn('result %s' % str(result)) if not", "of the task <= success AlignBearingHoles completed successfully. <= error", "goal.task_name = self._task_name self._success = True try: self._client.send_goal(self._topic, goal) except" ]
[ "eprint('%s: OK' % filename) def should_read(f): m = magic.detect_from_filename(f) #", "speed up analysis. FIXME: Add a slow mode to get", "inf in infile]) except Exception as e: eprint('%s: %s' %", "Exception as e: eprint('%s: %s' % (filename, e)) return None", "cat != 'Cc': return True return False if __name__ ==", "if should_read(f): text = getfiletext(f) if text: analyze_text(f, text, disallowed,", "mode = os.stat(p).st_mode if S_ISDIR(mode): analyze_dir(p, disallowed, msg) elif S_ISREG(mode):", "files By default the script takes one or more files", "or bidirectional control characters.') parser.add_argument('-v', '--verbose', required=False, action='store_true', help='Verbose mode.')", "return None def analyze_text_detailed(filename, text, disallowed, msg): line = 0", "def analyze_text_detailed(filename, text, disallowed, msg): line = 0 warned =", "set(chr(c) for c in range(sys.maxunicode) if \\ unicodedata.category(chr(c)) == 'Cf')", "(filename, msg, text & disallowed)) else: eprint('%s: OK' % filename)", "**kwargs) # Decode a single latin1 line. def decodeline(inf): if", "one or more files or directories and looks for unicode", "files # that have these disallowed chars. def analyze_text(filename, text,", "feed into analyze_text. def analyze_file(f, disallowed, msg): eprint('%s: Reading file'", "in files # that have these disallowed chars. def analyze_text(filename,", "and when set to 'bidi', prints only the 9 bidirectional", "hasattr(settings, 'scan_exclude_mime'): scan_exclude_mime = scan_exclude_mime + settings.scan_exclude_mime if args.notests: scan_exclude", "space. This includes the # bidi control characters. 
disallowed =", "re.search(e, m.mime_type)]: return False return True # Get file text", "eprint('%s: Reading file' % f) if should_read(f): text = getfiletext(f)", "in m.mime_type \\ or [e for e in scan_exclude_mime if", "open(filename) as infile: try: if detailed_mode: return [decodeline(inf) for inf", "and cat != 'Cc': return True return False if __name__", "Add a slow mode to get line numbers in files", "help='Sources to analyze') parser.add_argument('-p', '--nonprint', required=False, type=str, choices=['all', 'bidi'], help='Look", "or more files or directories and looks for unicode control", "action='store_true', help='Exclude tests (basically test.* as a component of path).')", "bidi control characters. disallowed = set(chr(c) for c in range(sys.maxunicode)", "text, disallowed, msg): if detailed_mode: analyze_text_detailed(filename, text, disallowed, msg) return", "disallowed, msg): eprint('%s: Reading file' % f) if should_read(f): text", "All control characters. disallowed = set(chr(c) for c in range(sys.maxunicode)", "for t in text: line = line + 1 subset", "eprint(*args, **kwargs): if verbose_mode: print(*args, file=sys.stderr, **kwargs) # Decode a", "if not args.nonprint: # Formatting control characters in the unicode", "parser.parse_args() verbose_mode = args.verbose detailed_mode = args.detailed if not args.nonprint:", "disallowed, msg) elif S_ISREG(mode): analyze_file(p, disallowed, msg) else: eprint('%s: UNREADABLE'", "in scan_exclude_mime if re.search(e, m.mime_type)]: return False return True #", "parser = argparse.ArgumentParser(description=\"Look for Unicode control characters\") parser.add_argument('path', metavar='path', nargs='+',", "to speed up analysis. 
FIXME: Add a slow mode to", "scan_exclude = scan_exclude + settings.scan_exclude if hasattr(settings, 'scan_exclude_mime'): scan_exclude_mime =", "def analyze_text(filename, text, disallowed, msg): if detailed_mode: analyze_text_detailed(filename, text, disallowed,", "except UnicodeDecodeError: eprint('%s: Retrying with latin1' % filename) try: text", "& disallowed)) else: eprint('%s: OK' % filename) def should_read(f): m", "regular expressions matching paths to exclude from the scan. There", "disallowed, msg): mode = os.stat(p).st_mode if S_ISDIR(mode): analyze_dir(p, disallowed, msg)", "(basically test.* as a component of path).') parser.add_argument('-c', '--config', required=False,", "r'\\.txt$', r'\\.directory$'] scan_exclude_mime = [r'text/x-po$', r'text/x-tex$', r'text/x-troff$', r'text/html$'] verbose_mode =", "Print to stderr in verbose mode. def eprint(*args, **kwargs): if", "help='Configuration file to read settings from.') args = parser.parse_args() verbose_mode", "a scan_exclude list, which should be a list of regular", "subset = [c for c in t if c in", "non-utf-8 locales are not supported at the moment. def getfiletext(filename):", "unicode space. This includes the # bidi control characters. disallowed", "UnicodeDecodeError: eprint('%s: Retrying with latin1' % filename) try: text =", "string from a file, attempting to decode from latin1 if", "necessary. 
# Other non-utf-8 locales are not supported at the", "[e for e in scan_exclude if re.search(e, f)]: return False", "if re.search(e, f)]: return False # Slower check, mime type.", "= None with open(filename) as infile: try: if detailed_mode: return", "if args.notests: scan_exclude = scan_exclude + [r'/test[^/]+/'] analyze_paths(args.path, disallowed, msg)", "a config file with the -c command line, defining a", "disallowed = set([ chr(0x202a), chr(0x202b), chr(0x202c), chr(0x202d), chr(0x202e), chr(0x2066), chr(0x2067),", "try: if detailed_mode: return [decodeline(inf) for inf in infile] except", "# set to speed up analysis. FIXME: Add a slow", "= importlib.util.spec_from_file_location(\"settings\", args.config) settings = importlib.util.module_from_spec(spec) spec.loader.exec_module(settings) if hasattr(settings, 'scan_exclude'):", "We reduce all characters into a # set to speed", "file text and feed into analyze_text. def analyze_file(f, disallowed, msg):", "stderr in verbose mode. def eprint(*args, **kwargs): if verbose_mode: print(*args,", "set(text) else: return None def analyze_text_detailed(filename, text, disallowed, msg): line", "type=str, choices=['all', 'bidi'], help='Look for either all non-printable unicode characters", "args.nonprint == 'all': # All control characters. disallowed = set(chr(c)", "msg) else: eprint('%s: UNREADABLE' % p) # Recursively analyze files", "narrow down the files, provide a config file with the", "verbose mode. def eprint(*args, **kwargs): if verbose_mode: print(*args, file=sys.stderr, **kwargs)", "= [c for c in t if c in disallowed]", "disallowed characters in the text. We reduce all characters into", "'Cf') msg = 'unicode control characters' elif args.nonprint == 'all':", "m = magic.detect_from_filename(f) # Fast check, just the file name.", "the scan. 
There is a second mode enabled with -p", "'scan_exclude_mime'): scan_exclude_mime = scan_exclude_mime + settings.scan_exclude_mime if args.notests: scan_exclude =", "if detailed_mode: analyze_text_detailed(filename, text, disallowed, msg) return if not text.isdisjoint(disallowed):", "settings.scan_exclude if hasattr(settings, 'scan_exclude_mime'): scan_exclude_mime = scan_exclude_mime + settings.scan_exclude_mime if", "disallowed, msg) else: eprint('%s: UNREADABLE' % p) # Recursively analyze", "settings.scan_exclude_mime if args.notests: scan_exclude = scan_exclude + [r'/test[^/]+/'] analyze_paths(args.path, disallowed,", "r'text/x-tex$', r'text/x-troff$', r'text/html$'] verbose_mode = False # Print to stderr", "numbers where characters occur.') parser.add_argument('-t', '--notests', required=False, action='store_true', help='Exclude tests", "test.* as a component of path).') parser.add_argument('-c', '--config', required=False, type=str,", "r'\\.desktop$', r'ChangeLog$', r'NEWS$', r'\\.ppd$', r'\\.txt$', r'\\.directory$'] scan_exclude_mime = [r'text/x-po$', r'text/x-tex$',", "== '__main__': parser = argparse.ArgumentParser(description=\"Look for Unicode control characters\") parser.add_argument('path',", "if necessary. # Other non-utf-8 locales are not supported at", "Formatting control characters in the unicode space. This includes the", "chr(0x202d), chr(0x202e), chr(0x2066), chr(0x2067), chr(0x2068), chr(0x2069)]) msg = 'bidirectional control", "= False for t in text: line = line +", "else: eprint('%s: UNREADABLE' % p) # Recursively analyze files in", "'--notests', required=False, action='store_true', help='Exclude tests (basically test.* as a component", "file, attempting to decode from latin1 if necessary. 
# Other", "choices=['all', 'bidi'], help='Look for either all non-printable unicode characters or", "To narrow down the files, provide a config file with", "should_read(f): m = magic.detect_from_filename(f) # Fast check, just the file", "# Only bidi control characters. disallowed = set([ chr(0x202a), chr(0x202b),", "# Look for disallowed characters in the text. We reduce", "these disallowed chars. def analyze_text(filename, text, disallowed, msg): if detailed_mode:", "metavar='path', nargs='+', help='Sources to analyze') parser.add_argument('-p', '--nonprint', required=False, type=str, choices=['all',", "args = parser.parse_args() verbose_mode = args.verbose detailed_mode = args.detailed if", "to 'all', prints all control characters and when set to", "path).') parser.add_argument('-c', '--config', required=False, type=str, help='Configuration file to read settings", "% f) # Actual implementation of the recursive descent into", "filename) # Look for disallowed characters in the text. We", "at the moment. def getfiletext(filename): text = None with open(filename)", "def decodeline(inf): if isinstance(inf, str): return inf return inf.decode('latin-1') #", "omit the ascii control characters. def nonprint_unicode(c): cat = unicodedata.category(c)", "control characters. def nonprint_unicode(c): cat = unicodedata.category(c) if cat.startswith('C') and", "Reading file' % f) if should_read(f): text = getfiletext(f) if", "where characters occur.') parser.add_argument('-t', '--notests', required=False, action='store_true', help='Exclude tests (basically", "if [e for e in scan_exclude if re.search(e, f)]: return", "'--config', required=False, type=str, help='Configuration file to read settings from.') args", "chr(0x202e), chr(0x2066), chr(0x2067), chr(0x2068), chr(0x2069)]) msg = 'bidirectional control characters'", "infile] except Exception as e: eprint('%s: %s' % (filename, e))", "text. We reduce all characters into a # set to", "into directories. 
def analyze_any(p, disallowed, msg): mode = os.stat(p).st_mode if", "action='store_true', help='Verbose mode.') parser.add_argument('-d', '--detailed', required=False, action='store_true', help='Print line numbers", "msg): eprint('%s: Reading file' % f) if should_read(f): text =", "a text string from a file, attempting to decode from", "magic.detect_from_filename(f) # Fast check, just the file name. if [e", "analyze files in the directory. def analyze_dir(d, disallowed, msg): for", "scan. There is a second mode enabled with -p which", "chr(0x2069)]) msg = 'bidirectional control characters' if args.config: spec =", "control characters. disallowed = set([ chr(0x202a), chr(0x202b), chr(0x202c), chr(0x202d), chr(0x202e),", "% filename) # Look for disallowed characters in the text.", "type. if not 'text/' in m.mime_type \\ or [e for", "return inf return inf.decode('latin-1') # Make a text string from", "moment. def getfiletext(filename): text = None with open(filename) as infile:", "as e: eprint('%s: %s' % (filename, e)) if text: return", "line = line + 1 subset = [c for c", "disallowed, msg): line = 0 warned = False for t", "analyze') parser.add_argument('-p', '--nonprint', required=False, type=str, choices=['all', 'bidi'], help='Look for either", "characters.') parser.add_argument('-v', '--verbose', required=False, action='store_true', help='Verbose mode.') parser.add_argument('-d', '--detailed', required=False,", "chr(0x202a), chr(0x202b), chr(0x202c), chr(0x202d), chr(0x202e), chr(0x2066), chr(0x2067), chr(0x2068), chr(0x2069)]) msg", "list, which should be a list of regular expressions matching", "for e in scan_exclude_mime if re.search(e, m.mime_type)]: return False return", "line. def decodeline(inf): if isinstance(inf, str): return inf return inf.decode('latin-1')", "%s' % (filename, e)) if text: return set(text) else: return", "decode from latin1 if necessary. # Other non-utf-8 locales are", "recursive descent into directories. 
def analyze_any(p, disallowed, msg): mode =", "import importlib from stat import * scan_exclude = [r'\\.git/', r'\\.hg/',", "with -p which when set to 'all', prints all control", "locales are not supported at the moment. def getfiletext(filename): text", "supported at the moment. def getfiletext(filename): text = None with", "control characters' elif args.nonprint == 'all': # All control characters.", "if hasattr(settings, 'scan_exclude'): scan_exclude = scan_exclude + settings.scan_exclude if hasattr(settings,", "files, provide a config file with the -c command line,", "from a file, attempting to decode from latin1 if necessary.", "set to 'bidi', prints only the 9 bidirectional control characters.", "= set([ chr(0x202a), chr(0x202b), chr(0x202c), chr(0x202d), chr(0x202e), chr(0x2066), chr(0x2067), chr(0x2068),", "= getfiletext(f) if text: analyze_text(f, text, disallowed, msg) else: eprint('%s:", "disallowed)) else: eprint('%s: OK' % filename) def should_read(f): m =", "tests (basically test.* as a component of path).') parser.add_argument('-c', '--config',", "analyze_dir(p, disallowed, msg) elif S_ISREG(mode): analyze_file(p, disallowed, msg) else: eprint('%s:", "analyze_text(filename, text, disallowed, msg): if detailed_mode: analyze_text_detailed(filename, text, disallowed, msg)", "elif args.nonprint == 'all': # All control characters. disallowed =", "disallowed, msg) def analyze_paths(paths, disallowed, msg): for p in paths:", "warned = True if not warned: eprint('%s: OK' % filename)", "single latin1 line. def decodeline(inf): if isinstance(inf, str): return inf", "characters occur.') parser.add_argument('-t', '--notests', required=False, action='store_true', help='Exclude tests (basically test.*", "r'\\.ppd$', r'\\.txt$', r'\\.directory$'] scan_exclude_mime = [r'text/x-po$', r'text/x-tex$', r'text/x-troff$', r'text/html$'] verbose_mode", "text: line = line + 1 subset = [c for", "There is a second mode enabled with -p which when", "ascii control characters. 
def nonprint_unicode(c): cat = unicodedata.category(c) if cat.startswith('C')", "the file name. if [e for e in scan_exclude if", "eprint('%s: %s' % (filename, e)) return None try: text =", "m.mime_type)]: return False return True # Get file text and", "msg = 'bidirectional control characters' if args.config: spec = importlib.util.spec_from_file_location(\"settings\",", "r'\\.directory$'] scan_exclude_mime = [r'text/x-po$', r'text/x-tex$', r'text/x-troff$', r'text/html$'] verbose_mode = False", "into a # set to speed up analysis. FIXME: Add", "characters. disallowed = set([ chr(0x202a), chr(0x202b), chr(0x202c), chr(0x202d), chr(0x202e), chr(0x2066),", "paths to exclude from the scan. There is a second", "''.join(infile) except UnicodeDecodeError: eprint('%s: Retrying with latin1' % filename) try:", "% filename) try: text = ''.join([decodeline(inf) for inf in infile])", "if S_ISDIR(mode): analyze_dir(p, disallowed, msg) elif S_ISREG(mode): analyze_file(p, disallowed, msg)", "%s: %s' % (filename, msg, text & disallowed)) else: eprint('%s:", "file name. 
if [e for e in scan_exclude if re.search(e,", "msg) elif S_ISREG(mode): analyze_file(p, disallowed, msg) else: eprint('%s: UNREADABLE' %", "line = 0 warned = False for t in text:", "warned = False for t in text: line = line", "analyze_text(f, text, disallowed, msg) else: eprint('%s: SKIPPED' % f) #", "chr(0x202c), chr(0x202d), chr(0x202e), chr(0x2066), chr(0x2067), chr(0x2068), chr(0x2069)]) msg = 'bidirectional", "msg): mode = os.stat(p).st_mode if S_ISDIR(mode): analyze_dir(p, disallowed, msg) elif", "cat.startswith('C') and cat != 'Cc': return True return False if", "e: eprint('%s: %s' % (filename, e)) return None try: text", "'all', prints all control characters and when set to 'bidi',", "r'NEWS$', r'\\.ppd$', r'\\.txt$', r'\\.directory$'] scan_exclude_mime = [r'text/x-po$', r'text/x-tex$', r'text/x-troff$', r'text/html$']", "prints all control characters and when set to 'bidi', prints", "spec.loader.exec_module(settings) if hasattr(settings, 'scan_exclude'): scan_exclude = scan_exclude + settings.scan_exclude if", "a component of path).') parser.add_argument('-c', '--config', required=False, type=str, help='Configuration file", "scan_exclude list, which should be a list of regular expressions", "= 'unicode control characters' elif args.nonprint == 'all': # All", "text = ''.join(infile) except UnicodeDecodeError: eprint('%s: Retrying with latin1' %", "e in scan_exclude if re.search(e, f)]: return False # Slower", "% (filename, msg, text & disallowed)) else: eprint('%s: OK' %", "mime type. if not 'text/' in m.mime_type \\ or [e", "for f in os.listdir(d): analyze_any(os.path.join(d, f), disallowed, msg) def analyze_paths(paths,", "%s' % (filename, msg, text & disallowed)) else: eprint('%s: OK'", "Decode a single latin1 line. 
def decodeline(inf): if isinstance(inf, str):", "scan_exclude_mime = [r'text/x-po$', r'text/x-tex$', r'text/x-troff$', r'text/html$'] verbose_mode = False #", "settings from.') args = parser.parse_args() verbose_mode = args.verbose detailed_mode =", "if c in disallowed] if subset: print('%s:%d %s: %s' %", "a # set to speed up analysis. FIXME: Add a", "print('%s: %s: %s' % (filename, msg, text & disallowed)) else:", "= set(chr(c) for c in range(sys.maxunicode) if \\ unicodedata.category(chr(c)) ==", "latin1 line. def decodeline(inf): if isinstance(inf, str): return inf return", "scan_exclude_mime if re.search(e, m.mime_type)]: return False return True # Get", "not supported at the moment. def getfiletext(filename): text = None", "else: eprint('%s: SKIPPED' % f) # Actual implementation of the", "msg = 'disallowed characters' else: # Only bidi control characters.", "as e: eprint('%s: %s' % (filename, e)) return None try:", "a slow mode to get line numbers in files #", "the unicode space. This includes the # bidi control characters.", "text and feed into analyze_text. def analyze_file(f, disallowed, msg): eprint('%s:", "try: text = ''.join([decodeline(inf) for inf in infile]) except Exception", "(filename, e)) return None try: text = ''.join(infile) except UnicodeDecodeError:", "in source files By default the script takes one or", "characters and when set to 'bidi', prints only the 9", "a second mode enabled with -p which when set to", "return False return True # Get file text and feed", "import * scan_exclude = [r'\\.git/', r'\\.hg/', r'\\.desktop$', r'ChangeLog$', r'NEWS$', r'\\.ppd$',", "msg, subset)) warned = True if not warned: eprint('%s: OK'", "None def analyze_text_detailed(filename, text, disallowed, msg): line = 0 warned", "analyze_dir(d, disallowed, msg): for f in os.listdir(d): analyze_any(os.path.join(d, f), disallowed,", "unicodedata.category(chr(c)) == 'Cf') msg = 'unicode control characters' elif args.nonprint", "9 bidirectional control characters. 
\"\"\" import sys, os, argparse, re,", "Other non-utf-8 locales are not supported at the moment. def", "e)) if text: return set(text) else: return None def analyze_text_detailed(filename,", "text, disallowed, msg) return if not text.isdisjoint(disallowed): print('%s: %s: %s'", "%s' % (filename, line, msg, subset)) warned = True if", "characters. disallowed = set(chr(c) for c in range(sys.maxunicode) if \\", "if args.config: spec = importlib.util.spec_from_file_location(\"settings\", args.config) settings = importlib.util.module_from_spec(spec) spec.loader.exec_module(settings)", "return False # Slower check, mime type. if not 'text/'", "* scan_exclude = [r'\\.git/', r'\\.hg/', r'\\.desktop$', r'ChangeLog$', r'NEWS$', r'\\.ppd$', r'\\.txt$',", "except Exception as e: eprint('%s: %s' % (filename, e)) return", "S_ISDIR(mode): analyze_dir(p, disallowed, msg) elif S_ISREG(mode): analyze_file(p, disallowed, msg) else:", "import sys, os, argparse, re, unicodedata, magic import importlib from", "os, argparse, re, unicodedata, magic import importlib from stat import", "the # bidi control characters. disallowed = set(chr(c) for c", "occur.') parser.add_argument('-t', '--notests', required=False, action='store_true', help='Exclude tests (basically test.* as", "msg) def analyze_paths(paths, disallowed, msg): for p in paths: analyze_any(p,", "return set(text) else: return None def analyze_text_detailed(filename, text, disallowed, msg):", "[r'text/x-po$', r'text/x-tex$', r'text/x-troff$', r'text/html$'] verbose_mode = False # Print to", "all text files. To narrow down the files, provide a", "control characters and when set to 'bidi', prints only the", "control characters. \"\"\" import sys, os, argparse, re, unicodedata, magic", "Look for disallowed characters in the text. We reduce all", "False return True # Get file text and feed into", "Exception as e: eprint('%s: %s' % (filename, e)) if text:", "t in text: line = line + 1 subset =", "# Print to stderr in verbose mode. 
def eprint(*args, **kwargs):", "control characters\") parser.add_argument('path', metavar='path', nargs='+', help='Sources to analyze') parser.add_argument('-p', '--nonprint',", "characters in all text files. To narrow down the files,", "line + 1 subset = [c for c in t", "characters. We omit the ascii control characters. def nonprint_unicode(c): cat", "def nonprint_unicode(c): cat = unicodedata.category(c) if cat.startswith('C') and cat !=", "line, defining a scan_exclude list, which should be a list", "set to 'all', prints all control characters and when set", "% (filename, e)) return None try: text = ''.join(infile) except", "= [r'\\.git/', r'\\.hg/', r'\\.desktop$', r'ChangeLog$', r'NEWS$', r'\\.ppd$', r'\\.txt$', r'\\.directory$'] scan_exclude_mime", "if __name__ == '__main__': parser = argparse.ArgumentParser(description=\"Look for Unicode control", "Get file text and feed into analyze_text. def analyze_file(f, disallowed,", "'--detailed', required=False, action='store_true', help='Print line numbers where characters occur.') parser.add_argument('-t',", "return [decodeline(inf) for inf in infile] except Exception as e:", "hasattr(settings, 'scan_exclude'): scan_exclude = scan_exclude + settings.scan_exclude if hasattr(settings, 'scan_exclude_mime'):", "list of regular expressions matching paths to exclude from the", "latin1' % filename) try: text = ''.join([decodeline(inf) for inf in", "stat import * scan_exclude = [r'\\.git/', r'\\.hg/', r'\\.desktop$', r'ChangeLog$', r'NEWS$',", "return False if __name__ == '__main__': parser = argparse.ArgumentParser(description=\"Look for", "parser.add_argument('-v', '--verbose', required=False, action='store_true', help='Verbose mode.') parser.add_argument('-d', '--detailed', required=False, action='store_true',", "if verbose_mode: print(*args, file=sys.stderr, **kwargs) # Decode a single latin1", "This includes the # bidi control characters. disallowed = set(chr(c)", "files. 
To narrow down the files, provide a config file", "with the -c command line, defining a scan_exclude list, which", "p) # Recursively analyze files in the directory. def analyze_dir(d,", "the moment. def getfiletext(filename): text = None with open(filename) as", "required=False, action='store_true', help='Verbose mode.') parser.add_argument('-d', '--detailed', required=False, action='store_true', help='Print line", "for c in t if c in disallowed] if subset:", "analyze_file(f, disallowed, msg): eprint('%s: Reading file' % f) if should_read(f):", "all characters into a # set to speed up analysis.", "return True return False if __name__ == '__main__': parser =", "# that have these disallowed chars. def analyze_text(filename, text, disallowed,", "with open(filename) as infile: try: if detailed_mode: return [decodeline(inf) for", "the directory. def analyze_dir(d, disallowed, msg): for f in os.listdir(d):", "in scan_exclude if re.search(e, f)]: return False # Slower check,", "the text. We reduce all characters into a # set", "'Cc': return True return False if __name__ == '__main__': parser", "+ 1 subset = [c for c in t if", "descent into directories. def analyze_any(p, disallowed, msg): mode = os.stat(p).st_mode", "control characters. disallowed = set(chr(c) for c in range(sys.maxunicode) if", "characters in the text. We reduce all characters into a", "more files or directories and looks for unicode control characters", "required=False, action='store_true', help='Print line numbers where characters occur.') parser.add_argument('-t', '--notests',", "if not text.isdisjoint(disallowed): print('%s: %s: %s' % (filename, msg, text", "True if not warned: eprint('%s: OK' % filename) # Look", "bidi control characters. 
disallowed = set([ chr(0x202a), chr(0x202b), chr(0x202c), chr(0x202d),", "analyze_any(p, disallowed, msg): mode = os.stat(p).st_mode if S_ISDIR(mode): analyze_dir(p, disallowed,", "c in range(sys.maxunicode) if \\ nonprint_unicode(chr(c))) msg = 'disallowed characters'", "1 subset = [c for c in t if c", "else: return None def analyze_text_detailed(filename, text, disallowed, msg): line =", "non-printable unicode characters or bidirectional control characters.') parser.add_argument('-v', '--verbose', required=False,", "== 'Cf') msg = 'unicode control characters' elif args.nonprint ==", "t if c in disallowed] if subset: print('%s:%d %s: %s'", "range(sys.maxunicode) if \\ nonprint_unicode(chr(c))) msg = 'disallowed characters' else: #", "the ascii control characters. def nonprint_unicode(c): cat = unicodedata.category(c) if", "all control characters and when set to 'bidi', prints only", "required=False, type=str, choices=['all', 'bidi'], help='Look for either all non-printable unicode", "= unicodedata.category(c) if cat.startswith('C') and cat != 'Cc': return True", "a single latin1 line. 
def decodeline(inf): if isinstance(inf, str): return", "#!/usr/bin/env python3 \"\"\"Find unicode control characters in source files By", "is a second mode enabled with -p which when set", "default the script takes one or more files or directories", "eprint('%s: %s' % (filename, e)) if text: return set(text) else:", "disallowed = set(chr(c) for c in range(sys.maxunicode) if \\ unicodedata.category(chr(c))", "os.listdir(d): analyze_any(os.path.join(d, f), disallowed, msg) def analyze_paths(paths, disallowed, msg): for", "= ''.join([decodeline(inf) for inf in infile]) except Exception as e:", "% filename) def should_read(f): m = magic.detect_from_filename(f) # Fast check,", "!= 'Cc': return True return False if __name__ == '__main__':", "text = ''.join([decodeline(inf) for inf in infile]) except Exception as", "\\ unicodedata.category(chr(c)) == 'Cf') msg = 'unicode control characters' elif", "args.verbose detailed_mode = args.detailed if not args.nonprint: # Formatting control", "for either all non-printable unicode characters or bidirectional control characters.')", "in the directory. def analyze_dir(d, disallowed, msg): for f in", "for unicode control characters in all text files. To narrow", "+ settings.scan_exclude_mime if args.notests: scan_exclude = scan_exclude + [r'/test[^/]+/'] analyze_paths(args.path,", "r'ChangeLog$', r'NEWS$', r'\\.ppd$', r'\\.txt$', r'\\.directory$'] scan_exclude_mime = [r'text/x-po$', r'text/x-tex$', r'text/x-troff$',", "in verbose mode. def eprint(*args, **kwargs): if verbose_mode: print(*args, file=sys.stderr,", "verbose_mode = args.verbose detailed_mode = args.detailed if not args.nonprint: #", "from.') args = parser.parse_args() verbose_mode = args.verbose detailed_mode = args.detailed", "inf return inf.decode('latin-1') # Make a text string from a", "to stderr in verbose mode. 
def eprint(*args, **kwargs): if verbose_mode:", "of path).') parser.add_argument('-c', '--config', required=False, type=str, help='Configuration file to read", "== 'all': # All control characters. disallowed = set(chr(c) for", "isinstance(inf, str): return inf return inf.decode('latin-1') # Make a text", "analyze_text_detailed(filename, text, disallowed, msg): line = 0 warned = False", "control characters.') parser.add_argument('-v', '--verbose', required=False, action='store_true', help='Verbose mode.') parser.add_argument('-d', '--detailed',", "msg) else: eprint('%s: SKIPPED' % f) # Actual implementation of", "args.config) settings = importlib.util.module_from_spec(spec) spec.loader.exec_module(settings) if hasattr(settings, 'scan_exclude'): scan_exclude =", "for disallowed characters in the text. We reduce all characters", "# All control characters. We omit the ascii control characters.", "verbose_mode = False # Print to stderr in verbose mode.", "if \\ nonprint_unicode(chr(c))) msg = 'disallowed characters' else: # Only", "'all': # All control characters. disallowed = set(chr(c) for c", "if subset: print('%s:%d %s: %s' % (filename, line, msg, subset))", "script takes one or more files or directories and looks", "detailed_mode: analyze_text_detailed(filename, text, disallowed, msg) return if not text.isdisjoint(disallowed): print('%s:", "control characters in all text files. To narrow down the", "infile]) except Exception as e: eprint('%s: %s' % (filename, e))", "scan_exclude + settings.scan_exclude if hasattr(settings, 'scan_exclude_mime'): scan_exclude_mime = scan_exclude_mime +", "return None try: text = ''.join(infile) except UnicodeDecodeError: eprint('%s: Retrying", "''.join([decodeline(inf) for inf in infile]) except Exception as e: eprint('%s:", "= 'disallowed characters' else: # Only bidi control characters. disallowed", "text.isdisjoint(disallowed): print('%s: %s: %s' % (filename, msg, text & disallowed))", "in the text. 
We reduce all characters into a #", "analyze_any(os.path.join(d, f), disallowed, msg) def analyze_paths(paths, disallowed, msg): for p", "the script takes one or more files or directories and", "'unicode control characters' elif args.nonprint == 'all': # All control", "scan_exclude = [r'\\.git/', r'\\.hg/', r'\\.desktop$', r'ChangeLog$', r'NEWS$', r'\\.ppd$', r'\\.txt$', r'\\.directory$']", "from the scan. There is a second mode enabled with", "help='Exclude tests (basically test.* as a component of path).') parser.add_argument('-c',", "eprint('%s: SKIPPED' % f) # Actual implementation of the recursive", "# Get file text and feed into analyze_text. def analyze_file(f,", "attempting to decode from latin1 if necessary. # Other non-utf-8", "= ''.join(infile) except UnicodeDecodeError: eprint('%s: Retrying with latin1' % filename)", "# Decode a single latin1 line. def decodeline(inf): if isinstance(inf,", "c in disallowed] if subset: print('%s:%d %s: %s' % (filename,", "SKIPPED' % f) # Actual implementation of the recursive descent", "characters or bidirectional control characters.') parser.add_argument('-v', '--verbose', required=False, action='store_true', help='Verbose", "[r'\\.git/', r'\\.hg/', r'\\.desktop$', r'ChangeLog$', r'NEWS$', r'\\.ppd$', r'\\.txt$', r'\\.directory$'] scan_exclude_mime =", "disallowed, msg) return if not text.isdisjoint(disallowed): print('%s: %s: %s' %", "str): return inf return inf.decode('latin-1') # Make a text string", "msg) # All control characters. We omit the ascii control", "numbers in files # that have these disallowed chars. 
def", "__name__ == '__main__': parser = argparse.ArgumentParser(description=\"Look for Unicode control characters\")", "p in paths: analyze_any(p, disallowed, msg) # All control characters.", "= argparse.ArgumentParser(description=\"Look for Unicode control characters\") parser.add_argument('path', metavar='path', nargs='+', help='Sources", "filename) try: text = ''.join([decodeline(inf) for inf in infile]) except", "Recursively analyze files in the directory. def analyze_dir(d, disallowed, msg):", "line numbers where characters occur.') parser.add_argument('-t', '--notests', required=False, action='store_true', help='Exclude", "[e for e in scan_exclude_mime if re.search(e, m.mime_type)]: return False", "warned: eprint('%s: OK' % filename) # Look for disallowed characters", "just the file name. if [e for e in scan_exclude", "'--verbose', required=False, action='store_true', help='Verbose mode.') parser.add_argument('-d', '--detailed', required=False, action='store_true', help='Print", "text = getfiletext(f) if text: analyze_text(f, text, disallowed, msg) else:", "in all text files. To narrow down the files, provide", "importlib from stat import * scan_exclude = [r'\\.git/', r'\\.hg/', r'\\.desktop$',", "and looks for unicode control characters in all text files.", "= 'bidirectional control characters' if args.config: spec = importlib.util.spec_from_file_location(\"settings\", args.config)", "\\ or [e for e in scan_exclude_mime if re.search(e, m.mime_type)]:", "UNREADABLE' % p) # Recursively analyze files in the directory.", "spec = importlib.util.spec_from_file_location(\"settings\", args.config) settings = importlib.util.module_from_spec(spec) spec.loader.exec_module(settings) if hasattr(settings,", "e in scan_exclude_mime if re.search(e, m.mime_type)]: return False return True", "True # Get file text and feed into analyze_text. 
def", "characters\") parser.add_argument('path', metavar='path', nargs='+', help='Sources to analyze') parser.add_argument('-p', '--nonprint', required=False,", "to get line numbers in files # that have these", "getfiletext(f) if text: analyze_text(f, text, disallowed, msg) else: eprint('%s: SKIPPED'", "in disallowed] if subset: print('%s:%d %s: %s' % (filename, line,", "f) # Actual implementation of the recursive descent into directories.", "expressions matching paths to exclude from the scan. There is", "msg): for p in paths: analyze_any(p, disallowed, msg) # All", "not args.nonprint: # Formatting control characters in the unicode space.", "'scan_exclude'): scan_exclude = scan_exclude + settings.scan_exclude if hasattr(settings, 'scan_exclude_mime'): scan_exclude_mime", "for p in paths: analyze_any(p, disallowed, msg) # All control", "directories and looks for unicode control characters in all text", "down the files, provide a config file with the -c", "in range(sys.maxunicode) if \\ nonprint_unicode(chr(c))) msg = 'disallowed characters' else:", "analyze_text_detailed(filename, text, disallowed, msg) return if not text.isdisjoint(disallowed): print('%s: %s:", "for e in scan_exclude if re.search(e, f)]: return False #", "S_ISREG(mode): analyze_file(p, disallowed, msg) else: eprint('%s: UNREADABLE' % p) #", "text: analyze_text(f, text, disallowed, msg) else: eprint('%s: SKIPPED' % f)", "analyze_file(p, disallowed, msg) else: eprint('%s: UNREADABLE' % p) # Recursively", "if cat.startswith('C') and cat != 'Cc': return True return False", "= False # Print to stderr in verbose mode. def", "check, just the file name. if [e for e in", "def analyze_any(p, disallowed, msg): mode = os.stat(p).st_mode if S_ISDIR(mode): analyze_dir(p,", "chars. 
def analyze_text(filename, text, disallowed, msg): if detailed_mode: analyze_text_detailed(filename, text,", "args.config: spec = importlib.util.spec_from_file_location(\"settings\", args.config) settings = importlib.util.module_from_spec(spec) spec.loader.exec_module(settings) if", "only the 9 bidirectional control characters. \"\"\" import sys, os,", "of regular expressions matching paths to exclude from the scan.", "a file, attempting to decode from latin1 if necessary. #", "paths: analyze_any(p, disallowed, msg) # All control characters. We omit", "component of path).') parser.add_argument('-c', '--config', required=False, type=str, help='Configuration file to", "False for t in text: line = line + 1", "when set to 'bidi', prints only the 9 bidirectional control", "if not warned: eprint('%s: OK' % filename) # Look for", "def should_read(f): m = magic.detect_from_filename(f) # Fast check, just the", "have these disallowed chars. def analyze_text(filename, text, disallowed, msg): if", "the -c command line, defining a scan_exclude list, which should", "print(*args, file=sys.stderr, **kwargs) # Decode a single latin1 line. def", "of the recursive descent into directories. def analyze_any(p, disallowed, msg):", "text & disallowed)) else: eprint('%s: OK' % filename) def should_read(f):", "argparse, re, unicodedata, magic import importlib from stat import *", "if isinstance(inf, str): return inf return inf.decode('latin-1') # Make a", "return inf.decode('latin-1') # Make a text string from a file,", "False # Print to stderr in verbose mode. def eprint(*args,", "help='Look for either all non-printable unicode characters or bidirectional control", "All control characters. We omit the ascii control characters. def", "analysis. FIXME: Add a slow mode to get line numbers", "# Actual implementation of the recursive descent into directories. 
def", "OK' % filename) def should_read(f): m = magic.detect_from_filename(f) # Fast", "eprint('%s: UNREADABLE' % p) # Recursively analyze files in the", "source files By default the script takes one or more", "# bidi control characters. disallowed = set(chr(c) for c in", "nargs='+', help='Sources to analyze') parser.add_argument('-p', '--nonprint', required=False, type=str, choices=['all', 'bidi'],", "in the unicode space. This includes the # bidi control", "parser.add_argument('path', metavar='path', nargs='+', help='Sources to analyze') parser.add_argument('-p', '--nonprint', required=False, type=str,", "parser.add_argument('-t', '--notests', required=False, action='store_true', help='Exclude tests (basically test.* as a", "nonprint_unicode(c): cat = unicodedata.category(c) if cat.startswith('C') and cat != 'Cc':", "0 warned = False for t in text: line =", "scan_exclude_mime + settings.scan_exclude_mime if args.notests: scan_exclude = scan_exclude + [r'/test[^/]+/']", "file=sys.stderr, **kwargs) # Decode a single latin1 line. def decodeline(inf):", "FIXME: Add a slow mode to get line numbers in", "latin1 if necessary. # Other non-utf-8 locales are not supported", "a list of regular expressions matching paths to exclude from", "to decode from latin1 if necessary. # Other non-utf-8 locales", "# Slower check, mime type. if not 'text/' in m.mime_type", "verbose_mode: print(*args, file=sys.stderr, **kwargs) # Decode a single latin1 line.", "def getfiletext(filename): text = None with open(filename) as infile: try:", "scan_exclude if re.search(e, f)]: return False # Slower check, mime", "with latin1' % filename) try: text = ''.join([decodeline(inf) for inf", "Slower check, mime type. if not 'text/' in m.mime_type \\", "second mode enabled with -p which when set to 'all',", "includes the # bidi control characters. disallowed = set(chr(c) for", "config file with the -c command line, defining a scan_exclude", "directories. 
def analyze_any(p, disallowed, msg): mode = os.stat(p).st_mode if S_ISDIR(mode):", "analyze_any(p, disallowed, msg) # All control characters. We omit the", "def eprint(*args, **kwargs): if verbose_mode: print(*args, file=sys.stderr, **kwargs) # Decode", "range(sys.maxunicode) if \\ unicodedata.category(chr(c)) == 'Cf') msg = 'unicode control", "elif S_ISREG(mode): analyze_file(p, disallowed, msg) else: eprint('%s: UNREADABLE' % p)", "text: return set(text) else: return None def analyze_text_detailed(filename, text, disallowed,", "type=str, help='Configuration file to read settings from.') args = parser.parse_args()", "unicode control characters in all text files. To narrow down", "\"\"\"Find unicode control characters in source files By default the", "mode to get line numbers in files # that have", "set to speed up analysis. FIXME: Add a slow mode", "text = None with open(filename) as infile: try: if detailed_mode:", "e: eprint('%s: %s' % (filename, e)) if text: return set(text)", "bidirectional control characters.') parser.add_argument('-v', '--verbose', required=False, action='store_true', help='Verbose mode.') parser.add_argument('-d',", "set(chr(c) for c in range(sys.maxunicode) if \\ nonprint_unicode(chr(c))) msg =", "directory. def analyze_dir(d, disallowed, msg): for f in os.listdir(d): analyze_any(os.path.join(d,", "% (filename, line, msg, subset)) warned = True if not", "in t if c in disallowed] if subset: print('%s:%d %s:", "\"\"\" import sys, os, argparse, re, unicodedata, magic import importlib", "'bidi'], help='Look for either all non-printable unicode characters or bidirectional", "e)) return None try: text = ''.join(infile) except UnicodeDecodeError: eprint('%s:", "= magic.detect_from_filename(f) # Fast check, just the file name. if", "if hasattr(settings, 'scan_exclude_mime'): scan_exclude_mime = scan_exclude_mime + settings.scan_exclude_mime if args.notests:", "# All control characters. 
disallowed = set(chr(c) for c in", "argparse.ArgumentParser(description=\"Look for Unicode control characters\") parser.add_argument('path', metavar='path', nargs='+', help='Sources to", "eprint('%s: OK' % filename) # Look for disallowed characters in", "file' % f) if should_read(f): text = getfiletext(f) if text:", "takes one or more files or directories and looks for", "= importlib.util.module_from_spec(spec) spec.loader.exec_module(settings) if hasattr(settings, 'scan_exclude'): scan_exclude = scan_exclude +", "Retrying with latin1' % filename) try: text = ''.join([decodeline(inf) for", "control characters. We omit the ascii control characters. def nonprint_unicode(c):", "name. if [e for e in scan_exclude if re.search(e, f)]:", "not 'text/' in m.mime_type \\ or [e for e in", "# Fast check, just the file name. if [e for", "if text: return set(text) else: return None def analyze_text_detailed(filename, text,", "which when set to 'all', prints all control characters and", "= line + 1 subset = [c for c in", "chr(0x2067), chr(0x2068), chr(0x2069)]) msg = 'bidirectional control characters' if args.config:", "parser.add_argument('-c', '--config', required=False, type=str, help='Configuration file to read settings from.')", "None with open(filename) as infile: try: if detailed_mode: return [decodeline(inf)", "command line, defining a scan_exclude list, which should be a", "or [e for e in scan_exclude_mime if re.search(e, m.mime_type)]: return", "chr(0x2066), chr(0x2067), chr(0x2068), chr(0x2069)]) msg = 'bidirectional control characters' if", "c in t if c in disallowed] if subset: print('%s:%d", "return True # Get file text and feed into analyze_text.", "msg): for f in os.listdir(d): analyze_any(os.path.join(d, f), disallowed, msg) def", "disallowed, msg): for p in paths: analyze_any(p, disallowed, msg) #", "f in os.listdir(d): analyze_any(os.path.join(d, f), disallowed, msg) def analyze_paths(paths, disallowed,", "disallowed] if subset: print('%s:%d %s: %s' % 
(filename, line, msg,", "to analyze') parser.add_argument('-p', '--nonprint', required=False, type=str, choices=['all', 'bidi'], help='Look for", "enabled with -p which when set to 'all', prints all", "Only bidi control characters. disallowed = set([ chr(0x202a), chr(0x202b), chr(0x202c),", "if detailed_mode: return [decodeline(inf) for inf in infile] except Exception", "defining a scan_exclude list, which should be a list of", "not warned: eprint('%s: OK' % filename) # Look for disallowed", "os.stat(p).st_mode if S_ISDIR(mode): analyze_dir(p, disallowed, msg) elif S_ISREG(mode): analyze_file(p, disallowed,", "characters' else: # Only bidi control characters. disallowed = set([", "disallowed, msg): if detailed_mode: analyze_text_detailed(filename, text, disallowed, msg) return if", "Make a text string from a file, attempting to decode", "control characters in source files By default the script takes", "the 9 bidirectional control characters. \"\"\" import sys, os, argparse,", "(filename, e)) if text: return set(text) else: return None def", "msg): line = 0 warned = False for t in", "decodeline(inf): if isinstance(inf, str): return inf return inf.decode('latin-1') # Make", "all non-printable unicode characters or bidirectional control characters.') parser.add_argument('-v', '--verbose',", "should be a list of regular expressions matching paths to", "or directories and looks for unicode control characters in all", "re.search(e, f)]: return False # Slower check, mime type. if", "# Formatting control characters in the unicode space. This includes", "f), disallowed, msg) def analyze_paths(paths, disallowed, msg): for p in", "mode.') parser.add_argument('-d', '--detailed', required=False, action='store_true', help='Print line numbers where characters", "characters' elif args.nonprint == 'all': # All control characters. disallowed", "mode. 
def eprint(*args, **kwargs): if verbose_mode: print(*args, file=sys.stderr, **kwargs) #", "as infile: try: if detailed_mode: return [decodeline(inf) for inf in", "'bidirectional control characters' if args.config: spec = importlib.util.spec_from_file_location(\"settings\", args.config) settings", "text files. To narrow down the files, provide a config", "text, disallowed, msg) else: eprint('%s: SKIPPED' % f) # Actual", "= scan_exclude + settings.scan_exclude if hasattr(settings, 'scan_exclude_mime'): scan_exclude_mime = scan_exclude_mime", "into analyze_text. def analyze_file(f, disallowed, msg): eprint('%s: Reading file' %", "else: # Only bidi control characters. disallowed = set([ chr(0x202a),", "exclude from the scan. There is a second mode enabled", "required=False, type=str, help='Configuration file to read settings from.') args =", "disallowed chars. def analyze_text(filename, text, disallowed, msg): if detailed_mode: analyze_text_detailed(filename,", "file with the -c command line, defining a scan_exclude list,", "-p which when set to 'all', prints all control characters", "characters. def nonprint_unicode(c): cat = unicodedata.category(c) if cat.startswith('C') and cat", "text string from a file, attempting to decode from latin1", "to 'bidi', prints only the 9 bidirectional control characters. \"\"\"", "if re.search(e, m.mime_type)]: return False return True # Get file", "read settings from.') args = parser.parse_args() verbose_mode = args.verbose detailed_mode", "importlib.util.module_from_spec(spec) spec.loader.exec_module(settings) if hasattr(settings, 'scan_exclude'): scan_exclude = scan_exclude + settings.scan_exclude", "disallowed, msg) # All control characters. 
We omit the ascii", "for c in range(sys.maxunicode) if \\ nonprint_unicode(chr(c))) msg = 'disallowed", "to read settings from.') args = parser.parse_args() verbose_mode = args.verbose", "nonprint_unicode(chr(c))) msg = 'disallowed characters' else: # Only bidi control", "inf in infile] except Exception as e: eprint('%s: %s' %", "'bidi', prints only the 9 bidirectional control characters. \"\"\" import", "# Recursively analyze files in the directory. def analyze_dir(d, disallowed,", "help='Print line numbers where characters occur.') parser.add_argument('-t', '--notests', required=False, action='store_true',", "'__main__': parser = argparse.ArgumentParser(description=\"Look for Unicode control characters\") parser.add_argument('path', metavar='path',", "characters' if args.config: spec = importlib.util.spec_from_file_location(\"settings\", args.config) settings = importlib.util.module_from_spec(spec)", "print('%s:%d %s: %s' % (filename, line, msg, subset)) warned =", "r'text/html$'] verbose_mode = False # Print to stderr in verbose", "provide a config file with the -c command line, defining", "else: eprint('%s: OK' % filename) def should_read(f): m = magic.detect_from_filename(f)", "msg): if detailed_mode: analyze_text_detailed(filename, text, disallowed, msg) return if not", "False if __name__ == '__main__': parser = argparse.ArgumentParser(description=\"Look for Unicode", "that have these disallowed chars. def analyze_text(filename, text, disallowed, msg):", "for c in range(sys.maxunicode) if \\ unicodedata.category(chr(c)) == 'Cf') msg", "line, msg, subset)) warned = True if not warned: eprint('%s:", "from stat import * scan_exclude = [r'\\.git/', r'\\.hg/', r'\\.desktop$', r'ChangeLog$',", "for inf in infile]) except Exception as e: eprint('%s: %s'", "set([ chr(0x202a), chr(0x202b), chr(0x202c), chr(0x202d), chr(0x202e), chr(0x2066), chr(0x2067), chr(0x2068), chr(0x2069)])", "implementation of the recursive descent into directories. 
def analyze_any(p, disallowed,", "= [r'text/x-po$', r'text/x-tex$', r'text/x-troff$', r'text/html$'] verbose_mode = False # Print", "file to read settings from.') args = parser.parse_args() verbose_mode =", "line numbers in files # that have these disallowed chars.", "files or directories and looks for unicode control characters in", "to exclude from the scan. There is a second mode", "# Other non-utf-8 locales are not supported at the moment.", "OK' % filename) # Look for disallowed characters in the", "scan_exclude_mime = scan_exclude_mime + settings.scan_exclude_mime if args.notests: scan_exclude = scan_exclude", "Fast check, just the file name. if [e for e", "not text.isdisjoint(disallowed): print('%s: %s: %s' % (filename, msg, text &", "analyze_paths(paths, disallowed, msg): for p in paths: analyze_any(p, disallowed, msg)", "msg = 'unicode control characters' elif args.nonprint == 'all': #", "return if not text.isdisjoint(disallowed): print('%s: %s: %s' % (filename, msg,", "def analyze_file(f, disallowed, msg): eprint('%s: Reading file' % f) if", "unicodedata, magic import importlib from stat import * scan_exclude =", "subset)) warned = True if not warned: eprint('%s: OK' %", "**kwargs): if verbose_mode: print(*args, file=sys.stderr, **kwargs) # Decode a single", "= set(chr(c) for c in range(sys.maxunicode) if \\ nonprint_unicode(chr(c))) msg", "= True if not warned: eprint('%s: OK' % filename) #", "f) if should_read(f): text = getfiletext(f) if text: analyze_text(f, text,", "= args.detailed if not args.nonprint: # Formatting control characters in", "check, mime type. if not 'text/' in m.mime_type \\ or", "subset: print('%s:%d %s: %s' % (filename, line, msg, subset)) warned", "args.nonprint: # Formatting control characters in the unicode space. 
This", "None try: text = ''.join(infile) except UnicodeDecodeError: eprint('%s: Retrying with", "try: text = ''.join(infile) except UnicodeDecodeError: eprint('%s: Retrying with latin1'", "characters in the unicode space. This includes the # bidi", "% f) if should_read(f): text = getfiletext(f) if text: analyze_text(f,", "= os.stat(p).st_mode if S_ISDIR(mode): analyze_dir(p, disallowed, msg) elif S_ISREG(mode): analyze_file(p,", "'text/' in m.mime_type \\ or [e for e in scan_exclude_mime", "By default the script takes one or more files or", "disallowed, msg) else: eprint('%s: SKIPPED' % f) # Actual implementation", "parser.add_argument('-p', '--nonprint', required=False, type=str, choices=['all', 'bidi'], help='Look for either all", "inf.decode('latin-1') # Make a text string from a file, attempting", "for Unicode control characters\") parser.add_argument('path', metavar='path', nargs='+', help='Sources to analyze')", "except Exception as e: eprint('%s: %s' % (filename, e)) if", "matching paths to exclude from the scan. There is a", "disallowed, msg): for f in os.listdir(d): analyze_any(os.path.join(d, f), disallowed, msg)", "def analyze_paths(paths, disallowed, msg): for p in paths: analyze_any(p, disallowed,", "importlib.util.spec_from_file_location(\"settings\", args.config) settings = importlib.util.module_from_spec(spec) spec.loader.exec_module(settings) if hasattr(settings, 'scan_exclude'): scan_exclude", "prints only the 9 bidirectional control characters. \"\"\" import sys,", "characters into a # set to speed up analysis. FIXME:", "= 0 warned = False for t in text: line", "r'text/x-troff$', r'text/html$'] verbose_mode = False # Print to stderr in", "'disallowed characters' else: # Only bidi control characters. disallowed =", "files in the directory. 
def analyze_dir(d, disallowed, msg): for f", "in infile] except Exception as e: eprint('%s: %s' % (filename,", "[c for c in t if c in disallowed] if", "parser.add_argument('-d', '--detailed', required=False, action='store_true', help='Print line numbers where characters occur.')", "control characters in the unicode space. This includes the #", "for inf in infile] except Exception as e: eprint('%s: %s'", "mode enabled with -p which when set to 'all', prints", "and feed into analyze_text. def analyze_file(f, disallowed, msg): eprint('%s: Reading", "if \\ unicodedata.category(chr(c)) == 'Cf') msg = 'unicode control characters'", "required=False, action='store_true', help='Exclude tests (basically test.* as a component of", "be a list of regular expressions matching paths to exclude", "f)]: return False # Slower check, mime type. if not", "% p) # Recursively analyze files in the directory. def", "unicode control characters in source files By default the script", "characters. \"\"\" import sys, os, argparse, re, unicodedata, magic import", "filename) def should_read(f): m = magic.detect_from_filename(f) # Fast check, just", "control characters' if args.config: spec = importlib.util.spec_from_file_location(\"settings\", args.config) settings =", "unicode characters or bidirectional control characters.') parser.add_argument('-v', '--verbose', required=False, action='store_true',", "%s' % (filename, e)) return None try: text = ''.join(infile)", "in os.listdir(d): analyze_any(os.path.join(d, f), disallowed, msg) def analyze_paths(paths, disallowed, msg):", "characters in source files By default the script takes one", "(filename, line, msg, subset)) warned = True if not warned:", "analyze_text. def analyze_file(f, disallowed, msg): eprint('%s: Reading file' % f)", "'--nonprint', required=False, type=str, choices=['all', 'bidi'], help='Look for either all non-printable", "are not supported at the moment. 
def getfiletext(filename): text =", "eprint('%s: Retrying with latin1' % filename) try: text = ''.join([decodeline(inf)", "help='Verbose mode.') parser.add_argument('-d', '--detailed', required=False, action='store_true', help='Print line numbers where", "when set to 'all', prints all control characters and when", "if not 'text/' in m.mime_type \\ or [e for e", "get line numbers in files # that have these disallowed", "either all non-printable unicode characters or bidirectional control characters.') parser.add_argument('-v',", "disallowed = set(chr(c) for c in range(sys.maxunicode) if \\ nonprint_unicode(chr(c)))", "detailed_mode: return [decodeline(inf) for inf in infile] except Exception as", "% (filename, e)) if text: return set(text) else: return None", "def analyze_dir(d, disallowed, msg): for f in os.listdir(d): analyze_any(os.path.join(d, f),", "chr(0x2068), chr(0x2069)]) msg = 'bidirectional control characters' if args.config: spec", "+ settings.scan_exclude if hasattr(settings, 'scan_exclude_mime'): scan_exclude_mime = scan_exclude_mime + settings.scan_exclude_mime", "detailed_mode = args.detailed if not args.nonprint: # Formatting control characters", "\\ nonprint_unicode(chr(c))) msg = 'disallowed characters' else: # Only bidi", "# Make a text string from a file, attempting to", "[decodeline(inf) for inf in infile] except Exception as e: eprint('%s:", "r'\\.hg/', r'\\.desktop$', r'ChangeLog$', r'NEWS$', r'\\.ppd$', r'\\.txt$', r'\\.directory$'] scan_exclude_mime = [r'text/x-po$',", "msg, text & disallowed)) else: eprint('%s: OK' % filename) def", "False # Slower check, mime type. if not 'text/' in", "looks for unicode control characters in all text files. 
To", "Unicode control characters\") parser.add_argument('path', metavar='path', nargs='+', help='Sources to analyze') parser.add_argument('-p',", "in range(sys.maxunicode) if \\ unicodedata.category(chr(c)) == 'Cf') msg = 'unicode", "should_read(f): text = getfiletext(f) if text: analyze_text(f, text, disallowed, msg)", "magic import importlib from stat import * scan_exclude = [r'\\.git/',", "We omit the ascii control characters. def nonprint_unicode(c): cat =", "the recursive descent into directories. def analyze_any(p, disallowed, msg): mode", "reduce all characters into a # set to speed up", "as a component of path).') parser.add_argument('-c', '--config', required=False, type=str, help='Configuration", "cat = unicodedata.category(c) if cat.startswith('C') and cat != 'Cc': return", "infile: try: if detailed_mode: return [decodeline(inf) for inf in infile]", "c in range(sys.maxunicode) if \\ unicodedata.category(chr(c)) == 'Cf') msg =", "text, disallowed, msg): line = 0 warned = False for", "msg) return if not text.isdisjoint(disallowed): print('%s: %s: %s' % (filename,", "if text: analyze_text(f, text, disallowed, msg) else: eprint('%s: SKIPPED' %", "re, unicodedata, magic import importlib from stat import * scan_exclude", "Actual implementation of the recursive descent into directories. def analyze_any(p,", "= parser.parse_args() verbose_mode = args.verbose detailed_mode = args.detailed if not", "unicodedata.category(c) if cat.startswith('C') and cat != 'Cc': return True return", "the files, provide a config file with the -c command", "action='store_true', help='Print line numbers where characters occur.') parser.add_argument('-t', '--notests', required=False,", "from latin1 if necessary. 
# Other non-utf-8 locales are not", "chr(0x202b), chr(0x202c), chr(0x202d), chr(0x202e), chr(0x2066), chr(0x2067), chr(0x2068), chr(0x2069)]) msg =", "sys, os, argparse, re, unicodedata, magic import importlib from stat", "= args.verbose detailed_mode = args.detailed if not args.nonprint: # Formatting", "args.detailed if not args.nonprint: # Formatting control characters in the", "bidirectional control characters. \"\"\" import sys, os, argparse, re, unicodedata,", "up analysis. FIXME: Add a slow mode to get line", "in infile]) except Exception as e: eprint('%s: %s' % (filename,", "m.mime_type \\ or [e for e in scan_exclude_mime if re.search(e,", "python3 \"\"\"Find unicode control characters in source files By default", "-c command line, defining a scan_exclude list, which should be", "slow mode to get line numbers in files # that", "in text: line = line + 1 subset = [c", "in paths: analyze_any(p, disallowed, msg) # All control characters. We", "settings = importlib.util.module_from_spec(spec) spec.loader.exec_module(settings) if hasattr(settings, 'scan_exclude'): scan_exclude = scan_exclude", "getfiletext(filename): text = None with open(filename) as infile: try: if", "True return False if __name__ == '__main__': parser = argparse.ArgumentParser(description=\"Look", "which should be a list of regular expressions matching paths", "= scan_exclude_mime + settings.scan_exclude_mime if args.notests: scan_exclude = scan_exclude +", "%s: %s' % (filename, line, msg, subset)) warned = True" ]
[ "= self.obj.get_body_id() all_links = get_all_links(body_id) aabbs = [get_aabb(body_id, link=link) for", "if room_aabb_low is None: return np.array(aabb_low), np.array(aabb_hi) # Use the", "Use the z values from pybullet room_aabb_low[2] = aabb_low[2] room_aabb_hi[2]", "or self.obj.room_floor is None: return np.array(aabb_low), np.array(aabb_hi) # TODO: remove", "the correct RoomFloor beforehand room_instance = self.obj.room_floor.room_instance # Get the", "values from the room segmentation map room_aabb_low, room_aabb_hi = self.obj.room_floor.scene.get_aabb_by_room_instance(room_instance)", "link in all_links] aabb_low, aabb_hi = aabb_union(aabbs) if not hasattr(self.obj,", "from igibson.external.pybullet_tools.utils import aabb_union, get_aabb, get_all_links from igibson.object_states.object_state_base import CachingEnabledObjectState", "to be done to save/load AABB since it will happen", "room_aabb_low[2] = aabb_low[2] room_aabb_hi[2] = aabb_hi[2] return np.array(room_aabb_low), np.array(room_aabb_hi) def", "import aabb_union, get_aabb, get_all_links from igibson.object_states.object_state_base import CachingEnabledObjectState class AABB(CachingEnabledObjectState):", "setting.\") # Nothing needs to be done to save/load AABB", "room_aabb_low, room_aabb_hi = self.obj.room_floor.scene.get_aabb_by_room_instance(room_instance) if room_aabb_low is None: return np.array(aabb_low),", "correct RoomFloor beforehand room_instance = self.obj.room_floor.room_instance # Get the x-y", "happen due to pose caching. 
def _dump(self): return None def", "= [get_aabb(body_id, link=link) for link in all_links] aabb_low, aabb_hi =", "to save/load AABB since it will happen due to pose", "is None: return np.array(aabb_low), np.array(aabb_hi) # Use the z values", "self.obj.room_floor.room_instance # Get the x-y values from the room segmentation", "# TODO: remove after split floors # room_floor will be", "aabb_low, aabb_hi = aabb_union(aabbs) if not hasattr(self.obj, \"category\") or self.obj.category", "<reponame>mamadbiabon/iGibson import numpy as np from igibson.external.pybullet_tools.utils import aabb_union, get_aabb,", "beforehand room_instance = self.obj.room_floor.room_instance # Get the x-y values from", "room_instance = self.obj.room_floor.room_instance # Get the x-y values from the", "CachingEnabledObjectState class AABB(CachingEnabledObjectState): def _compute_value(self): body_id = self.obj.get_body_id() all_links =", "None: return np.array(aabb_low), np.array(aabb_hi) # Use the z values from", "floors # room_floor will be set to the correct RoomFloor", "self.obj.get_body_id() all_links = get_all_links(body_id) aabbs = [get_aabb(body_id, link=link) for link", "_set_value(self, new_value): raise NotImplementedError(\"AABB state currently does not support setting.\")", "self.obj.room_floor is None: return np.array(aabb_low), np.array(aabb_hi) # TODO: remove after", "None: return np.array(aabb_low), np.array(aabb_hi) # TODO: remove after split floors", "def _set_value(self, new_value): raise NotImplementedError(\"AABB state currently does not support", "AABB since it will happen due to pose caching. 
def", "after split floors # room_floor will be set to the", "remove after split floors # room_floor will be set to", "done to save/load AABB since it will happen due to", "self.obj.room_floor.scene.get_aabb_by_room_instance(room_instance) if room_aabb_low is None: return np.array(aabb_low), np.array(aabb_hi) # Use", "igibson.external.pybullet_tools.utils import aabb_union, get_aabb, get_all_links from igibson.object_states.object_state_base import CachingEnabledObjectState class", "all_links] aabb_low, aabb_hi = aabb_union(aabbs) if not hasattr(self.obj, \"category\") or", "split floors # room_floor will be set to the correct", "aabb_union(aabbs) if not hasattr(self.obj, \"category\") or self.obj.category != \"floors\" or", "return np.array(aabb_low), np.array(aabb_hi) # TODO: remove after split floors #", "support setting.\") # Nothing needs to be done to save/load", "NotImplementedError(\"AABB state currently does not support setting.\") # Nothing needs", "# Get the x-y values from the room segmentation map", "state currently does not support setting.\") # Nothing needs to", "as np from igibson.external.pybullet_tools.utils import aabb_union, get_aabb, get_all_links from igibson.object_states.object_state_base", "get_aabb, get_all_links from igibson.object_states.object_state_base import CachingEnabledObjectState class AABB(CachingEnabledObjectState): def _compute_value(self):", "or self.obj.category != \"floors\" or self.obj.room_floor is None: return np.array(aabb_low),", "def _compute_value(self): body_id = self.obj.get_body_id() all_links = get_all_links(body_id) aabbs =", "Nothing needs to be done to save/load AABB since it", "\"category\") or self.obj.category != \"floors\" or self.obj.room_floor is None: return", "igibson.object_states.object_state_base import CachingEnabledObjectState class AABB(CachingEnabledObjectState): def _compute_value(self): body_id = self.obj.get_body_id()", "_compute_value(self): body_id = self.obj.get_body_id() all_links = 
get_all_links(body_id) aabbs = [get_aabb(body_id,", "= aabb_union(aabbs) if not hasattr(self.obj, \"category\") or self.obj.category != \"floors\"", "since it will happen due to pose caching. def _dump(self):", "np.array(aabb_hi) # TODO: remove after split floors # room_floor will", "np.array(room_aabb_low), np.array(room_aabb_hi) def _set_value(self, new_value): raise NotImplementedError(\"AABB state currently does", "needs to be done to save/load AABB since it will", "= aabb_hi[2] return np.array(room_aabb_low), np.array(room_aabb_hi) def _set_value(self, new_value): raise NotImplementedError(\"AABB", "np.array(aabb_low), np.array(aabb_hi) # TODO: remove after split floors # room_floor", "all_links = get_all_links(body_id) aabbs = [get_aabb(body_id, link=link) for link in", "= get_all_links(body_id) aabbs = [get_aabb(body_id, link=link) for link in all_links]", "aabb_hi[2] return np.array(room_aabb_low), np.array(room_aabb_hi) def _set_value(self, new_value): raise NotImplementedError(\"AABB state", "class AABB(CachingEnabledObjectState): def _compute_value(self): body_id = self.obj.get_body_id() all_links = get_all_links(body_id)", "from igibson.object_states.object_state_base import CachingEnabledObjectState class AABB(CachingEnabledObjectState): def _compute_value(self): body_id =", "# Nothing needs to be done to save/load AABB since", "does not support setting.\") # Nothing needs to be done", "np.array(aabb_hi) # Use the z values from pybullet room_aabb_low[2] =", "return np.array(aabb_low), np.array(aabb_hi) # Use the z values from pybullet", "body_id = self.obj.get_body_id() all_links = get_all_links(body_id) aabbs = [get_aabb(body_id, link=link)", "get_all_links(body_id) aabbs = [get_aabb(body_id, link=link) for link in all_links] aabb_low,", "room_aabb_low is None: return np.array(aabb_low), np.array(aabb_hi) # Use the z", "is None: return np.array(aabb_low), np.array(aabb_hi) # TODO: remove after split", "aabb_low[2] room_aabb_hi[2] = aabb_hi[2] return 
np.array(room_aabb_low), np.array(room_aabb_hi) def _set_value(self, new_value):", "be done to save/load AABB since it will happen due", "will happen due to pose caching. def _dump(self): return None", "pose caching. def _dump(self): return None def load(self, data): return", "= aabb_low[2] room_aabb_hi[2] = aabb_hi[2] return np.array(room_aabb_low), np.array(room_aabb_hi) def _set_value(self,", "room_floor will be set to the correct RoomFloor beforehand room_instance", "pybullet room_aabb_low[2] = aabb_low[2] room_aabb_hi[2] = aabb_hi[2] return np.array(room_aabb_low), np.array(room_aabb_hi)", "return np.array(room_aabb_low), np.array(room_aabb_hi) def _set_value(self, new_value): raise NotImplementedError(\"AABB state currently", "x-y values from the room segmentation map room_aabb_low, room_aabb_hi =", "save/load AABB since it will happen due to pose caching.", "z values from pybullet room_aabb_low[2] = aabb_low[2] room_aabb_hi[2] = aabb_hi[2]", "get_all_links from igibson.object_states.object_state_base import CachingEnabledObjectState class AABB(CachingEnabledObjectState): def _compute_value(self): body_id", "raise NotImplementedError(\"AABB state currently does not support setting.\") # Nothing", "room segmentation map room_aabb_low, room_aabb_hi = self.obj.room_floor.scene.get_aabb_by_room_instance(room_instance) if room_aabb_low is", "room_aabb_hi = self.obj.room_floor.scene.get_aabb_by_room_instance(room_instance) if room_aabb_low is None: return np.array(aabb_low), np.array(aabb_hi)", "from pybullet room_aabb_low[2] = aabb_low[2] room_aabb_hi[2] = aabb_hi[2] return np.array(room_aabb_low),", "will be set to the correct RoomFloor beforehand room_instance =", "!= \"floors\" or self.obj.room_floor is None: return np.array(aabb_low), np.array(aabb_hi) #", "TODO: remove after split floors # room_floor will be set", "= self.obj.room_floor.scene.get_aabb_by_room_instance(room_instance) if room_aabb_low is None: return np.array(aabb_low), np.array(aabb_hi) #", 
"np.array(room_aabb_hi) def _set_value(self, new_value): raise NotImplementedError(\"AABB state currently does not", "import numpy as np from igibson.external.pybullet_tools.utils import aabb_union, get_aabb, get_all_links", "Get the x-y values from the room segmentation map room_aabb_low,", "the z values from pybullet room_aabb_low[2] = aabb_low[2] room_aabb_hi[2] =", "due to pose caching. def _dump(self): return None def load(self,", "not hasattr(self.obj, \"category\") or self.obj.category != \"floors\" or self.obj.room_floor is", "import CachingEnabledObjectState class AABB(CachingEnabledObjectState): def _compute_value(self): body_id = self.obj.get_body_id() all_links", "aabbs = [get_aabb(body_id, link=link) for link in all_links] aabb_low, aabb_hi", "the x-y values from the room segmentation map room_aabb_low, room_aabb_hi", "new_value): raise NotImplementedError(\"AABB state currently does not support setting.\") #", "currently does not support setting.\") # Nothing needs to be", "link=link) for link in all_links] aabb_low, aabb_hi = aabb_union(aabbs) if", "set to the correct RoomFloor beforehand room_instance = self.obj.room_floor.room_instance #", "the room segmentation map room_aabb_low, room_aabb_hi = self.obj.room_floor.scene.get_aabb_by_room_instance(room_instance) if room_aabb_low", "[get_aabb(body_id, link=link) for link in all_links] aabb_low, aabb_hi = aabb_union(aabbs)", "np.array(aabb_low), np.array(aabb_hi) # Use the z values from pybullet room_aabb_low[2]", "# Use the z values from pybullet room_aabb_low[2] = aabb_low[2]", "map room_aabb_low, room_aabb_hi = self.obj.room_floor.scene.get_aabb_by_room_instance(room_instance) if room_aabb_low is None: return", "if not hasattr(self.obj, \"category\") or self.obj.category != \"floors\" or self.obj.room_floor", "for link in all_links] aabb_low, aabb_hi = aabb_union(aabbs) if not", "not support setting.\") # Nothing needs to be done to", "self.obj.category != \"floors\" or self.obj.room_floor is None: 
return np.array(aabb_low), np.array(aabb_hi)", "= self.obj.room_floor.room_instance # Get the x-y values from the room", "# room_floor will be set to the correct RoomFloor beforehand", "to the correct RoomFloor beforehand room_instance = self.obj.room_floor.room_instance # Get", "np from igibson.external.pybullet_tools.utils import aabb_union, get_aabb, get_all_links from igibson.object_states.object_state_base import", "room_aabb_hi[2] = aabb_hi[2] return np.array(room_aabb_low), np.array(room_aabb_hi) def _set_value(self, new_value): raise", "segmentation map room_aabb_low, room_aabb_hi = self.obj.room_floor.scene.get_aabb_by_room_instance(room_instance) if room_aabb_low is None:", "aabb_hi = aabb_union(aabbs) if not hasattr(self.obj, \"category\") or self.obj.category !=", "values from pybullet room_aabb_low[2] = aabb_low[2] room_aabb_hi[2] = aabb_hi[2] return", "in all_links] aabb_low, aabb_hi = aabb_union(aabbs) if not hasattr(self.obj, \"category\")", "AABB(CachingEnabledObjectState): def _compute_value(self): body_id = self.obj.get_body_id() all_links = get_all_links(body_id) aabbs", "\"floors\" or self.obj.room_floor is None: return np.array(aabb_low), np.array(aabb_hi) # TODO:", "RoomFloor beforehand room_instance = self.obj.room_floor.room_instance # Get the x-y values", "to pose caching. def _dump(self): return None def load(self, data):", "it will happen due to pose caching. 
def _dump(self): return", "be set to the correct RoomFloor beforehand room_instance = self.obj.room_floor.room_instance", "aabb_union, get_aabb, get_all_links from igibson.object_states.object_state_base import CachingEnabledObjectState class AABB(CachingEnabledObjectState): def", "hasattr(self.obj, \"category\") or self.obj.category != \"floors\" or self.obj.room_floor is None:", "numpy as np from igibson.external.pybullet_tools.utils import aabb_union, get_aabb, get_all_links from", "from the room segmentation map room_aabb_low, room_aabb_hi = self.obj.room_floor.scene.get_aabb_by_room_instance(room_instance) if" ]
[ "dc = MockedMOR(spec='Datacenter') dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1') dc_dest = MockedMOR(spec='Datacenter')", "check instance, some params are set at # __init__ time", "count=8) # ...on hosts assertMOR(vsphere, instance, spec=\"host\", count=2) tags =", "external metadata for host, source_tags in ext_host_tags: if host ==", "= MockedMOR(spec=\"Datastore\") mocked_datacenter = MockedMOR(spec=\"Datacenter\") mocked_cluster = MockedMOR(spec=\"ClusterComputeResource\") mocked_mors_attrs =", "vsphere.log.debug.assert_not_called() def test__collect_metrics_async_hostname(vsphere, instance, aggregator): server_instance = vsphere._get_server_instance(instance) result =", "rights reserved # Licensed under Simplified BSD License (see LICENSE)", "the collect_realtime_only parameter acts as expected \"\"\" vsphere._process_mor_objects_queue_async = MagicMock()", "is 1 assert len(call_args[0][1]) == 1 def test__collect_metrics_async_compatibility(vsphere, instance): server_instance", "vsphere._get_server_instance(instance) filtered_host = MockedMOR(spec=\"HostSystem\") filtered_vm = MockedMOR(spec=\"VirtualMachine\") non_filtered_host = MockedMOR(spec=\"HostSystem\")", "value with mock.patch(\"datadog_checks.vsphere.VSphereCheck._get_all_objs\") as mock_get_all_objs, mock.patch( \"datadog_checks.vsphere.vsphere.vmodl\" ): vsphere._cache_morlist_raw(instance) #", "test # configuration is properly propagated init_config = { 'refresh_morlist_interval':", "\"vm[^2]\" instance[\"include_only_marked\"] = True # Discover hosts and virtual machines", "should be a list of size 1 since the batch", "gray.\", tags=['foo:bar'], alert_type='info', ) def test_events_gray_ignored(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):", "for metrics vsphere.excluded_host_tags = [\"vsphere_host\"] mocked_vm = MockedMOR(spec=\"VirtualMachine\") mocked_host =", "# explicitly set cache expiration times, don't use defaults so", 
"{}, [instance]) # simulate previous runs, set the last execution", "\"foo\"} vsphere.mor_cache = MagicMock() vsphere.mor_cache.get_mor.return_value = mor vsphere.metadata_cache = MagicMock()", "assert vsphere.format_metric_name(counter, compatibility=True) == \"group.name\" for rollup, short_rollup in SHORT_ROLLUP.items():", "server_instance.content.perfManager.QueryPerfCounterByLevel.assert_called_once_with(3) assert len(vsphere.metadata_cache._metric_ids[i_key]) == 1 assert len(vsphere.metadata_cache._metadata[i_key]) == 1 vsphere.format_metric_name.assert_called_once_with(counter)", "# simulate previous runs, set the last execution time in", "createdTime=now, to=to_status, datacenter=dc_arg, alarm=alarm_arg ) setattr(event, 'from', from_status) # noqa:", "setattr(event, 'from', from_status) # noqa: B009 return event def migrated_event():", "was yellow and it's now gray.\", tags=['foo:bar'], alert_type='info', ) def", "`%s` metric.', 'unknown') vsphere.log.reset_mock() vsphere.in_compatibility_mode.return_value = False vsphere._collect_metrics_async(instance, []) vsphere.log.debug.assert_not_called()", "= [MagicMock(value=[result], entity=mocked_vm)] vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value = {\"name\": \"mymetric\",", "MockedMOR(spec='HostSystem') host_arg = vim.event.HostEventArgument(host=host, name='host1') host_dest = MockedMOR(spec='HostSystem') host_dest_arg =", "spec=\"host\", tags=tags) tags = [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:rootFolder\", \"vsphere_folder:folder1\", \"vsphere_datacenter:datacenter2\", \"vsphere_compute:compute_resource2\",", "All rights reserved # Licensed under Simplified BSD License (see", "'vsphere_compute:compute_resource2', 'vsphere_type:host', ] assert all_the_tags['vm2'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1',", "name=\"vm2_guest\", spec=\"vm\", subset=True) assertMOR(vsphere, instance, 
name=\"vm4_guest\", spec=\"vm\", subset=True) def test__process_mor_objects_queue(vsphere,", "= MagicMock() result.value = [23.4] server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result], entity=mocked_vm)] vsphere.metadata_cache", "\"\"\" vsphere._process_mor_objects_queue_async = MagicMock() instance[\"collect_realtime_only\"] = False with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance)", "VM/host, datacenters are not collected for call_args in vsphere._collect_metrics_async.call_args_list: #", "'vsphere_datacenter:datacenter1', 'vsphere_cluster:compute_resource1', 'vsphere_compute:compute_resource1', 'vsphere_type:host', ] assert all_the_tags['host3'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock',", "\"unknown\", \"tags\": [\"vsphere_host:mocked_host\", \"vsphere_host:unknown\", \"vsphere_type:vm\"], } in obj_list[vim.VirtualMachine] assert len(obj_list[vim.HostSystem])", "call_args in vsphere._process_mor_objects_queue_async.call_args_list: # query_specs parameter should be a list", "check.cache_config.get_interval(CacheConfig.Morlist, i_key) == 42 assert check.cache_config.get_interval(CacheConfig.Metadata, i_key) == -42 assert", "VSphereCheck('vsphere', init_config, {}, [instance]) i_key = check._instance_key(instance) assert check.time_started >", "MockedMOR(spec=\"Datacenter\") mocked_cluster = MockedMOR(spec=\"ClusterComputeResource\") mocked_mors_attrs = { vm_no_parent: {\"name\": \"vm_no_parent\",", "aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS ) def test_service_check_ok(aggregator, instance): check", "True vsphere._collect_metrics_async(instance, []) vsphere.log.debug.assert_called_with('Skipping unknown `%s` metric.', 'unknown') vsphere.log.reset_mock() vsphere.in_compatibility_mode.return_value", "not VSphereCheck._is_excluded(included_host, {\"name\": included_host.name}, include_regexes, None) 
assert not VSphereCheck._is_excluded(included_vm, {\"name\":", "in tag break # vsphere_host tag still in cache for", "50 assert len(check.event_config) == 1 assert 'vsphere_mock' in check.event_config assert", "name=\"foo\") assert VSphereCheck._is_excluded(included_vm, {\"customValue\": []}, include_regexes, include_only_marked) def test_vms_in_filtered_host_are_filtered(vsphere, instance):", "count=1) def test__is_excluded(): \"\"\" * Exclude hosts/vms not compliant with", "= [alarm_event()] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event( \"vCenter monitor", "# Queue hasn't been initialized vsphere.log.debug.assert_called_once_with( \"Objects queue is not", "== 50 assert len(check.event_config) == 1 assert 'vsphere_mock' in check.event_config", "= False vsphere._cache_morlist_raw(instance) assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS)", "fullFormattedMessage=message, createdTime=now, to=to_status, datacenter=dc_arg, alarm=alarm_arg ) setattr(event, 'from', from_status) #", "'foo:bar', 'vsphere_host:host1', 'vsphere_host:host2', 'vsphere_datacenter:dc1', 'vsphere_datacenter:dc2', ], ) server_instance = vsphere._get_server_instance(instance)", "[counter] vsphere._cache_metrics_metadata(instance) assert not vsphere.metadata_cache._metric_ids[i_key] assert len(vsphere.metadata_cache._metadata[i_key]) == 1 vsphere.format_metric_name.assert_called_once_with(counter,", "'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_host:host3', 'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['vm1'][SOURCE_TYPE] ==", "} regex = {'host_include': '^(?!filtered_.+)'} with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): obj_list =", "and precedence of instance config over init config check =", "hosts + 2 datacenters + 2 
clusters + 1 datastore.", "from __future__ import unicode_literals import time from datetime import datetime", "= MockedMOR(spec=\"VirtualMachine\") mocked_host = MockedMOR(spec=\"HostSystem\") mocked_datastore = MockedMOR(spec=\"Datastore\") mocked_datacenter =", "len(check.event_config) == 1 assert 'vsphere_mock' in check.event_config assert not check.registry", "\"host\", \"mor\": mocked_host, \"hostname\": \"mocked_host\", \"tags\": [\"vsphere_type:host\"], } in obj_list[vim.HostSystem]", "SmartConnect fails SmartConnect.side_effect = Exception() with pytest.raises(ConnectionError): check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME,", "\"vsphere_type:vm\", ] assertMOR(vsphere, instance, name=\"vm4\", spec=\"vm\", subset=True, tags=tags) def test_use_guest_hostname(vsphere,", "instance): \"\"\" Explore the vCenter infrastructure to discover hosts, virtual", "not aggregator.events event = alarm_event(from_status='green', to_status='gray', message='Went from Green to", "tags=tags) # ...on VMs assertMOR(vsphere, instance, spec=\"vm\", count=1) tags =", "CurrentTime fails server = MagicMock() server.CurrentTime.side_effect = Exception() SmartConnect.side_effect =", "MockedMOR(spec=\"VirtualMachine\", name=\"foo\") assert VSphereCheck._is_excluded(included_vm, {\"customValue\": []}, include_regexes, include_only_marked) def test_vms_in_filtered_host_are_filtered(vsphere,", "\"mor_type\": \"datacenter\", \"mor\": mocked_datacenter, \"hostname\": None, \"tags\": [\"vsphere_folder:unknown\", \"vsphere_datacenter:datacenter\", \"vsphere_type:datacenter\"],", "Green') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) assert", "MagicMock() counter.rollupType = \"average\" counter.key = 1 vsphere.format_metric_name = MagicMock()", "MockedMOR(spec='Datastore') ds_arg = vim.event.DatastoreEventArgument(datastore=ds, 
name='ds1') ds_dest = MockedMOR(spec='Datastore') ds_dest_arg =", "properties with property collector \"\"\" server_instance = vsphere._get_server_instance(instance) with mock.patch(\"datadog_checks.vsphere.vsphere.vmodl\"):", "\"mor_type\": \"vm\", \"mor\": vm_host_parent, \"hostname\": \"unknown\", \"tags\": [\"vsphere_host:mocked_host\", \"vsphere_host:unknown\", \"vsphere_type:vm\"],", "ds=ds_arg, destDatastore=ds_dest_arg, ) return event def test_events(aggregator, vsphere, instance): with", "= vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance)", "for the check instance, some params are set at #", "datetime import datetime import mock import pytest from mock import", "* Exclude \"non-labeled\" virtual machines when the user configuration instructs", "{\"name\": \"mocked_host\", \"parent\": None}, } with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): server_instance =", "= MagicMock() counter.groupInfo.key = \"group\" counter.nameInfo.key = \"name\" counter.rollupType =", "Test host tags are excluded from external host metadata, but", "# Sample(s) include_regexes = {'host_include': \"f[o]+\", 'vm_include': \"f[o]+\"} # OK", "count=2) tags = [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:rootFolder\", \"vsphere_datacenter:datacenter1\", \"vsphere_compute:compute_resource1\", \"vsphere_cluster:compute_resource1\", \"vsphere_type:host\",", "server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result])] mor = {\"hostname\": \"foo\"} vsphere.mor_cache = MagicMock()", "VSphereCheck('vsphere', {}, {}, [instance]) i_key = check._instance_key(instance) # first run", "server_instance.content.propertyCollector.RetrievePropertiesEx.return_value = result log = MagicMock() vsphere.log = log mor_attrs", 
"vsphere.format_metric_name = MagicMock() # New way instance[\"collection_level\"] = 3 server_instance.content.perfManager.QueryPerfCounterByLevel.return_value", "MagicMock() vsphere._process_mor_objects_queue(instance) # Queue hasn't been initialized vsphere.log.debug.assert_called_once_with( \"Objects queue", "vsphere.check(instance) ext_host_tags = vsphere.get_external_host_tags() # vsphere_host tag not in external", "- host2 - folder1 - datacenter2 - compute_resource2 - host3", "first run should always cache assert check._should_cache(instance, CacheConfig.Morlist) assert check._should_cache(instance,", "\"vCenter monitor status changed on this alarm, it was gray", "= MockedMOR(spec=\"Datacenter\") dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1') alarm = MockedMOR(spec=\"Alarm\") alarm_arg", "dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1') alarm = MockedMOR(spec=\"Alarm\") alarm_arg = vim.event.AlarmEventArgument(alarm=alarm,", "Exception() with pytest.raises(ConnectionError): check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS )", "= MockedMOR(spec=\"HostSystem\") mocked_mors_attrs = { mocked_vm: { \"name\": \"mocked_vm\", \"parent\":", "instance config over init config check = VSphereCheck('vsphere', {}, {},", "ext_host_tags: if host == u\"mocked_vm\": tags = source_tags[\"vsphere\"] for tag", "# ...on hosts assertMOR(vsphere, instance, spec=\"host\", count=2) tags = [", "== [\"vsphere_host\"] instance[\"excluded_host_tags\"] = [] check = VSphereCheck('vsphere', {\"excluded_host_tags\": [\"vsphere_host\"]},", "name='ds2') event = vim.event.VmBeingHotMigratedEvent( vm=vm_arg, userName='John', fullFormattedMessage='Some error', createdTime=now, host=host_arg,", "[MagicMock(path=\"prop\", fault=\"fault\")] mor_attrs = vsphere._collect_mors_and_attributes(server_instance) log.error.assert_called_once_with('Unable to 
retrieve property %s", "\"vsphere_folder:folder1\", \"vsphere_datacenter:datacenter2\", \"vsphere_compute:compute_resource2\", \"vsphere_cluster:compute_resource2\", \"vsphere_host:host3\", \"vsphere_type:vm\", ] assertMOR(vsphere, instance, name=\"vm4\",", "Object queue should be empty after processing assert sum(vsphere.mor_objects_queue.size(i_key, res_type)", "vm1 # Not labeled - vm2 # Filtered out -", "status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS ) def test_service_check_ok(aggregator, instance): check = disable_thread_pool(VSphereCheck('disk',", "2010-2017 # All rights reserved # Licensed under Simplified BSD", "= vsphere._get_all_objs(server_instance, None, False, []) assert len(obj_list[vim.VirtualMachine]) == 2 assert", "name=\"host2\", spec=\"host\", tags=tags) tags = [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:rootFolder\", \"vsphere_folder:folder1\", \"vsphere_datacenter:datacenter2\",", "for rollup, short_rollup in SHORT_ROLLUP.items(): counter.rollupType = rollup assert vsphere.format_metric_name(counter)", "result = MagicMock() result.value = [23.4] server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result])] mor", "alarm_event(from_status='green', to_status='red', message='Some error'): now = datetime.utcnow() vm = MockedMOR(spec='VirtualMachine')", "import ( REFRESH_METRICS_METADATA_INTERVAL, REFRESH_MORLIST_INTERVAL, RESOURCE_TYPE_METRICS, SHORT_ROLLUP, ) from .utils import", "migration of this virtual machine\", exact_match=False, tags=[ 'foo:bar', 'vsphere_host:host1', 'vsphere_host:host2',", "we don't raise KeyError if the property collector failed to", "{}, {}, [instance]) i_key = check._instance_key(instance) # first run should", "mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) event = alarm_event(from_status='gray', to_status='green', message='Went from", "define a unique 'name' per vCenter instance VSphereCheck('vsphere', {}, 
{},", "= vsphere._get_server_instance(instance) filtered_host = MockedMOR(spec=\"HostSystem\") filtered_vm = MockedMOR(spec=\"VirtualMachine\") non_filtered_host =", "11 vsphere._process_mor_objects_queue(instance) # Object queue should be empty after processing", "RESOURCE_TYPE_METRICS) == 0 assert vsphere._process_mor_objects_queue_async.call_count == 0 # realtime only", "== 0 assert vsphere._process_mor_objects_queue_async.call_count == 0 # realtime only for", "{'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event( \"vCenter monitor status changed on this", "as mock_get_all_objs, mock.patch( \"datadog_checks.vsphere.vsphere.vmodl\" ): vsphere._cache_morlist_raw(instance) # Default value assert", "= \"host[2-9]\" instance[\"vm_include_only_regex\"] = \"vm[^2]\" instance[\"include_only_marked\"] = True # Discover", "MagicMock() vsphere.metadata_cache.get_metadata.return_value = {\"name\": \"unknown\"} vsphere.in_compatibility_mode = MagicMock() vsphere.log =", "\"vCenter monitor status changed on this alarm, it was green", "filtered_vm: { \"name\": \"this_vm_is_filtered\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, \"runtime.host\": filtered_host, }, non_filtered_host:", "= MagicMock() vsphere.in_compatibility_mode.return_value = True vsphere._collect_metrics_async(instance, []) vsphere.log.debug.assert_called_with('Skipping unknown `%s`", "'^(?!filtered_.+)'} with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): obj_list = vsphere._get_all_objs(server_instance, regex, False, [])", "dc = MockedMOR(spec=\"Datacenter\") dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1') alarm = MockedMOR(spec=\"Alarm\")", "aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname=\"foo\") def test_check(vsphere, instance): \"\"\" Test the check()", "'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_type:host', ] 
assert all_the_tags['vm2'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder',", "[] check = VSphereCheck('vsphere', {\"excluded_host_tags\": [\"vsphere_host\"]}, {}, [instance]) assert check.excluded_host_tags", "mocked_datacenter, \"hostname\": None, \"tags\": [\"vsphere_folder:unknown\", \"vsphere_datacenter:datacenter\", \"vsphere_type:datacenter\"], } in obj_list[vim.Datacenter]", "MagicMock() counter.groupInfo.key = \"group\" counter.nameInfo.key = \"name\" counter.rollupType = \"rollup\"", "time in the past check.cache_config.set_last(CacheConfig.Morlist, i_key, now - (2 *", "VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.OK, tags=SERVICE_CHECK_TAGS ) def test__instance_key(vsphere, instance): assert vsphere._instance_key(instance) ==", "don't use defaults so we also test # configuration is", "check._should_cache(instance, CacheConfig.Morlist) assert not check._should_cache(instance, CacheConfig.Metadata) def alarm_event(from_status='green', to_status='red', message='Some", "= [event] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event( \"vCenter monitor", "1 def test__collect_metrics_async_compatibility(vsphere, instance): server_instance = vsphere._get_server_instance(instance) server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[MagicMock()])]", "SmartConnect.return_value = server with pytest.raises(ConnectionError): check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1,", "assert len(mor_attrs) == 1 def test__cache_morlist_raw(vsphere, instance): \"\"\" Explore the", "we check for errors when collecting properties with property collector", "i_key = vsphere._instance_key(instance) counter = MagicMock() counter.rollupType = \"average\" counter.key", "= [event] vsphere.check(instance) aggregator.assert_event( \"vCenter monitor status changed on this", "with 
mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()] vsphere.event_config['vsphere_mock'] =", "\"filtered_host_number_1\", \"parent\": None}, filtered_vm: { \"name\": \"this_vm_is_filtered\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, \"runtime.host\":", "1 obj.missingSet = [MagicMock(path=\"prop\", fault=\"fault\")] mor_attrs = vsphere._collect_mors_and_attributes(server_instance) log.error.assert_called_once_with('Unable to", "len(call_args[0][1]) == 1 def test__collect_metrics_async_compatibility(vsphere, instance): server_instance = vsphere._get_server_instance(instance) server_instance.content.perfManager.QueryPerf.return_value", "\"kb\"} vsphere.in_compatibility_mode = MagicMock() vsphere.in_compatibility_mode.return_value = False vsphere._collect_metrics_async(instance, []) aggregator.assert_metric('vsphere.mymetric',", "vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [migrated_event()] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event(", "\"host\", \"mor\": non_filtered_host, \"hostname\": \"non_filtered_host_number_1\", \"tags\": [\"vsphere_type:host\"], } == obj_list[vim.HostSystem][0]", "but still stored in the cache for metrics vsphere.excluded_host_tags =", "vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value = {\"name\": \"unknown\"} vsphere.in_compatibility_mode = MagicMock()", "# OK included_host = MockedMOR(spec=\"HostSystem\", name=\"foo\") included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\")", "= 1 vsphere.format_metric_name = MagicMock() # New way instance[\"collection_level\"] =", "all_the_tags = dict(set_external_tags.call_args[0][0]) assert all_the_tags['vm4'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 
'vsphere_folder:rootFolder', 'vsphere_folder:folder1',", "def test__process_mor_objects_queue(vsphere, instance): vsphere.log = MagicMock() vsphere._process_mor_objects_queue_async = MagicMock() vsphere._process_mor_objects_queue(instance)", "labeled+monitored VM + 2 hosts + 2 datacenters + 2", "u\"mocked_vm\": tags = source_tags[\"vsphere\"] for tag in tags: assert \"vsphere_host:\"", "{ \"mor_type\": \"vm\", \"mor\": vm_no_parent, \"hostname\": \"vm_no_parent\", \"tags\": [\"vsphere_host:unknown\", \"vsphere_type:vm\"],", "spec=\"vm\", subset=True) assertMOR(vsphere, instance, name=\"vm2_guest\", spec=\"vm\", subset=True) assertMOR(vsphere, instance, name=\"vm4_guest\",", "vsphere._collect_metrics_async.call_count == 6 # One for each VM/host, datacenters are", "= [event] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) assert not aggregator.events", "\"tags\": [\"vsphere_folder:unknown\", \"vsphere_datacenter:datacenter\", \"vsphere_type:datacenter\"], } in obj_list[vim.Datacenter] assert len(obj_list[vim.ClusterComputeResource]) ==", "Sample(s) include_regexes = {'host_include': \"f[o]+\", 'vm_include': \"f[o]+\"} # OK included_host", "- host1 # Filtered out - host2 - folder1 -", "vm3 # Powered off - vm4 ``` \"\"\" # Samples", "vsphere.log.debug.assert_called_once_with( \"Objects queue is not initialized yet for instance %s,", "counter.groupInfo.key = \"group\" counter.nameInfo.key = \"name\" counter.rollupType = \"rollup\" assert", "= MockedMOR(spec='VirtualMachine') dc = MockedMOR(spec=\"Datacenter\") dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1') alarm", "should always cache assert check._should_cache(instance, CacheConfig.Morlist) assert check._should_cache(instance, CacheConfig.Metadata) #", "disable_thread_pool(VSphereCheck('disk', {}, {}, [instance])) with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as 
SmartConnect:", "obj_list[vim.Datacenter] assert len(obj_list[vim.ClusterComputeResource]) == 1 assert { \"mor_type\": \"cluster\", \"mor\":", "{'host_include': '^(?!filtered_.+)'} with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): obj_list = vsphere._get_all_objs(server_instance, regex, False,", "True # Discover hosts and virtual machines vsphere._cache_morlist_raw(instance) # Assertions:", "vsphere.in_compatibility_mode(instance) instance[\"all_metrics\"] = True assert not vsphere.in_compatibility_mode(instance) vsphere.log.warning.assert_not_called() assert not", "# SmartConnect succeeds, CurrentTime fails server = MagicMock() server.CurrentTime.side_effect =", "non_filtered_vm, \"hostname\": \"this_vm_is_not_filtered\", \"tags\": [\"vsphere_host:non_filtered_host_number_1\", \"vsphere_type:vm\"], } == obj_list[vim.VirtualMachine][0] assert", "check.excluded_host_tags == [] check = VSphereCheck('vsphere', {\"excluded_host_tags\": [\"vsphere_host\"]}, {}, [instance])", "result.value = [23.4] server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result])] mor = {\"hostname\": \"foo\"}", "= vim.event.AlarmEventArgument(alarm=alarm, name='alarm1') entity = vim.event.ManagedEntityEventArgument(entity=vm, name='vm1') event = vim.event.AlarmStatusChangedEvent(", "== 1 vsphere.format_metric_name.assert_called_once_with(counter) # Compatibility mode instance[\"all_metrics\"] = False del", "instance[\"use_guest_hostname\"] = True vsphere._cache_morlist_raw(instance) assertMOR(vsphere, instance, spec=\"vm\", count=3) # Fallback", "= MockedMOR(spec=\"VirtualMachine\") non_filtered_host = MockedMOR(spec=\"HostSystem\") non_filtered_vm = MockedMOR(spec=\"VirtualMachine\") mocked_mors_attrs =", "'vsphere_host:host3', 'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['host2'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder',", "the 2 datacenters, 
then 2 clusters, then the datastore assert", "[ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_type:host', ] assert", "'refresh_metrics_metadata_interval': 2 * REFRESH_METRICS_METADATA_INTERVAL, } check = VSphereCheck('vsphere', init_config, {},", "vim.VirtualMachinePowerState.poweredOn, }, mocked_host: {\"name\": \"mocked_host\", \"parent\": None}, } with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\",", "userName='John', fullFormattedMessage='Some error', createdTime=now, host=host_arg, destHost=host_dest_arg, datacenter=dc_arg, destDatacenter=dc_dest_arg, ds=ds_arg, destDatastore=ds_dest_arg,", "OK included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\", label=True) assert not VSphereCheck._is_excluded( included_vm,", "} in obj_list[vim.HostSystem] assert len(obj_list[vim.Datastore]) == 1 assert { \"mor_type\":", "to Red') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance)", "entity=mocked_vm)] vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value = {\"name\": \"mymetric\", \"unit\": \"kb\"}", "{\"name\": \"filtered_host_number_1\", \"parent\": None}, filtered_vm: { \"name\": \"this_vm_is_filtered\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn,", "counter.rollupType = \"average\" counter.key = 1 vsphere.format_metric_name = MagicMock() #", "assert 'vsphere_mock' in check.event_config assert not check.registry assert not check.latest_event_query", "mocked_vm = MockedMOR(spec=\"VirtualMachine\") mocked_host = MockedMOR(spec=\"HostSystem\") mocked_mors_attrs = { mocked_vm:", "mock import pytest from mock import MagicMock from pyVmomi import", "when guest hostname not available assertMOR(vsphere, instance, 
name=\"vm1\", spec=\"vm\", subset=True)", "datadog_checks.vsphere.cache_config import CacheConfig from datadog_checks.vsphere.common import SOURCE_TYPE from datadog_checks.vsphere.errors import", "\"vsphere_compute:compute_resource2\", \"vsphere_cluster:compute_resource2\", \"vsphere_host:host3\", \"vsphere_type:vm\", ] assertMOR(vsphere, instance, name=\"vm4\", spec=\"vm\", subset=True,", "tags = [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:rootFolder\", \"vsphere_datacenter:datacenter1\", \"vsphere_compute:compute_resource1\", \"vsphere_cluster:compute_resource1\", \"vsphere_type:host\", ]", "not available assertMOR(vsphere, instance, name=\"vm1\", spec=\"vm\", subset=True) assertMOR(vsphere, instance, name=\"vm2_guest\",", "\"mocked_host\", \"tags\": [\"vsphere_type:host\"], } in obj_list[vim.HostSystem] assert len(obj_list[vim.Datastore]) == 1", "vsphere._instance_key(instance) counter = MagicMock() counter.rollupType = \"average\" counter.key = 1", "assert not vsphere.in_compatibility_mode(instance) vsphere.log.warning.assert_not_called() assert not vsphere.in_compatibility_mode(instance, log_warning=True) vsphere.log.warning.assert_called_once() del", "] assertMOR(vsphere, instance, name=\"vm4\", spec=\"vm\", subset=True, tags=tags) def test_use_guest_hostname(vsphere, instance):", "vsphere._get_server_instance(instance) i_key = vsphere._instance_key(instance) counter = MagicMock() counter.rollupType = \"average\"", "= vsphere._get_server_instance(instance) event = alarm_event(from_status='gray', to_status='green', message='Went from Gray to", ".utils import MockedMOR, assertMOR, disable_thread_pool, get_mocked_server SERVICE_CHECK_TAGS = [\"vcenter_server:vsphere_mock\", \"vcenter_host:None\",", "datadog_checks.vsphere.errors import BadConfigError, ConnectionError from datadog_checks.vsphere.vsphere import ( REFRESH_METRICS_METADATA_INTERVAL, REFRESH_MORLIST_INTERVAL,", "- datacenter2 - compute_resource2 - host3 - vm1 # Not", 
"vsphere.metadata_cache.set_metadata.assert_called_once() vsphere.metadata_cache.set_metric_ids.assert_called_once() def test__cache_metrics_metadata_compatibility(vsphere, instance): server_instance = vsphere._get_server_instance(instance) i_key =", "assert vsphere.in_compatibility_mode(instance) vsphere.log.warning.assert_not_called() assert vsphere.in_compatibility_mode(instance, log_warning=True) vsphere.log.warning.assert_called_once() def test_format_metric_name(vsphere): counter", "= {\"hostname\": \"foo\"} vsphere.mor_cache = MagicMock() vsphere.mor_cache.get_mor.return_value = mor vsphere.metadata_cache", "assert check.excluded_host_tags == [] check = VSphereCheck('vsphere', {\"excluded_host_tags\": [\"vsphere_host\"]}, {},", "Green to Gray') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.check(instance) assert not aggregator.events", "= MockedMOR(spec=\"HostSystem\", name=\"bar\") excluded_vm = MockedMOR(spec=\"VirtualMachine\", name=\"bar\") assert VSphereCheck._is_excluded(excluded_host, {\"name\":", "instance, spec=\"vm\", count=3) # Fallback on VM name when guest", "}, mocked_host: {\"name\": \"mocked_host\", \"parent\": None}, } with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs):", "with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): obj_list = vsphere._get_all_objs(server_instance, regex, False, []) assert", "server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[MagicMock()])] vsphere.mor_cache = MagicMock() vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value", "{ \"mor_type\": \"datacenter\", \"mor\": mocked_datacenter, \"hostname\": None, \"tags\": [\"vsphere_folder:unknown\", \"vsphere_datacenter:datacenter\",", "= vsphere.get_external_host_tags() # vsphere_host tag not in external metadata for", "BSD License (see LICENSE) 
from __future__ import unicode_literals import time", "event = alarm_event(from_status='yellow', to_status='gray', message='Went from Yellow to Gray') server_instance.content.eventManager.QueryEvents.return_value", "check.event_config assert not check.registry assert not check.latest_event_query assert check.batch_collector_size ==", "'name' per vCenter instance VSphereCheck('vsphere', {}, {}, [{'': ''}]) init_config", "REFRESH_METRICS_METADATA_INTERVAL, REFRESH_MORLIST_INTERVAL, RESOURCE_TYPE_METRICS, SHORT_ROLLUP, ) from .utils import MockedMOR, assertMOR,", "): vsphere._cache_morlist_raw(instance) # Default value assert not mock_get_all_objs.call_args[1][\"use_guest_hostname\"] # use", "vsphere._process_mor_objects_queue_async = MagicMock() vsphere._process_mor_objects_queue(instance) # Queue hasn't been initialized vsphere.log.debug.assert_called_once_with(", "'vsphere_compute:compute_resource2', 'vsphere_host:host3', 'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['vm1'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock',", "missing attributes \"\"\" server_instance = vsphere._get_server_instance(instance) vm_no_parent = MockedMOR(spec=\"VirtualMachine\") vm_no_powerstate", "list of size 1 since the batch size is 1", "non_filtered_host, }, } regex = {'host_include': '^(?!filtered_.+)'} with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs):", "- vm1 # Not labeled - vm2 # Filtered out", "instance %s, skipping processing\", vsphere._instance_key(instance) ) vsphere.batch_morlist_size = 1 i_key", "= vim.event.DatacenterEventArgument(datacenter=dc_dest, name='dc2') ds = MockedMOR(spec='Datastore') ds_arg = vim.event.DatastoreEventArgument(datastore=ds, name='ds1')", "{\"name\": \"vm_no_parent\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn}, vm_no_powerstate: {\"name\": \"vm_no_powerstate\"}, vm_host_parent: {\"parent\": mocked_host,", 
"\"vsphere_compute:compute_resource1\", \"vsphere_cluster:compute_resource1\", \"vsphere_type:host\", ] assertMOR(vsphere, instance, name=\"host2\", spec=\"host\", tags=tags) tags", "host_arg = vim.event.HostEventArgument(host=host, name='host1') host_dest = MockedMOR(spec='HostSystem') host_dest_arg = vim.event.HostEventArgument(host=host_dest,", "Test that we check for errors when collecting properties with", "True} vsphere.check(instance) aggregator.assert_event( \"John has launched a hot migration of", "del instance[\"collection_level\"] vsphere.format_metric_name.reset_mock() server_instance.content.perfManager.perfCounter = [counter] vsphere._cache_metrics_metadata(instance) assert not vsphere.metadata_cache._metric_ids[i_key]", "now red.\", tags=['foo:bar'] ) event = alarm_event(from_status='yellow', to_status='gray', message='Went from", "some params are set at # __init__ time and we", "hosts assertMOR(vsphere, instance, spec=\"host\", count=2) tags = [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:rootFolder\",", "{\"hostname\": \"foo\"} vsphere.mor_cache = MagicMock() vsphere.mor_cache.get_mor.return_value = mor vsphere.metadata_cache =", "process the 2 datacenters, then 2 clusters, then the datastore", "to_status='green', message='Went from Gray to Green') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.event_config['vsphere_mock']", "spec=\"vm\", subset=True) assertMOR(vsphere, instance, name=\"vm4_guest\", spec=\"vm\", subset=True) def test__process_mor_objects_queue(vsphere, instance):", "event def migrated_event(): now = datetime.utcnow() vm = MockedMOR(spec='VirtualMachine', name='vm1')", "with mock.patch(\"datadog_checks.vsphere.vsphere.vmodl\"): # Discover hosts and virtual machines instance[\"use_guest_hostname\"] =", "now - (2 * REFRESH_MORLIST_INTERVAL)) check.cache_config.set_last(CacheConfig.Metadata, i_key, now - (2", "instance, name=\"vm1\", spec=\"vm\", subset=True) assertMOR(vsphere, instance, 
name=\"vm2_guest\", spec=\"vm\", subset=True) assertMOR(vsphere,", "in obj_list[vim.HostSystem] assert len(obj_list[vim.Datastore]) == 1 assert { \"mor_type\": \"datastore\",", "log.error.assert_called_once_with('Unable to retrieve property %s for object %s: %s', 'prop',", "tag still in cache for sending with metrics aggregator.assert_metric('vsphere.mymetric', value=23.4,", "after processing assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) ==", "\"parent\": None}, filtered_vm: { \"name\": \"this_vm_is_filtered\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, \"runtime.host\": filtered_host,", "mor = {\"hostname\": \"foo\"} vsphere.mor_cache = MagicMock() vsphere.mor_cache.get_mor.return_value = mor", "in obj_list[vim.Datastore] assert len(obj_list[vim.Datacenter]) == 1 assert { \"mor_type\": \"datacenter\",", "status=VSphereCheck.OK, tags=SERVICE_CHECK_TAGS ) def test__instance_key(vsphere, instance): assert vsphere._instance_key(instance) == \"vsphere_mock\"", "== [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_datacenter:datacenter1', 'vsphere_cluster:compute_resource1', 'vsphere_compute:compute_resource1', 'vsphere_type:host', ] def", "= True vsphere._process_mor_objects_queue_async.reset_mock() with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) assert vsphere._process_mor_objects_queue_async.call_count ==", "vsphere.in_compatibility_mode(instance, log_warning=True) vsphere.log.warning.assert_called_once() del instance[\"collection_level\"] vsphere.log.reset_mock() assert vsphere.in_compatibility_mode(instance) vsphere.log.warning.assert_not_called() assert", "hostname instance[\"use_guest_hostname\"] = True vsphere._cache_morlist_raw(instance) assert mock_get_all_objs.call_args[1][\"use_guest_hostname\"] with mock.patch(\"datadog_checks.vsphere.vsphere.vmodl\"): #", "{}, {}, 
[instance])) with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect: SmartConnect.return_value", "changed on this alarm, it was gray and it's now", "host=host_arg, destHost=host_dest_arg, datacenter=dc_arg, destDatacenter=dc_dest_arg, ds=ds_arg, destDatastore=ds_dest_arg, ) return event def", "changed on this alarm, it was yellow and it's now", "def test_excluded_host_tags(vsphere, instance, aggregator): # Check default value and precedence", "not vsphere.metadata_cache._metric_ids[i_key] assert len(vsphere.metadata_cache._metadata[i_key]) == 1 vsphere.format_metric_name.assert_called_once_with(counter, compatibility=True) def test_in_compatibility_mode(vsphere,", "with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere.batch_morlist_size = 1 vsphere._collect_metrics_async = MagicMock() vsphere._cache_metrics_metadata(instance) vsphere._cache_morlist_raw(instance)", "= [alarm_event()] vsphere.check(instance) aggregator.assert_event( \"vCenter monitor status changed on this", "2 datacenters, then 2 clusters, then the datastore assert vsphere._process_mor_objects_queue_async.call_count", "check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS ) aggregator.reset() # SmartConnect", "test_collect_realtime_only(vsphere, instance): \"\"\" Test the collect_realtime_only parameter acts as expected", "vsphere.log = log mor_attrs = vsphere._collect_mors_and_attributes(server_instance) log.error.assert_not_called() assert len(mor_attrs) ==", "\"host[2-9]\" instance[\"vm_include_only_regex\"] = \"vm[^2]\" instance[\"include_only_marked\"] = True # Discover hosts", "False, []) assert len(obj_list[vim.VirtualMachine]) == 2 assert { \"mor_type\": \"vm\",", "server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()] vsphere.check(instance) aggregator.assert_event( 
\"vCenter monitor status changed on", "alarm_event(from_status='gray', to_status='green', message='Went from Gray to Green') server_instance.content.eventManager.QueryEvents.return_value = [event]", "not in tag break # vsphere_host tag still in cache", "failed to collect some attributes and that we handle the", "vsphere.in_compatibility_mode = MagicMock() vsphere.in_compatibility_mode.return_value = False vsphere._collect_metrics_async(instance, []) aggregator.assert_metric('vsphere.mymetric', value=23.4,", "event = vim.event.AlarmStatusChangedEvent( entity=entity, fullFormattedMessage=message, createdTime=now, to=to_status, datacenter=dc_arg, alarm=alarm_arg )", "vim.event.AlarmStatusChangedEvent( entity=entity, fullFormattedMessage=message, createdTime=now, to=to_status, datacenter=dc_arg, alarm=alarm_arg ) setattr(event, 'from',", "'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_datacenter:datacenter1', 'vsphere_cluster:compute_resource1', 'vsphere_compute:compute_resource1', 'vsphere_type:host', ] assert all_the_tags['host3'][SOURCE_TYPE] ==", "'refresh_metrics_metadata_interval': -42, 'batch_property_collector_size': -1, } check = VSphereCheck('vsphere', init_config, {},", "= True vsphere._cache_morlist_raw(instance) assertMOR(vsphere, instance, spec=\"vm\", count=3) # Fallback on", "2 datacenters + 2 clusters + 1 datastore. 
assertMOR(vsphere, instance,", "i_key) == -42 assert check.clean_morlist_interval == 50 assert len(check.event_config) ==", "...on hosts assertMOR(vsphere, instance, spec=\"host\", count=2) tags = [ \"vcenter_server:vsphere_mock\",", "{'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event( \"John has launched a hot migration", "fault=\"fault\")] mor_attrs = vsphere._collect_mors_and_attributes(server_instance) log.error.assert_called_once_with('Unable to retrieve property %s for", "# Called once to process the 2 datacenters, then 2", "{}, [instance]) assert check.excluded_host_tags == [] check = VSphereCheck('vsphere', {\"excluded_host_tags\":", "if the property collector failed to collect some attributes and", "for sending with metrics aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname=\"mocked_vm\", count=1) aggregator.assert_metric_has_tag('vsphere.mymetric', tag=\"vsphere_host:mocked_host\",", "\"this_vm_is_not_filtered\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, \"runtime.host\": non_filtered_host, }, } regex = {'host_include':", "\"tags\": [\"vsphere_host:mocked_host\", \"vsphere_host:unknown\", \"vsphere_type:vm\"], } in obj_list[vim.VirtualMachine] assert len(obj_list[vim.HostSystem]) ==", "import vim from datadog_checks.vsphere import VSphereCheck from datadog_checks.vsphere.cache_config import CacheConfig", "\"datacenter\", \"mor\": mocked_datacenter, \"hostname\": None, \"tags\": [\"vsphere_folder:unknown\", \"vsphere_datacenter:datacenter\", \"vsphere_type:datacenter\"], }", "under Simplified BSD License (see LICENSE) from __future__ import unicode_literals", "name=\"bar\") excluded_vm = MockedMOR(spec=\"VirtualMachine\", name=\"bar\") assert VSphereCheck._is_excluded(excluded_host, {\"name\": excluded_host.name}, include_regexes,", "= source_tags[\"vsphere\"] for tag in tags: assert \"vsphere_host:\" not in", "the cache for metrics vsphere.excluded_host_tags = [\"vsphere_host\"] mocked_vm 
= MockedMOR(spec=\"VirtualMachine\")", "== 0 # realtime only for call_args in vsphere._process_mor_objects_queue_async.call_args_list: #", "\"name\": \"this_vm_is_not_filtered\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, \"runtime.host\": non_filtered_host, }, } regex =", "Filtered out - host2 - folder1 - datacenter2 - compute_resource2", "= alarm_event(from_status='gray', to_status='green', message='Went from Gray to Green') server_instance.content.eventManager.QueryEvents.return_value =", "'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_datacenter:datacenter1', 'vsphere_cluster:compute_resource1', 'vsphere_compute:compute_resource1', 'vsphere_type:host', ] def test_service_check_ko(aggregator, instance):", "50 assert check.excluded_host_tags == [] def test_excluded_host_tags(vsphere, instance, aggregator): #", "- compute_resource1 - host1 # Filtered out - host2 -", "def test__collect_mors_and_attributes(vsphere, instance): \"\"\" Test that we check for errors", "in the past check.cache_config.set_last(CacheConfig.Morlist, i_key, now - (2 * REFRESH_MORLIST_INTERVAL))", "2 clusters, then the datastore assert vsphere._process_mor_objects_queue_async.call_count == 3 instance[\"collect_realtime_only\"]", "== \"group.name.{}\".format(short_rollup) def test_collect_metrics(vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere.batch_morlist_size = 1", "assert not vsphere.metadata_cache._metric_ids[i_key] assert len(vsphere.metadata_cache._metadata[i_key]) == 1 vsphere.format_metric_name.assert_called_once_with(counter, compatibility=True) def", "cache for sending with metrics aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname=\"mocked_vm\", count=1) aggregator.assert_metric_has_tag('vsphere.mymetric',", "...on VMs assertMOR(vsphere, instance, spec=\"vm\", count=1) tags = [ \"vcenter_server:vsphere_mock\",", "still stored in the cache for metrics vsphere.excluded_host_tags = 
[\"vsphere_host\"]", "with the user's `*_include` configuration. * Exclude \"non-labeled\" virtual machines", "test_excluded_host_tags(vsphere, instance, aggregator): # Check default value and precedence of", "def test_collect_realtime_only(vsphere, instance): \"\"\" Test the collect_realtime_only parameter acts as", "name=\"host3\", spec=\"host\", tags=tags) # ...on VMs assertMOR(vsphere, instance, spec=\"vm\", count=1)", "server with pytest.raises(ConnectionError): check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS )", "vm = MockedMOR(spec='VirtualMachine', name='vm1') vm_arg = vim.event.VmEventArgument(vm=vm) host = MockedMOR(spec='HostSystem')", "[ \"vcenter_server:vsphere_mock\", \"vsphere_folder:rootFolder\", \"vsphere_folder:folder1\", \"vsphere_datacenter:datacenter2\", \"vsphere_compute:compute_resource2\", \"vsphere_cluster:compute_resource2\", \"vsphere_type:host\", ] assertMOR(vsphere,", "cache for metrics vsphere.excluded_host_tags = [\"vsphere_host\"] mocked_vm = MockedMOR(spec=\"VirtualMachine\") mocked_host", "[\"vsphere_datastore:unknown\", \"vsphere_type:datastore\"], } in obj_list[vim.Datastore] assert len(obj_list[vim.Datacenter]) == 1 assert", "assert vsphere._process_mor_objects_queue_async.call_count == 5 # 2 datacenters, 2 clusters, 1", "server_instance.content.perfManager.QueryPerfCounterByLevel.return_value = [counter] vsphere._cache_metrics_metadata(instance) server_instance.content.perfManager.QueryPerfCounterByLevel.assert_called_once_with(3) assert len(vsphere.metadata_cache._metric_ids[i_key]) == 1 assert", "MockedMOR(spec='Datacenter') dc_dest_arg = vim.event.DatacenterEventArgument(datacenter=dc_dest, name='dc2') ds = MockedMOR(spec='Datastore') ds_arg =", "vsphere.metadata_cache.init_instance.assert_called_once_with(vsphere._instance_key(instance)) vsphere.metadata_cache.set_metadata.assert_called_once() 
vsphere.metadata_cache.set_metric_ids.assert_called_once() def test__cache_metrics_metadata_compatibility(vsphere, instance): server_instance = vsphere._get_server_instance(instance) i_key", "pytest.raises(BadConfigError): # Must define a unique 'name' per vCenter instance", "= MockedMOR(spec='Datastore') ds_arg = vim.event.DatastoreEventArgument(datastore=ds, name='ds1') ds_dest = MockedMOR(spec='Datastore') ds_dest_arg", "= MockedMOR(spec='Datacenter') dc_dest_arg = vim.event.DatacenterEventArgument(datacenter=dc_dest, name='dc2') ds = MockedMOR(spec='Datastore') ds_arg", "vms belonging to a filtered host are also filtered\"\"\" server_instance", "configuration. * Exclude \"non-labeled\" virtual machines when the user configuration", "] assertMOR(vsphere, instance, name=\"host2\", spec=\"host\", tags=tags) tags = [ \"vcenter_server:vsphere_mock\",", "\"vm_no_parent\", \"tags\": [\"vsphere_host:unknown\", \"vsphere_type:vm\"], } in obj_list[vim.VirtualMachine] assert { \"mor_type\":", "- (2 * REFRESH_MORLIST_INTERVAL)) check.cache_config.set_last(CacheConfig.Metadata, i_key, now - (2 *", "are also filtered\"\"\" server_instance = vsphere._get_server_instance(instance) filtered_host = MockedMOR(spec=\"HostSystem\") filtered_vm", "Gray to Green') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True}", "i_key, now - (2 * REFRESH_MORLIST_INTERVAL)) check.cache_config.set_last(CacheConfig.Metadata, i_key, now -", "\"vsphere_type:datastore\"], } in obj_list[vim.Datastore] assert len(obj_list[vim.Datacenter]) == 1 assert {", "assert check.clean_morlist_interval == 50 assert len(check.event_config) == 1 assert 'vsphere_mock'", "= MagicMock() instance[\"collect_realtime_only\"] = False with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) #", "unknown `%s` metric.', 'unknown') vsphere.log.reset_mock() 
vsphere.in_compatibility_mode.return_value = False vsphere._collect_metrics_async(instance, [])", "tag not in external metadata for host, source_tags in ext_host_tags:", "test_events(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value =", ") def test_events_gray_ignored(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance)", "None}, mocked_datastore: {}, mocked_cluster: {\"name\": \"cluster\"}, mocked_datacenter: {\"parent\": MockedMOR(spec=\"Folder\", name=\"unknown", "\"vsphere_folder:folder1\", \"vsphere_datacenter:datacenter2\", \"vsphere_compute:compute_resource2\", \"vsphere_cluster:compute_resource2\", \"vsphere_type:host\", ] assertMOR(vsphere, instance, name=\"host3\", spec=\"host\",", "{\"excluded_host_tags\": [\"vsphere_host\"]}, {}, [instance]) assert check.excluded_host_tags == [\"vsphere_host\"] instance[\"excluded_host_tags\"] =", "Fallback on VM name when guest hostname not available assertMOR(vsphere,", "status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS ) aggregator.reset() # SmartConnect succeeds, CurrentTime fails", "\"vsphere_type:cluster\"], } in obj_list[vim.ClusterComputeResource] def test__collect_mors_and_attributes(vsphere, instance): \"\"\" Test that", "] assert all_the_tags['host3'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2',", "# Not OK! 
excluded_host = MockedMOR(spec=\"HostSystem\", name=\"bar\") excluded_vm = MockedMOR(spec=\"VirtualMachine\",", "host2 - folder1 - datacenter2 - compute_resource2 - host3 -", "\"vsphere_datacenter:datacenter2\", \"vsphere_compute:compute_resource2\", \"vsphere_cluster:compute_resource2\", \"vsphere_type:host\", ] assertMOR(vsphere, instance, name=\"host3\", spec=\"host\", tags=tags)", "= False del instance[\"collection_level\"] vsphere.format_metric_name.reset_mock() server_instance.content.perfManager.perfCounter = [counter] vsphere._cache_metrics_metadata(instance) assert", "as SmartConnect: # SmartConnect fails SmartConnect.side_effect = Exception() with pytest.raises(ConnectionError):", "dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1') dc_dest = MockedMOR(spec='Datacenter') dc_dest_arg = vim.event.DatacenterEventArgument(datacenter=dc_dest,", "vsphere._collect_metrics_async(instance, []) aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname=\"foo\") def test_check(vsphere, instance): \"\"\" Test", "with pytest.raises(BadConfigError): vsphere._instance_key(instance) def test__should_cache(instance): now = time.time() # do", "[migrated_event()] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event( \"John has launched", "with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): server_instance = vsphere._get_server_instance(instance) result = MagicMock() result.value", "__future__ import unicode_literals import time from datetime import datetime import", "# ...on VMs assertMOR(vsphere, instance, spec=\"vm\", count=1) tags = [", "assert len(obj_list[vim.VirtualMachine]) == 1 assert len(obj_list[vim.HostSystem]) == 1 assert {", "subset=True) assertMOR(vsphere, instance, name=\"vm4_guest\", spec=\"vm\", subset=True) def test__process_mor_objects_queue(vsphere, instance): vsphere.log", 
"MockedMOR(spec=\"HostSystem\") non_filtered_vm = MockedMOR(spec=\"VirtualMachine\") mocked_mors_attrs = { filtered_host: {\"name\": \"filtered_host_number_1\",", "= vsphere._get_server_instance(instance) vm_no_parent = MockedMOR(spec=\"VirtualMachine\") vm_no_powerstate = MockedMOR(spec=\"VirtualMachine\") vm_host_parent =", "== 1 assert len(obj_list[vim.HostSystem]) == 1 assert { \"mor_type\": \"vm\",", "'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_host:host3', 'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['host2'][SOURCE_TYPE] ==", "been initialized vsphere.log.debug.assert_called_once_with( \"Objects queue is not initialized yet for", "in obj_list[vim.VirtualMachine] assert { \"mor_type\": \"vm\", \"mor\": vm_host_parent, \"hostname\": \"unknown\",", "get_mocked_server() check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.OK, tags=SERVICE_CHECK_TAGS ) def test__instance_key(vsphere, instance):", "def test__cache_morlist_raw(vsphere, instance): \"\"\" Explore the vCenter infrastructure to discover", "\"vsphere_type:vm\"], } in obj_list[vim.VirtualMachine] assert { \"mor_type\": \"vm\", \"mor\": vm_host_parent,", "'clean_morlist_interval': 50, 'refresh_morlist_interval': 42, 'refresh_metrics_metadata_interval': -42, 'batch_property_collector_size': -1, } check", "mocked_mors_attrs = { filtered_host: {\"name\": \"filtered_host_number_1\", \"parent\": None}, filtered_vm: {", "1 assert { \"mor_type\": \"datacenter\", \"mor\": mocked_datacenter, \"hostname\": None, \"tags\":", "clusters, then the datastore assert vsphere._process_mor_objects_queue_async.call_count == 3 instance[\"collect_realtime_only\"] =", "green and it's now red.\", tags=['foo:bar'] ) def test_events_tags(aggregator, vsphere,", "instance[\"collection_level\"] = 2 assert not vsphere.in_compatibility_mode(instance) instance[\"all_metrics\"] = True assert", 
"= { filtered_host: {\"name\": \"filtered_host_number_1\", \"parent\": None}, filtered_vm: { \"name\":", "== 0 assert check.batch_morlist_size == 50 assert check.excluded_host_tags == []", "filtered_host, }, non_filtered_host: {\"name\": \"non_filtered_host_number_1\", \"parent\": None}, non_filtered_vm: { \"name\":", "} with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): obj_list = vsphere._get_all_objs(server_instance, None, False, [])", "it's now red.\", tags=['foo:bar'] ) def test_events_gray_handled(aggregator, vsphere, instance): with", "(2 * REFRESH_MORLIST_INTERVAL)) check.cache_config.set_last(CacheConfig.Metadata, i_key, now - (2 * REFRESH_METRICS_METADATA_INTERVAL))", "vim.VirtualMachinePowerState.poweredOn, \"runtime.host\": filtered_host, }, non_filtered_host: {\"name\": \"non_filtered_host_number_1\", \"parent\": None}, non_filtered_vm:", "\"vsphere_mock\" del instance['name'] with pytest.raises(BadConfigError): vsphere._instance_key(instance) def test__should_cache(instance): now =", "[MagicMock(value=[MagicMock()])] vsphere.mor_cache = MagicMock() vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value = {\"name\":", "SmartConnect.side_effect = None SmartConnect.return_value = server with pytest.raises(ConnectionError): check.check(instance) aggregator.assert_service_check(", "alarm=alarm_arg ) setattr(event, 'from', from_status) # noqa: B009 return event", "vim.event.VmBeingHotMigratedEvent( vm=vm_arg, userName='John', fullFormattedMessage='Some error', createdTime=now, host=host_arg, destHost=host_dest_arg, datacenter=dc_arg, destDatacenter=dc_dest_arg,", "also test # configuration is properly propagated init_config = {", "tag in tags: assert \"vsphere_host:\" not in tag break #", "= datetime.utcnow() vm = MockedMOR(spec='VirtualMachine', name='vm1') vm_arg = vim.event.VmEventArgument(vm=vm) host", "'vsphere_datacenter:datacenter1', 
'vsphere_cluster:compute_resource1', 'vsphere_compute:compute_resource1', 'vsphere_type:host', ] def test_service_check_ko(aggregator, instance): check =", "SmartConnect.side_effect = Exception() with pytest.raises(ConnectionError): check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1,", "check.server_instances assert check.cache_config.get_interval(CacheConfig.Morlist, i_key) == 42 assert check.cache_config.get_interval(CacheConfig.Metadata, i_key) ==", "check.time_started > 0 assert not check.server_instances assert check.cache_config.get_interval(CacheConfig.Morlist, i_key) ==", "excluded_vm = MockedMOR(spec=\"VirtualMachine\", name=\"bar\") assert VSphereCheck._is_excluded(excluded_host, {\"name\": excluded_host.name}, include_regexes, None)", "# Fallback on VM name when guest hostname not available", "in SHORT_ROLLUP.items(): counter.rollupType = rollup assert vsphere.format_metric_name(counter) == \"group.name.{}\".format(short_rollup) def", "\"group\" counter.nameInfo.key = \"name\" counter.rollupType = \"rollup\" assert vsphere.format_metric_name(counter, compatibility=True)", "* REFRESH_METRICS_METADATA_INTERVAL)) with mock.patch(\"time.time\", return_value=now): assert not check._should_cache(instance, CacheConfig.Morlist) assert", "monitor status changed on this alarm, it was yellow and", "filtered_vm = MockedMOR(spec=\"VirtualMachine\") non_filtered_host = MockedMOR(spec=\"HostSystem\") non_filtered_vm = MockedMOR(spec=\"VirtualMachine\") mocked_mors_attrs", "{}, [instance]) assert check.excluded_host_tags == [\"vsphere_host\"] instance[\"excluded_host_tags\"] = [] check", "\"vm\", \"mor\": non_filtered_vm, \"hostname\": \"this_vm_is_not_filtered\", \"tags\": [\"vsphere_host:non_filtered_host_number_1\", \"vsphere_type:vm\"], } ==", "def test_use_guest_hostname(vsphere, instance): # Default value with mock.patch(\"datadog_checks.vsphere.VSphereCheck._get_all_objs\") as 
mock_get_all_objs,", "= False vsphere.check(instance) ext_host_tags = vsphere.get_external_host_tags() # vsphere_host tag not", "init_config = { 'refresh_morlist_interval': 2 * REFRESH_MORLIST_INTERVAL, 'refresh_metrics_metadata_interval': 2 *", "\"datacenter\"}, } with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): obj_list = vsphere._get_all_objs(server_instance, None, False,", "'vsphere_type:host', ] assert all_the_tags['vm2'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2',", "1 assert { \"mor_type\": \"datastore\", \"mor\": mocked_datastore, \"hostname\": None, \"tags\":", "+ 2 clusters + 1 datastore. assertMOR(vsphere, instance, count=8) #", "MagicMock() # New way instance[\"collection_level\"] = 3 server_instance.content.perfManager.QueryPerfCounterByLevel.return_value = [counter]", "\"parent\": mocked_host, \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, }, mocked_host: {\"name\": \"mocked_host\", \"parent\": None},", "the vCenter infrastructure to discover hosts, virtual machines. 
Input topology:", "= vsphere._get_server_instance(instance) event = alarm_event(from_status='gray', message='Went from Gray to Red')", "from datetime import datetime import mock import pytest from mock", "= {\"name\": \"unknown\"} vsphere.in_compatibility_mode = MagicMock() vsphere.log = MagicMock() vsphere.in_compatibility_mode.return_value", "0 assert check.batch_morlist_size == 50 assert check.excluded_host_tags == [] def", "tags=tags) def test_use_guest_hostname(vsphere, instance): # Default value with mock.patch(\"datadog_checks.vsphere.VSphereCheck._get_all_objs\") as", "\"hostname\": None, \"tags\": [\"vsphere_datastore:unknown\", \"vsphere_type:datastore\"], } in obj_list[vim.Datastore] assert len(obj_list[vim.Datacenter])", "mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) assert vsphere._process_mor_objects_queue_async.call_count == 0 def test__cache_metrics_metadata(vsphere, instance):", "instance): \"\"\" Test the collect_realtime_only parameter acts as expected \"\"\"", "mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect: # SmartConnect fails SmartConnect.side_effect = Exception() with", "vsphere._collect_metrics_async.call_args_list: # query_specs parameter should be a list of size", "== 2 assert { \"mor_type\": \"vm\", \"mor\": vm_no_parent, \"hostname\": \"vm_no_parent\",", "{\"excluded_host_tags\": [\"vsphere_host\"]}, {}, [instance]) assert check.excluded_host_tags == [] # Test", "import CacheConfig from datadog_checks.vsphere.common import SOURCE_TYPE from datadog_checks.vsphere.errors import BadConfigError,", "monitor status changed on this alarm, it was gray and", "some attributes and that we handle the case were there", "vsphere.batch_morlist_size = 1 i_key = vsphere._instance_key(instance) with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) assert", "instance): with 
mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) event = alarm_event(from_status='gray', message='Went", "vim.VirtualMachinePowerState.poweredOn}, vm_no_powerstate: {\"name\": \"vm_no_powerstate\"}, vm_host_parent: {\"parent\": mocked_host, \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn}, mocked_host:", "2 clusters, 1 datastore def test_collect_realtime_only(vsphere, instance): \"\"\" Test the", "check._should_cache(instance, CacheConfig.Morlist) assert check._should_cache(instance, CacheConfig.Metadata) # explicitly set cache expiration", "# All rights reserved # Licensed under Simplified BSD License", "'vm_include': \"f[o]+\"} # OK included_host = MockedMOR(spec=\"HostSystem\", name=\"foo\") included_vm =", "True vsphere._cache_morlist_raw(instance) assertMOR(vsphere, instance, spec=\"vm\", count=3) # Fallback on VM", "cache assert check._should_cache(instance, CacheConfig.Morlist) assert check._should_cache(instance, CacheConfig.Metadata) # explicitly set", "regex = {'host_include': '^(?!filtered_.+)'} with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): obj_list = vsphere._get_all_objs(server_instance,", "def test__cache_metrics_metadata(vsphere, instance): vsphere.metadata_cache = MagicMock() vsphere._cache_metrics_metadata(instance) vsphere.metadata_cache.init_instance.assert_called_once_with(vsphere._instance_key(instance)) vsphere.metadata_cache.set_metadata.assert_called_once() vsphere.metadata_cache.set_metric_ids.assert_called_once()", "[instance]) assert check.excluded_host_tags == [\"vsphere_host\"] instance[\"excluded_host_tags\"] = [] check =", "vsphere._cache_morlist_raw(instance) assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 11", "{ \"mor_type\": \"datastore\", \"mor\": mocked_datastore, \"hostname\": None, \"tags\": [\"vsphere_datastore:unknown\", 
\"vsphere_type:datastore\"],", "def test_events(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value", "noqa: B009 return event def migrated_event(): now = datetime.utcnow() vm", "[ \"vcenter_server:vsphere_mock\", \"vsphere_folder:folder1\", \"vsphere_datacenter:datacenter2\", \"vsphere_compute:compute_resource2\", \"vsphere_cluster:compute_resource2\", \"vsphere_host:host3\", \"vsphere_type:vm\", ] assertMOR(vsphere,", "event = alarm_event(from_status='green', to_status='gray', message='Went from Green to Gray') server_instance.content.eventManager.QueryEvents.return_value", "= MockedMOR(spec='VirtualMachine', name='vm1') vm_arg = vim.event.VmEventArgument(vm=vm) host = MockedMOR(spec='HostSystem') host_arg", "include_regexes, None) assert not VSphereCheck._is_excluded(included_vm, {\"name\": included_vm.name}, include_regexes, None) #", "need to instantiate the check multiple times check = VSphereCheck('vsphere',", "message='Some error'): now = datetime.utcnow() vm = MockedMOR(spec='VirtualMachine') dc =", "# Check default value and precedence of instance config over", "\"mor\": vm_host_parent, \"hostname\": \"unknown\", \"tags\": [\"vsphere_host:mocked_host\", \"vsphere_host:unknown\", \"vsphere_type:vm\"], } in", "assert { \"mor_type\": \"host\", \"mor\": mocked_host, \"hostname\": \"mocked_host\", \"tags\": [\"vsphere_type:host\"],", "check._instance_key(instance) assert check.time_started > 0 assert not check.server_instances assert check.cache_config.get_interval(CacheConfig.Morlist,", "obj_list[vim.HostSystem] assert len(obj_list[vim.Datastore]) == 1 assert { \"mor_type\": \"datastore\", \"mor\":", "} in obj_list[vim.Datacenter] assert len(obj_list[vim.ClusterComputeResource]) == 1 assert { \"mor_type\":", "for res_type in RESOURCE_TYPE_METRICS) == 11 vsphere._process_mor_objects_queue(instance) # Object 
queue", "include_regexes, None) assert VSphereCheck._is_excluded(excluded_vm, {\"name\": excluded_vm.name}, include_regexes, None) # Sample(s)", "query_specs parameter should be a list of size 1 since", "metrics aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname=\"mocked_vm\", count=1) aggregator.assert_metric_has_tag('vsphere.mymetric', tag=\"vsphere_host:mocked_host\", count=1) def test__is_excluded():", "monitor status changed on this alarm, it was green and", "test__init__(instance): with pytest.raises(BadConfigError): # Must define a unique 'name' per", "instance, name=\"vm2_guest\", spec=\"vm\", subset=True) assertMOR(vsphere, instance, name=\"vm4_guest\", spec=\"vm\", subset=True) def", "then the datastore assert vsphere._process_mor_objects_queue_async.call_count == 3 instance[\"collect_realtime_only\"] = True", "\"tags\": [\"vsphere_host:non_filtered_host_number_1\", \"vsphere_type:vm\"], } == obj_list[vim.VirtualMachine][0] assert { \"mor_type\": \"host\",", "aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.OK, tags=SERVICE_CHECK_TAGS ) def test__instance_key(vsphere, instance): assert vsphere._instance_key(instance)", "= VSphereCheck('vsphere', {\"excluded_host_tags\": [\"vsphere_host\"]}, {}, [instance]) assert check.excluded_host_tags == [\"vsphere_host\"]", "to Gray') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.check(instance) aggregator.assert_event( \"vCenter monitor status", "name=\"vm1\", spec=\"vm\", subset=True) assertMOR(vsphere, instance, name=\"vm2_guest\", spec=\"vm\", subset=True) assertMOR(vsphere, instance,", "count=1) aggregator.assert_metric_has_tag('vsphere.mymetric', tag=\"vsphere_host:mocked_host\", count=1) def test__is_excluded(): \"\"\" * Exclude hosts/vms", "obj_list = vsphere._get_all_objs(server_instance, regex, False, []) assert len(obj_list[vim.VirtualMachine]) == 1", "\"vsphere_type:vm\"], } in obj_list[vim.VirtualMachine] assert 
len(obj_list[vim.HostSystem]) == 1 assert {", "3 instance[\"collect_realtime_only\"] = True vsphere._process_mor_objects_queue_async.reset_mock() with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) assert", "vsphere.format_metric_name.assert_called_once_with(counter, compatibility=True) def test_in_compatibility_mode(vsphere, instance): vsphere.log = MagicMock() instance[\"collection_level\"] =", "since the batch size is 1 assert len(call_args[0][1]) == 1", "assert not vsphere.in_compatibility_mode(instance) instance[\"all_metrics\"] = True assert not vsphere.in_compatibility_mode(instance) vsphere.log.warning.assert_not_called()", "the batch size is 1 assert len(call_args[0][1]) == 1 instance[\"collect_realtime_only\"]", "instance): \"\"\"Test that all vms belonging to a filtered host", "\"unit\": \"kb\"} vsphere.in_compatibility_mode = MagicMock() vsphere.in_compatibility_mode.return_value = False vsphere.check(instance) ext_host_tags", "that we check for errors when collecting properties with property", "== 1 vsphere.format_metric_name.assert_called_once_with(counter, compatibility=True) def test_in_compatibility_mode(vsphere, instance): vsphere.log = MagicMock()", "with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): with mock.patch.object(vsphere, 'set_external_tags') as set_external_tags: vsphere.check(instance) set_external_tags.assert_called_once() all_the_tags", "1 datastore. 
assertMOR(vsphere, instance, count=8) # ...on hosts assertMOR(vsphere, instance,", "initialized yet for instance %s, skipping processing\", vsphere._instance_key(instance) ) vsphere.batch_morlist_size", "# Object queue should be empty after processing assert sum(vsphere.mor_objects_queue.size(i_key,", "the case were there are missing attributes \"\"\" server_instance =", "vsphere._cache_metrics_metadata(instance) assert not vsphere.metadata_cache._metric_ids[i_key] assert len(vsphere.metadata_cache._metadata[i_key]) == 1 vsphere.format_metric_name.assert_called_once_with(counter, compatibility=True)", "VSphereCheck('vsphere', {\"excluded_host_tags\": [\"vsphere_host\"]}, {}, [instance]) assert check.excluded_host_tags == [] #", "assert len(obj_list[vim.HostSystem]) == 1 assert { \"mor_type\": \"host\", \"mor\": mocked_host,", "= MockedMOR(spec=\"HostSystem\") filtered_vm = MockedMOR(spec=\"VirtualMachine\") non_filtered_host = MockedMOR(spec=\"HostSystem\") non_filtered_vm =", "res_type) for res_type in RESOURCE_TYPE_METRICS) == 0 assert vsphere._process_mor_objects_queue_async.call_count ==", "{\"name\": \"non_filtered_host_number_1\", \"parent\": None}, non_filtered_vm: { \"name\": \"this_vm_is_not_filtered\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn,", "not check.latest_event_query assert check.batch_collector_size == 0 assert check.batch_morlist_size == 50", "mocked_host, \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn}, mocked_host: {\"name\": \"mocked_host\", \"parent\": None}, mocked_datastore: {},", "vsphere._instance_key(instance) == \"vsphere_mock\" del instance['name'] with pytest.raises(BadConfigError): vsphere._instance_key(instance) def test__should_cache(instance):", "mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): obj_list = vsphere._get_all_objs(server_instance, None, False, []) assert len(obj_list[vim.VirtualMachine])", "not use fixtures for the 
check instance, some params are", "hosts/vms not compliant with the user's `*_include` configuration. * Exclude", "{}, mocked_cluster: {\"name\": \"cluster\"}, mocked_datacenter: {\"parent\": MockedMOR(spec=\"Folder\", name=\"unknown folder\"), \"name\":", "test_format_metric_name(vsphere): counter = MagicMock() counter.groupInfo.key = \"group\" counter.nameInfo.key = \"name\"", "= \"name\" counter.rollupType = \"rollup\" assert vsphere.format_metric_name(counter, compatibility=True) == \"group.name\"", "tags=['foo:bar'] ) def test_events_tags(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance =", "the past check.cache_config.set_last(CacheConfig.Morlist, i_key, now - (2 * REFRESH_MORLIST_INTERVAL)) check.cache_config.set_last(CacheConfig.Metadata,", "instance['name'] with pytest.raises(BadConfigError): vsphere._instance_key(instance) def test__should_cache(instance): now = time.time() #", "= alarm_event(from_status='green', to_status='gray', message='Went from Green to Gray') server_instance.content.eventManager.QueryEvents.return_value =", "VM name when guest hostname not available assertMOR(vsphere, instance, name=\"vm1\",", "} check = VSphereCheck('vsphere', init_config, {}, [instance]) i_key = check._instance_key(instance)", "{\"name\": \"cluster\"}, mocked_datacenter: {\"parent\": MockedMOR(spec=\"Folder\", name=\"unknown folder\"), \"name\": \"datacenter\"}, }", "= MockedMOR(spec=\"VirtualMachine\", name=\"bar\") assert VSphereCheck._is_excluded(excluded_host, {\"name\": excluded_host.name}, include_regexes, None) assert", "VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS ) aggregator.reset() # SmartConnect succeeds, CurrentTime", "assertMOR(vsphere, instance, name=\"host2\", spec=\"host\", tags=tags) tags = [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:rootFolder\",", "= MockedMOR(spec=\"VirtualMachine\") mocked_mors_attrs = { filtered_host: {\"name\": 
\"filtered_host_number_1\", \"parent\": None},", "migrated_event(): now = datetime.utcnow() vm = MockedMOR(spec='VirtualMachine', name='vm1') vm_arg =", "= False vsphere._collect_metrics_async(instance, []) vsphere.log.debug.assert_not_called() def test__collect_metrics_async_hostname(vsphere, instance, aggregator): server_instance", "'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_type:host', ] assert all_the_tags['vm2'][SOURCE_TYPE] ==", "virtual machines when the user configuration instructs to. \"\"\" #", "set_external_tags.assert_called_once() all_the_tags = dict(set_external_tags.call_args[0][0]) assert all_the_tags['vm4'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder',", "vm_host_parent: {\"parent\": mocked_host, \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn}, mocked_host: {\"name\": \"mocked_host\", \"parent\": None},", "server.CurrentTime.side_effect = Exception() SmartConnect.side_effect = None SmartConnect.return_value = server with", "vm=vm_arg, userName='John', fullFormattedMessage='Some error', createdTime=now, host=host_arg, destHost=host_dest_arg, datacenter=dc_arg, destDatacenter=dc_dest_arg, ds=ds_arg,", "non_filtered_host: {\"name\": \"non_filtered_host_number_1\", \"parent\": None}, non_filtered_vm: { \"name\": \"this_vm_is_not_filtered\", \"runtime.powerState\":", "1 vsphere._collect_metrics_async = MagicMock() vsphere._cache_metrics_metadata(instance) vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) vsphere.collect_metrics(instance) assert vsphere._collect_metrics_async.call_count", "'vsphere_mock' in check.event_config assert not check.registry assert not check.latest_event_query assert", "test__instance_key(vsphere, instance): assert vsphere._instance_key(instance) == \"vsphere_mock\" del instance['name'] with 
pytest.raises(BadConfigError):", "5 # 2 datacenters, 2 clusters, 1 datastore def test_collect_realtime_only(vsphere,", "property collector \"\"\" server_instance = vsphere._get_server_instance(instance) with mock.patch(\"datadog_checks.vsphere.vsphere.vmodl\"): obj =", "vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [migrated_event()]", "instance[\"vm_include_only_regex\"] = \"vm[^2]\" instance[\"include_only_marked\"] = True # Discover hosts and", "MagicMock() vsphere.log = log mor_attrs = vsphere._collect_mors_and_attributes(server_instance) log.error.assert_not_called() assert len(mor_attrs)", "\"hostname\": \"unknown\", \"tags\": [\"vsphere_host:mocked_host\", \"vsphere_host:unknown\", \"vsphere_type:vm\"], } in obj_list[vim.VirtualMachine] assert", "collector failed to collect some attributes and that we handle", "\"hostname\": \"mocked_host\", \"tags\": [\"vsphere_type:host\"], } in obj_list[vim.HostSystem] assert len(obj_list[vim.Datastore]) ==", "compliant with the user's `*_include` configuration. 
* Exclude \"non-labeled\" virtual", "\"vsphere_folder:rootFolder\", \"vsphere_folder:folder1\", \"vsphere_datacenter:datacenter2\", \"vsphere_compute:compute_resource2\", \"vsphere_cluster:compute_resource2\", \"vsphere_type:host\", ] assertMOR(vsphere, instance, name=\"host3\",", "'vsphere_compute:compute_resource1', 'vsphere_type:host', ] assert all_the_tags['host3'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1',", "[\"vsphere_host\"] instance[\"excluded_host_tags\"] = [] check = VSphereCheck('vsphere', {\"excluded_host_tags\": [\"vsphere_host\"]}, {},", "spec=\"host\", tags=tags) # ...on VMs assertMOR(vsphere, instance, spec=\"vm\", count=1) tags", "raise KeyError if the property collector failed to collect some", "= dict(set_external_tags.call_args[0][0]) assert all_the_tags['vm4'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2',", "check.batch_collector_size == 0 assert check.batch_morlist_size == 50 assert check.excluded_host_tags ==", "{ \"name\": \"mocked_vm\", \"parent\": mocked_host, \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, }, mocked_host: {\"name\":", "``` rootFolder - datacenter1 - compute_resource1 - host1 # Filtered", "to instantiate the check multiple times check = VSphereCheck('vsphere', {},", "included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\") assert VSphereCheck._is_excluded(included_vm, {\"customValue\": []}, include_regexes, include_only_marked)", "in vsphere._process_mor_objects_queue_async.call_args_list: # query_specs parameter should be a list of", "status changed on this alarm, it was yellow and it's", "assert len(obj_list[vim.HostSystem]) == 1 assert { \"mor_type\": \"vm\", \"mor\": non_filtered_vm,", "pytest.raises(BadConfigError): vsphere._instance_key(instance) def test__should_cache(instance): now = time.time() # do not", "tags=SERVICE_CHECK_TAGS ) def 
test_service_check_ok(aggregator, instance): check = disable_thread_pool(VSphereCheck('disk', {}, {},", "size is 1 assert len(call_args[0][1]) == 1 instance[\"collect_realtime_only\"] = False", "\"mocked_host\", \"parent\": None}, } with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): server_instance = vsphere._get_server_instance(instance)", "{}, [instance]) i_key = check._instance_key(instance) assert check.time_started > 0 assert", "SmartConnect.return_value = get_mocked_server() check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.OK, tags=SERVICE_CHECK_TAGS ) def", "mocked_datastore, \"hostname\": None, \"tags\": [\"vsphere_datastore:unknown\", \"vsphere_type:datastore\"], } in obj_list[vim.Datastore] assert", "for object %s: %s', 'prop', 'obj', 'fault') assert len(mor_attrs) ==", "assert check._should_cache(instance, CacheConfig.Morlist) assert check._should_cache(instance, CacheConfig.Metadata) # explicitly set cache", "= MagicMock(token=None, objects=[obj]) server_instance.content.propertyCollector.RetrievePropertiesEx.return_value = result log = MagicMock() vsphere.log", "we also test # configuration is properly propagated init_config =", "spec=\"vm\", subset=True, tags=tags) def test_use_guest_hostname(vsphere, instance): # Default value with", "vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event( \"vCenter monitor status changed", "always cache assert check._should_cache(instance, CacheConfig.Morlist) assert check._should_cache(instance, CacheConfig.Metadata) # explicitly", "'vsphere_compute:compute_resource1', 'vsphere_type:host', ] def test_service_check_ko(aggregator, instance): check = disable_thread_pool(VSphereCheck('disk', {},", "= MagicMock() counter.rollupType = \"average\" counter.key = 1 vsphere.format_metric_name =", "\"\"\" Explore the vCenter 
infrastructure to discover hosts, virtual machines.", "mor vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value = {\"name\": \"mymetric\", \"unit\": \"kb\"}", "}, } regex = {'host_include': '^(?!filtered_.+)'} with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): obj_list", "def migrated_event(): now = datetime.utcnow() vm = MockedMOR(spec='VirtualMachine', name='vm1') vm_arg", "empty after processing assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS)", "= 1 i_key = vsphere._instance_key(instance) with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) assert sum(vsphere.mor_objects_queue.size(i_key,", "host, source_tags in ext_host_tags: if host == u\"mocked_vm\": tags =", "MagicMock() vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value = {\"name\": \"unknown\"} vsphere.in_compatibility_mode =", "= vim.event.DatastoreEventArgument(datastore=ds, name='ds1') ds_dest = MockedMOR(spec='Datastore') ds_dest_arg = vim.event.DatastoreEventArgument(datastore=ds_dest, name='ds2')", "spec=\"host\", count=2) tags = [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:rootFolder\", \"vsphere_datacenter:datacenter1\", \"vsphere_compute:compute_resource1\", \"vsphere_cluster:compute_resource1\",", "out - host2 - folder1 - datacenter2 - compute_resource2 -", "runs, set the last execution time in the past check.cache_config.set_last(CacheConfig.Morlist,", "message='Went from Gray to Red') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.event_config['vsphere_mock'] =", "with metrics aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname=\"mocked_vm\", count=1) aggregator.assert_metric_has_tag('vsphere.mymetric', tag=\"vsphere_host:mocked_host\", count=1) def", "on this alarm, it was green and it's now red.\",", 
"len(obj_list[vim.VirtualMachine]) == 1 assert len(obj_list[vim.HostSystem]) == 1 assert { \"mor_type\":", "= [counter] vsphere._cache_metrics_metadata(instance) server_instance.content.perfManager.QueryPerfCounterByLevel.assert_called_once_with(3) assert len(vsphere.metadata_cache._metric_ids[i_key]) == 1 assert len(vsphere.metadata_cache._metadata[i_key])", "'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_host:host3', 'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['host2'][SOURCE_TYPE] == [", "log_warning=True) vsphere.log.warning.assert_called_once() def test_format_metric_name(vsphere): counter = MagicMock() counter.groupInfo.key = \"group\"", "= 1 vsphere._collect_metrics_async = MagicMock() vsphere._cache_metrics_metadata(instance) vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) vsphere.collect_metrics(instance) assert", "datacenter2 - compute_resource2 - host3 - vm1 # Not labeled", "vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) event = alarm_event(from_status='gray',", "- folder1 - datacenter2 - compute_resource2 - host3 - vm1", "MagicMock() vsphere.metadata_cache.get_metadata.return_value = {\"name\": \"mymetric\", \"unit\": \"kb\"} vsphere.in_compatibility_mode = MagicMock()", "{}, [instance]) assert check.excluded_host_tags == [] # Test host tags", "for host, source_tags in ext_host_tags: if host == u\"mocked_vm\": tags", "\"tags\": [\"vsphere_cluster:cluster\", \"vsphere_type:cluster\"], } in obj_list[vim.ClusterComputeResource] def test__collect_mors_and_attributes(vsphere, instance): \"\"\"", "and it's now red.\", tags=['foo:bar'] ) def test_events_gray_handled(aggregator, vsphere, instance):", "mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere.batch_morlist_size = 1 vsphere._collect_metrics_async = MagicMock() vsphere._cache_metrics_metadata(instance) 
vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance)", "def test_check(vsphere, instance): \"\"\" Test the check() method \"\"\" with", "vsphere.check(instance) aggregator.assert_event( \"vCenter monitor status changed on this alarm, it", "\"this_vm_is_not_filtered\", \"tags\": [\"vsphere_host:non_filtered_host_number_1\", \"vsphere_type:vm\"], } == obj_list[vim.VirtualMachine][0] assert { \"mor_type\":", "Simplified BSD License (see LICENSE) from __future__ import unicode_literals import", "vCenter infrastructure to discover hosts, virtual machines. Input topology: ```", "[\"vsphere_cluster:cluster\", \"vsphere_type:cluster\"], } in obj_list[vim.ClusterComputeResource] def test__collect_mors_and_attributes(vsphere, instance): \"\"\" Test", "and that we handle the case were there are missing", "once to process the 2 datacenters, then 2 clusters, then", "] assert all_the_tags['host2'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_datacenter:datacenter1', 'vsphere_cluster:compute_resource1', 'vsphere_compute:compute_resource1',", "vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event( \"John has launched a", "vm_host_parent, \"hostname\": \"unknown\", \"tags\": [\"vsphere_host:mocked_host\", \"vsphere_host:unknown\", \"vsphere_type:vm\"], } in obj_list[vim.VirtualMachine]", "to. 
\"\"\" # Sample(s) include_regexes = {'host_include': \"f[o]+\", 'vm_include': \"f[o]+\"}", "unicode_literals import time from datetime import datetime import mock import", "tags=[ 'foo:bar', 'vsphere_host:host1', 'vsphere_host:host2', 'vsphere_datacenter:dc1', 'vsphere_datacenter:dc2', ], ) server_instance =", "vim.VirtualMachinePowerState.poweredOn, \"runtime.host\": non_filtered_host, }, } regex = {'host_include': '^(?!filtered_.+)'} with", "%s', 'prop', 'obj', 'fault') assert len(mor_attrs) == 1 def test__cache_morlist_raw(vsphere,", "host_dest = MockedMOR(spec='HostSystem') host_dest_arg = vim.event.HostEventArgument(host=host_dest, name='host2') dc = MockedMOR(spec='Datacenter')", "not vsphere.in_compatibility_mode(instance) instance[\"all_metrics\"] = True assert not vsphere.in_compatibility_mode(instance) vsphere.log.warning.assert_not_called() assert", "mode instance[\"all_metrics\"] = False del instance[\"collection_level\"] vsphere.format_metric_name.reset_mock() server_instance.content.perfManager.perfCounter = [counter]", "None}, } with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): server_instance = vsphere._get_server_instance(instance) result =", "\"vsphere_type:host\", ] assertMOR(vsphere, instance, name=\"host2\", spec=\"host\", tags=tags) tags = [", ") event = alarm_event(from_status='yellow', to_status='gray', message='Went from Yellow to Gray')", "not mock_get_all_objs.call_args[1][\"use_guest_hostname\"] # use guest hostname instance[\"use_guest_hostname\"] = True vsphere._cache_morlist_raw(instance)", "None}, non_filtered_vm: { \"name\": \"this_vm_is_not_filtered\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, \"runtime.host\": non_filtered_host, },", "instance): check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance])) with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): with", "= {'collect_vcenter_alarms': True} vsphere.check(instance) 
assert not aggregator.events event = alarm_event(from_status='green',", "previous runs, set the last execution time in the past", "now gray.\", tags=['foo:bar'], alert_type='info', ) def test_events_gray_ignored(aggregator, vsphere, instance): with", "# SmartConnect fails SmartConnect.side_effect = Exception() with pytest.raises(ConnectionError): check.check(instance) aggregator.assert_service_check(", "True # OK included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\", label=True) assert not", "don't raise KeyError if the property collector failed to collect", "\"vcenter_host:None\", \"foo:bar\"] def test__init__(instance): with pytest.raises(BadConfigError): # Must define a", "alarm, it was green and it's now red.\", tags=['foo:bar'] )", "vsphere._get_server_instance(instance) result = MagicMock() result.value = [23.4] server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result])]", "red.\", tags=['foo:bar'] ) def test_events_tags(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance", "= { 'refresh_morlist_interval': 2 * REFRESH_MORLIST_INTERVAL, 'refresh_metrics_metadata_interval': 2 * REFRESH_METRICS_METADATA_INTERVAL,", "# Not OK included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\") assert VSphereCheck._is_excluded(included_vm, {\"customValue\":", "{\"name\": \"mymetric\", \"unit\": \"kb\"} vsphere.in_compatibility_mode = MagicMock() vsphere.in_compatibility_mode.return_value = False", "[instance]) i_key = check._instance_key(instance) # first run should always cache", "included_vm.customValue}, include_regexes, include_only_marked ) # Not OK included_vm = MockedMOR(spec=\"VirtualMachine\",", "# 2 datacenters, 2 clusters, 1 datastore def test_collect_realtime_only(vsphere, instance):", "* REFRESH_METRICS_METADATA_INTERVAL, } check = VSphereCheck('vsphere', init_config, {}, [instance]) #", "alarm_event(from_status='green', to_status='gray', message='Went from Green to 
Gray') server_instance.content.eventManager.QueryEvents.return_value = [event]", "datacenters + 2 clusters + 1 datastore. assertMOR(vsphere, instance, count=8)", "'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_host:host3', 'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['vm1'][SOURCE_TYPE]", "MockedMOR(spec=\"Alarm\") alarm_arg = vim.event.AlarmEventArgument(alarm=alarm, name='alarm1') entity = vim.event.ManagedEntityEventArgument(entity=vm, name='vm1') event", "vsphere.log.warning.assert_not_called() assert not vsphere.in_compatibility_mode(instance, log_warning=True) vsphere.log.warning.assert_called_once() del instance[\"collection_level\"] vsphere.log.reset_mock() assert", "{}, [instance])) with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect: # SmartConnect fails SmartConnect.side_effect", "in RESOURCE_TYPE_METRICS) == 0 assert vsphere._process_mor_objects_queue_async.call_count == 5 # 2", "- host3 - vm1 # Not labeled - vm2 #", "# Must define a unique 'name' per vCenter instance VSphereCheck('vsphere',", "assert len(obj_list[vim.ClusterComputeResource]) == 1 assert { \"mor_type\": \"cluster\", \"mor\": mocked_cluster,", "= MagicMock() vsphere.log = MagicMock() vsphere.in_compatibility_mode.return_value = True vsphere._collect_metrics_async(instance, [])", "instance, aggregator): server_instance = vsphere._get_server_instance(instance) result = MagicMock() result.value =", "object %s: %s', 'prop', 'obj', 'fault') assert len(mor_attrs) == 1", "assert check.time_started > 0 assert not check.server_instances assert check.cache_config.get_interval(CacheConfig.Morlist, i_key)", ") def test_events_tags(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance)", "{\"name\": excluded_vm.name}, include_regexes, None) # Sample(s) 
include_regexes = None include_only_marked", "vsphere.log.reset_mock() assert vsphere.in_compatibility_mode(instance) vsphere.log.warning.assert_not_called() assert vsphere.in_compatibility_mode(instance, log_warning=True) vsphere.log.warning.assert_called_once() def test_format_metric_name(vsphere):", "None SmartConnect.return_value = server with pytest.raises(ConnectionError): check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL,", "* REFRESH_MORLIST_INTERVAL, 'refresh_metrics_metadata_interval': 2 * REFRESH_METRICS_METADATA_INTERVAL, } check = VSphereCheck('vsphere',", "} in obj_list[vim.VirtualMachine] assert len(obj_list[vim.HostSystem]) == 1 assert { \"mor_type\":", "MockedMOR(spec=\"VirtualMachine\") non_filtered_host = MockedMOR(spec=\"HostSystem\") non_filtered_vm = MockedMOR(spec=\"VirtualMachine\") mocked_mors_attrs = {", "check = VSphereCheck('vsphere', init_config, {}, [instance]) # simulate previous runs,", "\"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn}, mocked_host: {\"name\": \"mocked_host\", \"parent\": None}, mocked_datastore: {}, mocked_cluster:", "RESOURCE_TYPE_METRICS) == 11 vsphere._process_mor_objects_queue(instance) # Object queue should be empty", "the user configuration instructs to. 
\"\"\" # Sample(s) include_regexes =", "None) assert not VSphereCheck._is_excluded(included_vm, {\"name\": included_vm.name}, include_regexes, None) # Not", "= {'host_include': \"f[o]+\", 'vm_include': \"f[o]+\"} # OK included_host = MockedMOR(spec=\"HostSystem\",", "assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 0 assert", "MockedMOR(spec=\"HostSystem\", name=\"foo\") included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\") assert not VSphereCheck._is_excluded(included_host, {\"name\":", "pytest.raises(ConnectionError): check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS ) aggregator.reset() #", "\"parent\": None}, } with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): server_instance = vsphere._get_server_instance(instance) result", "on VM name when guest hostname not available assertMOR(vsphere, instance,", "yet for instance %s, skipping processing\", vsphere._instance_key(instance) ) vsphere.batch_morlist_size =", "assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 11 vsphere._process_mor_objects_queue(instance)", "count=1, tags=SERVICE_CHECK_TAGS ) def test_service_check_ok(aggregator, instance): check = disable_thread_pool(VSphereCheck('disk', {},", "assert not aggregator.events event = alarm_event(from_status='green', to_status='gray', message='Went from Green", "def test__is_excluded(): \"\"\" * Exclude hosts/vms not compliant with the", "vm2 # Filtered out - vm3 # Powered off -", "value assert not mock_get_all_objs.call_args[1][\"use_guest_hostname\"] # use guest hostname instance[\"use_guest_hostname\"] =", "assert not check.registry assert not check.latest_event_query assert check.batch_collector_size == 0", "= vim.event.HostEventArgument(host=host, name='host1') host_dest = 
MockedMOR(spec='HostSystem') host_dest_arg = vim.event.HostEventArgument(host=host_dest, name='host2')", "with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect: # SmartConnect fails SmartConnect.side_effect = Exception()", "} with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): server_instance = vsphere._get_server_instance(instance) result = MagicMock()", "test__cache_morlist_raw(vsphere, instance): \"\"\" Explore the vCenter infrastructure to discover hosts,", "vsphere._process_mor_objects_queue_async.call_count == 3 instance[\"collect_realtime_only\"] = True vsphere._process_mor_objects_queue_async.reset_mock() with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance)", "= True assert not vsphere.in_compatibility_mode(instance) vsphere.log.warning.assert_not_called() assert not vsphere.in_compatibility_mode(instance, log_warning=True)", "MagicMock() vsphere.log = MagicMock() vsphere.in_compatibility_mode.return_value = True vsphere._collect_metrics_async(instance, []) vsphere.log.debug.assert_called_with('Skipping", "past check.cache_config.set_last(CacheConfig.Morlist, i_key, now - (2 * REFRESH_MORLIST_INTERVAL)) check.cache_config.set_last(CacheConfig.Metadata, i_key,", "check.registry assert not check.latest_event_query assert check.batch_collector_size == 0 assert check.batch_morlist_size", "len(vsphere.metadata_cache._metadata[i_key]) == 1 vsphere.format_metric_name.assert_called_once_with(counter, compatibility=True) def test_in_compatibility_mode(vsphere, instance): vsphere.log =", "= [migrated_event()] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event( \"John has", "== \"group.name\" for rollup, short_rollup in SHORT_ROLLUP.items(): counter.rollupType = rollup", "\"John has launched a hot migration of this virtual machine\",", "ds = 
MockedMOR(spec='Datastore') ds_arg = vim.event.DatastoreEventArgument(datastore=ds, name='ds1') ds_dest = MockedMOR(spec='Datastore')", "vim.event.HostEventArgument(host=host_dest, name='host2') dc = MockedMOR(spec='Datacenter') dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1') dc_dest", "0 assert vsphere._process_mor_objects_queue_async.call_count == 0 # realtime only for call_args", "'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_host:host3', 'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['host1'][SOURCE_TYPE]", "\"\"\" with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): with mock.patch.object(vsphere, 'set_external_tags') as set_external_tags: vsphere.check(instance) set_external_tags.assert_called_once()", "datacenter=dc_arg, destDatacenter=dc_dest_arg, ds=ds_arg, destDatastore=ds_dest_arg, ) return event def test_events(aggregator, vsphere,", "\"cluster\", \"mor\": mocked_cluster, \"hostname\": None, \"tags\": [\"vsphere_cluster:cluster\", \"vsphere_type:cluster\"], } in", "instance, count=8) # ...on hosts assertMOR(vsphere, instance, spec=\"host\", count=2) tags", "[instance])) with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect: SmartConnect.return_value = get_mocked_server()", "\"hostname\": \"non_filtered_host_number_1\", \"tags\": [\"vsphere_type:host\"], } == obj_list[vim.HostSystem][0] def test__get_all_objs(vsphere, instance):", "MockedMOR(spec=\"VirtualMachine\", name=\"bar\") assert VSphereCheck._is_excluded(excluded_host, {\"name\": excluded_host.name}, include_regexes, None) assert VSphereCheck._is_excluded(excluded_vm,", "vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) # Called once to process the 2 datacenters,", "excluded from external host metadata, but still stored in the", 
"aggregator.assert_event( \"John has launched a hot migration of this virtual", "VSphereCheck from datadog_checks.vsphere.cache_config import CacheConfig from datadog_checks.vsphere.common import SOURCE_TYPE from", "'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['vm1'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1',", "None}, filtered_vm: { \"name\": \"this_vm_is_filtered\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, \"runtime.host\": filtered_host, },", "# Filtered out - vm3 # Powered off - vm4", "virtual machines vsphere._cache_morlist_raw(instance) # Assertions: 1 labeled+monitored VM + 2", "1 labeled+monitored VM + 2 hosts + 2 datacenters +", "False vsphere._collect_metrics_async(instance, []) vsphere.log.debug.assert_not_called() def test__collect_metrics_async_hostname(vsphere, instance, aggregator): server_instance =", "hostname=\"mocked_vm\", count=1) aggregator.assert_metric_has_tag('vsphere.mymetric', tag=\"vsphere_host:mocked_host\", count=1) def test__is_excluded(): \"\"\" * Exclude", "= vim.event.ManagedEntityEventArgument(entity=vm, name='vm1') event = vim.event.AlarmStatusChangedEvent( entity=entity, fullFormattedMessage=message, createdTime=now, to=to_status,", "realtime only for call_args in vsphere._process_mor_objects_queue_async.call_args_list: # query_specs parameter should", "check.excluded_host_tags == [] # Test host tags are excluded from", "[\"vsphere_host:unknown\", \"vsphere_type:vm\"], } in obj_list[vim.VirtualMachine] assert { \"mor_type\": \"vm\", \"mor\":", "name=\"vm4_guest\", spec=\"vm\", subset=True) def test__process_mor_objects_queue(vsphere, instance): vsphere.log = MagicMock() vsphere._process_mor_objects_queue_async", "obj_list[vim.VirtualMachine][0] assert { \"mor_type\": \"host\", \"mor\": non_filtered_host, \"hostname\": \"non_filtered_host_number_1\", \"tags\":", "counter = MagicMock() counter.rollupType = \"average\" counter.key = 
1 vsphere.format_metric_name", "CacheConfig.Metadata) # explicitly set cache expiration times, don't use defaults", "assert not check._should_cache(instance, CacheConfig.Metadata) def alarm_event(from_status='green', to_status='red', message='Some error'): now", "instance[\"collect_realtime_only\"] = False vsphere._cache_morlist_raw(instance) assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in", "pytest.raises(ConnectionError): check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS ) def test_service_check_ok(aggregator,", "from Yellow to Gray') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.check(instance) aggregator.assert_event( \"vCenter", "check.batch_morlist_size == 50 assert check.excluded_host_tags == [] def test_excluded_host_tags(vsphere, instance,", "vsphere._cache_morlist_raw(instance) assertMOR(vsphere, instance, spec=\"vm\", count=3) # Fallback on VM name", "count=3) # Fallback on VM name when guest hostname not", "\"f[o]+\", 'vm_include': \"f[o]+\"} # OK included_host = MockedMOR(spec=\"HostSystem\", name=\"foo\") included_vm", "i_key) == 42 assert check.cache_config.get_interval(CacheConfig.Metadata, i_key) == -42 assert check.clean_morlist_interval", "config check = VSphereCheck('vsphere', {}, {}, [instance]) assert check.excluded_host_tags ==", "yellow and it's now gray.\", tags=['foo:bar'], alert_type='info', ) def test_events_gray_ignored(aggregator,", "== 11 vsphere._process_mor_objects_queue(instance) # Object queue should be empty after", "datetime.utcnow() vm = MockedMOR(spec='VirtualMachine', name='vm1') vm_arg = vim.event.VmEventArgument(vm=vm) host =", "\"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, \"runtime.host\": filtered_host, }, non_filtered_host: {\"name\": \"non_filtered_host_number_1\", \"parent\": None},", "= MagicMock() vsphere._cache_metrics_metadata(instance) 
vsphere.metadata_cache.init_instance.assert_called_once_with(vsphere._instance_key(instance)) vsphere.metadata_cache.set_metadata.assert_called_once() vsphere.metadata_cache.set_metric_ids.assert_called_once() def test__cache_metrics_metadata_compatibility(vsphere, instance): server_instance", "MockedMOR(spec='Datacenter') dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1') dc_dest = MockedMOR(spec='Datacenter') dc_dest_arg =", "metrics vsphere.excluded_host_tags = [\"vsphere_host\"] mocked_vm = MockedMOR(spec=\"VirtualMachine\") mocked_host = MockedMOR(spec=\"HostSystem\")", "= [\"vsphere_host\"] mocked_vm = MockedMOR(spec=\"VirtualMachine\") mocked_host = MockedMOR(spec=\"HostSystem\") mocked_mors_attrs =", "off - vm4 ``` \"\"\" # Samples with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): instance[\"host_include_only_regex\"]", "way instance[\"collection_level\"] = 3 server_instance.content.perfManager.QueryPerfCounterByLevel.return_value = [counter] vsphere._cache_metrics_metadata(instance) server_instance.content.perfManager.QueryPerfCounterByLevel.assert_called_once_with(3) assert", "test__get_all_objs(vsphere, instance): \"\"\" Test that we don't raise KeyError if", "] def test_service_check_ko(aggregator, instance): check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance]))", "1 datastore def test_collect_realtime_only(vsphere, instance): \"\"\" Test the collect_realtime_only parameter", "1 assert 'vsphere_mock' in check.event_config assert not check.registry assert not", "len(obj_list[vim.Datacenter]) == 1 assert { \"mor_type\": \"datacenter\", \"mor\": mocked_datacenter, \"hostname\":", "with mock.patch(\"time.time\", return_value=now): assert not check._should_cache(instance, CacheConfig.Morlist) assert not check._should_cache(instance,", "vsphere._cache_metrics_metadata(instance) server_instance.content.perfManager.QueryPerfCounterByLevel.assert_called_once_with(3) assert len(vsphere.metadata_cache._metric_ids[i_key]) 
== 1 assert len(vsphere.metadata_cache._metadata[i_key]) == 1", "label=True) assert not VSphereCheck._is_excluded( included_vm, {\"customValue\": included_vm.customValue}, include_regexes, include_only_marked )", "vm_no_powerstate: {\"name\": \"vm_no_powerstate\"}, vm_host_parent: {\"parent\": mocked_host, \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn}, mocked_host: {\"name\":", "with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) # Called once to process the", "- compute_resource2 - host3 - vm1 # Not labeled -", "instance[\"collect_realtime_only\"] = True vsphere._process_mor_objects_queue_async.reset_mock() with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) assert vsphere._process_mor_objects_queue_async.call_count", "set cache expiration times, don't use defaults so we also", ") # Not OK included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\") assert VSphereCheck._is_excluded(included_vm,", "set_external_tags: vsphere.check(instance) set_external_tags.assert_called_once() all_the_tags = dict(set_external_tags.call_args[0][0]) assert all_the_tags['vm4'][SOURCE_TYPE] == [", "aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS ) aggregator.reset() # SmartConnect succeeds,", "vsphere.metadata_cache._metric_ids[i_key] assert len(vsphere.metadata_cache._metadata[i_key]) == 1 vsphere.format_metric_name.assert_called_once_with(counter, compatibility=True) def test_in_compatibility_mode(vsphere, instance):", "virtual machine\", exact_match=False, tags=[ 'foo:bar', 'vsphere_host:host1', 'vsphere_host:host2', 'vsphere_datacenter:dc1', 'vsphere_datacenter:dc2', ],", "obj_list = vsphere._get_all_objs(server_instance, None, False, []) assert len(obj_list[vim.VirtualMachine]) == 2", "= 
VSphereCheck('vsphere', init_config, {}, [instance]) # simulate previous runs, set", "assert { \"mor_type\": \"vm\", \"mor\": non_filtered_vm, \"hostname\": \"this_vm_is_not_filtered\", \"tags\": [\"vsphere_host:non_filtered_host_number_1\",", "host are also filtered\"\"\" server_instance = vsphere._get_server_instance(instance) filtered_host = MockedMOR(spec=\"HostSystem\")", "\"vsphere_cluster:compute_resource1\", \"vsphere_type:host\", ] assertMOR(vsphere, instance, name=\"host2\", spec=\"host\", tags=tags) tags =", "gray and it's now red.\", tags=['foo:bar'] ) event = alarm_event(from_status='yellow',", "vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) vsphere.collect_metrics(instance) assert vsphere._collect_metrics_async.call_count == 6 # One for", "= VSphereCheck('vsphere', {\"excluded_host_tags\": [\"vsphere_host\"]}, {}, [instance]) assert check.excluded_host_tags == []", "errors when collecting properties with property collector \"\"\" server_instance =", "Test the check() method \"\"\" with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): with mock.patch.object(vsphere, 'set_external_tags')", "\"vcenter_server:vsphere_mock\", \"vsphere_folder:rootFolder\", \"vsphere_datacenter:datacenter1\", \"vsphere_compute:compute_resource1\", \"vsphere_cluster:compute_resource1\", \"vsphere_type:host\", ] assertMOR(vsphere, instance, name=\"host2\",", "not check.registry assert not check.latest_event_query assert check.batch_collector_size == 0 assert", "True} vsphere.check(instance) assert not aggregator.events event = alarm_event(from_status='green', to_status='gray', message='Went", "stored in the cache for metrics vsphere.excluded_host_tags = [\"vsphere_host\"] mocked_vm", "[instance]) i_key = check._instance_key(instance) assert check.time_started > 0 assert not", "def test__get_all_objs(vsphere, instance): \"\"\" Test that we don't raise KeyError", "- vm4 ``` \"\"\" # Samples with 
mock.patch('datadog_checks.vsphere.vsphere.vmodl'): instance[\"host_include_only_regex\"] =", "assertMOR(vsphere, instance, name=\"vm4_guest\", spec=\"vm\", subset=True) def test__process_mor_objects_queue(vsphere, instance): vsphere.log =", "count=1) tags = [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:folder1\", \"vsphere_datacenter:datacenter2\", \"vsphere_compute:compute_resource2\", \"vsphere_cluster:compute_resource2\", \"vsphere_host:host3\",", "1 assert len(call_args[0][1]) == 1 instance[\"collect_realtime_only\"] = False vsphere._cache_morlist_raw(instance) assert", "vsphere.in_compatibility_mode(instance) vsphere.log.warning.assert_not_called() assert vsphere.in_compatibility_mode(instance, log_warning=True) vsphere.log.warning.assert_called_once() def test_format_metric_name(vsphere): counter =", "assert not check.server_instances assert check.cache_config.get_interval(CacheConfig.Morlist, i_key) == 42 assert check.cache_config.get_interval(CacheConfig.Metadata,", "mor_attrs = vsphere._collect_mors_and_attributes(server_instance) log.error.assert_called_once_with('Unable to retrieve property %s for object", "instance, name=\"vm4_guest\", spec=\"vm\", subset=True) def test__process_mor_objects_queue(vsphere, instance): vsphere.log = MagicMock()", "all_the_tags['host1'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_datacenter:datacenter1', 'vsphere_cluster:compute_resource1', 'vsphere_compute:compute_resource1', 'vsphere_type:host', ]", "sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 11 vsphere._process_mor_objects_queue(instance) #", "vsphere.log.reset_mock() vsphere.in_compatibility_mode.return_value = False vsphere._collect_metrics_async(instance, []) vsphere.log.debug.assert_not_called() def test__collect_metrics_async_hostname(vsphere, instance,", "server_instance = vsphere._get_server_instance(instance) 
server_instance.content.eventManager.QueryEvents.return_value = [migrated_event()] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True}", "MockedMOR(spec='HostSystem') host_dest_arg = vim.event.HostEventArgument(host=host_dest, name='host2') dc = MockedMOR(spec='Datacenter') dc_arg =", "\"vCenter monitor status changed on this alarm, it was yellow", "MockedMOR(spec=\"VirtualMachine\", name=\"foo\") assert not VSphereCheck._is_excluded(included_host, {\"name\": included_host.name}, include_regexes, None) assert", "mock.patch.object(vsphere, 'set_external_tags') as set_external_tags: vsphere.check(instance) set_external_tags.assert_called_once() all_the_tags = dict(set_external_tags.call_args[0][0]) assert", "assert vsphere._process_mor_objects_queue_async.call_count == 3 instance[\"collect_realtime_only\"] = True vsphere._process_mor_objects_queue_async.reset_mock() with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):", "MagicMock() vsphere.in_compatibility_mode.return_value = True vsphere._collect_metrics_async(instance, []) vsphere.log.debug.assert_called_with('Skipping unknown `%s` metric.',", "vsphere.excluded_host_tags = [\"vsphere_host\"] mocked_vm = MockedMOR(spec=\"VirtualMachine\") mocked_host = MockedMOR(spec=\"HostSystem\") mocked_mors_attrs", "MockedMOR(spec=\"VirtualMachine\") mocked_host = MockedMOR(spec=\"HostSystem\") mocked_datastore = MockedMOR(spec=\"Datastore\") mocked_datacenter = MockedMOR(spec=\"Datacenter\")", "= MagicMock() vsphere._process_mor_objects_queue(instance) # Queue hasn't been initialized vsphere.log.debug.assert_called_once_with( \"Objects", "0 assert vsphere._process_mor_objects_queue_async.call_count == 5 # 2 datacenters, 2 clusters,", "'vsphere_compute:compute_resource2', 'vsphere_host:host3', 'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['host2'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock',", "mocked_datastore: {}, mocked_cluster: {\"name\": \"cluster\"}, mocked_datacenter: 
{\"parent\": MockedMOR(spec=\"Folder\", name=\"unknown folder\"),", "collector \"\"\" server_instance = vsphere._get_server_instance(instance) with mock.patch(\"datadog_checks.vsphere.vsphere.vmodl\"): obj = MagicMock(missingSet=None,", "== 0 assert vsphere._process_mor_objects_queue_async.call_count == 5 # 2 datacenters, 2", "it's now red.\", tags=['foo:bar'] ) def test_events_tags(aggregator, vsphere, instance): with", "vsphere.in_compatibility_mode(instance) vsphere.log.warning.assert_not_called() assert not vsphere.in_compatibility_mode(instance, log_warning=True) vsphere.log.warning.assert_called_once() del instance[\"collection_level\"] vsphere.log.reset_mock()", "mock.patch('datadog_checks.vsphere.vsphere.vmodl'): instance[\"host_include_only_regex\"] = \"host[2-9]\" instance[\"vm_include_only_regex\"] = \"vm[^2]\" instance[\"include_only_marked\"] = True", "{ vm_no_parent: {\"name\": \"vm_no_parent\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn}, vm_no_powerstate: {\"name\": \"vm_no_powerstate\"}, vm_host_parent:", "compute_resource1 - host1 # Filtered out - host2 - folder1", "REFRESH_METRICS_METADATA_INTERVAL, } check = VSphereCheck('vsphere', init_config, {}, [instance]) # simulate", "vm4 ``` \"\"\" # Samples with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): instance[\"host_include_only_regex\"] = \"host[2-9]\"", "with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): obj_list = vsphere._get_all_objs(server_instance, None, False, []) assert", "error', createdTime=now, host=host_arg, destHost=host_dest_arg, datacenter=dc_arg, destDatacenter=dc_dest_arg, ds=ds_arg, destDatastore=ds_dest_arg, ) return", "and virtual machines instance[\"use_guest_hostname\"] = True vsphere._cache_morlist_raw(instance) assertMOR(vsphere, instance, spec=\"vm\",", "the property collector failed to collect some attributes and that", "check.cache_config.get_interval(CacheConfig.Metadata, 
i_key) == -42 assert check.clean_morlist_interval == 50 assert len(check.event_config)", "subset=True) assertMOR(vsphere, instance, name=\"vm2_guest\", spec=\"vm\", subset=True) assertMOR(vsphere, instance, name=\"vm4_guest\", spec=\"vm\",", "Samples with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): instance[\"host_include_only_regex\"] = \"host[2-9]\" instance[\"vm_include_only_regex\"] = \"vm[^2]\" instance[\"include_only_marked\"]", "tags=['foo:bar'] ) def test_events_gray_handled(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance =", "= vsphere._instance_key(instance) with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type", "times, don't use defaults so we also test # configuration", "'obj', 'fault') assert len(mor_attrs) == 1 def test__cache_morlist_raw(vsphere, instance): \"\"\"", "= None include_only_marked = True # OK included_vm = MockedMOR(spec=\"VirtualMachine\",", "obj_list[vim.ClusterComputeResource] def test__collect_mors_and_attributes(vsphere, instance): \"\"\" Test that we check for", "New way instance[\"collection_level\"] = 3 server_instance.content.perfManager.QueryPerfCounterByLevel.return_value = [counter] vsphere._cache_metrics_metadata(instance) server_instance.content.perfManager.QueryPerfCounterByLevel.assert_called_once_with(3)", "instance[\"collect_realtime_only\"] = False with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) # Called once", "vCenter instance VSphereCheck('vsphere', {}, {}, [{'': ''}]) init_config = {", "[{'': ''}]) init_config = { 'clean_morlist_interval': 50, 'refresh_morlist_interval': 42, 'refresh_metrics_metadata_interval':", "MagicMock() vsphere._process_mor_objects_queue_async = MagicMock() vsphere._process_mor_objects_queue(instance) # Queue hasn't been 
initialized", "= vsphere._instance_key(instance) counter = MagicMock() counter.rollupType = \"average\" counter.key =", "# Licensed under Simplified BSD License (see LICENSE) from __future__", "# vsphere_host tag not in external metadata for host, source_tags", "size is 1 assert len(call_args[0][1]) == 1 def test__collect_metrics_async_compatibility(vsphere, instance):", "Licensed under Simplified BSD License (see LICENSE) from __future__ import", "test_service_check_ok(aggregator, instance): check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance])) with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):", "guest hostname not available assertMOR(vsphere, instance, name=\"vm1\", spec=\"vm\", subset=True) assertMOR(vsphere,", "== 1 obj.missingSet = [MagicMock(path=\"prop\", fault=\"fault\")] mor_attrs = vsphere._collect_mors_and_attributes(server_instance) log.error.assert_called_once_with('Unable", "instructs to. \"\"\" # Sample(s) include_regexes = {'host_include': \"f[o]+\", 'vm_include':", "len(obj_list[vim.ClusterComputeResource]) == 1 assert { \"mor_type\": \"cluster\", \"mor\": mocked_cluster, \"hostname\":", "params are set at # __init__ time and we need", "pyVmomi import vim from datadog_checks.vsphere import VSphereCheck from datadog_checks.vsphere.cache_config import", "\"group.name\" for rollup, short_rollup in SHORT_ROLLUP.items(): counter.rollupType = rollup assert", "MockedMOR(spec=\"ClusterComputeResource\") mocked_mors_attrs = { vm_no_parent: {\"name\": \"vm_no_parent\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn}, vm_no_powerstate:", "# New way instance[\"collection_level\"] = 3 server_instance.content.perfManager.QueryPerfCounterByLevel.return_value = [counter] vsphere._cache_metrics_metadata(instance)", "\"f[o]+\"} # OK included_host = MockedMOR(spec=\"HostSystem\", name=\"foo\") included_vm = MockedMOR(spec=\"VirtualMachine\",", "test__should_cache(instance): now = time.time() # do not use fixtures for", 
"mor_attrs = vsphere._collect_mors_and_attributes(server_instance) log.error.assert_not_called() assert len(mor_attrs) == 1 obj.missingSet =", "in the cache for metrics vsphere.excluded_host_tags = [\"vsphere_host\"] mocked_vm =", "guest hostname instance[\"use_guest_hostname\"] = True vsphere._cache_morlist_raw(instance) assert mock_get_all_objs.call_args[1][\"use_guest_hostname\"] with mock.patch(\"datadog_checks.vsphere.vsphere.vmodl\"):", "== 6 # One for each VM/host, datacenters are not", "B009 return event def migrated_event(): now = datetime.utcnow() vm =", "with mock.patch.object(vsphere, 'set_external_tags') as set_external_tags: vsphere.check(instance) set_external_tags.assert_called_once() all_the_tags = dict(set_external_tags.call_args[0][0])", "\"Objects queue is not initialized yet for instance %s, skipping", "was green and it's now red.\", tags=['foo:bar'] ) def test_events_gray_handled(aggregator,", "vsphere.in_compatibility_mode.return_value = True vsphere._collect_metrics_async(instance, []) vsphere.log.debug.assert_called_with('Skipping unknown `%s` metric.', 'unknown')", "# __init__ time and we need to instantiate the check", "name='vm1') vm_arg = vim.event.VmEventArgument(vm=vm) host = MockedMOR(spec='HostSystem') host_arg = vim.event.HostEventArgument(host=host,", "aggregator.assert_event( \"vCenter monitor status changed on this alarm, it was", "[event] vsphere.check(instance) aggregator.assert_event( \"vCenter monitor status changed on this alarm,", "\"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, \"runtime.host\": non_filtered_host, }, } regex = {'host_include': '^(?!filtered_.+)'}", "} in obj_list[vim.VirtualMachine] assert { \"mor_type\": \"vm\", \"mor\": vm_host_parent, \"hostname\":", "use guest hostname instance[\"use_guest_hostname\"] = True vsphere._cache_morlist_raw(instance) assert mock_get_all_objs.call_args[1][\"use_guest_hostname\"] with", "\"mocked_host\", \"parent\": None}, mocked_datastore: {}, mocked_cluster: 
{\"name\": \"cluster\"}, mocked_datacenter: {\"parent\":", "[]) assert len(obj_list[vim.VirtualMachine]) == 2 assert { \"mor_type\": \"vm\", \"mor\":", "assert check.cache_config.get_interval(CacheConfig.Morlist, i_key) == 42 assert check.cache_config.get_interval(CacheConfig.Metadata, i_key) == -42", "name='host1') host_dest = MockedMOR(spec='HostSystem') host_dest_arg = vim.event.HostEventArgument(host=host_dest, name='host2') dc =", "then 2 clusters, then the datastore assert vsphere._process_mor_objects_queue_async.call_count == 3", "mocked_vm: { \"name\": \"mocked_vm\", \"parent\": mocked_host, \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, }, mocked_host:", "def test_events_tags(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value", ") from .utils import MockedMOR, assertMOR, disable_thread_pool, get_mocked_server SERVICE_CHECK_TAGS =", "REFRESH_MORLIST_INTERVAL)) check.cache_config.set_last(CacheConfig.Metadata, i_key, now - (2 * REFRESH_METRICS_METADATA_INTERVAL)) with mock.patch(\"time.time\",", "mock_get_all_objs.call_args[1][\"use_guest_hostname\"] # use guest hostname instance[\"use_guest_hostname\"] = True vsphere._cache_morlist_raw(instance) assert", "= { vm_no_parent: {\"name\": \"vm_no_parent\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn}, vm_no_powerstate: {\"name\": \"vm_no_powerstate\"},", "\"vsphere_compute:compute_resource2\", \"vsphere_cluster:compute_resource2\", \"vsphere_type:host\", ] assertMOR(vsphere, instance, name=\"host3\", spec=\"host\", tags=tags) #", "as SmartConnect: SmartConnect.return_value = get_mocked_server() check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.OK, tags=SERVICE_CHECK_TAGS", "check multiple times check = VSphereCheck('vsphere', {}, {}, [instance]) i_key", "name='ds1') 
ds_dest = MockedMOR(spec='Datastore') ds_dest_arg = vim.event.DatastoreEventArgument(datastore=ds_dest, name='ds2') event =", "test_events_gray_ignored(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) event =", "42, 'refresh_metrics_metadata_interval': -42, 'batch_property_collector_size': -1, } check = VSphereCheck('vsphere', init_config,", "\"mor_type\": \"datastore\", \"mor\": mocked_datastore, \"hostname\": None, \"tags\": [\"vsphere_datastore:unknown\", \"vsphere_type:datastore\"], }", "-42, 'batch_property_collector_size': -1, } check = VSphereCheck('vsphere', init_config, {}, [instance])", "[ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_host:host3', 'vsphere_host:host3', 'vsphere_type:vm',", "vsphere._get_server_instance(instance) result = MagicMock() result.value = [23.4] server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result],", "vim.VirtualMachinePowerState.poweredOn}, mocked_host: {\"name\": \"mocked_host\", \"parent\": None}, mocked_datastore: {}, mocked_cluster: {\"name\":", "{ \"mor_type\": \"vm\", \"mor\": non_filtered_vm, \"hostname\": \"this_vm_is_not_filtered\", \"tags\": [\"vsphere_host:non_filtered_host_number_1\", \"vsphere_type:vm\"],", "vsphere.format_metric_name.reset_mock() server_instance.content.perfManager.perfCounter = [counter] vsphere._cache_metrics_metadata(instance) assert not vsphere.metadata_cache._metric_ids[i_key] assert len(vsphere.metadata_cache._metadata[i_key])", "with pytest.raises(BadConfigError): # Must define a unique 'name' per vCenter", "# Compatibility mode instance[\"all_metrics\"] = False del instance[\"collection_level\"] vsphere.format_metric_name.reset_mock() server_instance.content.perfManager.perfCounter", "= MagicMock() 
vsphere.in_compatibility_mode.return_value = False vsphere._collect_metrics_async(instance, []) aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname=\"foo\")", "ext_host_tags = vsphere.get_external_host_tags() # vsphere_host tag not in external metadata", "= MockedMOR(spec=\"VirtualMachine\", name=\"foo\", label=True) assert not VSphereCheck._is_excluded( included_vm, {\"customValue\": included_vm.customValue},", "non_filtered_vm: { \"name\": \"this_vm_is_not_filtered\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, \"runtime.host\": non_filtered_host, }, }", "metadata for host, source_tags in ext_host_tags: if host == u\"mocked_vm\":", "with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [migrated_event()] vsphere.event_config['vsphere_mock'] =", "for res_type in RESOURCE_TYPE_METRICS) == 0 assert vsphere._process_mor_objects_queue_async.call_count == 5", "(C) Datadog, Inc. 
2010-2017 # All rights reserved # Licensed", "}, non_filtered_host: {\"name\": \"non_filtered_host_number_1\", \"parent\": None}, non_filtered_vm: { \"name\": \"this_vm_is_not_filtered\",", "it was gray and it's now red.\", tags=['foo:bar'] ) event", "with mock.patch(\"datadog_checks.vsphere.vsphere.vmodl\"): obj = MagicMock(missingSet=None, obj=\"obj\") result = MagicMock(token=None, objects=[obj])", "to collect some attributes and that we handle the case", "instance[\"host_include_only_regex\"] = \"host[2-9]\" instance[\"vm_include_only_regex\"] = \"vm[^2]\" instance[\"include_only_marked\"] = True #", "MockedMOR(spec=\"HostSystem\") filtered_vm = MockedMOR(spec=\"VirtualMachine\") non_filtered_host = MockedMOR(spec=\"HostSystem\") non_filtered_vm = MockedMOR(spec=\"VirtualMachine\")", "RESOURCE_TYPE_METRICS) == 0 assert vsphere._process_mor_objects_queue_async.call_count == 5 # 2 datacenters,", "'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_type:host', ] assert all_the_tags['vm2'][SOURCE_TYPE]", "check.excluded_host_tags == [] def test_excluded_host_tags(vsphere, instance, aggregator): # Check default", "== 1 def test__collect_metrics_async_compatibility(vsphere, instance): server_instance = vsphere._get_server_instance(instance) server_instance.content.perfManager.QueryPerf.return_value =", "vsphere._get_all_objs(server_instance, None, False, []) assert len(obj_list[vim.VirtualMachine]) == 2 assert {", "Default value assert not mock_get_all_objs.call_args[1][\"use_guest_hostname\"] # use guest hostname instance[\"use_guest_hostname\"]", "name=\"bar\") assert VSphereCheck._is_excluded(excluded_host, {\"name\": excluded_host.name}, include_regexes, None) assert VSphereCheck._is_excluded(excluded_vm, {\"name\":", "get_mocked_server SERVICE_CHECK_TAGS = [\"vcenter_server:vsphere_mock\", \"vcenter_host:None\", 
\"foo:bar\"] def test__init__(instance): with pytest.raises(BadConfigError):", "'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['host2'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_datacenter:datacenter1',", "assert check.excluded_host_tags == [] # Test host tags are excluded", "\"kb\"} vsphere.in_compatibility_mode = MagicMock() vsphere.in_compatibility_mode.return_value = False vsphere.check(instance) ext_host_tags =", "assert len(obj_list[vim.Datacenter]) == 1 assert { \"mor_type\": \"datacenter\", \"mor\": mocked_datacenter,", "Sample(s) include_regexes = None include_only_marked = True # OK included_vm", "= vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [migrated_event()] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance)", "= MockedMOR(spec='HostSystem') host_dest_arg = vim.event.HostEventArgument(host=host_dest, name='host2') dc = MockedMOR(spec='Datacenter') dc_arg", "excluded_vm.name}, include_regexes, None) # Sample(s) include_regexes = None include_only_marked =", "= VSphereCheck('vsphere', {}, {}, [instance]) i_key = check._instance_key(instance) # first", "'vsphere_host:host1', 'vsphere_host:host2', 'vsphere_datacenter:dc1', 'vsphere_datacenter:dc2', ], ) server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value", "assert len(call_args[0][1]) == 1 def test__collect_metrics_async_compatibility(vsphere, instance): server_instance = vsphere._get_server_instance(instance)", "multiple times check = VSphereCheck('vsphere', {}, {}, [instance]) i_key =", "== obj_list[vim.VirtualMachine][0] assert { \"mor_type\": \"host\", \"mor\": non_filtered_host, \"hostname\": \"non_filtered_host_number_1\",", "(2 * REFRESH_METRICS_METADATA_INTERVAL)) with mock.patch(\"time.time\", return_value=now): assert not check._should_cache(instance, 
CacheConfig.Morlist)", "import time from datetime import datetime import mock import pytest", "metric.', 'unknown') vsphere.log.reset_mock() vsphere.in_compatibility_mode.return_value = False vsphere._collect_metrics_async(instance, []) vsphere.log.debug.assert_not_called() def", "assert \"vsphere_host:\" not in tag break # vsphere_host tag still", "-1, } check = VSphereCheck('vsphere', init_config, {}, [instance]) i_key =", "{ \"name\": \"this_vm_is_filtered\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, \"runtime.host\": filtered_host, }, non_filtered_host: {\"name\":", "MockedMOR(spec=\"HostSystem\", name=\"bar\") excluded_vm = MockedMOR(spec=\"VirtualMachine\", name=\"bar\") assert VSphereCheck._is_excluded(excluded_host, {\"name\": excluded_host.name},", "vsphere.mor_cache = MagicMock() vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value = {\"name\": \"unknown\"}", "machine\", exact_match=False, tags=[ 'foo:bar', 'vsphere_host:host1', 'vsphere_host:host2', 'vsphere_datacenter:dc1', 'vsphere_datacenter:dc2', ], )", "test__collect_mors_and_attributes(vsphere, instance): \"\"\" Test that we check for errors when", "time and we need to instantiate the check multiple times", "= MagicMock() server.CurrentTime.side_effect = Exception() SmartConnect.side_effect = None SmartConnect.return_value =", "obj_list[vim.Datastore] assert len(obj_list[vim.Datacenter]) == 1 assert { \"mor_type\": \"datacenter\", \"mor\":", "\"hostname\": None, \"tags\": [\"vsphere_cluster:cluster\", \"vsphere_type:cluster\"], } in obj_list[vim.ClusterComputeResource] def test__collect_mors_and_attributes(vsphere,", "MockedMOR(spec='VirtualMachine', name='vm1') vm_arg = vim.event.VmEventArgument(vm=vm) host = MockedMOR(spec='HostSystem') host_arg =", "check for errors when collecting properties with property collector \"\"\"", "= MockedMOR(spec=\"Alarm\") alarm_arg = vim.event.AlarmEventArgument(alarm=alarm, name='alarm1') entity = 
vim.event.ManagedEntityEventArgument(entity=vm, name='vm1')", "from mock import MagicMock from pyVmomi import vim from datadog_checks.vsphere", "= True vsphere._collect_metrics_async(instance, []) vsphere.log.debug.assert_called_with('Skipping unknown `%s` metric.', 'unknown') vsphere.log.reset_mock()", "defaults so we also test # configuration is properly propagated", "name=\"unknown folder\"), \"name\": \"datacenter\"}, } with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): obj_list =", "{'host_include': \"f[o]+\", 'vm_include': \"f[o]+\"} # OK included_host = MockedMOR(spec=\"HostSystem\", name=\"foo\")", "\"name\" counter.rollupType = \"rollup\" assert vsphere.format_metric_name(counter, compatibility=True) == \"group.name\" for", "MagicMock() server.CurrentTime.side_effect = Exception() SmartConnect.side_effect = None SmartConnect.return_value = server", "each VM/host, datacenters are not collected for call_args in vsphere._collect_metrics_async.call_args_list:", "init_config, {}, [instance]) i_key = check._instance_key(instance) assert check.time_started > 0", "server_instance.content.eventManager.QueryEvents.return_value = [migrated_event()] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event( \"John", "not initialized yet for instance %s, skipping processing\", vsphere._instance_key(instance) )", "'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['host1'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_datacenter:datacenter1',", "instance, name=\"vm4\", spec=\"vm\", subset=True, tags=tags) def test_use_guest_hostname(vsphere, instance): # Default", "it's now red.\", tags=['foo:bar'] ) event = alarm_event(from_status='yellow', to_status='gray', message='Went", "= { 'clean_morlist_interval': 50, 'refresh_morlist_interval': 42, 'refresh_metrics_metadata_interval': -42, 
'batch_property_collector_size': -1,", "Filtered out - vm3 # Powered off - vm4 ```", "\"mocked_vm\", \"parent\": mocked_host, \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, }, mocked_host: {\"name\": \"mocked_host\", \"parent\":", "assertMOR(vsphere, instance, count=8) # ...on hosts assertMOR(vsphere, instance, spec=\"host\", count=2)", "# first run should always cache assert check._should_cache(instance, CacheConfig.Morlist) assert", "vm_no_parent: {\"name\": \"vm_no_parent\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn}, vm_no_powerstate: {\"name\": \"vm_no_powerstate\"}, vm_host_parent: {\"parent\":", "assert VSphereCheck._is_excluded(included_vm, {\"customValue\": []}, include_regexes, include_only_marked) def test_vms_in_filtered_host_are_filtered(vsphere, instance): \"\"\"Test", "assert len(vsphere.metadata_cache._metadata[i_key]) == 1 vsphere.format_metric_name.assert_called_once_with(counter, compatibility=True) def test_in_compatibility_mode(vsphere, instance): vsphere.log", "'batch_property_collector_size': -1, } check = VSphereCheck('vsphere', init_config, {}, [instance]) i_key", "- (2 * REFRESH_METRICS_METADATA_INTERVAL)) with mock.patch(\"time.time\", return_value=now): assert not check._should_cache(instance,", "assert all_the_tags['host3'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2',", "\"mor_type\": \"host\", \"mor\": non_filtered_host, \"hostname\": \"non_filtered_host_number_1\", \"tags\": [\"vsphere_type:host\"], } ==", "\"vsphere_cluster:compute_resource2\", \"vsphere_host:host3\", \"vsphere_type:vm\", ] assertMOR(vsphere, instance, name=\"vm4\", spec=\"vm\", subset=True, tags=tags)", "check = VSphereCheck('vsphere', {\"excluded_host_tags\": [\"vsphere_host\"]}, {}, [instance]) assert check.excluded_host_tags ==", "Exclude \"non-labeled\" virtual 
machines when the user configuration instructs to.", "short_rollup in SHORT_ROLLUP.items(): counter.rollupType = rollup assert vsphere.format_metric_name(counter) == \"group.name.{}\".format(short_rollup)", "\"vcenter_server:vsphere_mock\", \"vsphere_folder:rootFolder\", \"vsphere_folder:folder1\", \"vsphere_datacenter:datacenter2\", \"vsphere_compute:compute_resource2\", \"vsphere_cluster:compute_resource2\", \"vsphere_type:host\", ] assertMOR(vsphere, instance,", "vsphere.in_compatibility_mode = MagicMock() vsphere.log = MagicMock() vsphere.in_compatibility_mode.return_value = True vsphere._collect_metrics_async(instance,", "host3 - vm1 # Not labeled - vm2 # Filtered", "License (see LICENSE) from __future__ import unicode_literals import time from", "\"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn}, vm_no_powerstate: {\"name\": \"vm_no_powerstate\"}, vm_host_parent: {\"parent\": mocked_host, \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn},", "to process the 2 datacenters, then 2 clusters, then the", "event def test_events(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance)", "this alarm, it was gray and it's now red.\", tags=['foo:bar']", "fixtures for the check instance, some params are set at", "= \"vm[^2]\" instance[\"include_only_marked\"] = True # Discover hosts and virtual", "[\"vcenter_server:vsphere_mock\", \"vcenter_host:None\", \"foo:bar\"] def test__init__(instance): with pytest.raises(BadConfigError): # Must define", "def alarm_event(from_status='green', to_status='red', message='Some error'): now = datetime.utcnow() vm =", ") def test_service_check_ok(aggregator, instance): check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance]))", "server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result], entity=mocked_vm)] vsphere.metadata_cache = MagicMock() 
vsphere.metadata_cache.get_metadata.return_value = {\"name\":", "def test__collect_metrics_async_hostname(vsphere, instance, aggregator): server_instance = vsphere._get_server_instance(instance) result = MagicMock()", "assert all_the_tags['vm1'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2',", "{\"parent\": mocked_host, \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn}, mocked_host: {\"name\": \"mocked_host\", \"parent\": None}, mocked_datastore:", "alarm_event(from_status='yellow', to_status='gray', message='Went from Yellow to Gray') server_instance.content.eventManager.QueryEvents.return_value = [event]", "vim.event.DatastoreEventArgument(datastore=ds_dest, name='ds2') event = vim.event.VmBeingHotMigratedEvent( vm=vm_arg, userName='John', fullFormattedMessage='Some error', createdTime=now,", "initialized vsphere.log.debug.assert_called_once_with( \"Objects queue is not initialized yet for instance", "REFRESH_MORLIST_INTERVAL, 'refresh_metrics_metadata_interval': 2 * REFRESH_METRICS_METADATA_INTERVAL, } check = VSphereCheck('vsphere', init_config,", "= MockedMOR(spec='Datastore') ds_dest_arg = vim.event.DatastoreEventArgument(datastore=ds_dest, name='ds2') event = vim.event.VmBeingHotMigratedEvent( vm=vm_arg,", "= Exception() with pytest.raises(ConnectionError): check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS", "instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()] vsphere.event_config['vsphere_mock']", "fullFormattedMessage='Some error', createdTime=now, host=host_arg, destHost=host_dest_arg, datacenter=dc_arg, 
destDatacenter=dc_dest_arg, ds=ds_arg, destDatastore=ds_dest_arg, )", "and it's now gray.\", tags=['foo:bar'], alert_type='info', ) def test_events_gray_ignored(aggregator, vsphere,", "ds_arg = vim.event.DatastoreEventArgument(datastore=ds, name='ds1') ds_dest = MockedMOR(spec='Datastore') ds_dest_arg = vim.event.DatastoreEventArgument(datastore=ds_dest,", "properly propagated init_config = { 'refresh_morlist_interval': 2 * REFRESH_MORLIST_INTERVAL, 'refresh_metrics_metadata_interval':", "\"mor_type\": \"cluster\", \"mor\": mocked_cluster, \"hostname\": None, \"tags\": [\"vsphere_cluster:cluster\", \"vsphere_type:cluster\"], }", "\"non_filtered_host_number_1\", \"parent\": None}, non_filtered_vm: { \"name\": \"this_vm_is_not_filtered\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, \"runtime.host\":", "[\"vsphere_host\"]}, {}, [instance]) assert check.excluded_host_tags == [\"vsphere_host\"] instance[\"excluded_host_tags\"] = []", "# (C) Datadog, Inc. 2010-2017 # All rights reserved #", "= MagicMock() vsphere.log = log mor_attrs = vsphere._collect_mors_and_attributes(server_instance) log.error.assert_not_called() assert", "= False with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) # Called once to", "= MagicMock() vsphere._cache_metrics_metadata(instance) vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) vsphere.collect_metrics(instance) assert vsphere._collect_metrics_async.call_count == 6", "vsphere.metadata_cache.set_metric_ids.assert_called_once() def test__cache_metrics_metadata_compatibility(vsphere, instance): server_instance = vsphere._get_server_instance(instance) i_key = vsphere._instance_key(instance)", "\"\"\" Test the collect_realtime_only parameter acts as expected \"\"\" vsphere._process_mor_objects_queue_async", "it was green and it's now red.\", tags=['foo:bar'] ) def", "def test__instance_key(vsphere, instance): 
assert vsphere._instance_key(instance) == \"vsphere_mock\" del instance['name'] with", "in external metadata for host, source_tags in ext_host_tags: if host", "= MagicMock() vsphere.in_compatibility_mode.return_value = False vsphere.check(instance) ext_host_tags = vsphere.get_external_host_tags() #", "= vim.event.VmBeingHotMigratedEvent( vm=vm_arg, userName='John', fullFormattedMessage='Some error', createdTime=now, host=host_arg, destHost=host_dest_arg, datacenter=dc_arg,", "as set_external_tags: vsphere.check(instance) set_external_tags.assert_called_once() all_the_tags = dict(set_external_tags.call_args[0][0]) assert all_the_tags['vm4'][SOURCE_TYPE] ==", "2 datacenters, 2 clusters, 1 datastore def test_collect_realtime_only(vsphere, instance): \"\"\"", "= MockedMOR(spec=\"VirtualMachine\") vm_no_powerstate = MockedMOR(spec=\"VirtualMachine\") vm_host_parent = MockedMOR(spec=\"VirtualMachine\") mocked_host =", "were there are missing attributes \"\"\" server_instance = vsphere._get_server_instance(instance) vm_no_parent", "vsphere._cache_morlist_raw(instance) # Default value assert not mock_get_all_objs.call_args[1][\"use_guest_hostname\"] # use guest", "server_instance = vsphere._get_server_instance(instance) event = alarm_event(from_status='gray', message='Went from Gray to", "def test__init__(instance): with pytest.raises(BadConfigError): # Must define a unique 'name'", "call_args in vsphere._collect_metrics_async.call_args_list: # query_specs parameter should be a list", "in check.event_config assert not check.registry assert not check.latest_event_query assert check.batch_collector_size", "check.clean_morlist_interval == 50 assert len(check.event_config) == 1 assert 'vsphere_mock' in", "{\"name\": included_vm.name}, include_regexes, None) # Not OK! 
excluded_host = MockedMOR(spec=\"HostSystem\",", "batch size is 1 assert len(call_args[0][1]) == 1 instance[\"collect_realtime_only\"] =", "= {'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event( \"vCenter monitor status changed on", "server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event( \"vCenter", "i_key = vsphere._instance_key(instance) with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for", "] assert all_the_tags['vm2'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2',", "vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) assert vsphere._process_mor_objects_queue_async.call_count == 0 def test__cache_metrics_metadata(vsphere, instance): vsphere.metadata_cache", "test__collect_metrics_async_compatibility(vsphere, instance): server_instance = vsphere._get_server_instance(instance) server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[MagicMock()])] vsphere.mor_cache =", "vsphere._process_mor_objects_queue_async.call_count == 5 # 2 datacenters, 2 clusters, 1 datastore", "pytest from mock import MagicMock from pyVmomi import vim from", "False del instance[\"collection_level\"] vsphere.format_metric_name.reset_mock() server_instance.content.perfManager.perfCounter = [counter] vsphere._cache_metrics_metadata(instance) assert not", "= MagicMock() vsphere.metadata_cache.get_metadata.return_value = {\"name\": \"unknown\"} vsphere.in_compatibility_mode = MagicMock() vsphere.log", "succeeds, CurrentTime fails server = MagicMock() server.CurrentTime.side_effect = Exception() SmartConnect.side_effect", 
"tag=\"vsphere_host:mocked_host\", count=1) def test__is_excluded(): \"\"\" * Exclude hosts/vms not compliant", "source_tags in ext_host_tags: if host == u\"mocked_vm\": tags = source_tags[\"vsphere\"]", "datadog_checks.vsphere.vsphere import ( REFRESH_METRICS_METADATA_INTERVAL, REFRESH_MORLIST_INTERVAL, RESOURCE_TYPE_METRICS, SHORT_ROLLUP, ) from .utils", "vsphere._get_server_instance(instance) event = alarm_event(from_status='gray', message='Went from Gray to Red') server_instance.content.eventManager.QueryEvents.return_value", "is not initialized yet for instance %s, skipping processing\", vsphere._instance_key(instance)", "use defaults so we also test # configuration is properly", "be a list of size 1 since the batch size", "vim.event.DatacenterEventArgument(datacenter=dc_dest, name='dc2') ds = MockedMOR(spec='Datastore') ds_arg = vim.event.DatastoreEventArgument(datastore=ds, name='ds1') ds_dest", "excluded_host.name}, include_regexes, None) assert VSphereCheck._is_excluded(excluded_vm, {\"name\": excluded_vm.name}, include_regexes, None) #", "value=23.4, hostname=\"foo\") def test_check(vsphere, instance): \"\"\" Test the check() method", "Exception() SmartConnect.side_effect = None SmartConnect.return_value = server with pytest.raises(ConnectionError): check.check(instance)", "vim.event.DatacenterEventArgument(datacenter=dc, name='dc1') dc_dest = MockedMOR(spec='Datacenter') dc_dest_arg = vim.event.DatacenterEventArgument(datacenter=dc_dest, name='dc2') ds", "not VSphereCheck._is_excluded( included_vm, {\"customValue\": included_vm.customValue}, include_regexes, include_only_marked ) # Not", "in tags: assert \"vsphere_host:\" not in tag break # vsphere_host", "= True # OK included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\", label=True) assert", "tags=tags) tags = [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:rootFolder\", \"vsphere_folder:folder1\", \"vsphere_datacenter:datacenter2\", \"vsphere_compute:compute_resource2\", 
\"vsphere_cluster:compute_resource2\",", "result.value = [23.4] server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result], entity=mocked_vm)] vsphere.metadata_cache = MagicMock()", "explicitly set cache expiration times, don't use defaults so we", "status changed on this alarm, it was green and it's", "datastore def test_collect_realtime_only(vsphere, instance): \"\"\" Test the collect_realtime_only parameter acts", "include_only_marked ) # Not OK included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\") assert", "\"this_vm_is_filtered\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, \"runtime.host\": filtered_host, }, non_filtered_host: {\"name\": \"non_filtered_host_number_1\", \"parent\":", "vsphere.log.warning.assert_called_once() def test_format_metric_name(vsphere): counter = MagicMock() counter.groupInfo.key = \"group\" counter.nameInfo.key", "i_key = check._instance_key(instance) # first run should always cache assert", "import unicode_literals import time from datetime import datetime import mock", "%s, skipping processing\", vsphere._instance_key(instance) ) vsphere.batch_morlist_size = 1 i_key =", "in obj_list[vim.Datacenter] assert len(obj_list[vim.ClusterComputeResource]) == 1 assert { \"mor_type\": \"cluster\",", "1 assert len(call_args[0][1]) == 1 def test__collect_metrics_async_compatibility(vsphere, instance): server_instance =", "check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance])) with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect:", "obj=\"obj\") result = MagicMock(token=None, objects=[obj]) server_instance.content.propertyCollector.RetrievePropertiesEx.return_value = result log =", "we handle the case were there are missing attributes \"\"\"", "vsphere.in_compatibility_mode.return_value = False vsphere.check(instance) ext_host_tags = vsphere.get_external_host_tags() # vsphere_host tag", "name when guest hostname not available 
assertMOR(vsphere, instance, name=\"vm1\", spec=\"vm\",", "with pytest.raises(ConnectionError): check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS ) def", "None) # Not OK! excluded_host = MockedMOR(spec=\"HostSystem\", name=\"bar\") excluded_vm =", "= MockedMOR(spec='HostSystem') host_arg = vim.event.HostEventArgument(host=host, name='host1') host_dest = MockedMOR(spec='HostSystem') host_dest_arg", "test__collect_metrics_async_hostname(vsphere, instance, aggregator): server_instance = vsphere._get_server_instance(instance) result = MagicMock() result.value", "False vsphere.check(instance) ext_host_tags = vsphere.get_external_host_tags() # vsphere_host tag not in", "mocked_datacenter = MockedMOR(spec=\"Datacenter\") mocked_cluster = MockedMOR(spec=\"ClusterComputeResource\") mocked_mors_attrs = { vm_no_parent:", "# Default value with mock.patch(\"datadog_checks.vsphere.VSphereCheck._get_all_objs\") as mock_get_all_objs, mock.patch( \"datadog_checks.vsphere.vsphere.vmodl\" ):", "\"average\" counter.key = 1 vsphere.format_metric_name = MagicMock() # New way", "aggregator.reset() # SmartConnect succeeds, CurrentTime fails server = MagicMock() server.CurrentTime.side_effect", "`*_include` configuration. 
* Exclude \"non-labeled\" virtual machines when the user", "[]}, include_regexes, include_only_marked) def test_vms_in_filtered_host_are_filtered(vsphere, instance): \"\"\"Test that all vms", "instance VSphereCheck('vsphere', {}, {}, [{'': ''}]) init_config = { 'clean_morlist_interval':", "0 assert not check.server_instances assert check.cache_config.get_interval(CacheConfig.Morlist, i_key) == 42 assert", "[23.4] server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result], entity=mocked_vm)] vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value =", "name='vm1') event = vim.event.AlarmStatusChangedEvent( entity=entity, fullFormattedMessage=message, createdTime=now, to=to_status, datacenter=dc_arg, alarm=alarm_arg", "vsphere._get_server_instance(instance) event = alarm_event(from_status='gray', to_status='green', message='Went from Gray to Green')", "instance): server_instance = vsphere._get_server_instance(instance) i_key = vsphere._instance_key(instance) counter = MagicMock()", "processing assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 0", "\"runtime.host\": filtered_host, }, non_filtered_host: {\"name\": \"non_filtered_host_number_1\", \"parent\": None}, non_filtered_vm: {", "mock.patch(\"datadog_checks.vsphere.VSphereCheck._get_all_objs\") as mock_get_all_objs, mock.patch( \"datadog_checks.vsphere.vsphere.vmodl\" ): vsphere._cache_morlist_raw(instance) # Default value", "server_instance = vsphere._get_server_instance(instance) i_key = vsphere._instance_key(instance) counter = MagicMock() counter.rollupType", "1 vsphere.format_metric_name = MagicMock() # New way instance[\"collection_level\"] = 3", "to Green') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance)", "CacheConfig from datadog_checks.vsphere.common import SOURCE_TYPE from 
datadog_checks.vsphere.errors import BadConfigError, ConnectionError", "-42 assert check.clean_morlist_interval == 50 assert len(check.event_config) == 1 assert", "assert vsphere.format_metric_name(counter) == \"group.name.{}\".format(short_rollup) def test_collect_metrics(vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere.batch_morlist_size", "import MockedMOR, assertMOR, disable_thread_pool, get_mocked_server SERVICE_CHECK_TAGS = [\"vcenter_server:vsphere_mock\", \"vcenter_host:None\", \"foo:bar\"]", "vsphere.in_compatibility_mode(instance, log_warning=True) vsphere.log.warning.assert_called_once() def test_format_metric_name(vsphere): counter = MagicMock() counter.groupInfo.key =", "= server with pytest.raises(ConnectionError): check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS", "with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): instance[\"host_include_only_regex\"] = \"host[2-9]\" instance[\"vm_include_only_regex\"] = \"vm[^2]\" instance[\"include_only_marked\"] =", "datacenters, then 2 clusters, then the datastore assert vsphere._process_mor_objects_queue_async.call_count ==", "42 assert check.cache_config.get_interval(CacheConfig.Metadata, i_key) == -42 assert check.clean_morlist_interval == 50", "aggregator.events event = alarm_event(from_status='green', to_status='gray', message='Went from Green to Gray')", "mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): obj_list = vsphere._get_all_objs(server_instance, regex, False, []) assert len(obj_list[vim.VirtualMachine])", "counter.rollupType = \"rollup\" assert vsphere.format_metric_name(counter, compatibility=True) == \"group.name\" for rollup,", "= vim.event.DatastoreEventArgument(datastore=ds_dest, name='ds2') event = vim.event.VmBeingHotMigratedEvent( vm=vm_arg, userName='John', fullFormattedMessage='Some 
error',", "tags = source_tags[\"vsphere\"] for tag in tags: assert \"vsphere_host:\" not", "vsphere.metadata_cache = MagicMock() vsphere._cache_metrics_metadata(instance) vsphere.metadata_cache.init_instance.assert_called_once_with(vsphere._instance_key(instance)) vsphere.metadata_cache.set_metadata.assert_called_once() vsphere.metadata_cache.set_metric_ids.assert_called_once() def test__cache_metrics_metadata_compatibility(vsphere, instance):", "host == u\"mocked_vm\": tags = source_tags[\"vsphere\"] for tag in tags:", "folder\"), \"name\": \"datacenter\"}, } with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): obj_list = vsphere._get_all_objs(server_instance,", "to_status='gray', message='Went from Yellow to Gray') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.check(instance)", "= None SmartConnect.return_value = server with pytest.raises(ConnectionError): check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME,", "mocked_mors_attrs = { mocked_vm: { \"name\": \"mocked_vm\", \"parent\": mocked_host, \"runtime.powerState\":", "\"vsphere_folder:rootFolder\", \"vsphere_datacenter:datacenter1\", \"vsphere_compute:compute_resource1\", \"vsphere_cluster:compute_resource1\", \"vsphere_type:host\", ] assertMOR(vsphere, instance, name=\"host2\", spec=\"host\",", "* Exclude hosts/vms not compliant with the user's `*_include` configuration.", "skipping processing\", vsphere._instance_key(instance) ) vsphere.batch_morlist_size = 1 i_key = vsphere._instance_key(instance)", "check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS ) def test_service_check_ok(aggregator, instance):", "topology: ``` rootFolder - datacenter1 - compute_resource1 - host1 #", "now - (2 * REFRESH_METRICS_METADATA_INTERVAL)) with mock.patch(\"time.time\", return_value=now): assert 
not", "and virtual machines vsphere._cache_morlist_raw(instance) # Assertions: 1 labeled+monitored VM +", "'refresh_morlist_interval': 42, 'refresh_metrics_metadata_interval': -42, 'batch_property_collector_size': -1, } check = VSphereCheck('vsphere',", "import BadConfigError, ConnectionError from datadog_checks.vsphere.vsphere import ( REFRESH_METRICS_METADATA_INTERVAL, REFRESH_MORLIST_INTERVAL, RESOURCE_TYPE_METRICS,", "instance): check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance])) with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as", "event = vim.event.VmBeingHotMigratedEvent( vm=vm_arg, userName='John', fullFormattedMessage='Some error', createdTime=now, host=host_arg, destHost=host_dest_arg,", "as expected \"\"\" vsphere._process_mor_objects_queue_async = MagicMock() instance[\"collect_realtime_only\"] = False with", "hosts, virtual machines. Input topology: ``` rootFolder - datacenter1 -", "vm_arg = vim.event.VmEventArgument(vm=vm) host = MockedMOR(spec='HostSystem') host_arg = vim.event.HostEventArgument(host=host, name='host1')", "50, 'refresh_morlist_interval': 42, 'refresh_metrics_metadata_interval': -42, 'batch_property_collector_size': -1, } check =", "machines instance[\"use_guest_hostname\"] = True vsphere._cache_morlist_raw(instance) assertMOR(vsphere, instance, spec=\"vm\", count=3) #", "'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_host:host3', 'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['host1'][SOURCE_TYPE] ==", "to a filtered host are also filtered\"\"\" server_instance = vsphere._get_server_instance(instance)", "MagicMock() result.value = [23.4] server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result])] mor = {\"hostname\":", "= MagicMock() instance[\"collection_level\"] = 2 assert not vsphere.in_compatibility_mode(instance) instance[\"all_metrics\"] =", "for errors when collecting 
properties with property collector \"\"\" server_instance", "= vsphere._get_server_instance(instance) server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[MagicMock()])] vsphere.mor_cache = MagicMock() vsphere.metadata_cache =", "\"hostname\": \"vm_no_parent\", \"tags\": [\"vsphere_host:unknown\", \"vsphere_type:vm\"], } in obj_list[vim.VirtualMachine] assert {", "case were there are missing attributes \"\"\" server_instance = vsphere._get_server_instance(instance)", "MagicMock() instance[\"collection_level\"] = 2 assert not vsphere.in_compatibility_mode(instance) instance[\"all_metrics\"] = True", "MockedMOR(spec=\"Datacenter\") dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1') alarm = MockedMOR(spec=\"Alarm\") alarm_arg =", "[\"vsphere_type:host\"], } in obj_list[vim.HostSystem] assert len(obj_list[vim.Datastore]) == 1 assert {", "\"rollup\" assert vsphere.format_metric_name(counter, compatibility=True) == \"group.name\" for rollup, short_rollup in", "LICENSE) from __future__ import unicode_literals import time from datetime import", "for call_args in vsphere._collect_metrics_async.call_args_list: # query_specs parameter should be a", "= disable_thread_pool(VSphereCheck('disk', {}, {}, [instance])) with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as", "instance): \"\"\" Test the check() method \"\"\" with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): with", "filtered_host = MockedMOR(spec=\"HostSystem\") filtered_vm = MockedMOR(spec=\"VirtualMachine\") non_filtered_host = MockedMOR(spec=\"HostSystem\") non_filtered_vm", "\"parent\": None}, non_filtered_vm: { \"name\": \"this_vm_is_not_filtered\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, \"runtime.host\": non_filtered_host,", "= {\"name\": \"mymetric\", \"unit\": \"kb\"} vsphere.in_compatibility_mode = MagicMock() vsphere.in_compatibility_mode.return_value =", 
"ds_dest_arg = vim.event.DatastoreEventArgument(datastore=ds_dest, name='ds2') event = vim.event.VmBeingHotMigratedEvent( vm=vm_arg, userName='John', fullFormattedMessage='Some", "vsphere.metadata_cache.get_metadata.return_value = {\"name\": \"unknown\"} vsphere.in_compatibility_mode = MagicMock() vsphere.log = MagicMock()", "[23.4] server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result])] mor = {\"hostname\": \"foo\"} vsphere.mor_cache =", "= [MagicMock(path=\"prop\", fault=\"fault\")] mor_attrs = vsphere._collect_mors_and_attributes(server_instance) log.error.assert_called_once_with('Unable to retrieve property", "mock.patch('datadog_checks.vsphere.vsphere.vmodl'): with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect: SmartConnect.return_value = get_mocked_server() check.check(instance) aggregator.assert_service_check(", "return_value=now): assert not check._should_cache(instance, CacheConfig.Morlist) assert not check._should_cache(instance, CacheConfig.Metadata) def", "include_only_marked) def test_vms_in_filtered_host_are_filtered(vsphere, instance): \"\"\"Test that all vms belonging to", "vsphere._get_server_instance(instance) server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[MagicMock()])] vsphere.mor_cache = MagicMock() vsphere.metadata_cache = MagicMock()", "# Not labeled - vm2 # Filtered out - vm3", "host1 # Filtered out - host2 - folder1 - datacenter2", "if host == u\"mocked_vm\": tags = source_tags[\"vsphere\"] for tag in", "] assert all_the_tags['host1'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_datacenter:datacenter1', 'vsphere_cluster:compute_resource1', 'vsphere_compute:compute_resource1',", "[counter] vsphere._cache_metrics_metadata(instance) server_instance.content.perfManager.QueryPerfCounterByLevel.assert_called_once_with(3) assert len(vsphere.metadata_cache._metric_ids[i_key]) == 1 assert 
len(vsphere.metadata_cache._metadata[i_key]) ==", "regex, False, []) assert len(obj_list[vim.VirtualMachine]) == 1 assert len(obj_list[vim.HostSystem]) ==", "res_type in RESOURCE_TYPE_METRICS) == 0 assert vsphere._process_mor_objects_queue_async.call_count == 0 #", "when the user configuration instructs to. \"\"\" # Sample(s) include_regexes", "instance): vsphere.log = MagicMock() instance[\"collection_level\"] = 2 assert not vsphere.in_compatibility_mode(instance)", "assert not VSphereCheck._is_excluded(included_host, {\"name\": included_host.name}, include_regexes, None) assert not VSphereCheck._is_excluded(included_vm,", "'refresh_morlist_interval': 2 * REFRESH_MORLIST_INTERVAL, 'refresh_metrics_metadata_interval': 2 * REFRESH_METRICS_METADATA_INTERVAL, } check", "green and it's now red.\", tags=['foo:bar'] ) def test_events_gray_handled(aggregator, vsphere,", "'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_host:host3', 'vsphere_host:host3', 'vsphere_type:vm', ]", "now red.\", tags=['foo:bar'] ) def test_events_tags(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):", "# vsphere_host tag still in cache for sending with metrics", "== 1 def test__cache_morlist_raw(vsphere, instance): \"\"\" Explore the vCenter infrastructure", "available assertMOR(vsphere, instance, name=\"vm1\", spec=\"vm\", subset=True) assertMOR(vsphere, instance, name=\"vm2_guest\", spec=\"vm\",", "createdTime=now, host=host_arg, destHost=host_dest_arg, datacenter=dc_arg, destDatacenter=dc_dest_arg, ds=ds_arg, destDatastore=ds_dest_arg, ) return event", "{}, {}, [instance]) assert check.excluded_host_tags == [] check = VSphereCheck('vsphere',", "include_regexes, None) # Not OK! 
excluded_host = MockedMOR(spec=\"HostSystem\", name=\"bar\") excluded_vm", "vsphere.in_compatibility_mode.return_value = False vsphere._collect_metrics_async(instance, []) vsphere.log.debug.assert_not_called() def test__collect_metrics_async_hostname(vsphere, instance, aggregator):", "assert len(check.event_config) == 1 assert 'vsphere_mock' in check.event_config assert not", "of instance config over init config check = VSphereCheck('vsphere', {},", "MagicMock() vsphere._cache_metrics_metadata(instance) vsphere.metadata_cache.init_instance.assert_called_once_with(vsphere._instance_key(instance)) vsphere.metadata_cache.set_metadata.assert_called_once() vsphere.metadata_cache.set_metric_ids.assert_called_once() def test__cache_metrics_metadata_compatibility(vsphere, instance): server_instance =", "= VSphereCheck('vsphere', init_config, {}, [instance]) i_key = check._instance_key(instance) assert check.time_started", "from datadog_checks.vsphere.cache_config import CacheConfig from datadog_checks.vsphere.common import SOURCE_TYPE from datadog_checks.vsphere.errors", ") return event def test_events(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance", "assert vsphere._instance_key(instance) == \"vsphere_mock\" del instance['name'] with pytest.raises(BadConfigError): vsphere._instance_key(instance) def", "{\"customValue\": included_vm.customValue}, include_regexes, include_only_marked ) # Not OK included_vm =", "assertMOR(vsphere, instance, spec=\"host\", count=2) tags = [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:rootFolder\", \"vsphere_datacenter:datacenter1\",", "the batch size is 1 assert len(call_args[0][1]) == 1 def", "instance[\"collection_level\"] vsphere.log.reset_mock() assert vsphere.in_compatibility_mode(instance) vsphere.log.warning.assert_not_called() assert vsphere.in_compatibility_mode(instance, log_warning=True) vsphere.log.warning.assert_called_once() def", "= MagicMock() 
vsphere._process_mor_objects_queue_async = MagicMock() vsphere._process_mor_objects_queue(instance) # Queue hasn't been", "{\"name\": excluded_host.name}, include_regexes, None) assert VSphereCheck._is_excluded(excluded_vm, {\"name\": excluded_vm.name}, include_regexes, None)", "in vsphere._collect_metrics_async.call_args_list: # query_specs parameter should be a list of", "error'): now = datetime.utcnow() vm = MockedMOR(spec='VirtualMachine') dc = MockedMOR(spec=\"Datacenter\")", "init_config, {}, [instance]) # simulate previous runs, set the last", "MockedMOR, assertMOR, disable_thread_pool, get_mocked_server SERVICE_CHECK_TAGS = [\"vcenter_server:vsphere_mock\", \"vcenter_host:None\", \"foo:bar\"] def", "\"vsphere_cluster:compute_resource2\", \"vsphere_type:host\", ] assertMOR(vsphere, instance, name=\"host3\", spec=\"host\", tags=tags) # ...on", "SHORT_ROLLUP.items(): counter.rollupType = rollup assert vsphere.format_metric_name(counter) == \"group.name.{}\".format(short_rollup) def test_collect_metrics(vsphere,", "'vsphere_type:vm', ] assert all_the_tags['host2'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_datacenter:datacenter1', 'vsphere_cluster:compute_resource1',", "+ 1 datastore. assertMOR(vsphere, instance, count=8) # ...on hosts assertMOR(vsphere,", "should be empty after processing assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type", "still in cache for sending with metrics aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname=\"mocked_vm\",", "2 clusters + 1 datastore. 
assertMOR(vsphere, instance, count=8) # ...on", "instance, spec=\"host\", count=2) tags = [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:rootFolder\", \"vsphere_datacenter:datacenter1\", \"vsphere_compute:compute_resource1\",", "[] # Test host tags are excluded from external host", "vim.event.VmEventArgument(vm=vm) host = MockedMOR(spec='HostSystem') host_arg = vim.event.HostEventArgument(host=host, name='host1') host_dest =", "# Discover hosts and virtual machines vsphere._cache_morlist_raw(instance) # Assertions: 1", "== [] def test_excluded_host_tags(vsphere, instance, aggregator): # Check default value", "VSphereCheck._is_excluded(included_vm, {\"customValue\": []}, include_regexes, include_only_marked) def test_vms_in_filtered_host_are_filtered(vsphere, instance): \"\"\"Test that", "mocked_host, \"hostname\": \"mocked_host\", \"tags\": [\"vsphere_type:host\"], } in obj_list[vim.HostSystem] assert len(obj_list[vim.Datastore])", "aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname=\"mocked_vm\", count=1) aggregator.assert_metric_has_tag('vsphere.mymetric', tag=\"vsphere_host:mocked_host\", count=1) def test__is_excluded(): \"\"\"", "assert { \"mor_type\": \"cluster\", \"mor\": mocked_cluster, \"hostname\": None, \"tags\": [\"vsphere_cluster:cluster\",", "init config check = VSphereCheck('vsphere', {}, {}, [instance]) assert check.excluded_host_tags", "counter = MagicMock() counter.groupInfo.key = \"group\" counter.nameInfo.key = \"name\" counter.rollupType", "all_the_tags['vm1'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_host:host3',", "server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True}", "True assert not 
vsphere.in_compatibility_mode(instance) vsphere.log.warning.assert_not_called() assert not vsphere.in_compatibility_mode(instance, log_warning=True) vsphere.log.warning.assert_called_once()", "\"non-labeled\" virtual machines when the user configuration instructs to. \"\"\"", "'vsphere_host:host3', 'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['host1'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder',", "VSphereCheck._is_excluded(excluded_host, {\"name\": excluded_host.name}, include_regexes, None) assert VSphereCheck._is_excluded(excluded_vm, {\"name\": excluded_vm.name}, include_regexes,", "[\"vsphere_folder:unknown\", \"vsphere_datacenter:datacenter\", \"vsphere_type:datacenter\"], } in obj_list[vim.Datacenter] assert len(obj_list[vim.ClusterComputeResource]) == 1", "vim.event.DatacenterEventArgument(datacenter=dc, name='dc1') alarm = MockedMOR(spec=\"Alarm\") alarm_arg = vim.event.AlarmEventArgument(alarm=alarm, name='alarm1') entity", "res_type in RESOURCE_TYPE_METRICS) == 0 assert vsphere._process_mor_objects_queue_async.call_count == 5 #", "all_the_tags['host2'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_datacenter:datacenter1', 'vsphere_cluster:compute_resource1', 'vsphere_compute:compute_resource1', 'vsphere_type:host', ]", "MagicMock() vsphere.mor_cache.get_mor.return_value = mor vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value = {\"name\":", "SERVICE_CHECK_TAGS = [\"vcenter_server:vsphere_mock\", \"vcenter_host:None\", \"foo:bar\"] def test__init__(instance): with pytest.raises(BadConfigError): #", "= MagicMock() vsphere.mor_cache.get_mor.return_value = mor vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value =", "entity = vim.event.ManagedEntityEventArgument(entity=vm, name='vm1') event = vim.event.AlarmStatusChangedEvent( entity=entity, fullFormattedMessage=message, createdTime=now,", "# noqa: 
B009 return event def migrated_event(): now = datetime.utcnow()", "time from datetime import datetime import mock import pytest from", "for tag in tags: assert \"vsphere_host:\" not in tag break", "'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_host:host3', 'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['host1'][SOURCE_TYPE] == [", "{}, [instance])) with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect: SmartConnect.return_value =", "'vsphere_cluster:compute_resource1', 'vsphere_compute:compute_resource1', 'vsphere_type:host', ] assert all_the_tags['host3'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder',", "fails server = MagicMock() server.CurrentTime.side_effect = Exception() SmartConnect.side_effect = None", "False vsphere._collect_metrics_async(instance, []) aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname=\"foo\") def test_check(vsphere, instance): \"\"\"", "mocked_datastore = MockedMOR(spec=\"Datastore\") mocked_datacenter = MockedMOR(spec=\"Datacenter\") mocked_cluster = MockedMOR(spec=\"ClusterComputeResource\") mocked_mors_attrs", "1 instance[\"collect_realtime_only\"] = False vsphere._cache_morlist_raw(instance) assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type", "execution time in the past check.cache_config.set_last(CacheConfig.Morlist, i_key, now - (2", "def test_in_compatibility_mode(vsphere, instance): vsphere.log = MagicMock() instance[\"collection_level\"] = 2 assert", "instance[\"collection_level\"] vsphere.format_metric_name.reset_mock() server_instance.content.perfManager.perfCounter = [counter] vsphere._cache_metrics_metadata(instance) assert not vsphere.metadata_cache._metric_ids[i_key] assert", "Test the collect_realtime_only parameter acts as expected \"\"\" vsphere._process_mor_objects_queue_async =", "\"tags\": 
[\"vsphere_type:host\"], } == obj_list[vim.HostSystem][0] def test__get_all_objs(vsphere, instance): \"\"\" Test", "now red.\", tags=['foo:bar'] ) def test_events_gray_handled(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):", "Powered off - vm4 ``` \"\"\" # Samples with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):", "filtered_host: {\"name\": \"filtered_host_number_1\", \"parent\": None}, filtered_vm: { \"name\": \"this_vm_is_filtered\", \"runtime.powerState\":", "MockedMOR(spec=\"VirtualMachine\", name=\"foo\", label=True) assert not VSphereCheck._is_excluded( included_vm, {\"customValue\": included_vm.customValue}, include_regexes,", "vsphere._instance_key(instance) def test__should_cache(instance): now = time.time() # do not use", "expected \"\"\" vsphere._process_mor_objects_queue_async = MagicMock() instance[\"collect_realtime_only\"] = False with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):", "\"mymetric\", \"unit\": \"kb\"} vsphere.in_compatibility_mode = MagicMock() vsphere.in_compatibility_mode.return_value = False vsphere._collect_metrics_async(instance,", "\"\"\" # Sample(s) include_regexes = {'host_include': \"f[o]+\", 'vm_include': \"f[o]+\"} #", "= time.time() # do not use fixtures for the check", "mocked_host = MockedMOR(spec=\"HostSystem\") mocked_datastore = MockedMOR(spec=\"Datastore\") mocked_datacenter = MockedMOR(spec=\"Datacenter\") mocked_cluster", "instance[\"include_only_marked\"] = True # Discover hosts and virtual machines vsphere._cache_morlist_raw(instance)", "not vsphere.in_compatibility_mode(instance, log_warning=True) vsphere.log.warning.assert_called_once() del instance[\"collection_level\"] vsphere.log.reset_mock() assert vsphere.in_compatibility_mode(instance) vsphere.log.warning.assert_not_called()", "{\"customValue\": []}, include_regexes, include_only_marked) def test_vms_in_filtered_host_are_filtered(vsphere, instance): \"\"\"Test that all", 
"vsphere._process_mor_objects_queue_async.reset_mock() with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) assert vsphere._process_mor_objects_queue_async.call_count == 0 def", "\"mor_type\": \"host\", \"mor\": mocked_host, \"hostname\": \"mocked_host\", \"tags\": [\"vsphere_type:host\"], } in", "name='dc1') dc_dest = MockedMOR(spec='Datacenter') dc_dest_arg = vim.event.DatacenterEventArgument(datacenter=dc_dest, name='dc2') ds =", "time.time() # do not use fixtures for the check instance,", "with property collector \"\"\" server_instance = vsphere._get_server_instance(instance) with mock.patch(\"datadog_checks.vsphere.vsphere.vmodl\"): obj", "check() method \"\"\" with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): with mock.patch.object(vsphere, 'set_external_tags') as set_external_tags:", "was gray and it's now red.\", tags=['foo:bar'] ) event =", "'vsphere_type:host', ] assert all_the_tags['host3'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2',", "test__process_mor_objects_queue(vsphere, instance): vsphere.log = MagicMock() vsphere._process_mor_objects_queue_async = MagicMock() vsphere._process_mor_objects_queue(instance) #", "True vsphere._cache_morlist_raw(instance) assert mock_get_all_objs.call_args[1][\"use_guest_hostname\"] with mock.patch(\"datadog_checks.vsphere.vsphere.vmodl\"): # Discover hosts and", "assert check.batch_collector_size == 0 assert check.batch_morlist_size == 50 assert check.excluded_host_tags", "vsphere._process_mor_objects_queue_async = MagicMock() instance[\"collect_realtime_only\"] = False with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance)", "check._should_cache(instance, CacheConfig.Metadata) def alarm_event(from_status='green', to_status='red', message='Some 
error'): now = datetime.utcnow()", "[ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_datacenter:datacenter1', 'vsphere_cluster:compute_resource1', 'vsphere_compute:compute_resource1', 'vsphere_type:host', ] def test_service_check_ko(aggregator,", "assert len(vsphere.metadata_cache._metadata[i_key]) == 1 vsphere.format_metric_name.assert_called_once_with(counter) # Compatibility mode instance[\"all_metrics\"] =", "REFRESH_METRICS_METADATA_INTERVAL)) with mock.patch(\"time.time\", return_value=now): assert not check._should_cache(instance, CacheConfig.Morlist) assert not", "mock_get_all_objs.call_args[1][\"use_guest_hostname\"] with mock.patch(\"datadog_checks.vsphere.vsphere.vmodl\"): # Discover hosts and virtual machines instance[\"use_guest_hostname\"]", "= vim.event.HostEventArgument(host=host_dest, name='host2') dc = MockedMOR(spec='Datacenter') dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1')", "mocked_host: {\"name\": \"mocked_host\", \"parent\": None}, } with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): server_instance", "only for call_args in vsphere._process_mor_objects_queue_async.call_args_list: # query_specs parameter should be", "\"vm_no_parent\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn}, vm_no_powerstate: {\"name\": \"vm_no_powerstate\"}, vm_host_parent: {\"parent\": mocked_host, \"runtime.powerState\":", "instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) event = alarm_event(from_status='gray', to_status='green',", "rollup, short_rollup in SHORT_ROLLUP.items(): counter.rollupType = rollup assert vsphere.format_metric_name(counter) ==", "[instance]) assert check.excluded_host_tags == [] # Test host tags are", "= Exception() SmartConnect.side_effect = None SmartConnect.return_value = server with pytest.raises(ConnectionError):", "res_type in 
RESOURCE_TYPE_METRICS) == 11 vsphere._process_mor_objects_queue(instance) # Object queue should", "retrieve property %s for object %s: %s', 'prop', 'obj', 'fault')", "server_instance = vsphere._get_server_instance(instance) with mock.patch(\"datadog_checks.vsphere.vsphere.vmodl\"): obj = MagicMock(missingSet=None, obj=\"obj\") result", "host tags are excluded from external host metadata, but still", "external host metadata, but still stored in the cache for", "with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS)", "False vsphere._cache_morlist_raw(instance) assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) ==", "] assert all_the_tags['vm1'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2',", "CacheConfig.Metadata) def alarm_event(from_status='green', to_status='red', message='Some error'): now = datetime.utcnow() vm", "value=23.4, hostname=\"mocked_vm\", count=1) aggregator.assert_metric_has_tag('vsphere.mymetric', tag=\"vsphere_host:mocked_host\", count=1) def test__is_excluded(): \"\"\" *", "vsphere._process_mor_objects_queue_async.call_args_list: # query_specs parameter should be a list of size", "instance): \"\"\" Test that we don't raise KeyError if the", "of size 1 since the batch size is 1 assert", "destHost=host_dest_arg, datacenter=dc_arg, destDatacenter=dc_dest_arg, ds=ds_arg, destDatastore=ds_dest_arg, ) return event def test_events(aggregator,", "\"\"\" # Samples with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): instance[\"host_include_only_regex\"] = \"host[2-9]\" instance[\"vm_include_only_regex\"] =", "= MockedMOR(spec=\"VirtualMachine\") vm_host_parent = MockedMOR(spec=\"VirtualMachine\") mocked_host = MockedMOR(spec=\"HostSystem\") 
mocked_datastore =", "OK included_host = MockedMOR(spec=\"HostSystem\", name=\"foo\") included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\") assert", "labeled - vm2 # Filtered out - vm3 # Powered", "hasn't been initialized vsphere.log.debug.assert_called_once_with( \"Objects queue is not initialized yet", "log.error.assert_not_called() assert len(mor_attrs) == 1 obj.missingSet = [MagicMock(path=\"prop\", fault=\"fault\")] mor_attrs", "= [MagicMock(value=[MagicMock()])] vsphere.mor_cache = MagicMock() vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value =", "= vim.event.VmEventArgument(vm=vm) host = MockedMOR(spec='HostSystem') host_arg = vim.event.HostEventArgument(host=host, name='host1') host_dest", "instance): vsphere.metadata_cache = MagicMock() vsphere._cache_metrics_metadata(instance) vsphere.metadata_cache.init_instance.assert_called_once_with(vsphere._instance_key(instance)) vsphere.metadata_cache.set_metadata.assert_called_once() vsphere.metadata_cache.set_metric_ids.assert_called_once() def test__cache_metrics_metadata_compatibility(vsphere,", "\"\"\" server_instance = vsphere._get_server_instance(instance) vm_no_parent = MockedMOR(spec=\"VirtualMachine\") vm_no_powerstate = MockedMOR(spec=\"VirtualMachine\")", "over init config check = VSphereCheck('vsphere', {}, {}, [instance]) assert", "\"vcenter_server:vsphere_mock\", \"vsphere_folder:folder1\", \"vsphere_datacenter:datacenter2\", \"vsphere_compute:compute_resource2\", \"vsphere_cluster:compute_resource2\", \"vsphere_host:host3\", \"vsphere_type:vm\", ] assertMOR(vsphere, instance,", "} == obj_list[vim.HostSystem][0] def test__get_all_objs(vsphere, instance): \"\"\" Test that we", "in ext_host_tags: if host == u\"mocked_vm\": tags = source_tags[\"vsphere\"] for", "folder1 - datacenter2 - compute_resource2 - host3 - vm1 #", "for instance %s, skipping processing\", vsphere._instance_key(instance) ) vsphere.batch_morlist_size = 1", "hosts and virtual machines 
vsphere._cache_morlist_raw(instance) # Assertions: 1 labeled+monitored VM", "assertMOR, disable_thread_pool, get_mocked_server SERVICE_CHECK_TAGS = [\"vcenter_server:vsphere_mock\", \"vcenter_host:None\", \"foo:bar\"] def test__init__(instance):", "destDatacenter=dc_dest_arg, ds=ds_arg, destDatastore=ds_dest_arg, ) return event def test_events(aggregator, vsphere, instance):", "vsphere._instance_key(instance) ) vsphere.batch_morlist_size = 1 i_key = vsphere._instance_key(instance) with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):", "assert not check._should_cache(instance, CacheConfig.Morlist) assert not check._should_cache(instance, CacheConfig.Metadata) def alarm_event(from_status='green',", "test_vms_in_filtered_host_are_filtered(vsphere, instance): \"\"\"Test that all vms belonging to a filtered", "datastore assert vsphere._process_mor_objects_queue_async.call_count == 3 instance[\"collect_realtime_only\"] = True vsphere._process_mor_objects_queue_async.reset_mock() with", "tags = [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:rootFolder\", \"vsphere_folder:folder1\", \"vsphere_datacenter:datacenter2\", \"vsphere_compute:compute_resource2\", \"vsphere_cluster:compute_resource2\", \"vsphere_type:host\",", "[]) vsphere.log.debug.assert_not_called() def test__collect_metrics_async_hostname(vsphere, instance, aggregator): server_instance = vsphere._get_server_instance(instance) result", "2 * REFRESH_METRICS_METADATA_INTERVAL, } check = VSphereCheck('vsphere', init_config, {}, [instance])", "mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) # Called once to process the 2", "> 0 assert not check.server_instances assert check.cache_config.get_interval(CacheConfig.Morlist, i_key) == 42", "'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_host:host3', 
'vsphere_host:host3', 'vsphere_type:vm', ] assert", "and we need to instantiate the check multiple times check", "\"mor\": mocked_datacenter, \"hostname\": None, \"tags\": [\"vsphere_folder:unknown\", \"vsphere_datacenter:datacenter\", \"vsphere_type:datacenter\"], } in", "import datetime import mock import pytest from mock import MagicMock", "instance, name=\"host3\", spec=\"host\", tags=tags) # ...on VMs assertMOR(vsphere, instance, spec=\"vm\",", "not check._should_cache(instance, CacheConfig.Metadata) def alarm_event(from_status='green', to_status='red', message='Some error'): now =", "OK! excluded_host = MockedMOR(spec=\"HostSystem\", name=\"bar\") excluded_vm = MockedMOR(spec=\"VirtualMachine\", name=\"bar\") assert", "== 1 assert { \"mor_type\": \"cluster\", \"mor\": mocked_cluster, \"hostname\": None,", "2 assert not vsphere.in_compatibility_mode(instance) instance[\"all_metrics\"] = True assert not vsphere.in_compatibility_mode(instance)", "assert check.cache_config.get_interval(CacheConfig.Metadata, i_key) == -42 assert check.clean_morlist_interval == 50 assert", "log = MagicMock() vsphere.log = log mor_attrs = vsphere._collect_mors_and_attributes(server_instance) log.error.assert_not_called()", "subset=True, tags=tags) def test_use_guest_hostname(vsphere, instance): # Default value with mock.patch(\"datadog_checks.vsphere.VSphereCheck._get_all_objs\")", "KeyError if the property collector failed to collect some attributes", "VSphereCheck('vsphere', init_config, {}, [instance]) # simulate previous runs, set the", "\"foo:bar\"] def test__init__(instance): with pytest.raises(BadConfigError): # Must define a unique", "assert vsphere.in_compatibility_mode(instance, log_warning=True) vsphere.log.warning.assert_called_once() def test_format_metric_name(vsphere): counter = MagicMock() counter.groupInfo.key", "\"parent\": None}, mocked_datastore: {}, mocked_cluster: {\"name\": \"cluster\"}, mocked_datacenter: {\"parent\": MockedMOR(spec=\"Folder\",", 
"mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [migrated_event()] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms':", "from .utils import MockedMOR, assertMOR, disable_thread_pool, get_mocked_server SERVICE_CHECK_TAGS = [\"vcenter_server:vsphere_mock\",", "assert not check.latest_event_query assert check.batch_collector_size == 0 assert check.batch_morlist_size ==", "vsphere.log = MagicMock() vsphere.in_compatibility_mode.return_value = True vsphere._collect_metrics_async(instance, []) vsphere.log.debug.assert_called_with('Skipping unknown", "assert len(mor_attrs) == 1 obj.missingSet = [MagicMock(path=\"prop\", fault=\"fault\")] mor_attrs =", "check = VSphereCheck('vsphere', init_config, {}, [instance]) i_key = check._instance_key(instance) assert", "\"vsphere_host:\" not in tag break # vsphere_host tag still in", "# Filtered out - host2 - folder1 - datacenter2 -", "assert mock_get_all_objs.call_args[1][\"use_guest_hostname\"] with mock.patch(\"datadog_checks.vsphere.vsphere.vmodl\"): # Discover hosts and virtual machines", "datadog_checks.vsphere import VSphereCheck from datadog_checks.vsphere.cache_config import CacheConfig from datadog_checks.vsphere.common import", "= MockedMOR(spec=\"HostSystem\") mocked_datastore = MockedMOR(spec=\"Datastore\") mocked_datacenter = MockedMOR(spec=\"Datacenter\") mocked_cluster =", "vsphere._collect_metrics_async(instance, []) vsphere.log.debug.assert_not_called() def test__collect_metrics_async_hostname(vsphere, instance, aggregator): server_instance = vsphere._get_server_instance(instance)", "= datetime.utcnow() vm = MockedMOR(spec='VirtualMachine') dc = MockedMOR(spec=\"Datacenter\") dc_arg =", "out - vm3 # Powered off - vm4 ``` \"\"\"", "0 def test__cache_metrics_metadata(vsphere, instance): vsphere.metadata_cache = MagicMock() vsphere._cache_metrics_metadata(instance) 
vsphere.metadata_cache.init_instance.assert_called_once_with(vsphere._instance_key(instance)) vsphere.metadata_cache.set_metadata.assert_called_once()", "that all vms belonging to a filtered host are also", "- vm2 # Filtered out - vm3 # Powered off", "are set at # __init__ time and we need to", "= False vsphere._collect_metrics_async(instance, []) aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname=\"foo\") def test_check(vsphere, instance):", "\"vsphere_host:host3\", \"vsphere_type:vm\", ] assertMOR(vsphere, instance, name=\"vm4\", spec=\"vm\", subset=True, tags=tags) def", "to_status='red', message='Some error'): now = datetime.utcnow() vm = MockedMOR(spec='VirtualMachine') dc", "Yellow to Gray') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.check(instance) aggregator.assert_event( \"vCenter monitor", "== [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_host:host3', 'vsphere_host:host3',", "SmartConnect: SmartConnect.return_value = get_mocked_server() check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.OK, tags=SERVICE_CHECK_TAGS )", "# Assertions: 1 labeled+monitored VM + 2 hosts + 2", "len(obj_list[vim.Datastore]) == 1 assert { \"mor_type\": \"datastore\", \"mor\": mocked_datastore, \"hostname\":", "collecting properties with property collector \"\"\" server_instance = vsphere._get_server_instance(instance) with", "with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) assert vsphere._process_mor_objects_queue_async.call_count == 0 def test__cache_metrics_metadata(vsphere,", "instance[\"all_metrics\"] = True assert not vsphere.in_compatibility_mode(instance) vsphere.log.warning.assert_not_called() assert not 
vsphere.in_compatibility_mode(instance,", "vsphere.metadata_cache.get_metadata.return_value = {\"name\": \"mymetric\", \"unit\": \"kb\"} vsphere.in_compatibility_mode = MagicMock() vsphere.in_compatibility_mode.return_value", "= {'host_include': '^(?!filtered_.+)'} with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): obj_list = vsphere._get_all_objs(server_instance, regex,", "= MockedMOR(spec=\"HostSystem\", name=\"foo\") included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\") assert not VSphereCheck._is_excluded(included_host,", "== 50 assert check.excluded_host_tags == [] def test_excluded_host_tags(vsphere, instance, aggregator):", "instance): vsphere.log = MagicMock() vsphere._process_mor_objects_queue_async = MagicMock() vsphere._process_mor_objects_queue(instance) # Queue", "\"hostname\": None, \"tags\": [\"vsphere_folder:unknown\", \"vsphere_datacenter:datacenter\", \"vsphere_type:datacenter\"], } in obj_list[vim.Datacenter] assert", "VSphereCheck._is_excluded(included_vm, {\"name\": included_vm.name}, include_regexes, None) # Not OK! 
excluded_host =", "{ 'refresh_morlist_interval': 2 * REFRESH_MORLIST_INTERVAL, 'refresh_metrics_metadata_interval': 2 * REFRESH_METRICS_METADATA_INTERVAL, }", "vsphere.check(instance) aggregator.assert_event( \"John has launched a hot migration of this", "== u\"mocked_vm\": tags = source_tags[\"vsphere\"] for tag in tags: assert", "expiration times, don't use defaults so we also test #", "return_value=mocked_mors_attrs): obj_list = vsphere._get_all_objs(server_instance, regex, False, []) assert len(obj_list[vim.VirtualMachine]) ==", "result = MagicMock(token=None, objects=[obj]) server_instance.content.propertyCollector.RetrievePropertiesEx.return_value = result log = MagicMock()", "instance[\"collection_level\"] = 3 server_instance.content.perfManager.QueryPerfCounterByLevel.return_value = [counter] vsphere._cache_metrics_metadata(instance) server_instance.content.perfManager.QueryPerfCounterByLevel.assert_called_once_with(3) assert len(vsphere.metadata_cache._metric_ids[i_key])", "import mock import pytest from mock import MagicMock from pyVmomi", "all_the_tags['host3'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_type:host',", "a hot migration of this virtual machine\", exact_match=False, tags=[ 'foo:bar',", "'vsphere_type:vm', ] assert all_the_tags['vm1'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2',", "def test_service_check_ok(aggregator, instance): check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance])) with", "this alarm, it was yellow and it's now gray.\", tags=['foo:bar'],", "= [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:folder1\", \"vsphere_datacenter:datacenter2\", \"vsphere_compute:compute_resource2\", \"vsphere_cluster:compute_resource2\", \"vsphere_host:host3\", 
\"vsphere_type:vm\", ]", "excluded_host = MockedMOR(spec=\"HostSystem\", name=\"bar\") excluded_vm = MockedMOR(spec=\"VirtualMachine\", name=\"bar\") assert VSphereCheck._is_excluded(excluded_host,", "Red') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event(", "= True vsphere._cache_morlist_raw(instance) assert mock_get_all_objs.call_args[1][\"use_guest_hostname\"] with mock.patch(\"datadog_checks.vsphere.vsphere.vmodl\"): # Discover hosts", "counter.nameInfo.key = \"name\" counter.rollupType = \"rollup\" assert vsphere.format_metric_name(counter, compatibility=True) ==", "One for each VM/host, datacenters are not collected for call_args", "\"runtime.host\": non_filtered_host, }, } regex = {'host_include': '^(?!filtered_.+)'} with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\",", "vim.event.AlarmEventArgument(alarm=alarm, name='alarm1') entity = vim.event.ManagedEntityEventArgument(entity=vm, name='vm1') event = vim.event.AlarmStatusChangedEvent( entity=entity,", "= MagicMock(missingSet=None, obj=\"obj\") result = MagicMock(token=None, objects=[obj]) server_instance.content.propertyCollector.RetrievePropertiesEx.return_value = result", "{ \"mor_type\": \"host\", \"mor\": mocked_host, \"hostname\": \"mocked_host\", \"tags\": [\"vsphere_type:host\"], }", "check.latest_event_query assert check.batch_collector_size == 0 assert check.batch_morlist_size == 50 assert", "i_key, now - (2 * REFRESH_METRICS_METADATA_INTERVAL)) with mock.patch(\"time.time\", return_value=now): assert", "= { mocked_vm: { \"name\": \"mocked_vm\", \"parent\": mocked_host, \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn,", "len(obj_list[vim.HostSystem]) == 1 assert { \"mor_type\": \"vm\", \"mor\": non_filtered_vm, \"hostname\":", "vsphere._collect_mors_and_attributes(server_instance) 
log.error.assert_called_once_with('Unable to retrieve property %s for object %s: %s',", "import pytest from mock import MagicMock from pyVmomi import vim", "assert all_the_tags['vm4'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2',", "clusters, 1 datastore def test_collect_realtime_only(vsphere, instance): \"\"\" Test the collect_realtime_only", "ConnectionError from datadog_checks.vsphere.vsphere import ( REFRESH_METRICS_METADATA_INTERVAL, REFRESH_MORLIST_INTERVAL, RESOURCE_TYPE_METRICS, SHORT_ROLLUP, )", "assert check.batch_morlist_size == 50 assert check.excluded_host_tags == [] def test_excluded_host_tags(vsphere,", "alarm, it was gray and it's now red.\", tags=['foo:bar'] )", "= \"group\" counter.nameInfo.key = \"name\" counter.rollupType = \"rollup\" assert vsphere.format_metric_name(counter,", "= vsphere._collect_mors_and_attributes(server_instance) log.error.assert_not_called() assert len(mor_attrs) == 1 obj.missingSet = [MagicMock(path=\"prop\",", "vsphere.collect_metrics(instance) assert vsphere._collect_metrics_async.call_count == 6 # One for each VM/host,", "MockedMOR(spec=\"VirtualMachine\") vm_no_powerstate = MockedMOR(spec=\"VirtualMachine\") vm_host_parent = MockedMOR(spec=\"VirtualMachine\") mocked_host = MockedMOR(spec=\"HostSystem\")", "{ 'clean_morlist_interval': 50, 'refresh_morlist_interval': 42, 'refresh_metrics_metadata_interval': -42, 'batch_property_collector_size': -1, }", "subset=True) def test__process_mor_objects_queue(vsphere, instance): vsphere.log = MagicMock() vsphere._process_mor_objects_queue_async = MagicMock()", "vim.event.DatastoreEventArgument(datastore=ds, name='ds1') ds_dest = MockedMOR(spec='Datastore') ds_dest_arg = vim.event.DatastoreEventArgument(datastore=ds_dest, name='ds2') event", "assertMOR(vsphere, instance, name=\"vm2_guest\", spec=\"vm\", subset=True) 
assertMOR(vsphere, instance, name=\"vm4_guest\", spec=\"vm\", subset=True)", "counter.key = 1 vsphere.format_metric_name = MagicMock() # New way instance[\"collection_level\"]", "True vsphere._process_mor_objects_queue_async.reset_mock() with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) assert vsphere._process_mor_objects_queue_async.call_count == 0", "def test_format_metric_name(vsphere): counter = MagicMock() counter.groupInfo.key = \"group\" counter.nameInfo.key =", "return_value=mocked_mors_attrs): obj_list = vsphere._get_all_objs(server_instance, None, False, []) assert len(obj_list[vim.VirtualMachine]) ==", "1 assert { \"mor_type\": \"cluster\", \"mor\": mocked_cluster, \"hostname\": None, \"tags\":", "vsphere.format_metric_name(counter) == \"group.name.{}\".format(short_rollup) def test_collect_metrics(vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere.batch_morlist_size =", "len(obj_list[vim.HostSystem]) == 1 assert { \"mor_type\": \"host\", \"mor\": mocked_host, \"hostname\":", "from_status) # noqa: B009 return event def migrated_event(): now =", "'vsphere_cluster:compute_resource1', 'vsphere_compute:compute_resource1', 'vsphere_type:host', ] def test_service_check_ko(aggregator, instance): check = disable_thread_pool(VSphereCheck('disk',", "mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) event = alarm_event(from_status='gray', message='Went from Gray", "virtual machines. 
Input topology: ``` rootFolder - datacenter1 - compute_resource1", "mocked_cluster, \"hostname\": None, \"tags\": [\"vsphere_cluster:cluster\", \"vsphere_type:cluster\"], } in obj_list[vim.ClusterComputeResource] def", "= vim.event.DatacenterEventArgument(datacenter=dc, name='dc1') alarm = MockedMOR(spec=\"Alarm\") alarm_arg = vim.event.AlarmEventArgument(alarm=alarm, name='alarm1')", "None) assert VSphereCheck._is_excluded(excluded_vm, {\"name\": excluded_vm.name}, include_regexes, None) # Sample(s) include_regexes", "ds_dest = MockedMOR(spec='Datastore') ds_dest_arg = vim.event.DatastoreEventArgument(datastore=ds_dest, name='ds2') event = vim.event.VmBeingHotMigratedEvent(", "= [counter] vsphere._cache_metrics_metadata(instance) assert not vsphere.metadata_cache._metric_ids[i_key] assert len(vsphere.metadata_cache._metadata[i_key]) == 1", "vsphere.log.debug.assert_called_with('Skipping unknown `%s` metric.', 'unknown') vsphere.log.reset_mock() vsphere.in_compatibility_mode.return_value = False vsphere._collect_metrics_async(instance,", "log mor_attrs = vsphere._collect_mors_and_attributes(server_instance) log.error.assert_not_called() assert len(mor_attrs) == 1 obj.missingSet", "MagicMock() vsphere.in_compatibility_mode.return_value = False vsphere._collect_metrics_async(instance, []) aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname=\"foo\") def", "Exclude hosts/vms not compliant with the user's `*_include` configuration. 
*", "{'collect_vcenter_alarms': True} vsphere.check(instance) assert not aggregator.events event = alarm_event(from_status='green', to_status='gray',", "== 1 assert 'vsphere_mock' in check.event_config assert not check.registry assert", "mock.patch(\"datadog_checks.vsphere.vsphere.vmodl\"): # Discover hosts and virtual machines instance[\"use_guest_hostname\"] = True", "# Samples with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): instance[\"host_include_only_regex\"] = \"host[2-9]\" instance[\"vm_include_only_regex\"] = \"vm[^2]\"", "run should always cache assert check._should_cache(instance, CacheConfig.Morlist) assert check._should_cache(instance, CacheConfig.Metadata)", "\"mor_type\": \"vm\", \"mor\": vm_no_parent, \"hostname\": \"vm_no_parent\", \"tags\": [\"vsphere_host:unknown\", \"vsphere_type:vm\"], }", "[event] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event( \"vCenter monitor status", "None, \"tags\": [\"vsphere_folder:unknown\", \"vsphere_datacenter:datacenter\", \"vsphere_type:datacenter\"], } in obj_list[vim.Datacenter] assert len(obj_list[vim.ClusterComputeResource])", "include_regexes, None) # Sample(s) include_regexes = None include_only_marked = True", "\"cluster\"}, mocked_datacenter: {\"parent\": MockedMOR(spec=\"Folder\", name=\"unknown folder\"), \"name\": \"datacenter\"}, } with", "\"vsphere_datacenter:datacenter1\", \"vsphere_compute:compute_resource1\", \"vsphere_cluster:compute_resource1\", \"vsphere_type:host\", ] assertMOR(vsphere, instance, name=\"host2\", spec=\"host\", tags=tags)", "\"\"\" server_instance = vsphere._get_server_instance(instance) with mock.patch(\"datadog_checks.vsphere.vsphere.vmodl\"): obj = MagicMock(missingSet=None, obj=\"obj\")", "property %s for object %s: %s', 'prop', 'obj', 'fault') assert", "= 2 assert not vsphere.in_compatibility_mode(instance) instance[\"all_metrics\"] = True assert not", "= \"average\" counter.key = 1 
vsphere.format_metric_name = MagicMock() # New", "include_regexes, include_only_marked ) # Not OK included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\")", "clusters + 1 datastore. assertMOR(vsphere, instance, count=8) # ...on hosts", "parameter should be a list of size 1 since the", "= MagicMock() vsphere.metadata_cache.get_metadata.return_value = {\"name\": \"mymetric\", \"unit\": \"kb\"} vsphere.in_compatibility_mode =", "alarm = MockedMOR(spec=\"Alarm\") alarm_arg = vim.event.AlarmEventArgument(alarm=alarm, name='alarm1') entity = vim.event.ManagedEntityEventArgument(entity=vm,", "method \"\"\" with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): with mock.patch.object(vsphere, 'set_external_tags') as set_external_tags: vsphere.check(instance)", "with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) event = alarm_event(from_status='gray', to_status='green', message='Went", "not collected for call_args in vsphere._collect_metrics_async.call_args_list: # query_specs parameter should", "= vim.event.AlarmStatusChangedEvent( entity=entity, fullFormattedMessage=message, createdTime=now, to=to_status, datacenter=dc_arg, alarm=alarm_arg ) setattr(event,", "name='dc1') alarm = MockedMOR(spec=\"Alarm\") alarm_arg = vim.event.AlarmEventArgument(alarm=alarm, name='alarm1') entity =", "destDatastore=ds_dest_arg, ) return event def test_events(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):", "= disable_thread_pool(VSphereCheck('disk', {}, {}, [instance])) with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect: #", "(see LICENSE) from __future__ import unicode_literals import time from datetime", "the check multiple times check = VSphereCheck('vsphere', {}, {}, [instance])", "aggregator.assert_metric_has_tag('vsphere.mymetric', tag=\"vsphere_host:mocked_host\", count=1) def test__is_excluded(): \"\"\" * Exclude hosts/vms not", 
"SHORT_ROLLUP, ) from .utils import MockedMOR, assertMOR, disable_thread_pool, get_mocked_server SERVICE_CHECK_TAGS", "name=\"foo\") assert not VSphereCheck._is_excluded(included_host, {\"name\": included_host.name}, include_regexes, None) assert not", "server_instance = vsphere._get_server_instance(instance) result = MagicMock() result.value = [23.4] server_instance.content.perfManager.QueryPerf.return_value", "has launched a hot migration of this virtual machine\", exact_match=False,", "name=\"foo\") included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\") assert not VSphereCheck._is_excluded(included_host, {\"name\": included_host.name},", "and it's now red.\", tags=['foo:bar'] ) event = alarm_event(from_status='yellow', to_status='gray',", "VSphereCheck('vsphere', {}, {}, [instance]) assert check.excluded_host_tags == [] check =", "= MockedMOR(spec=\"HostSystem\") non_filtered_vm = MockedMOR(spec=\"VirtualMachine\") mocked_mors_attrs = { filtered_host: {\"name\":", "'vsphere_datacenter:dc1', 'vsphere_datacenter:dc2', ], ) server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()]", "def test_vms_in_filtered_host_are_filtered(vsphere, instance): \"\"\"Test that all vms belonging to a", "= [23.4] server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result])] mor = {\"hostname\": \"foo\"} vsphere.mor_cache", "} check = VSphereCheck('vsphere', init_config, {}, [instance]) # simulate previous", "\"mor\": vm_no_parent, \"hostname\": \"vm_no_parent\", \"tags\": [\"vsphere_host:unknown\", \"vsphere_type:vm\"], } in obj_list[vim.VirtualMachine]", "Explore the vCenter infrastructure to discover hosts, virtual machines. 
Input", "on this alarm, it was gray and it's now red.\",", "= VSphereCheck('vsphere', {}, {}, [instance]) assert check.excluded_host_tags == [] check", "check.excluded_host_tags == [\"vsphere_host\"] instance[\"excluded_host_tags\"] = [] check = VSphereCheck('vsphere', {\"excluded_host_tags\":", "assert vsphere._process_mor_objects_queue_async.call_count == 0 def test__cache_metrics_metadata(vsphere, instance): vsphere.metadata_cache = MagicMock()", "to_status='gray', message='Went from Green to Gray') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.check(instance)", "def test_events_gray_handled(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) event", "# One for each VM/host, datacenters are not collected for", "Datadog, Inc. 2010-2017 # All rights reserved # Licensed under", "del instance[\"collection_level\"] vsphere.log.reset_mock() assert vsphere.in_compatibility_mode(instance) vsphere.log.warning.assert_not_called() assert vsphere.in_compatibility_mode(instance, log_warning=True) vsphere.log.warning.assert_called_once()", "that we handle the case were there are missing attributes", "cache expiration times, don't use defaults so we also test", "''}]) init_config = { 'clean_morlist_interval': 50, 'refresh_morlist_interval': 42, 'refresh_metrics_metadata_interval': -42,", "attributes \"\"\" server_instance = vsphere._get_server_instance(instance) vm_no_parent = MockedMOR(spec=\"VirtualMachine\") vm_no_powerstate =", "assert not vsphere.in_compatibility_mode(instance, log_warning=True) vsphere.log.warning.assert_called_once() del instance[\"collection_level\"] vsphere.log.reset_mock() assert vsphere.in_compatibility_mode(instance)", "instance, name=\"host2\", spec=\"host\", tags=tags) tags = [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:rootFolder\", \"vsphere_folder:folder1\",", "vsphere_host tag not in external metadata for 
host, source_tags in", "[] def test_excluded_host_tags(vsphere, instance, aggregator): # Check default value and", "set the last execution time in the past check.cache_config.set_last(CacheConfig.Morlist, i_key,", "{\"name\": included_host.name}, include_regexes, None) assert not VSphereCheck._is_excluded(included_vm, {\"name\": included_vm.name}, include_regexes,", "\"\"\" Test the check() method \"\"\" with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): with mock.patch.object(vsphere,", "VSphereCheck('vsphere', {}, {}, [{'': ''}]) init_config = { 'clean_morlist_interval': 50,", "discover hosts, virtual machines. Input topology: ``` rootFolder - datacenter1", "result log = MagicMock() vsphere.log = log mor_attrs = vsphere._collect_mors_and_attributes(server_instance)", "vsphere._instance_key(instance) with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in", "instance, aggregator): # Check default value and precedence of instance", "counter.rollupType = rollup assert vsphere.format_metric_name(counter) == \"group.name.{}\".format(short_rollup) def test_collect_metrics(vsphere, instance):", "2 hosts + 2 datacenters + 2 clusters + 1", "this virtual machine\", exact_match=False, tags=[ 'foo:bar', 'vsphere_host:host1', 'vsphere_host:host2', 'vsphere_datacenter:dc1', 'vsphere_datacenter:dc2',", "host = MockedMOR(spec='HostSystem') host_arg = vim.event.HostEventArgument(host=host, name='host1') host_dest = MockedMOR(spec='HostSystem')", "vsphere._cache_metrics_metadata(instance) vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) vsphere.collect_metrics(instance) assert vsphere._collect_metrics_async.call_count == 6 # One", "assert all_the_tags['vm2'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 
'vsphere_compute:compute_resource2',", "i_key = check._instance_key(instance) assert check.time_started > 0 assert not check.server_instances", "collect_realtime_only parameter acts as expected \"\"\" vsphere._process_mor_objects_queue_async = MagicMock() instance[\"collect_realtime_only\"]", "1 assert { \"mor_type\": \"host\", \"mor\": mocked_host, \"hostname\": \"mocked_host\", \"tags\":", "== 5 # 2 datacenters, 2 clusters, 1 datastore def", "batch size is 1 assert len(call_args[0][1]) == 1 def test__collect_metrics_async_compatibility(vsphere,", "belonging to a filtered host are also filtered\"\"\" server_instance =", "== 1 assert { \"mor_type\": \"datastore\", \"mor\": mocked_datastore, \"hostname\": None,", "from external host metadata, but still stored in the cache", "test_collect_metrics(vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere.batch_morlist_size = 1 vsphere._collect_metrics_async = MagicMock()", "assert not VSphereCheck._is_excluded( included_vm, {\"customValue\": included_vm.customValue}, include_regexes, include_only_marked ) #", "[]) vsphere.log.debug.assert_called_with('Skipping unknown `%s` metric.', 'unknown') vsphere.log.reset_mock() vsphere.in_compatibility_mode.return_value = False", "test_check(vsphere, instance): \"\"\" Test the check() method \"\"\" with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):", "assert check._should_cache(instance, CacheConfig.Metadata) # explicitly set cache expiration times, don't", "= vsphere._get_all_objs(server_instance, regex, False, []) assert len(obj_list[vim.VirtualMachine]) == 1 assert", "propagated init_config = { 'refresh_morlist_interval': 2 * REFRESH_MORLIST_INTERVAL, 'refresh_metrics_metadata_interval': 2", "# Powered off - vm4 ``` \"\"\" # Samples with", "assert { \"mor_type\": \"datacenter\", \"mor\": mocked_datacenter, \"hostname\": None, \"tags\": [\"vsphere_folder:unknown\",", "None, \"tags\": [\"vsphere_datastore:unknown\", \"vsphere_type:datastore\"], 
} in obj_list[vim.Datastore] assert len(obj_list[vim.Datacenter]) ==", "datastore. assertMOR(vsphere, instance, count=8) # ...on hosts assertMOR(vsphere, instance, spec=\"host\",", "vsphere._collect_metrics_async(instance, []) vsphere.log.debug.assert_called_with('Skipping unknown `%s` metric.', 'unknown') vsphere.log.reset_mock() vsphere.in_compatibility_mode.return_value =", "= 3 server_instance.content.perfManager.QueryPerfCounterByLevel.return_value = [counter] vsphere._cache_metrics_metadata(instance) server_instance.content.perfManager.QueryPerfCounterByLevel.assert_called_once_with(3) assert len(vsphere.metadata_cache._metric_ids[i_key]) ==", "__init__ time and we need to instantiate the check multiple", "assert len(obj_list[vim.VirtualMachine]) == 2 assert { \"mor_type\": \"vm\", \"mor\": vm_no_parent,", "\"mor\": mocked_host, \"hostname\": \"mocked_host\", \"tags\": [\"vsphere_type:host\"], } in obj_list[vim.HostSystem] assert", "log_warning=True) vsphere.log.warning.assert_called_once() del instance[\"collection_level\"] vsphere.log.reset_mock() assert vsphere.in_compatibility_mode(instance) vsphere.log.warning.assert_not_called() assert vsphere.in_compatibility_mode(instance,", "included_host.name}, include_regexes, None) assert not VSphereCheck._is_excluded(included_vm, {\"name\": included_vm.name}, include_regexes, None)", "Not OK! 
excluded_host = MockedMOR(spec=\"HostSystem\", name=\"bar\") excluded_vm = MockedMOR(spec=\"VirtualMachine\", name=\"bar\")", "mocked_host: {\"name\": \"mocked_host\", \"parent\": None}, mocked_datastore: {}, mocked_cluster: {\"name\": \"cluster\"},", "= [] check = VSphereCheck('vsphere', {\"excluded_host_tags\": [\"vsphere_host\"]}, {}, [instance]) assert", "from Green to Gray') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.check(instance) assert not", "len(vsphere.metadata_cache._metric_ids[i_key]) == 1 assert len(vsphere.metadata_cache._metadata[i_key]) == 1 vsphere.format_metric_name.assert_called_once_with(counter) # Compatibility", "MagicMock(missingSet=None, obj=\"obj\") result = MagicMock(token=None, objects=[obj]) server_instance.content.propertyCollector.RetrievePropertiesEx.return_value = result log", "\"\"\" * Exclude hosts/vms not compliant with the user's `*_include`", "hostname not available assertMOR(vsphere, instance, name=\"vm1\", spec=\"vm\", subset=True) assertMOR(vsphere, instance,", "MockedMOR(spec='Datastore') ds_dest_arg = vim.event.DatastoreEventArgument(datastore=ds_dest, name='ds2') event = vim.event.VmBeingHotMigratedEvent( vm=vm_arg, userName='John',", "instance[\"excluded_host_tags\"] = [] check = VSphereCheck('vsphere', {\"excluded_host_tags\": [\"vsphere_host\"]}, {}, [instance])", "status changed on this alarm, it was gray and it's", "mock.patch( \"datadog_checks.vsphere.vsphere.vmodl\" ): vsphere._cache_morlist_raw(instance) # Default value assert not mock_get_all_objs.call_args[1][\"use_guest_hostname\"]", "attributes and that we handle the case were there are", "alarm_event(from_status='gray', message='Went from Gray to Red') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.event_config['vsphere_mock']", "'vsphere_type:host', ] def test_service_check_ko(aggregator, instance): check = disable_thread_pool(VSphereCheck('disk', {}, {},", "name='host2') dc = 
MockedMOR(spec='Datacenter') dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1') dc_dest =", "len(mor_attrs) == 1 def test__cache_morlist_raw(vsphere, instance): \"\"\" Explore the vCenter", "MagicMock(token=None, objects=[obj]) server_instance.content.propertyCollector.RetrievePropertiesEx.return_value = result log = MagicMock() vsphere.log =", "vim.event.HostEventArgument(host=host, name='host1') host_dest = MockedMOR(spec='HostSystem') host_dest_arg = vim.event.HostEventArgument(host=host_dest, name='host2') dc", "= vim.event.DatacenterEventArgument(datacenter=dc, name='dc1') dc_dest = MockedMOR(spec='Datacenter') dc_dest_arg = vim.event.DatacenterEventArgument(datacenter=dc_dest, name='dc2')", "instantiate the check multiple times check = VSphereCheck('vsphere', {}, {},", "now = datetime.utcnow() vm = MockedMOR(spec='VirtualMachine') dc = MockedMOR(spec=\"Datacenter\") dc_arg", "default value and precedence of instance config over init config", "dc_dest_arg = vim.event.DatacenterEventArgument(datacenter=dc_dest, name='dc2') ds = MockedMOR(spec='Datastore') ds_arg = vim.event.DatastoreEventArgument(datastore=ds,", "check._instance_key(instance) # first run should always cache assert check._should_cache(instance, CacheConfig.Morlist)", "= MagicMock() vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value = {\"name\": \"unknown\"} vsphere.in_compatibility_mode", "# Discover hosts and virtual machines instance[\"use_guest_hostname\"] = True vsphere._cache_morlist_raw(instance)", "def test__cache_metrics_metadata_compatibility(vsphere, instance): server_instance = vsphere._get_server_instance(instance) i_key = vsphere._instance_key(instance) counter", "include_regexes = {'host_include': \"f[o]+\", 'vm_include': \"f[o]+\"} # OK included_host =", "res_type) for res_type in RESOURCE_TYPE_METRICS) == 11 vsphere._process_mor_objects_queue(instance) # Object", "1 vsphere.format_metric_name.assert_called_once_with(counter) # 
Compatibility mode instance[\"all_metrics\"] = False del instance[\"collection_level\"]", "[event] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) assert not aggregator.events event", "datetime.utcnow() vm = MockedMOR(spec='VirtualMachine') dc = MockedMOR(spec=\"Datacenter\") dc_arg = vim.event.DatacenterEventArgument(datacenter=dc,", "import VSphereCheck from datadog_checks.vsphere.cache_config import CacheConfig from datadog_checks.vsphere.common import SOURCE_TYPE", "\"mor\": non_filtered_host, \"hostname\": \"non_filtered_host_number_1\", \"tags\": [\"vsphere_type:host\"], } == obj_list[vim.HostSystem][0] def", "\"mor\": mocked_datastore, \"hostname\": None, \"tags\": [\"vsphere_datastore:unknown\", \"vsphere_type:datastore\"], } in obj_list[vim.Datastore]", "assertMOR(vsphere, instance, name=\"vm4\", spec=\"vm\", subset=True, tags=tags) def test_use_guest_hostname(vsphere, instance): #", "\"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, }, mocked_host: {\"name\": \"mocked_host\", \"parent\": None}, } with", "= True # Discover hosts and virtual machines vsphere._cache_morlist_raw(instance) #", "sending with metrics aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname=\"mocked_vm\", count=1) aggregator.assert_metric_has_tag('vsphere.mymetric', tag=\"vsphere_host:mocked_host\", count=1)", "vsphere.format_metric_name.assert_called_once_with(counter) # Compatibility mode instance[\"all_metrics\"] = False del instance[\"collection_level\"] vsphere.format_metric_name.reset_mock()", "queue should be empty after processing assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for", "assert VSphereCheck._is_excluded(excluded_vm, {\"name\": excluded_vm.name}, include_regexes, None) # Sample(s) include_regexes =", "from Gray to Red') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms':", "[instance]) assert 
check.excluded_host_tags == [] check = VSphereCheck('vsphere', {\"excluded_host_tags\": [\"vsphere_host\"]},", "\"non_filtered_host_number_1\", \"tags\": [\"vsphere_type:host\"], } == obj_list[vim.HostSystem][0] def test__get_all_objs(vsphere, instance): \"\"\"", "instance[\"all_metrics\"] = False del instance[\"collection_level\"] vsphere.format_metric_name.reset_mock() server_instance.content.perfManager.perfCounter = [counter] vsphere._cache_metrics_metadata(instance)", "assertMOR(vsphere, instance, name=\"vm1\", spec=\"vm\", subset=True) assertMOR(vsphere, instance, name=\"vm2_guest\", spec=\"vm\", subset=True)", "VSphereCheck('vsphere', {\"excluded_host_tags\": [\"vsphere_host\"]}, {}, [instance]) assert check.excluded_host_tags == [\"vsphere_host\"] instance[\"excluded_host_tags\"]", "SmartConnect succeeds, CurrentTime fails server = MagicMock() server.CurrentTime.side_effect = Exception()", "# query_specs parameter should be a list of size 1", "to retrieve property %s for object %s: %s', 'prop', 'obj',", "machines vsphere._cache_morlist_raw(instance) # Assertions: 1 labeled+monitored VM + 2 hosts", "precedence of instance config over init config check = VSphereCheck('vsphere',", "None, False, []) assert len(obj_list[vim.VirtualMachine]) == 2 assert { \"mor_type\":", "[]) aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname=\"foo\") def test_check(vsphere, instance): \"\"\" Test the", "== 1 assert { \"mor_type\": \"datacenter\", \"mor\": mocked_datacenter, \"hostname\": None,", "launched a hot migration of this virtual machine\", exact_match=False, tags=[", "[\"vsphere_host\"] mocked_vm = MockedMOR(spec=\"VirtualMachine\") mocked_host = MockedMOR(spec=\"HostSystem\") mocked_mors_attrs = {", "Gray to Red') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True}", "vsphere.in_compatibility_mode = MagicMock() vsphere.in_compatibility_mode.return_value = False 
vsphere.check(instance) ext_host_tags = vsphere.get_external_host_tags()", "= vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()] vsphere.check(instance) aggregator.assert_event( \"vCenter monitor status", "MockedMOR(spec=\"HostSystem\") mocked_mors_attrs = { mocked_vm: { \"name\": \"mocked_vm\", \"parent\": mocked_host,", "Not OK included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\") assert VSphereCheck._is_excluded(included_vm, {\"customValue\": []},", "instance, some params are set at # __init__ time and", "BadConfigError, ConnectionError from datadog_checks.vsphere.vsphere import ( REFRESH_METRICS_METADATA_INTERVAL, REFRESH_MORLIST_INTERVAL, RESOURCE_TYPE_METRICS, SHORT_ROLLUP,", "test_in_compatibility_mode(vsphere, instance): vsphere.log = MagicMock() instance[\"collection_level\"] = 2 assert not", "= \"rollup\" assert vsphere.format_metric_name(counter, compatibility=True) == \"group.name\" for rollup, short_rollup", "'vsphere_host:host2', 'vsphere_datacenter:dc1', 'vsphere_datacenter:dc2', ], ) server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value =", "datacenters are not collected for call_args in vsphere._collect_metrics_async.call_args_list: # query_specs", "is properly propagated init_config = { 'refresh_morlist_interval': 2 * REFRESH_MORLIST_INTERVAL,", "1 def test__cache_morlist_raw(vsphere, instance): \"\"\" Explore the vCenter infrastructure to", "VMs assertMOR(vsphere, instance, spec=\"vm\", count=1) tags = [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:folder1\",", "include_regexes = None include_only_marked = True # OK included_vm =", "the last execution time in the past check.cache_config.set_last(CacheConfig.Morlist, i_key, now", "= alarm_event(from_status='yellow', to_status='gray', message='Went from Yellow to Gray') server_instance.content.eventManager.QueryEvents.return_value =", 
"test_events_gray_handled(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) event =", "= MockedMOR(spec=\"VirtualMachine\") mocked_host = MockedMOR(spec=\"HostSystem\") mocked_mors_attrs = { mocked_vm: {", "server = MagicMock() server.CurrentTime.side_effect = Exception() SmartConnect.side_effect = None SmartConnect.return_value", "} in obj_list[vim.ClusterComputeResource] def test__collect_mors_and_attributes(vsphere, instance): \"\"\" Test that we", "spec=\"vm\", count=1) tags = [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:folder1\", \"vsphere_datacenter:datacenter2\", \"vsphere_compute:compute_resource2\", \"vsphere_cluster:compute_resource2\",", "'vsphere_type:vm', ] assert all_the_tags['host1'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_datacenter:datacenter1', 'vsphere_cluster:compute_resource1',", "obj_list[vim.VirtualMachine] assert len(obj_list[vim.HostSystem]) == 1 assert { \"mor_type\": \"host\", \"mor\":", "'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_type:host', ] assert all_the_tags['vm2'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock',", "SOURCE_TYPE from datadog_checks.vsphere.errors import BadConfigError, ConnectionError from datadog_checks.vsphere.vsphere import (", "\"\"\"Test that all vms belonging to a filtered host are", "to discover hosts, virtual machines. 
Input topology: ``` rootFolder -", "mocked_host = MockedMOR(spec=\"HostSystem\") mocked_mors_attrs = { mocked_vm: { \"name\": \"mocked_vm\",", "Must define a unique 'name' per vCenter instance VSphereCheck('vsphere', {},", "tags are excluded from external host metadata, but still stored", "== [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_type:host', ]", "vsphere._get_server_instance(instance) with mock.patch(\"datadog_checks.vsphere.vsphere.vmodl\"): obj = MagicMock(missingSet=None, obj=\"obj\") result = MagicMock(token=None,", "use fixtures for the check instance, some params are set", "\"vsphere_type:datacenter\"], } in obj_list[vim.Datacenter] assert len(obj_list[vim.ClusterComputeResource]) == 1 assert {", "mock.patch('datadog_checks.vsphere.vsphere.vmodl'): with mock.patch.object(vsphere, 'set_external_tags') as set_external_tags: vsphere.check(instance) set_external_tags.assert_called_once() all_the_tags =", "included_vm.name}, include_regexes, None) # Not OK! 
excluded_host = MockedMOR(spec=\"HostSystem\", name=\"bar\")", "that we don't raise KeyError if the property collector failed", "[\"vsphere_host:non_filtered_host_number_1\", \"vsphere_type:vm\"], } == obj_list[vim.VirtualMachine][0] assert { \"mor_type\": \"host\", \"mor\":", "vsphere.check(instance) assert not aggregator.events event = alarm_event(from_status='green', to_status='gray', message='Went from", "at # __init__ time and we need to instantiate the", "aggregator): # Check default value and precedence of instance config", "vim from datadog_checks.vsphere import VSphereCheck from datadog_checks.vsphere.cache_config import CacheConfig from", "property collector failed to collect some attributes and that we", "dc_dest = MockedMOR(spec='Datacenter') dc_dest_arg = vim.event.DatacenterEventArgument(datacenter=dc_dest, name='dc2') ds = MockedMOR(spec='Datastore')", "\"vsphere_datacenter:datacenter\", \"vsphere_type:datacenter\"], } in obj_list[vim.Datacenter] assert len(obj_list[vim.ClusterComputeResource]) == 1 assert", "= log mor_attrs = vsphere._collect_mors_and_attributes(server_instance) log.error.assert_not_called() assert len(mor_attrs) == 1", "MockedMOR(spec='VirtualMachine') dc = MockedMOR(spec=\"Datacenter\") dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1') alarm =", "source_tags[\"vsphere\"] for tag in tags: assert \"vsphere_host:\" not in tag", "datetime import mock import pytest from mock import MagicMock from", "== 42 assert check.cache_config.get_interval(CacheConfig.Metadata, i_key) == -42 assert check.clean_morlist_interval ==", "init_config = { 'clean_morlist_interval': 50, 'refresh_morlist_interval': 42, 'refresh_metrics_metadata_interval': -42, 'batch_property_collector_size':", "= {'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event( \"John has launched a hot", "are excluded from external host metadata, but still stored in", "assert check.excluded_host_tags == [\"vsphere_host\"] 
instance[\"excluded_host_tags\"] = [] check = VSphereCheck('vsphere',", "\"mor\": non_filtered_vm, \"hostname\": \"this_vm_is_not_filtered\", \"tags\": [\"vsphere_host:non_filtered_host_number_1\", \"vsphere_type:vm\"], } == obj_list[vim.VirtualMachine][0]", "mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms':", "compatibility=True) == \"group.name\" for rollup, short_rollup in SHORT_ROLLUP.items(): counter.rollupType =", "test_events_tags(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value =", "non_filtered_vm = MockedMOR(spec=\"VirtualMachine\") mocked_mors_attrs = { filtered_host: {\"name\": \"filtered_host_number_1\", \"parent\":", "%s for object %s: %s', 'prop', 'obj', 'fault') assert len(mor_attrs)", "assert vsphere._process_mor_objects_queue_async.call_count == 0 # realtime only for call_args in", "simulate previous runs, set the last execution time in the", "collect some attributes and that we handle the case were", "{ \"name\": \"this_vm_is_not_filtered\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, \"runtime.host\": non_filtered_host, }, } regex", "= mor vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value = {\"name\": \"mymetric\", \"unit\":", "Default value with mock.patch(\"datadog_checks.vsphere.VSphereCheck._get_all_objs\") as mock_get_all_objs, mock.patch( \"datadog_checks.vsphere.vsphere.vmodl\" ): vsphere._cache_morlist_raw(instance)", "instance): \"\"\" Test that we check for errors when collecting", "{ \"mor_type\": \"cluster\", \"mor\": mocked_cluster, \"hostname\": None, \"tags\": [\"vsphere_cluster:cluster\", \"vsphere_type:cluster\"],", 
"vsphere_host tag still in cache for sending with metrics aggregator.assert_metric('vsphere.mymetric',", "sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 0 assert vsphere._process_mor_objects_queue_async.call_count", "= check._instance_key(instance) assert check.time_started > 0 assert not check.server_instances assert", "[\"vsphere_type:host\"], } == obj_list[vim.HostSystem][0] def test__get_all_objs(vsphere, instance): \"\"\" Test that", "\"mor\": mocked_cluster, \"hostname\": None, \"tags\": [\"vsphere_cluster:cluster\", \"vsphere_type:cluster\"], } in obj_list[vim.ClusterComputeResource]", "configuration is properly propagated init_config = { 'refresh_morlist_interval': 2 *", "server_instance = vsphere._get_server_instance(instance) filtered_host = MockedMOR(spec=\"HostSystem\") filtered_vm = MockedMOR(spec=\"VirtualMachine\") non_filtered_host", "vsphere._process_mor_objects_queue(instance) assert vsphere._process_mor_objects_queue_async.call_count == 0 def test__cache_metrics_metadata(vsphere, instance): vsphere.metadata_cache =", "vsphere.in_compatibility_mode.return_value = False vsphere._collect_metrics_async(instance, []) aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname=\"foo\") def test_check(vsphere,", "test__is_excluded(): \"\"\" * Exclude hosts/vms not compliant with the user's", "== [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_datacenter:datacenter1', 'vsphere_cluster:compute_resource1', 'vsphere_compute:compute_resource1', 'vsphere_type:host', ] assert", "in obj_list[vim.ClusterComputeResource] def test__collect_mors_and_attributes(vsphere, instance): \"\"\" Test that we check", "Assertions: 1 labeled+monitored VM + 2 hosts + 2 datacenters", "times check = VSphereCheck('vsphere', {}, {}, [instance]) i_key = check._instance_key(instance)", "all vms belonging to a filtered host are also filtered\"\"\"", "exact_match=False, tags=[ 'foo:bar', 'vsphere_host:host1', 
'vsphere_host:host2', 'vsphere_datacenter:dc1', 'vsphere_datacenter:dc2', ], ) server_instance", "obj.missingSet = [MagicMock(path=\"prop\", fault=\"fault\")] mor_attrs = vsphere._collect_mors_and_attributes(server_instance) log.error.assert_called_once_with('Unable to retrieve", "red.\", tags=['foo:bar'] ) event = alarm_event(from_status='yellow', to_status='gray', message='Went from Yellow", "MagicMock() vsphere.in_compatibility_mode.return_value = False vsphere.check(instance) ext_host_tags = vsphere.get_external_host_tags() # vsphere_host", "vim.event.ManagedEntityEventArgument(entity=vm, name='vm1') event = vim.event.AlarmStatusChangedEvent( entity=entity, fullFormattedMessage=message, createdTime=now, to=to_status, datacenter=dc_arg,", "vsphere.log = MagicMock() instance[\"collection_level\"] = 2 assert not vsphere.in_compatibility_mode(instance) instance[\"all_metrics\"]", "= vsphere._get_server_instance(instance) result = MagicMock() result.value = [23.4] server_instance.content.perfManager.QueryPerf.return_value =", "compatibility=True) def test_in_compatibility_mode(vsphere, instance): vsphere.log = MagicMock() instance[\"collection_level\"] = 2", "server_instance = vsphere._get_server_instance(instance) vm_no_parent = MockedMOR(spec=\"VirtualMachine\") vm_no_powerstate = MockedMOR(spec=\"VirtualMachine\") vm_host_parent", "the user's `*_include` configuration. 
* Exclude \"non-labeled\" virtual machines when", "= result log = MagicMock() vsphere.log = log mor_attrs =", "len(mor_attrs) == 1 obj.missingSet = [MagicMock(path=\"prop\", fault=\"fault\")] mor_attrs = vsphere._collect_mors_and_attributes(server_instance)", "test__cache_metrics_metadata(vsphere, instance): vsphere.metadata_cache = MagicMock() vsphere._cache_metrics_metadata(instance) vsphere.metadata_cache.init_instance.assert_called_once_with(vsphere._instance_key(instance)) vsphere.metadata_cache.set_metadata.assert_called_once() vsphere.metadata_cache.set_metric_ids.assert_called_once() def", "processing\", vsphere._instance_key(instance) ) vsphere.batch_morlist_size = 1 i_key = vsphere._instance_key(instance) with", "[ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_datacenter:datacenter1', 'vsphere_cluster:compute_resource1', 'vsphere_compute:compute_resource1', 'vsphere_type:host', ] assert all_the_tags['host3'][SOURCE_TYPE]", "\"vm\", \"mor\": vm_no_parent, \"hostname\": \"vm_no_parent\", \"tags\": [\"vsphere_host:unknown\", \"vsphere_type:vm\"], } in", "user configuration instructs to. \"\"\" # Sample(s) include_regexes = {'host_include':", "acts as expected \"\"\" vsphere._process_mor_objects_queue_async = MagicMock() instance[\"collect_realtime_only\"] = False", "= vsphere._get_server_instance(instance) i_key = vsphere._instance_key(instance) counter = MagicMock() counter.rollupType =", "instance[\"use_guest_hostname\"] = True vsphere._cache_morlist_raw(instance) assert mock_get_all_objs.call_args[1][\"use_guest_hostname\"] with mock.patch(\"datadog_checks.vsphere.vsphere.vmodl\"): # Discover", "machines. 
Input topology: ``` rootFolder - datacenter1 - compute_resource1 -", "set at # __init__ time and we need to instantiate", "# OK included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\", label=True) assert not VSphereCheck._is_excluded(", "vm_no_powerstate = MockedMOR(spec=\"VirtualMachine\") vm_host_parent = MockedMOR(spec=\"VirtualMachine\") mocked_host = MockedMOR(spec=\"HostSystem\") mocked_datastore", "entity=entity, fullFormattedMessage=message, createdTime=now, to=to_status, datacenter=dc_arg, alarm=alarm_arg ) setattr(event, 'from', from_status)", "assert { \"mor_type\": \"datastore\", \"mor\": mocked_datastore, \"hostname\": None, \"tags\": [\"vsphere_datastore:unknown\",", "Inc. 2010-2017 # All rights reserved # Licensed under Simplified", "'vsphere_host:host3', 'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['vm1'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder',", "with pytest.raises(ConnectionError): check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS ) aggregator.reset()", "non_filtered_host, \"hostname\": \"non_filtered_host_number_1\", \"tags\": [\"vsphere_type:host\"], } == obj_list[vim.HostSystem][0] def test__get_all_objs(vsphere,", "handle the case were there are missing attributes \"\"\" server_instance", "'from', from_status) # noqa: B009 return event def migrated_event(): now", "def test_service_check_ko(aggregator, instance): check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance])) with", "from pyVmomi import vim from datadog_checks.vsphere import VSphereCheck from datadog_checks.vsphere.cache_config", "[ \"vcenter_server:vsphere_mock\", \"vsphere_folder:rootFolder\", \"vsphere_datacenter:datacenter1\", \"vsphere_compute:compute_resource1\", \"vsphere_cluster:compute_resource1\", \"vsphere_type:host\", ] assertMOR(vsphere, instance,", "VM + 2 hosts + 2 datacenters + 2 clusters", 
"message='Went from Green to Gray') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.check(instance) assert", "it was yellow and it's now gray.\", tags=['foo:bar'], alert_type='info', )", "alert_type='info', ) def test_events_gray_ignored(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance =", "Input topology: ``` rootFolder - datacenter1 - compute_resource1 - host1", "'vsphere_folder:rootFolder', 'vsphere_datacenter:datacenter1', 'vsphere_cluster:compute_resource1', 'vsphere_compute:compute_resource1', 'vsphere_type:host', ] assert all_the_tags['host3'][SOURCE_TYPE] == [", "queue is not initialized yet for instance %s, skipping processing\",", "= [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:rootFolder\", \"vsphere_folder:folder1\", \"vsphere_datacenter:datacenter2\", \"vsphere_compute:compute_resource2\", \"vsphere_cluster:compute_resource2\", \"vsphere_type:host\", ]", "CacheConfig.Morlist) assert check._should_cache(instance, CacheConfig.Metadata) # explicitly set cache expiration times,", "infrastructure to discover hosts, virtual machines. 
Input topology: ``` rootFolder", "\"tags\": [\"vsphere_host:unknown\", \"vsphere_type:vm\"], } in obj_list[vim.VirtualMachine] assert { \"mor_type\": \"vm\",", "datacenter=dc_arg, alarm=alarm_arg ) setattr(event, 'from', from_status) # noqa: B009 return", "] assertMOR(vsphere, instance, name=\"host3\", spec=\"host\", tags=tags) # ...on VMs assertMOR(vsphere,", "check.cache_config.set_last(CacheConfig.Metadata, i_key, now - (2 * REFRESH_METRICS_METADATA_INTERVAL)) with mock.patch(\"time.time\", return_value=now):", "\"mymetric\", \"unit\": \"kb\"} vsphere.in_compatibility_mode = MagicMock() vsphere.in_compatibility_mode.return_value = False vsphere.check(instance)", "vsphere._process_mor_objects_queue(instance) vsphere.collect_metrics(instance) assert vsphere._collect_metrics_async.call_count == 6 # One for each", "{ mocked_vm: { \"name\": \"mocked_vm\", \"parent\": mocked_host, \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, },", "= [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:rootFolder\", \"vsphere_datacenter:datacenter1\", \"vsphere_compute:compute_resource1\", \"vsphere_cluster:compute_resource1\", \"vsphere_type:host\", ] assertMOR(vsphere,", "for each VM/host, datacenters are not collected for call_args in", "= MockedMOR(spec=\"VirtualMachine\", name=\"foo\") assert VSphereCheck._is_excluded(included_vm, {\"customValue\": []}, include_regexes, include_only_marked) def", "spec=\"vm\", count=3) # Fallback on VM name when guest hostname", "mocked_cluster = MockedMOR(spec=\"ClusterComputeResource\") mocked_mors_attrs = { vm_no_parent: {\"name\": \"vm_no_parent\", \"runtime.powerState\":", "def test__collect_metrics_async_compatibility(vsphere, instance): server_instance = vsphere._get_server_instance(instance) server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[MagicMock()])] vsphere.mor_cache", "'set_external_tags') as set_external_tags: vsphere.check(instance) set_external_tags.assert_called_once() all_the_tags 
= dict(set_external_tags.call_args[0][0]) assert all_the_tags['vm4'][SOURCE_TYPE]", "test_use_guest_hostname(vsphere, instance): # Default value with mock.patch(\"datadog_checks.vsphere.VSphereCheck._get_all_objs\") as mock_get_all_objs, mock.patch(", "'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_host:host3', 'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['host2'][SOURCE_TYPE]", "vm = MockedMOR(spec='VirtualMachine') dc = MockedMOR(spec=\"Datacenter\") dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1')", "'vsphere_folder:rootFolder', 'vsphere_datacenter:datacenter1', 'vsphere_cluster:compute_resource1', 'vsphere_compute:compute_resource1', 'vsphere_type:host', ] def test_service_check_ko(aggregator, instance): check", "is 1 assert len(call_args[0][1]) == 1 instance[\"collect_realtime_only\"] = False vsphere._cache_morlist_raw(instance)", "the check instance, some params are set at # __init__", "1 assert len(vsphere.metadata_cache._metadata[i_key]) == 1 vsphere.format_metric_name.assert_called_once_with(counter) # Compatibility mode instance[\"all_metrics\"]", "vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()]", "vsphere._get_server_instance(instance) vm_no_parent = MockedMOR(spec=\"VirtualMachine\") vm_no_powerstate = MockedMOR(spec=\"VirtualMachine\") vm_host_parent = MockedMOR(spec=\"VirtualMachine\")", "= [23.4] server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result], entity=mocked_vm)] vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value", "6 # One for each VM/host, datacenters are not collected", "1 i_key = vsphere._instance_key(instance) with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): 
vsphere._cache_morlist_raw(instance) assert sum(vsphere.mor_objects_queue.size(i_key, res_type)", "MagicMock() result.value = [23.4] server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result], entity=mocked_vm)] vsphere.metadata_cache =", "MagicMock() vsphere._cache_metrics_metadata(instance) vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) vsphere.collect_metrics(instance) assert vsphere._collect_metrics_async.call_count == 6 #", "changed on this alarm, it was green and it's now", "\"mor_type\": \"vm\", \"mor\": non_filtered_vm, \"hostname\": \"this_vm_is_not_filtered\", \"tags\": [\"vsphere_host:non_filtered_host_number_1\", \"vsphere_type:vm\"], }", "+ 2 datacenters + 2 clusters + 1 datastore. assertMOR(vsphere,", "vsphere.log = MagicMock() vsphere._process_mor_objects_queue_async = MagicMock() vsphere._process_mor_objects_queue(instance) # Queue hasn't", "return event def migrated_event(): now = datetime.utcnow() vm = MockedMOR(spec='VirtualMachine',", "Discover hosts and virtual machines instance[\"use_guest_hostname\"] = True vsphere._cache_morlist_raw(instance) assertMOR(vsphere,", ") def test__instance_key(vsphere, instance): assert vsphere._instance_key(instance) == \"vsphere_mock\" del instance['name']", "be empty after processing assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in", "tags: assert \"vsphere_host:\" not in tag break # vsphere_host tag", "not VSphereCheck._is_excluded(included_vm, {\"name\": included_vm.name}, include_regexes, None) # Not OK! 
excluded_host", "with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) event = alarm_event(from_status='gray', message='Went from", ") aggregator.reset() # SmartConnect succeeds, CurrentTime fails server = MagicMock()", "vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) assert not aggregator.events event =", "= MockedMOR(spec=\"VirtualMachine\", name=\"foo\") assert not VSphereCheck._is_excluded(included_host, {\"name\": included_host.name}, include_regexes, None)", "{ \"mor_type\": \"host\", \"mor\": non_filtered_host, \"hostname\": \"non_filtered_host_number_1\", \"tags\": [\"vsphere_type:host\"], }", "assert VSphereCheck._is_excluded(excluded_host, {\"name\": excluded_host.name}, include_regexes, None) assert VSphereCheck._is_excluded(excluded_vm, {\"name\": excluded_vm.name},", "= MockedMOR(spec='Datacenter') dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1') dc_dest = MockedMOR(spec='Datacenter') dc_dest_arg", "== [] check = VSphereCheck('vsphere', {\"excluded_host_tags\": [\"vsphere_host\"]}, {}, [instance]) assert", "[instance]) # simulate previous runs, set the last execution time", "None include_only_marked = True # OK included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\",", "was green and it's now red.\", tags=['foo:bar'] ) def test_events_tags(aggregator,", "def test__should_cache(instance): now = time.time() # do not use fixtures", "datacenters, 2 clusters, 1 datastore def test_collect_realtime_only(vsphere, instance): \"\"\" Test", "for call_args in vsphere._process_mor_objects_queue_async.call_args_list: # query_specs parameter should be a", "all_the_tags['vm2'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_host:host3',", "on this alarm, it was 
yellow and it's now gray.\",", "machines when the user configuration instructs to. \"\"\" # Sample(s)", "\"datastore\", \"mor\": mocked_datastore, \"hostname\": None, \"tags\": [\"vsphere_datastore:unknown\", \"vsphere_type:datastore\"], } in", "Discover hosts and virtual machines vsphere._cache_morlist_raw(instance) # Assertions: 1 labeled+monitored", "obj_list[vim.VirtualMachine] assert { \"mor_type\": \"vm\", \"mor\": vm_host_parent, \"hostname\": \"unknown\", \"tags\":", "{}, {}, [{'': ''}]) init_config = { 'clean_morlist_interval': 50, 'refresh_morlist_interval':", "RESOURCE_TYPE_METRICS, SHORT_ROLLUP, ) from .utils import MockedMOR, assertMOR, disable_thread_pool, get_mocked_server", "MagicMock() instance[\"collect_realtime_only\"] = False with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) # Called", "False, []) assert len(obj_list[vim.VirtualMachine]) == 1 assert len(obj_list[vim.HostSystem]) == 1", "a unique 'name' per vCenter instance VSphereCheck('vsphere', {}, {}, [{'':", "assert { \"mor_type\": \"vm\", \"mor\": vm_host_parent, \"hostname\": \"unknown\", \"tags\": [\"vsphere_host:mocked_host\",", "\"\"\" Test that we don't raise KeyError if the property", "1 assert { \"mor_type\": \"vm\", \"mor\": non_filtered_vm, \"hostname\": \"this_vm_is_not_filtered\", \"tags\":", "== obj_list[vim.HostSystem][0] def test__get_all_objs(vsphere, instance): \"\"\" Test that we don't", "from datadog_checks.vsphere import VSphereCheck from datadog_checks.vsphere.cache_config import CacheConfig from datadog_checks.vsphere.common", "tags=SERVICE_CHECK_TAGS ) aggregator.reset() # SmartConnect succeeds, CurrentTime fails server =", "vm_no_parent = MockedMOR(spec=\"VirtualMachine\") vm_no_powerstate = MockedMOR(spec=\"VirtualMachine\") vm_host_parent = MockedMOR(spec=\"VirtualMachine\") mocked_host", "{\"name\": \"vm_no_powerstate\"}, vm_host_parent: {\"parent\": mocked_host, 
\"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn}, mocked_host: {\"name\": \"mocked_host\",", "VSphereCheck._is_excluded( included_vm, {\"customValue\": included_vm.customValue}, include_regexes, include_only_marked ) # Not OK", "0 # realtime only for call_args in vsphere._process_mor_objects_queue_async.call_args_list: # query_specs", "vsphere._collect_mors_and_attributes(server_instance) log.error.assert_not_called() assert len(mor_attrs) == 1 obj.missingSet = [MagicMock(path=\"prop\", fault=\"fault\")]", "assert all_the_tags['host1'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_datacenter:datacenter1', 'vsphere_cluster:compute_resource1', 'vsphere_compute:compute_resource1', 'vsphere_type:host',", "def test_events_gray_ignored(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) event", "vsphere._process_mor_objects_queue_async.call_count == 0 # realtime only for call_args in vsphere._process_mor_objects_queue_async.call_args_list:", "not vsphere.in_compatibility_mode(instance) vsphere.log.warning.assert_not_called() assert not vsphere.in_compatibility_mode(instance, log_warning=True) vsphere.log.warning.assert_called_once() del instance[\"collection_level\"]", "we need to instantiate the check multiple times check =", "included_host = MockedMOR(spec=\"HostSystem\", name=\"foo\") included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\") assert not", "alarm_arg = vim.event.AlarmEventArgument(alarm=alarm, name='alarm1') entity = vim.event.ManagedEntityEventArgument(entity=vm, name='vm1') event =", "filtered\"\"\" server_instance = vsphere._get_server_instance(instance) filtered_host = MockedMOR(spec=\"HostSystem\") filtered_vm = MockedMOR(spec=\"VirtualMachine\")", "assert len(call_args[0][1]) == 1 instance[\"collect_realtime_only\"] = False vsphere._cache_morlist_raw(instance) assert 
sum(vsphere.mor_objects_queue.size(i_key,", "now = datetime.utcnow() vm = MockedMOR(spec='VirtualMachine', name='vm1') vm_arg = vim.event.VmEventArgument(vm=vm)", "Not labeled - vm2 # Filtered out - vm3 #", "disable_thread_pool, get_mocked_server SERVICE_CHECK_TAGS = [\"vcenter_server:vsphere_mock\", \"vcenter_host:None\", \"foo:bar\"] def test__init__(instance): with", "mocked_cluster: {\"name\": \"cluster\"}, mocked_datacenter: {\"parent\": MockedMOR(spec=\"Folder\", name=\"unknown folder\"), \"name\": \"datacenter\"},", "MockedMOR(spec=\"Datastore\") mocked_datacenter = MockedMOR(spec=\"Datacenter\") mocked_cluster = MockedMOR(spec=\"ClusterComputeResource\") mocked_mors_attrs = {", "collected for call_args in vsphere._collect_metrics_async.call_args_list: # query_specs parameter should be", "{\"name\": \"unknown\"} vsphere.in_compatibility_mode = MagicMock() vsphere.log = MagicMock() vsphere.in_compatibility_mode.return_value =", "server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event( \"vCenter", "vsphere._get_all_objs(server_instance, regex, False, []) assert len(obj_list[vim.VirtualMachine]) == 1 assert len(obj_list[vim.HostSystem])", "'fault') assert len(mor_attrs) == 1 def test__cache_morlist_raw(vsphere, instance): \"\"\" Explore", "assertMOR(vsphere, instance, spec=\"vm\", count=1) tags = [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:folder1\", \"vsphere_datacenter:datacenter2\",", "with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect: SmartConnect.return_value = get_mocked_server() check.check(instance)", "return event def test_events(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance =", "\"vm\", \"mor\": vm_host_parent, \"hostname\": \"unknown\", \"tags\": 
[\"vsphere_host:mocked_host\", \"vsphere_host:unknown\", \"vsphere_type:vm\"], }", "%s: %s', 'prop', 'obj', 'fault') assert len(mor_attrs) == 1 def", "name=\"vm4\", spec=\"vm\", subset=True, tags=tags) def test_use_guest_hostname(vsphere, instance): # Default value", "server_instance.content.perfManager.perfCounter = [counter] vsphere._cache_metrics_metadata(instance) assert not vsphere.metadata_cache._metric_ids[i_key] assert len(vsphere.metadata_cache._metadata[i_key]) ==", "vsphere.get_external_host_tags() # vsphere_host tag not in external metadata for host,", "\"unknown\"} vsphere.in_compatibility_mode = MagicMock() vsphere.log = MagicMock() vsphere.in_compatibility_mode.return_value = True", "= MockedMOR(spec=\"Datacenter\") mocked_cluster = MockedMOR(spec=\"ClusterComputeResource\") mocked_mors_attrs = { vm_no_parent: {\"name\":", "check._should_cache(instance, CacheConfig.Metadata) # explicitly set cache expiration times, don't use", "message='Went from Gray to Green') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.event_config['vsphere_mock'] =", "vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value = {\"name\": \"mymetric\", \"unit\": \"kb\"} vsphere.in_compatibility_mode", "], ) server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()] vsphere.check(instance) aggregator.assert_event(", "check.cache_config.set_last(CacheConfig.Morlist, i_key, now - (2 * REFRESH_MORLIST_INTERVAL)) check.cache_config.set_last(CacheConfig.Metadata, i_key, now", "\"hostname\": \"this_vm_is_not_filtered\", \"tags\": [\"vsphere_host:non_filtered_host_number_1\", \"vsphere_type:vm\"], } == obj_list[vim.VirtualMachine][0] assert {", "[MagicMock(value=[result])] mor = {\"hostname\": \"foo\"} vsphere.mor_cache = MagicMock() vsphere.mor_cache.get_mor.return_value =", "objects=[obj]) 
server_instance.content.propertyCollector.RetrievePropertiesEx.return_value = result log = MagicMock() vsphere.log = log", "result = MagicMock() result.value = [23.4] server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result], entity=mocked_vm)]", "instance, spec=\"vm\", count=1) tags = [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:folder1\", \"vsphere_datacenter:datacenter2\", \"vsphere_compute:compute_resource2\",", "'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_host:host3', 'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['vm1'][SOURCE_TYPE] == [", "assertMOR(vsphere, instance, name=\"host3\", spec=\"host\", tags=tags) # ...on VMs assertMOR(vsphere, instance,", "\"datadog_checks.vsphere.vsphere.vmodl\" ): vsphere._cache_morlist_raw(instance) # Default value assert not mock_get_all_objs.call_args[1][\"use_guest_hostname\"] #", "check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance])) with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect')", "tag break # vsphere_host tag still in cache for sending", "return_value=mocked_mors_attrs): server_instance = vsphere._get_server_instance(instance) result = MagicMock() result.value = [23.4]", "with mock.patch(\"datadog_checks.vsphere.VSphereCheck._get_all_objs\") as mock_get_all_objs, mock.patch( \"datadog_checks.vsphere.vsphere.vmodl\" ): vsphere._cache_morlist_raw(instance) # Default", "included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\", label=True) assert not VSphereCheck._is_excluded( included_vm, {\"customValue\":", "1 vsphere.format_metric_name.assert_called_once_with(counter, compatibility=True) def test_in_compatibility_mode(vsphere, instance): vsphere.log = MagicMock() instance[\"collection_level\"]", "== 1 assert len(vsphere.metadata_cache._metadata[i_key]) == 1 vsphere.format_metric_name.assert_called_once_with(counter) # Compatibility 
mode", "it's now gray.\", tags=['foo:bar'], alert_type='info', ) def test_events_gray_ignored(aggregator, vsphere, instance):", "check = VSphereCheck('vsphere', {}, {}, [instance]) assert check.excluded_host_tags == []", "VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS ) def test_service_check_ok(aggregator, instance): check =", "assert len(obj_list[vim.Datastore]) == 1 assert { \"mor_type\": \"datastore\", \"mor\": mocked_datastore,", "count=1, tags=SERVICE_CHECK_TAGS ) aggregator.reset() # SmartConnect succeeds, CurrentTime fails server", "event = alarm_event(from_status='gray', to_status='green', message='Went from Gray to Green') server_instance.content.eventManager.QueryEvents.return_value", "vsphere.mor_cache = MagicMock() vsphere.mor_cache.get_mor.return_value = mor vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value", "= vsphere._collect_mors_and_attributes(server_instance) log.error.assert_called_once_with('Unable to retrieve property %s for object %s:", "== 1 assert { \"mor_type\": \"host\", \"mor\": mocked_host, \"hostname\": \"mocked_host\",", "( REFRESH_METRICS_METADATA_INTERVAL, REFRESH_MORLIST_INTERVAL, RESOURCE_TYPE_METRICS, SHORT_ROLLUP, ) from .utils import MockedMOR,", "'vsphere_compute:compute_resource2', 'vsphere_host:host3', 'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['host1'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock',", "check = VSphereCheck('vsphere', {}, {}, [instance]) i_key = check._instance_key(instance) #", "= alarm_event(from_status='gray', message='Went from Gray to Red') server_instance.content.eventManager.QueryEvents.return_value = [event]", "VSphereCheck._is_excluded(included_host, {\"name\": included_host.name}, include_regexes, None) assert not VSphereCheck._is_excluded(included_vm, {\"name\": included_vm.name},", "import SOURCE_TYPE from datadog_checks.vsphere.errors import BadConfigError, ConnectionError from 
datadog_checks.vsphere.vsphere import", "check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.OK, tags=SERVICE_CHECK_TAGS ) def test__instance_key(vsphere, instance): assert", "assert all_the_tags['host2'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_datacenter:datacenter1', 'vsphere_cluster:compute_resource1', 'vsphere_compute:compute_resource1', 'vsphere_type:host',", "value and precedence of instance config over init config check", "test__cache_metrics_metadata_compatibility(vsphere, instance): server_instance = vsphere._get_server_instance(instance) i_key = vsphere._instance_key(instance) counter =", "config over init config check = VSphereCheck('vsphere', {}, {}, [instance])", "vsphere._process_mor_objects_queue_async.call_count == 0 def test__cache_metrics_metadata(vsphere, instance): vsphere.metadata_cache = MagicMock() vsphere._cache_metrics_metadata(instance)", "not compliant with the user's `*_include` configuration. 
* Exclude \"non-labeled\"", "server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.check(instance) aggregator.assert_event( \"vCenter monitor status changed on", "[instance])) with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect: # SmartConnect fails SmartConnect.side_effect =", "vsphere.mor_cache.get_mor.return_value = mor vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value = {\"name\": \"mymetric\",", "instance): # Default value with mock.patch(\"datadog_checks.vsphere.VSphereCheck._get_all_objs\") as mock_get_all_objs, mock.patch( \"datadog_checks.vsphere.vsphere.vmodl\"", "\"\"\" Test that we check for errors when collecting properties", "non_filtered_host = MockedMOR(spec=\"HostSystem\") non_filtered_vm = MockedMOR(spec=\"VirtualMachine\") mocked_mors_attrs = { filtered_host:", "``` \"\"\" # Samples with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): instance[\"host_include_only_regex\"] = \"host[2-9]\" instance[\"vm_include_only_regex\"]", "assert not VSphereCheck._is_excluded(included_vm, {\"name\": included_vm.name}, include_regexes, None) # Not OK!", "= check._instance_key(instance) # first run should always cache assert check._should_cache(instance,", "\"name\": \"this_vm_is_filtered\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, \"runtime.host\": filtered_host, }, non_filtered_host: {\"name\": \"non_filtered_host_number_1\",", "metadata, but still stored in the cache for metrics vsphere.excluded_host_tags", "} == obj_list[vim.VirtualMachine][0] assert { \"mor_type\": \"host\", \"mor\": non_filtered_host, \"hostname\":", "all_the_tags['vm4'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_host:host3',", "MockedMOR(spec=\"VirtualMachine\") vm_host_parent = 
MockedMOR(spec=\"VirtualMachine\") mocked_host = MockedMOR(spec=\"HostSystem\") mocked_datastore = MockedMOR(spec=\"Datastore\")", "mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect: SmartConnect.return_value = get_mocked_server() check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.OK,", "event = alarm_event(from_status='gray', message='Went from Gray to Red') server_instance.content.eventManager.QueryEvents.return_value =", "2 * REFRESH_MORLIST_INTERVAL, 'refresh_metrics_metadata_interval': 2 * REFRESH_METRICS_METADATA_INTERVAL, } check =", "REFRESH_MORLIST_INTERVAL, RESOURCE_TYPE_METRICS, SHORT_ROLLUP, ) from .utils import MockedMOR, assertMOR, disable_thread_pool,", "\"tags\": [\"vsphere_datastore:unknown\", \"vsphere_type:datastore\"], } in obj_list[vim.Datastore] assert len(obj_list[vim.Datacenter]) == 1", "are missing attributes \"\"\" server_instance = vsphere._get_server_instance(instance) vm_no_parent = MockedMOR(spec=\"VirtualMachine\")", "\"tags\": [\"vsphere_type:host\"], } in obj_list[vim.HostSystem] assert len(obj_list[vim.Datastore]) == 1 assert", "do not use fixtures for the check instance, some params", "1 since the batch size is 1 assert len(call_args[0][1]) ==", "del instance['name'] with pytest.raises(BadConfigError): vsphere._instance_key(instance) def test__should_cache(instance): now = time.time()", ") server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()] vsphere.check(instance) aggregator.assert_event( \"vCenter", "parameter acts as expected \"\"\" vsphere._process_mor_objects_queue_async = MagicMock() instance[\"collect_realtime_only\"] =", "# do not use fixtures for the check instance, some", "dict(set_external_tags.call_args[0][0]) assert all_the_tags['vm4'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 
'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2',", "name='dc2') ds = MockedMOR(spec='Datastore') ds_arg = vim.event.DatastoreEventArgument(datastore=ds, name='ds1') ds_dest =", "configuration instructs to. \"\"\" # Sample(s) include_regexes = {'host_include': \"f[o]+\",", "of this virtual machine\", exact_match=False, tags=[ 'foo:bar', 'vsphere_host:host1', 'vsphere_host:host2', 'vsphere_datacenter:dc1',", "MagicMock from pyVmomi import vim from datadog_checks.vsphere import VSphereCheck from", "in cache for sending with metrics aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname=\"mocked_vm\", count=1)", "+ 2 hosts + 2 datacenters + 2 clusters +", "from datadog_checks.vsphere.vsphere import ( REFRESH_METRICS_METADATA_INTERVAL, REFRESH_MORLIST_INTERVAL, RESOURCE_TYPE_METRICS, SHORT_ROLLUP, ) from", "compute_resource2 - host3 - vm1 # Not labeled - vm2", "# configuration is properly propagated init_config = { 'refresh_morlist_interval': 2", "len(obj_list[vim.VirtualMachine]) == 2 assert { \"mor_type\": \"vm\", \"mor\": vm_no_parent, \"hostname\":", "include_only_marked = True # OK included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\", label=True)", "obj_list[vim.HostSystem][0] def test__get_all_objs(vsphere, instance): \"\"\" Test that we don't raise", "fails SmartConnect.side_effect = Exception() with pytest.raises(ConnectionError): check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL,", "vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()] vsphere.check(instance) aggregator.assert_event( \"vCenter monitor status changed", "== 1 instance[\"collect_realtime_only\"] = False vsphere._cache_morlist_raw(instance) assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for", "last execution time in the past check.cache_config.set_last(CacheConfig.Morlist, i_key, now -", "Compatibility mode 
instance[\"all_metrics\"] = False del instance[\"collection_level\"] vsphere.format_metric_name.reset_mock() server_instance.content.perfManager.perfCounter =", "so we also test # configuration is properly propagated init_config", "= rollup assert vsphere.format_metric_name(counter) == \"group.name.{}\".format(short_rollup) def test_collect_metrics(vsphere, instance): with", "\"vsphere_type:vm\"], } == obj_list[vim.VirtualMachine][0] assert { \"mor_type\": \"host\", \"mor\": non_filtered_host,", "in RESOURCE_TYPE_METRICS) == 11 vsphere._process_mor_objects_queue(instance) # Object queue should be", "vsphere._process_mor_objects_queue(instance) # Called once to process the 2 datacenters, then", "vsphere.batch_morlist_size = 1 vsphere._collect_metrics_async = MagicMock() vsphere._cache_metrics_metadata(instance) vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) vsphere.collect_metrics(instance)", "= [\"vcenter_server:vsphere_mock\", \"vcenter_host:None\", \"foo:bar\"] def test__init__(instance): with pytest.raises(BadConfigError): # Must", "include_regexes, include_only_marked) def test_vms_in_filtered_host_are_filtered(vsphere, instance): \"\"\"Test that all vms belonging", "mocked_mors_attrs = { vm_no_parent: {\"name\": \"vm_no_parent\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn}, vm_no_powerstate: {\"name\":", "test_service_check_ko(aggregator, instance): check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance])) with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect')", "assert not mock_get_all_objs.call_args[1][\"use_guest_hostname\"] # use guest hostname instance[\"use_guest_hostname\"] = True", "mock.patch(\"datadog_checks.vsphere.vsphere.vmodl\"): obj = MagicMock(missingSet=None, obj=\"obj\") result = MagicMock(token=None, objects=[obj]) server_instance.content.propertyCollector.RetrievePropertiesEx.return_value", "for res_type in RESOURCE_TYPE_METRICS) == 0 assert 
vsphere._process_mor_objects_queue_async.call_count == 0", "OK included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\") assert VSphereCheck._is_excluded(included_vm, {\"customValue\": []}, include_regexes,", "vm_no_parent, \"hostname\": \"vm_no_parent\", \"tags\": [\"vsphere_host:unknown\", \"vsphere_type:vm\"], } in obj_list[vim.VirtualMachine] assert", "host_dest_arg = vim.event.HostEventArgument(host=host_dest, name='host2') dc = MockedMOR(spec='Datacenter') dc_arg = vim.event.DatacenterEventArgument(datacenter=dc,", "included_vm = MockedMOR(spec=\"VirtualMachine\", name=\"foo\") assert not VSphereCheck._is_excluded(included_host, {\"name\": included_host.name}, include_regexes,", "vsphere._cache_morlist_raw(instance) # Assertions: 1 labeled+monitored VM + 2 hosts +", "mock import MagicMock from pyVmomi import vim from datadog_checks.vsphere import", "instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere.batch_morlist_size = 1 vsphere._collect_metrics_async = MagicMock() vsphere._cache_metrics_metadata(instance)", "mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): server_instance = vsphere._get_server_instance(instance) result = MagicMock() result.value =", "vsphere.check(instance) set_external_tags.assert_called_once() all_the_tags = dict(set_external_tags.call_args[0][0]) assert all_the_tags['vm4'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock',", "{ filtered_host: {\"name\": \"filtered_host_number_1\", \"parent\": None}, filtered_vm: { \"name\": \"this_vm_is_filtered\",", "instance): server_instance = vsphere._get_server_instance(instance) server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[MagicMock()])] vsphere.mor_cache = MagicMock()", "rollup assert vsphere.format_metric_name(counter) == \"group.name.{}\".format(short_rollup) def test_collect_metrics(vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):", 
"mock_get_all_objs, mock.patch( \"datadog_checks.vsphere.vsphere.vmodl\" ): vsphere._cache_morlist_raw(instance) # Default value assert not", ") setattr(event, 'from', from_status) # noqa: B009 return event def", "VSphereCheck._is_excluded(excluded_vm, {\"name\": excluded_vm.name}, include_regexes, None) # Sample(s) include_regexes = None", "{\"name\": \"mocked_host\", \"parent\": None}, mocked_datastore: {}, mocked_cluster: {\"name\": \"cluster\"}, mocked_datacenter:", "# use guest hostname instance[\"use_guest_hostname\"] = True vsphere._cache_morlist_raw(instance) assert mock_get_all_objs.call_args[1][\"use_guest_hostname\"]", "spec=\"vm\", subset=True) def test__process_mor_objects_queue(vsphere, instance): vsphere.log = MagicMock() vsphere._process_mor_objects_queue_async =", "# Test host tags are excluded from external host metadata,", "Queue hasn't been initialized vsphere.log.debug.assert_called_once_with( \"Objects queue is not initialized", "mocked_host, \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, }, mocked_host: {\"name\": \"mocked_host\", \"parent\": None}, }", "MockedMOR(spec=\"VirtualMachine\") mocked_host = MockedMOR(spec=\"HostSystem\") mocked_mors_attrs = { mocked_vm: { \"name\":", "hot migration of this virtual machine\", exact_match=False, tags=[ 'foo:bar', 'vsphere_host:host1',", "[]) assert len(obj_list[vim.VirtualMachine]) == 1 assert len(obj_list[vim.HostSystem]) == 1 assert", "\"vsphere_datacenter:datacenter2\", \"vsphere_compute:compute_resource2\", \"vsphere_cluster:compute_resource2\", \"vsphere_host:host3\", \"vsphere_type:vm\", ] assertMOR(vsphere, instance, name=\"vm4\", spec=\"vm\",", ") vsphere.batch_morlist_size = 1 i_key = vsphere._instance_key(instance) with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance)", "instance): assert vsphere._instance_key(instance) == \"vsphere_mock\" del instance['name'] with pytest.raises(BadConfigError): vsphere._instance_key(instance)", 
"[alarm_event()] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event( \"vCenter monitor status", "the check() method \"\"\" with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): with mock.patch.object(vsphere, 'set_external_tags') as", "filtered host are also filtered\"\"\" server_instance = vsphere._get_server_instance(instance) filtered_host =", "not check.server_instances assert check.cache_config.get_interval(CacheConfig.Morlist, i_key) == 42 assert check.cache_config.get_interval(CacheConfig.Metadata, i_key)", "== 3 instance[\"collect_realtime_only\"] = True vsphere._process_mor_objects_queue_async.reset_mock() with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance)", "import MagicMock from pyVmomi import vim from datadog_checks.vsphere import VSphereCheck", "= MagicMock() result.value = [23.4] server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result])] mor =", "hosts and virtual machines instance[\"use_guest_hostname\"] = True vsphere._cache_morlist_raw(instance) assertMOR(vsphere, instance,", "[alarm_event()] vsphere.check(instance) aggregator.assert_event( \"vCenter monitor status changed on this alarm,", "from datadog_checks.vsphere.common import SOURCE_TYPE from datadog_checks.vsphere.errors import BadConfigError, ConnectionError from", "= MockedMOR(spec=\"ClusterComputeResource\") mocked_mors_attrs = { vm_no_parent: {\"name\": \"vm_no_parent\", \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn},", "are not collected for call_args in vsphere._collect_metrics_async.call_args_list: # query_specs parameter", "== \"vsphere_mock\" del instance['name'] with pytest.raises(BadConfigError): vsphere._instance_key(instance) def test__should_cache(instance): now", "now = time.time() # do not use fixtures for the", "[\"vsphere_host\"]}, {}, [instance]) assert 
check.excluded_host_tags == [] # Test host", "# realtime only for call_args in vsphere._process_mor_objects_queue_async.call_args_list: # query_specs parameter", "# Sample(s) include_regexes = None include_only_marked = True # OK", "from datadog_checks.vsphere.errors import BadConfigError, ConnectionError from datadog_checks.vsphere.vsphere import ( REFRESH_METRICS_METADATA_INTERVAL,", "a filtered host are also filtered\"\"\" server_instance = vsphere._get_server_instance(instance) filtered_host", "len(vsphere.metadata_cache._metadata[i_key]) == 1 vsphere.format_metric_name.assert_called_once_with(counter) # Compatibility mode instance[\"all_metrics\"] = False", "assertMOR(vsphere, instance, spec=\"vm\", count=3) # Fallback on VM name when", "with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect: SmartConnect.return_value = get_mocked_server() check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME,", "assert { \"mor_type\": \"host\", \"mor\": non_filtered_host, \"hostname\": \"non_filtered_host_number_1\", \"tags\": [\"vsphere_type:host\"],", "== -42 assert check.clean_morlist_interval == 50 assert len(check.event_config) == 1", "1 assert len(obj_list[vim.HostSystem]) == 1 assert { \"mor_type\": \"vm\", \"mor\":", "vm_host_parent = MockedMOR(spec=\"VirtualMachine\") mocked_host = MockedMOR(spec=\"HostSystem\") mocked_datastore = MockedMOR(spec=\"Datastore\") mocked_datacenter", "from Gray to Green') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms':", "and it's now red.\", tags=['foo:bar'] ) def test_events_tags(aggregator, vsphere, instance):", "in RESOURCE_TYPE_METRICS) == 0 assert vsphere._process_mor_objects_queue_async.call_count == 0 # realtime", "* REFRESH_MORLIST_INTERVAL)) check.cache_config.set_last(CacheConfig.Metadata, i_key, now - (2 * REFRESH_METRICS_METADATA_INTERVAL)) with", "server_instance = 
vsphere._get_server_instance(instance) server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[MagicMock()])] vsphere.mor_cache = MagicMock() vsphere.metadata_cache", "MockedMOR(spec=\"HostSystem\") mocked_datastore = MockedMOR(spec=\"Datastore\") mocked_datacenter = MockedMOR(spec=\"Datacenter\") mocked_cluster = MockedMOR(spec=\"ClusterComputeResource\")", "tags=SERVICE_CHECK_TAGS ) def test__instance_key(vsphere, instance): assert vsphere._instance_key(instance) == \"vsphere_mock\" del", "vsphere._process_mor_objects_queue(instance) # Queue hasn't been initialized vsphere.log.debug.assert_called_once_with( \"Objects queue is", "message='Went from Yellow to Gray') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.check(instance) aggregator.assert_event(", "rootFolder - datacenter1 - compute_resource1 - host1 # Filtered out", "\"name\": \"mocked_vm\", \"parent\": mocked_host, \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn, }, mocked_host: {\"name\": \"mocked_host\",", "tags=['foo:bar'], alert_type='info', ) def test_events_gray_ignored(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance", "assert vsphere._collect_metrics_async.call_count == 6 # One for each VM/host, datacenters", "vsphere._collect_metrics_async = MagicMock() vsphere._cache_metrics_metadata(instance) vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) vsphere.collect_metrics(instance) assert vsphere._collect_metrics_async.call_count ==", "= vsphere._get_server_instance(instance) with mock.patch(\"datadog_checks.vsphere.vsphere.vmodl\"): obj = MagicMock(missingSet=None, obj=\"obj\") result =", "2 assert { \"mor_type\": \"vm\", \"mor\": vm_no_parent, \"hostname\": \"vm_no_parent\", \"tags\":", "{}, [{'': ''}]) init_config = { 'clean_morlist_interval': 50, 'refresh_morlist_interval': 42,", "not in external metadata for host, source_tags in 
ext_host_tags: if", "vsphere._process_mor_objects_queue(instance) # Object queue should be empty after processing assert", "mocked_datacenter: {\"parent\": MockedMOR(spec=\"Folder\", name=\"unknown folder\"), \"name\": \"datacenter\"}, } with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\",", "aggregator): server_instance = vsphere._get_server_instance(instance) result = MagicMock() result.value = [23.4]", "datadog_checks.vsphere.common import SOURCE_TYPE from datadog_checks.vsphere.errors import BadConfigError, ConnectionError from datadog_checks.vsphere.vsphere", "assert check.excluded_host_tags == [] def test_excluded_host_tags(vsphere, instance, aggregator): # Check", "= get_mocked_server() check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.OK, tags=SERVICE_CHECK_TAGS ) def test__instance_key(vsphere,", "None) # Sample(s) include_regexes = None include_only_marked = True #", "server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()] vsphere.check(instance) aggregator.assert_event( \"vCenter monitor", "disable_thread_pool(VSphereCheck('disk', {}, {}, [instance])) with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect: # SmartConnect", "\"unit\": \"kb\"} vsphere.in_compatibility_mode = MagicMock() vsphere.in_compatibility_mode.return_value = False vsphere._collect_metrics_async(instance, [])", "name='alarm1') entity = vim.event.ManagedEntityEventArgument(entity=vm, name='vm1') event = vim.event.AlarmStatusChangedEvent( entity=entity, fullFormattedMessage=message,", "MockedMOR(spec=\"Folder\", name=\"unknown folder\"), \"name\": \"datacenter\"}, } with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): obj_list", "Test that we don't raise KeyError if the property collector", "red.\", tags=['foo:bar'] ) def 
test_events_gray_handled(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance", "server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) assert not", "- datacenter1 - compute_resource1 - host1 # Filtered out -", "vsphere.log.warning.assert_called_once() del instance[\"collection_level\"] vsphere.log.reset_mock() assert vsphere.in_compatibility_mode(instance) vsphere.log.warning.assert_not_called() assert vsphere.in_compatibility_mode(instance, log_warning=True)", "not check._should_cache(instance, CacheConfig.Morlist) assert not check._should_cache(instance, CacheConfig.Metadata) def alarm_event(from_status='green', to_status='red',", "\"name\": \"datacenter\"}, } with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs): obj_list = vsphere._get_all_objs(server_instance, None,", "3 server_instance.content.perfManager.QueryPerfCounterByLevel.return_value = [counter] vsphere._cache_metrics_metadata(instance) server_instance.content.perfManager.QueryPerfCounterByLevel.assert_called_once_with(3) assert len(vsphere.metadata_cache._metric_ids[i_key]) == 1", "unique 'name' per vCenter instance VSphereCheck('vsphere', {}, {}, [{'': ''}])", "also filtered\"\"\" server_instance = vsphere._get_server_instance(instance) filtered_host = MockedMOR(spec=\"HostSystem\") filtered_vm =", "obj = MagicMock(missingSet=None, obj=\"obj\") result = MagicMock(token=None, objects=[obj]) server_instance.content.propertyCollector.RetrievePropertiesEx.return_value =", "True} vsphere.check(instance) aggregator.assert_event( \"vCenter monitor status changed on this alarm,", "Gray') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.check(instance) aggregator.assert_event( \"vCenter monitor status changed", "[MagicMock(value=[result], 
entity=mocked_vm)] vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value = {\"name\": \"mymetric\", \"unit\":", "{}, {}, [instance])) with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect: # SmartConnect fails", "{\"parent\": MockedMOR(spec=\"Folder\", name=\"unknown folder\"), \"name\": \"datacenter\"}, } with mock.patch(\"datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes\", return_value=mocked_mors_attrs):", "\"vsphere_host:unknown\", \"vsphere_type:vm\"], } in obj_list[vim.VirtualMachine] assert len(obj_list[vim.HostSystem]) == 1 assert", "= MagicMock() # New way instance[\"collection_level\"] = 3 server_instance.content.perfManager.QueryPerfCounterByLevel.return_value =", "'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_type:host', ] assert all_the_tags['vm2'][SOURCE_TYPE] == [", "in obj_list[vim.VirtualMachine] assert len(obj_list[vim.HostSystem]) == 1 assert { \"mor_type\": \"host\",", "== 1 assert { \"mor_type\": \"vm\", \"mor\": non_filtered_vm, \"hostname\": \"this_vm_is_not_filtered\",", "break # vsphere_host tag still in cache for sending with", "there are missing attributes \"\"\" server_instance = vsphere._get_server_instance(instance) vm_no_parent =", "\"vsphere_type:host\", ] assertMOR(vsphere, instance, name=\"host3\", spec=\"host\", tags=tags) # ...on VMs", "a list of size 1 since the batch size is", "host metadata, but still stored in the cache for metrics", "def test_collect_metrics(vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere.batch_morlist_size = 1 vsphere._collect_metrics_async =", "alarm, it was yellow and it's now gray.\", tags=['foo:bar'], alert_type='info',", "== 0 def test__cache_metrics_metadata(vsphere, instance): vsphere.metadata_cache = MagicMock() vsphere._cache_metrics_metadata(instance) 
vsphere.metadata_cache.init_instance.assert_called_once_with(vsphere._instance_key(instance))", "vsphere._cache_morlist_raw(instance) assert mock_get_all_objs.call_args[1][\"use_guest_hostname\"] with mock.patch(\"datadog_checks.vsphere.vsphere.vmodl\"): # Discover hosts and virtual", "== [] # Test host tags are excluded from external", "assert { \"mor_type\": \"vm\", \"mor\": vm_no_parent, \"hostname\": \"vm_no_parent\", \"tags\": [\"vsphere_host:unknown\",", "'unknown') vsphere.log.reset_mock() vsphere.in_compatibility_mode.return_value = False vsphere._collect_metrics_async(instance, []) vsphere.log.debug.assert_not_called() def test__collect_metrics_async_hostname(vsphere,", "# Default value assert not mock_get_all_objs.call_args[1][\"use_guest_hostname\"] # use guest hostname", "{ \"mor_type\": \"vm\", \"mor\": vm_host_parent, \"hostname\": \"unknown\", \"tags\": [\"vsphere_host:mocked_host\", \"vsphere_host:unknown\",", "mock.patch(\"time.time\", return_value=now): assert not check._should_cache(instance, CacheConfig.Morlist) assert not check._should_cache(instance, CacheConfig.Metadata)", "'prop', 'obj', 'fault') assert len(mor_attrs) == 1 def test__cache_morlist_raw(vsphere, instance):", "mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) ==", "tags = [ \"vcenter_server:vsphere_mock\", \"vsphere_folder:folder1\", \"vsphere_datacenter:datacenter2\", \"vsphere_compute:compute_resource2\", \"vsphere_cluster:compute_resource2\", \"vsphere_host:host3\", \"vsphere_type:vm\",", "'vsphere_datacenter:dc2', ], ) server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()] vsphere.check(instance)", "this alarm, it was green and it's now red.\", tags=['foo:bar']", "included_vm, {\"customValue\": included_vm.customValue}, include_regexes, include_only_marked ) # 
Not OK included_vm", "- vm3 # Powered off - vm4 ``` \"\"\" #", "[\"vsphere_host:mocked_host\", \"vsphere_host:unknown\", \"vsphere_type:vm\"], } in obj_list[vim.VirtualMachine] assert len(obj_list[vim.HostSystem]) == 1", "instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [migrated_event()] vsphere.event_config['vsphere_mock']", "user's `*_include` configuration. * Exclude \"non-labeled\" virtual machines when the", "= [MagicMock(value=[result])] mor = {\"hostname\": \"foo\"} vsphere.mor_cache = MagicMock() vsphere.mor_cache.get_mor.return_value", "server_instance = vsphere._get_server_instance(instance) event = alarm_event(from_status='gray', to_status='green', message='Went from Gray", "False with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) # Called once to process", "name=\"foo\", label=True) assert not VSphereCheck._is_excluded( included_vm, {\"customValue\": included_vm.customValue}, include_regexes, include_only_marked", "\"group.name.{}\".format(short_rollup) def test_collect_metrics(vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere.batch_morlist_size = 1 vsphere._collect_metrics_async", "MockedMOR(spec=\"VirtualMachine\") mocked_mors_attrs = { filtered_host: {\"name\": \"filtered_host_number_1\", \"parent\": None}, filtered_vm:", "SmartConnect: # SmartConnect fails SmartConnect.side_effect = Exception() with pytest.raises(ConnectionError): check.check(instance)", "reserved # Licensed under Simplified BSD License (see LICENSE) from", "{}, [instance]) i_key = check._instance_key(instance) # first run should always", "assert len(vsphere.metadata_cache._metric_ids[i_key]) == 1 assert len(vsphere.metadata_cache._metadata[i_key]) == 1 vsphere.format_metric_name.assert_called_once_with(counter) #", "the 
datastore assert vsphere._process_mor_objects_queue_async.call_count == 3 instance[\"collect_realtime_only\"] = True vsphere._process_mor_objects_queue_async.reset_mock()", "} in obj_list[vim.Datastore] assert len(obj_list[vim.Datacenter]) == 1 assert { \"mor_type\":", "virtual machines instance[\"use_guest_hostname\"] = True vsphere._cache_morlist_raw(instance) assertMOR(vsphere, instance, spec=\"vm\", count=3)", "Check default value and precedence of instance config over init", "size 1 since the batch size is 1 assert len(call_args[0][1])", "vsphere.log.warning.assert_not_called() assert vsphere.in_compatibility_mode(instance, log_warning=True) vsphere.log.warning.assert_called_once() def test_format_metric_name(vsphere): counter = MagicMock()", "when collecting properties with property collector \"\"\" server_instance = vsphere._get_server_instance(instance)", "CacheConfig.Morlist) assert not check._should_cache(instance, CacheConfig.Metadata) def alarm_event(from_status='green', to_status='red', message='Some error'):", "to=to_status, datacenter=dc_arg, alarm=alarm_arg ) setattr(event, 'from', from_status) # noqa: B009", "hostname=\"foo\") def test_check(vsphere, instance): \"\"\" Test the check() method \"\"\"", "\"vm_no_powerstate\"}, vm_host_parent: {\"parent\": mocked_host, \"runtime.powerState\": vim.VirtualMachinePowerState.poweredOn}, mocked_host: {\"name\": \"mocked_host\", \"parent\":", "datacenter1 - compute_resource1 - host1 # Filtered out - host2", "vsphere._cache_metrics_metadata(instance) vsphere.metadata_cache.init_instance.assert_called_once_with(vsphere._instance_key(instance)) vsphere.metadata_cache.set_metadata.assert_called_once() vsphere.metadata_cache.set_metric_ids.assert_called_once() def test__cache_metrics_metadata_compatibility(vsphere, instance): server_instance = vsphere._get_server_instance(instance)", ") def test_events_gray_handled(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): 
server_instance = vsphere._get_server_instance(instance)", "vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event(", "None, \"tags\": [\"vsphere_cluster:cluster\", \"vsphere_type:cluster\"], } in obj_list[vim.ClusterComputeResource] def test__collect_mors_and_attributes(vsphere, instance):", "vsphere.format_metric_name(counter, compatibility=True) == \"group.name\" for rollup, short_rollup in SHORT_ROLLUP.items(): counter.rollupType", "Called once to process the 2 datacenters, then 2 clusters,", "tags=['foo:bar'] ) event = alarm_event(from_status='yellow', to_status='gray', message='Went from Yellow to", "len(call_args[0][1]) == 1 instance[\"collect_realtime_only\"] = False vsphere._cache_morlist_raw(instance) assert sum(vsphere.mor_objects_queue.size(i_key, res_type)", "per vCenter instance VSphereCheck('vsphere', {}, {}, [{'': ''}]) init_config =" ]
[ "}}</em>.<br> If you have additional questions or wish to no", "the errors before making a new submission{% endif %}.<br> If", "key not set in environment variable {MANDRILL_API_KEY}\" CONTACT_LIST_QUERY = \"\"\"", "loaded on {{ timestamp }}.<br> {% endif %} Please review", "team, DRC<br> <em>All of Us</em> Research Program<br> <img src=\"cid:{{ aou_logo", "data steward' # HPO contact list table columns SITE_NAME =", "table columns SITE_NAME = 'site_name' HPO_ID = 'hpo_id' SITE_POINT_OF_CONTACT =", "folder in your GCS bucket <a href=\"{{ submission_folder_url }}\">here</a>.<br> For", "<p style=\"font-size:115%;\">Hi {{ site_name }},</p> <p style=\"font-size:115%;\">Your submission <b>{{ folder", "To view the full set of curation reports, please visit", "{% else %}was successfully loaded on {{ timestamp }}.<br> {%", "<a href=\"{{ ehr_ops_site_url }}\">EHR Ops website</a>.</p> <p style=\"font-size:115%;\">You are receiving", "{{ site_name }},</p> <p style=\"font-size:115%;\">Your submission <b>{{ folder }}</b> {%", "report attached to this email{% if submission_error %}<br> and resolve", "your files have not been successfully uploaded, please run the", "EHR_OPS_ZENDESK = '<EMAIL>' DATA_CURATION_LISTSERV = '<EMAIL>' NO_REPLY_ADDRESS = '<EMAIL>' NO_DATA_STEWARD", "these emails, please reply/send an email to <a href=\"mailto:{{ eo_zendesk", "you are listed as a point of contact for HPO", "wish to no longer receive these emails, please reply/send an", "or wish to no longer receive these emails, please reply/send", "\"\"\" SELECT * FROM `{{project}}.{{dataset}}.{{contact_table}}` \"\"\" EHR_OPERATIONS = 'EHR Ops'", "}}.<br> {% else %}was successfully loaded on {{ timestamp }}.<br>", "submission_error %}was NOT successfully loaded on {{ timestamp }}.<br> {%", "not been successfully uploaded, please run the <a href=\"https://github.com/all-of-us/aou-ehr-file-check\">local file", "= \"\"\" <p style=\"font-size:115%;\">Hi {{ site_name }},</p> <p 
style=\"font-size:115%;\">Your submission", "point of contact for HPO Site <em>{{ site_name }}</em>.<br> If", "making a new submission{% endif %}.<br> If any of your", "Us</em> Research Program<br> <img src=\"cid:{{ aou_logo }}\"/></p> \"\"\" AOU_LOGO =", "Email content EMAIL_BODY = \"\"\" <p style=\"font-size:115%;\">Hi {{ site_name }},</p>", "are listed as a point of contact for HPO Site", "on the reports and how to download them, please refer", "constants MAIL_TO = 'mail_to' EHR_OPS_SITE_URL = 'https://sites.google.com/view/ehrupload' # Email content", "%} Please review the <code>results.html</code> submission report attached to this", "to our <a href=\"{{ ehr_ops_site_url }}\">EHR Ops website</a>.</p> <p style=\"font-size:115%;\">You", "<b>{{ folder }}</b> {% if submission_error %}was NOT successfully loaded", "else %}was successfully loaded on {{ timestamp }}.<br> {% endif", "the full set of curation reports, please visit the submission", "email{% if submission_error %}<br> and resolve the errors before making", "HPO contact list table columns SITE_NAME = 'site_name' HPO_ID =", "UNSET_MANDRILL_API_KEY_MSG = f\"Mandrill API key not set in environment variable", "\"\"\" <p style=\"font-size:115%;\">Hi {{ site_name }},</p> <p style=\"font-size:115%;\">Your submission <b>{{", "folder }}</b> {% if submission_error %}was NOT successfully loaded on", "Ops website</a>.</p> <p style=\"font-size:115%;\">You are receiving this email because you", "please reply/send an email to <a href=\"mailto:{{ eo_zendesk }}\">{{ eo_zendesk", "{% endif %} Please review the <code>results.html</code> submission report attached", "how to download them, please refer to our <a href=\"{{", "{{ timestamp }}.<br> {% else %}was successfully loaded on {{", "emails, please reply/send an email to <a href=\"mailto:{{ eo_zendesk }}\">{{", "questions or wish to no longer receive these emails, please", "additional questions or wish to no longer receive these emails,", "been successfully uploaded, 
please run the <a href=\"https://github.com/all-of-us/aou-ehr-file-check\">local file check</a>", "}}\">EHR Ops website</a>.</p> <p style=\"font-size:115%;\">You are receiving this email because", "DATA_CURATION_LISTSERV = '<EMAIL>' NO_REPLY_ADDRESS = '<EMAIL>' NO_DATA_STEWARD = 'no data", "{MANDRILL_API_KEY}\" CONTACT_LIST_QUERY = \"\"\" SELECT * FROM `{{project}}.{{dataset}}.{{contact_table}}` \"\"\" EHR_OPERATIONS", "<p style=\"font-size:115%;\">EHR Ops team, DRC<br> <em>All of Us</em> Research Program<br>", "submission report attached to this email{% if submission_error %}<br> and", "successfully loaded on {{ timestamp }}.<br> {% endif %} Please", "any of your files have not been successfully uploaded, please", "email because you are listed as a point of contact", "endif %}.<br> If any of your files have not been", "CONTACT_LIST_QUERY = \"\"\" SELECT * FROM `{{project}}.{{dataset}}.{{contact_table}}` \"\"\" EHR_OPERATIONS =", "reports, please visit the submission folder in your GCS bucket", "<a href=\"{{ submission_folder_url }}\">here</a>.<br> For more information on the reports", "}}\">here</a>.<br> For more information on the reports and how to", "eo_zendesk }}\">{{ eo_zendesk }}</a>.</p> <p style=\"font-size:115%;\">EHR Ops team, DRC<br> <em>All", "because you are listed as a point of contact for", "to <a href=\"mailto:{{ eo_zendesk }}\">{{ eo_zendesk }}</a>.</p> <p style=\"font-size:115%;\">EHR Ops", "no longer receive these emails, please reply/send an email to", "review the <code>results.html</code> submission report attached to this email{% if", "in environment variable {MANDRILL_API_KEY}\" CONTACT_LIST_QUERY = \"\"\" SELECT * FROM", "= '<EMAIL>' DATA_CURATION_LISTSERV = '<EMAIL>' NO_REPLY_ADDRESS = '<EMAIL>' NO_DATA_STEWARD =", "%}.<br> If any of your files have not been successfully", "a new submission{% endif %}.<br> If any of your files", "endif %} Please review the <code>results.html</code> submission report attached to", "'<EMAIL>' 
NO_REPLY_ADDRESS = '<EMAIL>' NO_DATA_STEWARD = 'no data steward' #", "environment variable {MANDRILL_API_KEY}\" CONTACT_LIST_QUERY = \"\"\" SELECT * FROM `{{project}}.{{dataset}}.{{contact_table}}`", "= '<EMAIL>' NO_DATA_STEWARD = 'no data steward' # HPO contact", "check</a> before making your submission.<br> To view the full set", "to no longer receive these emails, please reply/send an email", "\"\"\" EHR_OPERATIONS = 'EHR Ops' EHR_OPS_ZENDESK = '<EMAIL>' DATA_CURATION_LISTSERV =", "}}.<br> {% endif %} Please review the <code>results.html</code> submission report", "this email because you are listed as a point of", "src=\"cid:{{ aou_logo }}\"/></p> \"\"\" AOU_LOGO = 'aou_logo' AOU_LOGO_PNG = 'all-of-us-logo.png'", "the submission folder in your GCS bucket <a href=\"{{ submission_folder_url", "in your GCS bucket <a href=\"{{ submission_folder_url }}\">here</a>.<br> For more", "Research Program<br> <img src=\"cid:{{ aou_logo }}\"/></p> \"\"\" AOU_LOGO = 'aou_logo'", "download them, please refer to our <a href=\"{{ ehr_ops_site_url }}\">EHR", "Ops team, DRC<br> <em>All of Us</em> Research Program<br> <img src=\"cid:{{", "EHR_OPS_SITE_URL = 'https://sites.google.com/view/ehrupload' # Email content EMAIL_BODY = \"\"\" <p", "href=\"mailto:{{ eo_zendesk }}\">{{ eo_zendesk }}</a>.</p> <p style=\"font-size:115%;\">EHR Ops team, DRC<br>", "If you have additional questions or wish to no longer", "Site <em>{{ site_name }}</em>.<br> If you have additional questions or", "steward' # HPO contact list table columns SITE_NAME = 'site_name'", "GCS bucket <a href=\"{{ submission_folder_url }}\">here</a>.<br> For more information on", "bucket <a href=\"{{ submission_folder_url }}\">here</a>.<br> For more information on the", "<code>results.html</code> submission report attached to this email{% if submission_error %}<br>", "contact for HPO Site <em>{{ site_name }}</em>.<br> If you have", "= \"\"\" SELECT * FROM `{{project}}.{{dataset}}.{{contact_table}}` \"\"\" EHR_OPERATIONS = 
'EHR", "submission_error %}<br> and resolve the errors before making a new", "curation reports, please visit the submission folder in your GCS", "* FROM `{{project}}.{{dataset}}.{{contact_table}}` \"\"\" EHR_OPERATIONS = 'EHR Ops' EHR_OPS_ZENDESK =", "as a point of contact for HPO Site <em>{{ site_name", "}}\">{{ eo_zendesk }}</a>.</p> <p style=\"font-size:115%;\">EHR Ops team, DRC<br> <em>All of", "not set in environment variable {MANDRILL_API_KEY}\" CONTACT_LIST_QUERY = \"\"\" SELECT", "= f\"Mandrill API key not set in environment variable {MANDRILL_API_KEY}\"", "'<EMAIL>' DATA_CURATION_LISTSERV = '<EMAIL>' NO_REPLY_ADDRESS = '<EMAIL>' NO_DATA_STEWARD = 'no", "the <code>results.html</code> submission report attached to this email{% if submission_error", "ehr_ops_site_url }}\">EHR Ops website</a>.</p> <p style=\"font-size:115%;\">You are receiving this email", "errors before making a new submission{% endif %}.<br> If any", "%}was NOT successfully loaded on {{ timestamp }}.<br> {% else", "= 'EHR Ops' EHR_OPS_ZENDESK = '<EMAIL>' DATA_CURATION_LISTSERV = '<EMAIL>' NO_REPLY_ADDRESS", "please visit the submission folder in your GCS bucket <a", "timestamp }}.<br> {% else %}was successfully loaded on {{ timestamp", "{{ timestamp }}.<br> {% endif %} Please review the <code>results.html</code>", "longer receive these emails, please reply/send an email to <a", "successfully uploaded, please run the <a href=\"https://github.com/all-of-us/aou-ehr-file-check\">local file check</a> before", "NOT successfully loaded on {{ timestamp }}.<br> {% else %}was", "variable {MANDRILL_API_KEY}\" CONTACT_LIST_QUERY = \"\"\" SELECT * FROM `{{project}}.{{dataset}}.{{contact_table}}` \"\"\"", "and how to download them, please refer to our <a", "are receiving this email because you are listed as a", "of your files have not been successfully uploaded, please run", "# HPO contact list table columns SITE_NAME = 'site_name' HPO_ID", "to this email{% if submission_error %}<br> and resolve the 
errors", "<em>All of Us</em> Research Program<br> <img src=\"cid:{{ aou_logo }}\"/></p> \"\"\"", "reports and how to download them, please refer to our", "uploaded, please run the <a href=\"https://github.com/all-of-us/aou-ehr-file-check\">local file check</a> before making", "= 'no data steward' # HPO contact list table columns", "submission{% endif %}.<br> If any of your files have not", "Ops' EHR_OPS_ZENDESK = '<EMAIL>' DATA_CURATION_LISTSERV = '<EMAIL>' NO_REPLY_ADDRESS = '<EMAIL>'", "receiving this email because you are listed as a point", "HPO Site <em>{{ site_name }}</em>.<br> If you have additional questions", "%}<br> and resolve the errors before making a new submission{%", "please refer to our <a href=\"{{ ehr_ops_site_url }}\">EHR Ops website</a>.</p>", "'MANDRILL_API_KEY' UNSET_MANDRILL_API_KEY_MSG = f\"Mandrill API key not set in environment", "Please review the <code>results.html</code> submission report attached to this email{%", "making your submission.<br> To view the full set of curation", "on {{ timestamp }}.<br> {% else %}was successfully loaded on", "a point of contact for HPO Site <em>{{ site_name }}</em>.<br>", "an email to <a href=\"mailto:{{ eo_zendesk }}\">{{ eo_zendesk }}</a>.</p> <p", "`{{project}}.{{dataset}}.{{contact_table}}` \"\"\" EHR_OPERATIONS = 'EHR Ops' EHR_OPS_ZENDESK = '<EMAIL>' DATA_CURATION_LISTSERV", "NO_DATA_STEWARD = 'no data steward' # HPO contact list table", "}}</a>.</p> <p style=\"font-size:115%;\">EHR Ops team, DRC<br> <em>All of Us</em> Research", "site_name }},</p> <p style=\"font-size:115%;\">Your submission <b>{{ folder }}</b> {% if", "MANDRILL_API_KEY = 'MANDRILL_API_KEY' UNSET_MANDRILL_API_KEY_MSG = f\"Mandrill API key not set", "email to <a href=\"mailto:{{ eo_zendesk }}\">{{ eo_zendesk }}</a>.</p> <p style=\"font-size:115%;\">EHR", "= '<EMAIL>' NO_REPLY_ADDRESS = '<EMAIL>' NO_DATA_STEWARD = 'no data steward'", "submission.<br> To view the full set of curation reports, please", "Mandrill API constants MAIL_TO 
= 'mail_to' EHR_OPS_SITE_URL = 'https://sites.google.com/view/ehrupload' #", "more information on the reports and how to download them,", "site_name }}</em>.<br> If you have additional questions or wish to", "before making a new submission{% endif %}.<br> If any of", "you have additional questions or wish to no longer receive", "file check</a> before making your submission.<br> To view the full", "eo_zendesk }}</a>.</p> <p style=\"font-size:115%;\">EHR Ops team, DRC<br> <em>All of Us</em>", "API key not set in environment variable {MANDRILL_API_KEY}\" CONTACT_LIST_QUERY =", "NO_REPLY_ADDRESS = '<EMAIL>' NO_DATA_STEWARD = 'no data steward' # HPO", "style=\"font-size:115%;\">EHR Ops team, DRC<br> <em>All of Us</em> Research Program<br> <img", "Program<br> <img src=\"cid:{{ aou_logo }}\"/></p> \"\"\" AOU_LOGO = 'aou_logo' AOU_LOGO_PNG", "receive these emails, please reply/send an email to <a href=\"mailto:{{", "<img src=\"cid:{{ aou_logo }}\"/></p> \"\"\" AOU_LOGO = 'aou_logo' AOU_LOGO_PNG =", "= 'hpo_id' SITE_POINT_OF_CONTACT = 'site_point_of_contact' # Mandrill API constants MAIL_TO", "<p style=\"font-size:115%;\">Your submission <b>{{ folder }}</b> {% if submission_error %}was", "of contact for HPO Site <em>{{ site_name }}</em>.<br> If you", "view the full set of curation reports, please visit the", "set of curation reports, please visit the submission folder in", "DRC<br> <em>All of Us</em> Research Program<br> <img src=\"cid:{{ aou_logo }}\"/></p>", "new submission{% endif %}.<br> If any of your files have", "SITE_POINT_OF_CONTACT = 'site_point_of_contact' # Mandrill API constants MAIL_TO = 'mail_to'", "set in environment variable {MANDRILL_API_KEY}\" CONTACT_LIST_QUERY = \"\"\" SELECT *", "them, please refer to our <a href=\"{{ ehr_ops_site_url }}\">EHR Ops", "of Us</em> Research Program<br> <img src=\"cid:{{ aou_logo }}\"/></p> \"\"\" AOU_LOGO", "successfully loaded on {{ timestamp }}.<br> {% else %}was successfully", "and resolve the errors before making a new 
submission{% endif", "your GCS bucket <a href=\"{{ submission_folder_url }}\">here</a>.<br> For more information", "submission <b>{{ folder }}</b> {% if submission_error %}was NOT successfully", "if submission_error %}was NOT successfully loaded on {{ timestamp }}.<br>", "loaded on {{ timestamp }}.<br> {% else %}was successfully loaded", "}},</p> <p style=\"font-size:115%;\">Your submission <b>{{ folder }}</b> {% if submission_error", "= 'MANDRILL_API_KEY' UNSET_MANDRILL_API_KEY_MSG = f\"Mandrill API key not set in", "to download them, please refer to our <a href=\"{{ ehr_ops_site_url", "our <a href=\"{{ ehr_ops_site_url }}\">EHR Ops website</a>.</p> <p style=\"font-size:115%;\">You are", "your submission.<br> To view the full set of curation reports,", "have not been successfully uploaded, please run the <a href=\"https://github.com/all-of-us/aou-ehr-file-check\">local", "SITE_NAME = 'site_name' HPO_ID = 'hpo_id' SITE_POINT_OF_CONTACT = 'site_point_of_contact' #", "if submission_error %}<br> and resolve the errors before making a", "listed as a point of contact for HPO Site <em>{{", "before making your submission.<br> To view the full set of", "f\"Mandrill API key not set in environment variable {MANDRILL_API_KEY}\" CONTACT_LIST_QUERY", "columns SITE_NAME = 'site_name' HPO_ID = 'hpo_id' SITE_POINT_OF_CONTACT = 'site_point_of_contact'", "'hpo_id' SITE_POINT_OF_CONTACT = 'site_point_of_contact' # Mandrill API constants MAIL_TO =", "= 'https://sites.google.com/view/ehrupload' # Email content EMAIL_BODY = \"\"\" <p style=\"font-size:115%;\">Hi", "list table columns SITE_NAME = 'site_name' HPO_ID = 'hpo_id' SITE_POINT_OF_CONTACT", "}}</b> {% if submission_error %}was NOT successfully loaded on {{", "the <a href=\"https://github.com/all-of-us/aou-ehr-file-check\">local file check</a> before making your submission.<br> To", "'mail_to' EHR_OPS_SITE_URL = 'https://sites.google.com/view/ehrupload' # Email content EMAIL_BODY = \"\"\"", "resolve the errors before making a new 
submission{% endif %}.<br>", "'site_name' HPO_ID = 'hpo_id' SITE_POINT_OF_CONTACT = 'site_point_of_contact' # Mandrill API", "<a href=\"https://github.com/all-of-us/aou-ehr-file-check\">local file check</a> before making your submission.<br> To view", "href=\"{{ ehr_ops_site_url }}\">EHR Ops website</a>.</p> <p style=\"font-size:115%;\">You are receiving this", "FROM `{{project}}.{{dataset}}.{{contact_table}}` \"\"\" EHR_OPERATIONS = 'EHR Ops' EHR_OPS_ZENDESK = '<EMAIL>'", "timestamp }}.<br> {% endif %} Please review the <code>results.html</code> submission", "SELECT * FROM `{{project}}.{{dataset}}.{{contact_table}}` \"\"\" EHR_OPERATIONS = 'EHR Ops' EHR_OPS_ZENDESK", "MAIL_TO = 'mail_to' EHR_OPS_SITE_URL = 'https://sites.google.com/view/ehrupload' # Email content EMAIL_BODY", "this email{% if submission_error %}<br> and resolve the errors before", "{% if submission_error %}was NOT successfully loaded on {{ timestamp", "For more information on the reports and how to download", "# Mandrill API constants MAIL_TO = 'mail_to' EHR_OPS_SITE_URL = 'https://sites.google.com/view/ehrupload'", "= 'mail_to' EHR_OPS_SITE_URL = 'https://sites.google.com/view/ehrupload' # Email content EMAIL_BODY =", "full set of curation reports, please visit the submission folder", "EHR_OPERATIONS = 'EHR Ops' EHR_OPS_ZENDESK = '<EMAIL>' DATA_CURATION_LISTSERV = '<EMAIL>'", "refer to our <a href=\"{{ ehr_ops_site_url }}\">EHR Ops website</a>.</p> <p", "have additional questions or wish to no longer receive these", "= 'site_name' HPO_ID = 'hpo_id' SITE_POINT_OF_CONTACT = 'site_point_of_contact' # Mandrill", "href=\"https://github.com/all-of-us/aou-ehr-file-check\">local file check</a> before making your submission.<br> To view the", "'<EMAIL>' NO_DATA_STEWARD = 'no data steward' # HPO contact list", "on {{ timestamp }}.<br> {% endif %} Please review the", "of curation reports, please visit the submission folder in your", "information on the reports and how to download them, please", "HPO_ID = 
'hpo_id' SITE_POINT_OF_CONTACT = 'site_point_of_contact' # Mandrill API constants", "files have not been successfully uploaded, please run the <a", "style=\"font-size:115%;\">You are receiving this email because you are listed as", "style=\"font-size:115%;\">Your submission <b>{{ folder }}</b> {% if submission_error %}was NOT", "href=\"{{ submission_folder_url }}\">here</a>.<br> For more information on the reports and", "submission_folder_url }}\">here</a>.<br> For more information on the reports and how", "EMAIL_BODY = \"\"\" <p style=\"font-size:115%;\">Hi {{ site_name }},</p> <p style=\"font-size:115%;\">Your", "the reports and how to download them, please refer to", "<a href=\"mailto:{{ eo_zendesk }}\">{{ eo_zendesk }}</a>.</p> <p style=\"font-size:115%;\">EHR Ops team,", "please run the <a href=\"https://github.com/all-of-us/aou-ehr-file-check\">local file check</a> before making your", "If any of your files have not been successfully uploaded,", "website</a>.</p> <p style=\"font-size:115%;\">You are receiving this email because you are", "<p style=\"font-size:115%;\">You are receiving this email because you are listed", "reply/send an email to <a href=\"mailto:{{ eo_zendesk }}\">{{ eo_zendesk }}</a>.</p>", "style=\"font-size:115%;\">Hi {{ site_name }},</p> <p style=\"font-size:115%;\">Your submission <b>{{ folder }}</b>", "for HPO Site <em>{{ site_name }}</em>.<br> If you have additional", "'EHR Ops' EHR_OPS_ZENDESK = '<EMAIL>' DATA_CURATION_LISTSERV = '<EMAIL>' NO_REPLY_ADDRESS =", "run the <a href=\"https://github.com/all-of-us/aou-ehr-file-check\">local file check</a> before making your submission.<br>", "= 'site_point_of_contact' # Mandrill API constants MAIL_TO = 'mail_to' EHR_OPS_SITE_URL", "<em>{{ site_name }}</em>.<br> If you have additional questions or wish", "submission folder in your GCS bucket <a href=\"{{ submission_folder_url }}\">here</a>.<br>", "'no data steward' # HPO contact list table columns SITE_NAME", "'site_point_of_contact' # Mandrill 
API constants MAIL_TO = 'mail_to' EHR_OPS_SITE_URL =", "contact list table columns SITE_NAME = 'site_name' HPO_ID = 'hpo_id'", "'https://sites.google.com/view/ehrupload' # Email content EMAIL_BODY = \"\"\" <p style=\"font-size:115%;\">Hi {{", "# Email content EMAIL_BODY = \"\"\" <p style=\"font-size:115%;\">Hi {{ site_name", "attached to this email{% if submission_error %}<br> and resolve the", "API constants MAIL_TO = 'mail_to' EHR_OPS_SITE_URL = 'https://sites.google.com/view/ehrupload' # Email", "visit the submission folder in your GCS bucket <a href=\"{{", "content EMAIL_BODY = \"\"\" <p style=\"font-size:115%;\">Hi {{ site_name }},</p> <p", "%}was successfully loaded on {{ timestamp }}.<br> {% endif %}" ]
[ "except Exception as ex: os.remove(TEMP_FILE) print('remove text failed.', ex) def", "clip_file: for text in clip_file.read().split('\\n'): if text.strip() == \"\": continue", "in clip_file.read().split('\\n'): print(text) def get_text(key): with open(CLIP_FILE, 'r') as clip_file:", "= os.path.join(Path.home(), '.clip') TEMP_FILE = '.TEMP_FILE' def add_text(key, text): if", "args.add[1] add_text(key, value) elif args.list: list_texts() elif args.get: key =", "\"\\n\") def list_texts(): with open(CLIP_FILE, 'r') as clip_file: for text", "\"\": continue key_val = text.split(':') if key_val[0].strip() != key: temp_file.write(text+\"\\n\")", "Path CLIP_FILE = os.path.join(Path.home(), '.clip') TEMP_FILE = '.TEMP_FILE' def add_text(key,", "'a' else: open_mode = 'w+' with open(CLIP_FILE, open_mode) as clip_file:", "\": \" + text + \"\\n\") def list_texts(): with open(CLIP_FILE,", "parser.add_argument('-l', '--list', action='store_true') args = parser.parse_args() if args.add: key, value", "as clip_file: clip_file.write(key + \": \" + text + \"\\n\")", "key_val[0].strip() == key: print(key_val[1].strip(), end='') def delete_text(key): exists = False", "'--get', nargs=1) parser.add_argument('-d', '--delete', nargs=1) parser.add_argument('-l', '--list', action='store_true') args =", "args.add: key, value = args.add[0], args.add[1] add_text(key, value) elif args.list:", "in clip_file.read().split('\\n'): key_val = text.split(':') if key_val[0].strip() == key: print(key_val[1].strip(),", "with open(TEMP_FILE, 'w+') as temp_file: with open(CLIP_FILE, 'r') as clip_file:", "key, value = args.add[0], args.add[1] add_text(key, value) elif args.list: list_texts()", "with open(CLIP_FILE, 'r') as clip_file: for text in clip_file.read().split('\\n'): key_val", "in the clip store\") try: os.rename(TEMP_FILE, CLIP_FILE) except Exception as", "key_val = text.split(':') if key_val[0].strip() != key: temp_file.write(text+\"\\n\") else: exists", "continue key_val = text.split(':') if 
key_val[0].strip() != key: temp_file.write(text+\"\\n\") else:", "clip_file: for text in clip_file.read().split('\\n'): print(text) def get_text(key): with open(CLIP_FILE,", "= parser.parse_args() if args.add: key, value = args.add[0], args.add[1] add_text(key,", "from pathlib import Path CLIP_FILE = os.path.join(Path.home(), '.clip') TEMP_FILE =", "TEMP_FILE = '.TEMP_FILE' def add_text(key, text): if os.path.exists(CLIP_FILE): open_mode =", "ex: os.remove(TEMP_FILE) print('remove text failed.', ex) def main(): parser =", "args.get: key = args.get[0] get_text(key) elif args.delete: key = args.delete[0]", "action='store_true') args = parser.parse_args() if args.add: key, value = args.add[0],", "saves texts from the command line') parser.add_argument('-a', '--add', nargs=2) parser.add_argument('-g',", "temp_file.write(text+\"\\n\") else: exists = True if not exists: print(\"key:\", key,", "if key_val[0].strip() == key: print(key_val[1].strip(), end='') def delete_text(key): exists =", "= '.TEMP_FILE' def add_text(key, text): if os.path.exists(CLIP_FILE): open_mode = 'a'", "args = parser.parse_args() if args.add: key, value = args.add[0], args.add[1]", "print(\"key:\", key, \"was not found in the clip store\") try:", "'w+') as temp_file: with open(CLIP_FILE, 'r') as clip_file: for text", "main(): parser = argparse.ArgumentParser(description='clips and saves texts from the command", "if key_val[0].strip() != key: temp_file.write(text+\"\\n\") else: exists = True if", "\" + text + \"\\n\") def list_texts(): with open(CLIP_FILE, 'r')", "'r') as clip_file: for text in clip_file.read().split('\\n'): if text.strip() ==", "add_text(key, text): if os.path.exists(CLIP_FILE): open_mode = 'a' else: open_mode =", "key: print(key_val[1].strip(), end='') def delete_text(key): exists = False with open(TEMP_FILE,", "text.split(':') if key_val[0].strip() == key: print(key_val[1].strip(), end='') def delete_text(key): exists", "+ \"\\n\") def list_texts(): with open(CLIP_FILE, 'r') as 
clip_file: for", "key, \"was not found in the clip store\") try: os.rename(TEMP_FILE,", "'.TEMP_FILE' def add_text(key, text): if os.path.exists(CLIP_FILE): open_mode = 'a' else:", "= 'a' else: open_mode = 'w+' with open(CLIP_FILE, open_mode) as", "in clip_file.read().split('\\n'): if text.strip() == \"\": continue key_val = text.split(':')", "list_texts(): with open(CLIP_FILE, 'r') as clip_file: for text in clip_file.read().split('\\n'):", "as ex: os.remove(TEMP_FILE) print('remove text failed.', ex) def main(): parser", "found in the clip store\") try: os.rename(TEMP_FILE, CLIP_FILE) except Exception", "argparse.ArgumentParser(description='clips and saves texts from the command line') parser.add_argument('-a', '--add',", "args.list: list_texts() elif args.get: key = args.get[0] get_text(key) elif args.delete:", "with open(CLIP_FILE, open_mode) as clip_file: clip_file.write(key + \": \" +", "for text in clip_file.read().split('\\n'): if text.strip() == \"\": continue key_val", "def get_text(key): with open(CLIP_FILE, 'r') as clip_file: for text in", "from the command line') parser.add_argument('-a', '--add', nargs=2) parser.add_argument('-g', '--get', nargs=1)", "open(TEMP_FILE, 'w+') as temp_file: with open(CLIP_FILE, 'r') as clip_file: for", "exists = True if not exists: print(\"key:\", key, \"was not", "and saves texts from the command line') parser.add_argument('-a', '--add', nargs=2)", "import Path CLIP_FILE = os.path.join(Path.home(), '.clip') TEMP_FILE = '.TEMP_FILE' def", "'--delete', nargs=1) parser.add_argument('-l', '--list', action='store_true') args = parser.parse_args() if args.add:", "not found in the clip store\") try: os.rename(TEMP_FILE, CLIP_FILE) except", "= args.add[0], args.add[1] add_text(key, value) elif args.list: list_texts() elif args.get:", "argparse from pathlib import Path CLIP_FILE = os.path.join(Path.home(), '.clip') TEMP_FILE", "= 'w+' with open(CLIP_FILE, open_mode) as clip_file: clip_file.write(key + \":", "print(text) def 
get_text(key): with open(CLIP_FILE, 'r') as clip_file: for text", "key = args.delete[0] delete_text(key) else: parser.print_usage() if __name__ == '__main__':", "open_mode = 'w+' with open(CLIP_FILE, open_mode) as clip_file: clip_file.write(key +", "command line') parser.add_argument('-a', '--add', nargs=2) parser.add_argument('-g', '--get', nargs=1) parser.add_argument('-d', '--delete',", "args.get[0] get_text(key) elif args.delete: key = args.delete[0] delete_text(key) else: parser.print_usage()", "delete_text(key): exists = False with open(TEMP_FILE, 'w+') as temp_file: with", "elif args.get: key = args.get[0] get_text(key) elif args.delete: key =", "clip_file.read().split('\\n'): print(text) def get_text(key): with open(CLIP_FILE, 'r') as clip_file: for", "= text.split(':') if key_val[0].strip() == key: print(key_val[1].strip(), end='') def delete_text(key):", "if text.strip() == \"\": continue key_val = text.split(':') if key_val[0].strip()", "parser.add_argument('-a', '--add', nargs=2) parser.add_argument('-g', '--get', nargs=1) parser.add_argument('-d', '--delete', nargs=1) parser.add_argument('-l',", "parser = argparse.ArgumentParser(description='clips and saves texts from the command line')", "list_texts() elif args.get: key = args.get[0] get_text(key) elif args.delete: key", "elif args.delete: key = args.delete[0] delete_text(key) else: parser.print_usage() if __name__", "def main(): parser = argparse.ArgumentParser(description='clips and saves texts from the", "os.rename(TEMP_FILE, CLIP_FILE) except Exception as ex: os.remove(TEMP_FILE) print('remove text failed.',", "ex) def main(): parser = argparse.ArgumentParser(description='clips and saves texts from", "open(CLIP_FILE, 'r') as clip_file: for text in clip_file.read().split('\\n'): key_val =", "clip_file.read().split('\\n'): key_val = text.split(':') if key_val[0].strip() == key: print(key_val[1].strip(), end='')", "os.path.exists(CLIP_FILE): open_mode = 'a' else: open_mode = 'w+' with 
open(CLIP_FILE,", "clip_file: for text in clip_file.read().split('\\n'): key_val = text.split(':') if key_val[0].strip()", "open(CLIP_FILE, open_mode) as clip_file: clip_file.write(key + \": \" + text", "for text in clip_file.read().split('\\n'): print(text) def get_text(key): with open(CLIP_FILE, 'r')", "<filename>clip/clip.py import os import argparse from pathlib import Path CLIP_FILE", "if not exists: print(\"key:\", key, \"was not found in the", "Exception as ex: os.remove(TEMP_FILE) print('remove text failed.', ex) def main():", "args.delete: key = args.delete[0] delete_text(key) else: parser.print_usage() if __name__ ==", "key_val[0].strip() != key: temp_file.write(text+\"\\n\") else: exists = True if not", "as clip_file: for text in clip_file.read().split('\\n'): key_val = text.split(':') if", "= True if not exists: print(\"key:\", key, \"was not found", "store\") try: os.rename(TEMP_FILE, CLIP_FILE) except Exception as ex: os.remove(TEMP_FILE) print('remove", "not exists: print(\"key:\", key, \"was not found in the clip", "else: open_mode = 'w+' with open(CLIP_FILE, open_mode) as clip_file: clip_file.write(key", "+ \": \" + text + \"\\n\") def list_texts(): with", "end='') def delete_text(key): exists = False with open(TEMP_FILE, 'w+') as", "text in clip_file.read().split('\\n'): print(text) def get_text(key): with open(CLIP_FILE, 'r') as", "open_mode = 'a' else: open_mode = 'w+' with open(CLIP_FILE, open_mode)", "False with open(TEMP_FILE, 'w+') as temp_file: with open(CLIP_FILE, 'r') as", "if os.path.exists(CLIP_FILE): open_mode = 'a' else: open_mode = 'w+' with", "key: temp_file.write(text+\"\\n\") else: exists = True if not exists: print(\"key:\",", "'--list', action='store_true') args = parser.parse_args() if args.add: key, value =", "text.split(':') if key_val[0].strip() != key: temp_file.write(text+\"\\n\") else: exists = True", "os.path.join(Path.home(), '.clip') TEMP_FILE = '.TEMP_FILE' def add_text(key, text): if os.path.exists(CLIP_FILE):", "= 
text.split(':') if key_val[0].strip() != key: temp_file.write(text+\"\\n\") else: exists =", "+ text + \"\\n\") def list_texts(): with open(CLIP_FILE, 'r') as", "as clip_file: for text in clip_file.read().split('\\n'): print(text) def get_text(key): with", "for text in clip_file.read().split('\\n'): key_val = text.split(':') if key_val[0].strip() ==", "pathlib import Path CLIP_FILE = os.path.join(Path.home(), '.clip') TEMP_FILE = '.TEMP_FILE'", "clip_file: clip_file.write(key + \": \" + text + \"\\n\") def", "the command line') parser.add_argument('-a', '--add', nargs=2) parser.add_argument('-g', '--get', nargs=1) parser.add_argument('-d',", "nargs=2) parser.add_argument('-g', '--get', nargs=1) parser.add_argument('-d', '--delete', nargs=1) parser.add_argument('-l', '--list', action='store_true')", "nargs=1) parser.add_argument('-l', '--list', action='store_true') args = parser.parse_args() if args.add: key,", "value = args.add[0], args.add[1] add_text(key, value) elif args.list: list_texts() elif", "text in clip_file.read().split('\\n'): if text.strip() == \"\": continue key_val =", "else: exists = True if not exists: print(\"key:\", key, \"was", "if args.add: key, value = args.add[0], args.add[1] add_text(key, value) elif", "!= key: temp_file.write(text+\"\\n\") else: exists = True if not exists:", "get_text(key) elif args.delete: key = args.delete[0] delete_text(key) else: parser.print_usage() if", "open_mode) as clip_file: clip_file.write(key + \": \" + text +", "def list_texts(): with open(CLIP_FILE, 'r') as clip_file: for text in", "os.remove(TEMP_FILE) print('remove text failed.', ex) def main(): parser = argparse.ArgumentParser(description='clips", "parser.parse_args() if args.add: key, value = args.add[0], args.add[1] add_text(key, value)", "line') parser.add_argument('-a', '--add', nargs=2) parser.add_argument('-g', '--get', nargs=1) parser.add_argument('-d', '--delete', nargs=1)", "== \"\": continue key_val = text.split(':') if key_val[0].strip() != 
key:", "print('remove text failed.', ex) def main(): parser = argparse.ArgumentParser(description='clips and", "with open(CLIP_FILE, 'r') as clip_file: for text in clip_file.read().split('\\n'): print(text)", "'w+' with open(CLIP_FILE, open_mode) as clip_file: clip_file.write(key + \": \"", "CLIP_FILE = os.path.join(Path.home(), '.clip') TEMP_FILE = '.TEMP_FILE' def add_text(key, text):", "as temp_file: with open(CLIP_FILE, 'r') as clip_file: for text in", "CLIP_FILE) except Exception as ex: os.remove(TEMP_FILE) print('remove text failed.', ex)", "= args.delete[0] delete_text(key) else: parser.print_usage() if __name__ == '__main__': main()", "clip_file.write(key + \": \" + text + \"\\n\") def list_texts():", "text in clip_file.read().split('\\n'): key_val = text.split(':') if key_val[0].strip() == key:", "elif args.list: list_texts() elif args.get: key = args.get[0] get_text(key) elif", "text): if os.path.exists(CLIP_FILE): open_mode = 'a' else: open_mode = 'w+'", "exists = False with open(TEMP_FILE, 'w+') as temp_file: with open(CLIP_FILE,", "get_text(key): with open(CLIP_FILE, 'r') as clip_file: for text in clip_file.read().split('\\n'):", "def delete_text(key): exists = False with open(TEMP_FILE, 'w+') as temp_file:", "value) elif args.list: list_texts() elif args.get: key = args.get[0] get_text(key)", "clip store\") try: os.rename(TEMP_FILE, CLIP_FILE) except Exception as ex: os.remove(TEMP_FILE)", "the clip store\") try: os.rename(TEMP_FILE, CLIP_FILE) except Exception as ex:", "os import argparse from pathlib import Path CLIP_FILE = os.path.join(Path.home(),", "temp_file: with open(CLIP_FILE, 'r') as clip_file: for text in clip_file.read().split('\\n'):", "import os import argparse from pathlib import Path CLIP_FILE =", "True if not exists: print(\"key:\", key, \"was not found in", "failed.', ex) def main(): parser = argparse.ArgumentParser(description='clips and saves texts", "key = args.get[0] get_text(key) elif args.delete: key = args.delete[0] 
delete_text(key)", "= argparse.ArgumentParser(description='clips and saves texts from the command line') parser.add_argument('-a',", "'--add', nargs=2) parser.add_argument('-g', '--get', nargs=1) parser.add_argument('-d', '--delete', nargs=1) parser.add_argument('-l', '--list',", "text + \"\\n\") def list_texts(): with open(CLIP_FILE, 'r') as clip_file:", "nargs=1) parser.add_argument('-d', '--delete', nargs=1) parser.add_argument('-l', '--list', action='store_true') args = parser.parse_args()", "open(CLIP_FILE, 'r') as clip_file: for text in clip_file.read().split('\\n'): if text.strip()", "parser.add_argument('-d', '--delete', nargs=1) parser.add_argument('-l', '--list', action='store_true') args = parser.parse_args() if", "key_val = text.split(':') if key_val[0].strip() == key: print(key_val[1].strip(), end='') def", "with open(CLIP_FILE, 'r') as clip_file: for text in clip_file.read().split('\\n'): if", "print(key_val[1].strip(), end='') def delete_text(key): exists = False with open(TEMP_FILE, 'w+')", "def add_text(key, text): if os.path.exists(CLIP_FILE): open_mode = 'a' else: open_mode", "'.clip') TEMP_FILE = '.TEMP_FILE' def add_text(key, text): if os.path.exists(CLIP_FILE): open_mode", "clip_file.read().split('\\n'): if text.strip() == \"\": continue key_val = text.split(':') if", "\"was not found in the clip store\") try: os.rename(TEMP_FILE, CLIP_FILE)", "open(CLIP_FILE, 'r') as clip_file: for text in clip_file.read().split('\\n'): print(text) def", "exists: print(\"key:\", key, \"was not found in the clip store\")", "texts from the command line') parser.add_argument('-a', '--add', nargs=2) parser.add_argument('-g', '--get',", "parser.add_argument('-g', '--get', nargs=1) parser.add_argument('-d', '--delete', nargs=1) parser.add_argument('-l', '--list', action='store_true') args", "try: os.rename(TEMP_FILE, CLIP_FILE) except Exception as ex: os.remove(TEMP_FILE) print('remove text", "add_text(key, value) elif args.list: list_texts() elif args.get: key = 
args.get[0]", "'r') as clip_file: for text in clip_file.read().split('\\n'): print(text) def get_text(key):", "text failed.', ex) def main(): parser = argparse.ArgumentParser(description='clips and saves", "args.add[0], args.add[1] add_text(key, value) elif args.list: list_texts() elif args.get: key", "as clip_file: for text in clip_file.read().split('\\n'): if text.strip() == \"\":", "'r') as clip_file: for text in clip_file.read().split('\\n'): key_val = text.split(':')", "import argparse from pathlib import Path CLIP_FILE = os.path.join(Path.home(), '.clip')", "= False with open(TEMP_FILE, 'w+') as temp_file: with open(CLIP_FILE, 'r')", "text.strip() == \"\": continue key_val = text.split(':') if key_val[0].strip() !=", "== key: print(key_val[1].strip(), end='') def delete_text(key): exists = False with", "= args.get[0] get_text(key) elif args.delete: key = args.delete[0] delete_text(key) else:" ]
[ "# Can't declare it directly in a loop as it", "def fetch_value(i): return lambda x: x[i] metrics = [fetch_value(i) for", "typing import List, Tuple from gama.genetic_programming.nsga2 import ( NSGAMeta, fast_non_dominated_sort,", "(2, 4)]) three_five, five_three, two_four = pareto assert not three_five.dominates(five_three)", "pareto crowding_distance_assignment(pareto) assert three_five.distance == float(\"inf\") assert five_three.distance == float(\"inf\")", "(5, 3), (4, 4)]) three_five, five_three, four_four = pareto crowding_distance_assignment(pareto)", "four_four = pareto assert three_five.values == (3, 5) assert five_three.values", "= _tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4), (4.01, 3.99), (4.5,", "4) def test_dominates(): pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (2,", "assert three_five.distance == float(\"inf\") assert five_three.distance == float(\"inf\") assert four_four.distance", "NSGAMeta objects. \"\"\" # Can't declare it directly in a", "for t in tuples] def test_nsgameta_value_assignment(): pareto = _tuples_to_NSGAMeta([(3, 5),", "_tuples_to_NSGAMeta([(3, 5), (5, 3), (2, 4)]) three_five, five_three, two_four =", "pareto crowding_distance_assignment(pareto) assert three_inf.distance == float(\"inf\") assert five_three.distance == float(\"inf\")", "assert four_four.distance == 2 def test_crowding_distance_assignment_inf(): pareto = _tuples_to_NSGAMeta([(3, float(\"inf\")),", "new scope. 
def fetch_value(i): return lambda x: x[i] metrics =", "float(\"inf\")), (5, 3), (4, 4)]) three_inf, five_three, four_four = pareto", "five_three.dominates(three_five) assert three_five.dominates(two_four) assert not two_four.dominates(three_five) assert not five_three.dominates(two_four) assert", "pareto fast_non_dominated_sort(pareto) # assigns rank crowding_distance_assignment(pareto) # assigns distance assert", "in a loop as it does not create a new", "pareto assert not three_five.dominates(five_three) assert not five_three.dominates(three_five) assert three_five.dominates(two_four) assert", "def test_dominates(): pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (2, 4)])", "(4, 4)]) three_five, five_three, four_four = pareto assert three_five.values ==", "crowding_distance_assignment(pareto) assert three_inf.distance == float(\"inf\") assert five_three.distance == float(\"inf\") #", "gama.genetic_programming.nsga2 import ( NSGAMeta, fast_non_dominated_sort, crowding_distance_assignment, ) def _tuples_to_NSGAMeta(tuples: List[Tuple])", "three_five.values == (3, 5) assert five_three.values == (5, 3) assert", "tuples] def test_nsgameta_value_assignment(): pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (4,", "== float(\"inf\") assert five_three.distance == float(\"inf\") assert four_four.distance == 2", "not create a new scope. 
def fetch_value(i): return lambda x:", "(5, 3), (4, 4)]) three_five, five_three, four_four = pareto assert", "= _tuples_to_NSGAMeta([(3, float(\"inf\")), (5, 3), (4, 4)]) three_inf, five_three, four_four", "four_four.distance == 1 def test_crowd_compare(): pareto = _tuples_to_NSGAMeta([(3, 5), (5,", "half_half = pareto fast_non_dominated_sort(pareto) # assigns rank crowding_distance_assignment(pareto) # assigns", "metrics) for t in tuples] def test_nsgameta_value_assignment(): pareto = _tuples_to_NSGAMeta([(3,", "all([three_five.crowd_compare(other) == -1 for other in pareto[2:]]) assert all([five_three.crowd_compare(other) ==", "List[Tuple]) -> List[NSGAMeta]: \"\"\" Converts a list of tuples to", "to NSGAMeta objects. \"\"\" # Can't declare it directly in", "= pareto crowding_distance_assignment(pareto) assert three_inf.distance == float(\"inf\") assert five_three.distance ==", "5) assert five_three.values == (5, 3) assert four_four.values == (4,", "-> List[NSGAMeta]: \"\"\" Converts a list of tuples to NSGAMeta", "List, Tuple from gama.genetic_programming.nsga2 import ( NSGAMeta, fast_non_dominated_sort, crowding_distance_assignment, )", "== float(\"inf\") assert four_four.distance == 2 def test_crowding_distance_assignment_inf(): pareto =", "4)]) three_five, five_three, four_four = pareto crowding_distance_assignment(pareto) assert three_five.distance ==", "test_crowding_distance_assignment(): pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4)]) three_five,", "five_three, four_four = pareto assert three_five.values == (3, 5) assert", "float(\"inf\") # In our implementation, we ignore 'axis' that contain", "i in range(len(tuples[0]))] return [NSGAMeta(t, metrics) for t in tuples]", "assert three_five.dominates(two_four) assert not two_four.dominates(three_five) assert not five_three.dominates(two_four) assert not", "# assigns distance assert all([three_five.crowd_compare(other) == -1 for other in", "three_five, five_three, four_four = pareto assert 
three_five.values == (3, 5)", "five_three.dominates(two_four) assert not two_four.dominates(five_three) def test_crowding_distance_assignment(): pareto = _tuples_to_NSGAMeta([(3, 5),", "3), (4, 4)]) three_inf, five_three, four_four = pareto crowding_distance_assignment(pareto) assert", "three_five, five_three, four_four = pareto crowding_distance_assignment(pareto) assert three_five.distance == float(\"inf\")", "def test_crowd_compare(): pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4),", "test_crowd_compare(): pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4), (4.01,", "== -1 for other in pareto[2:]]) assert all([five_three.crowd_compare(other) == -1", "three_inf.distance == float(\"inf\") assert five_three.distance == float(\"inf\") # In our", "(4.01, 3.99), (4.5, 3.5)]) three_five, five_three, four_four, approx_four_four, half_half =", "of tuples to NSGAMeta objects. \"\"\" # Can't declare it", "two_four = pareto assert not three_five.dominates(five_three) assert not five_three.dominates(three_five) assert", "assert not five_three.dominates(three_five) assert three_five.dominates(two_four) assert not two_four.dominates(three_five) assert not", "not three_five.dominates(five_three) assert not five_three.dominates(three_five) assert three_five.dominates(two_four) assert not two_four.dominates(three_five)", "crowding_distance_assignment(pareto) assert three_five.distance == float(\"inf\") assert five_three.distance == float(\"inf\") assert", "not five_three.dominates(two_four) assert not two_four.dominates(five_three) def test_crowding_distance_assignment(): pareto = _tuples_to_NSGAMeta([(3,", "= _tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4)]) three_five, five_three, four_four", "assert five_three.distance == float(\"inf\") assert four_four.distance == 2 def test_crowding_distance_assignment_inf():", "four_four, approx_four_four, half_half = pareto fast_non_dominated_sort(pareto) # assigns rank crowding_distance_assignment(pareto)", "[NSGAMeta(t, metrics) for 
t in tuples] def test_nsgameta_value_assignment(): pareto =", "(4, 4)]) three_five, five_three, four_four = pareto crowding_distance_assignment(pareto) assert three_five.distance", "create a new scope. def fetch_value(i): return lambda x: x[i]", "crowding_distance_assignment, ) def _tuples_to_NSGAMeta(tuples: List[Tuple]) -> List[NSGAMeta]: \"\"\" Converts a", "assert all([three_five.crowd_compare(other) == -1 for other in pareto[2:]]) assert all([five_three.crowd_compare(other)", "(5, 3) assert four_four.values == (4, 4) def test_dominates(): pareto", "_tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4), (4.01, 3.99), (4.5, 3.5)])", "NSGAMeta, fast_non_dominated_sort, crowding_distance_assignment, ) def _tuples_to_NSGAMeta(tuples: List[Tuple]) -> List[NSGAMeta]: \"\"\"", "== (3, 5) assert five_three.values == (5, 3) assert four_four.values", "assert three_five.values == (3, 5) assert five_three.values == (5, 3)", "float(\"inf\") assert five_three.distance == float(\"inf\") # In our implementation, we", "pareto assert three_five.values == (3, 5) assert five_three.values == (5,", "we ignore 'axis' that contain inf values. assert four_four.distance ==", "fast_non_dominated_sort, crowding_distance_assignment, ) def _tuples_to_NSGAMeta(tuples: List[Tuple]) -> List[NSGAMeta]: \"\"\" Converts", "inf values. assert four_four.distance == 1 def test_crowd_compare(): pareto =", "three_five, five_three, two_four = pareto assert not three_five.dominates(five_three) assert not", "does not create a new scope. def fetch_value(i): return lambda", "-1 for other in pareto[2:]]) assert all([five_three.crowd_compare(other) == -1 for", "our implementation, we ignore 'axis' that contain inf values. assert", "contain inf values. 
assert four_four.distance == 1 def test_crowd_compare(): pareto", "4)]) three_five, five_three, two_four = pareto assert not three_five.dominates(five_three) assert", "not two_four.dominates(three_five) assert not five_three.dominates(two_four) assert not two_four.dominates(five_three) def test_crowding_distance_assignment():", "= _tuples_to_NSGAMeta([(3, 5), (5, 3), (2, 4)]) three_five, five_three, two_four", "test_crowding_distance_assignment_inf(): pareto = _tuples_to_NSGAMeta([(3, float(\"inf\")), (5, 3), (4, 4)]) three_inf,", "objects. \"\"\" # Can't declare it directly in a loop", "four_four = pareto crowding_distance_assignment(pareto) assert three_five.distance == float(\"inf\") assert five_three.distance", "it directly in a loop as it does not create", "lambda x: x[i] metrics = [fetch_value(i) for i in range(len(tuples[0]))]", "three_five, five_three, four_four, approx_four_four, half_half = pareto fast_non_dominated_sort(pareto) # assigns", "== (5, 3) assert four_four.values == (4, 4) def test_dominates():", "5), (5, 3), (4, 4)]) three_five, five_three, four_four = pareto", "3.99), (4.5, 3.5)]) three_five, five_three, four_four, approx_four_four, half_half = pareto", "float(\"inf\") assert five_three.distance == float(\"inf\") assert four_four.distance == 2 def", "== 2 def test_crowding_distance_assignment_inf(): pareto = _tuples_to_NSGAMeta([(3, float(\"inf\")), (5, 3),", "fetch_value(i): return lambda x: x[i] metrics = [fetch_value(i) for i", "return [NSGAMeta(t, metrics) for t in tuples] def test_nsgameta_value_assignment(): pareto", "return lambda x: x[i] metrics = [fetch_value(i) for i in", "3), (4, 4)]) three_five, five_three, four_four = pareto assert three_five.values", "in range(len(tuples[0]))] return [NSGAMeta(t, metrics) for t in tuples] def", "3), (4, 4)]) three_five, five_three, four_four = pareto crowding_distance_assignment(pareto) assert", "list of tuples to NSGAMeta objects. 
\"\"\" # Can't declare", "pareto = _tuples_to_NSGAMeta([(3, float(\"inf\")), (5, 3), (4, 4)]) three_inf, five_three,", "4)]) three_five, five_three, four_four = pareto assert three_five.values == (3,", "metrics = [fetch_value(i) for i in range(len(tuples[0]))] return [NSGAMeta(t, metrics)", "3), (2, 4)]) three_five, five_three, two_four = pareto assert not", "ignore 'axis' that contain inf values. assert four_four.distance == 1", "five_three, four_four, approx_four_four, half_half = pareto fast_non_dominated_sort(pareto) # assigns rank", "= pareto assert three_five.values == (3, 5) assert five_three.values ==", "pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (2, 4)]) three_five, five_three,", "five_three.distance == float(\"inf\") # In our implementation, we ignore 'axis'", "test_nsgameta_value_assignment(): pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4)]) three_five,", "assert four_four.distance == 1 def test_crowd_compare(): pareto = _tuples_to_NSGAMeta([(3, 5),", "t in tuples] def test_nsgameta_value_assignment(): pareto = _tuples_to_NSGAMeta([(3, 5), (5,", "1 def test_crowd_compare(): pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (4,", "# In our implementation, we ignore 'axis' that contain inf", "not five_three.dominates(three_five) assert three_five.dominates(two_four) assert not two_four.dominates(three_five) assert not five_three.dominates(two_four)", "assert five_three.distance == float(\"inf\") # In our implementation, we ignore", "five_three, four_four = pareto crowding_distance_assignment(pareto) assert three_five.distance == float(\"inf\") assert", "five_three, four_four = pareto crowding_distance_assignment(pareto) assert three_inf.distance == float(\"inf\") assert", "= [fetch_value(i) for i in range(len(tuples[0]))] return [NSGAMeta(t, metrics) for", "( NSGAMeta, fast_non_dominated_sort, crowding_distance_assignment, ) def _tuples_to_NSGAMeta(tuples: List[Tuple]) -> List[NSGAMeta]:", "_tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4)]) three_five, 
five_three, four_four =", "three_five.dominates(two_four) assert not two_four.dominates(three_five) assert not five_three.dominates(two_four) assert not two_four.dominates(five_three)", "declare it directly in a loop as it does not", "(5, 3), (4, 4)]) three_inf, five_three, four_four = pareto crowding_distance_assignment(pareto)", "five_three, two_four = pareto assert not three_five.dominates(five_three) assert not five_three.dominates(three_five)", "(5, 3), (4, 4), (4.01, 3.99), (4.5, 3.5)]) three_five, five_three,", "for other in pareto[2:]]) assert all([five_three.crowd_compare(other) == -1 for other", "in tuples] def test_nsgameta_value_assignment(): pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3),", "four_four = pareto crowding_distance_assignment(pareto) assert three_inf.distance == float(\"inf\") assert five_three.distance", "Can't declare it directly in a loop as it does", "(3, 5) assert five_three.values == (5, 3) assert four_four.values ==", "== (4, 4) def test_dominates(): pareto = _tuples_to_NSGAMeta([(3, 5), (5,", "assert not five_three.dominates(two_four) assert not two_four.dominates(five_three) def test_crowding_distance_assignment(): pareto =", "= pareto crowding_distance_assignment(pareto) assert three_five.distance == float(\"inf\") assert five_three.distance ==", "<filename>tests/unit/test_nsga2.py from typing import List, Tuple from gama.genetic_programming.nsga2 import (", "(4, 4) def test_dominates(): pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3),", "directly in a loop as it does not create a", "'axis' that contain inf values. assert four_four.distance == 1 def", "4), (4.01, 3.99), (4.5, 3.5)]) three_five, five_three, four_four, approx_four_four, half_half", "for i in range(len(tuples[0]))] return [NSGAMeta(t, metrics) for t in", "tuples to NSGAMeta objects. 
\"\"\" # Can't declare it directly", "assert not two_four.dominates(three_five) assert not five_three.dominates(two_four) assert not two_four.dominates(five_three) def", "not two_four.dominates(five_three) def test_crowding_distance_assignment(): pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3),", "_tuples_to_NSGAMeta([(3, float(\"inf\")), (5, 3), (4, 4)]) three_inf, five_three, four_four =", "that contain inf values. assert four_four.distance == 1 def test_crowd_compare():", "pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4), (4.01, 3.99),", "from typing import List, Tuple from gama.genetic_programming.nsga2 import ( NSGAMeta,", "two_four.dominates(five_three) def test_crowding_distance_assignment(): pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (4,", "\"\"\" Converts a list of tuples to NSGAMeta objects. \"\"\"", "In our implementation, we ignore 'axis' that contain inf values.", "a new scope. def fetch_value(i): return lambda x: x[i] metrics", "def test_crowding_distance_assignment_inf(): pareto = _tuples_to_NSGAMeta([(3, float(\"inf\")), (5, 3), (4, 4)])", "5), (5, 3), (2, 4)]) three_five, five_three, two_four = pareto", "a list of tuples to NSGAMeta objects. \"\"\" # Can't", "(4, 4), (4.01, 3.99), (4.5, 3.5)]) three_five, five_three, four_four, approx_four_four,", "== float(\"inf\") assert five_three.distance == float(\"inf\") # In our implementation,", "rank crowding_distance_assignment(pareto) # assigns distance assert all([three_five.crowd_compare(other) == -1 for", "four_four.distance == 2 def test_crowding_distance_assignment_inf(): pareto = _tuples_to_NSGAMeta([(3, float(\"inf\")), (5,", "three_inf, five_three, four_four = pareto crowding_distance_assignment(pareto) assert three_inf.distance == float(\"inf\")", "implementation, we ignore 'axis' that contain inf values. 
assert four_four.distance", "5), (5, 3), (4, 4), (4.01, 3.99), (4.5, 3.5)]) three_five,", "approx_four_four, half_half = pareto fast_non_dominated_sort(pareto) # assigns rank crowding_distance_assignment(pareto) #", "def test_crowding_distance_assignment(): pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4)])", "(5, 3), (2, 4)]) three_five, five_three, two_four = pareto assert", "import ( NSGAMeta, fast_non_dominated_sort, crowding_distance_assignment, ) def _tuples_to_NSGAMeta(tuples: List[Tuple]) ->", "3.5)]) three_five, five_three, four_four, approx_four_four, half_half = pareto fast_non_dominated_sort(pareto) #", "# assigns rank crowding_distance_assignment(pareto) # assigns distance assert all([three_five.crowd_compare(other) ==", "assert five_three.values == (5, 3) assert four_four.values == (4, 4)", "other in pareto[2:]]) assert all([five_three.crowd_compare(other) == -1 for other in", "loop as it does not create a new scope. def", "Converts a list of tuples to NSGAMeta objects. \"\"\" #", "x[i] metrics = [fetch_value(i) for i in range(len(tuples[0]))] return [NSGAMeta(t,", "[fetch_value(i) for i in range(len(tuples[0]))] return [NSGAMeta(t, metrics) for t", "distance assert all([three_five.crowd_compare(other) == -1 for other in pareto[2:]]) assert", "pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4)]) three_five, five_three,", ") def _tuples_to_NSGAMeta(tuples: List[Tuple]) -> List[NSGAMeta]: \"\"\" Converts a list", "values. 
assert four_four.distance == 1 def test_crowd_compare(): pareto = _tuples_to_NSGAMeta([(3,", "3), (4, 4), (4.01, 3.99), (4.5, 3.5)]) three_five, five_three, four_four,", "assigns rank crowding_distance_assignment(pareto) # assigns distance assert all([three_five.crowd_compare(other) == -1", "range(len(tuples[0]))] return [NSGAMeta(t, metrics) for t in tuples] def test_nsgameta_value_assignment():", "assigns distance assert all([three_five.crowd_compare(other) == -1 for other in pareto[2:]])", "from gama.genetic_programming.nsga2 import ( NSGAMeta, fast_non_dominated_sort, crowding_distance_assignment, ) def _tuples_to_NSGAMeta(tuples:", "five_three.distance == float(\"inf\") assert four_four.distance == 2 def test_crowding_distance_assignment_inf(): pareto", "import List, Tuple from gama.genetic_programming.nsga2 import ( NSGAMeta, fast_non_dominated_sort, crowding_distance_assignment,", "three_five.distance == float(\"inf\") assert five_three.distance == float(\"inf\") assert four_four.distance ==", "4)]) three_inf, five_three, four_four = pareto crowding_distance_assignment(pareto) assert three_inf.distance ==", "def test_nsgameta_value_assignment(): pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4)])", "as it does not create a new scope. def fetch_value(i):", "Tuple from gama.genetic_programming.nsga2 import ( NSGAMeta, fast_non_dominated_sort, crowding_distance_assignment, ) def", "it does not create a new scope. 
def fetch_value(i): return", "3) assert four_four.values == (4, 4) def test_dominates(): pareto =", "a loop as it does not create a new scope.", "assert three_inf.distance == float(\"inf\") assert five_three.distance == float(\"inf\") # In", "\"\"\" # Can't declare it directly in a loop as", "== float(\"inf\") # In our implementation, we ignore 'axis' that", "= pareto assert not three_five.dominates(five_three) assert not five_three.dominates(three_five) assert three_five.dominates(two_four)", "= pareto fast_non_dominated_sort(pareto) # assigns rank crowding_distance_assignment(pareto) # assigns distance", "assert not two_four.dominates(five_three) def test_crowding_distance_assignment(): pareto = _tuples_to_NSGAMeta([(3, 5), (5,", "2 def test_crowding_distance_assignment_inf(): pareto = _tuples_to_NSGAMeta([(3, float(\"inf\")), (5, 3), (4,", "four_four.values == (4, 4) def test_dominates(): pareto = _tuples_to_NSGAMeta([(3, 5),", "fast_non_dominated_sort(pareto) # assigns rank crowding_distance_assignment(pareto) # assigns distance assert all([three_five.crowd_compare(other)", "x: x[i] metrics = [fetch_value(i) for i in range(len(tuples[0]))] return", "five_three.values == (5, 3) assert four_four.values == (4, 4) def", "assert four_four.values == (4, 4) def test_dominates(): pareto = _tuples_to_NSGAMeta([(3,", "assert not three_five.dominates(five_three) assert not five_three.dominates(three_five) assert three_five.dominates(two_four) assert not", "def _tuples_to_NSGAMeta(tuples: List[Tuple]) -> List[NSGAMeta]: \"\"\" Converts a list of", "scope. 
def fetch_value(i): return lambda x: x[i] metrics = [fetch_value(i)", "crowding_distance_assignment(pareto) # assigns distance assert all([three_five.crowd_compare(other) == -1 for other", "two_four.dominates(three_five) assert not five_three.dominates(two_four) assert not two_four.dominates(five_three) def test_crowding_distance_assignment(): pareto", "three_five.dominates(five_three) assert not five_three.dominates(three_five) assert three_five.dominates(two_four) assert not two_four.dominates(three_five) assert", "(4, 4)]) three_inf, five_three, four_four = pareto crowding_distance_assignment(pareto) assert three_inf.distance", "float(\"inf\") assert four_four.distance == 2 def test_crowding_distance_assignment_inf(): pareto = _tuples_to_NSGAMeta([(3,", "(4.5, 3.5)]) three_five, five_three, four_four, approx_four_four, half_half = pareto fast_non_dominated_sort(pareto)", "List[NSGAMeta]: \"\"\" Converts a list of tuples to NSGAMeta objects.", "== 1 def test_crowd_compare(): pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3),", "_tuples_to_NSGAMeta(tuples: List[Tuple]) -> List[NSGAMeta]: \"\"\" Converts a list of tuples", "test_dominates(): pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (2, 4)]) three_five,", "in pareto[2:]]) assert all([five_three.crowd_compare(other) == -1 for other in pareto[2:]])" ]
[ "Data Server catalog.\"\"\" def walk(cat, depth=1): \"\"\"Return a generator walking", "parser=parser) nc = doc.xpath(\"/ncml:netcdf\", namespaces=ns)[0] # Extract global attributes out", "{} for e in elems: a = e.attrib if a[\"name\"].startswith(hidden_prefix):", "attributes out = _attrib_to_dict(nc.xpath(\"ncml:attribute\", namespaces=ns)) # Extract group attributes gr", "dictionary. Ignore attributes with names starting with _ \"\"\" hidden_prefix", "Ignore attributes with names starting with _ \"\"\" hidden_prefix =", "0: for name, ref in cat.catalog_refs.items(): child = ref.follow() yield", "return only datasets within the top-level catalog. If None, depth", "data catalog for datasets. Parameters ---------- cat : TDSCatalog THREDDS", "= _attrib_to_dict(group.xpath(\"ncml:attribute\", namespaces=ns)) # Extract variable attributes va = {}", "= {} for e in elems: a = e.attrib if", "for a dataset. Returns ------- dict Global attribute values keyed", "keyed by facet names, with variable attributes in `__variable__` nested", "in nc.xpath(\"ncml:variable\", namespaces=ns): if '_CoordinateAxisType' in variable.xpath(\"ncml:attribute/@name\", namespaces=ns): continue va[variable.attrib[\"name\"]]", "global attributes out = _attrib_to_dict(nc.xpath(\"ncml:attribute\", namespaces=ns)) # Extract group attributes", "= doc.xpath(\"/ncml:netcdf\", namespaces=ns)[0] # Extract global attributes out = _attrib_to_dict(nc.xpath(\"ncml:attribute\",", "set to 1000. \"\"\" yield from cat.datasets.items() if depth is", "attrs_from_ds(ds): \"\"\"Extract attributes from TDS Dataset.\"\"\" url = ds.access_urls[\"NCML\"] attrs", "if depth > 0: for name, ref in cat.catalog_refs.items(): child", "def attrs_from_ds(ds): \"\"\"Extract attributes from TDS Dataset.\"\"\" url = ds.access_urls[\"NCML\"]", "top-level catalog. If None, depth is set to 1000. 
\"\"\"", "for variable in nc.xpath(\"ncml:variable\", namespaces=ns): if '_CoordinateAxisType' in variable.xpath(\"ncml:attribute/@name\", namespaces=ns):", "= 1000 if depth > 0: for name, ref in", "ref in cat.catalog_refs.items(): child = ref.follow() yield from walk(child, depth=depth-1)", "attrs_from_ncml(url): \"\"\"Extract attributes from NcML file. Parameters ---------- url :", "TDS Dataset.\"\"\" url = ds.access_urls[\"NCML\"] attrs = attrs_from_ncml(url) attrs[\"__services__\"] =", "File taken from https://github.com/Ouranosinc/pavics-vdb/blob/master/catalog/tds.py \"\"\"Utility function to parse metadata from", "Dataset.\"\"\" url = ds.access_urls[\"NCML\"] attrs = attrs_from_ncml(url) attrs[\"__services__\"] = ds.access_urls", "to NcML service of THREDDS server for a dataset. Returns", "1000 if depth > 0: for name, ref in cat.catalog_refs.items():", "bytes xml = requests.get(url).content doc = lxml.etree.fromstring(xml, parser=parser) nc =", "return attrs def attrs_from_ncml(url): \"\"\"Extract attributes from NcML file. Parameters", "recursive depth. Setting 0 will return only datasets within the", "_ \"\"\" hidden_prefix = \"_\" out = {} for e", "as bytes xml = requests.get(url).content doc = lxml.etree.fromstring(xml, parser=parser) nc", "Extract variable attributes va = {} for variable in nc.xpath(\"ncml:variable\",", "Parse XML content - UTF-8 encoded documents need to be", "to 1000. \"\"\" yield from cat.datasets.items() if depth is None:", "from walk(child, depth=depth-1) def attrs_from_ds(ds): \"\"\"Extract attributes from TDS Dataset.\"\"\"", "va[variable.attrib[\"name\"]] = _attrib_to_dict(variable.xpath(\"ncml:attribute\", namespaces=ns)) out[\"__group__\"] = gr out[\"__variable__\"] = va", "= e.attrib if a[\"name\"].startswith(hidden_prefix): continue out[a[\"name\"]] = a[\"value\"] return out", "_attrib_to_dict(elems): \"\"\"Convert element attributes to dictionary. 
Ignore attributes with names", "# Extract group attributes gr = {} for group in", "= \"_\" out = {} for e in elems: a", "datasets. Parameters ---------- cat : TDSCatalog THREDDS catalog. depth :", "parser = lxml.etree.XMLParser(encoding='UTF-8') ns = {\"ncml\": \"http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2\"} # Parse XML", "= lxml.etree.XMLParser(encoding='UTF-8') ns = {\"ncml\": \"http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2\"} # Parse XML content", "namespaces=ns)) # Extract group attributes gr = {} for group", "for name, ref in cat.catalog_refs.items(): child = ref.follow() yield from", "names, with variable attributes in `__variable__` nested dict, and additional", "\"\"\"Return a generator walking a THREDDS data catalog for datasets.", "depth. Setting 0 will return only datasets within the top-level", "lxml.etree import requests parser = lxml.etree.XMLParser(encoding='UTF-8') ns = {\"ncml\": \"http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2\"}", "import lxml.etree import requests parser = lxml.etree.XMLParser(encoding='UTF-8') ns = {\"ncml\":", "of THREDDS server for a dataset. Returns ------- dict Global", "XML content - UTF-8 encoded documents need to be read", "hidden_prefix = \"_\" out = {} for e in elems:", "catalog. If None, depth is set to 1000. \"\"\" yield", "THREDDS catalog. depth : int Maximum recursive depth. Setting 0", "file. 
def attrs_from_ncml(url):
    """Extract attributes from NcML file.

    Parameters
    ----------
    url : str
        Link to NcML service of THREDDS server for a dataset.

    Returns
    -------
    dict
        Global attribute values keyed by facet names, with variable attributes
        in `__variable__` nested dict, and additional specialized attributes in
        `__group__` nested dict.
    """
    import lxml.etree
    import requests

    parser = lxml.etree.XMLParser(encoding='UTF-8')
    ns = {"ncml": "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2"}

    # Parse XML content - UTF-8 encoded documents need to be read as bytes.
    # A timeout prevents hanging forever on an unresponsive server, and
    # raise_for_status() avoids feeding an HTML error page to the XML parser.
    resp = requests.get(url, timeout=60)
    resp.raise_for_status()
    doc = lxml.etree.fromstring(resp.content, parser=parser)
    nc = doc.xpath("/ncml:netcdf", namespaces=ns)[0]

    # Extract global attributes
    out = _attrib_to_dict(nc.xpath("ncml:attribute", namespaces=ns))

    # Extract group attributes
    gr = {}
    for group in nc.xpath("ncml:group", namespaces=ns):
        gr[group.attrib["name"]] = _attrib_to_dict(group.xpath("ncml:attribute", namespaces=ns))

    # Extract variable attributes, skipping coordinate-axis variables
    # (those carrying a _CoordinateAxisType attribute).
    va = {}
    for variable in nc.xpath("ncml:variable", namespaces=ns):
        if '_CoordinateAxisType' in variable.xpath("ncml:attribute/@name", namespaces=ns):
            continue
        va[variable.attrib["name"]] = _attrib_to_dict(variable.xpath("ncml:attribute", namespaces=ns))

    out["__group__"] = gr
    out["__variable__"] = va

    return out
a[\"name\"].startswith(hidden_prefix): continue out[a[\"name\"]] = a[\"value\"] return", "attributes with names starting with _ \"\"\" hidden_prefix = \"_\"", "to be read as bytes xml = requests.get(url).content doc =", "nested dict. \"\"\" import lxml.etree import requests parser = lxml.etree.XMLParser(encoding='UTF-8')", "the top-level catalog. If None, depth is set to 1000.", "str Link to NcML service of THREDDS server for a", "out = _attrib_to_dict(nc.xpath(\"ncml:attribute\", namespaces=ns)) # Extract group attributes gr =", "is None: depth = 1000 if depth > 0: for", "depth > 0: for name, ref in cat.catalog_refs.items(): child =", "# Extract variable attributes va = {} for variable in", "additional specialized attributes in `__group__` nested dict. \"\"\" import lxml.etree", "gr out[\"__variable__\"] = va return out def _attrib_to_dict(elems): \"\"\"Convert element", "attrs_from_ncml(url) attrs[\"__services__\"] = ds.access_urls return attrs def attrs_from_ncml(url): \"\"\"Extract attributes", "= requests.get(url).content doc = lxml.etree.fromstring(xml, parser=parser) nc = doc.xpath(\"/ncml:netcdf\", namespaces=ns)[0]", "walking a THREDDS data catalog for datasets. Parameters ---------- cat", "= _attrib_to_dict(variable.xpath(\"ncml:attribute\", namespaces=ns)) out[\"__group__\"] = gr out[\"__variable__\"] = va return", "nc.xpath(\"ncml:variable\", namespaces=ns): if '_CoordinateAxisType' in variable.xpath(\"ncml:attribute/@name\", namespaces=ns): continue va[variable.attrib[\"name\"]] =", "\"\"\"Convert element attributes to dictionary. Ignore attributes with names starting", "= ds.access_urls[\"NCML\"] attrs = attrs_from_ncml(url) attrs[\"__services__\"] = ds.access_urls return attrs", "in `__group__` nested dict. 
\"\"\" import lxml.etree import requests parser", "va return out def _attrib_to_dict(elems): \"\"\"Convert element attributes to dictionary.", "attributes va = {} for variable in nc.xpath(\"ncml:variable\", namespaces=ns): if", "THREDDS server for a dataset. Returns ------- dict Global attribute", "requests.get(url).content doc = lxml.etree.fromstring(xml, parser=parser) nc = doc.xpath(\"/ncml:netcdf\", namespaces=ns)[0] #", "THREDDS Data Server catalog.\"\"\" def walk(cat, depth=1): \"\"\"Return a generator", "= lxml.etree.fromstring(xml, parser=parser) nc = doc.xpath(\"/ncml:netcdf\", namespaces=ns)[0] # Extract global", "yield from cat.datasets.items() if depth is None: depth = 1000", "Parameters ---------- url : str Link to NcML service of", "from NcML file. Parameters ---------- url : str Link to", "requests parser = lxml.etree.XMLParser(encoding='UTF-8') ns = {\"ncml\": \"http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2\"} # Parse", "attributes gr = {} for group in nc.xpath(\"ncml:group\", namespaces=ns): gr[group.attrib[\"name\"]]", "need to be read as bytes xml = requests.get(url).content doc", "THREDDS data catalog for datasets. Parameters ---------- cat : TDSCatalog", "e in elems: a = e.attrib if a[\"name\"].startswith(hidden_prefix): continue out[a[\"name\"]]", "with _ \"\"\" hidden_prefix = \"_\" out = {} for", "int Maximum recursive depth. Setting 0 will return only datasets", "0 will return only datasets within the top-level catalog. If", "\"\"\"Utility function to parse metadata from a THREDDS Data Server", "catalog. depth : int Maximum recursive depth. Setting 0 will", "gr = {} for group in nc.xpath(\"ncml:group\", namespaces=ns): gr[group.attrib[\"name\"]] =", "if '_CoordinateAxisType' in variable.xpath(\"ncml:attribute/@name\", namespaces=ns): continue va[variable.attrib[\"name\"]] = _attrib_to_dict(variable.xpath(\"ncml:attribute\", namespaces=ns))", "catalog for datasets. Parameters ---------- cat : TDSCatalog THREDDS catalog.", "1000. 
\"\"\" yield from cat.datasets.items() if depth is None: depth", "from https://github.com/Ouranosinc/pavics-vdb/blob/master/catalog/tds.py \"\"\"Utility function to parse metadata from a THREDDS", "a dataset. Returns ------- dict Global attribute values keyed by", "from a THREDDS Data Server catalog.\"\"\" def walk(cat, depth=1): \"\"\"Return", "dict, and additional specialized attributes in `__group__` nested dict. \"\"\"", "namespaces=ns)[0] # Extract global attributes out = _attrib_to_dict(nc.xpath(\"ncml:attribute\", namespaces=ns)) #", "by facet names, with variable attributes in `__variable__` nested dict,", "from cat.datasets.items() if depth is None: depth = 1000 if", "namespaces=ns)) # Extract variable attributes va = {} for variable", "is set to 1000. \"\"\" yield from cat.datasets.items() if depth", "_attrib_to_dict(nc.xpath(\"ncml:attribute\", namespaces=ns)) # Extract group attributes gr = {} for", "nc = doc.xpath(\"/ncml:netcdf\", namespaces=ns)[0] # Extract global attributes out =", "return out def _attrib_to_dict(elems): \"\"\"Convert element attributes to dictionary. Ignore", "ds.access_urls return attrs def attrs_from_ncml(url): \"\"\"Extract attributes from NcML file.", "depth=1): \"\"\"Return a generator walking a THREDDS data catalog for", "{} for variable in nc.xpath(\"ncml:variable\", namespaces=ns): if '_CoordinateAxisType' in variable.xpath(\"ncml:attribute/@name\",", "names starting with _ \"\"\" hidden_prefix = \"_\" out =", "NcML file. Parameters ---------- url : str Link to NcML", "Maximum recursive depth. Setting 0 will return only datasets within", "doc = lxml.etree.fromstring(xml, parser=parser) nc = doc.xpath(\"/ncml:netcdf\", namespaces=ns)[0] # Extract", ": str Link to NcML service of THREDDS server for", ": int Maximum recursive depth. Setting 0 will return only", "Global attribute values keyed by facet names, with variable attributes", "If None, depth is set to 1000. 
\"\"\" yield from", "in nc.xpath(\"ncml:group\", namespaces=ns): gr[group.attrib[\"name\"]] = _attrib_to_dict(group.xpath(\"ncml:attribute\", namespaces=ns)) # Extract variable", "namespaces=ns): if '_CoordinateAxisType' in variable.xpath(\"ncml:attribute/@name\", namespaces=ns): continue va[variable.attrib[\"name\"]] = _attrib_to_dict(variable.xpath(\"ncml:attribute\",", "cat.catalog_refs.items(): child = ref.follow() yield from walk(child, depth=depth-1) def attrs_from_ds(ds):", "cat.datasets.items() if depth is None: depth = 1000 if depth", "variable.xpath(\"ncml:attribute/@name\", namespaces=ns): continue va[variable.attrib[\"name\"]] = _attrib_to_dict(variable.xpath(\"ncml:attribute\", namespaces=ns)) out[\"__group__\"] = gr", "out def _attrib_to_dict(elems): \"\"\"Convert element attributes to dictionary. Ignore attributes", "datasets within the top-level catalog. If None, depth is set", "values keyed by facet names, with variable attributes in `__variable__`", "def attrs_from_ncml(url): \"\"\"Extract attributes from NcML file. Parameters ---------- url", "attributes to dictionary. Ignore attributes with names starting with _", "continue va[variable.attrib[\"name\"]] = _attrib_to_dict(variable.xpath(\"ncml:attribute\", namespaces=ns)) out[\"__group__\"] = gr out[\"__variable__\"] =", "= _attrib_to_dict(nc.xpath(\"ncml:attribute\", namespaces=ns)) # Extract group attributes gr = {}", "- UTF-8 encoded documents need to be read as bytes", ": TDSCatalog THREDDS catalog. depth : int Maximum recursive depth.", "------- dict Global attribute values keyed by facet names, with", "encoded documents need to be read as bytes xml =", "_attrib_to_dict(group.xpath(\"ncml:attribute\", namespaces=ns)) # Extract variable attributes va = {} for", "TDSCatalog THREDDS catalog. depth : int Maximum recursive depth. Setting", "name, ref in cat.catalog_refs.items(): child = ref.follow() yield from walk(child,", "will return only datasets within the top-level catalog. 
If None,", "depth is None: depth = 1000 if depth > 0:", "import requests parser = lxml.etree.XMLParser(encoding='UTF-8') ns = {\"ncml\": \"http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2\"} #", "---------- url : str Link to NcML service of THREDDS", "`__variable__` nested dict, and additional specialized attributes in `__group__` nested", "and additional specialized attributes in `__group__` nested dict. \"\"\" import", "from TDS Dataset.\"\"\" url = ds.access_urls[\"NCML\"] attrs = attrs_from_ncml(url) attrs[\"__services__\"]", "function to parse metadata from a THREDDS Data Server catalog.\"\"\"", "va = {} for variable in nc.xpath(\"ncml:variable\", namespaces=ns): if '_CoordinateAxisType'", "\"http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2\"} # Parse XML content - UTF-8 encoded documents need", "_attrib_to_dict(variable.xpath(\"ncml:attribute\", namespaces=ns)) out[\"__group__\"] = gr out[\"__variable__\"] = va return out", "Server catalog.\"\"\" def walk(cat, depth=1): \"\"\"Return a generator walking a", "None: depth = 1000 if depth > 0: for name,", "within the top-level catalog. If None, depth is set to", "attrs def attrs_from_ncml(url): \"\"\"Extract attributes from NcML file. Parameters ----------", "group attributes gr = {} for group in nc.xpath(\"ncml:group\", namespaces=ns):", "catalog.\"\"\" def walk(cat, depth=1): \"\"\"Return a generator walking a THREDDS", "ref.follow() yield from walk(child, depth=depth-1) def attrs_from_ds(ds): \"\"\"Extract attributes from", "https://github.com/Ouranosinc/pavics-vdb/blob/master/catalog/tds.py \"\"\"Utility function to parse metadata from a THREDDS Data", "\"\"\"Extract attributes from NcML file. Parameters ---------- url : str", "NcML service of THREDDS server for a dataset. Returns -------", "attribute values keyed by facet names, with variable attributes in", "cat : TDSCatalog THREDDS catalog. 
depth : int Maximum recursive", "in `__variable__` nested dict, and additional specialized attributes in `__group__`", "Returns ------- dict Global attribute values keyed by facet names,", "\"\"\" import lxml.etree import requests parser = lxml.etree.XMLParser(encoding='UTF-8') ns =", "# Parse XML content - UTF-8 encoded documents need to", "out[\"__group__\"] = gr out[\"__variable__\"] = va return out def _attrib_to_dict(elems):", "with names starting with _ \"\"\" hidden_prefix = \"_\" out", "depth is set to 1000. \"\"\" yield from cat.datasets.items() if", "generator walking a THREDDS data catalog for datasets. Parameters ----------", "for datasets. Parameters ---------- cat : TDSCatalog THREDDS catalog. depth" ]
[ "iterator for masked arrays. Return an iterator yielding pairs of", "which belong to the collections.Mapping abstract base class. Parameters ----------", "columns. Otherwise, the relationship is transposed: each column represents a", "collections.Mapping abstract base class. Parameters ---------- d1 : collections.Mapping Base", "corrcoef(x, y=None, rowvar=True, fweights=None, aweights=None): \"\"\" Return Pearson product-moment correlation", "These relative weights are typically large for observations considered \"important\"", "nodes. \"\"\" nodelist = [] def _get_nodes(tree): \"\"\" Build up", "array_like, optional An additional set of variables and observations. `y`", ":] # Clip real and imaginary parts to [-1, 1].", "yielding pairs of array coordinates and values, with masked values", "int, optional 1-D array of integer freguency weights; the number", "aweights : array_like, optional 1-D array of observation vector weights.", "observations considered less \"important\". If ``ddof=0`` the array of weights", "and each column a single observation of all those variables.", "d1[k] = v return d1 def nodes(tree): \"\"\" Return a", "values skipped. Parameters ---------- marr : MaskedArray Input array. \"\"\"", "coordinates and values, with masked values skipped. Parameters ---------- marr", "\"\"\" Return a list of values at every node of", "be repeated. aweights : array_like, optional 1-D array of observation", "collections.Mapping Dictionary with updated values Returns ------- d1 : collections.Mapping", "except AttributeError: nodelist.append(tree.left) try: _get_nodes(tree.right) except AttributeError: nodelist.append(tree.right) _get_nodes(tree) return", "of the implementation found in numpy, with the removal of", "of variables and observations. `y` has the same shape as", "class. Parameters ---------- d1 : collections.Mapping Base dictionary d2 :", "abstract base class. 
def deep_update(d1, d2):
    """
    Adds key-value pairs in d2 to d1. Conflicts are resolved in favour of d2.
    Recurses into all values in d2 which belong to the collections.abc.Mapping
    abstract base class.

    Parameters
    ----------
    d1 : collections.abc.Mapping
        Base dictionary
    d2 : collections.abc.Mapping
        Dictionary with updated values

    Returns
    -------
    d1 : collections.abc.Mapping
        Updated dictionary
    """
    # Local import: `collections.Mapping` was removed in Python 3.10, and
    # dict.iteritems() does not exist in Python 3 — use the 3.x spellings.
    from collections.abc import Mapping

    for key, value in d2.items():
        if isinstance(value, Mapping):
            # Merge nested mappings instead of overwriting them wholesale.
            d1[key] = deep_update(d1.get(key, {}), value)
        else:
            d1[key] = value
    return d1
def nodes(tree):
    """
    Return a list of values at every node of a tree.

    Parameters
    ----------
    tree : BinaryTree
        BinaryTree to extract nodes from.

    Returns
    -------
    nodelist : list
        List of values at tree nodes.
    """
    nodelist = []

    def _get_nodes(tree):
        """
        Build up a list of nodes.

        Parameters
        ----------
        tree : BinaryTree
            BinaryTree to extract nodes from.

        Returns
        -------
        None
        """
        # Pre-order traversal: record this node's value first.  If `tree`
        # is not a BinaryTree (e.g. a bare leaf value or None), this
        # attribute access raises AttributeError *inside the recursive
        # call*, which is caught by the caller's try-block below.
        nodelist.append(tree.val)
        try:
            _get_nodes(tree.left)
        except AttributeError:
            # tree.left was a raw leaf value (or None), not a BinaryTree:
            # record it directly.
            nodelist.append(tree.left)
        try:
            _get_nodes(tree.right)
        except AttributeError:
            # Same EAFP handling for the right child.
            nodelist.append(tree.right)

    _get_nodes(tree)
    return nodelist
def corrcoef(x, y=None, rowvar=True, fweights=None, aweights=None):
    """
    Return Pearson product-moment correlation coefficients.

    This is a copy of the implementation found in numpy, with the removal of
    the deprecated bias and ddof keyword arguments, and the addition of the
    fweights and aweights arguments, which are passed to np.cov.

    Parameters
    ----------
    x : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `x` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same
        shape as `x`.
    rowvar : bool, optional
        If `rowvar` is True (default), then each row represents a variable,
        with observations in the columns. Otherwise, the relationship is
        transposed: each column represents a variable, while the rows
        contain observations.
    fweights : array_like, int, optional
        1-D array of integer frequency weights; the number of times each
        observation vector should be repeated.
    aweights : array_like, optional
        1-D array of observation vector weights. These relative weights are
        typically large for observations considered "important" and smaller
        for observations considered less "important". If ``ddof=0`` the
        array of weights can be used to assign probabilities to observation
        vectors.

    Returns
    -------
    R : ndarray
        The correlation coefficient matrix of the variables.
    """
    c = np.cov(x, y, rowvar, fweights=fweights, aweights=aweights)
    try:
        d = np.diag(c)
    except ValueError:
        # scalar covariance
        # nan if incorrect value (nan, inf, 0), 1 otherwise
        return c / c
    # Normalize the covariance matrix in place by the outer product of the
    # standard deviations to obtain correlation coefficients.
    stddev = np.sqrt(d.real)
    c /= stddev[:, None]
    c /= stddev[None, :]

    # Clip real and imaginary parts to [-1, 1].  This does not guarantee
    # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without
    # excessive work.
    np.clip(c.real, -1, 1, out=c.real)
    if np.iscomplexobj(c):
        np.clip(c.imag, -1, 1, out=c.imag)

    return c
\"\"\" from __future__ import (absolute_import, division, print_function, unicode_literals)", "and values, with masked values skipped. Parameters ---------- marr :", "for masked arrays. Return an iterator yielding pairs of array", "nodelist def maenumerate(marr): \"\"\" Multidimensional index iterator for masked arrays.", "of the deperecated bias and ddof keyword arguments, and the", "all values in d2 which belong to the collections.Mapping abstract", ": list List of values at tree nodes. \"\"\" nodelist", "= [] def _get_nodes(tree): \"\"\" Build up a list of", "array of integer freguency weights; the number of times each", "integer freguency weights; the number of times each observation vector", "in d2 which belong to the collections.Mapping abstract base class.", "see `rowvar` below. y : array_like, optional An additional set", "work. np.clip(c.real, -1, 1, out=c.real) if np.iscomplexobj(c): np.clip(c.imag, -1, 1,", "array_like, int, optional 1-D array of integer freguency weights; the", "to d1. Conflicts are resolved in favour of d2. Recurses", "\"\"\" Return Pearson product-moment correlation coefficients. This is a copy", "(default), then each row represents a variable, with observations in", "less \"important\". If ``ddof=0`` the array of weights can be", "key-value pairs in d2 to d1. Conflicts are resolved in", "be used to assign probabilities to observation vectors. Returns -------", "values at tree nodes. \"\"\" nodelist = [] def _get_nodes(tree):", "the columns. Otherwise, the relationship is transposed: each column represents", "extract nodes from. Returns ------- nodelist : list List of", "typically large for observations considered \"important\" and smaller for observations", "None self.val = None def deep_update(d1, d2): \"\"\" Adds key-value", ": BinaryTree BinaryTree to extract nodes from. Returns ------- None", "``ddof=0`` the array of weights can be used to assign", "observations. 
Each row of `x` represents a variable, and each", "the relationship is transposed: each column represents a variable, while", "= np.diag(c) except ValueError: # scalar covariance # nan if", "\"\"\" for i, m in itertools.izip(np.ndenumerate(marr), ~marr.mask.ravel()): if m: yield", "optional 1-D array of integer freguency weights; the number of", "d1 : collections.Mapping Base dictionary d2 : collections.Mapping Dictionary with", "m in itertools.izip(np.ndenumerate(marr), ~marr.mask.ravel()): if m: yield i def corrcoef(x,", "Base dictionary d2 : collections.Mapping Dictionary with updated values Returns", "addition of the fweights and aweights arguments, which are pased", "= np.cov(x, y, rowvar, fweights=fweights, aweights=aweights) try: d = np.diag(c)", "number of times each observation vector should be repeated. aweights", "variable, while the rows contain observations. fweights : array_like, int,", "def nodes(tree): \"\"\" Return a list of values at every", "c /= stddev[None, :] # Clip real and imaginary parts", "fweights and aweights arguments, which are pased to np.cov. Parameters", "and ddof keyword arguments, and the addition of the fweights", "_get_nodes(tree): \"\"\" Build up a list of nodes. Parameters ----------", "masked arrays. Return an iterator yielding pairs of array coordinates", "nan if incorrect value (nan, inf, 0), 1 otherwise return", "BinaryTree(object): def __init__(self): self.left = None self.right = None self.val", ": bool, optional If `rowvar` is True (default), then each", "freguency weights; the number of times each observation vector should", "nodelist.append(tree.left) try: _get_nodes(tree.right) except AttributeError: nodelist.append(tree.right) _get_nodes(tree) return nodelist def", "excessive work. np.clip(c.real, -1, 1, out=c.real) if np.iscomplexobj(c): np.clip(c.imag, -1,", "belong to the collections.Mapping abstract base class. Parameters ---------- d1", "Clip real and imaginary parts to [-1, 1]. 
This does", "column a single observation of all those variables. Also see", "nodes from. Returns ------- None \"\"\" nodelist.append(tree.val) try: _get_nodes(tree.left) except", "arrays, but is the best we can do without #", "Input array. \"\"\" for i, m in itertools.izip(np.ndenumerate(marr), ~marr.mask.ravel()): if", "of values at every node of a tree. Parameters ----------", "scalar covariance # nan if incorrect value (nan, inf, 0),", "d2. Recurses into all values in d2 which belong to", "def _get_nodes(tree): \"\"\" Build up a list of nodes. Parameters", ": collections.Mapping Base dictionary d2 : collections.Mapping Dictionary with updated", "those variables. Also see `rowvar` below. y : array_like, optional", "single observation of all those variables. Also see `rowvar` below.", "a tree. Parameters ---------- tree : BinaryTree BinaryTree to extract", "Parameters ---------- marr : MaskedArray Input array. \"\"\" for i,", ": array_like, optional 1-D array of observation vector weights. These", "\"\"\" Build up a list of nodes. Parameters ---------- tree", "in d2 to d1. Conflicts are resolved in favour of", "Otherwise, the relationship is transposed: each column represents a variable,", "An additional set of variables and observations. `y` has the", "list of values at every node of a tree. Parameters", "row of `x` represents a variable, and each column a", "value (nan, inf, 0), 1 otherwise return c / c", "transposed: each column represents a variable, while the rows contain", "nodes from. Returns ------- nodelist : list List of values", "np.cov. Parameters ---------- x : array_like A 1-D or 2-D", "to the collections.Mapping abstract base class. Parameters ---------- d1 :", "found in numpy, with the removal of the deperecated bias", "np.diag(c) except ValueError: # scalar covariance # nan if incorrect", "the best we can do without # excessive work. 
np.clip(c.real,", "numpy as np class BinaryTree(object): def __init__(self): self.left = None", "matrix of the variables. \"\"\" c = np.cov(x, y, rowvar,", "array of observation vector weights. These relative weights are typically", "[-1, 1]. This does not guarantee # abs(a[i,j]) <= 1", "except AttributeError: nodelist.append(tree.right) _get_nodes(tree) return nodelist def maenumerate(marr): \"\"\" Multidimensional", "i def corrcoef(x, y=None, rowvar=True, fweights=None, aweights=None): \"\"\" Return Pearson", "------- None \"\"\" nodelist.append(tree.val) try: _get_nodes(tree.left) except AttributeError: nodelist.append(tree.left) try:", "vector should be repeated. aweights : array_like, optional 1-D array", "None \"\"\" nodelist.append(tree.val) try: _get_nodes(tree.left) except AttributeError: nodelist.append(tree.left) try: _get_nodes(tree.right)", "can be used to assign probabilities to observation vectors. Returns", "nodelist.append(tree.val) try: _get_nodes(tree.left) except AttributeError: nodelist.append(tree.left) try: _get_nodes(tree.right) except AttributeError:", "Multidimensional index iterator for masked arrays. Return an iterator yielding", "# Clip real and imaginary parts to [-1, 1]. This", "at tree nodes. \"\"\" nodelist = [] def _get_nodes(tree): \"\"\"", "do without # excessive work. np.clip(c.real, -1, 1, out=c.real) if", "This is a copy of the implementation found in numpy,", "class BinaryTree(object): def __init__(self): self.left = None self.right = None", "to assign probabilities to observation vectors. Returns ------- R :", "and imaginary parts to [-1, 1]. This does not guarantee", "the addition of the fweights and aweights arguments, which are", "to [-1, 1]. This does not guarantee # abs(a[i,j]) <=", "2-D array containing multiple variables and observations. Each row of", "d1. Conflicts are resolved in favour of d2. Recurses into", "1, out=c.real) if np.iscomplexobj(c): np.clip(c.imag, -1, 1, out=c.imag) return c", "from. 
Returns ------- nodelist : list List of values at", "is the best we can do without # excessive work.", "`y` has the same shape as `x`. rowvar : bool,", "np class BinaryTree(object): def __init__(self): self.left = None self.right =", "A 1-D or 2-D array containing multiple variables and observations.", "each row represents a variable, with observations in the columns.", "values in d2 which belong to the collections.Mapping abstract base", "collections.Mapping): d1[k] = deep_update(d1.get(k, {}), v) else: d1[k] = v", "AttributeError: nodelist.append(tree.left) try: _get_nodes(tree.right) except AttributeError: nodelist.append(tree.right) _get_nodes(tree) return nodelist", "d = np.diag(c) except ValueError: # scalar covariance # nan", "y : array_like, optional An additional set of variables and", "<= 1 for complex arrays, but is the best we", "not guarantee # abs(a[i,j]) <= 1 for complex arrays, but", "the removal of the deperecated bias and ddof keyword arguments,", "vector weights. These relative weights are typically large for observations", "# abs(a[i,j]) <= 1 for complex arrays, but is the", "additional set of variables and observations. `y` has the same", "v) else: d1[k] = v return d1 def nodes(tree): \"\"\"", "same shape as `x`. rowvar : bool, optional If `rowvar`", "array_like, optional 1-D array of observation vector weights. These relative", "observation of all those variables. Also see `rowvar` below. y", "to observation vectors. Returns ------- R : ndarray The correlation", "a list of values at every node of a tree.", "deep_update(d1, d2): \"\"\" Adds key-value pairs in d2 to d1.", "to extract nodes from. Returns ------- None \"\"\" nodelist.append(tree.val) try:", "\"\"\" Multidimensional index iterator for masked arrays. Return an iterator", "BinaryTree to extract nodes from. 
Returns ------- nodelist : list", "relationship is transposed: each column represents a variable, while the", "# scalar covariance # nan if incorrect value (nan, inf,", "coding: utf-8 -*- \"\"\" Module containing miscellaneous utility functions. \"\"\"", "list of nodes. Parameters ---------- tree : BinaryTree BinaryTree to", "`x`. rowvar : bool, optional If `rowvar` is True (default),", "stddev = np.sqrt(d.real) c /= stddev[:, None] c /= stddev[None,", "\"\"\" c = np.cov(x, y, rowvar, fweights=fweights, aweights=aweights) try: d", "deep_update(d1.get(k, {}), v) else: d1[k] = v return d1 def", "import collections import itertools import numpy as np class BinaryTree(object):", "and observations. `y` has the same shape as `x`. rowvar", "nodes(tree): \"\"\" Return a list of values at every node", ": collections.Mapping Updated dictionary \"\"\" for k, v in d2.iteritems():", "d2 to d1. Conflicts are resolved in favour of d2.", "<reponame>brunel-physics/mva_scikit # -*- coding: utf-8 -*- \"\"\" Module containing miscellaneous", "None] c /= stddev[None, :] # Clip real and imaginary", "each column a single observation of all those variables. Also", "an iterator yielding pairs of array coordinates and values, with", "1]. This does not guarantee # abs(a[i,j]) <= 1 for", "favour of d2. Recurses into all values in d2 which", "a single observation of all those variables. Also see `rowvar`", "observations in the columns. Otherwise, the relationship is transposed: each", "c / c stddev = np.sqrt(d.real) c /= stddev[:, None]", "\"\"\" for k, v in d2.iteritems(): if isinstance(v, collections.Mapping): d1[k]", "which are pased to np.cov. Parameters ---------- x : array_like", "1 otherwise return c / c stddev = np.sqrt(d.real) c", "`rowvar` is True (default), then each row represents a variable,", "---------- x : array_like A 1-D or 2-D array containing", "marr : MaskedArray Input array. \"\"\" for i, m in", "extract nodes from. 
Returns ------- None \"\"\" nodelist.append(tree.val) try: _get_nodes(tree.left)", "product-moment correlation coefficients. This is a copy of the implementation", "c = np.cov(x, y, rowvar, fweights=fweights, aweights=aweights) try: d =", "return c / c stddev = np.sqrt(d.real) c /= stddev[:,", "= None self.right = None self.val = None def deep_update(d1,", "nodelist : list List of values at tree nodes. \"\"\"", "with masked values skipped. Parameters ---------- marr : MaskedArray Input", "of values at tree nodes. \"\"\" nodelist = [] def", "with observations in the columns. Otherwise, the relationship is transposed:", "1-D or 2-D array containing multiple variables and observations. Each", "weights can be used to assign probabilities to observation vectors.", "array. \"\"\" for i, m in itertools.izip(np.ndenumerate(marr), ~marr.mask.ravel()): if m:", "i, m in itertools.izip(np.ndenumerate(marr), ~marr.mask.ravel()): if m: yield i def", "arrays. Return an iterator yielding pairs of array coordinates and", "/= stddev[None, :] # Clip real and imaginary parts to", "m: yield i def corrcoef(x, y=None, rowvar=True, fweights=None, aweights=None): \"\"\"", "observation vector weights. These relative weights are typically large for", "d2): \"\"\" Adds key-value pairs in d2 to d1. Conflicts", "the rows contain observations. fweights : array_like, int, optional 1-D", "incorrect value (nan, inf, 0), 1 otherwise return c /", "of d2. Recurses into all values in d2 which belong", "the same shape as `x`. rowvar : bool, optional If", "If ``ddof=0`` the array of weights can be used to", "observations. `y` has the same shape as `x`. rowvar :", "abs(a[i,j]) <= 1 for complex arrays, but is the best", "of the variables. \"\"\" c = np.cov(x, y, rowvar, fweights=fweights,", "observation vector should be repeated. aweights : array_like, optional 1-D", "represents a variable, with observations in the columns. Otherwise, the", "---------- marr : MaskedArray Input array. 
\"\"\" for i, m", "MaskedArray Input array. \"\"\" for i, m in itertools.izip(np.ndenumerate(marr), ~marr.mask.ravel()):", "c /= stddev[:, None] c /= stddev[None, :] # Clip", "node of a tree. Parameters ---------- tree : BinaryTree BinaryTree", "in the columns. Otherwise, the relationship is transposed: each column", "in favour of d2. Recurses into all values in d2", "coefficient matrix of the variables. \"\"\" c = np.cov(x, y,", "weights. These relative weights are typically large for observations considered", "dictionary \"\"\" for k, v in d2.iteritems(): if isinstance(v, collections.Mapping):", "# -*- coding: utf-8 -*- \"\"\" Module containing miscellaneous utility", "d2.iteritems(): if isinstance(v, collections.Mapping): d1[k] = deep_update(d1.get(k, {}), v) else:", "_get_nodes(tree) return nodelist def maenumerate(marr): \"\"\" Multidimensional index iterator for", "(absolute_import, division, print_function, unicode_literals) import collections import itertools import numpy", "rowvar=True, fweights=None, aweights=None): \"\"\" Return Pearson product-moment correlation coefficients. This", "y, rowvar, fweights=fweights, aweights=aweights) try: d = np.diag(c) except ValueError:", "Adds key-value pairs in d2 to d1. Conflicts are resolved", "and observations. Each row of `x` represents a variable, and", "best we can do without # excessive work. np.clip(c.real, -1,", "Returns ------- None \"\"\" nodelist.append(tree.val) try: _get_nodes(tree.left) except AttributeError: nodelist.append(tree.left)", "AttributeError: nodelist.append(tree.right) _get_nodes(tree) return nodelist def maenumerate(marr): \"\"\" Multidimensional index", "Returns ------- R : ndarray The correlation coefficient matrix of", "aweights=None): \"\"\" Return Pearson product-moment correlation coefficients. 
This is a", "\"\"\" nodelist = [] def _get_nodes(tree): \"\"\" Build up a", "np.sqrt(d.real) c /= stddev[:, None] c /= stddev[None, :] #", "Dictionary with updated values Returns ------- d1 : collections.Mapping Updated", "vectors. Returns ------- R : ndarray The correlation coefficient matrix", "as np class BinaryTree(object): def __init__(self): self.left = None self.right", "is transposed: each column represents a variable, while the rows", "d1[k] = deep_update(d1.get(k, {}), v) else: d1[k] = v return", "ndarray The correlation coefficient matrix of the variables. \"\"\" c", ": collections.Mapping Dictionary with updated values Returns ------- d1 :", "into all values in d2 which belong to the collections.Mapping", "large for observations considered \"important\" and smaller for observations considered", "imaginary parts to [-1, 1]. This does not guarantee #", "d1 def nodes(tree): \"\"\" Return a list of values at", "yield i def corrcoef(x, y=None, rowvar=True, fweights=None, aweights=None): \"\"\" Return", "array of weights can be used to assign probabilities to", "except ValueError: # scalar covariance # nan if incorrect value", "are pased to np.cov. Parameters ---------- x : array_like A", "and aweights arguments, which are pased to np.cov. Parameters ----------", "for i, m in itertools.izip(np.ndenumerate(marr), ~marr.mask.ravel()): if m: yield i", "keyword arguments, and the addition of the fweights and aweights", "contain observations. fweights : array_like, int, optional 1-D array of", "column represents a variable, while the rows contain observations. fweights", "relative weights are typically large for observations considered \"important\" and", "{}), v) else: d1[k] = v return d1 def nodes(tree):", "as `x`. rowvar : bool, optional If `rowvar` is True", "Module containing miscellaneous utility functions. 
\"\"\" from __future__ import (absolute_import,", "= np.sqrt(d.real) c /= stddev[:, None] c /= stddev[None, :]", "y=None, rowvar=True, fweights=None, aweights=None): \"\"\" Return Pearson product-moment correlation coefficients.", "try: _get_nodes(tree.left) except AttributeError: nodelist.append(tree.left) try: _get_nodes(tree.right) except AttributeError: nodelist.append(tree.right)", "def deep_update(d1, d2): \"\"\" Adds key-value pairs in d2 to", "pairs of array coordinates and values, with masked values skipped.", "optional An additional set of variables and observations. `y` has", "__future__ import (absolute_import, division, print_function, unicode_literals) import collections import itertools", "tree. Parameters ---------- tree : BinaryTree BinaryTree to extract nodes", "-*- coding: utf-8 -*- \"\"\" Module containing miscellaneous utility functions.", "try: d = np.diag(c) except ValueError: # scalar covariance #", "the array of weights can be used to assign probabilities", "while the rows contain observations. fweights : array_like, int, optional", "of observation vector weights. These relative weights are typically large", "of array coordinates and values, with masked values skipped. Parameters", "dictionary d2 : collections.Mapping Dictionary with updated values Returns -------", "array coordinates and values, with masked values skipped. Parameters ----------", "a copy of the implementation found in numpy, with the", "variables and observations. `y` has the same shape as `x`.", "Build up a list of nodes. Parameters ---------- tree :", "shape as `x`. rowvar : bool, optional If `rowvar` is", "considered \"important\" and smaller for observations considered less \"important\". If", "BinaryTree BinaryTree to extract nodes from. Returns ------- None \"\"\"", "parts to [-1, 1]. This does not guarantee # abs(a[i,j])", "of nodes. 
Parameters ---------- tree : BinaryTree BinaryTree to extract", "in itertools.izip(np.ndenumerate(marr), ~marr.mask.ravel()): if m: yield i def corrcoef(x, y=None,", "all those variables. Also see `rowvar` below. y : array_like,", "variable, with observations in the columns. Otherwise, the relationship is", "Parameters ---------- d1 : collections.Mapping Base dictionary d2 : collections.Mapping", "real and imaginary parts to [-1, 1]. This does not", "otherwise return c / c stddev = np.sqrt(d.real) c /=" ]
[ "return path @staticmethod def generate_random_system(n, k): locations = np.random.rand(n, 2)", "for i in range(len(arrival_times)): arrivals = arrival_times[i] for j in", "in range(len(path) - 1): t += system.compute_path_time(path[i:i+2], drone_id=drone_id) arrival_times.append((drone_id, path[i],", "i in range(len(arrival_times)): arrivals = arrival_times[i] for j in range(len(arrivals)", "i in range(n): for j in range(n): if i <", "for _ in range(len(system.sites))] for i in range(len(arrival_times)): arrivals =", "self.get_drone(drone_id).probability num_sites = len(self.sites) s = categorical([1/num_sites]*num_sites) path.append(s) site =", "event[3] arrival_times[site_id].append((drone_id, time)) return arrival_times def compute_cost(system, n): arrival_times =", "for i in locations: sites.append(Site(i)) drones = [] for i", "arrival_times.append((drone_id, path[i], path[i+1], t)) return arrival_times def _generate_arrival_times(system, num_drones, length):", "j in range(len(arrivals) - 1): interarrival_times[i].append(arrivals[j+1][1] - arrivals[j][1]) interarrival_avgs =", "t += system.compute_path_time(path[i:i+2], drone_id=drone_id) arrival_times.append((drone_id, path[i], path[i+1], t)) return arrival_times", "numpy as np import random from collections import namedtuple def", "return System(sites, drones) def _compute_arrival_times(path, drone_id, sites, speed): arrival_times =", "self.distance[path[i]][path[i + 1]] return d def compute_path_time(self, path, drone_id): d", "= site distance = np.zeros([n, n]) for i in range(n):", "event[0] site_id = event[2] time = event[3] arrival_times[site_id].append((drone_id, time)) return", "sites.append(Site(i)) drones = [] for i in range(k): speed =", "in range(len(system.sites))] for i in range(len(arrival_times)): arrivals = arrival_times[i] for", "= [[] for _ in range(len(system.sites))] for i in range(len(arrival_times)):", "= np.subtract(sites[i], sites[j]) d = np.linalg.norm(x) distance[i][j] = d 
distance[j][i]", "[compute_average(i) for i in interarrival_times] return max(interarrival_avgs) def compute_average(data): return", "interarrival_avgs = [compute_average(i) for i in interarrival_times] return max(interarrival_avgs) def", "matrix def categorical(p): return np.random.choice(len(p), 1, p=p)[0] Drone = namedtuple('Drone',", "x = np.subtract(sites[i], sites[j]) d = np.linalg.norm(x) distance[i][j] = d", "= d self.distance = distance def get_site(self, site_id): return self.sites[site_id]", "= [] for i in range(system): pass events.extend(compute_arrival_times(path, i)) def", "[s] P = self.get_drone(drone_id).probability site = categorical(P[s]) path.append(site) while site", "= 0 for i in range(n): matrix[i] = (1/np.sum(matrix[i]))*matrix[i] return", "for i in range(n): matrix[i][i] = 0 for i in", "n]) for i in range(n): for j in range(n): if", "t, drone_id): path = [s] P = self.get_drone(drone_id).probability site =", "probability') Site = namedtuple('Site', 'location') class System: def __init__(self, sites,", "arrival_times def _generate_arrival_times(system, num_drones, length): arrival_times = [[] for _", "distance[j][i] = d self.distance = distance def get_site(self, site_id): return", "range(n - 1): d += self.distance[path[i]][path[i + 1]] return d", "d += self.distance[path[i]][path[i + 1]] return d def compute_path_time(self, path,", "as np import random from collections import namedtuple def generate_prob_matrix(n):", "collections import namedtuple def generate_prob_matrix(n): matrix = np.random.rand(n, n) for", "self.drones = {} n = len(sites) for i, drone in", "events: drone_id = event[0] site_id = event[2] time = event[3]", "for i, drone in enumerate(drones): self.drones[i] = drone for i,", "arrivals[j][1]) interarrival_avgs = [compute_average(i) for i in interarrival_times] return max(interarrival_avgs)", "return self.drones[drone_id] def compute_path_distance(self, path): n = len(path) d =", "range(n): matrix[i] = 
(1/np.sum(matrix[i]))*matrix[i] return matrix def categorical(p): return np.random.choice(len(p),", "= event[3] arrival_times[site_id].append((drone_id, time)) return arrival_times def compute_cost(system, n): arrival_times", "return self.sites[site_id] def get_drone(self, drone_id): return self.drones[drone_id] def compute_path_distance(self, path):", "path.append(site) return path @staticmethod def generate_random_system(n, k): locations = np.random.rand(n,", "def get_key(item): return item[3] events = sorted(events, key=get_key) for event", "n = len(sites) for i, drone in enumerate(drones): self.drones[i] =", "d self.distance = distance def get_site(self, site_id): return self.sites[site_id] def", "= s for i in range(length): site = categorical(P[site]) path.append(site)", "events = sorted(events, key=get_key) for event in events: drone_id =", "{} self.drones = {} n = len(sites) for i, drone", "0 for i in range(len(path) - 1): t += system.compute_path_time(path[i:i+2],", "len(sites) for i, drone in enumerate(drones): self.drones[i] = drone for", "np.linalg.norm(x) distance[i][j] = d distance[j][i] = d self.distance = distance", "arrival_times = [[] for _ in range(len(system.sites))] events = []", "range(n): for j in range(n): if i < j: x", "def _compute_arrival_times(path, drone_id, sites, speed): arrival_times = [] t =", "+= system.compute_path_time(path[i:i+2], drone_id=drone_id) arrival_times.append((drone_id, path[i], path[i+1], t)) return arrival_times def", "categorical([1/num_sites]*num_sites) path.append(s) site = s for i in range(length): site", "[] for i in range(k): speed = abs(random.random()) probability =", "[] for i in range(system): pass events.extend(compute_arrival_times(path, i)) def get_key(item):", "categorical(p): return np.random.choice(len(p), 1, p=p)[0] Drone = namedtuple('Drone', 'speed probability')", "in range(len(arrival_times)): arrivals = arrival_times[i] for j in range(len(arrivals) -", "return np.random.choice(len(p), 1, p=p)[0] 
Drone = namedtuple('Drone', 'speed probability') Site", "= arrival_times[i] for j in range(len(arrivals) - 1): interarrival_times[i].append(arrivals[j+1][1] -", "compute_path_time(self, path, drone_id): d = self.compute_path_distance(path) return d/self.get_drone(drone_id).speed def generate_path_of_length(self,", "def compute_cost(system, n): arrival_times = generate_arrival_times(system, n) interarrival_times = [[]", "key=get_key) for event in events: drone_id = event[0] site_id =", "1): interarrival_times[i].append(arrivals[j+1][1] - arrivals[j][1]) interarrival_avgs = [compute_average(i) for i in", "self.drones[i] = drone for i, site in enumerate(sites): self.sites[i] =", "= abs(random.random()) probability = generate_prob_matrix(n) drones.append(Drone(speed, probability)) return System(sites, drones)", "compute_cost(system, n): arrival_times = generate_arrival_times(system, n) interarrival_times = [[] for", "_compute_arrival_times(path, drone_id, sites, speed): arrival_times = [] t = 0", "i < j: x = np.subtract(sites[i], sites[j]) d = np.linalg.norm(x)", "events = [] for i in range(system): pass events.extend(compute_arrival_times(path, i))", "generate_arrival_times(system, n) interarrival_times = [[] for _ in range(len(system.sites))] for", "def get_site(self, site_id): return self.sites[site_id] def get_drone(self, drone_id): return self.drones[drone_id]", "= [compute_average(i) for i in interarrival_times] return max(interarrival_avgs) def compute_average(data):", "drones) def _compute_arrival_times(path, drone_id, sites, speed): arrival_times = [] t", "@staticmethod def generate_random_system(n, k): locations = np.random.rand(n, 2) sites =", "= categorical(P[site]) path.append(site) return path def generate_path(self, s, t, drone_id):", "2) sites = [] for i in locations: sites.append(Site(i)) drones", "= drone for i, site in enumerate(sites): self.sites[i] = site", "0 for i in range(n - 1): d += self.distance[path[i]][path[i", "= sorted(events, 
key=get_key) for event in events: drone_id = event[0]", "self.compute_path_distance(path) return d/self.get_drone(drone_id).speed def generate_path_of_length(self, length, drone_id): path = []", "path def generate_path(self, s, t, drone_id): path = [s] P", "1, p=p)[0] Drone = namedtuple('Drone', 'speed probability') Site = namedtuple('Site',", "import random from collections import namedtuple def generate_prob_matrix(n): matrix =", "in range(n): matrix[i][i] = 0 for i in range(n): matrix[i]", "if i < j: x = np.subtract(sites[i], sites[j]) d =", "= generate_arrival_times(system, n) interarrival_times = [[] for _ in range(len(system.sites))]", "j in range(n): if i < j: x = np.subtract(sites[i],", "event in events: drone_id = event[0] site_id = event[2] time", "= distance def get_site(self, site_id): return self.sites[site_id] def get_drone(self, drone_id):", "for event in events: drone_id = event[0] site_id = event[2]", "[] t = 0 for i in range(len(path) - 1):", "_generate_arrival_times(system, num_drones, length): arrival_times = [[] for _ in range(len(system.sites))]", "_ in range(len(system.sites))] events = [] for i in range(system):", "for i in range(system): pass events.extend(compute_arrival_times(path, i)) def get_key(item): return", "arrival_times[site_id].append((drone_id, time)) return arrival_times def compute_cost(system, n): arrival_times = generate_arrival_times(system,", "= event[2] time = event[3] arrival_times[site_id].append((drone_id, time)) return arrival_times def", "!= t: site = categorical(P[site]) path.append(site) return path @staticmethod def", "= generate_prob_matrix(n) drones.append(Drone(speed, probability)) return System(sites, drones) def _compute_arrival_times(path, drone_id,", "np.random.choice(len(p), 1, p=p)[0] Drone = namedtuple('Drone', 'speed probability') Site =", "= categorical([1/num_sites]*num_sites) path.append(s) site = s for i in range(length):", "Drone = namedtuple('Drone', 'speed probability') Site = namedtuple('Site', 
'location') class", "enumerate(drones): self.drones[i] = drone for i, site in enumerate(sites): self.sites[i]", "np import random from collections import namedtuple def generate_prob_matrix(n): matrix", "site in enumerate(sites): self.sites[i] = site distance = np.zeros([n, n])", "arrival_times = generate_arrival_times(system, n) interarrival_times = [[] for _ in", "path[i+1], t)) return arrival_times def _generate_arrival_times(system, num_drones, length): arrival_times =", "def categorical(p): return np.random.choice(len(p), 1, p=p)[0] Drone = namedtuple('Drone', 'speed", "1): t += system.compute_path_time(path[i:i+2], drone_id=drone_id) arrival_times.append((drone_id, path[i], path[i+1], t)) return", "in range(system): pass events.extend(compute_arrival_times(path, i)) def get_key(item): return item[3] events", "drone_id=drone_id) arrival_times.append((drone_id, path[i], path[i+1], t)) return arrival_times def _generate_arrival_times(system, num_drones,", "namedtuple def generate_prob_matrix(n): matrix = np.random.rand(n, n) for i in", "= [] for i in range(k): speed = abs(random.random()) probability", "sites, speed): arrival_times = [] t = 0 for i", "generate_random_system(n, k): locations = np.random.rand(n, 2) sites = [] for", "in range(len(arrivals) - 1): interarrival_times[i].append(arrivals[j+1][1] - arrivals[j][1]) interarrival_avgs = [compute_average(i)", "[] P = self.get_drone(drone_id).probability num_sites = len(self.sites) s = categorical([1/num_sites]*num_sites)", "d distance[j][i] = d self.distance = distance def get_site(self, site_id):", "for i in range(k): speed = abs(random.random()) probability = generate_prob_matrix(n)", "return matrix def categorical(p): return np.random.choice(len(p), 1, p=p)[0] Drone =", "num_sites = len(self.sites) s = categorical([1/num_sites]*num_sites) path.append(s) site = s", "path.append(s) site = s for i in range(length): site =", "get_site(self, site_id): return self.sites[site_id] def get_drone(self, drone_id): 
class System:
    """A fleet of drones moving among fixed sites.

    Sites and drones are indexed by integer id; pairwise Euclidean
    distances between site locations are precomputed once at construction.
    """

    def __init__(self, sites, drones):
        """Index *sites* and *drones* by id and build the distance matrix."""
        self.sites = dict(enumerate(sites))
        self.drones = dict(enumerate(drones))
        n = len(sites)
        distance = np.zeros([n, n])
        # Fill the symmetric distance matrix from the upper triangle only.
        for i in range(n):
            for j in range(i + 1, n):
                d = np.linalg.norm(np.subtract(sites[i], sites[j]))
                distance[i][j] = d
                distance[j][i] = d
        self.distance = distance

    def get_site(self, site_id):
        """Return the site record for *site_id*."""
        return self.sites[site_id]

    def get_drone(self, drone_id):
        """Return the drone record for *drone_id*."""
        return self.drones[drone_id]

    def compute_path_distance(self, path):
        """Total distance along the consecutive site ids in *path*."""
        return sum(self.distance[a][b] for a, b in zip(path, path[1:]))

    def compute_path_time(self, path, drone_id):
        """Travel time of *path* at the given drone's constant speed."""
        return self.compute_path_distance(path) / self.get_drone(drone_id).speed

    def generate_path_of_length(self, length, drone_id):
        """Random walk of *length* hops starting from a uniform random site."""
        P = self.get_drone(drone_id).probability
        num_sites = len(self.sites)
        site = categorical([1/num_sites]*num_sites)
        path = [site]
        for _ in range(length):
            site = categorical(P[site])
            path.append(site)
        return path

    def generate_path(self, s, t, drone_id):
        """Random walk from site *s* that stops when it first reaches *t*.

        At least one hop is always taken, even when ``s == t``.
        """
        P = self.get_drone(drone_id).probability
        path = [s]
        site = categorical(P[s])
        path.append(site)
        while site != t:
            site = categorical(P[site])
            path.append(site)
        return path

    @staticmethod
    def generate_random_system(n, k):
        """Create *n* random planar sites and *k* drones with random dynamics."""
        sites = [Site(loc) for loc in np.random.rand(n, 2)]
        drones = [Drone(abs(random.random()), generate_prob_matrix(n))
                  for _ in range(k)]
        return System(sites, drones)
self.get_drone(drone_id).probability site = categorical(P[s])", "site_id = event[2] time = event[3] arrival_times[site_id].append((drone_id, time)) return arrival_times", "s, t, drone_id): path = [s] P = self.get_drone(drone_id).probability site", "< j: x = np.subtract(sites[i], sites[j]) d = np.linalg.norm(x) distance[i][j]", "d = 0 for i in range(n - 1): d", "for i in interarrival_times] return max(interarrival_avgs) def compute_average(data): return (1/len(data))*sum(data)", "in range(n): for j in range(n): if i < j:", "j: x = np.subtract(sites[i], sites[j]) d = np.linalg.norm(x) distance[i][j] =", "categorical(P[site]) path.append(site) return path def generate_path(self, s, t, drone_id): path", "def generate_random_system(n, k): locations = np.random.rand(n, 2) sites = []", "p=p)[0] Drone = namedtuple('Drone', 'speed probability') Site = namedtuple('Site', 'location')", "sites, drones): self.sites = {} self.drones = {} n =", "locations: sites.append(Site(i)) drones = [] for i in range(k): speed", "abs(random.random()) probability = generate_prob_matrix(n) drones.append(Drone(speed, probability)) return System(sites, drones) def", "drones): self.sites = {} self.drones = {} n = len(sites)", "(1/np.sum(matrix[i]))*matrix[i] return matrix def categorical(p): return np.random.choice(len(p), 1, p=p)[0] Drone", "n) for i in range(n): matrix[i][i] = 0 for i", "= [[] for _ in range(len(system.sites))] events = [] for", "range(k): speed = abs(random.random()) probability = generate_prob_matrix(n) drones.append(Drone(speed, probability)) return", "range(len(system.sites))] for i in range(len(arrival_times)): arrivals = arrival_times[i] for j", "drones = [] for i in range(k): speed = abs(random.random())", "System: def __init__(self, sites, drones): self.sites = {} self.drones =", "in range(n): matrix[i] = (1/np.sum(matrix[i]))*matrix[i] return matrix def categorical(p): return", "len(path) d = 0 for i in range(n - 1):", "drone_id, sites, speed): arrival_times = [] t = 0 
def _generate_arrival_times(system, num_drones, length):
    """Simulate every drone's walk and collect per-site arrival streams.

    Args:
        system: a System instance.
        num_drones: number of drones to simulate (ids ``0..num_drones-1``).
        length: number of hops in each drone's random walk.

    Returns:
        A list indexed by site id; entry ``s`` is a time-ordered list of
        ``(drone_id, arrival_time)`` pairs for arrivals at site ``s``.
    """
    arrival_times = [[] for _ in range(len(system.sites))]
    events = []
    # BUG FIX: the original iterated `range(system)` (a TypeError on a
    # System object), contained a stray `pass`, and called an undefined
    # `compute_arrival_times` with an undefined `path`.  Each drone now
    # gets its own random walk, timed by the sibling helper.
    for drone_id in range(num_drones):
        path = system.generate_path_of_length(length, drone_id)
        speed = system.get_drone(drone_id).speed
        events.extend(_compute_arrival_times(path, drone_id,
                                             system.sites, speed))
    # Merge all drones' events into one stream ordered by arrival time.
    events.sort(key=lambda event: event[3])
    for drone_id, _, site_id, time in events:
        # event[2] is the leg's destination site, matching the original.
        arrival_times[site_id].append((drone_id, time))
    return arrival_times
def compute_cost(system, n):
    """Return the largest per-site average inter-arrival time.

    NOTE(review): `generate_arrival_times` is not defined in this module
    chunk — presumably a public wrapper around `_generate_arrival_times`
    defined elsewhere in the file; confirm.  Sites that receive fewer than
    two arrivals contribute an empty gap list, which makes the averaging
    step raise ZeroDivisionError (unchanged from the original).
    """
    arrival_times = generate_arrival_times(system, n)
    interarrival_times = [[] for _ in range(len(system.sites))]
    for site_id, arrivals in enumerate(arrival_times):
        # Gaps between consecutive arrival timestamps at this site.
        for earlier, later in zip(arrivals, arrivals[1:]):
            interarrival_times[site_id].append(later[1] - earlier[1])
    interarrival_avgs = [compute_average(gaps) for gaps in interarrival_times]
    return max(interarrival_avgs)
def compute_average(data):
    """Arithmetic mean of *data*; raises ZeroDivisionError on empty input."""
    reciprocal = 1/len(data)
    return reciprocal*sum(data)
[ "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "All Rights Reserved. # # Licensed under the Apache License,", "2.0 (the \"License\"); # you may not use this file", "file except in compliance with the License. # You may", "agreed to in writing, software # distributed under the License", "Unless required by applicable law or agreed to in writing,", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "under the License. \"\"\"Tests for orbit.actions.conditional_action.\"\"\" from orbit import actions", "# limitations under the License. \"\"\"Tests for orbit.actions.conditional_action.\"\"\" from orbit", "for orbit.actions.conditional_action.\"\"\" from orbit import actions import tensorflow as tf", "Copyright 2022 The Orbit Authors. All Rights Reserved. # #", "distributed under the License is distributed on an \"AS IS\"", "the specific language governing permissions and # limitations under the", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "except in compliance with the License. # You may obtain", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "actions import tensorflow as tf class ConditionalActionTest(tf.test.TestCase): def test_conditional_action(self): #", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "not use this file except in compliance with the License.", "self.assertRaises(AssertionError) as ctx: conditional_action({'value': True}) self.assertEqual(ctx.exception.message, \"{'value': True}\") if __name__", "an AssertionError, since we can't in a lambda. 
def raise_assertion(arg):", "writing, software # distributed under the License is distributed on", "in writing, software # distributed under the License is distributed", "you may not use this file except in compliance with", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "the License. \"\"\"Tests for orbit.actions.conditional_action.\"\"\" from orbit import actions import", "orbit.actions.conditional_action.\"\"\" from orbit import actions import tensorflow as tf class", "# Nothing is raised. with self.assertRaises(AssertionError) as ctx: conditional_action({'value': True})", "raised. with self.assertRaises(AssertionError) as ctx: conditional_action({'value': True}) self.assertEqual(ctx.exception.message, \"{'value': True}\")", "class ConditionalActionTest(tf.test.TestCase): def test_conditional_action(self): # Define a function to raise", "use this file except in compliance with the License. #", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "Orbit Authors. All Rights Reserved. # # Licensed under the", "2022 The Orbit Authors. All Rights Reserved. # # Licensed", "CONDITIONS OF ANY KIND, either express or implied. # See", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "License. \"\"\"Tests for orbit.actions.conditional_action.\"\"\" from orbit import actions import tensorflow", "or implied. # See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "Rights Reserved. # # Licensed under the Apache License, Version", "language governing permissions and # limitations under the License. \"\"\"Tests", "License. # You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License, Version 2.0 (the \"License\"); # you may not use", "limitations under the License. 
\"\"\"Tests for orbit.actions.conditional_action.\"\"\" from orbit import", "# You may obtain a copy of the License at", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "under the License is distributed on an \"AS IS\" BASIS,", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "AssertionError(str(arg)) conditional_action = actions.ConditionalAction( condition=lambda x: x['value'], action=raise_assertion) conditional_action({'value': False})", "License for the specific language governing permissions and # limitations", "Authors. All Rights Reserved. # # Licensed under the Apache", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "with self.assertRaises(AssertionError) as ctx: conditional_action({'value': True}) self.assertEqual(ctx.exception.message, \"{'value': True}\") if", "Reserved. # # Licensed under the Apache License, Version 2.0", "as tf class ConditionalActionTest(tf.test.TestCase): def test_conditional_action(self): # Define a function", "raise AssertionError(str(arg)) conditional_action = actions.ConditionalAction( condition=lambda x: x['value'], action=raise_assertion) conditional_action({'value':", "# Copyright 2022 The Orbit Authors. All Rights Reserved. #", "from orbit import actions import tensorflow as tf class ConditionalActionTest(tf.test.TestCase):", "the License for the specific language governing permissions and #", "import actions import tensorflow as tf class ConditionalActionTest(tf.test.TestCase): def test_conditional_action(self):", "= actions.ConditionalAction( condition=lambda x: x['value'], action=raise_assertion) conditional_action({'value': False}) # Nothing", "(the \"License\"); # you may not use this file except", "governing permissions and # limitations under the License. 
\"\"\"Tests for", "conditional_action({'value': True}) self.assertEqual(ctx.exception.message, \"{'value': True}\") if __name__ == '__main__': tf.test.main()", "Apache License, Version 2.0 (the \"License\"); # you may not", "# you may not use this file except in compliance", "Define a function to raise an AssertionError, since we can't", "either express or implied. # See the License for the", "OR CONDITIONS OF ANY KIND, either express or implied. #", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "The Orbit Authors. All Rights Reserved. # # Licensed under", "the License is distributed on an \"AS IS\" BASIS, #", "in compliance with the License. # You may obtain a", "False}) # Nothing is raised. with self.assertRaises(AssertionError) as ctx: conditional_action({'value':", "software # distributed under the License is distributed on an", "a function to raise an AssertionError, since we can't in", "can't in a lambda. def raise_assertion(arg): raise AssertionError(str(arg)) conditional_action =", "permissions and # limitations under the License. \"\"\"Tests for orbit.actions.conditional_action.\"\"\"", "# # Unless required by applicable law or agreed to", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "since we can't in a lambda. def raise_assertion(arg): raise AssertionError(str(arg))", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "in a lambda. def raise_assertion(arg): raise AssertionError(str(arg)) conditional_action = actions.ConditionalAction(", "conditional_action({'value': False}) # Nothing is raised. with self.assertRaises(AssertionError) as ctx:", "conditional_action = actions.ConditionalAction( condition=lambda x: x['value'], action=raise_assertion) conditional_action({'value': False}) #", "Version 2.0 (the \"License\"); # you may not use this", "a lambda. 
def raise_assertion(arg): raise AssertionError(str(arg)) conditional_action = actions.ConditionalAction( condition=lambda", "we can't in a lambda. def raise_assertion(arg): raise AssertionError(str(arg)) conditional_action", "law or agreed to in writing, software # distributed under", "condition=lambda x: x['value'], action=raise_assertion) conditional_action({'value': False}) # Nothing is raised.", "actions.ConditionalAction( condition=lambda x: x['value'], action=raise_assertion) conditional_action({'value': False}) # Nothing is", "x['value'], action=raise_assertion) conditional_action({'value': False}) # Nothing is raised. with self.assertRaises(AssertionError)", "AssertionError, since we can't in a lambda. def raise_assertion(arg): raise", "<filename>orbit/actions/conditional_action_test.py # Copyright 2022 The Orbit Authors. All Rights Reserved.", "implied. # See the License for the specific language governing", "ConditionalActionTest(tf.test.TestCase): def test_conditional_action(self): # Define a function to raise an", "is raised. 
with self.assertRaises(AssertionError) as ctx: conditional_action({'value': True}) self.assertEqual(ctx.exception.message, \"{'value':", "under the Apache License, Version 2.0 (the \"License\"); # you", "tf class ConditionalActionTest(tf.test.TestCase): def test_conditional_action(self): # Define a function to", "\"License\"); # you may not use this file except in", "import tensorflow as tf class ConditionalActionTest(tf.test.TestCase): def test_conditional_action(self): # Define", "orbit import actions import tensorflow as tf class ConditionalActionTest(tf.test.TestCase): def", "tensorflow as tf class ConditionalActionTest(tf.test.TestCase): def test_conditional_action(self): # Define a", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "test_conditional_action(self): # Define a function to raise an AssertionError, since", "\"\"\"Tests for orbit.actions.conditional_action.\"\"\" from orbit import actions import tensorflow as", "lambda. def raise_assertion(arg): raise AssertionError(str(arg)) conditional_action = actions.ConditionalAction( condition=lambda x:", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "def test_conditional_action(self): # Define a function to raise an AssertionError,", "and # limitations under the License. \"\"\"Tests for orbit.actions.conditional_action.\"\"\" from", "Nothing is raised. with self.assertRaises(AssertionError) as ctx: conditional_action({'value': True}) self.assertEqual(ctx.exception.message,", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. 
# See the License", "See the License for the specific language governing permissions and", "as ctx: conditional_action({'value': True}) self.assertEqual(ctx.exception.message, \"{'value': True}\") if __name__ ==", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "raise an AssertionError, since we can't in a lambda. def", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "to in writing, software # distributed under the License is", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "function to raise an AssertionError, since we can't in a", "action=raise_assertion) conditional_action({'value': False}) # Nothing is raised. with self.assertRaises(AssertionError) as", "You may obtain a copy of the License at #", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "x: x['value'], action=raise_assertion) conditional_action({'value': False}) # Nothing is raised. with", "required by applicable law or agreed to in writing, software", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "raise_assertion(arg): raise AssertionError(str(arg)) conditional_action = actions.ConditionalAction( condition=lambda x: x['value'], action=raise_assertion)", "with the License. # You may obtain a copy of", "to raise an AssertionError, since we can't in a lambda.", "this file except in compliance with the License. 
# You", "def raise_assertion(arg): raise AssertionError(str(arg)) conditional_action = actions.ConditionalAction( condition=lambda x: x['value'],", "the Apache License, Version 2.0 (the \"License\"); # you may", "ctx: conditional_action({'value': True}) self.assertEqual(ctx.exception.message, \"{'value': True}\") if __name__ == '__main__':", "# Define a function to raise an AssertionError, since we" ]
################################################################################
#   DOCUMENTS
#
#   <NAME>
#   <EMAIL>
#   614 692 2050
#
#   04/22/2018  Original Construction
################################################################################

import traceback
import json

class Task:
    # Accumulates text output lines and tracks an execution status code.
    # NOTE(review): STATUS_NOT_EXECUTED / STATUS_SUCCESS / STATUS_EXCEPTION
    # are not defined in this chunk — presumably module-level constants
    # declared earlier in the file; confirm.

    def __init__(self):
        """Start with no output and a not-yet-executed status."""
        self.output = []
        self.status = STATUS_NOT_EXECUTED

    def execute(self, cli):
        """Dump every document in the "tags" collection as pretty JSON.

        Args:
            cli: client exposing AGTCollections(name); the returned
                collection supports find() yielding items whose `.object`
                payload is assumed JSON-serializable — TODO confirm.

        Returns:
            The final status code (success, or exception on any failure).
        """
        try:
            keys = cli.AGTCollections("tags")
            self.status = STATUS_SUCCESS
            for key in keys.find():
                #key.set()
                self.output.append(json.dumps(key.object, indent = 4))
        except Exception:
            # Best-effort: record the traceback in the output and report
            # failure via the status code instead of propagating.
            self.status = STATUS_EXCEPTION
            self.output.append(traceback.format_exc())
        return self.status
import json class Task:", "self.output = [] self.status = STATUS_NOT_EXECUTED def execute(self, cli): try:", "keys = cli.AGTCollections(\"tags\") self.status = STATUS_SUCCESS for key in keys.find():", "keys.find(): #key.set() self.output.append(json.dumps(key.object, indent = 4)) except Exception: self.status =", "indent = 4)) except Exception: self.status = STATUS_EXCEPTION self.output.append(traceback.format_exc()) return", "execute(self, cli): try: keys = cli.AGTCollections(\"tags\") self.status = STATUS_SUCCESS for", "<reponame>phnomcobra/valarie-content #!/usr/bin/python ################################################################################ # DOCUMENTS # # <NAME> # <EMAIL>", "def execute(self, cli): try: keys = cli.AGTCollections(\"tags\") self.status = STATUS_SUCCESS", "<NAME> # <EMAIL> # 614 692 2050 # # 04/22/2018", "DOCUMENTS # # <NAME> # <EMAIL> # 614 692 2050", "2050 # # 04/22/2018 Original Construction ################################################################################ import traceback import", "#!/usr/bin/python ################################################################################ # DOCUMENTS # # <NAME> # <EMAIL> #", "class Task: def __init__(self): self.output = [] self.status = STATUS_NOT_EXECUTED", "= cli.AGTCollections(\"tags\") self.status = STATUS_SUCCESS for key in keys.find(): #key.set()", "traceback import json class Task: def __init__(self): self.output = []" ]
[ "sigma = 1.5 histImg = numpy.zeros(img.shape[0:2]+(binCount*3,)) imgBig = None sizes", "hist.reshape([cgp.numCells(2),-1]) for c in range(histImg.shape[2]): histImg[:,:,c] += (size)*cgp.featureToImage(cellType=2,features=hist[:,c],ignoreInactive=False,useTopologicalShape=False) histImg=numpy.require(histImg,dtype=numpy.float32) histImg=vigra.taggedView(histImg,", "if imgBig is None: imgBig=vigra.sampling.resize(img,cgp.shape) #cgp2d.visualize(imgBig,cgp=cgp) print \"accumulate cell \"", "for c in range(histImg.shape[2]): histImg[:,:,c] += (size)*cgp.featureToImage(cellType=2,features=hist[:,c],ignoreInactive=False,useTopologicalShape=False) histImg=numpy.require(histImg,dtype=numpy.float32) histImg=vigra.taggedView(histImg, 'xyc')", "seglib.histogram import jointHistogram,histogram from seglib.region_descriptors.pixel.sift import denseSift # change me", "None sizes = [3,4,5,8,10,15,20,25,40,100] scalings = [5,10,15] for size in", "print \"accumulate cell \" hist = cgp.accumulateCellHistogram(cellType=2,image=img,binCount=binCount,sigma=sigma) hist = hist.reshape([cgp.numCells(2),-1])", "size in sizes: for scaling in scalings: size = int", "histImg=vigra.taggedView(histImg, 'xyc') histImg = vigra.gaussianSmoothing(histImg,sigma=1.0) #for c in range(histImg.shape[2]): #", "numpy.swapaxes( norm01(histImg[:,:,c]) ,0,1) ) # pylab.show() # # print \"hist\",hist.shape", "size,scaling labels ,nseg= vigra.analysis.slicSuperpixels(vigra.colors.transform_RGB2Lab(img),scaling,size) labels = vigra.analysis.labelImage(labels).astype(numpy.uint64) cgp,tgrid = cgp2d.cgpFromLabels(labels)", "= vigra.gaussianSmoothing(histImg,sigma=1.0) #for c in range(histImg.shape[2]): # #print c #", "[5,10,15] for size in sizes: for scaling in scalings: size", "#print c # pylab.imshow( numpy.swapaxes( norm01(histImg[:,:,c]) ,0,1) ) # pylab.show()", "1.5 histImg = numpy.zeros(img.shape[0:2]+(binCount*3,)) imgBig = None sizes = [3,4,5,8,10,15,20,25,40,100]", "# pylab.imshow( numpy.swapaxes( 
norm01(histImg[:,:,c]) ,0,1) ) # pylab.show() # #", "from seglib.preprocessing import norm01 from seglib.histogram import jointHistogram,histogram from seglib.region_descriptors.pixel.sift", "cell \" hist = cgp.accumulateCellHistogram(cellType=2,image=img,binCount=binCount,sigma=sigma) hist = hist.reshape([cgp.numCells(2),-1]) for c", "seglib.preprocessing import norm01 import seglib.edge_detectors.pixel as edp import seglib.region_descriptors.pixel as", "scaling in scalings: size = int (size) scaling = float(scaling)", "norm01 import seglib.edge_detectors.pixel as edp import seglib.region_descriptors.pixel as rdp from", "= numpy.zeros(img.shape[0:2]+(binCount*3,)) imgBig = None sizes = [3,4,5,8,10,15,20,25,40,100] scalings =", "sizes: for scaling in scalings: size = int (size) scaling", "jointHistogram,histogram from seglib.region_descriptors.pixel.sift import denseSift # change me to your", "= int (size) scaling = float(scaling) print size,scaling labels ,nseg=", "= hist.reshape([cgp.numCells(2),-1]) for c in range(histImg.shape[2]): histImg[:,:,c] += (size)*cgp.featureToImage(cellType=2,features=hist[:,c],ignoreInactive=False,useTopologicalShape=False) histImg=numpy.require(histImg,dtype=numpy.float32)", "c in range(histImg.shape[2]): # #print c # pylab.imshow( numpy.swapaxes( norm01(histImg[:,:,c])", "= float(scaling) print size,scaling labels ,nseg= vigra.analysis.slicSuperpixels(vigra.colors.transform_RGB2Lab(img),scaling,size) labels = vigra.analysis.labelImage(labels).astype(numpy.uint64)", "print size,scaling labels ,nseg= vigra.analysis.slicSuperpixels(vigra.colors.transform_RGB2Lab(img),scaling,size) labels = vigra.analysis.labelImage(labels).astype(numpy.uint64) cgp,tgrid =", "in range(histImg.shape[2]): histImg[:,:,c] += (size)*cgp.featureToImage(cellType=2,features=hist[:,c],ignoreInactive=False,useTopologicalShape=False) histImg=numpy.require(histImg,dtype=numpy.float32) histImg=vigra.taggedView(histImg, 'xyc') histImg =", "in 
range(histImg.shape[2]): # #print c # pylab.imshow( numpy.swapaxes( norm01(histImg[:,:,c]) ,0,1)", "labels = vigra.analysis.labelImage(labels).astype(numpy.uint64) cgp,tgrid = cgp2d.cgpFromLabels(labels) if imgBig is None:", "as edp import seglib.region_descriptors.pixel as rdp from seglib.preprocessing import norm01", "import cgp2d from seglib.preprocessing import norm01 import seglib.edge_detectors.pixel as edp", "vigra.gaussianSmoothing(histImg,sigma=1.0) #for c in range(histImg.shape[2]): # #print c # pylab.imshow(", "to your path img = \"img/text.jpg\" img = numpy.squeeze(vigra.readImage(img))#[0:75,0:75,:] binCount", "seglib.region_descriptors.pixel as rdp from seglib.preprocessing import norm01 from seglib.histogram import", "= 30 sigma = 1.5 histImg = numpy.zeros(img.shape[0:2]+(binCount*3,)) imgBig =", "change me to your path img = \"img/text.jpg\" img =", "range(histImg.shape[2]): # #print c # pylab.imshow( numpy.swapaxes( norm01(histImg[:,:,c]) ,0,1) )", "+= (size)*cgp.featureToImage(cellType=2,features=hist[:,c],ignoreInactive=False,useTopologicalShape=False) histImg=numpy.require(histImg,dtype=numpy.float32) histImg=vigra.taggedView(histImg, 'xyc') histImg = vigra.gaussianSmoothing(histImg,sigma=1.0) #for c", "from seglib.preprocessing import norm01 import seglib.edge_detectors.pixel as edp import seglib.region_descriptors.pixel", "denseSift # change me to your path img = \"img/text.jpg\"", "norm01(histImg[:,:,c]) ,0,1) ) # pylab.show() # # print \"hist\",hist.shape imgdt", "= vigra.analysis.labelImage(labels).astype(numpy.uint64) cgp,tgrid = cgp2d.cgpFromLabels(labels) if imgBig is None: imgBig=vigra.sampling.resize(img,cgp.shape)", "histImg = numpy.zeros(img.shape[0:2]+(binCount*3,)) imgBig = None sizes = [3,4,5,8,10,15,20,25,40,100] scalings", "for scaling in scalings: size = int (size) scaling =", "from seglib import cgp2d from seglib.preprocessing import norm01 import seglib.edge_detectors.pixel", "# #print c # pylab.imshow( numpy.swapaxes( 
norm01(histImg[:,:,c]) ,0,1) ) #", "img = numpy.squeeze(vigra.readImage(img))#[0:75,0:75,:] binCount = 30 sigma = 1.5 histImg", "= 1.5 histImg = numpy.zeros(img.shape[0:2]+(binCount*3,)) imgBig = None sizes =", "None: imgBig=vigra.sampling.resize(img,cgp.shape) #cgp2d.visualize(imgBig,cgp=cgp) print \"accumulate cell \" hist = cgp.accumulateCellHistogram(cellType=2,image=img,binCount=binCount,sigma=sigma)", "cgp2d.cgpFromLabels(labels) if imgBig is None: imgBig=vigra.sampling.resize(img,cgp.shape) #cgp2d.visualize(imgBig,cgp=cgp) print \"accumulate cell", "import jointHistogram,histogram from seglib.region_descriptors.pixel.sift import denseSift # change me to", "import seglib.region_descriptors.pixel as rdp from seglib.preprocessing import norm01 from seglib.histogram", "= numpy.squeeze(vigra.readImage(img))#[0:75,0:75,:] binCount = 30 sigma = 1.5 histImg =", "is None: imgBig=vigra.sampling.resize(img,cgp.shape) #cgp2d.visualize(imgBig,cgp=cgp) print \"accumulate cell \" hist =", "numpy import pylab from seglib import cgp2d from seglib.preprocessing import", "path img = \"img/text.jpg\" img = numpy.squeeze(vigra.readImage(img))#[0:75,0:75,:] binCount = 30", "scalings = [5,10,15] for size in sizes: for scaling in", "size = int (size) scaling = float(scaling) print size,scaling labels", "seglib.edge_detectors.pixel as edp import seglib.region_descriptors.pixel as rdp from seglib.preprocessing import", "hist = hist.reshape([cgp.numCells(2),-1]) for c in range(histImg.shape[2]): histImg[:,:,c] += (size)*cgp.featureToImage(cellType=2,features=hist[:,c],ignoreInactive=False,useTopologicalShape=False)", "\" hist = cgp.accumulateCellHistogram(cellType=2,image=img,binCount=binCount,sigma=sigma) hist = hist.reshape([cgp.numCells(2),-1]) for c in", "= cgp.accumulateCellHistogram(cellType=2,image=img,binCount=binCount,sigma=sigma) hist = hist.reshape([cgp.numCells(2),-1]) for c in range(histImg.shape[2]): histImg[:,:,c]", ",0,1) ) # pylab.show() # # print \"hist\",hist.shape 
imgdt =", "# pylab.show() # # print \"hist\",hist.shape imgdt = rdp.deepDetexturize(srcImg=img,img=histImg,nIteration=10, nCluster=10,reductionAlg='pca',nldEdgeThreshold=10.0,nldScale=10.0,distance=None)#'cityblock')", "import denseSift # change me to your path img =", "imgBig=vigra.sampling.resize(img,cgp.shape) #cgp2d.visualize(imgBig,cgp=cgp) print \"accumulate cell \" hist = cgp.accumulateCellHistogram(cellType=2,image=img,binCount=binCount,sigma=sigma) hist", "seglib import cgp2d from seglib.preprocessing import norm01 import seglib.edge_detectors.pixel as", "histImg=numpy.require(histImg,dtype=numpy.float32) histImg=vigra.taggedView(histImg, 'xyc') histImg = vigra.gaussianSmoothing(histImg,sigma=1.0) #for c in range(histImg.shape[2]):", ",nseg= vigra.analysis.slicSuperpixels(vigra.colors.transform_RGB2Lab(img),scaling,size) labels = vigra.analysis.labelImage(labels).astype(numpy.uint64) cgp,tgrid = cgp2d.cgpFromLabels(labels) if imgBig", "numpy.zeros(img.shape[0:2]+(binCount*3,)) imgBig = None sizes = [3,4,5,8,10,15,20,25,40,100] scalings = [5,10,15]", "in sizes: for scaling in scalings: size = int (size)", "pylab from seglib import cgp2d from seglib.preprocessing import norm01 import", "cgp,tgrid = cgp2d.cgpFromLabels(labels) if imgBig is None: imgBig=vigra.sampling.resize(img,cgp.shape) #cgp2d.visualize(imgBig,cgp=cgp) print", "range(histImg.shape[2]): histImg[:,:,c] += (size)*cgp.featureToImage(cellType=2,features=hist[:,c],ignoreInactive=False,useTopologicalShape=False) histImg=numpy.require(histImg,dtype=numpy.float32) histImg=vigra.taggedView(histImg, 'xyc') histImg = vigra.gaussianSmoothing(histImg,sigma=1.0)", "seglib.preprocessing import norm01 from seglib.histogram import jointHistogram,histogram from seglib.region_descriptors.pixel.sift import", "seglib.region_descriptors.pixel.sift import denseSift # change me to your path img", "vigra.analysis.slicSuperpixels(vigra.colors.transform_RGB2Lab(img),scaling,size) labels = 
vigra.analysis.labelImage(labels).astype(numpy.uint64) cgp,tgrid = cgp2d.cgpFromLabels(labels) if imgBig is", ") # pylab.show() # # print \"hist\",hist.shape imgdt = rdp.deepDetexturize(srcImg=img,img=histImg,nIteration=10,", "from seglib.histogram import jointHistogram,histogram from seglib.region_descriptors.pixel.sift import denseSift # change", "import pylab from seglib import cgp2d from seglib.preprocessing import norm01", "norm01 from seglib.histogram import jointHistogram,histogram from seglib.region_descriptors.pixel.sift import denseSift #", "(size)*cgp.featureToImage(cellType=2,features=hist[:,c],ignoreInactive=False,useTopologicalShape=False) histImg=numpy.require(histImg,dtype=numpy.float32) histImg=vigra.taggedView(histImg, 'xyc') histImg = vigra.gaussianSmoothing(histImg,sigma=1.0) #for c in", "import vigra import numpy import pylab from seglib import cgp2d", "scaling = float(scaling) print size,scaling labels ,nseg= vigra.analysis.slicSuperpixels(vigra.colors.transform_RGB2Lab(img),scaling,size) labels =", "float(scaling) print size,scaling labels ,nseg= vigra.analysis.slicSuperpixels(vigra.colors.transform_RGB2Lab(img),scaling,size) labels = vigra.analysis.labelImage(labels).astype(numpy.uint64) cgp,tgrid", "= None sizes = [3,4,5,8,10,15,20,25,40,100] scalings = [5,10,15] for size", "for size in sizes: for scaling in scalings: size =", "labels ,nseg= vigra.analysis.slicSuperpixels(vigra.colors.transform_RGB2Lab(img),scaling,size) labels = vigra.analysis.labelImage(labels).astype(numpy.uint64) cgp,tgrid = cgp2d.cgpFromLabels(labels) if", "\"accumulate cell \" hist = cgp.accumulateCellHistogram(cellType=2,image=img,binCount=binCount,sigma=sigma) hist = hist.reshape([cgp.numCells(2),-1]) for", "cgp.accumulateCellHistogram(cellType=2,image=img,binCount=binCount,sigma=sigma) hist = hist.reshape([cgp.numCells(2),-1]) for c in range(histImg.shape[2]): histImg[:,:,c] +=", "cgp2d from seglib.preprocessing import norm01 import seglib.edge_detectors.pixel as 
edp import", "edp import seglib.region_descriptors.pixel as rdp from seglib.preprocessing import norm01 from", "numpy.squeeze(vigra.readImage(img))#[0:75,0:75,:] binCount = 30 sigma = 1.5 histImg = numpy.zeros(img.shape[0:2]+(binCount*3,))", "in scalings: size = int (size) scaling = float(scaling) print", "30 sigma = 1.5 histImg = numpy.zeros(img.shape[0:2]+(binCount*3,)) imgBig = None", "import numpy import pylab from seglib import cgp2d from seglib.preprocessing", "pylab.imshow( numpy.swapaxes( norm01(histImg[:,:,c]) ,0,1) ) # pylab.show() # # print", "# change me to your path img = \"img/text.jpg\" img", "= \"img/text.jpg\" img = numpy.squeeze(vigra.readImage(img))#[0:75,0:75,:] binCount = 30 sigma =", "binCount = 30 sigma = 1.5 histImg = numpy.zeros(img.shape[0:2]+(binCount*3,)) imgBig", "your path img = \"img/text.jpg\" img = numpy.squeeze(vigra.readImage(img))#[0:75,0:75,:] binCount =", "c in range(histImg.shape[2]): histImg[:,:,c] += (size)*cgp.featureToImage(cellType=2,features=hist[:,c],ignoreInactive=False,useTopologicalShape=False) histImg=numpy.require(histImg,dtype=numpy.float32) histImg=vigra.taggedView(histImg, 'xyc') histImg", "import norm01 from seglib.histogram import jointHistogram,histogram from seglib.region_descriptors.pixel.sift import denseSift", "vigra.analysis.labelImage(labels).astype(numpy.uint64) cgp,tgrid = cgp2d.cgpFromLabels(labels) if imgBig is None: imgBig=vigra.sampling.resize(img,cgp.shape) #cgp2d.visualize(imgBig,cgp=cgp)", "c # pylab.imshow( numpy.swapaxes( norm01(histImg[:,:,c]) ,0,1) ) # pylab.show() #", "from seglib.region_descriptors.pixel.sift import denseSift # change me to your path", "import seglib.edge_detectors.pixel as edp import seglib.region_descriptors.pixel as rdp from seglib.preprocessing", "[3,4,5,8,10,15,20,25,40,100] scalings = [5,10,15] for size in sizes: for scaling", "int (size) scaling = float(scaling) print size,scaling labels ,nseg= 
vigra.analysis.slicSuperpixels(vigra.colors.transform_RGB2Lab(img),scaling,size)", "vigra import numpy import pylab from seglib import cgp2d from", "= [3,4,5,8,10,15,20,25,40,100] scalings = [5,10,15] for size in sizes: for", "hist = cgp.accumulateCellHistogram(cellType=2,image=img,binCount=binCount,sigma=sigma) hist = hist.reshape([cgp.numCells(2),-1]) for c in range(histImg.shape[2]):", "rdp from seglib.preprocessing import norm01 from seglib.histogram import jointHistogram,histogram from", "img = \"img/text.jpg\" img = numpy.squeeze(vigra.readImage(img))#[0:75,0:75,:] binCount = 30 sigma", "\"img/text.jpg\" img = numpy.squeeze(vigra.readImage(img))#[0:75,0:75,:] binCount = 30 sigma = 1.5", "scalings: size = int (size) scaling = float(scaling) print size,scaling", "= cgp2d.cgpFromLabels(labels) if imgBig is None: imgBig=vigra.sampling.resize(img,cgp.shape) #cgp2d.visualize(imgBig,cgp=cgp) print \"accumulate", "#for c in range(histImg.shape[2]): # #print c # pylab.imshow( numpy.swapaxes(", "me to your path img = \"img/text.jpg\" img = numpy.squeeze(vigra.readImage(img))#[0:75,0:75,:]", "as rdp from seglib.preprocessing import norm01 from seglib.histogram import jointHistogram,histogram", "sizes = [3,4,5,8,10,15,20,25,40,100] scalings = [5,10,15] for size in sizes:", "histImg = vigra.gaussianSmoothing(histImg,sigma=1.0) #for c in range(histImg.shape[2]): # #print c", "import norm01 import seglib.edge_detectors.pixel as edp import seglib.region_descriptors.pixel as rdp", "histImg[:,:,c] += (size)*cgp.featureToImage(cellType=2,features=hist[:,c],ignoreInactive=False,useTopologicalShape=False) histImg=numpy.require(histImg,dtype=numpy.float32) histImg=vigra.taggedView(histImg, 'xyc') histImg = vigra.gaussianSmoothing(histImg,sigma=1.0) #for", "#cgp2d.visualize(imgBig,cgp=cgp) print \"accumulate cell \" hist = cgp.accumulateCellHistogram(cellType=2,image=img,binCount=binCount,sigma=sigma) hist =", "= [5,10,15] for size in sizes: for scaling in scalings:", "'xyc') 
histImg = vigra.gaussianSmoothing(histImg,sigma=1.0) #for c in range(histImg.shape[2]): # #print", "(size) scaling = float(scaling) print size,scaling labels ,nseg= vigra.analysis.slicSuperpixels(vigra.colors.transform_RGB2Lab(img),scaling,size) labels", "imgBig is None: imgBig=vigra.sampling.resize(img,cgp.shape) #cgp2d.visualize(imgBig,cgp=cgp) print \"accumulate cell \" hist", "imgBig = None sizes = [3,4,5,8,10,15,20,25,40,100] scalings = [5,10,15] for" ]
[ "def user_directory_path(instance, filename): # file will be uploaded to MEDIA_ROOT/user_<id>/<filename>", "null=True) user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True) def __str__(self): return self.uploaded_file.name", "datetime import date from django.conf import settings from django.db import", "uploaded to MEDIA_ROOT/user_<id>/<filename> today = date.today() return '{0}/{2}/{1}'.format(instance.user.username, filename, today.strftime(\"%Y/%m/%d/\"))", "from django.conf import settings from django.db import models # Create", "null=True) date_uploaded = models.DateField(auto_now_add=True, null=True) user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True)", "today.strftime(\"%Y/%m/%d/\")) class Upload(models.Model): uploaded_file = models.FileField(null=True, blank=True, upload_to=user_directory_path) file_name =", "file_name = models.CharField(max_length=255, null=True) date_uploaded = models.DateField(auto_now_add=True, null=True) user =", "models here. def user_directory_path(instance, filename): # file will be uploaded", "from datetime import date from django.conf import settings from django.db", "# Create your models here. def user_directory_path(instance, filename): # file", "MEDIA_ROOT/user_<id>/<filename> today = date.today() return '{0}/{2}/{1}'.format(instance.user.username, filename, today.strftime(\"%Y/%m/%d/\")) class Upload(models.Model):", "django.db import models # Create your models here. 
def user_directory_path(instance,", "filename): # file will be uploaded to MEDIA_ROOT/user_<id>/<filename> today =", "be uploaded to MEDIA_ROOT/user_<id>/<filename> today = date.today() return '{0}/{2}/{1}'.format(instance.user.username, filename,", "= models.FileField(null=True, blank=True, upload_to=user_directory_path) file_name = models.CharField(max_length=255, null=True) date_uploaded =", "today = date.today() return '{0}/{2}/{1}'.format(instance.user.username, filename, today.strftime(\"%Y/%m/%d/\")) class Upload(models.Model): uploaded_file", "your models here. def user_directory_path(instance, filename): # file will be", "upload_to=user_directory_path) file_name = models.CharField(max_length=255, null=True) date_uploaded = models.DateField(auto_now_add=True, null=True) user", "import date from django.conf import settings from django.db import models", "to MEDIA_ROOT/user_<id>/<filename> today = date.today() return '{0}/{2}/{1}'.format(instance.user.username, filename, today.strftime(\"%Y/%m/%d/\")) class", "import models # Create your models here. def user_directory_path(instance, filename):", "filename, today.strftime(\"%Y/%m/%d/\")) class Upload(models.Model): uploaded_file = models.FileField(null=True, blank=True, upload_to=user_directory_path) file_name", "date.today() return '{0}/{2}/{1}'.format(instance.user.username, filename, today.strftime(\"%Y/%m/%d/\")) class Upload(models.Model): uploaded_file = models.FileField(null=True,", "models # Create your models here. 
def user_directory_path(instance, filename): #", "will be uploaded to MEDIA_ROOT/user_<id>/<filename> today = date.today() return '{0}/{2}/{1}'.format(instance.user.username,", "file will be uploaded to MEDIA_ROOT/user_<id>/<filename> today = date.today() return", "user_directory_path(instance, filename): # file will be uploaded to MEDIA_ROOT/user_<id>/<filename> today", "'{0}/{2}/{1}'.format(instance.user.username, filename, today.strftime(\"%Y/%m/%d/\")) class Upload(models.Model): uploaded_file = models.FileField(null=True, blank=True, upload_to=user_directory_path)", "blank=True, upload_to=user_directory_path) file_name = models.CharField(max_length=255, null=True) date_uploaded = models.DateField(auto_now_add=True, null=True)", "models.FileField(null=True, blank=True, upload_to=user_directory_path) file_name = models.CharField(max_length=255, null=True) date_uploaded = models.DateField(auto_now_add=True,", "= models.DateField(auto_now_add=True, null=True) user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True) def __str__(self):", "django.conf import settings from django.db import models # Create your", "date_uploaded = models.DateField(auto_now_add=True, null=True) user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True) def", "# file will be uploaded to MEDIA_ROOT/user_<id>/<filename> today = date.today()", "Upload(models.Model): uploaded_file = models.FileField(null=True, blank=True, upload_to=user_directory_path) file_name = models.CharField(max_length=255, null=True)", "models.DateField(auto_now_add=True, null=True) user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True) def __str__(self): return", "return '{0}/{2}/{1}'.format(instance.user.username, filename, today.strftime(\"%Y/%m/%d/\")) class Upload(models.Model): uploaded_file = models.FileField(null=True, blank=True,", "from django.db import models # Create your models here. 
def", "import settings from django.db import models # Create your models", "= models.CharField(max_length=255, null=True) date_uploaded = models.DateField(auto_now_add=True, null=True) user = models.ForeignKey(settings.AUTH_USER_MODEL,", "<reponame>helwete/simple-backup<filename>backup/models.py from datetime import date from django.conf import settings from", "date from django.conf import settings from django.db import models #", "Create your models here. def user_directory_path(instance, filename): # file will", "class Upload(models.Model): uploaded_file = models.FileField(null=True, blank=True, upload_to=user_directory_path) file_name = models.CharField(max_length=255,", "= date.today() return '{0}/{2}/{1}'.format(instance.user.username, filename, today.strftime(\"%Y/%m/%d/\")) class Upload(models.Model): uploaded_file =", "uploaded_file = models.FileField(null=True, blank=True, upload_to=user_directory_path) file_name = models.CharField(max_length=255, null=True) date_uploaded", "models.CharField(max_length=255, null=True) date_uploaded = models.DateField(auto_now_add=True, null=True) user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,", "settings from django.db import models # Create your models here.", "here. def user_directory_path(instance, filename): # file will be uploaded to" ]
[ "引入方法 import Kmeans_AnFany as K_Af # AnFany import Kmeans_Sklearn as", "skru, titl='Sklearn 结果') plt.subplot(2, 2, 4) plt.axis('off') plt.text(0.3, 0.6, 'AnFany", "Center: if du: plt.scatter(ss[0], ss[1], c='k', s=100, marker='8', label='类别中心') #绘制类别中心点", "# 调用不同的方法 # AnFany kresult = K_Af.op_kmeans(X, countcen=6) # Sklearn", "typedict: center[kk] = np.mean(Xdata[typedict[kk]], axis=0) # 均值 cio = 0", "# 计算最终的分类结果的成本值 def Cost(Xdata, typedict): center = {} for kk", "n_features=2) # 绘制散点图 def fig_scatter(exdata, eydata, titl='训练数据散点图', co=['r', 'g', 'k',", "plt.ylabel('特征2') # 定义欧几里得距离 def dis(sample, center): cen = np.array([center]) sample", "plt.text(0.3, 0.6, 'AnFany 最终的分类成本值为:%.5f'%Cost(X, kresult[2])) plt.text(0.3, 0.3, 'Sklearn 最终的分类成本值为:%.5f'%Cost(X, skru))", "设置中文字体新宋体 mpl.rcParams['axes.unicode_minus'] = False import numpy as np # 利用sklearn生成数据集", "import Kmeans_Sklearn as K_Sk # Sklearn import matplotlib.pyplot as plt", "Y = make_blobs(n_samples=600, centers=6, n_features=2) # 绘制散点图 def fig_scatter(exdata, eydata,", "titl = 'AnFany 结果'): du = 1 for jj in", "- cen) ** 2, axis=1) ** 0.5 return usb else:", "** 0.5 return usb else: return 0 # 计算最终的分类结果的成本值 def", "return 0 # 计算最终的分类结果的成本值 def Cost(Xdata, typedict): center = {}", "Sklearn sk = K_Sk.KMeans(init='k-means++', n_clusters=6, n_init=10) train = sk.fit(X) result", "均值 cio = 0 for cc in typedict: cio +=", "K_Af # AnFany import Kmeans_Sklearn as K_Sk # Sklearn import", "# 设置中文字体新宋体 mpl.rcParams['axes.unicode_minus'] = False import numpy as np #", "= sk.fit(X) result = sk.predict(X) skru = K_Sk.trans(result) #绘制算法后的类别的散点图 def", "0 else: plt.scatter(ss[0], ss[1], c='k', s=100, marker='8') # 绘制类别中心点 plt.legend(bbox_to_anchor=(1.2,", "# 绘制样本散点图 for ss in Center: if du: plt.scatter(ss[0], ss[1],", "2) sca(X, kresult[0], kresult[2]) plt.subplot(2, 2, 3) sca(X, train.cluster_centers_, skru,", "#-*- coding:utf-8 -*- # &Author AnFany # 引入方法 import Kmeans_AnFany", "typeclass = sorted(list(set(eydata))) for ii in 
range(len(typeclass)): datax = exdata[eydata", "cc in typedict: cio += np.sum(dis(Xdata[typedict[cc]], center[cc])) return cio #", "axis=1) ** 0.5 return usb else: return 0 # 计算最终的分类结果的成本值", "作图显示中文 mpl.rcParams['font.sans-serif'] = ['FangSong'] # 设置中文字体新宋体 mpl.rcParams['axes.unicode_minus'] = False import", "range(len(typeclass)): datax = exdata[eydata == typeclass[ii]] plt.scatter(datax[:, 0], datax[:, -1],", "'b', 'c', 'm'], marker=['o','^','H','s','d','*'], titl = 'AnFany 结果'): du =", "cen) ** 2, axis=1) ** 0.5 return usb else: return", "signdict, co=['r', 'g', 'y', 'b', 'c', 'm'], marker=['o','^','H','s','d','*'], titl =", "dis(sample, center): cen = np.array([center]) sample = np.array(sample) if len(sample)", "for cc in typedict: cio += np.sum(dis(Xdata[typedict[cc]], center[cc])) return cio", "train = sk.fit(X) result = sk.predict(X) skru = K_Sk.trans(result) #绘制算法后的类别的散点图", "for ii in range(len(typeclass)): datax = exdata[eydata == typeclass[ii]] plt.scatter(datax[:,", "1)) plt.title(titl) plt.xlabel('特征1') plt.ylabel('特征2') # 定义欧几里得距离 def dis(sample, center): cen", "# AnFany kresult = K_Af.op_kmeans(X, countcen=6) # Sklearn sk =", "2, axis=1) ** 0.5 return usb else: return 0 #", "else: plt.scatter(ss[0], ss[1], c='k', s=100, marker='8') # 绘制类别中心点 plt.legend(bbox_to_anchor=(1.2, 1))", "plt.xlabel('特征1') plt.ylabel('特征2') # 调用不同的方法 # AnFany kresult = K_Af.op_kmeans(X, countcen=6)", "= 1 for jj in signdict: xdata = Xdata[signdict[jj]] plt.scatter(xdata[:,", "center = {} for kk in typedict: center[kk] = np.mean(Xdata[typedict[kk]],", "make_blobs X, Y = make_blobs(n_samples=600, centers=6, n_features=2) # 绘制散点图 def", "'c', 'm'], marker=['o','^','H','s','d','*'], titl = 'AnFany 结果'): du = 1", "cio = 0 for cc in typedict: cio += np.sum(dis(Xdata[typedict[cc]],", "kresult[0], kresult[2]) plt.subplot(2, 2, 3) sca(X, train.cluster_centers_, skru, titl='Sklearn 结果')", "coding:utf-8 -*- # &Author AnFany # 引入方法 import Kmeans_AnFany as", "ss[1], c='k', s=100, 
marker='8') # 绘制类别中心点 plt.legend(bbox_to_anchor=(1.2, 1)) plt.title(titl) plt.xlabel('特征1')", "绘制散点图 def fig_scatter(exdata, eydata, titl='训练数据散点图', co=['r', 'g', 'k', 'b', 'y',", "Xdata[signdict[jj]] plt.scatter(xdata[:, 0], xdata[:, -1], c=co[jj], s=50, marker=marker[jj], label='%d类' %", "&Author AnFany # 引入方法 import Kmeans_AnFany as K_Af # AnFany", "mpl.rcParams['font.sans-serif'] = ['FangSong'] # 设置中文字体新宋体 mpl.rcParams['axes.unicode_minus'] = False import numpy", "['FangSong'] # 设置中文字体新宋体 mpl.rcParams['axes.unicode_minus'] = False import numpy as np", "= sorted(list(set(eydata))) for ii in range(len(typeclass)): datax = exdata[eydata ==", "du = 0 else: plt.scatter(ss[0], ss[1], c='k', s=100, marker='8') #", "def dis(sample, center): cen = np.array([center]) sample = np.array(sample) if", "typedict): center = {} for kk in typedict: center[kk] =", "AnFany import Kmeans_Sklearn as K_Sk # Sklearn import matplotlib.pyplot as", "# 作图显示中文 mpl.rcParams['font.sans-serif'] = ['FangSong'] # 设置中文字体新宋体 mpl.rcParams['axes.unicode_minus'] = False", "plt.legend(bbox_to_anchor=(1.2, 1)) plt.title(titl) plt.xlabel('特征1') plt.ylabel('特征2') # 定义欧几里得距离 def dis(sample, center):", "numpy as np # 利用sklearn生成数据集 from sklearn.datasets import make_blobs X,", "plt.axis('off') plt.text(0.3, 0.6, 'AnFany 最终的分类成本值为:%.5f'%Cost(X, kresult[2])) plt.text(0.3, 0.3, 'Sklearn 最终的分类成本值为:%.5f'%Cost(X,", "from pylab import mpl # 作图显示中文 mpl.rcParams['font.sans-serif'] = ['FangSong'] #", "K_Af.op_kmeans(X, countcen=6) # Sklearn sk = K_Sk.KMeans(init='k-means++', n_clusters=6, n_init=10) train", "def fig_scatter(exdata, eydata, titl='训练数据散点图', co=['r', 'g', 'k', 'b', 'y', 'm'],", "= K_Sk.KMeans(init='k-means++', n_clusters=6, n_init=10) train = sk.fit(X) result = sk.predict(X)", "n_clusters=6, n_init=10) train = sk.fit(X) result = sk.predict(X) skru =", "kresult[2]) plt.subplot(2, 2, 3) sca(X, train.cluster_centers_, skru, titl='Sklearn 结果') plt.subplot(2,", "Center, signdict, co=['r', 'g', 'y', 'b', 
'c', 'm'], marker=['o','^','H','s','d','*'], titl", "eydata, titl='训练数据散点图', co=['r', 'g', 'k', 'b', 'y', 'm'], marker=['o','^','H','v','d','>']): typeclass", "利用sklearn生成数据集 from sklearn.datasets import make_blobs X, Y = make_blobs(n_samples=600, centers=6,", "# 定义欧几里得距离 def dis(sample, center): cen = np.array([center]) sample =", "jj) # 绘制样本散点图 for ss in Center: if du: plt.scatter(ss[0],", "kresult = K_Af.op_kmeans(X, countcen=6) # Sklearn sk = K_Sk.KMeans(init='k-means++', n_clusters=6,", "kk in typedict: center[kk] = np.mean(Xdata[typedict[kk]], axis=0) # 均值 cio", "return cio # 最终的结果展示 plt.subplot(2, 2, 1) fig_scatter(X, Y) plt.subplot(2,", "as np # 利用sklearn生成数据集 from sklearn.datasets import make_blobs X, Y", "0], datax[:, -1], c=co[ii], s=50, marker=marker[ii]) plt.title(titl) #plt.legend(['%d类'%i for i", "= K_Sk.trans(result) #绘制算法后的类别的散点图 def sca(Xdata, Center, signdict, co=['r', 'g', 'y',", "for ss in Center: if du: plt.scatter(ss[0], ss[1], c='k', s=100,", "np.array(sample) if len(sample) != 0: usb = np.sum((sample - cen)", "import Kmeans_AnFany as K_Af # AnFany import Kmeans_Sklearn as K_Sk", "marker=marker[jj], label='%d类' % jj) # 绘制样本散点图 for ss in Center:", "typeclass], bbox_to_anchor=(1.2, 0.9)) plt.xlabel('特征1') plt.ylabel('特征2') # 调用不同的方法 # AnFany kresult", "定义欧几里得距离 def dis(sample, center): cen = np.array([center]) sample = np.array(sample)", "-*- # &Author AnFany # 引入方法 import Kmeans_AnFany as K_Af", "skru = K_Sk.trans(result) #绘制算法后的类别的散点图 def sca(Xdata, Center, signdict, co=['r', 'g',", "plt.subplot(2, 2, 3) sca(X, train.cluster_centers_, skru, titl='Sklearn 结果') plt.subplot(2, 2,", "sca(Xdata, Center, signdict, co=['r', 'g', 'y', 'b', 'c', 'm'], marker=['o','^','H','s','d','*'],", "plt.subplot(2, 2, 2) sca(X, kresult[0], kresult[2]) plt.subplot(2, 2, 3) sca(X,", "结果') plt.subplot(2, 2, 4) plt.axis('off') plt.text(0.3, 0.6, 'AnFany 最终的分类成本值为:%.5f'%Cost(X, kresult[2]))", "# Sklearn sk = K_Sk.KMeans(init='k-means++', n_clusters=6, n_init=10) train 
= sk.fit(X)", "# AnFany import Kmeans_Sklearn as K_Sk # Sklearn import matplotlib.pyplot", "调用不同的方法 # AnFany kresult = K_Af.op_kmeans(X, countcen=6) # Sklearn sk", "if du: plt.scatter(ss[0], ss[1], c='k', s=100, marker='8', label='类别中心') #绘制类别中心点 du", "signdict: xdata = Xdata[signdict[jj]] plt.scatter(xdata[:, 0], xdata[:, -1], c=co[jj], s=50,", "for i in typeclass], bbox_to_anchor=(1.2, 0.9)) plt.xlabel('特征1') plt.ylabel('特征2') # 调用不同的方法", "cio += np.sum(dis(Xdata[typedict[cc]], center[cc])) return cio # 最终的结果展示 plt.subplot(2, 2,", "1 for jj in signdict: xdata = Xdata[signdict[jj]] plt.scatter(xdata[:, 0],", "1) fig_scatter(X, Y) plt.subplot(2, 2, 2) sca(X, kresult[0], kresult[2]) plt.subplot(2,", "0.6, 'AnFany 最终的分类成本值为:%.5f'%Cost(X, kresult[2])) plt.text(0.3, 0.3, 'Sklearn 最终的分类成本值为:%.5f'%Cost(X, skru)) plt.show()", "X, Y = make_blobs(n_samples=600, centers=6, n_features=2) # 绘制散点图 def fig_scatter(exdata,", "co=['r', 'g', 'k', 'b', 'y', 'm'], marker=['o','^','H','v','d','>']): typeclass = sorted(list(set(eydata)))", "'AnFany 结果'): du = 1 for jj in signdict: xdata", "# 均值 cio = 0 for cc in typedict: cio", "3) sca(X, train.cluster_centers_, skru, titl='Sklearn 结果') plt.subplot(2, 2, 4) plt.axis('off')", "jj in signdict: xdata = Xdata[signdict[jj]] plt.scatter(xdata[:, 0], xdata[:, -1],", "= np.mean(Xdata[typedict[kk]], axis=0) # 均值 cio = 0 for cc", "from sklearn.datasets import make_blobs X, Y = make_blobs(n_samples=600, centers=6, n_features=2)", "'m'], marker=['o','^','H','s','d','*'], titl = 'AnFany 结果'): du = 1 for", "titl='训练数据散点图', co=['r', 'g', 'k', 'b', 'y', 'm'], marker=['o','^','H','v','d','>']): typeclass =", "Cost(Xdata, typedict): center = {} for kk in typedict: center[kk]", "else: return 0 # 计算最终的分类结果的成本值 def Cost(Xdata, typedict): center =", "plt.scatter(ss[0], ss[1], c='k', s=100, marker='8') # 绘制类别中心点 plt.legend(bbox_to_anchor=(1.2, 1)) plt.title(titl)", "绘制样本散点图 for ss in Center: if du: plt.scatter(ss[0], ss[1], c='k',", "= 0 else: 
plt.scatter(ss[0], ss[1], c='k', s=100, marker='8') # 绘制类别中心点", "最终的结果展示 plt.subplot(2, 2, 1) fig_scatter(X, Y) plt.subplot(2, 2, 2) sca(X,", "= 0 for cc in typedict: cio += np.sum(dis(Xdata[typedict[cc]], center[cc]))", "plt.subplot(2, 2, 1) fig_scatter(X, Y) plt.subplot(2, 2, 2) sca(X, kresult[0],", "AnFany kresult = K_Af.op_kmeans(X, countcen=6) # Sklearn sk = K_Sk.KMeans(init='k-means++',", "= Xdata[signdict[jj]] plt.scatter(xdata[:, 0], xdata[:, -1], c=co[jj], s=50, marker=marker[jj], label='%d类'", "0], xdata[:, -1], c=co[jj], s=50, marker=marker[jj], label='%d类' % jj) #", "#绘制类别中心点 du = 0 else: plt.scatter(ss[0], ss[1], c='k', s=100, marker='8')", "in typedict: center[kk] = np.mean(Xdata[typedict[kk]], axis=0) # 均值 cio =", "+= np.sum(dis(Xdata[typedict[cc]], center[cc])) return cio # 最终的结果展示 plt.subplot(2, 2, 1)", "2, 3) sca(X, train.cluster_centers_, skru, titl='Sklearn 结果') plt.subplot(2, 2, 4)", "titl='Sklearn 结果') plt.subplot(2, 2, 4) plt.axis('off') plt.text(0.3, 0.6, 'AnFany 最终的分类成本值为:%.5f'%Cost(X,", "Kmeans_AnFany as K_Af # AnFany import Kmeans_Sklearn as K_Sk #", "c=co[ii], s=50, marker=marker[ii]) plt.title(titl) #plt.legend(['%d类'%i for i in typeclass], bbox_to_anchor=(1.2,", "as plt from pylab import mpl # 作图显示中文 mpl.rcParams['font.sans-serif'] =", "marker=['o','^','H','v','d','>']): typeclass = sorted(list(set(eydata))) for ii in range(len(typeclass)): datax =", "cio # 最终的结果展示 plt.subplot(2, 2, 1) fig_scatter(X, Y) plt.subplot(2, 2,", "sk.fit(X) result = sk.predict(X) skru = K_Sk.trans(result) #绘制算法后的类别的散点图 def sca(Xdata,", "usb else: return 0 # 计算最终的分类结果的成本值 def Cost(Xdata, typedict): center", "du = 1 for jj in signdict: xdata = Xdata[signdict[jj]]", "# 利用sklearn生成数据集 from sklearn.datasets import make_blobs X, Y = make_blobs(n_samples=600,", "label='%d类' % jj) # 绘制样本散点图 for ss in Center: if", "ii in range(len(typeclass)): datax = exdata[eydata == typeclass[ii]] plt.scatter(datax[:, 0],", "s=50, marker=marker[jj], label='%d类' % jj) # 绘制样本散点图 for 
ss in", "# 最终的结果展示 plt.subplot(2, 2, 1) fig_scatter(X, Y) plt.subplot(2, 2, 2)", "center[cc])) return cio # 最终的结果展示 plt.subplot(2, 2, 1) fig_scatter(X, Y)", "plt.xlabel('特征1') plt.ylabel('特征2') # 定义欧几里得距离 def dis(sample, center): cen = np.array([center])", "plt from pylab import mpl # 作图显示中文 mpl.rcParams['font.sans-serif'] = ['FangSong']", "!= 0: usb = np.sum((sample - cen) ** 2, axis=1)", "in Center: if du: plt.scatter(ss[0], ss[1], c='k', s=100, marker='8', label='类别中心')", "cen = np.array([center]) sample = np.array(sample) if len(sample) != 0:", "as K_Sk # Sklearn import matplotlib.pyplot as plt from pylab", "sample = np.array(sample) if len(sample) != 0: usb = np.sum((sample", "# &Author AnFany # 引入方法 import Kmeans_AnFany as K_Af #", "= 'AnFany 结果'): du = 1 for jj in signdict:", "== typeclass[ii]] plt.scatter(datax[:, 0], datax[:, -1], c=co[ii], s=50, marker=marker[ii]) plt.title(titl)", "** 2, axis=1) ** 0.5 return usb else: return 0", "K_Sk.trans(result) #绘制算法后的类别的散点图 def sca(Xdata, Center, signdict, co=['r', 'g', 'y', 'b',", "for jj in signdict: xdata = Xdata[signdict[jj]] plt.scatter(xdata[:, 0], xdata[:,", "typedict: cio += np.sum(dis(Xdata[typedict[cc]], center[cc])) return cio # 最终的结果展示 plt.subplot(2,", "4) plt.axis('off') plt.text(0.3, 0.6, 'AnFany 最终的分类成本值为:%.5f'%Cost(X, kresult[2])) plt.text(0.3, 0.3, 'Sklearn", "datax[:, -1], c=co[ii], s=50, marker=marker[ii]) plt.title(titl) #plt.legend(['%d类'%i for i in", "countcen=6) # Sklearn sk = K_Sk.KMeans(init='k-means++', n_clusters=6, n_init=10) train =", "for kk in typedict: center[kk] = np.mean(Xdata[typedict[kk]], axis=0) # 均值", "False import numpy as np # 利用sklearn生成数据集 from sklearn.datasets import", "c='k', s=100, marker='8') # 绘制类别中心点 plt.legend(bbox_to_anchor=(1.2, 1)) plt.title(titl) plt.xlabel('特征1') plt.ylabel('特征2')", "= exdata[eydata == typeclass[ii]] plt.scatter(datax[:, 0], datax[:, -1], c=co[ii], s=50,", "sk = K_Sk.KMeans(init='k-means++', n_clusters=6, n_init=10) train = sk.fit(X) result =", 
"'g', 'y', 'b', 'c', 'm'], marker=['o','^','H','s','d','*'], titl = 'AnFany 结果'):", "center[kk] = np.mean(Xdata[typedict[kk]], axis=0) # 均值 cio = 0 for", "sklearn.datasets import make_blobs X, Y = make_blobs(n_samples=600, centers=6, n_features=2) #", "import make_blobs X, Y = make_blobs(n_samples=600, centers=6, n_features=2) # 绘制散点图", "plt.scatter(ss[0], ss[1], c='k', s=100, marker='8', label='类别中心') #绘制类别中心点 du = 0", "label='类别中心') #绘制类别中心点 du = 0 else: plt.scatter(ss[0], ss[1], c='k', s=100,", "np.sum(dis(Xdata[typedict[cc]], center[cc])) return cio # 最终的结果展示 plt.subplot(2, 2, 1) fig_scatter(X,", "= np.sum((sample - cen) ** 2, axis=1) ** 0.5 return", "plt.scatter(xdata[:, 0], xdata[:, -1], c=co[jj], s=50, marker=marker[jj], label='%d类' % jj)", "2, 4) plt.axis('off') plt.text(0.3, 0.6, 'AnFany 最终的分类成本值为:%.5f'%Cost(X, kresult[2])) plt.text(0.3, 0.3,", "fig_scatter(exdata, eydata, titl='训练数据散点图', co=['r', 'g', 'k', 'b', 'y', 'm'], marker=['o','^','H','v','d','>']):", "mpl.rcParams['axes.unicode_minus'] = False import numpy as np # 利用sklearn生成数据集 from", "# Sklearn import matplotlib.pyplot as plt from pylab import mpl", "import numpy as np # 利用sklearn生成数据集 from sklearn.datasets import make_blobs", "c=co[jj], s=50, marker=marker[jj], label='%d类' % jj) # 绘制样本散点图 for ss", "= np.array([center]) sample = np.array(sample) if len(sample) != 0: usb", "np.sum((sample - cen) ** 2, axis=1) ** 0.5 return usb", "= np.array(sample) if len(sample) != 0: usb = np.sum((sample -", "marker='8') # 绘制类别中心点 plt.legend(bbox_to_anchor=(1.2, 1)) plt.title(titl) plt.xlabel('特征1') plt.ylabel('特征2') # 定义欧几里得距离", "{} for kk in typedict: center[kk] = np.mean(Xdata[typedict[kk]], axis=0) #", "Sklearn import matplotlib.pyplot as plt from pylab import mpl #", "plt.title(titl) #plt.legend(['%d类'%i for i in typeclass], bbox_to_anchor=(1.2, 0.9)) plt.xlabel('特征1') plt.ylabel('特征2')", "usb = np.sum((sample - cen) ** 2, axis=1) ** 0.5", "% jj) # 绘制样本散点图 for ss in Center: if du:", "plt.title(titl) 
plt.xlabel('特征1') plt.ylabel('特征2') # 定义欧几里得距离 def dis(sample, center): cen =", "co=['r', 'g', 'y', 'b', 'c', 'm'], marker=['o','^','H','s','d','*'], titl = 'AnFany", "marker='8', label='类别中心') #绘制类别中心点 du = 0 else: plt.scatter(ss[0], ss[1], c='k',", "K_Sk.KMeans(init='k-means++', n_clusters=6, n_init=10) train = sk.fit(X) result = sk.predict(X) skru", "= ['FangSong'] # 设置中文字体新宋体 mpl.rcParams['axes.unicode_minus'] = False import numpy as", "in range(len(typeclass)): datax = exdata[eydata == typeclass[ii]] plt.scatter(datax[:, 0], datax[:,", "np.mean(Xdata[typedict[kk]], axis=0) # 均值 cio = 0 for cc in", "'k', 'b', 'y', 'm'], marker=['o','^','H','v','d','>']): typeclass = sorted(list(set(eydata))) for ii", "# 绘制散点图 def fig_scatter(exdata, eydata, titl='训练数据散点图', co=['r', 'g', 'k', 'b',", "0: usb = np.sum((sample - cen) ** 2, axis=1) **", "plt.ylabel('特征2') # 调用不同的方法 # AnFany kresult = K_Af.op_kmeans(X, countcen=6) #", "np.array([center]) sample = np.array(sample) if len(sample) != 0: usb =", "make_blobs(n_samples=600, centers=6, n_features=2) # 绘制散点图 def fig_scatter(exdata, eydata, titl='训练数据散点图', co=['r',", "Kmeans_Sklearn as K_Sk # Sklearn import matplotlib.pyplot as plt from", "#plt.legend(['%d类'%i for i in typeclass], bbox_to_anchor=(1.2, 0.9)) plt.xlabel('特征1') plt.ylabel('特征2') #", "typeclass[ii]] plt.scatter(datax[:, 0], datax[:, -1], c=co[ii], s=50, marker=marker[ii]) plt.title(titl) #plt.legend(['%d类'%i", "i in typeclass], bbox_to_anchor=(1.2, 0.9)) plt.xlabel('特征1') plt.ylabel('特征2') # 调用不同的方法 #", "= K_Af.op_kmeans(X, countcen=6) # Sklearn sk = K_Sk.KMeans(init='k-means++', n_clusters=6, n_init=10)", "in signdict: xdata = Xdata[signdict[jj]] plt.scatter(xdata[:, 0], xdata[:, -1], c=co[jj],", "train.cluster_centers_, skru, titl='Sklearn 结果') plt.subplot(2, 2, 4) plt.axis('off') plt.text(0.3, 0.6,", "import matplotlib.pyplot as plt from pylab import mpl # 作图显示中文", "matplotlib.pyplot as plt from pylab import mpl # 作图显示中文 mpl.rcParams['font.sans-serif']", "= {} 
for kk in typedict: center[kk] = np.mean(Xdata[typedict[kk]], axis=0)", "xdata[:, -1], c=co[jj], s=50, marker=marker[jj], label='%d类' % jj) # 绘制样本散点图", "2, 2) sca(X, kresult[0], kresult[2]) plt.subplot(2, 2, 3) sca(X, train.cluster_centers_,", "计算最终的分类结果的成本值 def Cost(Xdata, typedict): center = {} for kk in", "= make_blobs(n_samples=600, centers=6, n_features=2) # 绘制散点图 def fig_scatter(exdata, eydata, titl='训练数据散点图',", "Y) plt.subplot(2, 2, 2) sca(X, kresult[0], kresult[2]) plt.subplot(2, 2, 3)", "len(sample) != 0: usb = np.sum((sample - cen) ** 2,", "du: plt.scatter(ss[0], ss[1], c='k', s=100, marker='8', label='类别中心') #绘制类别中心点 du =", "as K_Af # AnFany import Kmeans_Sklearn as K_Sk # Sklearn", "'y', 'm'], marker=['o','^','H','v','d','>']): typeclass = sorted(list(set(eydata))) for ii in range(len(typeclass)):", "in typeclass], bbox_to_anchor=(1.2, 0.9)) plt.xlabel('特征1') plt.ylabel('特征2') # 调用不同的方法 # AnFany", "axis=0) # 均值 cio = 0 for cc in typedict:", "sorted(list(set(eydata))) for ii in range(len(typeclass)): datax = exdata[eydata == typeclass[ii]]", "# 绘制类别中心点 plt.legend(bbox_to_anchor=(1.2, 1)) plt.title(titl) plt.xlabel('特征1') plt.ylabel('特征2') # 定义欧几里得距离 def", "in typedict: cio += np.sum(dis(Xdata[typedict[cc]], center[cc])) return cio # 最终的结果展示", "2, 1) fig_scatter(X, Y) plt.subplot(2, 2, 2) sca(X, kresult[0], kresult[2])", "def sca(Xdata, Center, signdict, co=['r', 'g', 'y', 'b', 'c', 'm'],", "n_init=10) train = sk.fit(X) result = sk.predict(X) skru = K_Sk.trans(result)", "= False import numpy as np # 利用sklearn生成数据集 from sklearn.datasets", "datax = exdata[eydata == typeclass[ii]] plt.scatter(datax[:, 0], datax[:, -1], c=co[ii],", "xdata = Xdata[signdict[jj]] plt.scatter(xdata[:, 0], xdata[:, -1], c=co[jj], s=50, marker=marker[jj],", "np # 利用sklearn生成数据集 from sklearn.datasets import make_blobs X, Y =", "#绘制算法后的类别的散点图 def sca(Xdata, Center, signdict, co=['r', 'g', 'y', 'b', 'c',", "ss in Center: if du: plt.scatter(ss[0], ss[1], c='k', s=100, marker='8',", 
"结果'): du = 1 for jj in signdict: xdata =", "0.5 return usb else: return 0 # 计算最终的分类结果的成本值 def Cost(Xdata,", "0 for cc in typedict: cio += np.sum(dis(Xdata[typedict[cc]], center[cc])) return", "plt.scatter(datax[:, 0], datax[:, -1], c=co[ii], s=50, marker=marker[ii]) plt.title(titl) #plt.legend(['%d类'%i for", "sca(X, kresult[0], kresult[2]) plt.subplot(2, 2, 3) sca(X, train.cluster_centers_, skru, titl='Sklearn", "sca(X, train.cluster_centers_, skru, titl='Sklearn 结果') plt.subplot(2, 2, 4) plt.axis('off') plt.text(0.3,", "'m'], marker=['o','^','H','v','d','>']): typeclass = sorted(list(set(eydata))) for ii in range(len(typeclass)): datax", "center): cen = np.array([center]) sample = np.array(sample) if len(sample) !=", "exdata[eydata == typeclass[ii]] plt.scatter(datax[:, 0], datax[:, -1], c=co[ii], s=50, marker=marker[ii])", "'g', 'k', 'b', 'y', 'm'], marker=['o','^','H','v','d','>']): typeclass = sorted(list(set(eydata))) for", "# 引入方法 import Kmeans_AnFany as K_Af # AnFany import Kmeans_Sklearn", "K_Sk # Sklearn import matplotlib.pyplot as plt from pylab import", "'y', 'b', 'c', 'm'], marker=['o','^','H','s','d','*'], titl = 'AnFany 结果'): du", "s=50, marker=marker[ii]) plt.title(titl) #plt.legend(['%d类'%i for i in typeclass], bbox_to_anchor=(1.2, 0.9))", "c='k', s=100, marker='8', label='类别中心') #绘制类别中心点 du = 0 else: plt.scatter(ss[0],", "AnFany # 引入方法 import Kmeans_AnFany as K_Af # AnFany import", "marker=['o','^','H','s','d','*'], titl = 'AnFany 结果'): du = 1 for jj", "-1], c=co[jj], s=50, marker=marker[jj], label='%d类' % jj) # 绘制样本散点图 for", "'b', 'y', 'm'], marker=['o','^','H','v','d','>']): typeclass = sorted(list(set(eydata))) for ii in", "s=100, marker='8') # 绘制类别中心点 plt.legend(bbox_to_anchor=(1.2, 1)) plt.title(titl) plt.xlabel('特征1') plt.ylabel('特征2') #", "= sk.predict(X) skru = K_Sk.trans(result) #绘制算法后的类别的散点图 def sca(Xdata, Center, signdict,", "0 # 计算最终的分类结果的成本值 def Cost(Xdata, typedict): center = {} for", "-1], c=co[ii], s=50, marker=marker[ii]) 
plt.title(titl) #plt.legend(['%d类'%i for i in typeclass],", "marker=marker[ii]) plt.title(titl) #plt.legend(['%d类'%i for i in typeclass], bbox_to_anchor=(1.2, 0.9)) plt.xlabel('特征1')", "mpl # 作图显示中文 mpl.rcParams['font.sans-serif'] = ['FangSong'] # 设置中文字体新宋体 mpl.rcParams['axes.unicode_minus'] =", "s=100, marker='8', label='类别中心') #绘制类别中心点 du = 0 else: plt.scatter(ss[0], ss[1],", "return usb else: return 0 # 计算最终的分类结果的成本值 def Cost(Xdata, typedict):", "0.9)) plt.xlabel('特征1') plt.ylabel('特征2') # 调用不同的方法 # AnFany kresult = K_Af.op_kmeans(X,", "result = sk.predict(X) skru = K_Sk.trans(result) #绘制算法后的类别的散点图 def sca(Xdata, Center,", "ss[1], c='k', s=100, marker='8', label='类别中心') #绘制类别中心点 du = 0 else:", "绘制类别中心点 plt.legend(bbox_to_anchor=(1.2, 1)) plt.title(titl) plt.xlabel('特征1') plt.ylabel('特征2') # 定义欧几里得距离 def dis(sample,", "centers=6, n_features=2) # 绘制散点图 def fig_scatter(exdata, eydata, titl='训练数据散点图', co=['r', 'g',", "plt.subplot(2, 2, 4) plt.axis('off') plt.text(0.3, 0.6, 'AnFany 最终的分类成本值为:%.5f'%Cost(X, kresult[2])) plt.text(0.3,", "bbox_to_anchor=(1.2, 0.9)) plt.xlabel('特征1') plt.ylabel('特征2') # 调用不同的方法 # AnFany kresult =", "if len(sample) != 0: usb = np.sum((sample - cen) **", "pylab import mpl # 作图显示中文 mpl.rcParams['font.sans-serif'] = ['FangSong'] # 设置中文字体新宋体", "fig_scatter(X, Y) plt.subplot(2, 2, 2) sca(X, kresult[0], kresult[2]) plt.subplot(2, 2,", "def Cost(Xdata, typedict): center = {} for kk in typedict:", "sk.predict(X) skru = K_Sk.trans(result) #绘制算法后的类别的散点图 def sca(Xdata, Center, signdict, co=['r',", "import mpl # 作图显示中文 mpl.rcParams['font.sans-serif'] = ['FangSong'] # 设置中文字体新宋体 mpl.rcParams['axes.unicode_minus']" ]
[ "size:\\n\")) if(dir == 'x'): x += step elif(dir == 'y'):", "current_bottle_orig_pos = get_object_position(bottle) # real_world # current_bottle_orig_pos = Real_poses(bottle) elif(cmd", "while(b_n not in [1,2,3,4,5,6]): b_n = int(raw_input(\"Enter bottle number from", "from delete_models import delete_all, delete_model def control_panel(): robot = ik.MoveGroupPythonIntefaceTutorial()", "elif(cmd == 'pgr'): # print gripper postiion pos = robot.get_gripper_pose()", "str(bottle.split('_')[1]) + \" was spawned\") elif(cmd == 'exit'): # exit", "robot.get_gripper_pose() print(\"Current gripper coordinates: \" + str(pos)) elif(cmd == 'parm'):", "'spawn'): spawn_model(bottle) print(\"Bottle \" + str(bottle.split('_')[1]) + \" was spawned\")", "t2 = raw_input(\"Enter theta_2: \") t3 = raw_input(\"Enter theta_3: \")", "robot.add_box() robot.attach_object('box') attached_objects = robot.scene.get_attached_objects([bottle]) print(\"Attached objects: \" + str(attached_objects))", "float(raw_input(\"Enter step size:\\n\")) if(dir == 'x'): x += step elif(dir", "not in ['x','y','z']): dir = raw_input(\"Enter coord: x,y or z:\\n\")", "'gtc'): # go to cup # simulation x,y,z = get_object_position('cup_1')", "on pouring angle robot.rotate_gripper(angle = 1) rospy.sleep(1.5) robot.rotate_gripper(angle = 0)", "setj, att, box,\\n del, dela, spawn, exit:\\n\") if(cmd == 'open'):", "print gripper postiion pos = robot.get_gripper_pose() print(\"Current gripper coordinates: \"", "+ str(pos)) elif(cmd == 'pj'): # print arm joints current_joints", "!= ''): joints[3] = float(t3) joints[4] = 0 robot.set_joints(joints) elif(cmd", "from spawn_models import reset_model_position, reset_all, spawn_model, spawn_all_models from delete_models import", "bottle b_n = int(raw_input(\"Enter bottle number from 1 to 6\\n\"))", "import time from constants import * from spawn_models import reset_model_position,", "0) elif(cmd == 'cb'): # change bottle b_n = int(raw_input(\"Enter", "raw_input(\"Enter theta_3: 
\") if(t1 != ''): joints[1] = float(t1) if(t2", "was deleted\") elif(cmd == 'dela'): delete_all() print(\"All models were deleted\")", "get_object_position(bottle) # real_world # current_bottle_orig_pos = Real_poses(bottle) elif(cmd == 'rb'):", "6\\n\")) while(b_n not in [1,2,3,4,5,6]): b_n = int(raw_input(\"Enter bottle number", "command:\\n open, close, init,\\n gtb, hover, gtc, move,\\n pour, cb,", "spawn_model, spawn_all_models from delete_models import delete_all, delete_model def control_panel(): robot", "\"\"\" import pddl_solver as pddl import ik import rospy from", "coord: x,y or z:\\n\") while(dir not in ['x','y','z']): dir =", "str(attached_objects)) elif(cmd == 'box'): robot.add_box() robot.attach_object('box') attached_objects = robot.scene.get_attached_objects([bottle]) print(\"Attached", "import get_object_position import time from constants import * from spawn_models", "import rospy from get_object_position import get_object_position import time from constants", "positions reset_all() elif(cmd == 'pgr'): # print gripper postiion pos", "current_bottle_orig_pos[-1] += BZS while(True): print() cmd = raw_input(\"Enter command:\\n open,", "pose robot.go_to_init_state() elif(cmd == 'gtb'): # go to bottle x,y,z", "!= ''): joints[1] = float(t1) if(t2 != ''): joints[2] =", "theta_0\")) # We don't want to change the arm direction", "= 0) elif(cmd == 'cb'): # change bottle b_n =", "z:\\n\") step = float(raw_input(\"Enter step size:\\n\")) if(dir == 'x'): x", "Real_world_PourPos[cup] # x,y,z = pos robot.go_to_xyz(x, y, CUO) elif(cmd ==", "if(t2 != ''): joints[2] = float(t2) if(t3 != ''): joints[3]", "= float(t2) if(t3 != ''): joints[3] = float(t3) joints[4] =", "'init'): # go to initial pose robot.go_to_init_state() elif(cmd == 'gtb'):", "x,y,z = current_bottle_orig_pos robot.go_to_xyz(x, y, BUO) elif(cmd == 'gtc'): #", "= int(raw_input(\"Enter bottle number from 1 to 6\\n\")) while(b_n not", "\") t2 = raw_input(\"Enter theta_2: \") t3 = raw_input(\"Enter 
theta_3:", "hover, gtc, move,\\n pour, cb, rb, ra,\\n pgr, parm, pj,\\n", "from 1 to 6\\n\")) bottle = 'bottle_' + str(b_n) #", "goal = float(raw_input(\"Enter closing goal in range [-0.12; 0]:\\n\")) robot.close_gripper(goal)", "set robot joint angles joints = robot.get_arm_joints() # joints[0] =", "gripper on pouring angle robot.rotate_gripper(angle = 1) rospy.sleep(1.5) robot.rotate_gripper(angle =", "'parm'): # print arm postiion pos = robot.get_arm_pose() print(\"Current arm", "number from 1 to 6\\n\")) bottle = 'bottle_' + str(b_n)", "float(raw_input(\"Enter closing goal in range [-0.12; 0]:\\n\")) robot.close_gripper(goal) elif(cmd ==", "= float(raw_input(\"Enter step size:\\n\")) if(dir == 'x'): x += step", "-0.12): goal = float(raw_input(\"Enter closing goal in range [-0.12; 0]:\\n\"))", "x,y,z = robot.get_arm_pose() dir = raw_input(\"Enter coord: x,y or z:\\n\")", "# go to cup x,y,z = robot.get_arm_pose() dir = raw_input(\"Enter", "= Real_world_PourPos[cup] # x,y,z = pos robot.go_to_xyz(x, y, CUO) elif(cmd", "\") if(h == \"\"): h = BZS else: h =", "over the bottle x,y,z = current_bottle_orig_pos robot.go_to_xyz(x, y, BUO) elif(cmd", "def control_panel(): robot = ik.MoveGroupPythonIntefaceTutorial() # robot.go_to_init_state() # robot.open_gripper() bottle", "+ h) elif(cmd == 'hover'): # hover over the bottle", "robot.open_gripper() elif(cmd == 'close'): # close the gripper goal =", "= raw_input(\"Set z level: \") if(h == \"\"): h =", "box,\\n del, dela, spawn, exit:\\n\") if(cmd == 'open'): # open", "'y'): y += step elif(dir == 'z'): z += step", "gripper postiion pos = robot.get_gripper_pose() print(\"Current gripper coordinates: \" +", "step size:\\n\")) if(dir == 'x'): x += step elif(dir ==", "+ \" was deleted\") elif(cmd == 'dela'): delete_all() print(\"All models", "= float(h) robot.go_to_xyz(x, y, z + h) elif(cmd == 'hover'):", "level: \") if(h == \"\"): h = BZS else: h", "delete_model(bottle) print(\"Bottle \" + str(bottle.split('_')[1]) + \" was 
deleted\") elif(cmd", "to cup # simulation x,y,z = get_object_position('cup_1') # real_world #", "or z:\\n\") while(dir not in ['x','y','z']): dir = raw_input(\"Enter coord:", "\" + str(attached_objects)) elif(cmd == 'box'): robot.add_box() robot.attach_object('box') attached_objects =", "== 'dela'): delete_all() print(\"All models were deleted\") elif(cmd == 'spawn'):", "ik import rospy from get_object_position import get_object_position import time from", "= raw_input(\"Enter coord: x,y or z:\\n\") while(dir not in ['x','y','z']):", "= float(t3) joints[4] = 0 robot.set_joints(joints) elif(cmd == 'att'): #", "# go to initial pose robot.go_to_init_state() elif(cmd == 'gtb'): #", "raw_input(\"Set z level: \") if(h == \"\"): h = BZS", "Real_poses(bottle) elif(cmd == 'rb'): # reset bottle position reset_model_position(bottle) elif(cmd", "# change bottle b_n = int(raw_input(\"Enter bottle number from 1", "6\\n\")) bottle = 'bottle_' + str(b_n) # simulatiuon current_bottle_orig_pos =", "= get_object_position('cup_1') # real_world # pos, angle = Real_world_PourPos[cup] #", "or z:\\n\") step = float(raw_input(\"Enter step size:\\n\")) if(dir == 'x'):", "spawn_all_models from delete_models import delete_all, delete_model def control_panel(): robot =", "t1 = raw_input(\"Enter theta_1: \") t2 = raw_input(\"Enter theta_2: \")", "str(current_joints)) elif(cmd == 'setj'): # set robot joint angles joints", "z) elif(cmd == 'pour'): # turn gripper on pouring angle", "gripper coordinates: \" + str(pos)) elif(cmd == 'parm'): # print", "elif(cmd == 'cb'): # change bottle b_n = int(raw_input(\"Enter bottle", "range [-0.12; 0]:\\n\")) robot.close_gripper(goal) elif(cmd == 'init'): # go to", "robot.set_joints(joints) elif(cmd == 'att'): # attaches object to the gripper", "y, z) elif(cmd == 'pour'): # turn gripper on pouring", "theta_1: \") t2 = raw_input(\"Enter theta_2: \") t3 = raw_input(\"Enter", "CUO) elif(cmd == 'move'): # go to cup x,y,z =", "joint angles joints = 
robot.get_arm_joints() # joints[0] = float(raw_input(\"Enter theta_0\"))", "if(cmd == 'open'): # open the gripper robot.open_gripper() elif(cmd ==", "> 0 or goal < -0.12): goal = float(raw_input(\"Enter closing", "if(t3 != ''): joints[3] = float(t3) joints[4] = 0 robot.set_joints(joints)", "del, dela, spawn, exit:\\n\") if(cmd == 'open'): # open the", "go to cup # simulation x,y,z = get_object_position('cup_1') # real_world", "deleted\") elif(cmd == 'spawn'): spawn_model(bottle) print(\"Bottle \" + str(bottle.split('_')[1]) +", "elif(cmd == 'exit'): # exit control panel script print('Finish performance')", "elif(dir == 'z'): z += step robot.go_to_xyz(x, y, z) elif(cmd", "postiion pos = robot.get_arm_pose() print(\"Current arm coordinates: \" + str(pos))", "reset bottle position reset_model_position(bottle) elif(cmd == 'ra'): # reset all", "== 'spawn'): spawn_model(bottle) print(\"Bottle \" + str(bottle.split('_')[1]) + \" was", "theta_3: \") if(t1 != ''): joints[1] = float(t1) if(t2 !=", "if(t1 != ''): joints[1] = float(t1) if(t2 != ''): joints[2]", "x,y or z:\\n\") while(dir not in ['x','y','z']): dir = raw_input(\"Enter", "'rb'): # reset bottle position reset_model_position(bottle) elif(cmd == 'ra'): #", "rospy from get_object_position import get_object_position import time from constants import", "pos = robot.get_arm_pose() print(\"Current arm coordinates: \" + str(pos)) elif(cmd", "want to change the arm direction t1 = raw_input(\"Enter theta_1:", "y += step elif(dir == 'z'): z += step robot.go_to_xyz(x,", "x,y,z = pos robot.go_to_xyz(x, y, CUO) elif(cmd == 'move'): #", "attaches object to the gripper robot.attach_object(bottle) attached_objects = robot.scene.get_attached_objects([bottle]) print(\"Attached", "robot = ik.MoveGroupPythonIntefaceTutorial() # robot.go_to_init_state() # robot.open_gripper() bottle = 'bottle_1'", "python \"\"\" Control panel file \"\"\" import pddl_solver as pddl", "# open the gripper robot.open_gripper() elif(cmd == 'close'): # 
close", "import delete_all, delete_model def control_panel(): robot = ik.MoveGroupPythonIntefaceTutorial() # robot.go_to_init_state()", "robot.attach_object(bottle) attached_objects = robot.scene.get_attached_objects([bottle]) print(\"Attached objects: \" + str(attached_objects)) elif(cmd", "current_bottle_orig_pos h = raw_input(\"Set z level: \") if(h == \"\"):", "hover over the bottle x,y,z = current_bottle_orig_pos robot.go_to_xyz(x, y, BUO)", "dir = raw_input(\"Enter coord: x,y or z:\\n\") while(dir not in", "gtc, move,\\n pour, cb, rb, ra,\\n pgr, parm, pj,\\n setj,", "= float(raw_input(\"Enter theta_0\")) # We don't want to change the", "bottle = 'bottle_1' # simulatiuon current_bottle_orig_pos = get_object_position(bottle) # real_world", "= pos robot.go_to_xyz(x, y, CUO) elif(cmd == 'move'): # go", "gripper robot.attach_object(bottle) attached_objects = robot.scene.get_attached_objects([bottle]) print(\"Attached objects: \" + str(attached_objects))", "== 'box'): robot.add_box() robot.attach_object('box') attached_objects = robot.scene.get_attached_objects([bottle]) print(\"Attached objects: \"", "pos robot.go_to_xyz(x, y, CUO) elif(cmd == 'move'): # go to", "== 'setj'): # set robot joint angles joints = robot.get_arm_joints()", "# robot.go_to_init_state() # robot.open_gripper() bottle = 'bottle_1' # simulatiuon current_bottle_orig_pos", "z level: \") if(h == \"\"): h = BZS else:", "# x,y,z = pos robot.go_to_xyz(x, y, CUO) elif(cmd == 'move'):", "objects: \" + str(attached_objects)) elif(cmd == 'del'): delete_model(bottle) print(\"Bottle \"", "in [1,2,3,4,5,6]): b_n = int(raw_input(\"Enter bottle number from 1 to", "print arm postiion pos = robot.get_arm_pose() print(\"Current arm coordinates: \"", "robot.go_to_xyz(x, y, CUO) elif(cmd == 'move'): # go to cup", "the arm direction t1 = raw_input(\"Enter theta_1: \") t2 =", "control_panel(): robot = ik.MoveGroupPythonIntefaceTutorial() # robot.go_to_init_state() # robot.open_gripper() bottle =", "# print arm 
postiion pos = robot.get_arm_pose() print(\"Current arm coordinates:", "the gripper robot.attach_object(bottle) attached_objects = robot.scene.get_attached_objects([bottle]) print(\"Attached objects: \" +", "elif(cmd == 'dela'): delete_all() print(\"All models were deleted\") elif(cmd ==", "ik.MoveGroupPythonIntefaceTutorial() # robot.go_to_init_state() # robot.open_gripper() bottle = 'bottle_1' # simulatiuon", "= 1) rospy.sleep(1.5) robot.rotate_gripper(angle = 0) elif(cmd == 'cb'): #", "go to cup x,y,z = robot.get_arm_pose() dir = raw_input(\"Enter coord:", "gripper robot.open_gripper() elif(cmd == 'close'): # close the gripper goal", "coord: x,y or z:\\n\") step = float(raw_input(\"Enter step size:\\n\")) if(dir", "bottle = 'bottle_' + str(b_n) # simulatiuon current_bottle_orig_pos = get_object_position(bottle)", "elif(cmd == 'setj'): # set robot joint angles joints =", "= current_bottle_orig_pos h = raw_input(\"Set z level: \") if(h ==", "x,y,z = get_object_position('cup_1') # real_world # pos, angle = Real_world_PourPos[cup]", "simulation x,y,z = get_object_position('cup_1') # real_world # pos, angle =", "script print('Finish performance') return else: print('Wrong command') if __name__ ==", "robot joint angles joints = robot.get_arm_joints() # joints[0] = float(raw_input(\"Enter", "pouring angle robot.rotate_gripper(angle = 1) rospy.sleep(1.5) robot.rotate_gripper(angle = 0) elif(cmd", "'open'): # open the gripper robot.open_gripper() elif(cmd == 'close'): #", "joints poistion: \" + str(current_joints)) elif(cmd == 'setj'): # set", "parm, pj,\\n setj, att, box,\\n del, dela, spawn, exit:\\n\") if(cmd", "z += step robot.go_to_xyz(x, y, z) elif(cmd == 'pour'): #", "import ik import rospy from get_object_position import get_object_position import time", "= Real_poses(bottle) elif(cmd == 'rb'): # reset bottle position reset_model_position(bottle)", "change the arm direction t1 = raw_input(\"Enter theta_1: \") t2", "open the gripper robot.open_gripper() elif(cmd 
== 'close'): # close the", "= get_object_position(bottle) # real_world # current_bottle_orig_pos = Real_poses(bottle) # current_bottle_orig_pos[-1]", "ra,\\n pgr, parm, pj,\\n setj, att, box,\\n del, dela, spawn,", "joints current_joints = robot.get_arm_joints() print(\"Current joints poistion: \" + str(current_joints))", "= get_object_position(bottle) # real_world # current_bottle_orig_pos = Real_poses(bottle) elif(cmd ==", "position reset_model_position(bottle) elif(cmd == 'ra'): # reset all models positions", "== 'init'): # go to initial pose robot.go_to_init_state() elif(cmd ==", "elif(cmd == 'close'): # close the gripper goal = float(raw_input(\"Enter", "'del'): delete_model(bottle) print(\"Bottle \" + str(bottle.split('_')[1]) + \" was deleted\")", "== 'gtb'): # go to bottle x,y,z = current_bottle_orig_pos h", "'pj'): # print arm joints current_joints = robot.get_arm_joints() print(\"Current joints", "# go to bottle x,y,z = current_bottle_orig_pos h = raw_input(\"Set", "float(h) robot.go_to_xyz(x, y, z + h) elif(cmd == 'hover'): #", "1 to 6\\n\")) while(b_n not in [1,2,3,4,5,6]): b_n = int(raw_input(\"Enter", "bottle number from 1 to 6\\n\")) bottle = 'bottle_' +", "print arm joints current_joints = robot.get_arm_joints() print(\"Current joints poistion: \"", "print(\"Current gripper coordinates: \" + str(pos)) elif(cmd == 'parm'): #", "reset all models positions reset_all() elif(cmd == 'pgr'): # print", "float(raw_input(\"Enter theta_0\")) # We don't want to change the arm", "delete_model def control_panel(): robot = ik.MoveGroupPythonIntefaceTutorial() # robot.go_to_init_state() # robot.open_gripper()", "robot.get_arm_joints() # joints[0] = float(raw_input(\"Enter theta_0\")) # We don't want", "= 0 robot.set_joints(joints) elif(cmd == 'att'): # attaches object to", "bottle x,y,z = current_bottle_orig_pos robot.go_to_xyz(x, y, BUO) elif(cmd == 'gtc'):", "= robot.get_arm_pose() dir = raw_input(\"Enter coord: x,y or z:\\n\") while(dir", "the bottle x,y,z = 
current_bottle_orig_pos robot.go_to_xyz(x, y, BUO) elif(cmd ==", "#!/usr/bin/env python \"\"\" Control panel file \"\"\" import pddl_solver as", "cup # simulation x,y,z = get_object_position('cup_1') # real_world # pos,", "''): joints[1] = float(t1) if(t2 != ''): joints[2] = float(t2)", "spawn, exit:\\n\") if(cmd == 'open'): # open the gripper robot.open_gripper()", "h = BZS else: h = float(h) robot.go_to_xyz(x, y, z", "deleted\") elif(cmd == 'dela'): delete_all() print(\"All models were deleted\") elif(cmd", "robot.open_gripper() bottle = 'bottle_1' # simulatiuon current_bottle_orig_pos = get_object_position(bottle) #", "\"\"\" Control panel file \"\"\" import pddl_solver as pddl import", "h) elif(cmd == 'hover'): # hover over the bottle x,y,z", "coordinates: \" + str(pos)) elif(cmd == 'parm'): # print arm", "to cup x,y,z = robot.get_arm_pose() dir = raw_input(\"Enter coord: x,y", "move,\\n pour, cb, rb, ra,\\n pgr, parm, pj,\\n setj, att,", "== 'ra'): # reset all models positions reset_all() elif(cmd ==", "performance') return else: print('Wrong command') if __name__ == '__main__': control_panel()", "We don't want to change the arm direction t1 =", "# print gripper postiion pos = robot.get_gripper_pose() print(\"Current gripper coordinates:", "goal in range [-0.12; 0]:\\n\")) if(goal==\"\"): goal = -0.075 while(goal", "goal < -0.12): goal = float(raw_input(\"Enter closing goal in range", "b_n = int(raw_input(\"Enter bottle number from 1 to 6\\n\")) bottle", "+ str(bottle.split('_')[1]) + \" was spawned\") elif(cmd == 'exit'): #", "\" + str(pos)) elif(cmd == 'pj'): # print arm joints", "* from spawn_models import reset_model_position, reset_all, spawn_model, spawn_all_models from delete_models", "!= ''): joints[2] = float(t2) if(t3 != ''): joints[3] =", "arm coordinates: \" + str(pos)) elif(cmd == 'pj'): # print", "# current_bottle_orig_pos[-1] += BZS while(True): print() cmd = raw_input(\"Enter command:\\n", "== 'z'): z += step robot.go_to_xyz(x, y, z) elif(cmd 
==", "print('Finish performance') return else: print('Wrong command') if __name__ == '__main__':", "z + h) elif(cmd == 'hover'): # hover over the", "str(b_n) # simulatiuon current_bottle_orig_pos = get_object_position(bottle) # real_world # current_bottle_orig_pos", "to change the arm direction t1 = raw_input(\"Enter theta_1: \")", "print(\"Attached objects: \" + str(attached_objects)) elif(cmd == 'box'): robot.add_box() robot.attach_object('box')", "initial pose robot.go_to_init_state() elif(cmd == 'gtb'): # go to bottle", "not in [1,2,3,4,5,6]): b_n = int(raw_input(\"Enter bottle number from 1", "= robot.get_arm_joints() print(\"Current joints poistion: \" + str(current_joints)) elif(cmd ==", "elif(cmd == 'pj'): # print arm joints current_joints = robot.get_arm_joints()", "# close the gripper goal = float(raw_input(\"Enter closing goal in", "y, CUO) elif(cmd == 'move'): # go to cup x,y,z", "= -0.075 while(goal > 0 or goal < -0.12): goal", "[-0.12; 0]:\\n\")) robot.close_gripper(goal) elif(cmd == 'init'): # go to initial", "print() cmd = raw_input(\"Enter command:\\n open, close, init,\\n gtb, hover,", "elif(cmd == 'del'): delete_model(bottle) print(\"Bottle \" + str(bottle.split('_')[1]) + \"", "# current_bottle_orig_pos = Real_poses(bottle) elif(cmd == 'rb'): # reset bottle", "= Real_poses(bottle) # current_bottle_orig_pos[-1] += BZS while(True): print() cmd =", "\" + str(pos)) elif(cmd == 'parm'): # print arm postiion", "x += step elif(dir == 'y'): y += step elif(dir", "robot.go_to_xyz(x, y, z) elif(cmd == 'pour'): # turn gripper on", "raw_input(\"Enter coord: x,y or z:\\n\") step = float(raw_input(\"Enter step size:\\n\"))", "angle robot.rotate_gripper(angle = 1) rospy.sleep(1.5) robot.rotate_gripper(angle = 0) elif(cmd ==", "rospy.sleep(1.5) robot.rotate_gripper(angle = 0) elif(cmd == 'cb'): # change bottle", "== 'pj'): # print arm joints current_joints = robot.get_arm_joints() print(\"Current", "robot.scene.get_attached_objects([bottle]) print(\"Attached 
objects: \" + str(attached_objects)) elif(cmd == 'box'): robot.add_box()", "0 robot.set_joints(joints) elif(cmd == 'att'): # attaches object to the", "= 'bottle_1' # simulatiuon current_bottle_orig_pos = get_object_position(bottle) # real_world #", "get_object_position import get_object_position import time from constants import * from", "== 'x'): x += step elif(dir == 'y'): y +=", "reset_model_position(bottle) elif(cmd == 'ra'): # reset all models positions reset_all()", "'bottle_1' # simulatiuon current_bottle_orig_pos = get_object_position(bottle) # real_world # current_bottle_orig_pos", "arm direction t1 = raw_input(\"Enter theta_1: \") t2 = raw_input(\"Enter", "delete_all, delete_model def control_panel(): robot = ik.MoveGroupPythonIntefaceTutorial() # robot.go_to_init_state() #", "'pgr'): # print gripper postiion pos = robot.get_gripper_pose() print(\"Current gripper", "in ['x','y','z']): dir = raw_input(\"Enter coord: x,y or z:\\n\") step", "print(\"Current joints poistion: \" + str(current_joints)) elif(cmd == 'setj'): #", "get_object_position import time from constants import * from spawn_models import", "# We don't want to change the arm direction t1", "= raw_input(\"Enter theta_3: \") if(t1 != ''): joints[1] = float(t1)", "# joints[0] = float(raw_input(\"Enter theta_0\")) # We don't want to", "'bottle_' + str(b_n) # simulatiuon current_bottle_orig_pos = get_object_position(bottle) # real_world", "t3 = raw_input(\"Enter theta_3: \") if(t1 != ''): joints[1] =", "float(t2) if(t3 != ''): joints[3] = float(t3) joints[4] = 0", "time from constants import * from spawn_models import reset_model_position, reset_all,", "robot.go_to_xyz(x, y, z + h) elif(cmd == 'hover'): # hover", "== 'move'): # go to cup x,y,z = robot.get_arm_pose() dir", "control panel script print('Finish performance') return else: print('Wrong command') if", "init,\\n gtb, hover, gtc, move,\\n pour, cb, rb, ra,\\n pgr,", "0]:\\n\")) robot.close_gripper(goal) elif(cmd == 'init'): # go to initial 
pose", "'close'): # close the gripper goal = float(raw_input(\"Enter closing goal", "\" was deleted\") elif(cmd == 'dela'): delete_all() print(\"All models were", "\") if(t1 != ''): joints[1] = float(t1) if(t2 != ''):", "float(t3) joints[4] = 0 robot.set_joints(joints) elif(cmd == 'att'): # attaches", "pos, angle = Real_world_PourPos[cup] # x,y,z = pos robot.go_to_xyz(x, y,", "delete_all() print(\"All models were deleted\") elif(cmd == 'spawn'): spawn_model(bottle) print(\"Bottle", "delete_models import delete_all, delete_model def control_panel(): robot = ik.MoveGroupPythonIntefaceTutorial() #", "if(h == \"\"): h = BZS else: h = float(h)", "+ str(bottle.split('_')[1]) + \" was deleted\") elif(cmd == 'dela'): delete_all()", "spawned\") elif(cmd == 'exit'): # exit control panel script print('Finish", "robot.get_arm_pose() print(\"Current arm coordinates: \" + str(pos)) elif(cmd == 'pj'):", "elif(cmd == 'rb'): # reset bottle position reset_model_position(bottle) elif(cmd ==", "elif(cmd == 'box'): robot.add_box() robot.attach_object('box') attached_objects = robot.scene.get_attached_objects([bottle]) print(\"Attached objects:", "print(\"Bottle \" + str(bottle.split('_')[1]) + \" was deleted\") elif(cmd ==", "bottle position reset_model_position(bottle) elif(cmd == 'ra'): # reset all models", "cb, rb, ra,\\n pgr, parm, pj,\\n setj, att, box,\\n del,", "import * from spawn_models import reset_model_position, reset_all, spawn_model, spawn_all_models from", "close, init,\\n gtb, hover, gtc, move,\\n pour, cb, rb, ra,\\n", "robot.go_to_xyz(x, y, BUO) elif(cmd == 'gtc'): # go to cup", "step robot.go_to_xyz(x, y, z) elif(cmd == 'pour'): # turn gripper", "pos = robot.get_gripper_pose() print(\"Current gripper coordinates: \" + str(pos)) elif(cmd", "# real_world # pos, angle = Real_world_PourPos[cup] # x,y,z =", "+ str(pos)) elif(cmd == 'parm'): # print arm postiion pos", "else: h = float(h) robot.go_to_xyz(x, y, z + h) elif(cmd", "< -0.12): goal = float(raw_input(\"Enter 
closing goal in range [-0.12;", "BUO) elif(cmd == 'gtc'): # go to cup # simulation", "y, z + h) elif(cmd == 'hover'): # hover over", "+= BZS while(True): print() cmd = raw_input(\"Enter command:\\n open, close,", "goal = float(raw_input(\"Enter closing goal in range [-0.12; 0]:\\n\")) if(goal==\"\"):", "closing goal in range [-0.12; 0]:\\n\")) robot.close_gripper(goal) elif(cmd == 'init'):", "elif(cmd == 'spawn'): spawn_model(bottle) print(\"Bottle \" + str(bottle.split('_')[1]) + \"", "pj,\\n setj, att, box,\\n del, dela, spawn, exit:\\n\") if(cmd ==", "bottle number from 1 to 6\\n\")) while(b_n not in [1,2,3,4,5,6]):", "== 'cb'): # change bottle b_n = int(raw_input(\"Enter bottle number", "in range [-0.12; 0]:\\n\")) if(goal==\"\"): goal = -0.075 while(goal >", "== 'gtc'): # go to cup # simulation x,y,z =", "joints[4] = 0 robot.set_joints(joints) elif(cmd == 'att'): # attaches object", "y, BUO) elif(cmd == 'gtc'): # go to cup #", "robot.go_to_init_state() elif(cmd == 'gtb'): # go to bottle x,y,z =", "# reset all models positions reset_all() elif(cmd == 'pgr'): #", "theta_2: \") t3 = raw_input(\"Enter theta_3: \") if(t1 != ''):", "# hover over the bottle x,y,z = current_bottle_orig_pos robot.go_to_xyz(x, y,", "elif(cmd == 'move'): # go to cup x,y,z = robot.get_arm_pose()", "== 'pgr'): # print gripper postiion pos = robot.get_gripper_pose() print(\"Current", "+= step elif(dir == 'y'): y += step elif(dir ==", "from get_object_position import get_object_position import time from constants import *", "0 or goal < -0.12): goal = float(raw_input(\"Enter closing goal", "''): joints[3] = float(t3) joints[4] = 0 robot.set_joints(joints) elif(cmd ==", "gtb, hover, gtc, move,\\n pour, cb, rb, ra,\\n pgr, parm,", "robot.get_arm_pose() dir = raw_input(\"Enter coord: x,y or z:\\n\") while(dir not", "float(raw_input(\"Enter closing goal in range [-0.12; 0]:\\n\")) if(goal==\"\"): goal =", "object to the gripper robot.attach_object(bottle) attached_objects = 
robot.scene.get_attached_objects([bottle]) print(\"Attached objects:", "= 'bottle_' + str(b_n) # simulatiuon current_bottle_orig_pos = get_object_position(bottle) #", "robot.go_to_init_state() # robot.open_gripper() bottle = 'bottle_1' # simulatiuon current_bottle_orig_pos =", "float(t1) if(t2 != ''): joints[2] = float(t2) if(t3 != ''):", "reset_all, spawn_model, spawn_all_models from delete_models import delete_all, delete_model def control_panel():", "== 'y'): y += step elif(dir == 'z'): z +=", "simulatiuon current_bottle_orig_pos = get_object_position(bottle) # real_world # current_bottle_orig_pos = Real_poses(bottle)", "# exit control panel script print('Finish performance') return else: print('Wrong", "'cb'): # change bottle b_n = int(raw_input(\"Enter bottle number from", "# set robot joint angles joints = robot.get_arm_joints() # joints[0]", "'gtb'): # go to bottle x,y,z = current_bottle_orig_pos h =", "1 to 6\\n\")) bottle = 'bottle_' + str(b_n) # simulatiuon", "print(\"Current arm coordinates: \" + str(pos)) elif(cmd == 'pj'): #", "pddl_solver as pddl import ik import rospy from get_object_position import", "''): joints[2] = float(t2) if(t3 != ''): joints[3] = float(t3)", "panel script print('Finish performance') return else: print('Wrong command') if __name__", "raw_input(\"Enter coord: x,y or z:\\n\") while(dir not in ['x','y','z']): dir", "int(raw_input(\"Enter bottle number from 1 to 6\\n\")) bottle = 'bottle_'", "angle = Real_world_PourPos[cup] # x,y,z = pos robot.go_to_xyz(x, y, CUO)", "= raw_input(\"Enter theta_1: \") t2 = raw_input(\"Enter theta_2: \") t3", "the gripper goal = float(raw_input(\"Enter closing goal in range [-0.12;", "Control panel file \"\"\" import pddl_solver as pddl import ik", "step elif(dir == 'y'): y += step elif(dir == 'z'):", "== 'exit'): # exit control panel script print('Finish performance') return", "arm joints current_joints = robot.get_arm_joints() print(\"Current joints poistion: \" +", "elif(cmd == 'att'): # attaches 
object to the gripper robot.attach_object(bottle)", "joints[1] = float(t1) if(t2 != ''): joints[2] = float(t2) if(t3", "= robot.get_gripper_pose() print(\"Current gripper coordinates: \" + str(pos)) elif(cmd ==", "\" + str(attached_objects)) elif(cmd == 'del'): delete_model(bottle) print(\"Bottle \" +", "close the gripper goal = float(raw_input(\"Enter closing goal in range", "bottle x,y,z = current_bottle_orig_pos h = raw_input(\"Set z level: \")", "current_joints = robot.get_arm_joints() print(\"Current joints poistion: \" + str(current_joints)) elif(cmd", "# real_world # current_bottle_orig_pos = Real_poses(bottle) elif(cmd == 'rb'): #", "'z'): z += step robot.go_to_xyz(x, y, z) elif(cmd == 'pour'):", "att, box,\\n del, dela, spawn, exit:\\n\") if(cmd == 'open'): #", "import reset_model_position, reset_all, spawn_model, spawn_all_models from delete_models import delete_all, delete_model", "dir = raw_input(\"Enter coord: x,y or z:\\n\") step = float(raw_input(\"Enter", "h = float(h) robot.go_to_xyz(x, y, z + h) elif(cmd ==", "= robot.scene.get_attached_objects([bottle]) print(\"Attached objects: \" + str(attached_objects)) elif(cmd == 'box'):", "\"\"): h = BZS else: h = float(h) robot.go_to_xyz(x, y,", "= current_bottle_orig_pos robot.go_to_xyz(x, y, BUO) elif(cmd == 'gtc'): # go", "1) rospy.sleep(1.5) robot.rotate_gripper(angle = 0) elif(cmd == 'cb'): # change", "str(pos)) elif(cmd == 'parm'): # print arm postiion pos =", "current_bottle_orig_pos = Real_poses(bottle) elif(cmd == 'rb'): # reset bottle position", "import pddl_solver as pddl import ik import rospy from get_object_position", "# reset bottle position reset_model_position(bottle) elif(cmd == 'ra'): # reset", "goal = -0.075 while(goal > 0 or goal < -0.12):", "== 'hover'): # hover over the bottle x,y,z = current_bottle_orig_pos", "== 'close'): # close the gripper goal = float(raw_input(\"Enter closing", "[1,2,3,4,5,6]): b_n = int(raw_input(\"Enter bottle number from 1 to 6\\n\"))", "panel file \"\"\" 
import pddl_solver as pddl import ik import", "elif(cmd == 'pour'): # turn gripper on pouring angle robot.rotate_gripper(angle", "go to initial pose robot.go_to_init_state() elif(cmd == 'gtb'): # go", "['x','y','z']): dir = raw_input(\"Enter coord: x,y or z:\\n\") step =", "attached_objects = robot.scene.get_attached_objects([bottle]) print(\"Attached objects: \" + str(attached_objects)) elif(cmd ==", "current_bottle_orig_pos robot.go_to_xyz(x, y, BUO) elif(cmd == 'gtc'): # go to", "robot.rotate_gripper(angle = 1) rospy.sleep(1.5) robot.rotate_gripper(angle = 0) elif(cmd == 'cb'):", "+ str(current_joints)) elif(cmd == 'setj'): # set robot joint angles", "arm postiion pos = robot.get_arm_pose() print(\"Current arm coordinates: \" +", "str(bottle.split('_')[1]) + \" was deleted\") elif(cmd == 'dela'): delete_all() print(\"All", "= robot.get_arm_pose() print(\"Current arm coordinates: \" + str(pos)) elif(cmd ==", "\" was spawned\") elif(cmd == 'exit'): # exit control panel", "don't want to change the arm direction t1 = raw_input(\"Enter", "BZS else: h = float(h) robot.go_to_xyz(x, y, z + h)", "print(\"Bottle \" + str(bottle.split('_')[1]) + \" was spawned\") elif(cmd ==", "exit control panel script print('Finish performance') return else: print('Wrong command')", "print(\"All models were deleted\") elif(cmd == 'spawn'): spawn_model(bottle) print(\"Bottle \"", "from constants import * from spawn_models import reset_model_position, reset_all, spawn_model,", "elif(cmd == 'init'): # go to initial pose robot.go_to_init_state() elif(cmd", "elif(cmd == 'parm'): # print arm postiion pos = robot.get_arm_pose()", "number from 1 to 6\\n\")) while(b_n not in [1,2,3,4,5,6]): b_n", "cmd = raw_input(\"Enter command:\\n open, close, init,\\n gtb, hover, gtc,", "robot.get_arm_joints() print(\"Current joints poistion: \" + str(current_joints)) elif(cmd == 'setj'):", "pddl import ik import rospy from get_object_position import get_object_position import", "== 'parm'): # print arm 
postiion pos = robot.get_arm_pose() print(\"Current", "elif(dir == 'y'): y += step elif(dir == 'z'): z", "dela, spawn, exit:\\n\") if(cmd == 'open'): # open the gripper", "elif(cmd == 'gtc'): # go to cup # simulation x,y,z", "'box'): robot.add_box() robot.attach_object('box') attached_objects = robot.scene.get_attached_objects([bottle]) print(\"Attached objects: \" +", "'x'): x += step elif(dir == 'y'): y += step", "x,y or z:\\n\") step = float(raw_input(\"Enter step size:\\n\")) if(dir ==", "real_world # current_bottle_orig_pos = Real_poses(bottle) elif(cmd == 'rb'): # reset", "'exit'): # exit control panel script print('Finish performance') return else:", "goal in range [-0.12; 0]:\\n\")) robot.close_gripper(goal) elif(cmd == 'init'): #", "elif(cmd == 'hover'): # hover over the bottle x,y,z =", "angles joints = robot.get_arm_joints() # joints[0] = float(raw_input(\"Enter theta_0\")) #", "gripper goal = float(raw_input(\"Enter closing goal in range [-0.12; 0]:\\n\"))", "x,y,z = current_bottle_orig_pos h = raw_input(\"Set z level: \") if(h", "'att'): # attaches object to the gripper robot.attach_object(bottle) attached_objects =", "= ik.MoveGroupPythonIntefaceTutorial() # robot.go_to_init_state() # robot.open_gripper() bottle = 'bottle_1' #", "while(dir not in ['x','y','z']): dir = raw_input(\"Enter coord: x,y or", "'dela'): delete_all() print(\"All models were deleted\") elif(cmd == 'spawn'): spawn_model(bottle)", "cup x,y,z = robot.get_arm_pose() dir = raw_input(\"Enter coord: x,y or", "pour, cb, rb, ra,\\n pgr, parm, pj,\\n setj, att, box,\\n", "joints[3] = float(t3) joints[4] = 0 robot.set_joints(joints) elif(cmd == 'att'):", "\" + str(current_joints)) elif(cmd == 'setj'): # set robot joint", "# real_world # current_bottle_orig_pos = Real_poses(bottle) # current_bottle_orig_pos[-1] += BZS", "= raw_input(\"Enter command:\\n open, close, init,\\n gtb, hover, gtc, move,\\n", "as pddl import ik import rospy from get_object_position import get_object_position", "# 
pos, angle = Real_world_PourPos[cup] # x,y,z = pos robot.go_to_xyz(x,", "[-0.12; 0]:\\n\")) if(goal==\"\"): goal = -0.075 while(goal > 0 or", "# simulation x,y,z = get_object_position('cup_1') # real_world # pos, angle", "closing goal in range [-0.12; 0]:\\n\")) if(goal==\"\"): goal = -0.075", "to 6\\n\")) while(b_n not in [1,2,3,4,5,6]): b_n = int(raw_input(\"Enter bottle", "robot.close_gripper(goal) elif(cmd == 'init'): # go to initial pose robot.go_to_init_state()", "str(pos)) elif(cmd == 'pj'): # print arm joints current_joints =", "joints = robot.get_arm_joints() # joints[0] = float(raw_input(\"Enter theta_0\")) # We", "real_world # current_bottle_orig_pos = Real_poses(bottle) # current_bottle_orig_pos[-1] += BZS while(True):", "change bottle b_n = int(raw_input(\"Enter bottle number from 1 to", "to initial pose robot.go_to_init_state() elif(cmd == 'gtb'): # go to", "int(raw_input(\"Enter bottle number from 1 to 6\\n\")) while(b_n not in", "from 1 to 6\\n\")) while(b_n not in [1,2,3,4,5,6]): b_n =", "were deleted\") elif(cmd == 'spawn'): spawn_model(bottle) print(\"Bottle \" + str(bottle.split('_')[1])", "real_world # pos, angle = Real_world_PourPos[cup] # x,y,z = pos", "robot.rotate_gripper(angle = 0) elif(cmd == 'cb'): # change bottle b_n", "models were deleted\") elif(cmd == 'spawn'): spawn_model(bottle) print(\"Bottle \" +", "while(goal > 0 or goal < -0.12): goal = float(raw_input(\"Enter", "if(dir == 'x'): x += step elif(dir == 'y'): y", "h = raw_input(\"Set z level: \") if(h == \"\"): h", "step elif(dir == 'z'): z += step robot.go_to_xyz(x, y, z)", "# go to cup # simulation x,y,z = get_object_position('cup_1') #", "'pour'): # turn gripper on pouring angle robot.rotate_gripper(angle = 1)", "= float(t1) if(t2 != ''): joints[2] = float(t2) if(t3 !=", "\" + str(bottle.split('_')[1]) + \" was spawned\") elif(cmd == 'exit'):", "models positions reset_all() elif(cmd == 'pgr'): # print gripper postiion", "current_bottle_orig_pos = Real_poses(bottle) # 
current_bottle_orig_pos[-1] += BZS while(True): print() cmd", "== 'pour'): # turn gripper on pouring angle robot.rotate_gripper(angle =", "str(attached_objects)) elif(cmd == 'del'): delete_model(bottle) print(\"Bottle \" + str(bottle.split('_')[1]) +", "constants import * from spawn_models import reset_model_position, reset_all, spawn_model, spawn_all_models", "turn gripper on pouring angle robot.rotate_gripper(angle = 1) rospy.sleep(1.5) robot.rotate_gripper(angle", "go to bottle x,y,z = current_bottle_orig_pos h = raw_input(\"Set z", "elif(cmd == 'gtb'): # go to bottle x,y,z = current_bottle_orig_pos", "b_n = int(raw_input(\"Enter bottle number from 1 to 6\\n\")) while(b_n", "# attaches object to the gripper robot.attach_object(bottle) attached_objects = robot.scene.get_attached_objects([bottle])", "+= step robot.go_to_xyz(x, y, z) elif(cmd == 'pour'): # turn", "range [-0.12; 0]:\\n\")) if(goal==\"\"): goal = -0.075 while(goal > 0", "reset_model_position, reset_all, spawn_model, spawn_all_models from delete_models import delete_all, delete_model def", "BZS while(True): print() cmd = raw_input(\"Enter command:\\n open, close, init,\\n", "+ str(attached_objects)) elif(cmd == 'box'): robot.add_box() robot.attach_object('box') attached_objects = robot.scene.get_attached_objects([bottle])", "+ str(b_n) # simulatiuon current_bottle_orig_pos = get_object_position(bottle) # real_world #", "'ra'): # reset all models positions reset_all() elif(cmd == 'pgr'):", "'setj'): # set robot joint angles joints = robot.get_arm_joints() #", "direction t1 = raw_input(\"Enter theta_1: \") t2 = raw_input(\"Enter theta_2:", "== \"\"): h = BZS else: h = float(h) robot.go_to_xyz(x,", "current_bottle_orig_pos = get_object_position(bottle) # real_world # current_bottle_orig_pos = Real_poses(bottle) #", "joints[0] = float(raw_input(\"Enter theta_0\")) # We don't want to change", "= raw_input(\"Enter theta_2: \") t3 = raw_input(\"Enter theta_3: \") if(t1", "= float(raw_input(\"Enter closing 
goal in range [-0.12; 0]:\\n\")) robot.close_gripper(goal) elif(cmd", "== 'rb'): # reset bottle position reset_model_position(bottle) elif(cmd == 'ra'):", "+ \" was spawned\") elif(cmd == 'exit'): # exit control", "== 'att'): # attaches object to the gripper robot.attach_object(bottle) attached_objects", "coordinates: \" + str(pos)) elif(cmd == 'pj'): # print arm", "to bottle x,y,z = current_bottle_orig_pos h = raw_input(\"Set z level:", "== 'del'): delete_model(bottle) print(\"Bottle \" + str(bottle.split('_')[1]) + \" was", "-0.075 while(goal > 0 or goal < -0.12): goal =", "reset_all() elif(cmd == 'pgr'): # print gripper postiion pos =", "pgr, parm, pj,\\n setj, att, box,\\n del, dela, spawn, exit:\\n\")", "= robot.get_arm_joints() # joints[0] = float(raw_input(\"Enter theta_0\")) # We don't", "Real_poses(bottle) # current_bottle_orig_pos[-1] += BZS while(True): print() cmd = raw_input(\"Enter", "= int(raw_input(\"Enter bottle number from 1 to 6\\n\")) bottle =", "step = float(raw_input(\"Enter step size:\\n\")) if(dir == 'x'): x +=", "\" + str(bottle.split('_')[1]) + \" was deleted\") elif(cmd == 'dela'):", "= float(raw_input(\"Enter closing goal in range [-0.12; 0]:\\n\")) if(goal==\"\"): goal", "== 'open'): # open the gripper robot.open_gripper() elif(cmd == 'close'):", "raw_input(\"Enter theta_1: \") t2 = raw_input(\"Enter theta_2: \") t3 =", "elif(cmd == 'ra'): # reset all models positions reset_all() elif(cmd", "file \"\"\" import pddl_solver as pddl import ik import rospy", "if(goal==\"\"): goal = -0.075 while(goal > 0 or goal <", "get_object_position(bottle) # real_world # current_bottle_orig_pos = Real_poses(bottle) # current_bottle_orig_pos[-1] +=", "raw_input(\"Enter command:\\n open, close, init,\\n gtb, hover, gtc, move,\\n pour,", "'move'): # go to cup x,y,z = robot.get_arm_pose() dir =", "spawn_model(bottle) print(\"Bottle \" + str(bottle.split('_')[1]) + \" was spawned\") elif(cmd", "or goal < -0.12): goal = float(raw_input(\"Enter closing goal 
in", "get_object_position('cup_1') # real_world # pos, angle = Real_world_PourPos[cup] # x,y,z", "# current_bottle_orig_pos = Real_poses(bottle) # current_bottle_orig_pos[-1] += BZS while(True): print()", "# print arm joints current_joints = robot.get_arm_joints() print(\"Current joints poistion:", "\") t3 = raw_input(\"Enter theta_3: \") if(t1 != ''): joints[1]", "# simulatiuon current_bottle_orig_pos = get_object_position(bottle) # real_world # current_bottle_orig_pos =", "'hover'): # hover over the bottle x,y,z = current_bottle_orig_pos robot.go_to_xyz(x,", "# turn gripper on pouring angle robot.rotate_gripper(angle = 1) rospy.sleep(1.5)", "rb, ra,\\n pgr, parm, pj,\\n setj, att, box,\\n del, dela,", "# robot.open_gripper() bottle = 'bottle_1' # simulatiuon current_bottle_orig_pos = get_object_position(bottle)", "joints[2] = float(t2) if(t3 != ''): joints[3] = float(t3) joints[4]", "to the gripper robot.attach_object(bottle) attached_objects = robot.scene.get_attached_objects([bottle]) print(\"Attached objects: \"", "poistion: \" + str(current_joints)) elif(cmd == 'setj'): # set robot", "robot.attach_object('box') attached_objects = robot.scene.get_attached_objects([bottle]) print(\"Attached objects: \" + str(attached_objects)) elif(cmd", "= BZS else: h = float(h) robot.go_to_xyz(x, y, z +", "z:\\n\") while(dir not in ['x','y','z']): dir = raw_input(\"Enter coord: x,y", "= robot.scene.get_attached_objects([bottle]) print(\"Attached objects: \" + str(attached_objects)) elif(cmd == 'del'):", "the gripper robot.open_gripper() elif(cmd == 'close'): # close the gripper", "while(True): print() cmd = raw_input(\"Enter command:\\n open, close, init,\\n gtb,", "to 6\\n\")) bottle = 'bottle_' + str(b_n) # simulatiuon current_bottle_orig_pos", "= raw_input(\"Enter coord: x,y or z:\\n\") step = float(raw_input(\"Enter step", "+ str(attached_objects)) elif(cmd == 'del'): delete_model(bottle) print(\"Bottle \" + str(bottle.split('_')[1])", "in range [-0.12; 0]:\\n\")) 
robot.close_gripper(goal) elif(cmd == 'init'): # go", "raw_input(\"Enter theta_2: \") t3 = raw_input(\"Enter theta_3: \") if(t1 !=", "all models positions reset_all() elif(cmd == 'pgr'): # print gripper", "robot.scene.get_attached_objects([bottle]) print(\"Attached objects: \" + str(attached_objects)) elif(cmd == 'del'): delete_model(bottle)", "postiion pos = robot.get_gripper_pose() print(\"Current gripper coordinates: \" + str(pos))", "0]:\\n\")) if(goal==\"\"): goal = -0.075 while(goal > 0 or goal", "+= step elif(dir == 'z'): z += step robot.go_to_xyz(x, y,", "open, close, init,\\n gtb, hover, gtc, move,\\n pour, cb, rb,", "spawn_models import reset_model_position, reset_all, spawn_model, spawn_all_models from delete_models import delete_all,", "print(\"Attached objects: \" + str(attached_objects)) elif(cmd == 'del'): delete_model(bottle) print(\"Bottle", "was spawned\") elif(cmd == 'exit'): # exit control panel script", "exit:\\n\") if(cmd == 'open'): # open the gripper robot.open_gripper() elif(cmd", "objects: \" + str(attached_objects)) elif(cmd == 'box'): robot.add_box() robot.attach_object('box') attached_objects" ]
[ "self.each(char,False); return string; def each(self,char,flag): self.rotors[0].step() output = self.Plugboard.get(char) for", ", reflector = Reflector(\"A\")): self.rotors = rotors for i in", "= self.Plugboard.get(char) for rotor in self.rotors: if flag: output =", "in self.rotors: if flag: output = rotor.scramble(output) else: output =", "= \"\"; for char in data: string += self.each(char,False); return", "data.upper(); string = \"\"; for char in data: string +=", "self.Reflector.get(output) for rotor in self.rotors[::-1]: if flag: output = rotor.scramble(output)", "data: string += self.each(char,False); return string; def each(self,char,flag): self.rotors[0].step() output", "def encrypt(self,data): data = data.upper().replace(\" \",\"\"); string = \"\"; for", "i in range(len(rotors)): if i + 1 < len(rotors): rotors[i].on(\"Sidereal\",", "char in data: string += self.each(char,False); return string; def each(self,char,flag):", "len(rotors): rotors[i].on(\"Sidereal\", lambda *args: rotors[i+1].step()) self.Plugboard = plugboard; self.Reflector =", "output = self.Reflector.get(output) for rotor in self.rotors[::-1]: if flag: output", "= self.Reflector.get(output) for rotor in self.rotors[::-1]: if flag: output =", "+= self.each(char,False); return string; def each(self,char,flag): self.rotors[0].step() output = self.Plugboard.get(char)", "char in data: string += self.each(char,True); return string; def decrypt(self,data):", "flag: output = rotor.scramble(output) else: output = rotor.unscramble(output) output =", ", plugboard = Plugboard() , reflector = Reflector(\"A\")): self.rotors =", "+ 1 < len(rotors): rotors[i].on(\"Sidereal\", lambda *args: rotors[i+1].step()) self.Plugboard =", "if i + 1 < len(rotors): rotors[i].on(\"Sidereal\", lambda *args: rotors[i+1].step())", ", Rotor(0,\"IIIC\") ] , plugboard = Plugboard() , reflector =", "+= self.each(char,True); return string; def decrypt(self,data): data = data.upper(); string", "reflector = Reflector(\"A\")): 
self.rotors = rotors for i in range(len(rotors)):", "= plugboard; self.Reflector = reflector; def encrypt(self,data): data = data.upper().replace(\"", "if flag: output = rotor.scramble(output) else: output = rotor.unscramble(output) return", "in data: string += self.each(char,True); return string; def decrypt(self,data): data", "[ Rotor(0,\"IC\") , Rotor(0,\"IIC\") , Rotor(0,\"IIIC\") ] , plugboard =", "= data.upper().replace(\" \",\"\"); string = \"\"; for char in data:", "rotor.unscramble(output) output = self.Reflector.get(output) for rotor in self.rotors[::-1]: if flag:", "= rotor.scramble(output) else: output = rotor.unscramble(output) output = self.Reflector.get(output) for", "lambda *args: rotors[i+1].step()) self.Plugboard = plugboard; self.Reflector = reflector; def", "plugboard = Plugboard() , reflector = Reflector(\"A\")): self.rotors = rotors", "Plugboard() , reflector = Reflector(\"A\")): self.rotors = rotors for i", "plugboard; self.Reflector = reflector; def encrypt(self,data): data = data.upper().replace(\" \",\"\");", "data: string += self.each(char,True); return string; def decrypt(self,data): data =", "string += self.each(char,False); return string; def each(self,char,flag): self.rotors[0].step() output =", "encrypt(self,data): data = data.upper().replace(\" \",\"\"); string = \"\"; for char", "from Enigma.Reflector import Reflector from Enigma.Plugboard import Plugboard class Enigma:", "decrypt(self,data): data = data.upper(); string = \"\"; for char in", "rotor in self.rotors[::-1]: if flag: output = rotor.scramble(output) else: output", "data.upper().replace(\" \",\"\"); string = \"\"; for char in data: string", "in range(len(rotors)): if i + 1 < len(rotors): rotors[i].on(\"Sidereal\", lambda", "< len(rotors): rotors[i].on(\"Sidereal\", lambda *args: rotors[i+1].step()) self.Plugboard = plugboard; self.Reflector", "= rotor.unscramble(output) output = self.Reflector.get(output) for rotor in self.rotors[::-1]: if", "self.rotors[0].step() output 
= self.Plugboard.get(char) for rotor in self.rotors: if flag:", "flag: output = rotor.scramble(output) else: output = rotor.unscramble(output) return self.Plugboard.get(output);", "= \"\"; for char in data: string += self.each(char,True); return", "data = data.upper().replace(\" \",\"\"); string = \"\"; for char in", "Reflector(\"A\")): self.rotors = rotors for i in range(len(rotors)): if i", "self.each(char,True); return string; def decrypt(self,data): data = data.upper(); string =", "Enigma.Rotor import Rotor from Enigma.Reflector import Reflector from Enigma.Plugboard import", "= Reflector(\"A\")): self.rotors = rotors for i in range(len(rotors)): if", "Rotor(0,\"IC\") , Rotor(0,\"IIC\") , Rotor(0,\"IIIC\") ] , plugboard = Plugboard()", "string = \"\"; for char in data: string += self.each(char,True);", "for rotor in self.rotors[::-1]: if flag: output = rotor.scramble(output) else:", "self.Reflector = reflector; def encrypt(self,data): data = data.upper().replace(\" \",\"\"); string", "*args: rotors[i+1].step()) self.Plugboard = plugboard; self.Reflector = reflector; def encrypt(self,data):", ", rotors = [ Rotor(0,\"IC\") , Rotor(0,\"IIC\") , Rotor(0,\"IIIC\") ]", "= reflector; def encrypt(self,data): data = data.upper().replace(\" \",\"\"); string =", "i + 1 < len(rotors): rotors[i].on(\"Sidereal\", lambda *args: rotors[i+1].step()) self.Plugboard", "rotors for i in range(len(rotors)): if i + 1 <", "\",\"\"); string = \"\"; for char in data: string +=", "Enigma: def __init__(self , rotors = [ Rotor(0,\"IC\") , Rotor(0,\"IIC\")", "rotor.scramble(output) else: output = rotor.unscramble(output) output = self.Reflector.get(output) for rotor", "= [ Rotor(0,\"IC\") , Rotor(0,\"IIC\") , Rotor(0,\"IIIC\") ] , plugboard", "rotors[i+1].step()) self.Plugboard = plugboard; self.Reflector = reflector; def encrypt(self,data): data", "import Reflector from Enigma.Plugboard import Plugboard class Enigma: def __init__(self", "for rotor in self.rotors: if flag: output = 
rotor.scramble(output) else:", "Reflector from Enigma.Plugboard import Plugboard class Enigma: def __init__(self ,", "self.rotors: if flag: output = rotor.scramble(output) else: output = rotor.unscramble(output)", "output = rotor.scramble(output) else: output = rotor.unscramble(output) output = self.Reflector.get(output)", "from Enigma.Rotor import Rotor from Enigma.Reflector import Reflector from Enigma.Plugboard", "self.Plugboard.get(char) for rotor in self.rotors: if flag: output = rotor.scramble(output)", "range(len(rotors)): if i + 1 < len(rotors): rotors[i].on(\"Sidereal\", lambda *args:", "Rotor from Enigma.Reflector import Reflector from Enigma.Plugboard import Plugboard class", "data = data.upper(); string = \"\"; for char in data:", "return string; def decrypt(self,data): data = data.upper(); string = \"\";", "string; def each(self,char,flag): self.rotors[0].step() output = self.Plugboard.get(char) for rotor in", "self.rotors[::-1]: if flag: output = rotor.scramble(output) else: output = rotor.unscramble(output)", "import Plugboard class Enigma: def __init__(self , rotors = [", "\"\"; for char in data: string += self.each(char,True); return string;", "] , plugboard = Plugboard() , reflector = Reflector(\"A\")): self.rotors", "rotors[i].on(\"Sidereal\", lambda *args: rotors[i+1].step()) self.Plugboard = plugboard; self.Reflector = reflector;", "def each(self,char,flag): self.rotors[0].step() output = self.Plugboard.get(char) for rotor in self.rotors:", "string = \"\"; for char in data: string += self.each(char,False);", "Rotor(0,\"IIC\") , Rotor(0,\"IIIC\") ] , plugboard = Plugboard() , reflector", "self.Plugboard = plugboard; self.Reflector = reflector; def encrypt(self,data): data =", "\"\"; for char in data: string += self.each(char,False); return string;", "each(self,char,flag): self.rotors[0].step() output = self.Plugboard.get(char) for rotor in self.rotors: if", "self.rotors = rotors for i in range(len(rotors)): if i +", "for char in data: string += 
self.each(char,True); return string; def", "rotor in self.rotors: if flag: output = rotor.scramble(output) else: output", "if flag: output = rotor.scramble(output) else: output = rotor.unscramble(output) output", "def __init__(self , rotors = [ Rotor(0,\"IC\") , Rotor(0,\"IIC\") ,", "def decrypt(self,data): data = data.upper(); string = \"\"; for char", "for i in range(len(rotors)): if i + 1 < len(rotors):", "= Plugboard() , reflector = Reflector(\"A\")): self.rotors = rotors for", "Enigma.Plugboard import Plugboard class Enigma: def __init__(self , rotors =", "reflector; def encrypt(self,data): data = data.upper().replace(\" \",\"\"); string = \"\";", "in self.rotors[::-1]: if flag: output = rotor.scramble(output) else: output =", "for char in data: string += self.each(char,False); return string; def", "return string; def each(self,char,flag): self.rotors[0].step() output = self.Plugboard.get(char) for rotor", "in data: string += self.each(char,False); return string; def each(self,char,flag): self.rotors[0].step()", "string; def decrypt(self,data): data = data.upper(); string = \"\"; for", "= rotors for i in range(len(rotors)): if i + 1", "= data.upper(); string = \"\"; for char in data: string", "Enigma.Reflector import Reflector from Enigma.Plugboard import Plugboard class Enigma: def", "__init__(self , rotors = [ Rotor(0,\"IC\") , Rotor(0,\"IIC\") , Rotor(0,\"IIIC\")", "rotors = [ Rotor(0,\"IC\") , Rotor(0,\"IIC\") , Rotor(0,\"IIIC\") ] ,", "import Rotor from Enigma.Reflector import Reflector from Enigma.Plugboard import Plugboard", "class Enigma: def __init__(self , rotors = [ Rotor(0,\"IC\") ,", "string += self.each(char,True); return string; def decrypt(self,data): data = data.upper();", "from Enigma.Plugboard import Plugboard class Enigma: def __init__(self , rotors", "Rotor(0,\"IIIC\") ] , plugboard = Plugboard() , reflector = Reflector(\"A\")):", "output = self.Plugboard.get(char) for rotor in self.rotors: if flag: output", "Plugboard class Enigma: def 
__init__(self , rotors = [ Rotor(0,\"IC\")", "1 < len(rotors): rotors[i].on(\"Sidereal\", lambda *args: rotors[i+1].step()) self.Plugboard = plugboard;", ", Rotor(0,\"IIC\") , Rotor(0,\"IIIC\") ] , plugboard = Plugboard() ,", "output = rotor.unscramble(output) output = self.Reflector.get(output) for rotor in self.rotors[::-1]:", "else: output = rotor.unscramble(output) output = self.Reflector.get(output) for rotor in" ]
[ "# SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class", "a Cross-platform C++ library and a command line utility to", "metadata \"\"\" homepage = \"https://www.exiv2.org/\" url = \"https://github.com/Exiv2/exiv2/archive/v0.27.2.tar.gz\" version('0.27.2', sha256='3dbcaf01fbc5b98d42f091d1ff0d4b6cd9750dc724de3d9c0d113948570b2934')", "and a command line utility to manage image metadata \"\"\"", "Exiv2(CMakePackage): \"\"\"Exiv2 is a Cross-platform C++ library and a command", "library and a command line utility to manage image metadata", "for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack", "Spack Project Developers. See the top-level COPYRIGHT file for details.", "image metadata \"\"\" homepage = \"https://www.exiv2.org/\" url = \"https://github.com/Exiv2/exiv2/archive/v0.27.2.tar.gz\" version('0.27.2',", "OR MIT) from spack import * class Exiv2(CMakePackage): \"\"\"Exiv2 is", "National Security, LLC and other # Spack Project Developers. See", "LLC and other # Spack Project Developers. See the top-level", "MIT) from spack import * class Exiv2(CMakePackage): \"\"\"Exiv2 is a", "\"\"\" homepage = \"https://www.exiv2.org/\" url = \"https://github.com/Exiv2/exiv2/archive/v0.27.2.tar.gz\" version('0.27.2', sha256='3dbcaf01fbc5b98d42f091d1ff0d4b6cd9750dc724de3d9c0d113948570b2934') depends_on('zlib',", "homepage = \"https://www.exiv2.org/\" url = \"https://github.com/Exiv2/exiv2/archive/v0.27.2.tar.gz\" version('0.27.2', sha256='3dbcaf01fbc5b98d42f091d1ff0d4b6cd9750dc724de3d9c0d113948570b2934') depends_on('zlib', type='link')", "to manage image metadata \"\"\" homepage = \"https://www.exiv2.org/\" url =", "Security, LLC and other # Spack Project Developers. See the", "details. 
# # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import", "class Exiv2(CMakePackage): \"\"\"Exiv2 is a Cross-platform C++ library and a", "(Apache-2.0 OR MIT) from spack import * class Exiv2(CMakePackage): \"\"\"Exiv2", "SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Exiv2(CMakePackage):", "See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier:", "a command line utility to manage image metadata \"\"\" homepage", "and other # Spack Project Developers. See the top-level COPYRIGHT", "manage image metadata \"\"\" homepage = \"https://www.exiv2.org/\" url = \"https://github.com/Exiv2/exiv2/archive/v0.27.2.tar.gz\"", "from spack import * class Exiv2(CMakePackage): \"\"\"Exiv2 is a Cross-platform", "# # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import *", "* class Exiv2(CMakePackage): \"\"\"Exiv2 is a Cross-platform C++ library and", "Cross-platform C++ library and a command line utility to manage", "import * class Exiv2(CMakePackage): \"\"\"Exiv2 is a Cross-platform C++ library", "the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0", "file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from", "# Spack Project Developers. See the top-level COPYRIGHT file for", "spack import * class Exiv2(CMakePackage): \"\"\"Exiv2 is a Cross-platform C++", "is a Cross-platform C++ library and a command line utility", "Project Developers. See the top-level COPYRIGHT file for details. #", "Lawrence Livermore National Security, LLC and other # Spack Project", "command line utility to manage image metadata \"\"\" homepage =", "Developers. See the top-level COPYRIGHT file for details. 
# #", "line utility to manage image metadata \"\"\" homepage = \"https://www.exiv2.org/\"", "\"\"\"Exiv2 is a Cross-platform C++ library and a command line", "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other", "C++ library and a command line utility to manage image", "Livermore National Security, LLC and other # Spack Project Developers.", "other # Spack Project Developers. See the top-level COPYRIGHT file", "2013-2020 Lawrence Livermore National Security, LLC and other # Spack", "Copyright 2013-2020 Lawrence Livermore National Security, LLC and other #", "utility to manage image metadata \"\"\" homepage = \"https://www.exiv2.org/\" url", "= \"https://www.exiv2.org/\" url = \"https://github.com/Exiv2/exiv2/archive/v0.27.2.tar.gz\" version('0.27.2', sha256='3dbcaf01fbc5b98d42f091d1ff0d4b6cd9750dc724de3d9c0d113948570b2934') depends_on('zlib', type='link') depends_on('expat@2.2.6:',", "COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT)", "top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR", "\"https://www.exiv2.org/\" url = \"https://github.com/Exiv2/exiv2/archive/v0.27.2.tar.gz\" version('0.27.2', sha256='3dbcaf01fbc5b98d42f091d1ff0d4b6cd9750dc724de3d9c0d113948570b2934') depends_on('zlib', type='link') depends_on('expat@2.2.6:', type='link')" ]
[ "= { \"token\": token, \"user\": user, \"site\": get_current_site(self.request), \"TOKEN_DURATION_MINUTES\": math.floor(magicauth_settings.TOKEN_DURATION_SECONDS", "import get_current_site from django.core.mail import send_mail from django.template import loader", "= magicauth_settings.EMAIL_FIELD field_lookup = {f\"{email_field}__iexact\": user_email} user = user_class.objects.get(**field_lookup) return", "text_template = magicauth_settings.EMAIL_TEXT_TEMPLATE from_email = magicauth_settings.FROM_EMAIL context = { \"token\":", ") sg.send(mail) def send_token(self, user_email, extra_context=None): user = self.get_user_from_email(user_email) token", "case the Django app has customised the User class) -", "Mail( from_email=( django_settings.MAGICAUTH_FROM_EMAIL, django_settings.MAGICAUTH_SENDER ), to_emails=[user_email], subject=email_subject, html_content=html_message ) sg.send(mail)", "= magicauth_settings.EMAIL_TEXT_TEMPLATE from_email = magicauth_settings.FROM_EMAIL context = { \"token\": token,", "get_user_model from django.contrib.sites.shortcuts import get_current_site from django.core.mail import send_mail from", "of User (in case the Django app has customised the", "the user corresponding to the email. - We use get_user_model()", "= magicauth_settings.EMAIL_HTML_TEMPLATE text_template = magicauth_settings.EMAIL_TEXT_TEMPLATE from_email = magicauth_settings.FROM_EMAIL context =", "from django.contrib.sites.shortcuts import get_current_site from django.core.mail import send_mail from django.template", "token = MagicToken.objects.create(user=user) return token def get_user_from_email(self, user_email): \"\"\" Query", "Query the DB for the user corresponding to the email.", "(in case the Django app has customised the User class)", "import settings as magicauth_settings from django.conf import settings as django_settings", "the name of the field in the user model. By", "default \"username\" but not always. 
\"\"\" user_class = get_user_model() email_field", "not always. \"\"\" user_class = get_user_model() email_field = magicauth_settings.EMAIL_FIELD field_lookup", "the email. - We use get_user_model() instead of User (in", "subject=email_subject, html_content=html_message ) sg.send(mail) def send_token(self, user_email, extra_context=None): user =", "containing a link containing the MagicToken. \"\"\" def create_token(self, user):", "html_message = loader.render_to_string(html_template, context) mail = Mail( from_email=( django_settings.MAGICAUTH_FROM_EMAIL, django_settings.MAGICAUTH_SENDER", "user def send_email(self, user, user_email, token, extra_context=None): email_subject = magicauth_settings.EMAIL_SUBJECT", "return token def get_user_from_email(self, user_email): \"\"\" Query the DB for", "user_email): \"\"\" Query the DB for the user corresponding to", "/ 60), \"TOKEN_DURATION_SECONDS\": magicauth_settings.TOKEN_DURATION_SECONDS, } if extra_context: context.update(extra_context) text_message =", "of the field in the user model. 
By default \"username\"", "} if extra_context: context.update(extra_context) text_message = loader.render_to_string(text_template, context) html_message =", "settings as django_settings from magicauth.models import MagicToken import sendgrid from", "import MagicToken import sendgrid from sendgrid import SendGridAPIClient from sendgrid.helpers.mail", "from magicauth import settings as magicauth_settings from django.conf import settings", "import math from django.contrib.auth import get_user_model from django.contrib.sites.shortcuts import get_current_site", "extra_context=None): email_subject = magicauth_settings.EMAIL_SUBJECT html_template = magicauth_settings.EMAIL_HTML_TEMPLATE text_template = magicauth_settings.EMAIL_TEXT_TEMPLATE", "sendgrid.SendGridAPIClient(django_settings.SENDGRID_API_KEY) class SendTokenMixin(object): \"\"\" Helper for sending an email containing", "from django.core.mail import send_mail from django.template import loader from magicauth", "name of the field in the user model. 
By default", "django_settings.MAGICAUTH_SENDER ), to_emails=[user_email], subject=email_subject, html_content=html_message ) sg.send(mail) def send_token(self, user_email,", "= magicauth_settings.EMAIL_SUBJECT html_template = magicauth_settings.EMAIL_HTML_TEMPLATE text_template = magicauth_settings.EMAIL_TEXT_TEMPLATE from_email =", "has customised the User class) - We use magicauth_settings.EMAIL_FIELD, which", "create_token(self, user): token = MagicToken.objects.create(user=user) return token def get_user_from_email(self, user_email):", "context.update(extra_context) text_message = loader.render_to_string(text_template, context) html_message = loader.render_to_string(html_template, context) mail", "\"\"\" Query the DB for the user corresponding to the", "{f\"{email_field}__iexact\": user_email} user = user_class.objects.get(**field_lookup) return user def send_email(self, user,", "django_settings from magicauth.models import MagicToken import sendgrid from sendgrid import", "settings as magicauth_settings from django.conf import settings as django_settings from", "), to_emails=[user_email], subject=email_subject, html_content=html_message ) sg.send(mail) def send_token(self, user_email, extra_context=None):", "html_content=html_message ) sg.send(mail) def send_token(self, user_email, extra_context=None): user = self.get_user_from_email(user_email)", "import send_mail from django.template import loader from magicauth import settings", "\"token\": token, \"user\": user, \"site\": get_current_site(self.request), \"TOKEN_DURATION_MINUTES\": math.floor(magicauth_settings.TOKEN_DURATION_SECONDS / 60),", "user, user_email, token, extra_context=None): email_subject = magicauth_settings.EMAIL_SUBJECT html_template = magicauth_settings.EMAIL_HTML_TEMPLATE", "the user model. By default \"username\" but not always. \"\"\"", "math from django.contrib.auth import get_user_model from django.contrib.sites.shortcuts import get_current_site from", "MagicToken. 
\"\"\" def create_token(self, user): token = MagicToken.objects.create(user=user) return token", "from sendgrid import SendGridAPIClient from sendgrid.helpers.mail import Mail sg =", "user corresponding to the email. - We use get_user_model() instead", "= user_class.objects.get(**field_lookup) return user def send_email(self, user, user_email, token, extra_context=None):", "in the user model. By default \"username\" but not always.", "the Django app has customised the User class) - We", "magicauth_settings.EMAIL_HTML_TEMPLATE text_template = magicauth_settings.EMAIL_TEXT_TEMPLATE from_email = magicauth_settings.FROM_EMAIL context = {", "context) mail = Mail( from_email=( django_settings.MAGICAUTH_FROM_EMAIL, django_settings.MAGICAUTH_SENDER ), to_emails=[user_email], subject=email_subject,", "\"TOKEN_DURATION_SECONDS\": magicauth_settings.TOKEN_DURATION_SECONDS, } if extra_context: context.update(extra_context) text_message = loader.render_to_string(text_template, context)", "email_field = magicauth_settings.EMAIL_FIELD field_lookup = {f\"{email_field}__iexact\": user_email} user = user_class.objects.get(**field_lookup)", "\"user\": user, \"site\": get_current_site(self.request), \"TOKEN_DURATION_MINUTES\": math.floor(magicauth_settings.TOKEN_DURATION_SECONDS / 60), \"TOKEN_DURATION_SECONDS\": magicauth_settings.TOKEN_DURATION_SECONDS,", "sendgrid from sendgrid import SendGridAPIClient from sendgrid.helpers.mail import Mail sg", "if extra_context: context.update(extra_context) text_message = loader.render_to_string(text_template, context) html_message = loader.render_to_string(html_template,", "60), \"TOKEN_DURATION_SECONDS\": magicauth_settings.TOKEN_DURATION_SECONDS, } if extra_context: context.update(extra_context) text_message = loader.render_to_string(text_template,", "magicauth_settings.EMAIL_FIELD field_lookup = {f\"{email_field}__iexact\": user_email} user = user_class.objects.get(**field_lookup) return user", "django.template import loader from magicauth 
import settings as magicauth_settings from", "magicauth_settings.TOKEN_DURATION_SECONDS, } if extra_context: context.update(extra_context) text_message = loader.render_to_string(text_template, context) html_message", "\"\"\" Helper for sending an email containing a link containing", "the MagicToken. \"\"\" def create_token(self, user): token = MagicToken.objects.create(user=user) return", "= loader.render_to_string(html_template, context) mail = Mail( from_email=( django_settings.MAGICAUTH_FROM_EMAIL, django_settings.MAGICAUTH_SENDER ),", "django.contrib.sites.shortcuts import get_current_site from django.core.mail import send_mail from django.template import", "extra_context=None): user = self.get_user_from_email(user_email) token = self.create_token(user) self.send_email(user, user_email, token,", "sendgrid.helpers.mail import Mail sg = sendgrid.SendGridAPIClient(django_settings.SENDGRID_API_KEY) class SendTokenMixin(object): \"\"\" Helper", "sendgrid import SendGridAPIClient from sendgrid.helpers.mail import Mail sg = sendgrid.SendGridAPIClient(django_settings.SENDGRID_API_KEY)", "import sendgrid from sendgrid import SendGridAPIClient from sendgrid.helpers.mail import Mail", "user): token = MagicToken.objects.create(user=user) return token def get_user_from_email(self, user_email): \"\"\"", "{ \"token\": token, \"user\": user, \"site\": get_current_site(self.request), \"TOKEN_DURATION_MINUTES\": math.floor(magicauth_settings.TOKEN_DURATION_SECONDS /", "user = user_class.objects.get(**field_lookup) return user def send_email(self, user, user_email, token,", "context = { \"token\": token, \"user\": user, \"site\": get_current_site(self.request), \"TOKEN_DURATION_MINUTES\":", "from_email = magicauth_settings.FROM_EMAIL context = { \"token\": token, \"user\": user,", "field in the user model. 
By default \"username\" but not", "magicauth_settings.EMAIL_FIELD, which is the name of the field in the", "for sending an email containing a link containing the MagicToken.", "\"\"\" def create_token(self, user): token = MagicToken.objects.create(user=user) return token def", "loader from magicauth import settings as magicauth_settings from django.conf import", "an email containing a link containing the MagicToken. \"\"\" def", "sending an email containing a link containing the MagicToken. \"\"\"", "= Mail( from_email=( django_settings.MAGICAUTH_FROM_EMAIL, django_settings.MAGICAUTH_SENDER ), to_emails=[user_email], subject=email_subject, html_content=html_message )", "sg = sendgrid.SendGridAPIClient(django_settings.SENDGRID_API_KEY) class SendTokenMixin(object): \"\"\" Helper for sending an", "but not always. \"\"\" user_class = get_user_model() email_field = magicauth_settings.EMAIL_FIELD", "= {f\"{email_field}__iexact\": user_email} user = user_class.objects.get(**field_lookup) return user def send_email(self,", "from django.template import loader from magicauth import settings as magicauth_settings", "magicauth_settings.EMAIL_SUBJECT html_template = magicauth_settings.EMAIL_HTML_TEMPLATE text_template = magicauth_settings.EMAIL_TEXT_TEMPLATE from_email = magicauth_settings.FROM_EMAIL", "a link containing the MagicToken. 
\"\"\" def create_token(self, user): token", "magicauth import settings as magicauth_settings from django.conf import settings as", "is the name of the field in the user model.", "get_user_from_email(self, user_email): \"\"\" Query the DB for the user corresponding", "django.contrib.auth import get_user_model from django.contrib.sites.shortcuts import get_current_site from django.core.mail import", "the User class) - We use magicauth_settings.EMAIL_FIELD, which is the", "MagicToken.objects.create(user=user) return token def get_user_from_email(self, user_email): \"\"\" Query the DB", "which is the name of the field in the user", "class SendTokenMixin(object): \"\"\" Helper for sending an email containing a", "user_email, token, extra_context=None): email_subject = magicauth_settings.EMAIL_SUBJECT html_template = magicauth_settings.EMAIL_HTML_TEMPLATE text_template", "user_email, extra_context=None): user = self.get_user_from_email(user_email) token = self.create_token(user) self.send_email(user, user_email,", "the field in the user model. By default \"username\" but", "the DB for the user corresponding to the email. -", "return user def send_email(self, user, user_email, token, extra_context=None): email_subject =", "magicauth_settings.FROM_EMAIL context = { \"token\": token, \"user\": user, \"site\": get_current_site(self.request),", "as django_settings from magicauth.models import MagicToken import sendgrid from sendgrid", "def create_token(self, user): token = MagicToken.objects.create(user=user) return token def get_user_from_email(self,", "for the user corresponding to the email. 
- We use", "mail = Mail( from_email=( django_settings.MAGICAUTH_FROM_EMAIL, django_settings.MAGICAUTH_SENDER ), to_emails=[user_email], subject=email_subject, html_content=html_message", "use magicauth_settings.EMAIL_FIELD, which is the name of the field in", "send_email(self, user, user_email, token, extra_context=None): email_subject = magicauth_settings.EMAIL_SUBJECT html_template =", "= sendgrid.SendGridAPIClient(django_settings.SENDGRID_API_KEY) class SendTokenMixin(object): \"\"\" Helper for sending an email", "from_email=( django_settings.MAGICAUTH_FROM_EMAIL, django_settings.MAGICAUTH_SENDER ), to_emails=[user_email], subject=email_subject, html_content=html_message ) sg.send(mail) def", "Django app has customised the User class) - We use", "By default \"username\" but not always. \"\"\" user_class = get_user_model()", "def get_user_from_email(self, user_email): \"\"\" Query the DB for the user", "from magicauth.models import MagicToken import sendgrid from sendgrid import SendGridAPIClient", "User (in case the Django app has customised the User", "link containing the MagicToken. 
\"\"\" def create_token(self, user): token =", "user, \"site\": get_current_site(self.request), \"TOKEN_DURATION_MINUTES\": math.floor(magicauth_settings.TOKEN_DURATION_SECONDS / 60), \"TOKEN_DURATION_SECONDS\": magicauth_settings.TOKEN_DURATION_SECONDS, }", "send_mail from django.template import loader from magicauth import settings as", "user_class.objects.get(**field_lookup) return user def send_email(self, user, user_email, token, extra_context=None): email_subject", "SendGridAPIClient from sendgrid.helpers.mail import Mail sg = sendgrid.SendGridAPIClient(django_settings.SENDGRID_API_KEY) class SendTokenMixin(object):", "html_template = magicauth_settings.EMAIL_HTML_TEMPLATE text_template = magicauth_settings.EMAIL_TEXT_TEMPLATE from_email = magicauth_settings.FROM_EMAIL context", "get_current_site(self.request), \"TOKEN_DURATION_MINUTES\": math.floor(magicauth_settings.TOKEN_DURATION_SECONDS / 60), \"TOKEN_DURATION_SECONDS\": magicauth_settings.TOKEN_DURATION_SECONDS, } if extra_context:", "text_message = loader.render_to_string(text_template, context) html_message = loader.render_to_string(html_template, context) mail =", "always. \"\"\" user_class = get_user_model() email_field = magicauth_settings.EMAIL_FIELD field_lookup =", "get_current_site from django.core.mail import send_mail from django.template import loader from", "field_lookup = {f\"{email_field}__iexact\": user_email} user = user_class.objects.get(**field_lookup) return user def", "django.conf import settings as django_settings from magicauth.models import MagicToken import", "django_settings.MAGICAUTH_FROM_EMAIL, django_settings.MAGICAUTH_SENDER ), to_emails=[user_email], subject=email_subject, html_content=html_message ) sg.send(mail) def send_token(self,", "DB for the user corresponding to the email. 
- We", "extra_context: context.update(extra_context) text_message = loader.render_to_string(text_template, context) html_message = loader.render_to_string(html_template, context)", "We use magicauth_settings.EMAIL_FIELD, which is the name of the field", "import get_user_model from django.contrib.sites.shortcuts import get_current_site from django.core.mail import send_mail", "email containing a link containing the MagicToken. \"\"\" def create_token(self,", "MagicToken import sendgrid from sendgrid import SendGridAPIClient from sendgrid.helpers.mail import", "magicauth_settings from django.conf import settings as django_settings from magicauth.models import", "email. - We use get_user_model() instead of User (in case", "magicauth_settings.EMAIL_TEXT_TEMPLATE from_email = magicauth_settings.FROM_EMAIL context = { \"token\": token, \"user\":", "magicauth.models import MagicToken import sendgrid from sendgrid import SendGridAPIClient from", "corresponding to the email. - We use get_user_model() instead of", "use get_user_model() instead of User (in case the Django app", "= loader.render_to_string(text_template, context) html_message = loader.render_to_string(html_template, context) mail = Mail(", "send_token(self, user_email, extra_context=None): user = self.get_user_from_email(user_email) token = self.create_token(user) self.send_email(user,", "- We use get_user_model() instead of User (in case the", "SendTokenMixin(object): \"\"\" Helper for sending an email containing a link", "get_user_model() email_field = magicauth_settings.EMAIL_FIELD field_lookup = {f\"{email_field}__iexact\": user_email} user =", "to the email. 
- We use get_user_model() instead of User", "customised the User class) - We use magicauth_settings.EMAIL_FIELD, which is", "def send_email(self, user, user_email, token, extra_context=None): email_subject = magicauth_settings.EMAIL_SUBJECT html_template", "loader.render_to_string(text_template, context) html_message = loader.render_to_string(html_template, context) mail = Mail( from_email=(", "import SendGridAPIClient from sendgrid.helpers.mail import Mail sg = sendgrid.SendGridAPIClient(django_settings.SENDGRID_API_KEY) class", "model. By default \"username\" but not always. \"\"\" user_class =", "user model. By default \"username\" but not always. \"\"\" user_class", "get_user_model() instead of User (in case the Django app has", "from sendgrid.helpers.mail import Mail sg = sendgrid.SendGridAPIClient(django_settings.SENDGRID_API_KEY) class SendTokenMixin(object): \"\"\"", "User class) - We use magicauth_settings.EMAIL_FIELD, which is the name", "math.floor(magicauth_settings.TOKEN_DURATION_SECONDS / 60), \"TOKEN_DURATION_SECONDS\": magicauth_settings.TOKEN_DURATION_SECONDS, } if extra_context: context.update(extra_context) text_message", "loader.render_to_string(html_template, context) mail = Mail( from_email=( django_settings.MAGICAUTH_FROM_EMAIL, django_settings.MAGICAUTH_SENDER ), to_emails=[user_email],", "from django.contrib.auth import get_user_model from django.contrib.sites.shortcuts import get_current_site from django.core.mail", "def send_token(self, user_email, extra_context=None): user = self.get_user_from_email(user_email) token = self.create_token(user)", "user = self.get_user_from_email(user_email) token = self.create_token(user) self.send_email(user, user_email, token, extra_context)", "user_email} user = user_class.objects.get(**field_lookup) return user def send_email(self, user, user_email,", "token, \"user\": user, \"site\": get_current_site(self.request), \"TOKEN_DURATION_MINUTES\": math.floor(magicauth_settings.TOKEN_DURATION_SECONDS / 60), 
\"TOKEN_DURATION_SECONDS\":", "token, extra_context=None): email_subject = magicauth_settings.EMAIL_SUBJECT html_template = magicauth_settings.EMAIL_HTML_TEMPLATE text_template =", "token def get_user_from_email(self, user_email): \"\"\" Query the DB for the", "We use get_user_model() instead of User (in case the Django", "import loader from magicauth import settings as magicauth_settings from django.conf", "\"\"\" user_class = get_user_model() email_field = magicauth_settings.EMAIL_FIELD field_lookup = {f\"{email_field}__iexact\":", "context) html_message = loader.render_to_string(html_template, context) mail = Mail( from_email=( django_settings.MAGICAUTH_FROM_EMAIL,", "user_class = get_user_model() email_field = magicauth_settings.EMAIL_FIELD field_lookup = {f\"{email_field}__iexact\": user_email}", "email_subject = magicauth_settings.EMAIL_SUBJECT html_template = magicauth_settings.EMAIL_HTML_TEMPLATE text_template = magicauth_settings.EMAIL_TEXT_TEMPLATE from_email", "sg.send(mail) def send_token(self, user_email, extra_context=None): user = self.get_user_from_email(user_email) token =", "to_emails=[user_email], subject=email_subject, html_content=html_message ) sg.send(mail) def send_token(self, user_email, extra_context=None): user", "django.core.mail import send_mail from django.template import loader from magicauth import", "app has customised the User class) - We use magicauth_settings.EMAIL_FIELD,", "from django.conf import settings as django_settings from magicauth.models import MagicToken", "class) - We use magicauth_settings.EMAIL_FIELD, which is the name of", "Mail sg = sendgrid.SendGridAPIClient(django_settings.SENDGRID_API_KEY) class SendTokenMixin(object): \"\"\" Helper for sending", "\"site\": get_current_site(self.request), \"TOKEN_DURATION_MINUTES\": math.floor(magicauth_settings.TOKEN_DURATION_SECONDS / 60), \"TOKEN_DURATION_SECONDS\": magicauth_settings.TOKEN_DURATION_SECONDS, } if", "Helper for sending an email containing a link containing 
the", "\"username\" but not always. \"\"\" user_class = get_user_model() email_field =", "instead of User (in case the Django app has customised", "containing the MagicToken. \"\"\" def create_token(self, user): token = MagicToken.objects.create(user=user)", "import settings as django_settings from magicauth.models import MagicToken import sendgrid", "= MagicToken.objects.create(user=user) return token def get_user_from_email(self, user_email): \"\"\" Query the", "import Mail sg = sendgrid.SendGridAPIClient(django_settings.SENDGRID_API_KEY) class SendTokenMixin(object): \"\"\" Helper for", "= magicauth_settings.FROM_EMAIL context = { \"token\": token, \"user\": user, \"site\":", "= get_user_model() email_field = magicauth_settings.EMAIL_FIELD field_lookup = {f\"{email_field}__iexact\": user_email} user", "as magicauth_settings from django.conf import settings as django_settings from magicauth.models", "\"TOKEN_DURATION_MINUTES\": math.floor(magicauth_settings.TOKEN_DURATION_SECONDS / 60), \"TOKEN_DURATION_SECONDS\": magicauth_settings.TOKEN_DURATION_SECONDS, } if extra_context: context.update(extra_context)", "- We use magicauth_settings.EMAIL_FIELD, which is the name of the" ]
[ "in any possible lettercase permutation. Returns module object if available,", "for path in paths: if os.path.exists(os.path.join(path, filename)): file_found = True", "\"\"\"Given an os.pathsep divided `search_path`, find first occurrence of `filename`.", "else: break return modobj class add_path: \"\"\"https://stackoverflow.com/a/39855753\"\"\" def __init__(self, paths):", "def search_file(filename, search_path): \"\"\"Given an os.pathsep divided `search_path`, find first", "path to file if found or None if unfound. \"\"\"", "file_found = False paths = search_path.split(os.pathsep) # paths = string.split(search_path,", "full path to file if found or None if unfound.", "ImportError: pass else: break return modobj class add_path: \"\"\"https://stackoverflow.com/a/39855753\"\"\" def", "available, None if not. `lenv` is list (not str) of", "of `filename`. Returns full path to file if found or", "lenv=None): \"\"\"Function to import *module* in any possible lettercase permutation.", "members to try. \"\"\" lenv = [] if lenv is", "def __init__(self, paths): # paths must be list self.paths =", "paths: if os.path.exists(os.path.join(path, filename)): file_found = True break if file_found:", "def __exit__(self, exc_type, exc_value, traceback): for pth in self.paths: sys.path.remove(pth)", "be list self.paths = paths def __enter__(self): for pth in", "= string.split(search_path, os.pathsep) for path in paths: if os.path.exists(os.path.join(path, filename)):", "return os.path.abspath(os.path.join(path, filename)) else: return None ## end of http://code.activestate.com/recipes/52224/", "import_ignorecase(module, lenv=None): \"\"\"Function to import *module* in any possible lettercase", "str) of addl sys.path members to try. 
\"\"\" lenv =", "None for per in list(all_casings(module)): try: modobj = __import__(per) except", "pass else: break return modobj class add_path: \"\"\"https://stackoverflow.com/a/39855753\"\"\" def __init__(self,", "lenv = [] if lenv is None else lenv with", "for per in list(all_casings(module)): try: modobj = __import__(per) except ImportError:", "not input_string: yield \"\" else: first = input_string[:1] if first.lower()", "http://code.activestate.com/recipes/52224/ (r1) def search_file(filename, search_path): \"\"\"Given an os.pathsep divided `search_path`,", "list (not str) of addl sys.path members to try. \"\"\"", "paths = string.split(search_path, os.pathsep) for path in paths: if os.path.exists(os.path.join(path,", "sub_casing in all_casings(input_string[1:]): yield first.lower() + sub_casing yield first.upper() +", "`lenv` is list (not str) of addl sys.path members to", "(not str) of addl sys.path members to try. \"\"\" lenv", "if file_found: return os.path.abspath(os.path.join(path, filename)) else: return None ## end", "sys.path.insert(0, pth) def __exit__(self, exc_type, exc_value, traceback): for pth in", "`search_path`, find first occurrence of `filename`. Returns full path to", "break if file_found: return os.path.abspath(os.path.join(path, filename)) else: return None ##", "in list(all_casings(module)): try: modobj = __import__(per) except ImportError: pass else:", "return modobj class add_path: \"\"\"https://stackoverflow.com/a/39855753\"\"\" def __init__(self, paths): # paths", "## {{{ http://code.activestate.com/recipes/52224/ (r1) def search_file(filename, search_path): \"\"\"Given an os.pathsep", "if not. `lenv` is list (not str) of addl sys.path", "lenv with add_path(lenv): modobj = None for per in list(all_casings(module)):", "found or None if unfound. 
\"\"\" file_found = False paths", "for sub_casing in all_casings(input_string[1:]): yield first + sub_casing else: for", "of http://code.activestate.com/recipes/52224/ }}} def all_casings(input_string): \"\"\"Function to return a generator", "sub_casing yield first.upper() + sub_casing def import_ignorecase(module, lenv=None): \"\"\"Function to", "import *module* in any possible lettercase permutation. Returns module object", "def __enter__(self): for pth in reversed(self.paths): sys.path.insert(0, pth) def __exit__(self,", "if unfound. \"\"\" file_found = False paths = search_path.split(os.pathsep) #", "in all_casings(input_string[1:]): yield first.lower() + sub_casing yield first.upper() + sub_casing", "first.upper() + sub_casing def import_ignorecase(module, lenv=None): \"\"\"Function to import *module*", "self.paths = paths def __enter__(self): for pth in reversed(self.paths): sys.path.insert(0,", "paths must be list self.paths = paths def __enter__(self): for", "return None ## end of http://code.activestate.com/recipes/52224/ }}} def all_casings(input_string): \"\"\"Function", "possible lettercase permutation. Returns module object if available, None if", "\"\" else: first = input_string[:1] if first.lower() == first.upper(): for", "def import_ignorecase(module, lenv=None): \"\"\"Function to import *module* in any possible", "try: modobj = __import__(per) except ImportError: pass else: break return", "all_casings(input_string[1:]): yield first.lower() + sub_casing yield first.upper() + sub_casing def", "= __import__(per) except ImportError: pass else: break return modobj class", "an os.pathsep divided `search_path`, find first occurrence of `filename`. Returns", "http://code.activestate.com/recipes/52224/ }}} def all_casings(input_string): \"\"\"Function to return a generator of", "lettercase permutations of *input_string*. 
\"\"\" if not input_string: yield \"\"", "## end of http://code.activestate.com/recipes/52224/ }}} def all_casings(input_string): \"\"\"Function to return", "else: first = input_string[:1] if first.lower() == first.upper(): for sub_casing", "reversed(self.paths): sys.path.insert(0, pth) def __exit__(self, exc_type, exc_value, traceback): for pth", "with add_path(lenv): modobj = None for per in list(all_casings(module)): try:", "# paths = string.split(search_path, os.pathsep) for path in paths: if", "yield first + sub_casing else: for sub_casing in all_casings(input_string[1:]): yield", "False paths = search_path.split(os.pathsep) # paths = string.split(search_path, os.pathsep) for", "Returns module object if available, None if not. `lenv` is", "= [] if lenv is None else lenv with add_path(lenv):", "in paths: if os.path.exists(os.path.join(path, filename)): file_found = True break if", "[] if lenv is None else lenv with add_path(lenv): modobj", "= None for per in list(all_casings(module)): try: modobj = __import__(per)", "add_path(lenv): modobj = None for per in list(all_casings(module)): try: modobj", "divided `search_path`, find first occurrence of `filename`. Returns full path", "first.lower() + sub_casing yield first.upper() + sub_casing def import_ignorecase(module, lenv=None):", "in reversed(self.paths): sys.path.insert(0, pth) def __exit__(self, exc_type, exc_value, traceback): for", "break return modobj class add_path: \"\"\"https://stackoverflow.com/a/39855753\"\"\" def __init__(self, paths): #", "file_found: return os.path.abspath(os.path.join(path, filename)) else: return None ## end of", "search_path): \"\"\"Given an os.pathsep divided `search_path`, find first occurrence of", "== first.upper(): for sub_casing in all_casings(input_string[1:]): yield first + sub_casing", "*module* in any possible lettercase permutation. Returns module object if", "pth) def __exit__(self, exc_type, exc_value, traceback): for pth in self.paths:", "unfound. 
\"\"\" file_found = False paths = search_path.split(os.pathsep) # paths", "True break if file_found: return os.path.abspath(os.path.join(path, filename)) else: return None", "sys ## {{{ http://code.activestate.com/recipes/52224/ (r1) def search_file(filename, search_path): \"\"\"Given an", "addl sys.path members to try. \"\"\" lenv = [] if", "= True break if file_found: return os.path.abspath(os.path.join(path, filename)) else: return", "module object if available, None if not. `lenv` is list", "else: for sub_casing in all_casings(input_string[1:]): yield first.lower() + sub_casing yield", "list self.paths = paths def __enter__(self): for pth in reversed(self.paths):", "os.path.abspath(os.path.join(path, filename)) else: return None ## end of http://code.activestate.com/recipes/52224/ }}}", "modobj class add_path: \"\"\"https://stackoverflow.com/a/39855753\"\"\" def __init__(self, paths): # paths must", "Returns full path to file if found or None if", "+ sub_casing yield first.upper() + sub_casing def import_ignorecase(module, lenv=None): \"\"\"Function", "add_path: \"\"\"https://stackoverflow.com/a/39855753\"\"\" def __init__(self, paths): # paths must be list", "string.split(search_path, os.pathsep) for path in paths: if os.path.exists(os.path.join(path, filename)): file_found", "object if available, None if not. `lenv` is list (not", "\"\"\"Function to import *module* in any possible lettercase permutation. Returns", "file if found or None if unfound. \"\"\" file_found =", "for sub_casing in all_casings(input_string[1:]): yield first.lower() + sub_casing yield first.upper()", "yield \"\" else: first = input_string[:1] if first.lower() == first.upper():", "first occurrence of `filename`. Returns full path to file if", "}}} def all_casings(input_string): \"\"\"Function to return a generator of all", "lettercase permutation. Returns module object if available, None if not.", "of addl sys.path members to try. \"\"\" lenv = []", "find first occurrence of `filename`. 
Returns full path to file", "permutations of *input_string*. \"\"\" if not input_string: yield \"\" else:", "__enter__(self): for pth in reversed(self.paths): sys.path.insert(0, pth) def __exit__(self, exc_type,", "any possible lettercase permutation. Returns module object if available, None", "is list (not str) of addl sys.path members to try.", "modobj = __import__(per) except ImportError: pass else: break return modobj", "\"\"\" lenv = [] if lenv is None else lenv", "input_string[:1] if first.lower() == first.upper(): for sub_casing in all_casings(input_string[1:]): yield", "sub_casing else: for sub_casing in all_casings(input_string[1:]): yield first.lower() + sub_casing", "if first.lower() == first.upper(): for sub_casing in all_casings(input_string[1:]): yield first", "= search_path.split(os.pathsep) # paths = string.split(search_path, os.pathsep) for path in", "else lenv with add_path(lenv): modobj = None for per in", "list(all_casings(module)): try: modobj = __import__(per) except ImportError: pass else: break", "return a generator of all lettercase permutations of *input_string*. \"\"\"", "\"\"\" if not input_string: yield \"\" else: first = input_string[:1]", "input_string: yield \"\" else: first = input_string[:1] if first.lower() ==", "if os.path.exists(os.path.join(path, filename)): file_found = True break if file_found: return", "import sys ## {{{ http://code.activestate.com/recipes/52224/ (r1) def search_file(filename, search_path): \"\"\"Given", "pth in reversed(self.paths): sys.path.insert(0, pth) def __exit__(self, exc_type, exc_value, traceback):", "__init__(self, paths): # paths must be list self.paths = paths", "(r1) def search_file(filename, search_path): \"\"\"Given an os.pathsep divided `search_path`, find", "\"\"\" file_found = False paths = search_path.split(os.pathsep) # paths =", "try. 
\"\"\" lenv = [] if lenv is None else", "first = input_string[:1] if first.lower() == first.upper(): for sub_casing in", "to file if found or None if unfound. \"\"\" file_found", "# paths must be list self.paths = paths def __enter__(self):", "os.path.exists(os.path.join(path, filename)): file_found = True break if file_found: return os.path.abspath(os.path.join(path,", "__import__(per) except ImportError: pass else: break return modobj class add_path:", "all_casings(input_string[1:]): yield first + sub_casing else: for sub_casing in all_casings(input_string[1:]):", "permutation. Returns module object if available, None if not. `lenv`", "paths = search_path.split(os.pathsep) # paths = string.split(search_path, os.pathsep) for path", "search_path.split(os.pathsep) # paths = string.split(search_path, os.pathsep) for path in paths:", "filename)) else: return None ## end of http://code.activestate.com/recipes/52224/ }}} def", "class add_path: \"\"\"https://stackoverflow.com/a/39855753\"\"\" def __init__(self, paths): # paths must be", "path in paths: if os.path.exists(os.path.join(path, filename)): file_found = True break", "\"\"\"Function to return a generator of all lettercase permutations of", "to return a generator of all lettercase permutations of *input_string*.", "if not input_string: yield \"\" else: first = input_string[:1] if", "is None else lenv with add_path(lenv): modobj = None for", "def all_casings(input_string): \"\"\"Function to return a generator of all lettercase", "of *input_string*. \"\"\" if not input_string: yield \"\" else: first", "yield first.upper() + sub_casing def import_ignorecase(module, lenv=None): \"\"\"Function to import", "else: return None ## end of http://code.activestate.com/recipes/52224/ }}} def all_casings(input_string):", "sub_casing in all_casings(input_string[1:]): yield first + sub_casing else: for sub_casing", "if available, None if not. 
`lenv` is list (not str)", "filename)): file_found = True break if file_found: return os.path.abspath(os.path.join(path, filename))", "must be list self.paths = paths def __enter__(self): for pth", "{{{ http://code.activestate.com/recipes/52224/ (r1) def search_file(filename, search_path): \"\"\"Given an os.pathsep divided", "= paths def __enter__(self): for pth in reversed(self.paths): sys.path.insert(0, pth)", "os import sys ## {{{ http://code.activestate.com/recipes/52224/ (r1) def search_file(filename, search_path):", "per in list(all_casings(module)): try: modobj = __import__(per) except ImportError: pass", "lenv is None else lenv with add_path(lenv): modobj = None", "file_found = True break if file_found: return os.path.abspath(os.path.join(path, filename)) else:", "None else lenv with add_path(lenv): modobj = None for per", "<gh_stars>1-10 import os import sys ## {{{ http://code.activestate.com/recipes/52224/ (r1) def", "paths def __enter__(self): for pth in reversed(self.paths): sys.path.insert(0, pth) def", "to import *module* in any possible lettercase permutation. Returns module", "search_file(filename, search_path): \"\"\"Given an os.pathsep divided `search_path`, find first occurrence", "all lettercase permutations of *input_string*. \"\"\" if not input_string: yield", "of all lettercase permutations of *input_string*. \"\"\" if not input_string:", "= input_string[:1] if first.lower() == first.upper(): for sub_casing in all_casings(input_string[1:]):", "first.lower() == first.upper(): for sub_casing in all_casings(input_string[1:]): yield first +", "None if unfound. \"\"\" file_found = False paths = search_path.split(os.pathsep)", "if lenv is None else lenv with add_path(lenv): modobj =", "in all_casings(input_string[1:]): yield first + sub_casing else: for sub_casing in", "*input_string*. \"\"\" if not input_string: yield \"\" else: first =", "a generator of all lettercase permutations of *input_string*. 
\"\"\" if", "+ sub_casing def import_ignorecase(module, lenv=None): \"\"\"Function to import *module* in", "sub_casing def import_ignorecase(module, lenv=None): \"\"\"Function to import *module* in any", "\"\"\"https://stackoverflow.com/a/39855753\"\"\" def __init__(self, paths): # paths must be list self.paths", "sys.path members to try. \"\"\" lenv = [] if lenv", "end of http://code.activestate.com/recipes/52224/ }}} def all_casings(input_string): \"\"\"Function to return a", "occurrence of `filename`. Returns full path to file if found", "first.upper(): for sub_casing in all_casings(input_string[1:]): yield first + sub_casing else:", "first + sub_casing else: for sub_casing in all_casings(input_string[1:]): yield first.lower()", "or None if unfound. \"\"\" file_found = False paths =", "except ImportError: pass else: break return modobj class add_path: \"\"\"https://stackoverflow.com/a/39855753\"\"\"", "for pth in reversed(self.paths): sys.path.insert(0, pth) def __exit__(self, exc_type, exc_value,", "all_casings(input_string): \"\"\"Function to return a generator of all lettercase permutations", "paths): # paths must be list self.paths = paths def", "import os import sys ## {{{ http://code.activestate.com/recipes/52224/ (r1) def search_file(filename,", "None if not. `lenv` is list (not str) of addl", "if found or None if unfound. \"\"\" file_found = False", "None ## end of http://code.activestate.com/recipes/52224/ }}} def all_casings(input_string): \"\"\"Function to", "generator of all lettercase permutations of *input_string*. \"\"\" if not", "yield first.lower() + sub_casing yield first.upper() + sub_casing def import_ignorecase(module,", "not. `lenv` is list (not str) of addl sys.path members", "to try. 
\"\"\" lenv = [] if lenv is None", "os.pathsep) for path in paths: if os.path.exists(os.path.join(path, filename)): file_found =", "+ sub_casing else: for sub_casing in all_casings(input_string[1:]): yield first.lower() +", "= False paths = search_path.split(os.pathsep) # paths = string.split(search_path, os.pathsep)", "`filename`. Returns full path to file if found or None", "os.pathsep divided `search_path`, find first occurrence of `filename`. Returns full", "modobj = None for per in list(all_casings(module)): try: modobj =" ]
[ "3, 'vector'), (0, (8,), 2, 'matrix'), (1, (), 1, 'matrix'),", "'matrix'), (1, (8,), 3, 'matrix'), ] ) def test_DCN(cross_num, hidden_size,", "1, 'matrix'), (1, (8,), 3, 'matrix'), ] ) def test_DCN(cross_num,", "test_DCNEstimator(cross_num, hidden_size, sparse_feature_num): if not Estimator_TEST_TF1 and tf.__version__ < \"2.2.0\":", "input_fn = get_test_data_estimator(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num) model = DCNEstimator(linear_feature_columns, dnn_feature_columns, cross_num=cross_num,", "check_estimator, \\ Estimator_TEST_TF1 @pytest.mark.parametrize( 'cross_num,hidden_size,sparse_feature_num,cross_parameterization', [(0, (8,), 2, 'vector'), (1,", "dense_feature_num=sparse_feature_num) model = DCNEstimator(linear_feature_columns, dnn_feature_columns, cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5) check_estimator(model, input_fn)", "and tf.__version__ < \"2.2.0\": return model_name = \"DCN\" sample_size =", "@pytest.mark.parametrize( 'cross_num,hidden_size,sparse_feature_num,cross_parameterization', [(0, (8,), 2, 'vector'), (1, (), 1, 'vector'),", "import check_model, get_test_data, SAMPLE_SIZE, get_test_data_estimator, check_estimator, \\ Estimator_TEST_TF1 @pytest.mark.parametrize( 'cross_num,hidden_size,sparse_feature_num,cross_parameterization',", "get_test_data, SAMPLE_SIZE, get_test_data_estimator, check_estimator, \\ Estimator_TEST_TF1 @pytest.mark.parametrize( 'cross_num,hidden_size,sparse_feature_num,cross_parameterization', [(0, (8,),", "[(0, (8,), 2, 'vector'), (1, (), 1, 'vector'), (1, (8,),", "hidden_size, sparse_feature_num, cross_parameterization): model_name = \"DCN\" sample_size = SAMPLE_SIZE x,", "# def test_DCN_invalid(embedding_size=8, cross_num=0, hidden_size=()): # feature_dim_dict = {'sparse': [SparseFeat('sparse_1',", "= get_test_data_estimator(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num) model = 
DCNEstimator(linear_feature_columns, dnn_feature_columns, cross_num=cross_num, dnn_hidden_units=hidden_size,", "SAMPLE_SIZE x, y, feature_columns = get_test_data(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num) model =", ") def test_DCNEstimator(cross_num, hidden_size, sparse_feature_num): if not Estimator_TEST_TF1 and tf.__version__", "DCN(None, embedding_size=embedding_size, cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5) if __name__ == \"__main__\": pass", "] ) def test_DCNEstimator(cross_num, hidden_size, sparse_feature_num): if not Estimator_TEST_TF1 and", "def test_DCN_invalid(embedding_size=8, cross_num=0, hidden_size=()): # feature_dim_dict = {'sparse': [SparseFeat('sparse_1', 2),", "feature_columns = get_test_data(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num) model = DCN(feature_columns, feature_columns, cross_num=cross_num,", "] ) def test_DCN(cross_num, hidden_size, sparse_feature_num, cross_parameterization): model_name = \"DCN\"", "sample_size = SAMPLE_SIZE linear_feature_columns, dnn_feature_columns, input_fn = get_test_data_estimator(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num)", "= {'sparse': [SparseFeat('sparse_1', 2), SparseFeat('sparse_2', 5), SparseFeat('sparse_3', 10)], # 'dense':", "get_test_data(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num) model = DCN(feature_columns, feature_columns, cross_num=cross_num, cross_parameterization=cross_parameterization, dnn_hidden_units=hidden_size,", "[SparseFeat('dense_1', 1), SparseFeat('dense_1', 1), SparseFeat('dense_1', 1)]} # with pytest.raises(ValueError): #", "feature_dim_dict = {'sparse': [SparseFeat('sparse_1', 2), SparseFeat('sparse_2', 5), SparseFeat('sparse_3', 10)], #", "= \"DCN\" sample_size = SAMPLE_SIZE x, y, feature_columns = get_test_data(sample_size,", "cross_num=cross_num, 
dnn_hidden_units=hidden_size, dnn_dropout=0.5) check_estimator(model, input_fn) # def test_DCN_invalid(embedding_size=8, cross_num=0, hidden_size=()):", "as tf from deepctr.estimator import DCNEstimator from deepctr.models import DCN", "1)]} # with pytest.raises(ValueError): # _ = DCN(None, embedding_size=embedding_size, cross_num=cross_num,", "(8,), 2, 'vector'), (1, (), 1, 'vector'), (1, (8,), 3,", "# _ = DCN(None, embedding_size=embedding_size, cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5) if __name__", "(1, (), 1, 'vector'), (1, (8,), 3, 'vector'), (0, (8,),", "\"DCN\" sample_size = SAMPLE_SIZE linear_feature_columns, dnn_feature_columns, input_fn = get_test_data_estimator(sample_size, sparse_feature_num=sparse_feature_num,", "\"2.2.0\": return model_name = \"DCN\" sample_size = SAMPLE_SIZE linear_feature_columns, dnn_feature_columns,", "sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num) model = DCN(feature_columns, feature_columns, cross_num=cross_num, cross_parameterization=cross_parameterization, dnn_hidden_units=hidden_size, dnn_dropout=0.5)", "'vector'), (0, (8,), 2, 'matrix'), (1, (), 1, 'matrix'), (1,", "{'sparse': [SparseFeat('sparse_1', 2), SparseFeat('sparse_2', 5), SparseFeat('sparse_3', 10)], # 'dense': [SparseFeat('dense_1',", "import pytest import tensorflow as tf from deepctr.estimator import DCNEstimator", "= SAMPLE_SIZE x, y, feature_columns = get_test_data(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num) model", "3, 'matrix'), ] ) def test_DCN(cross_num, hidden_size, sparse_feature_num, cross_parameterization): model_name", "(1, (), 1, 'matrix'), (1, (8,), 3, 'matrix'), ] )", "if not Estimator_TEST_TF1 and tf.__version__ < \"2.2.0\": return model_name =", "import tensorflow as tf from deepctr.estimator import DCNEstimator from deepctr.models", "= DCN(feature_columns, feature_columns, cross_num=cross_num, cross_parameterization=cross_parameterization, 
dnn_hidden_units=hidden_size, dnn_dropout=0.5) check_model(model, model_name, x,", "SAMPLE_SIZE, get_test_data_estimator, check_estimator, \\ Estimator_TEST_TF1 @pytest.mark.parametrize( 'cross_num,hidden_size,sparse_feature_num,cross_parameterization', [(0, (8,), 2,", "[(1, (8,), 3) ] ) def test_DCNEstimator(cross_num, hidden_size, sparse_feature_num): if", "'cross_num,hidden_size,sparse_feature_num', [(1, (8,), 3) ] ) def test_DCNEstimator(cross_num, hidden_size, sparse_feature_num):", "from deepctr.models import DCN from ..utils import check_model, get_test_data, SAMPLE_SIZE,", "check_model(model, model_name, x, y) @pytest.mark.parametrize( 'cross_num,hidden_size,sparse_feature_num', [(1, (8,), 3) ]", "cross_num=cross_num, cross_parameterization=cross_parameterization, dnn_hidden_units=hidden_size, dnn_dropout=0.5) check_model(model, model_name, x, y) @pytest.mark.parametrize( 'cross_num,hidden_size,sparse_feature_num',", "model = DCNEstimator(linear_feature_columns, dnn_feature_columns, cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5) check_estimator(model, input_fn) #", "DCN(feature_columns, feature_columns, cross_num=cross_num, cross_parameterization=cross_parameterization, dnn_hidden_units=hidden_size, dnn_dropout=0.5) check_model(model, model_name, x, y)", "2, 'vector'), (1, (), 1, 'vector'), (1, (8,), 3, 'vector'),", "deepctr.models import DCN from ..utils import check_model, get_test_data, SAMPLE_SIZE, get_test_data_estimator,", "# with pytest.raises(ValueError): # _ = DCN(None, embedding_size=embedding_size, cross_num=cross_num, dnn_hidden_units=hidden_size,", "hidden_size, sparse_feature_num): if not Estimator_TEST_TF1 and tf.__version__ < \"2.2.0\": return", "not Estimator_TEST_TF1 and tf.__version__ < \"2.2.0\": return model_name = \"DCN\"", "# feature_dim_dict = {'sparse': [SparseFeat('sparse_1', 2), SparseFeat('sparse_2', 5), SparseFeat('sparse_3', 10)],", "= get_test_data(sample_size, sparse_feature_num=sparse_feature_num, 
dense_feature_num=sparse_feature_num) model = DCN(feature_columns, feature_columns, cross_num=cross_num, cross_parameterization=cross_parameterization,", "tf from deepctr.estimator import DCNEstimator from deepctr.models import DCN from", "3) ] ) def test_DCNEstimator(cross_num, hidden_size, sparse_feature_num): if not Estimator_TEST_TF1", "input_fn) # def test_DCN_invalid(embedding_size=8, cross_num=0, hidden_size=()): # feature_dim_dict = {'sparse':", "tf.__version__ < \"2.2.0\": return model_name = \"DCN\" sample_size = SAMPLE_SIZE", "10)], # 'dense': [SparseFeat('dense_1', 1), SparseFeat('dense_1', 1), SparseFeat('dense_1', 1)]} #", "\\ Estimator_TEST_TF1 @pytest.mark.parametrize( 'cross_num,hidden_size,sparse_feature_num,cross_parameterization', [(0, (8,), 2, 'vector'), (1, (),", "1), SparseFeat('dense_1', 1)]} # with pytest.raises(ValueError): # _ = DCN(None,", "(), 1, 'vector'), (1, (8,), 3, 'vector'), (0, (8,), 2,", "hidden_size=()): # feature_dim_dict = {'sparse': [SparseFeat('sparse_1', 2), SparseFeat('sparse_2', 5), SparseFeat('sparse_3',", "y, feature_columns = get_test_data(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num) model = DCN(feature_columns, feature_columns,", "cross_num=0, hidden_size=()): # feature_dim_dict = {'sparse': [SparseFeat('sparse_1', 2), SparseFeat('sparse_2', 5),", "2), SparseFeat('sparse_2', 5), SparseFeat('sparse_3', 10)], # 'dense': [SparseFeat('dense_1', 1), SparseFeat('dense_1',", "deepctr.estimator import DCNEstimator from deepctr.models import DCN from ..utils import", "sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num) model = DCNEstimator(linear_feature_columns, dnn_feature_columns, cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5) check_estimator(model,", "DCN from ..utils import check_model, get_test_data, SAMPLE_SIZE, get_test_data_estimator, check_estimator, \\", "'matrix'), (1, (), 1, 'matrix'), (1, (8,), 3, 'matrix'), ]", "y) 
@pytest.mark.parametrize( 'cross_num,hidden_size,sparse_feature_num', [(1, (8,), 3) ] ) def test_DCNEstimator(cross_num,", "pytest import tensorflow as tf from deepctr.estimator import DCNEstimator from", "cross_parameterization): model_name = \"DCN\" sample_size = SAMPLE_SIZE x, y, feature_columns", "= \"DCN\" sample_size = SAMPLE_SIZE linear_feature_columns, dnn_feature_columns, input_fn = get_test_data_estimator(sample_size,", "with pytest.raises(ValueError): # _ = DCN(None, embedding_size=embedding_size, cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5)", "(1, (8,), 3, 'matrix'), ] ) def test_DCN(cross_num, hidden_size, sparse_feature_num,", "linear_feature_columns, dnn_feature_columns, input_fn = get_test_data_estimator(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num) model = DCNEstimator(linear_feature_columns,", "(8,), 3, 'vector'), (0, (8,), 2, 'matrix'), (1, (), 1,", "SparseFeat('dense_1', 1), SparseFeat('dense_1', 1)]} # with pytest.raises(ValueError): # _ =", "from ..utils import check_model, get_test_data, SAMPLE_SIZE, get_test_data_estimator, check_estimator, \\ Estimator_TEST_TF1", "x, y) @pytest.mark.parametrize( 'cross_num,hidden_size,sparse_feature_num', [(1, (8,), 3) ] ) def", "(8,), 3, 'matrix'), ] ) def test_DCN(cross_num, hidden_size, sparse_feature_num, cross_parameterization):", "DCNEstimator(linear_feature_columns, dnn_feature_columns, cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5) check_estimator(model, input_fn) # def test_DCN_invalid(embedding_size=8,", "dnn_dropout=0.5) check_model(model, model_name, x, y) @pytest.mark.parametrize( 'cross_num,hidden_size,sparse_feature_num', [(1, (8,), 3)", "dnn_hidden_units=hidden_size, dnn_dropout=0.5) check_model(model, model_name, x, y) @pytest.mark.parametrize( 'cross_num,hidden_size,sparse_feature_num', [(1, (8,),", "Estimator_TEST_TF1 @pytest.mark.parametrize( 
'cross_num,hidden_size,sparse_feature_num,cross_parameterization', [(0, (8,), 2, 'vector'), (1, (), 1,", "def test_DCNEstimator(cross_num, hidden_size, sparse_feature_num): if not Estimator_TEST_TF1 and tf.__version__ <", "(8,), 3) ] ) def test_DCNEstimator(cross_num, hidden_size, sparse_feature_num): if not", "get_test_data_estimator(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num) model = DCNEstimator(linear_feature_columns, dnn_feature_columns, cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5)", "2, 'matrix'), (1, (), 1, 'matrix'), (1, (8,), 3, 'matrix'),", "SparseFeat('dense_1', 1)]} # with pytest.raises(ValueError): # _ = DCN(None, embedding_size=embedding_size,", "= SAMPLE_SIZE linear_feature_columns, dnn_feature_columns, input_fn = get_test_data_estimator(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num) model", "cross_parameterization=cross_parameterization, dnn_hidden_units=hidden_size, dnn_dropout=0.5) check_model(model, model_name, x, y) @pytest.mark.parametrize( 'cross_num,hidden_size,sparse_feature_num', [(1,", "1), SparseFeat('dense_1', 1), SparseFeat('dense_1', 1)]} # with pytest.raises(ValueError): # _", "SAMPLE_SIZE linear_feature_columns, dnn_feature_columns, input_fn = get_test_data_estimator(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num) model =", "test_DCN_invalid(embedding_size=8, cross_num=0, hidden_size=()): # feature_dim_dict = {'sparse': [SparseFeat('sparse_1', 2), SparseFeat('sparse_2',", "import DCNEstimator from deepctr.models import DCN from ..utils import check_model,", "[SparseFeat('sparse_1', 2), SparseFeat('sparse_2', 5), SparseFeat('sparse_3', 10)], # 'dense': [SparseFeat('dense_1', 1),", "DCNEstimator from deepctr.models import DCN from ..utils import check_model, get_test_data,", "<filename>tests/models/DCN_test.py import pytest import tensorflow as tf from deepctr.estimator import", 
"pytest.raises(ValueError): # _ = DCN(None, embedding_size=embedding_size, cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5) if", "test_DCN(cross_num, hidden_size, sparse_feature_num, cross_parameterization): model_name = \"DCN\" sample_size = SAMPLE_SIZE", "1, 'vector'), (1, (8,), 3, 'vector'), (0, (8,), 2, 'matrix'),", "'cross_num,hidden_size,sparse_feature_num,cross_parameterization', [(0, (8,), 2, 'vector'), (1, (), 1, 'vector'), (1,", "'vector'), (1, (8,), 3, 'vector'), (0, (8,), 2, 'matrix'), (1,", "sample_size = SAMPLE_SIZE x, y, feature_columns = get_test_data(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num)", "dnn_hidden_units=hidden_size, dnn_dropout=0.5) check_estimator(model, input_fn) # def test_DCN_invalid(embedding_size=8, cross_num=0, hidden_size=()): #", "'matrix'), ] ) def test_DCN(cross_num, hidden_size, sparse_feature_num, cross_parameterization): model_name =", "< \"2.2.0\": return model_name = \"DCN\" sample_size = SAMPLE_SIZE linear_feature_columns,", "get_test_data_estimator, check_estimator, \\ Estimator_TEST_TF1 @pytest.mark.parametrize( 'cross_num,hidden_size,sparse_feature_num,cross_parameterization', [(0, (8,), 2, 'vector'),", "tensorflow as tf from deepctr.estimator import DCNEstimator from deepctr.models import", "SparseFeat('sparse_2', 5), SparseFeat('sparse_3', 10)], # 'dense': [SparseFeat('dense_1', 1), SparseFeat('dense_1', 1),", "(8,), 2, 'matrix'), (1, (), 1, 'matrix'), (1, (8,), 3,", "feature_columns, cross_num=cross_num, cross_parameterization=cross_parameterization, dnn_hidden_units=hidden_size, dnn_dropout=0.5) check_model(model, model_name, x, y) @pytest.mark.parametrize(", "_ = DCN(None, embedding_size=embedding_size, cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5) if __name__ ==", "# 'dense': [SparseFeat('dense_1', 1), SparseFeat('dense_1', 1), SparseFeat('dense_1', 1)]} # with", "\"DCN\" sample_size = SAMPLE_SIZE x, y, feature_columns = 
get_test_data(sample_size, sparse_feature_num=sparse_feature_num,", ") def test_DCN(cross_num, hidden_size, sparse_feature_num, cross_parameterization): model_name = \"DCN\" sample_size", "model_name, x, y) @pytest.mark.parametrize( 'cross_num,hidden_size,sparse_feature_num', [(1, (8,), 3) ] )", "..utils import check_model, get_test_data, SAMPLE_SIZE, get_test_data_estimator, check_estimator, \\ Estimator_TEST_TF1 @pytest.mark.parametrize(", "@pytest.mark.parametrize( 'cross_num,hidden_size,sparse_feature_num', [(1, (8,), 3) ] ) def test_DCNEstimator(cross_num, hidden_size,", "def test_DCN(cross_num, hidden_size, sparse_feature_num, cross_parameterization): model_name = \"DCN\" sample_size =", "import DCN from ..utils import check_model, get_test_data, SAMPLE_SIZE, get_test_data_estimator, check_estimator,", "check_estimator(model, input_fn) # def test_DCN_invalid(embedding_size=8, cross_num=0, hidden_size=()): # feature_dim_dict =", "'vector'), (1, (), 1, 'vector'), (1, (8,), 3, 'vector'), (0,", "model_name = \"DCN\" sample_size = SAMPLE_SIZE x, y, feature_columns =", "(1, (8,), 3, 'vector'), (0, (8,), 2, 'matrix'), (1, (),", "dense_feature_num=sparse_feature_num) model = DCN(feature_columns, feature_columns, cross_num=cross_num, cross_parameterization=cross_parameterization, dnn_hidden_units=hidden_size, dnn_dropout=0.5) check_model(model,", "'dense': [SparseFeat('dense_1', 1), SparseFeat('dense_1', 1), SparseFeat('dense_1', 1)]} # with pytest.raises(ValueError):", "= DCN(None, embedding_size=embedding_size, cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5) if __name__ == \"__main__\":", "sparse_feature_num, cross_parameterization): model_name = \"DCN\" sample_size = SAMPLE_SIZE x, y,", "dnn_feature_columns, cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5) check_estimator(model, input_fn) # def test_DCN_invalid(embedding_size=8, cross_num=0,", "from deepctr.estimator import DCNEstimator from deepctr.models import DCN from 
..utils", "(0, (8,), 2, 'matrix'), (1, (), 1, 'matrix'), (1, (8,),", "5), SparseFeat('sparse_3', 10)], # 'dense': [SparseFeat('dense_1', 1), SparseFeat('dense_1', 1), SparseFeat('dense_1',", "return model_name = \"DCN\" sample_size = SAMPLE_SIZE linear_feature_columns, dnn_feature_columns, input_fn", "SparseFeat('sparse_3', 10)], # 'dense': [SparseFeat('dense_1', 1), SparseFeat('dense_1', 1), SparseFeat('dense_1', 1)]}", "check_model, get_test_data, SAMPLE_SIZE, get_test_data_estimator, check_estimator, \\ Estimator_TEST_TF1 @pytest.mark.parametrize( 'cross_num,hidden_size,sparse_feature_num,cross_parameterization', [(0,", "model = DCN(feature_columns, feature_columns, cross_num=cross_num, cross_parameterization=cross_parameterization, dnn_hidden_units=hidden_size, dnn_dropout=0.5) check_model(model, model_name,", "sparse_feature_num): if not Estimator_TEST_TF1 and tf.__version__ < \"2.2.0\": return model_name", "model_name = \"DCN\" sample_size = SAMPLE_SIZE linear_feature_columns, dnn_feature_columns, input_fn =", "dnn_dropout=0.5) check_estimator(model, input_fn) # def test_DCN_invalid(embedding_size=8, cross_num=0, hidden_size=()): # feature_dim_dict", "= DCNEstimator(linear_feature_columns, dnn_feature_columns, cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5) check_estimator(model, input_fn) # def", "(), 1, 'matrix'), (1, (8,), 3, 'matrix'), ] ) def", "Estimator_TEST_TF1 and tf.__version__ < \"2.2.0\": return model_name = \"DCN\" sample_size", "x, y, feature_columns = get_test_data(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num) model = DCN(feature_columns,", "dnn_feature_columns, input_fn = get_test_data_estimator(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num) model = DCNEstimator(linear_feature_columns, dnn_feature_columns," ]
[ "subprocess.check_call( [\"gcloud\", \"auth\", \"activate-service-account\", \"--key-file\", credentials] ) @backoff.on_exception(backoff.expo, GSUtilResumableUploadException, max_tries=3)", "authenticate(): try: credentials = os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] except KeyError: pass else: subprocess.check_call(", "\"rsync\", \"-r\", dir_, dest]) def cp(source, destination): subprocess.check_call([\"gsutil\", \"cp\", source,", "as e: output = _decode_to_str_if_bytes(e.output) if \"ResumableUploadException\" in output: raise", "[\"gsutil\", \"-m\", \"rsync\", \"-r\", \"-e\", d, dest], stderr=subprocess.STDOUT ) except", "= os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] except KeyError: pass else: subprocess.check_call( [\"gcloud\", \"auth\", \"activate-service-account\",", ") except subprocess.CalledProcessError as e: output = _decode_to_str_if_bytes(e.output) if \"ResumableUploadException\"", "dest): os.makedirs(dest, exist_ok=True) subprocess.check_call([\"gsutil\", \"-m\", \"rsync\", \"-r\", dir_, dest]) def", "because gsutil logs upload progress there. 
subprocess.check_output( [\"gsutil\", \"-m\", \"rsync\",", "d, dest], stderr=subprocess.STDOUT ) except subprocess.CalledProcessError as e: output =", "class GSUtilResumableUploadException(Exception): pass def _decode_to_str_if_bytes(s, encoding=\"utf-8\"): if isinstance(s, bytes): return", "<filename>workflows/post_process_run/fv3post/gsutil.py<gh_stars>1-10 import os import subprocess import backoff class GSUtilResumableUploadException(Exception): pass", "except KeyError: pass else: subprocess.check_call( [\"gcloud\", \"auth\", \"activate-service-account\", \"--key-file\", credentials]", "backoff class GSUtilResumableUploadException(Exception): pass def _decode_to_str_if_bytes(s, encoding=\"utf-8\"): if isinstance(s, bytes):", "e: output = _decode_to_str_if_bytes(e.output) if \"ResumableUploadException\" in output: raise GSUtilResumableUploadException()", "output = _decode_to_str_if_bytes(e.output) if \"ResumableUploadException\" in output: raise GSUtilResumableUploadException() else:", "Pipe stderr to stdout because gsutil logs upload progress there.", "\"rsync\", \"-r\", \"-e\", d, dest], stderr=subprocess.STDOUT ) except subprocess.CalledProcessError as", "except subprocess.CalledProcessError as e: output = _decode_to_str_if_bytes(e.output) if \"ResumableUploadException\" in", "isinstance(s, bytes): return s.decode(encoding) else: return s def authenticate(): try:", "try: credentials = os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] except KeyError: pass else: subprocess.check_call( [\"gcloud\",", "logs upload progress there. 
subprocess.check_output( [\"gsutil\", \"-m\", \"rsync\", \"-r\", \"-e\",", "else: subprocess.check_call( [\"gcloud\", \"auth\", \"activate-service-account\", \"--key-file\", credentials] ) @backoff.on_exception(backoff.expo, GSUtilResumableUploadException,", "\"-m\", \"rsync\", \"-r\", \"-e\", d, dest], stderr=subprocess.STDOUT ) except subprocess.CalledProcessError", "\"-r\", dir_, dest]) def cp(source, destination): subprocess.check_call([\"gsutil\", \"cp\", source, destination])", "KeyError: pass else: subprocess.check_call( [\"gcloud\", \"auth\", \"activate-service-account\", \"--key-file\", credentials] )", "\"ResumableUploadException\" in output: raise GSUtilResumableUploadException() else: raise e def download_directory(dir_,", "GSUtilResumableUploadException() else: raise e def download_directory(dir_, dest): os.makedirs(dest, exist_ok=True) subprocess.check_call([\"gsutil\",", "credentials] ) @backoff.on_exception(backoff.expo, GSUtilResumableUploadException, max_tries=3) def upload_dir(d, dest): try: #", "os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] except KeyError: pass else: subprocess.check_call( [\"gcloud\", \"auth\", \"activate-service-account\", \"--key-file\",", "if isinstance(s, bytes): return s.decode(encoding) else: return s def authenticate():", "if \"ResumableUploadException\" in output: raise GSUtilResumableUploadException() else: raise e def", "upload progress there. subprocess.check_output( [\"gsutil\", \"-m\", \"rsync\", \"-r\", \"-e\", d,", "\"--key-file\", credentials] ) @backoff.on_exception(backoff.expo, GSUtilResumableUploadException, max_tries=3) def upload_dir(d, dest): try:", "try: # Pipe stderr to stdout because gsutil logs upload", "s.decode(encoding) else: return s def authenticate(): try: credentials = os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]", "stderr to stdout because gsutil logs upload progress there. 
subprocess.check_output(", "@backoff.on_exception(backoff.expo, GSUtilResumableUploadException, max_tries=3) def upload_dir(d, dest): try: # Pipe stderr", "GSUtilResumableUploadException(Exception): pass def _decode_to_str_if_bytes(s, encoding=\"utf-8\"): if isinstance(s, bytes): return s.decode(encoding)", "def upload_dir(d, dest): try: # Pipe stderr to stdout because", "import backoff class GSUtilResumableUploadException(Exception): pass def _decode_to_str_if_bytes(s, encoding=\"utf-8\"): if isinstance(s,", "s def authenticate(): try: credentials = os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] except KeyError: pass", "gsutil logs upload progress there. subprocess.check_output( [\"gsutil\", \"-m\", \"rsync\", \"-r\",", "dest): try: # Pipe stderr to stdout because gsutil logs", "return s def authenticate(): try: credentials = os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] except KeyError:", "def authenticate(): try: credentials = os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] except KeyError: pass else:", "import os import subprocess import backoff class GSUtilResumableUploadException(Exception): pass def", "upload_dir(d, dest): try: # Pipe stderr to stdout because gsutil", "stdout because gsutil logs upload progress there. subprocess.check_output( [\"gsutil\", \"-m\",", "output: raise GSUtilResumableUploadException() else: raise e def download_directory(dir_, dest): os.makedirs(dest,", "e def download_directory(dir_, dest): os.makedirs(dest, exist_ok=True) subprocess.check_call([\"gsutil\", \"-m\", \"rsync\", \"-r\",", "encoding=\"utf-8\"): if isinstance(s, bytes): return s.decode(encoding) else: return s def", "credentials = os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] except KeyError: pass else: subprocess.check_call( [\"gcloud\", \"auth\",", "\"-r\", \"-e\", d, dest], stderr=subprocess.STDOUT ) except subprocess.CalledProcessError as e:", "progress there. 
subprocess.check_output( [\"gsutil\", \"-m\", \"rsync\", \"-r\", \"-e\", d, dest],", ") @backoff.on_exception(backoff.expo, GSUtilResumableUploadException, max_tries=3) def upload_dir(d, dest): try: # Pipe", "os import subprocess import backoff class GSUtilResumableUploadException(Exception): pass def _decode_to_str_if_bytes(s,", "subprocess import backoff class GSUtilResumableUploadException(Exception): pass def _decode_to_str_if_bytes(s, encoding=\"utf-8\"): if", "\"activate-service-account\", \"--key-file\", credentials] ) @backoff.on_exception(backoff.expo, GSUtilResumableUploadException, max_tries=3) def upload_dir(d, dest):", "def download_directory(dir_, dest): os.makedirs(dest, exist_ok=True) subprocess.check_call([\"gsutil\", \"-m\", \"rsync\", \"-r\", dir_,", "stderr=subprocess.STDOUT ) except subprocess.CalledProcessError as e: output = _decode_to_str_if_bytes(e.output) if", "\"-m\", \"rsync\", \"-r\", dir_, dest]) def cp(source, destination): subprocess.check_call([\"gsutil\", \"cp\",", "pass else: subprocess.check_call( [\"gcloud\", \"auth\", \"activate-service-account\", \"--key-file\", credentials] ) @backoff.on_exception(backoff.expo,", "os.makedirs(dest, exist_ok=True) subprocess.check_call([\"gsutil\", \"-m\", \"rsync\", \"-r\", dir_, dest]) def cp(source,", "subprocess.check_call([\"gsutil\", \"-m\", \"rsync\", \"-r\", dir_, dest]) def cp(source, destination): subprocess.check_call([\"gsutil\",", "subprocess.check_output( [\"gsutil\", \"-m\", \"rsync\", \"-r\", \"-e\", d, dest], stderr=subprocess.STDOUT )", "else: raise e def download_directory(dir_, dest): os.makedirs(dest, exist_ok=True) subprocess.check_call([\"gsutil\", \"-m\",", "subprocess.CalledProcessError as e: output = _decode_to_str_if_bytes(e.output) if \"ResumableUploadException\" in output:", "else: return s def authenticate(): try: credentials = os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] except", "pass def _decode_to_str_if_bytes(s, encoding=\"utf-8\"): if isinstance(s, bytes): 
return s.decode(encoding) else:", "# Pipe stderr to stdout because gsutil logs upload progress", "def _decode_to_str_if_bytes(s, encoding=\"utf-8\"): if isinstance(s, bytes): return s.decode(encoding) else: return", "to stdout because gsutil logs upload progress there. subprocess.check_output( [\"gsutil\",", "exist_ok=True) subprocess.check_call([\"gsutil\", \"-m\", \"rsync\", \"-r\", dir_, dest]) def cp(source, destination):", "_decode_to_str_if_bytes(s, encoding=\"utf-8\"): if isinstance(s, bytes): return s.decode(encoding) else: return s", "\"-e\", d, dest], stderr=subprocess.STDOUT ) except subprocess.CalledProcessError as e: output", "raise GSUtilResumableUploadException() else: raise e def download_directory(dir_, dest): os.makedirs(dest, exist_ok=True)", "_decode_to_str_if_bytes(e.output) if \"ResumableUploadException\" in output: raise GSUtilResumableUploadException() else: raise e", "[\"gcloud\", \"auth\", \"activate-service-account\", \"--key-file\", credentials] ) @backoff.on_exception(backoff.expo, GSUtilResumableUploadException, max_tries=3) def", "import subprocess import backoff class GSUtilResumableUploadException(Exception): pass def _decode_to_str_if_bytes(s, encoding=\"utf-8\"):", "in output: raise GSUtilResumableUploadException() else: raise e def download_directory(dir_, dest):", "raise e def download_directory(dir_, dest): os.makedirs(dest, exist_ok=True) subprocess.check_call([\"gsutil\", \"-m\", \"rsync\",", "return s.decode(encoding) else: return s def authenticate(): try: credentials =", "dest], stderr=subprocess.STDOUT ) except subprocess.CalledProcessError as e: output = _decode_to_str_if_bytes(e.output)", "bytes): return s.decode(encoding) else: return s def authenticate(): try: credentials", "there. 
subprocess.check_output( [\"gsutil\", \"-m\", \"rsync\", \"-r\", \"-e\", d, dest], stderr=subprocess.STDOUT", "= _decode_to_str_if_bytes(e.output) if \"ResumableUploadException\" in output: raise GSUtilResumableUploadException() else: raise", "download_directory(dir_, dest): os.makedirs(dest, exist_ok=True) subprocess.check_call([\"gsutil\", \"-m\", \"rsync\", \"-r\", dir_, dest])", "\"auth\", \"activate-service-account\", \"--key-file\", credentials] ) @backoff.on_exception(backoff.expo, GSUtilResumableUploadException, max_tries=3) def upload_dir(d,", "max_tries=3) def upload_dir(d, dest): try: # Pipe stderr to stdout", "GSUtilResumableUploadException, max_tries=3) def upload_dir(d, dest): try: # Pipe stderr to" ]
[ "'comment']): bug_id = args['bug_id'] comment = args['comment'] ticket.bug_update(application, comment, bug_id)", "environment) description = notes.get_release_notes() release_num = notes.last_tag output.log('Release Notes', True)", "required=False) parser_create.add_argument( '-e', '--environment', help='Enter: STAGE, PROD', default='STAGE', required=False) parser_create.add_argument(", "bugzilla.mozilla.org \\ (without switch posts to: bugzilla-dev.allizom.org)', action='store_true', default=False, required=False)", "subparsers.add_parser('NEW', help='Create a NEW deployment ticket.') parser_create.add_argument( '-o', '--repo-owner', help='Example:", "= 'NEW' output = OutputHelper() output.log('Create deployment ticket', True, True)", "posts to: bugzilla-dev.allizom.org)', action='store_true', default=False, required=False) subparsers = parser.add_subparsers(help='Ticket action')", "in ['repo_owner', 'application', 'environment']): # noqa repo_owner = args['repo_owner'] environment", "parser.add_argument( '-B', '--bugzilla-mozilla', help='Set this switch to post directly to", "required=True) args = vars(parser.parse_args()) application = args['application'] bugzilla_mozilla = args['bugzilla_mozilla']", "tickets in \\ Bugzilla', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '-a', '--application', help='Example: loop-server',", "output.log('Create deployment ticket', True, True) notes = ReleaseNotes(repo_owner, application, environment)", "application = args['application'] bugzilla_mozilla = args['bugzilla_mozilla'] ticket = BugzillaRESTClient(bugzilla_mozilla) if", "parser = argparse.ArgumentParser( description='Scripts for creating / updating deployment tickets", "repo_owner = args['repo_owner'] environment = args['environment'].lower() if args['cc_mail']: cc_mail =", "'--repo-owner', help='Example: mozilla-services', default='mozilla-services', required=False) parser_create.add_argument( '-e', '--environment', help='Enter: 
STAGE,", "args = vars(parser.parse_args()) application = args['application'] bugzilla_mozilla = args['bugzilla_mozilla'] ticket", "in ['bug_id', 'comment']): bug_id = args['bug_id'] comment = args['comment'] ticket.bug_update(application,", "bugzilla-dev.allizom.org)', action='store_true', default=False, required=False) subparsers = parser.add_subparsers(help='Ticket action') # parser", "args for key in ['bug_id', 'comment']): bug_id = args['bug_id'] comment", "for key in ['bug_id', 'comment']): bug_id = args['bug_id'] comment =", "ticket - {create} option parser_create = \\ subparsers.add_parser('NEW', help='Create a", "args['cc_mail'] else: cc_mail = '' status = 'NEW' output =", "'--bug-id', help='Example: 1234567', required=False) parser_update.add_argument( '-c', '--comment', help='Enter: <your bug", "default='STAGE', required=False) parser_create.add_argument( '-m', '--cc-mail', help='Example: <EMAIL> \\ NOTE: must", "deployment ticket' ) parser_update.add_argument( '-i', '--bug-id', help='Example: 1234567', required=False) parser_update.add_argument(", "parser for ticket - {upate} option parser_update = subparsers.add_parser( 'UPDATE',", "NEW deployment ticket.') parser_create.add_argument( '-o', '--repo-owner', help='Example: mozilla-services', default='mozilla-services', required=False)", "updating deployment tickets in \\ Bugzilla', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '-a', '--application',", "['repo_owner', 'application', 'environment']): # noqa repo_owner = args['repo_owner'] environment =", "['bug_id', 'comment']): bug_id = args['bug_id'] comment = args['comment'] ticket.bug_update(application, comment,", "subparsers.add_parser( 'UPDATE', help='UPDATE an existing deployment ticket' ) parser_update.add_argument( '-i',", "/ updating deployment tickets in \\ Bugzilla', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '-a',", "required=False) # parser for ticket - {upate} option 
parser_update =", "= args['bugzilla_mozilla'] ticket = BugzillaRESTClient(bugzilla_mozilla) if all(key in args for", "# parser for ticket - {create} option parser_create = \\", "for ticket - {create} option parser_create = \\ subparsers.add_parser('NEW', help='Create", "parser.add_subparsers(help='Ticket action') # parser for ticket - {create} option parser_create", "loop-server', required=True) parser.add_argument( '-B', '--bugzilla-mozilla', help='Set this switch to post", "'UPDATE', help='UPDATE an existing deployment ticket' ) parser_update.add_argument( '-i', '--bug-id',", "import BugzillaRESTClient from deploy_tix.release_notes import ReleaseNotes from output_helper import OutputHelper", "directly to bugzilla.mozilla.org \\ (without switch posts to: bugzilla-dev.allizom.org)', action='store_true',", "'--environment', help='Enter: STAGE, PROD', default='STAGE', required=False) parser_create.add_argument( '-m', '--cc-mail', help='Example:", "\\ NOTE: must be a registered username!', default='', required=False) #", "help='Example: 1234567', required=False) parser_update.add_argument( '-c', '--comment', help='Enter: <your bug comment>',", "ticket.') parser_create.add_argument( '-o', '--repo-owner', help='Example: mozilla-services', default='mozilla-services', required=False) parser_create.add_argument( '-e',", "all(key in args for key in ['repo_owner', 'application', 'environment']): #", "= ReleaseNotes(repo_owner, application, environment) description = notes.get_release_notes() release_num = notes.last_tag", "for creating / updating deployment tickets in \\ Bugzilla', formatter_class=argparse.ArgumentDefaultsHelpFormatter)", "(without switch posts to: bugzilla-dev.allizom.org)', action='store_true', default=False, required=False) subparsers =", "argparse from deploy_tix.bugzilla_rest_client import BugzillaRESTClient from deploy_tix.release_notes import ReleaseNotes from", "STAGE, PROD', default='STAGE', required=False) parser_create.add_argument( '-m', 
'--cc-mail', help='Example: <EMAIL> \\", "'-B', '--bugzilla-mozilla', help='Set this switch to post directly to bugzilla.mozilla.org", "must be a registered username!', default='', required=False) # parser for", "= vars(parser.parse_args()) application = args['application'] bugzilla_mozilla = args['bugzilla_mozilla'] ticket =", "args['bugzilla_mozilla'] ticket = BugzillaRESTClient(bugzilla_mozilla) if all(key in args for key", "'' status = 'NEW' output = OutputHelper() output.log('Create deployment ticket',", "a NEW deployment ticket.') parser_create.add_argument( '-o', '--repo-owner', help='Example: mozilla-services', default='mozilla-services',", "status = 'NEW' output = OutputHelper() output.log('Create deployment ticket', True,", "parser.add_argument( '-a', '--application', help='Example: loop-server', required=True) parser.add_argument( '-B', '--bugzilla-mozilla', help='Set", "bug_id = args['bug_id'] comment = args['comment'] ticket.bug_update(application, comment, bug_id) if", "to: bugzilla-dev.allizom.org)', action='store_true', default=False, required=False) subparsers = parser.add_subparsers(help='Ticket action') #", "= notes.last_tag output.log('Release Notes', True) output.log(description) ticket.bug_create( release_num, application, environment,", "comment = args['comment'] ticket.bug_update(application, comment, bug_id) if all(key in args", "'NEW' output = OutputHelper() output.log('Create deployment ticket', True, True) notes", "'-a', '--application', help='Example: loop-server', required=True) parser.add_argument( '-B', '--bugzilla-mozilla', help='Set this", "bug_id) if all(key in args for key in ['repo_owner', 'application',", "= args['environment'].lower() if args['cc_mail']: cc_mail = args['cc_mail'] else: cc_mail =", "key in ['repo_owner', 'application', 'environment']): # noqa repo_owner = args['repo_owner']", "required=False) subparsers = parser.add_subparsers(help='Ticket action') # parser for ticket -", "default='', required=False) # parser 
for ticket - {upate} option parser_update", "'-o', '--repo-owner', help='Example: mozilla-services', default='mozilla-services', required=False) parser_create.add_argument( '-e', '--environment', help='Enter:", "bugzilla_mozilla = args['bugzilla_mozilla'] ticket = BugzillaRESTClient(bugzilla_mozilla) if all(key in args", "environment = args['environment'].lower() if args['cc_mail']: cc_mail = args['cc_mail'] else: cc_mail", "help='Example: <EMAIL> \\ NOTE: must be a registered username!', default='',", "creating / updating deployment tickets in \\ Bugzilla', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument(", "= '' status = 'NEW' output = OutputHelper() output.log('Create deployment", "from output_helper import OutputHelper def main(args=None): parser = argparse.ArgumentParser( description='Scripts", "BugzillaRESTClient from deploy_tix.release_notes import ReleaseNotes from output_helper import OutputHelper def", "parser_create.add_argument( '-e', '--environment', help='Enter: STAGE, PROD', default='STAGE', required=False) parser_create.add_argument( '-m',", "parser_create = \\ subparsers.add_parser('NEW', help='Create a NEW deployment ticket.') parser_create.add_argument(", "= args['cc_mail'] else: cc_mail = '' status = 'NEW' output", "OutputHelper def main(args=None): parser = argparse.ArgumentParser( description='Scripts for creating /", "all(key in args for key in ['bug_id', 'comment']): bug_id =", "parser_create.add_argument( '-m', '--cc-mail', help='Example: <EMAIL> \\ NOTE: must be a", "username!', default='', required=False) # parser for ticket - {upate} option", "'--application', help='Example: loop-server', required=True) parser.add_argument( '-B', '--bugzilla-mozilla', help='Set this switch", "switch to post directly to bugzilla.mozilla.org \\ (without switch posts", "ticket', True, True) notes = ReleaseNotes(repo_owner, application, environment) description =", ") parser_update.add_argument( '-i', '--bug-id', help='Example: 
1234567', required=False) parser_update.add_argument( '-c', '--comment',", "main(args=None): parser = argparse.ArgumentParser( description='Scripts for creating / updating deployment", "deploy_tix.release_notes import ReleaseNotes from output_helper import OutputHelper def main(args=None): parser", "if args['cc_mail']: cc_mail = args['cc_mail'] else: cc_mail = '' status", "this switch to post directly to bugzilla.mozilla.org \\ (without switch", "= parser.add_subparsers(help='Ticket action') # parser for ticket - {create} option", "PROD', default='STAGE', required=False) parser_create.add_argument( '-m', '--cc-mail', help='Example: <EMAIL> \\ NOTE:", "ticket = BugzillaRESTClient(bugzilla_mozilla) if all(key in args for key in", "'--bugzilla-mozilla', help='Set this switch to post directly to bugzilla.mozilla.org \\", "# noqa repo_owner = args['repo_owner'] environment = args['environment'].lower() if args['cc_mail']:", "\\ subparsers.add_parser('NEW', help='Create a NEW deployment ticket.') parser_create.add_argument( '-o', '--repo-owner',", "'--comment', help='Enter: <your bug comment>', required=True) args = vars(parser.parse_args()) application", "mozilla-services', default='mozilla-services', required=False) parser_create.add_argument( '-e', '--environment', help='Enter: STAGE, PROD', default='STAGE',", "a registered username!', default='', required=False) # parser for ticket -", "for ticket - {upate} option parser_update = subparsers.add_parser( 'UPDATE', help='UPDATE", "bug comment>', required=True) args = vars(parser.parse_args()) application = args['application'] bugzilla_mozilla", "deployment ticket', True, True) notes = ReleaseNotes(repo_owner, application, environment) description", "ReleaseNotes(repo_owner, application, environment) description = notes.get_release_notes() release_num = notes.last_tag output.log('Release", "def main(args=None): parser = argparse.ArgumentParser( description='Scripts for creating / updating", "comment>', required=True) 
args = vars(parser.parse_args()) application = args['application'] bugzilla_mozilla =", "in \\ Bugzilla', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '-a', '--application', help='Example: loop-server', required=True)", "- {upate} option parser_update = subparsers.add_parser( 'UPDATE', help='UPDATE an existing", "noqa repo_owner = args['repo_owner'] environment = args['environment'].lower() if args['cc_mail']: cc_mail", "deployment ticket.') parser_create.add_argument( '-o', '--repo-owner', help='Example: mozilla-services', default='mozilla-services', required=False) parser_create.add_argument(", "1234567', required=False) parser_update.add_argument( '-c', '--comment', help='Enter: <your bug comment>', required=True)", "existing deployment ticket' ) parser_update.add_argument( '-i', '--bug-id', help='Example: 1234567', required=False)", "description = notes.get_release_notes() release_num = notes.last_tag output.log('Release Notes', True) output.log(description)", "= args['comment'] ticket.bug_update(application, comment, bug_id) if all(key in args for", "to bugzilla.mozilla.org \\ (without switch posts to: bugzilla-dev.allizom.org)', action='store_true', default=False,", "notes.get_release_notes() release_num = notes.last_tag output.log('Release Notes', True) output.log(description) ticket.bug_create( release_num,", "application, environment) description = notes.get_release_notes() release_num = notes.last_tag output.log('Release Notes',", "'environment']): # noqa repo_owner = args['repo_owner'] environment = args['environment'].lower() if", "from deploy_tix.bugzilla_rest_client import BugzillaRESTClient from deploy_tix.release_notes import ReleaseNotes from output_helper", "- {create} option parser_create = \\ subparsers.add_parser('NEW', help='Create a NEW", "= args['application'] bugzilla_mozilla = args['bugzilla_mozilla'] ticket = BugzillaRESTClient(bugzilla_mozilla) if all(key", "switch posts to: bugzilla-dev.allizom.org)', 
action='store_true', default=False, required=False) subparsers = parser.add_subparsers(help='Ticket", "args['cc_mail']: cc_mail = args['cc_mail'] else: cc_mail = '' status =", "action') # parser for ticket - {create} option parser_create =", "= notes.get_release_notes() release_num = notes.last_tag output.log('Release Notes', True) output.log(description) ticket.bug_create(", "\\ (without switch posts to: bugzilla-dev.allizom.org)', action='store_true', default=False, required=False) subparsers", "to post directly to bugzilla.mozilla.org \\ (without switch posts to:", "{upate} option parser_update = subparsers.add_parser( 'UPDATE', help='UPDATE an existing deployment", "comment, bug_id) if all(key in args for key in ['repo_owner',", "OutputHelper() output.log('Create deployment ticket', True, True) notes = ReleaseNotes(repo_owner, application,", "\\ Bugzilla', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '-a', '--application', help='Example: loop-server', required=True) parser.add_argument(", "# parser for ticket - {upate} option parser_update = subparsers.add_parser(", "notes = ReleaseNotes(repo_owner, application, environment) description = notes.get_release_notes() release_num =", "for key in ['repo_owner', 'application', 'environment']): # noqa repo_owner =", "NOTE: must be a registered username!', default='', required=False) # parser", "help='Example: loop-server', required=True) parser.add_argument( '-B', '--bugzilla-mozilla', help='Set this switch to", "default='mozilla-services', required=False) parser_create.add_argument( '-e', '--environment', help='Enter: STAGE, PROD', default='STAGE', required=False)", "from deploy_tix.release_notes import ReleaseNotes from output_helper import OutputHelper def main(args=None):", "<EMAIL> \\ NOTE: must be a registered username!', default='', required=False)", "= argparse.ArgumentParser( description='Scripts for creating / updating deployment tickets in", "True, True) notes = 
ReleaseNotes(repo_owner, application, environment) description = notes.get_release_notes()", "help='Enter: STAGE, PROD', default='STAGE', required=False) parser_create.add_argument( '-m', '--cc-mail', help='Example: <EMAIL>", "Notes', True) output.log(description) ticket.bug_create( release_num, application, environment, status, description, cc_mail", "if all(key in args for key in ['repo_owner', 'application', 'environment']):", "import argparse from deploy_tix.bugzilla_rest_client import BugzillaRESTClient from deploy_tix.release_notes import ReleaseNotes", "{create} option parser_create = \\ subparsers.add_parser('NEW', help='Create a NEW deployment", "'-i', '--bug-id', help='Example: 1234567', required=False) parser_update.add_argument( '-c', '--comment', help='Enter: <your", "ReleaseNotes from output_helper import OutputHelper def main(args=None): parser = argparse.ArgumentParser(", "True) notes = ReleaseNotes(repo_owner, application, environment) description = notes.get_release_notes() release_num", "parser_create.add_argument( '-o', '--repo-owner', help='Example: mozilla-services', default='mozilla-services', required=False) parser_create.add_argument( '-e', '--environment',", "ticket' ) parser_update.add_argument( '-i', '--bug-id', help='Example: 1234567', required=False) parser_update.add_argument( '-c',", "deploy_tix.bugzilla_rest_client import BugzillaRESTClient from deploy_tix.release_notes import ReleaseNotes from output_helper import", "in args for key in ['bug_id', 'comment']): bug_id = args['bug_id']", "True) output.log(description) ticket.bug_create( release_num, application, environment, status, description, cc_mail )", "description='Scripts for creating / updating deployment tickets in \\ Bugzilla',", "be a registered username!', default='', required=False) # parser for ticket", "Bugzilla', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '-a', '--application', help='Example: loop-server', required=True) 
parser.add_argument( '-B',", "required=True) parser.add_argument( '-B', '--bugzilla-mozilla', help='Set this switch to post directly", "help='Example: mozilla-services', default='mozilla-services', required=False) parser_create.add_argument( '-e', '--environment', help='Enter: STAGE, PROD',", "args['environment'].lower() if args['cc_mail']: cc_mail = args['cc_mail'] else: cc_mail = ''", "= BugzillaRESTClient(bugzilla_mozilla) if all(key in args for key in ['bug_id',", "help='Enter: <your bug comment>', required=True) args = vars(parser.parse_args()) application =", "parser_update = subparsers.add_parser( 'UPDATE', help='UPDATE an existing deployment ticket' )", "in args for key in ['repo_owner', 'application', 'environment']): # noqa", "help='Set this switch to post directly to bugzilla.mozilla.org \\ (without", "args['repo_owner'] environment = args['environment'].lower() if args['cc_mail']: cc_mail = args['cc_mail'] else:", "key in ['bug_id', 'comment']): bug_id = args['bug_id'] comment = args['comment']", "= OutputHelper() output.log('Create deployment ticket', True, True) notes = ReleaseNotes(repo_owner,", "notes.last_tag output.log('Release Notes', True) output.log(description) ticket.bug_create( release_num, application, environment, status,", "else: cc_mail = '' status = 'NEW' output = OutputHelper()", "option parser_update = subparsers.add_parser( 'UPDATE', help='UPDATE an existing deployment ticket'", "argparse.ArgumentParser( description='Scripts for creating / updating deployment tickets in \\", "action='store_true', default=False, required=False) subparsers = parser.add_subparsers(help='Ticket action') # parser for", "help='UPDATE an existing deployment ticket' ) parser_update.add_argument( '-i', '--bug-id', help='Example:", "import ReleaseNotes from output_helper import OutputHelper def main(args=None): parser =", "post directly to bugzilla.mozilla.org \\ (without switch posts to: bugzilla-dev.allizom.org)',", "= args['repo_owner'] environment = 
args['environment'].lower() if args['cc_mail']: cc_mail = args['cc_mail']", "import OutputHelper def main(args=None): parser = argparse.ArgumentParser( description='Scripts for creating", "registered username!', default='', required=False) # parser for ticket - {upate}", "vars(parser.parse_args()) application = args['application'] bugzilla_mozilla = args['bugzilla_mozilla'] ticket = BugzillaRESTClient(bugzilla_mozilla)", "'--cc-mail', help='Example: <EMAIL> \\ NOTE: must be a registered username!',", "= \\ subparsers.add_parser('NEW', help='Create a NEW deployment ticket.') parser_create.add_argument( '-o',", "= args['bug_id'] comment = args['comment'] ticket.bug_update(application, comment, bug_id) if all(key", "cc_mail = '' status = 'NEW' output = OutputHelper() output.log('Create", "args['application'] bugzilla_mozilla = args['bugzilla_mozilla'] ticket = BugzillaRESTClient(bugzilla_mozilla) if all(key in", "if all(key in args for key in ['bug_id', 'comment']): bug_id", "args['bug_id'] comment = args['comment'] ticket.bug_update(application, comment, bug_id) if all(key in", "required=False) parser_create.add_argument( '-m', '--cc-mail', help='Example: <EMAIL> \\ NOTE: must be", "ticket.bug_update(application, comment, bug_id) if all(key in args for key in", "args for key in ['repo_owner', 'application', 'environment']): # noqa repo_owner", "'-e', '--environment', help='Enter: STAGE, PROD', default='STAGE', required=False) parser_create.add_argument( '-m', '--cc-mail',", "parser_update.add_argument( '-c', '--comment', help='Enter: <your bug comment>', required=True) args =", "output = OutputHelper() output.log('Create deployment ticket', True, True) notes =", "release_num = notes.last_tag output.log('Release Notes', True) output.log(description) ticket.bug_create( release_num, application,", "cc_mail = args['cc_mail'] else: cc_mail = '' status = 'NEW'", "'-m', '--cc-mail', help='Example: <EMAIL> \\ NOTE: must be a registered", "parser_update.add_argument( '-i', 
'--bug-id', help='Example: 1234567', required=False) parser_update.add_argument( '-c', '--comment', help='Enter:", "output.log('Release Notes', True) output.log(description) ticket.bug_create( release_num, application, environment, status, description,", "output_helper import OutputHelper def main(args=None): parser = argparse.ArgumentParser( description='Scripts for", "'application', 'environment']): # noqa repo_owner = args['repo_owner'] environment = args['environment'].lower()", "'-c', '--comment', help='Enter: <your bug comment>', required=True) args = vars(parser.parse_args())", "option parser_create = \\ subparsers.add_parser('NEW', help='Create a NEW deployment ticket.')", "subparsers = parser.add_subparsers(help='Ticket action') # parser for ticket - {create}", "required=False) parser_update.add_argument( '-c', '--comment', help='Enter: <your bug comment>', required=True) args", "ticket - {upate} option parser_update = subparsers.add_parser( 'UPDATE', help='UPDATE an", "default=False, required=False) subparsers = parser.add_subparsers(help='Ticket action') # parser for ticket", "args['comment'] ticket.bug_update(application, comment, bug_id) if all(key in args for key", "formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '-a', '--application', help='Example: loop-server', required=True) parser.add_argument( '-B', '--bugzilla-mozilla',", "deployment tickets in \\ Bugzilla', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '-a', '--application', help='Example:", "parser for ticket - {create} option parser_create = \\ subparsers.add_parser('NEW',", "<your bug comment>', required=True) args = vars(parser.parse_args()) application = args['application']", "= subparsers.add_parser( 'UPDATE', help='UPDATE an existing deployment ticket' ) parser_update.add_argument(", "help='Create a NEW deployment ticket.') parser_create.add_argument( '-o', '--repo-owner', help='Example: mozilla-services',", "an existing 
deployment ticket' ) parser_update.add_argument( '-i', '--bug-id', help='Example: 1234567',", "BugzillaRESTClient(bugzilla_mozilla) if all(key in args for key in ['bug_id', 'comment']):" ]
[ "range(nspheres): s = sphere( pos = scene.mouse.getclick().pos, #(i*0.6 - 0.7,0.5", "obstacles: spheres = [] for i in range(nspheres): s =", ") band.p = band.pos * 0 scene.range = 1.5 scene.autoscale", "- force*dt # color based on \"stretch\": blue -> white", "s = sphere( pos = scene.mouse.getclick().pos, #(i*0.6 - 0.7,0.5 +", "way to make 'damped springs' than this! band.p = band.p", ") spheres.append( s ) if floor: below = less(band.pos[:,1],-1) band.p[:,1]", "/ dt) if scene.mouse.clicked: i = len(spheres) s = sphere(", "# nail down left endpoint #band.p[-1] = 0 # nail", "* 0.5, 0, 2 ) # blue (compressed) -> white", "'damped springs' than this! band.p = band.p * damp #band.p[0]", "on point n from point n+1 (to the right): length", "9.8 dt = 0.002 k = 3 damp = (1-0)**dt", ") spheres.append( s ) while True: rate(1.0 / dt) if", "0.02 m = 0.010 * restlength g = 9.8 dt", "0 # nail down right endpoint band.pos = band.pos +", "scene.mouse.getclick().pos, #(i*0.6 - 0.7,0.5 + i*0.1,0), radius = 0.25, color", "= where( below, -1, band.pos[:,1] ) # need a more", "On a two-button mouse, middle is left + right. \"\"\")", "# color based on \"stretch\": blue -> white -> red", "curve( x = arange(-1,1,restlength), y = 1, radius = 0.02", "= 0.25, color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) ) spheres.append( s ) while", "band.pos - s.pos ) / dist surface = s.pos +", "= 0.02 ) band.p = band.pos * 0 scene.range =", "= band.p[:-1] + force*dt band.p[1:] = band.p[1:] - force*dt #", "radius = 0.02 ) band.p = band.pos * 0 scene.range", "dist/restlength * 0.5, 0, 2 ) # blue (compressed) ->", "= (band.pos[1:] - band.pos[:-1]) dist = sqrt(sum(length*length,-1)) force = k", "<NAME> scene.title = \"Drape\" restlength = 0.02 m = 0.010", "+ i*0.1,0), radius = 0.25, color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) ) spheres.append(", "= band.p[1:] - force*dt # color based on \"stretch\": blue", "under falling string. 
Right button drag or Ctrl-drag to rotate", "restlength g = 9.8 dt = 0.002 k = 3", "a more physical way to make 'damped springs' than this!", "point n from point n+1 (to the right): length =", "floor: below = less(band.pos[:,1],-1) band.p[:,1] = where( below, 0, band.p[:,1]", "right): length = (band.pos[1:] - band.pos[:-1]) dist = sqrt(sum(length*length,-1)) force", "white -> red c = clip( dist/restlength * 0.5, 0,", "= 0.02 m = 0.010 * restlength g = 9.8", "3 floor = 0 # Create the stringy thing: band", "for s in spheres: dist = mag( band.pos - s.pos", "(compressed) -> white (relaxed) -> red (tension) band.red[1:] = where(", "= 0 # nail down right endpoint band.pos = band.pos", "force[:,newaxis] band.p[:-1] = band.p[:-1] + force*dt band.p[1:] = band.p[1:] -", "#(i*0.6 - 0.7,0.5 + i*0.1,0), radius = 0.25, color =", "or out. On a two-button mouse, middle is left +", "radius = 0.25, color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) ) spheres.append( s )", "= (abs(sin(i)),cos(i)**2,(i%10)/10.0) ) spheres.append( s ) while True: rate(1.0 /", "color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) ) spheres.append( s ) if floor: below", "= 3 damp = (1-0)**dt nspheres = 3 floor =", "spheres.append( s ) if floor: below = less(band.pos[:,1],-1) band.p[:,1] =", "= sqrt(sum(length*length,-1)) force = k * ( dist - restlength", "\"\"\") # <NAME> scene.title = \"Drape\" restlength = 0.02 m", "x = arange(-1,1,restlength), y = 1, radius = 0.02 )", "- band.pos[:-1]) dist = sqrt(sum(length*length,-1)) force = k * (", "blue (compressed) -> white (relaxed) -> red (tension) band.red[1:] =", "left endpoint #band.p[-1] = 0 # nail down right endpoint", ") while True: rate(1.0 / dt) if scene.mouse.clicked: i =", "dist - restlength ) force = length/dist[:,newaxis] * force[:,newaxis] band.p[:-1]", "pos = scene.mouse.getclick().pos, #(i*0.6 - 0.7,0.5 + i*0.1,0), radius =", ") if sometrue(inside): R = ( band.pos - s.pos )", "-> red (tension) band.red[1:] = where( less(c,1), c, 1 )", "# nail down right endpoint 
band.pos = band.pos + band.p/m*dt", "2-c ) band.blue[1:] = where( less(c,1), 1, 2-c ) for", "g * dt # force[n] is the force on point", "dist, s.radius ) if sometrue(inside): R = ( band.pos -", "1, radius = 0.02 ) band.p = band.pos * 0", "+ force*dt band.p[1:] = band.p[1:] - force*dt # color based", "c, 1 ) band.green[1:] = where( less(c,1), c, 2-c )", "= 0 # Create the stringy thing: band = curve(", "3 damp = (1-0)**dt nspheres = 3 floor = 0", "0, 2 ) # blue (compressed) -> white (relaxed) ->", "mag( band.pos - s.pos )[:,newaxis] inside = less( dist, s.radius", ") for s in spheres: dist = mag( band.pos -", "sphere( pos = scene.mouse.getclick().pos, radius = 0.25, color = (abs(sin(i)),cos(i)**2,(i%10)/10.0)", "= 0 # nail down left endpoint #band.p[-1] = 0", "0.7,0.5 + i*0.1,0), radius = 0.25, color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) )", "color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) ) spheres.append( s ) while True: rate(1.0", "band = curve( x = arange(-1,1,restlength), y = 1, radius", ") band.pos[:,1] = where( below, -1, band.pos[:,1] ) # need", "# Let the user position obstacles: spheres = [] for", "\"Drape\" restlength = 0.02 m = 0.010 * restlength g", "white (relaxed) -> red (tension) band.red[1:] = where( less(c,1), c,", "len(spheres) s = sphere( pos = scene.mouse.getclick().pos, radius = 0.25,", "floor = 0 # Create the stringy thing: band =", "Create the stringy thing: band = curve( x = arange(-1,1,restlength),", "length = (band.pos[1:] - band.pos[:-1]) dist = sqrt(sum(length*length,-1)) force =", "left + right. \"\"\") # <NAME> scene.title = \"Drape\" restlength", "s ) while True: rate(1.0 / dt) if scene.mouse.clicked: i", "to place spheres under falling string. Right button drag or", "spheres = [] for i in range(nspheres): s = sphere(", "band.p * damp #band.p[0] = 0 # nail down left", "below, 0, band.p[:,1] ) band.pos[:,1] = where( below, -1, band.pos[:,1]", "nspheres = 3 floor = 0 # Create the stringy", "Alt-drag to zoom in or out. 
On a two-button mouse,", "is left + right. \"\"\") # <NAME> scene.title = \"Drape\"", "thing: band = curve( x = arange(-1,1,restlength), y = 1,", "clip( dist/restlength * 0.5, 0, 2 ) # blue (compressed)", "0.002 k = 3 damp = (1-0)**dt nspheres = 3", "n from point n+1 (to the right): length = (band.pos[1:]", "( dist - restlength ) force = length/dist[:,newaxis] * force[:,newaxis]", "pos = scene.mouse.getclick().pos, radius = 0.25, color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) )", "= band.p * damp #band.p[0] = 0 # nail down", "m = 0.010 * restlength g = 9.8 dt =", "middle is left + right. \"\"\") # <NAME> scene.title =", "(abs(sin(i)),cos(i)**2,(i%10)/10.0) ) spheres.append( s ) while True: rate(1.0 / dt)", "band.p[:,1] - m * g * dt # force[n] is", "c, 2-c ) band.blue[1:] = where( less(c,1), 1, 2-c )", "= mag( band.pos - s.pos )[:,newaxis] inside = less( dist,", "c = clip( dist/restlength * 0.5, 0, 2 ) #", "dist surface = s.pos + (s.radius)*R band.pos = surface*inside +", "-> red c = clip( dist/restlength * 0.5, 0, 2", "# <NAME> scene.title = \"Drape\" restlength = 0.02 m =", "* print(\"\"\" Click to place spheres under falling string. 
Right", "band.pos + band.p/m*dt #gravity band.p[:,1] = band.p[:,1] - m *", "less(band.pos[:,1],-1) band.p[:,1] = where( below, 0, band.p[:,1] ) band.pos[:,1] =", "+ band.pos*(1-inside) pdotR = sum(asarray(band.p)*asarray(R),-1) band.p = band.p - R*pdotR[:,newaxis]*inside", "band.p[1:] - force*dt # color based on \"stretch\": blue ->", "s.pos )[:,newaxis] inside = less( dist, s.radius ) if sometrue(inside):", "point n+1 (to the right): length = (band.pos[1:] - band.pos[:-1])", "= (abs(sin(i)),cos(i)**2,(i%10)/10.0) ) spheres.append( s ) if floor: below =", "(1-0)**dt nspheres = 3 floor = 0 # Create the", "= clip( dist/restlength * 0.5, 0, 2 ) # blue", ") band.blue[1:] = where( less(c,1), 1, 2-c ) for s", "below = less(band.pos[:,1],-1) band.p[:,1] = where( below, 0, band.p[:,1] )", "dt) if scene.mouse.clicked: i = len(spheres) s = sphere( pos", "surface*inside + band.pos*(1-inside) pdotR = sum(asarray(band.p)*asarray(R),-1) band.p = band.p -", "scene.mouse.getclick().pos, radius = 0.25, color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) ) spheres.append( s", "drag or Ctrl-drag to rotate view. Middle button drag or", "= 9.8 dt = 0.002 k = 3 damp =", "- 0.7,0.5 + i*0.1,0), radius = 0.25, color = (abs(sin(i)),cos(i)**2,(i%10)/10.0)", "(s.radius)*R band.pos = surface*inside + band.pos*(1-inside) pdotR = sum(asarray(band.p)*asarray(R),-1) band.p", "more physical way to make 'damped springs' than this! band.p", "the force on point n from point n+1 (to the", "in or out. On a two-button mouse, middle is left", "band.pos[:-1]) dist = sqrt(sum(length*length,-1)) force = k * ( dist", "= [] for i in range(nspheres): s = sphere( pos", "- restlength ) force = length/dist[:,newaxis] * force[:,newaxis] band.p[:-1] =", "in range(nspheres): s = sphere( pos = scene.mouse.getclick().pos, #(i*0.6 -", ") if floor: below = less(band.pos[:,1],-1) band.p[:,1] = where( below,", "band.pos = surface*inside + band.pos*(1-inside) pdotR = sum(asarray(band.p)*asarray(R),-1) band.p =", "rotate view. 
Middle button drag or Alt-drag to zoom in", "True: rate(1.0 / dt) if scene.mouse.clicked: i = len(spheres) s", "sometrue(inside): R = ( band.pos - s.pos ) / dist", "s.pos + (s.radius)*R band.pos = surface*inside + band.pos*(1-inside) pdotR =", "#band.p[-1] = 0 # nail down right endpoint band.pos =", "nail down right endpoint band.pos = band.pos + band.p/m*dt #gravity", "less(c,1), c, 2-c ) band.blue[1:] = where( less(c,1), 1, 2-c", "inside = less( dist, s.radius ) if sometrue(inside): R =", "on \"stretch\": blue -> white -> red c = clip(", "force[n] is the force on point n from point n+1", "\"stretch\": blue -> white -> red c = clip( dist/restlength", "i in range(nspheres): s = sphere( pos = scene.mouse.getclick().pos, #(i*0.6", "# need a more physical way to make 'damped springs'", "color based on \"stretch\": blue -> white -> red c", "* restlength g = 9.8 dt = 0.002 k =", "if sometrue(inside): R = ( band.pos - s.pos ) /", "from visual import * print(\"\"\" Click to place spheres under", "= where( less(c,1), c, 2-c ) band.blue[1:] = where( less(c,1),", "force*dt band.p[1:] = band.p[1:] - force*dt # color based on", "string. Right button drag or Ctrl-drag to rotate view. Middle", "= k * ( dist - restlength ) force =", ") force = length/dist[:,newaxis] * force[:,newaxis] band.p[:-1] = band.p[:-1] +", "= 0.010 * restlength g = 9.8 dt = 0.002", "import * print(\"\"\" Click to place spheres under falling string.", ") / dist surface = s.pos + (s.radius)*R band.pos =", "the right): length = (band.pos[1:] - band.pos[:-1]) dist = sqrt(sum(length*length,-1))", "s in spheres: dist = mag( band.pos - s.pos )[:,newaxis]", "arange(-1,1,restlength), y = 1, radius = 0.02 ) band.p =", "= s.pos + (s.radius)*R band.pos = surface*inside + band.pos*(1-inside) pdotR", "( band.pos - s.pos ) / dist surface = s.pos", "this! 
band.p = band.p * damp #band.p[0] = 0 #", "damp = (1-0)**dt nspheres = 3 floor = 0 #", "s.pos ) / dist surface = s.pos + (s.radius)*R band.pos", "= ( band.pos - s.pos ) / dist surface =", "= 0.25, color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) ) spheres.append( s ) if", "= where( below, 0, band.p[:,1] ) band.pos[:,1] = where( below,", "scene.mouse.clicked: i = len(spheres) s = sphere( pos = scene.mouse.getclick().pos,", "band.red[1:] = where( less(c,1), c, 1 ) band.green[1:] = where(", "band.p[:,1] = where( below, 0, band.p[:,1] ) band.pos[:,1] = where(", "force = k * ( dist - restlength ) force", "= where( less(c,1), c, 1 ) band.green[1:] = where( less(c,1),", "= sphere( pos = scene.mouse.getclick().pos, radius = 0.25, color =", "0.5, 0, 2 ) # blue (compressed) -> white (relaxed)", "force*dt # color based on \"stretch\": blue -> white ->", "below, -1, band.pos[:,1] ) # need a more physical way", "1 ) band.green[1:] = where( less(c,1), c, 2-c ) band.blue[1:]", "dist = mag( band.pos - s.pos )[:,newaxis] inside = less(", "print(\"\"\" Click to place spheres under falling string. Right button", "sqrt(sum(length*length,-1)) force = k * ( dist - restlength )", "(relaxed) -> red (tension) band.red[1:] = where( less(c,1), c, 1", "red (tension) band.red[1:] = where( less(c,1), c, 1 ) band.green[1:]", "(abs(sin(i)),cos(i)**2,(i%10)/10.0) ) spheres.append( s ) if floor: below = less(band.pos[:,1],-1)", "#band.p[0] = 0 # nail down left endpoint #band.p[-1] =", "down right endpoint band.pos = band.pos + band.p/m*dt #gravity band.p[:,1]", "# force[n] is the force on point n from point", "blue -> white -> red c = clip( dist/restlength *", "scene.autoscale = 0 # Let the user position obstacles: spheres", "2-c ) for s in spheres: dist = mag( band.pos", "= 1, radius = 0.02 ) band.p = band.pos *", "= \"Drape\" restlength = 0.02 m = 0.010 * restlength", "dt = 0.002 k = 3 damp = (1-0)**dt nspheres", "two-button mouse, middle is left + right. 
\"\"\") # <NAME>", "(tension) band.red[1:] = where( less(c,1), c, 1 ) band.green[1:] =", "#gravity band.p[:,1] = band.p[:,1] - m * g * dt", "dist = sqrt(sum(length*length,-1)) force = k * ( dist -", "k = 3 damp = (1-0)**dt nspheres = 3 floor", "force = length/dist[:,newaxis] * force[:,newaxis] band.p[:-1] = band.p[:-1] + force*dt", "Click to place spheres under falling string. Right button drag", "band.p[:,1] ) band.pos[:,1] = where( below, -1, band.pos[:,1] ) #", "button drag or Alt-drag to zoom in or out. On", "or Ctrl-drag to rotate view. Middle button drag or Alt-drag", "= length/dist[:,newaxis] * force[:,newaxis] band.p[:-1] = band.p[:-1] + force*dt band.p[1:]", "= scene.mouse.getclick().pos, #(i*0.6 - 0.7,0.5 + i*0.1,0), radius = 0.25,", "Middle button drag or Alt-drag to zoom in or out.", "= 1.5 scene.autoscale = 0 # Let the user position", "= band.pos + band.p/m*dt #gravity band.p[:,1] = band.p[:,1] - m", "spheres under falling string. Right button drag or Ctrl-drag to", "sphere( pos = scene.mouse.getclick().pos, #(i*0.6 - 0.7,0.5 + i*0.1,0), radius", "R = ( band.pos - s.pos ) / dist surface", "a two-button mouse, middle is left + right. \"\"\") #", "* ( dist - restlength ) force = length/dist[:,newaxis] *", "s.radius ) if sometrue(inside): R = ( band.pos - s.pos", "band.p = band.pos * 0 scene.range = 1.5 scene.autoscale =", "band.p/m*dt #gravity band.p[:,1] = band.p[:,1] - m * g *", "the stringy thing: band = curve( x = arange(-1,1,restlength), y", "-1, band.pos[:,1] ) # need a more physical way to", "s = sphere( pos = scene.mouse.getclick().pos, radius = 0.25, color", "while True: rate(1.0 / dt) if scene.mouse.clicked: i = len(spheres)", "= scene.mouse.getclick().pos, radius = 0.25, color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) ) spheres.append(", "where( below, 0, band.p[:,1] ) band.pos[:,1] = where( below, -1,", "g = 9.8 dt = 0.002 k = 3 damp", "for i in range(nspheres): s = sphere( pos = scene.mouse.getclick().pos,", "springs' than this! 
band.p = band.p * damp #band.p[0] =", "length/dist[:,newaxis] * force[:,newaxis] band.p[:-1] = band.p[:-1] + force*dt band.p[1:] =", "zoom in or out. On a two-button mouse, middle is", "1, 2-c ) for s in spheres: dist = mag(", "- m * g * dt # force[n] is the", "0.25, color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) ) spheres.append( s ) if floor:", "endpoint #band.p[-1] = 0 # nail down right endpoint band.pos", ") band.green[1:] = where( less(c,1), c, 2-c ) band.blue[1:] =", "i*0.1,0), radius = 0.25, color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) ) spheres.append( s", "physical way to make 'damped springs' than this! band.p =", "restlength = 0.02 m = 0.010 * restlength g =", "= surface*inside + band.pos*(1-inside) pdotR = sum(asarray(band.p)*asarray(R),-1) band.p = band.p", "right endpoint band.pos = band.pos + band.p/m*dt #gravity band.p[:,1] =", "* g * dt # force[n] is the force on", "based on \"stretch\": blue -> white -> red c =", "0.010 * restlength g = 9.8 dt = 0.002 k", "(to the right): length = (band.pos[1:] - band.pos[:-1]) dist =", "* damp #band.p[0] = 0 # nail down left endpoint", "where( less(c,1), c, 1 ) band.green[1:] = where( less(c,1), c,", "view. Middle button drag or Alt-drag to zoom in or", "than this! band.p = band.p * damp #band.p[0] = 0", "0 # Let the user position obstacles: spheres = []", "band.pos[:,1] ) # need a more physical way to make", "= where( less(c,1), 1, 2-c ) for s in spheres:", "2 ) # blue (compressed) -> white (relaxed) -> red", "mouse, middle is left + right. \"\"\") # <NAME> scene.title", "Right button drag or Ctrl-drag to rotate view. 
Middle button", "scene.range = 1.5 scene.autoscale = 0 # Let the user", "rate(1.0 / dt) if scene.mouse.clicked: i = len(spheres) s =", "down left endpoint #band.p[-1] = 0 # nail down right", "where( less(c,1), 1, 2-c ) for s in spheres: dist", "= 0.002 k = 3 damp = (1-0)**dt nspheres =", "force on point n from point n+1 (to the right):", "band.green[1:] = where( less(c,1), c, 2-c ) band.blue[1:] = where(", "visual import * print(\"\"\" Click to place spheres under falling", "= band.p[:,1] - m * g * dt # force[n]", "out. On a two-button mouse, middle is left + right.", "if floor: below = less(band.pos[:,1],-1) band.p[:,1] = where( below, 0,", "# blue (compressed) -> white (relaxed) -> red (tension) band.red[1:]", "m * g * dt # force[n] is the force", "place spheres under falling string. Right button drag or Ctrl-drag", "position obstacles: spheres = [] for i in range(nspheres): s", "make 'damped springs' than this! band.p = band.p * damp", "where( less(c,1), c, 2-c ) band.blue[1:] = where( less(c,1), 1,", "right. \"\"\") # <NAME> scene.title = \"Drape\" restlength = 0.02", "= band.pos * 0 scene.range = 1.5 scene.autoscale = 0", "Ctrl-drag to rotate view. 
Middle button drag or Alt-drag to", "band.pos[:,1] = where( below, -1, band.pos[:,1] ) # need a", "band.p[:-1] = band.p[:-1] + force*dt band.p[1:] = band.p[1:] - force*dt", "= len(spheres) s = sphere( pos = scene.mouse.getclick().pos, radius =", "0 # nail down left endpoint #band.p[-1] = 0 #", "0.25, color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) ) spheres.append( s ) while True:", "[] for i in range(nspheres): s = sphere( pos =", "band.pos = band.pos + band.p/m*dt #gravity band.p[:,1] = band.p[:,1] -", "endpoint band.pos = band.pos + band.p/m*dt #gravity band.p[:,1] = band.p[:,1]", "less( dist, s.radius ) if sometrue(inside): R = ( band.pos", "= (1-0)**dt nspheres = 3 floor = 0 # Create", "* dt # force[n] is the force on point n", "(band.pos[1:] - band.pos[:-1]) dist = sqrt(sum(length*length,-1)) force = k *", "need a more physical way to make 'damped springs' than", "band.p[:-1] + force*dt band.p[1:] = band.p[1:] - force*dt # color", "the user position obstacles: spheres = [] for i in", "from point n+1 (to the right): length = (band.pos[1:] -", "is the force on point n from point n+1 (to", "dt # force[n] is the force on point n from", "= arange(-1,1,restlength), y = 1, radius = 0.02 ) band.p", "button drag or Ctrl-drag to rotate view. Middle button drag", "drag or Alt-drag to zoom in or out. On a", "Let the user position obstacles: spheres = [] for i", "or Alt-drag to zoom in or out. On a two-button", "i = len(spheres) s = sphere( pos = scene.mouse.getclick().pos, radius", "= curve( x = arange(-1,1,restlength), y = 1, radius =", "spheres.append( s ) while True: rate(1.0 / dt) if scene.mouse.clicked:", ")[:,newaxis] inside = less( dist, s.radius ) if sometrue(inside): R", "0.02 ) band.p = band.pos * 0 scene.range = 1.5", "s ) if floor: below = less(band.pos[:,1],-1) band.p[:,1] = where(", "to zoom in or out. On a two-button mouse, middle", "less(c,1), 1, 2-c ) for s in spheres: dist =", "to rotate view. 
Middle button drag or Alt-drag to zoom", "to make 'damped springs' than this! band.p = band.p *", "= 3 floor = 0 # Create the stringy thing:", ") # blue (compressed) -> white (relaxed) -> red (tension)", "= less(band.pos[:,1],-1) band.p[:,1] = where( below, 0, band.p[:,1] ) band.pos[:,1]", "in spheres: dist = mag( band.pos - s.pos )[:,newaxis] inside", "band.p[:,1] = band.p[:,1] - m * g * dt #", "* force[:,newaxis] band.p[:-1] = band.p[:-1] + force*dt band.p[1:] = band.p[1:]", "less(c,1), c, 1 ) band.green[1:] = where( less(c,1), c, 2-c", "band.pos - s.pos )[:,newaxis] inside = less( dist, s.radius )", "band.pos * 0 scene.range = 1.5 scene.autoscale = 0 #", "nail down left endpoint #band.p[-1] = 0 # nail down", "band.p[1:] = band.p[1:] - force*dt # color based on \"stretch\":", "/ dist surface = s.pos + (s.radius)*R band.pos = surface*inside", "surface = s.pos + (s.radius)*R band.pos = surface*inside + band.pos*(1-inside)", "y = 1, radius = 0.02 ) band.p = band.pos", "k * ( dist - restlength ) force = length/dist[:,newaxis]", "0 # Create the stringy thing: band = curve( x", "falling string. Right button drag or Ctrl-drag to rotate view.", "+ (s.radius)*R band.pos = surface*inside + band.pos*(1-inside) pdotR = sum(asarray(band.p)*asarray(R),-1)", "1.5 scene.autoscale = 0 # Let the user position obstacles:", "+ right. 
\"\"\") # <NAME> scene.title = \"Drape\" restlength =", "stringy thing: band = curve( x = arange(-1,1,restlength), y =", "damp #band.p[0] = 0 # nail down left endpoint #band.p[-1]", "scene.title = \"Drape\" restlength = 0.02 m = 0.010 *", "= less( dist, s.radius ) if sometrue(inside): R = (", ") # need a more physical way to make 'damped", "- s.pos )[:,newaxis] inside = less( dist, s.radius ) if", "0 scene.range = 1.5 scene.autoscale = 0 # Let the", "red c = clip( dist/restlength * 0.5, 0, 2 )", "* 0 scene.range = 1.5 scene.autoscale = 0 # Let", "if scene.mouse.clicked: i = len(spheres) s = sphere( pos =", "band.p = band.p * damp #band.p[0] = 0 # nail", "0, band.p[:,1] ) band.pos[:,1] = where( below, -1, band.pos[:,1] )", "-> white -> red c = clip( dist/restlength * 0.5,", "# Create the stringy thing: band = curve( x =", "n+1 (to the right): length = (band.pos[1:] - band.pos[:-1]) dist", "band.blue[1:] = where( less(c,1), 1, 2-c ) for s in", "- s.pos ) / dist surface = s.pos + (s.radius)*R", "= sphere( pos = scene.mouse.getclick().pos, #(i*0.6 - 0.7,0.5 + i*0.1,0),", "restlength ) force = length/dist[:,newaxis] * force[:,newaxis] band.p[:-1] = band.p[:-1]", "-> white (relaxed) -> red (tension) band.red[1:] = where( less(c,1),", "spheres: dist = mag( band.pos - s.pos )[:,newaxis] inside =", "user position obstacles: spheres = [] for i in range(nspheres):", "= 0 # Let the user position obstacles: spheres =", "where( below, -1, band.pos[:,1] ) # need a more physical", "+ band.p/m*dt #gravity band.p[:,1] = band.p[:,1] - m * g" ]
[ "[]) else: numba_index_common_dtype = left_index_dtype return index_dtypes_match, numba_index_common_dtype def gen_impl_generator(codegen,", "return isinstance(var, types.NoneType) or var.literal_value is value elif isinstance(value, type(bool)):", "arrays of specified types can be compared\"\"\" return ((ty_left ==", "# and/or other materials provided with the distribution. # #", "invalid, e.g.: Method nsmallest(). The object n given: bool expected:", "provided that the following conditions are met: # # Redistributions", "materials provided with the distribution. # # THIS SOFTWARE IS", "= func_name def raise_exc(self, data, expected_types, name=''): \"\"\" Raise exception", "= right.dtype index_dtypes_match = left_index_dtype == right_index_dtype if not index_dtypes_match:", "ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,", "isinstance(ty_series.index.dtype, types.Number) def check_types_comparable(ty_left, ty_right): \"\"\"Used during typing to check", "must reproduce the above copyright notice, # this list of", "of the parameter \"\"\" if not isinstance(data, accepted_type): self.raise_exc(data, accepted_type.__name__,", "codegen(*args, **kwargs) loc_vars = {} exec(func_text, global_vars, loc_vars) _impl =", "above copyright notice, # this list of conditions and the", "OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT", "name of the parameter \"\"\" msg = self.msg_template.format(self.func_name, name, data,", "dtype in array_types] np_scalar_dtypes = [numpy_support.as_dtype(dtype) for dtype in scalar_types]", "a Numba literal value equal to value\"\"\" if not isinstance(var,", "# # Redistribution and use in source and binary forms,", "# # Redistributions of source code must retain the above", "PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,", "typing to check that series has numeric index\"\"\" return isinstance(ty_series.index.dtype,", "in RangeIndex ctor and other methods that take dtype parameter.", "during 
typing to check that specified types can be compared\"\"\"", "ty.signed def _check_dtype_param_type(dtype): \"\"\" Returns True is dtype is a", "to unblock compilation return ty_left == ty_right return False def", "else: return var.literal_value == value def has_python_value(var, value): \"\"\"Used during", "expected: {}' def __init__(self, func_name): \"\"\" Parameters ---------- func_name: :obj:`str`", "related to typing compilation phase \"\"\" import numpy import numba", "name, data, expected_types) raise TypingError(msg) def check(self, data, accepted_type, name=''):", "case of SDC limitation\"\"\" pass def kwsparams2list(params): \"\"\"Convert parameters dict", "notice, # this list of conditions and the following disclaimer.", "scalar_types): \"\"\"Used to find common numba dtype for a sequences", "'dtype'): ty_right = ty_right.dtype # add the rest of supported", "a valid type for dtype parameter and False otherwise. Used", "== value def has_python_value(var, value): \"\"\"Used during typing to check", "PARTICULAR # PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE", "in case of SDC limitation\"\"\" pass def kwsparams2list(params): \"\"\"Convert parameters", "that variable var was resolved as Python type and has", "was resolved as Python type and has specific value\"\"\" if", "sequences of numba dtypes each representing some numpy dtype\"\"\" np_array_dtypes", "for dtype in scalar_types] np_common_dtype = numpy.find_common_type(np_array_dtypes, np_scalar_dtypes) numba_common_dtype =", "isinstance(value, type(bool)): return var is value else: return var ==", "False def check_arrays_comparable(ty_left, ty_right): \"\"\"Used during typing to check that", "***************************************************************************** \"\"\" | This file contains SDC utility functions related", "USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE", "False if value is None or isinstance(value, type(bool)): return var", "OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER", "return [(f'{param}' if param not in defaults else f'{param}={defaults[param]}') for", "THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF", "= ( EmptyIndexType, PositionalIndexType, RangeIndexType, Int64IndexType, MultiIndexType, ) + sdc_old_index_types", "type Parameters ---------- data: :obj:`any` real type of the data", "= (types.Array, StringArrayType, ) sdc_pandas_index_types = ( EmptyIndexType, PositionalIndexType, RangeIndexType,", "v in params.items()] def sigparams2list(param_names, defaults): \"\"\"Creates a list of", "each representing some numpy dtype\"\"\" np_array_dtypes = [numpy_support.as_dtype(dtype) for dtype", "value else: return var.literal_value == value def has_python_value(var, value): \"\"\"Used", "== value def is_default(var, value): return has_literal_value(var, value) or has_python_value(var,", "EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#", "\"\"\" valid_dtype_types = (types.NoneType, types.Omitted, types.UnicodeType, types.NumberClass) return isinstance(dtype, valid_dtype_types)", "from sdc.datatypes.indexes import * from sdc.str_arr_type import string_array_type, StringArrayType from", "caching of data allocated for range indexes at request for", "value def has_python_value(var, value): \"\"\"Used during typing to check that", "numba_common_dtype def find_index_common_dtype(left, right): \"\"\"Used to find common dtype for", "generator of an implementation\"\"\" def _df_impl_generator(*args, **kwargs): func_text, global_vars =", "global_vars = codegen(*args, **kwargs) loc_vars = {} exec(func_text, global_vars, loc_vars)", "_impl = loc_vars[impl_name] return _impl return _df_impl_generator def check_signed_integer(ty): return", "e.g.: Method nsmallest(). The object n given: bool expected: int", "of data allocated for range indexes at request for .values", "exception name: :obj:`str` name of the parameter \"\"\" msg =", "if not isinstance(var, types.Literal): return False if value is None:", "( types.Array, StringArrayType, Categorical, ) class TypeChecker: \"\"\" Validate object", "has_python_value(var, value) or isinstance(var, types.Omitted) def check_is_numeric_array(type_var): \"\"\"Used during typing", "return has_literal_value(var, value) or has_python_value(var, value) or isinstance(var, types.Omitted) def", "contains SDC utility functions related to typing compilation phase \"\"\"", "specific dtype\"\"\" return isinstance(type_var, types.Array) and isinstance(type_var.dtype, dtype) def find_common_dtype_from_numpy_dtypes(array_types,", "of a format 'key=value' from parameter names and default values\"\"\"", "2020, Intel Corporation All rights reserved. 
# # Redistribution and", "NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE", "= ( EmptyIndexType, PositionalIndexType, RangeIndexType, ) sdc_pandas_df_column_types = ( types.Array,", "LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR", "valid type for dtype parameter and False otherwise. Used in", "ctor and other methods that take dtype parameter. \"\"\" valid_dtype_types", "that the following conditions are met: # # Redistributions of", "source and binary forms, with or without # modification, are", "conditions and the following disclaimer. # # Redistributions in binary", "isinstance(type_var.dtype, dtype) def find_common_dtype_from_numpy_dtypes(array_types, scalar_types): \"\"\"Used to find common numba", "numpy_support.from_dtype(np_common_dtype) return numba_common_dtype def find_index_common_dtype(left, right): \"\"\"Used to find common", "+ sdc_old_index_types sdc_indexes_range_like = ( PositionalIndexType, RangeIndexType, ) # TO-DO:", "sigparams2list(param_names, defaults): \"\"\"Creates a list of strings of a format", "conditions are met: # # Redistributions of source code must", "v) for k, v in params.items()] def sigparams2list(param_names, defaults): \"\"\"Creates", "kwsparams2list(params): \"\"\"Convert parameters dict to a list of string of", "var is a Numba literal value equal to value\"\"\" if", "defaults else f'{param}={defaults[param]}') for param in param_names] def has_literal_value(var, value):", "is value else: return var.literal_value == value def has_python_value(var, value):", "SDC limitation\"\"\" pass def kwsparams2list(params): \"\"\"Convert parameters dict to a", "some numpy dtype\"\"\" np_array_dtypes = [numpy_support.as_dtype(dtype) for dtype in array_types]", "exec(func_text, global_vars, loc_vars) _impl = loc_vars[impl_name] return _impl return _df_impl_generator", "is a valid type for dtype parameter and False otherwise.", "index\"\"\" return isinstance(ty_series.index.dtype, types.Number) def 
check_types_comparable(ty_left, ty_right): \"\"\"Used during typing", "of specified types can be compared\"\"\" return ((ty_left == string_array_type", "sdc_old_index_types = (types.Array, StringArrayType, ) sdc_pandas_index_types = ( EmptyIndexType, PositionalIndexType,", "# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY", "of the data expected_types: :obj:`str` expected types inserting directly to", "gen_impl_generator(codegen, impl_name): \"\"\"Generate generator of an implementation\"\"\" def _df_impl_generator(*args, **kwargs):", "are met: # # Redistributions of source code must retain", "and ty.signed def _check_dtype_param_type(dtype): \"\"\" Returns True is dtype is", "numpy_support from sdc.datatypes.indexes import * from sdc.str_arr_type import string_array_type, StringArrayType", "to check that variable var was resolved as Python type", "CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF", "_impl return _df_impl_generator def check_signed_integer(ty): return isinstance(ty, types.Integer) and ty.signed", "that type_var is a numeric numpy arrays\"\"\" return check_is_array_of_dtype(type_var, types.Number)", "FIXME: just for now to unblock compilation return ty_left ==", "= numpy.find_common_type(np_array_dtypes, np_scalar_dtypes) numba_common_dtype = numpy_support.from_dtype(np_common_dtype) return numba_common_dtype def find_index_common_dtype(left,", "= find_common_dtype_from_numpy_dtypes( [left_index_dtype, right_index_dtype], []) else: numba_index_common_dtype = left_index_dtype return", "a list of strings of a format 'key=value' from parameter", "Parameters ---------- data: :obj:`any` real type of the data expected_types:", "the following conditions are met: # # Redistributions of source", "check that variable var is a Numba literal value equal", "isinstance(type_var, types.Array) and isinstance(type_var.dtype, dtype) def find_common_dtype_from_numpy_dtypes(array_types, scalar_types): \"\"\"Used to", "AND CONTRIBUTORS 
\"AS IS\" # AND ANY EXPRESS OR IMPLIED", ") sdc_pandas_df_column_types = ( types.Array, StringArrayType, Categorical, ) class TypeChecker:", "right_index_dtype if not index_dtypes_match: numba_index_common_dtype = find_common_dtype_from_numpy_dtypes( [left_index_dtype, right_index_dtype], [])", "arrays\"\"\" return check_is_array_of_dtype(type_var, types.Number) def check_index_is_numeric(ty_series): \"\"\"Used during typing to", "NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND", "phase \"\"\" import numpy import numba import sdc from numba", "AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT,", "= self.msg_template.format(self.func_name, name, data, expected_types) raise TypingError(msg) def check(self, data,", "and binary forms, with or without # modification, are permitted", "following disclaimer. # # Redistributions in binary form must reproduce", "\"\"\" Check data type belongs to specified type Parameters ----------", "np_scalar_dtypes) numba_common_dtype = numpy_support.from_dtype(np_common_dtype) return numba_common_dtype def find_index_common_dtype(left, right): \"\"\"Used", "_df_impl_generator def check_signed_integer(ty): return isinstance(ty, types.Integer) and ty.signed def _check_dtype_param_type(dtype):", "the parameter \"\"\" if not isinstance(data, accepted_type): self.raise_exc(data, accepted_type.__name__, name=name)", "{}\\n given: {}\\n expected: {}' def __init__(self, func_name): \"\"\" Parameters", "if not index_dtypes_match: numba_index_common_dtype = find_common_dtype_from_numpy_dtypes( [left_index_dtype, right_index_dtype], []) else:", "WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE", "RangeIndexType, ) sdc_pandas_df_column_types = ( types.Array, StringArrayType, Categorical, ) class", "now to unblock compilation return ty_left == ty_right return False", "ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# ***************************************************************************** \"\"\"", "types checking \"\"\" self.func_name = func_name def raise_exc(self, data, expected_types,", "of source code must retain the above copyright notice, #", "OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY", "dtypes are equal\"\"\" left_index_dtype = left.dtype right_index_dtype = right.dtype index_dtypes_match", "or isinstance(var, types.Omitted) def check_is_numeric_array(type_var): \"\"\"Used during typing to check", "THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY", "func_name: :obj:`str` name of the function where types checking \"\"\"", "global_vars, loc_vars) _impl = loc_vars[impl_name] return _impl return _df_impl_generator def", "NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;", "{}' def __init__(self, func_name): \"\"\" Parameters ---------- func_name: :obj:`str` name", "{}\\n expected: {}' def __init__(self, func_name): \"\"\" Parameters ---------- func_name:", "FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL", "A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL", "check that variable var was resolved as Python type and", "a numeric numpy array of specific dtype\"\"\" return isinstance(type_var, types.Array)", "# this list of conditions and the following disclaimer. 
#", "at request for .values sdc_indexes_wo_values_cache = ( EmptyIndexType, PositionalIndexType, RangeIndexType,", "= [numpy_support.as_dtype(dtype) for dtype in array_types] np_scalar_dtypes = [numpy_support.as_dtype(dtype) for", "implementation\"\"\" def _df_impl_generator(*args, **kwargs): func_text, global_vars = codegen(*args, **kwargs) loc_vars", "return _df_impl_generator def check_signed_integer(ty): return isinstance(ty, types.Integer) and ty.signed def", "isinstance(ty_left, types.UnicodeType): return isinstance(ty_right, types.UnicodeType) if isinstance(ty_left, types.Boolean): return isinstance(ty_right,", "ty_right == string_array_type) or (check_is_numeric_array(ty_left) and check_is_numeric_array(ty_right))) def check_is_array_of_dtype(type_var, dtype):", "notice, # this list of conditions and the following disclaimer", "\"\"\" Raise exception with unified message Parameters ---------- data: :obj:`any`", "return ((ty_left == string_array_type and ty_right == string_array_type) or (check_is_numeric_array(ty_left)", "dtype) def find_common_dtype_from_numpy_dtypes(array_types, scalar_types): \"\"\"Used to find common numba dtype", "import Categorical sdc_old_index_types = (types.Array, StringArrayType, ) sdc_pandas_index_types = (", "isinstance(ty_left, types.Boolean): return isinstance(ty_right, types.Boolean) if isinstance(ty_left, (types.Tuple, types.UniTuple)): #", "allocated for range indexes at request for .values sdc_indexes_wo_values_cache =", "check_index_is_numeric(ty_series): \"\"\"Used during typing to check that series has numeric", "INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT", "the data expected_types: :obj:`str` expected types inserting directly to the", "msg = self.msg_template.format(self.func_name, name, data, expected_types) raise TypingError(msg) def check(self,", "met: # # Redistributions of source code must retain the", "name=''): \"\"\" Raise exception with unified message Parameters ---------- 
data:", "isinstance(var, types.Omitted) def check_is_numeric_array(type_var): \"\"\"Used during typing to check that", "THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR", "value\"\"\" if not isinstance(var, types.Literal): return False if value is", "is dtype is a valid type for dtype parameter and", "dtype for a sequences of numba dtypes each representing some", ":obj:`str` name of the parameter \"\"\" msg = self.msg_template.format(self.func_name, name,", "# # Redistributions in binary form must reproduce the above", "OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF", "OF THE POSSIBILITY OF SUCH DAMAGE. # ***************************************************************************** \"\"\" |", "that take dtype parameter. \"\"\" valid_dtype_types = (types.NoneType, types.Omitted, types.UnicodeType,", ") # TO-DO: support caching of data allocated for range", "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS", "defaults): \"\"\"Creates a list of strings of a format 'key=value'", "disclaimer in the documentation # and/or other materials provided with", "\"\"\" Validate object type and raise TypingError if the type", "is None or isinstance(value, type(bool)): return var is value else:", "\"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,", "default values\"\"\" return [(f'{param}' if param not in defaults else", "right): \"\"\"Used to find common dtype for indexes of two", "***************************************************************************** # Copyright (c) 2020, Intel Corporation All rights reserved.", "types from numba.core.errors import TypingError from numba.np import numpy_support from", "types.Boolean) if isinstance(ty_left, (types.Tuple, types.UniTuple)): # FIXME: just for now", "OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF", "types.Array) and isinstance(type_var.dtype, dtype) def find_common_dtype_from_numpy_dtypes(array_types, scalar_types): \"\"\"Used to find", "check that type_var 
is a numeric numpy array of specific", "isinstance(var, type(value)): return False if value is None or isinstance(value,", "CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES,", "HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,", "FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT", "DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,", "np_scalar_dtypes = [numpy_support.as_dtype(dtype) for dtype in scalar_types] np_common_dtype = numpy.find_common_type(np_array_dtypes,", "check that type_var is a numeric numpy arrays\"\"\" return check_is_array_of_dtype(type_var,", "from numba.np import numpy_support from sdc.datatypes.indexes import * from sdc.str_arr_type", "supported types here if isinstance(ty_left, types.Number): return isinstance(ty_right, types.Number) if", "return var is value else: return var == value def", "# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT", "that specified types can be compared\"\"\" if hasattr(ty_left, 'dtype'): ty_left", "sdc_indexes_wo_values_cache = ( EmptyIndexType, PositionalIndexType, RangeIndexType, ) sdc_pandas_df_column_types = (", "the data accepted_type: :obj:`type` accepted type name: :obj:`str` name of", "string of a format 'key=value'\"\"\" return ['{}={}'.format(k, v) for k,", "isinstance(ty_right, types.Number) if isinstance(ty_left, types.UnicodeType): return isinstance(ty_right, types.UnicodeType) if isinstance(ty_left,", "as Python type and has specific value\"\"\" if not isinstance(var,", "var is value else: return var == value def is_default(var,", ") + sdc_old_index_types sdc_indexes_range_like = ( PositionalIndexType, RangeIndexType, ) #", ":obj:`str` expected types inserting directly to the exception name: :obj:`str`", "(check_is_numeric_array(ty_left) and check_is_numeric_array(ty_right))) def check_is_array_of_dtype(type_var, dtype): \"\"\"Used during typing to", "numpy dtype\"\"\" np_array_dtypes = [numpy_support.as_dtype(dtype) for 
dtype in array_types] np_scalar_dtypes", "= left.dtype right_index_dtype = right.dtype index_dtypes_match = left_index_dtype == right_index_dtype", "typing to check that underlying arrays of specified types can", "inserting directly to the exception name: :obj:`str` name of the", "return isinstance(ty_right, types.Boolean) if isinstance(ty_left, (types.Tuple, types.UniTuple)): # FIXME: just", "unblock compilation return ty_left == ty_right return False def check_arrays_comparable(ty_left,", "that type_var is a numeric numpy array of specific dtype\"\"\"", "EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, #", "expected_types, name=''): \"\"\" Raise exception with unified message Parameters ----------", "name=''): \"\"\" Check data type belongs to specified type Parameters", "and False otherwise. Used in RangeIndex ctor and other methods", "def has_literal_value(var, value): \"\"\"Used during typing to check that variable", "dtype\"\"\" np_array_dtypes = [numpy_support.as_dtype(dtype) for dtype in array_types] np_scalar_dtypes =", "index dtypes are equal\"\"\" left_index_dtype = left.dtype right_index_dtype = right.dtype", "not in defaults else f'{param}={defaults[param]}') for param in param_names] def", "and other methods that take dtype parameter. \"\"\" valid_dtype_types =", "name: :obj:`str` name of the parameter \"\"\" if not isinstance(data,", "sdc_pandas_df_column_types = ( types.Array, StringArrayType, Categorical, ) class TypeChecker: \"\"\"", "if param not in defaults else f'{param}={defaults[param]}') for param in", "MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED.", "---------- data: :obj:`any` real type of the data accepted_type: :obj:`type`", "parameter and False otherwise. 
Used in RangeIndex ctor and other", "numba import types from numba.core.errors import TypingError from numba.np import", "Redistributions of source code must retain the above copyright notice,", "None: return isinstance(var, types.NoneType) or var.literal_value is value elif isinstance(value,", "return isinstance(ty_series.index.dtype, types.Number) def check_types_comparable(ty_left, ty_right): \"\"\"Used during typing to", "(types.Array, StringArrayType, ) sdc_pandas_index_types = ( EmptyIndexType, PositionalIndexType, RangeIndexType, Int64IndexType,", "here if isinstance(ty_left, types.Number): return isinstance(ty_right, types.Number) if isinstance(ty_left, types.UnicodeType):", "to typing compilation phase \"\"\" import numpy import numba import", "of the parameter \"\"\" msg = self.msg_template.format(self.func_name, name, data, expected_types)", "numpy import numba import sdc from numba import types from", "OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,", "GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; #", "documentation # and/or other materials provided with the distribution. 
#", "has_literal_value(var, value): \"\"\"Used during typing to check that variable var", "raise TypingError(msg) def check(self, data, accepted_type, name=''): \"\"\" Check data", "dtypes each representing some numpy dtype\"\"\" np_array_dtypes = [numpy_support.as_dtype(dtype) for", "or without # modification, are permitted provided that the following", "to the exception name: :obj:`str` name of the parameter \"\"\"", "dtype for indexes of two series and verify if index", "Redistribution and use in source and binary forms, with or", "source code must retain the above copyright notice, # this", "# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT", "LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING", "Categorical, ) class TypeChecker: \"\"\" Validate object type and raise", "names and default values\"\"\" return [(f'{param}' if param not in", "import numpy import numba import sdc from numba import types", "var was resolved as Python type and has specific value\"\"\"", "type_var is a numeric numpy arrays\"\"\" return check_is_array_of_dtype(type_var, types.Number) def", "return ['{}={}'.format(k, v) for k, v in params.items()] def sigparams2list(param_names,", "value def is_default(var, value): return has_literal_value(var, value) or has_python_value(var, value)", "var == value def is_default(var, value): return has_literal_value(var, value) or", "# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,", "check_signed_integer(ty): return isinstance(ty, types.Integer) and ty.signed def _check_dtype_param_type(dtype): \"\"\" Returns", "to find common dtype for indexes of two series and", "array_types] np_scalar_dtypes = [numpy_support.as_dtype(dtype) for dtype in scalar_types] np_common_dtype =", "types.Literal): return False if value is None: return isinstance(var, types.NoneType)", "string_array_type, StringArrayType from sdc.datatypes.categorical.types import Categorical sdc_old_index_types = (types.Array, StringArrayType,", 
"RangeIndexType, Int64IndexType, MultiIndexType, ) + sdc_old_index_types sdc_indexes_range_like = ( PositionalIndexType,", "# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A", "INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT", ":obj:`str` name of the parameter \"\"\" if not isinstance(data, accepted_type):", "for a sequences of numba dtypes each representing some numpy", "belongs to specified type Parameters ---------- data: :obj:`any` real type", "if isinstance(ty_left, (types.Tuple, types.UniTuple)): # FIXME: just for now to", "sdc from numba import types from numba.core.errors import TypingError from", "TypingError if the type is invalid, e.g.: Method nsmallest(). The", "\"\"\"Used to find common dtype for indexes of two series", "rights reserved. # # Redistribution and use in source and", "or (check_is_numeric_array(ty_left) and check_is_numeric_array(ty_right))) def check_is_array_of_dtype(type_var, dtype): \"\"\"Used during typing", "def kwsparams2list(params): \"\"\"Convert parameters dict to a list of string", "Returns True is dtype is a valid type for dtype", "\"\"\"Used during typing to check that type_var is a numeric", "expected types inserting directly to the exception name: :obj:`str` name", "The object n given: bool expected: int \"\"\" msg_template =", "\"\"\" Parameters ---------- func_name: :obj:`str` name of the function where", "value is None: return isinstance(var, types.NoneType) or var.literal_value is value", "ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT", "binary form must reproduce the above copyright notice, # this", "strings of a format 'key=value' from parameter names and default", "Int64IndexType, MultiIndexType, ) + sdc_old_index_types sdc_indexes_range_like = ( PositionalIndexType, RangeIndexType,", ") sdc_pandas_index_types = ( EmptyIndexType, PositionalIndexType, RangeIndexType, Int64IndexType, MultiIndexType, )", "accepted type name: :obj:`str` name of the parameter 
\"\"\" if", "to check that underlying arrays of specified types can be", "format 'key=value'\"\"\" return ['{}={}'.format(k, v) for k, v in params.items()]", "or var.literal_value is value elif isinstance(value, type(bool)): return var.literal_value is", "numba_index_common_dtype = find_common_dtype_from_numpy_dtypes( [left_index_dtype, right_index_dtype], []) else: numba_index_common_dtype = left_index_dtype", "# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,", "with the distribution. # # THIS SOFTWARE IS PROVIDED BY", "StringArrayType, ) sdc_pandas_index_types = ( EmptyIndexType, PositionalIndexType, RangeIndexType, Int64IndexType, MultiIndexType,", "self.msg_template.format(self.func_name, name, data, expected_types) raise TypingError(msg) def check(self, data, accepted_type,", "numpy arrays\"\"\" return check_is_array_of_dtype(type_var, types.Number) def check_index_is_numeric(ty_series): \"\"\"Used during typing", "Check data type belongs to specified type Parameters ---------- data:", "IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR #", "PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" #", "if value is None or isinstance(value, type(bool)): return var is", "(c) 2020, Intel Corporation All rights reserved. # # Redistribution", "Categorical sdc_old_index_types = (types.Array, StringArrayType, ) sdc_pandas_index_types = ( EmptyIndexType,", "DAMAGE. 
# ***************************************************************************** \"\"\" | This file contains SDC utility", "support caching of data allocated for range indexes at request", "_check_dtype_param_type(dtype): \"\"\" Returns True is dtype is a valid type", "specific value\"\"\" if not isinstance(var, type(value)): return False if value", "raised in case of SDC limitation\"\"\" pass def kwsparams2list(params): \"\"\"Convert", "is_default(var, value): return has_literal_value(var, value) or has_python_value(var, value) or isinstance(var,", "(INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT", "to check that specified types can be compared\"\"\" if hasattr(ty_left,", "loc_vars[impl_name] return _impl return _df_impl_generator def check_signed_integer(ty): return isinstance(ty, types.Integer)", "def has_python_value(var, value): \"\"\"Used during typing to check that variable", "data type belongs to specified type Parameters ---------- data: :obj:`any`", "check_is_array_of_dtype(type_var, dtype): \"\"\"Used during typing to check that type_var is", "in source and binary forms, with or without # modification,", "expected_types: :obj:`str` expected types inserting directly to the exception name:", "def _check_dtype_param_type(dtype): \"\"\" Returns True is dtype is a valid", "from sdc.str_arr_type import string_array_type, StringArrayType from sdc.datatypes.categorical.types import Categorical sdc_old_index_types", "AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED", "if isinstance(ty_left, types.Boolean): return isinstance(ty_right, types.Boolean) if isinstance(ty_left, (types.Tuple, types.UniTuple)):", "permitted provided that the following conditions are met: # #", "types.Number) def check_types_comparable(ty_left, ty_right): \"\"\"Used during typing to check that", "value): \"\"\"Used during typing to check that variable var was", "return isinstance(ty_right, types.UnicodeType) if isinstance(ty_left, types.Boolean): return 
isinstance(ty_right, types.Boolean) if", "in the documentation # and/or other materials provided with the", "array of specific dtype\"\"\" return isinstance(type_var, types.Array) and isinstance(type_var.dtype, dtype)", "of conditions and the following disclaimer in the documentation #", "param in param_names] def has_literal_value(var, value): \"\"\"Used during typing to", "typing to check that type_var is a numeric numpy arrays\"\"\"", "return var == value def is_default(var, value): return has_literal_value(var, value)", "sdc_old_index_types sdc_indexes_range_like = ( PositionalIndexType, RangeIndexType, ) # TO-DO: support", "np_array_dtypes = [numpy_support.as_dtype(dtype) for dtype in array_types] np_scalar_dtypes = [numpy_support.as_dtype(dtype)", "message Parameters ---------- data: :obj:`any` real type of the data", "OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR", "object {}\\n given: {}\\n expected: {}' def __init__(self, func_name): \"\"\"", "ty_left = ty_left.dtype if hasattr(ty_right, 'dtype'): ty_right = ty_right.dtype #", "[left_index_dtype, right_index_dtype], []) else: numba_index_common_dtype = left_index_dtype return index_dtypes_match, numba_index_common_dtype", "other methods that take dtype parameter. 
\"\"\" valid_dtype_types = (types.NoneType,", "func_text, global_vars = codegen(*args, **kwargs) loc_vars = {} exec(func_text, global_vars,", "of strings of a format 'key=value' from parameter names and", "\"\"\"Exception to be raised in case of SDC limitation\"\"\" pass", "the parameter \"\"\" msg = self.msg_template.format(self.func_name, name, data, expected_types) raise", "types.NoneType) or var.literal_value is value elif isinstance(value, type(bool)): return var.literal_value", "be compared\"\"\" if hasattr(ty_left, 'dtype'): ty_left = ty_left.dtype if hasattr(ty_right,", "form must reproduce the above copyright notice, # this list", "of the function where types checking \"\"\" self.func_name = func_name", "of specific dtype\"\"\" return isinstance(type_var, types.Array) and isinstance(type_var.dtype, dtype) def", "NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF", "return var.literal_value is value else: return var.literal_value == value def", "'dtype'): ty_left = ty_left.dtype if hasattr(ty_right, 'dtype'): ty_right = ty_right.dtype", "for k, v in params.items()] def sigparams2list(param_names, defaults): \"\"\"Creates a", "can be compared\"\"\" return ((ty_left == string_array_type and ty_right ==", "= codegen(*args, **kwargs) loc_vars = {} exec(func_text, global_vars, loc_vars) _impl", "use in source and binary forms, with or without #", "value) or isinstance(var, types.Omitted) def check_is_numeric_array(type_var): \"\"\"Used during typing to", "'key=value'\"\"\" return ['{}={}'.format(k, v) for k, v in params.items()] def", "that underlying arrays of specified types can be compared\"\"\" return", "for range indexes at request for .values sdc_indexes_wo_values_cache = (", "a list of string of a format 'key=value'\"\"\" return ['{}={}'.format(k,", "EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE", "== string_array_type) or (check_is_numeric_array(ty_left) and check_is_numeric_array(ty_right))) def check_is_array_of_dtype(type_var, dtype): 
\"\"\"Used", "ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES", "OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED", "and verify if index dtypes are equal\"\"\" left_index_dtype = left.dtype", "during typing to check that type_var is a numeric numpy", "to check that variable var is a Numba literal value", "True is dtype is a valid type for dtype parameter", "sdc.datatypes.indexes import * from sdc.str_arr_type import string_array_type, StringArrayType from sdc.datatypes.categorical.types", "parameters dict to a list of string of a format", "isinstance(data, accepted_type): self.raise_exc(data, accepted_type.__name__, name=name) class SDCLimitation(Exception): \"\"\"Exception to be", "type of the data expected_types: :obj:`str` expected types inserting directly", "in params.items()] def sigparams2list(param_names, defaults): \"\"\"Creates a list of strings", "Python type and has specific value\"\"\" if not isinstance(var, type(value)):", "ty_right): \"\"\"Used during typing to check that underlying arrays of", "isinstance(var, types.NoneType) or var.literal_value is value elif isinstance(value, type(bool)): return", "check_types_comparable(ty_left, ty_right): \"\"\"Used during typing to check that specified types", "isinstance(ty, types.Integer) and ty.signed def _check_dtype_param_type(dtype): \"\"\" Returns True is", "def __init__(self, func_name): \"\"\" Parameters ---------- func_name: :obj:`str` name of", "value\"\"\" if not isinstance(var, type(value)): return False if value is", "\"\"\" Returns True is dtype is a valid type for", "(types.Tuple, types.UniTuple)): # FIXME: just for now to unblock compilation", "RangeIndexType, ) # TO-DO: support caching of data allocated for", "def check_is_array_of_dtype(type_var, dtype): \"\"\"Used during typing to check that type_var", "# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE", "code must retain the above copyright notice, # this list", "equal\"\"\" 
left_index_dtype = left.dtype right_index_dtype = right.dtype index_dtypes_match = left_index_dtype", "methods that take dtype parameter. \"\"\" valid_dtype_types = (types.NoneType, types.Omitted,", "parameter. \"\"\" valid_dtype_types = (types.NoneType, types.Omitted, types.UnicodeType, types.NumberClass) return isinstance(dtype,", ":obj:`type` accepted type name: :obj:`str` name of the parameter \"\"\"", "class SDCLimitation(Exception): \"\"\"Exception to be raised in case of SDC", "that variable var is a Numba literal value equal to", "SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH", "TypingError(msg) def check(self, data, accepted_type, name=''): \"\"\" Check data type", "ty_right = ty_right.dtype # add the rest of supported types", "= {} exec(func_text, global_vars, loc_vars) _impl = loc_vars[impl_name] return _impl", "\"\"\" | This file contains SDC utility functions related to", "data, expected_types, name=''): \"\"\" Raise exception with unified message Parameters", "# add the rest of supported types here if isinstance(ty_left,", "_df_impl_generator(*args, **kwargs): func_text, global_vars = codegen(*args, **kwargs) loc_vars = {}", "typing compilation phase \"\"\" import numpy import numba import sdc", "import * from sdc.str_arr_type import string_array_type, StringArrayType from sdc.datatypes.categorical.types import", "return False def check_arrays_comparable(ty_left, ty_right): \"\"\"Used during typing to check", "sdc.datatypes.categorical.types import Categorical sdc_old_index_types = (types.Array, StringArrayType, ) sdc_pandas_index_types =", "and raise TypingError if the type is invalid, e.g.: Method", "accepted_type, name=''): \"\"\" Check data type belongs to specified type", "or isinstance(value, type(bool)): return var is value else: return var", "numba dtypes each representing some numpy dtype\"\"\" np_array_dtypes = [numpy_support.as_dtype(dtype)", "TypingError from numba.np import numpy_support from sdc.datatypes.indexes import * from", 
"from parameter names and default values\"\"\" return [(f'{param}' if param", "check that underlying arrays of specified types can be compared\"\"\"", "find_common_dtype_from_numpy_dtypes(array_types, scalar_types): \"\"\"Used to find common numba dtype for a", "take dtype parameter. \"\"\" valid_dtype_types = (types.NoneType, types.Omitted, types.UnicodeType, types.NumberClass)", "list of strings of a format 'key=value' from parameter names", "check_is_numeric_array(type_var): \"\"\"Used during typing to check that type_var is a", "sdc.str_arr_type import string_array_type, StringArrayType from sdc.datatypes.categorical.types import Categorical sdc_old_index_types =", "with or without # modification, are permitted provided that the", "int \"\"\" msg_template = '{} The object {}\\n given: {}\\n", "numpy.find_common_type(np_array_dtypes, np_scalar_dtypes) numba_common_dtype = numpy_support.from_dtype(np_common_dtype) return numba_common_dtype def find_index_common_dtype(left, right):", "if index dtypes are equal\"\"\" left_index_dtype = left.dtype right_index_dtype =", "types.Number) def check_index_is_numeric(ty_series): \"\"\"Used during typing to check that series", "CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN", "accepted_type.__name__, name=name) class SDCLimitation(Exception): \"\"\"Exception to be raised in case", "distribution. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT", "is a numeric numpy arrays\"\"\" return check_is_array_of_dtype(type_var, types.Number) def check_index_is_numeric(ty_series):", "= loc_vars[impl_name] return _impl return _df_impl_generator def check_signed_integer(ty): return isinstance(ty,", "OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE", "isinstance(ty_right, types.Boolean) if isinstance(ty_left, (types.Tuple, types.UniTuple)): # FIXME: just for", "numba_index_common_dtype def gen_impl_generator(codegen, impl_name): \"\"\"Generate generator of an implementation\"\"\" def", ".values sdc_indexes_wo_values_cache = ( EmptyIndexType, PositionalIndexType, RangeIndexType, ) sdc_pandas_df_column_types =", "BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,", "data allocated for range indexes at request for .values sdc_indexes_wo_values_cache", "object type and raise TypingError if the type is invalid,", "param_names] def has_literal_value(var, value): \"\"\"Used during typing to check that", "valid_dtype_types = (types.NoneType, types.Omitted, types.UnicodeType, types.NumberClass) return isinstance(dtype, valid_dtype_types) or", "if hasattr(ty_left, 'dtype'): ty_left = ty_left.dtype if hasattr(ty_right, 'dtype'): ty_right", "right_index_dtype = right.dtype index_dtypes_match = left_index_dtype == right_index_dtype if not", "False otherwise. Used in RangeIndex ctor and other methods that", "StringArrayType, Categorical, ) class TypeChecker: \"\"\" Validate object type and", "the exception name: :obj:`str` name of the parameter \"\"\" msg", "# this list of conditions and the following disclaimer in", "\"\"\"Used during typing to check that variable var was resolved", "POSSIBILITY OF SUCH DAMAGE. 
# ***************************************************************************** \"\"\" | This file", "= (types.NoneType, types.Omitted, types.UnicodeType, types.NumberClass) return isinstance(dtype, valid_dtype_types) or dtype", "find_common_dtype_from_numpy_dtypes( [left_index_dtype, right_index_dtype], []) else: numba_index_common_dtype = left_index_dtype return index_dtypes_match,", "parameter names and default values\"\"\" return [(f'{param}' if param not", "directly to the exception name: :obj:`str` name of the parameter", "during typing to check that series has numeric index\"\"\" return", "representing some numpy dtype\"\"\" np_array_dtypes = [numpy_support.as_dtype(dtype) for dtype in", "Copyright (c) 2020, Intel Corporation All rights reserved. # #", "accepted_type: :obj:`type` accepted type name: :obj:`str` name of the parameter", "the following disclaimer. # # Redistributions in binary form must", "data accepted_type: :obj:`type` accepted type name: :obj:`str` name of the", "left_index_dtype return index_dtypes_match, numba_index_common_dtype def gen_impl_generator(codegen, impl_name): \"\"\"Generate generator of", "of an implementation\"\"\" def _df_impl_generator(*args, **kwargs): func_text, global_vars = codegen(*args,", "WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR", "SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED", "BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR", "the following disclaimer in the documentation # and/or other materials", "return False if value is None or isinstance(value, type(bool)): return", "HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR", "otherwise. 
Used in RangeIndex ctor and other methods that take", "type of the data accepted_type: :obj:`type` accepted type name: :obj:`str`", "in defaults else f'{param}={defaults[param]}') for param in param_names] def has_literal_value(var,", "\"\"\"Used during typing to check that variable var is a", "and use in source and binary forms, with or without", "not isinstance(var, types.Literal): return False if value is None: return", "be compared\"\"\" return ((ty_left == string_array_type and ty_right == string_array_type)", "parameter \"\"\" if not isinstance(data, accepted_type): self.raise_exc(data, accepted_type.__name__, name=name) class", "COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT,", "if not isinstance(var, type(value)): return False if value is None", "equal to value\"\"\" if not isinstance(var, types.Literal): return False if", "self.raise_exc(data, accepted_type.__name__, name=name) class SDCLimitation(Exception): \"\"\"Exception to be raised in", "\"\"\" if not isinstance(data, accepted_type): self.raise_exc(data, accepted_type.__name__, name=name) class SDCLimitation(Exception):", "indexes of two series and verify if index dtypes are", "PositionalIndexType, RangeIndexType, ) sdc_pandas_df_column_types = ( types.Array, StringArrayType, Categorical, )", "type(bool)): return var is value else: return var == value", "TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY", "((ty_left == string_array_type and ty_right == string_array_type) or (check_is_numeric_array(ty_left) and", "hasattr(ty_right, 'dtype'): ty_right = ty_right.dtype # add the rest of", "of string of a format 'key=value'\"\"\" return ['{}={}'.format(k, v) for", "if isinstance(ty_left, types.Number): return isinstance(ty_right, types.Number) if isinstance(ty_left, types.UnicodeType): return", "Parameters ---------- func_name: :obj:`str` name of the function where types", "is invalid, e.g.: Method nsmallest(). 
The object n given: bool", "conditions and the following disclaimer in the documentation # and/or", "name: :obj:`str` name of the parameter \"\"\" msg = self.msg_template.format(self.func_name,", "# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,", "return check_is_array_of_dtype(type_var, types.Number) def check_index_is_numeric(ty_series): \"\"\"Used during typing to check", "func_name def raise_exc(self, data, expected_types, name=''): \"\"\" Raise exception with", "reproduce the above copyright notice, # this list of conditions", ":obj:`any` real type of the data expected_types: :obj:`str` expected types", "variable var is a Numba literal value equal to value\"\"\"", "var.literal_value is value else: return var.literal_value == value def has_python_value(var,", "* from sdc.str_arr_type import string_array_type, StringArrayType from sdc.datatypes.categorical.types import Categorical", "that series has numeric index\"\"\" return isinstance(ty_series.index.dtype, types.Number) def check_types_comparable(ty_left,", "common dtype for indexes of two series and verify if", "disclaimer. # # Redistributions in binary form must reproduce the", "STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING", "WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN", "AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. 
IN", "specified type Parameters ---------- data: :obj:`any` real type of the", "given: bool expected: int \"\"\" msg_template = '{} The object", "in binary form must reproduce the above copyright notice, #", "types.UnicodeType) if isinstance(ty_left, types.Boolean): return isinstance(ty_right, types.Boolean) if isinstance(ty_left, (types.Tuple,", "\"\"\" import numpy import numba import sdc from numba import", "compared\"\"\" return ((ty_left == string_array_type and ty_right == string_array_type) or", "types here if isinstance(ty_left, types.Number): return isinstance(ty_right, types.Number) if isinstance(ty_left,", "forms, with or without # modification, are permitted provided that", "TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR", "binary forms, with or without # modification, are permitted provided", "types.UniTuple)): # FIXME: just for now to unblock compilation return", "and/or other materials provided with the distribution. # # THIS", "TO-DO: support caching of data allocated for range indexes at", "checking \"\"\" self.func_name = func_name def raise_exc(self, data, expected_types, name=''):", "IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # *****************************************************************************", "typing to check that type_var is a numeric numpy array", "{} exec(func_text, global_vars, loc_vars) _impl = loc_vars[impl_name] return _impl return", "type name: :obj:`str` name of the parameter \"\"\" if not", "if not isinstance(data, accepted_type): self.raise_exc(data, accepted_type.__name__, name=name) class SDCLimitation(Exception): \"\"\"Exception", "list of string of a format 'key=value'\"\"\" return ['{}={}'.format(k, v)", "OF SUCH DAMAGE. 
# ***************************************************************************** \"\"\" | This file contains", "to a list of string of a format 'key=value'\"\"\" return", "just for now to unblock compilation return ty_left == ty_right", "return index_dtypes_match, numba_index_common_dtype def gen_impl_generator(codegen, impl_name): \"\"\"Generate generator of an", "# ***************************************************************************** \"\"\" | This file contains SDC utility functions", "OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY", "'key=value' from parameter names and default values\"\"\" return [(f'{param}' if", "during typing to check that variable var was resolved as", "Validate object type and raise TypingError if the type is", "from numba import types from numba.core.errors import TypingError from numba.np", "EmptyIndexType, PositionalIndexType, RangeIndexType, Int64IndexType, MultiIndexType, ) + sdc_old_index_types sdc_indexes_range_like =", "for param in param_names] def has_literal_value(var, value): \"\"\"Used during typing", "request for .values sdc_indexes_wo_values_cache = ( EmptyIndexType, PositionalIndexType, RangeIndexType, )", "where types checking \"\"\" self.func_name = func_name def raise_exc(self, data,", "dict to a list of string of a format 'key=value'\"\"\"", "return isinstance(type_var, types.Array) and isinstance(type_var.dtype, dtype) def find_common_dtype_from_numpy_dtypes(array_types, scalar_types): \"\"\"Used", "of numba dtypes each representing some numpy dtype\"\"\" np_array_dtypes =", "to check that series has numeric index\"\"\" return isinstance(ty_series.index.dtype, types.Number)", "DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND", "PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY", "the documentation # and/or other materials provided with the distribution.", "type_var is a numeric numpy array of specific dtype\"\"\" return", "| This file contains SDC utility functions 
related to typing", "find common numba dtype for a sequences of numba dtypes", "(types.NoneType, types.Omitted, types.UnicodeType, types.NumberClass) return isinstance(dtype, valid_dtype_types) or dtype is", "are permitted provided that the following conditions are met: #", "Raise exception with unified message Parameters ---------- data: :obj:`any` real", "def sigparams2list(param_names, defaults): \"\"\"Creates a list of strings of a", "# FIXME: just for now to unblock compilation return ty_left", "left_index_dtype = left.dtype right_index_dtype = right.dtype index_dtypes_match = left_index_dtype ==", "of conditions and the following disclaimer. # # Redistributions in", "two series and verify if index dtypes are equal\"\"\" left_index_dtype", "to check that type_var is a numeric numpy array of", "types.UnicodeType): return isinstance(ty_right, types.UnicodeType) if isinstance(ty_left, types.Boolean): return isinstance(ty_right, types.Boolean)", "= left_index_dtype == right_index_dtype if not index_dtypes_match: numba_index_common_dtype = find_common_dtype_from_numpy_dtypes(", "if value is None: return isinstance(var, types.NoneType) or var.literal_value is", "np_common_dtype = numpy.find_common_type(np_array_dtypes, np_scalar_dtypes) numba_common_dtype = numpy_support.from_dtype(np_common_dtype) return numba_common_dtype def", "TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF", "FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. 
IN NO", "ARISING IN ANY WAY OUT OF THE USE OF THIS", "has_literal_value(var, value) or has_python_value(var, value) or isinstance(var, types.Omitted) def check_is_numeric_array(type_var):", "rest of supported types here if isinstance(ty_left, types.Number): return isinstance(ty_right,", "types can be compared\"\"\" return ((ty_left == string_array_type and ty_right", "dtype in scalar_types] np_common_dtype = numpy.find_common_type(np_array_dtypes, np_scalar_dtypes) numba_common_dtype = numpy_support.from_dtype(np_common_dtype)", "import string_array_type, StringArrayType from sdc.datatypes.categorical.types import Categorical sdc_old_index_types = (types.Array,", "to check that type_var is a numeric numpy arrays\"\"\" return", "be raised in case of SDC limitation\"\"\" pass def kwsparams2list(params):", "Redistributions in binary form must reproduce the above copyright notice,", ":obj:`str` name of the function where types checking \"\"\" self.func_name", "f'{param}={defaults[param]}') for param in param_names] def has_literal_value(var, value): \"\"\"Used during", "def check_types_comparable(ty_left, ty_right): \"\"\"Used during typing to check that specified", "ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR", "range indexes at request for .values sdc_indexes_wo_values_cache = ( EmptyIndexType,", "following conditions are met: # # Redistributions of source code", "type is invalid, e.g.: Method nsmallest(). 
The object n given:", "value elif isinstance(value, type(bool)): return var.literal_value is value else: return", "ty_right return False def check_arrays_comparable(ty_left, ty_right): \"\"\"Used during typing to", "[numpy_support.as_dtype(dtype) for dtype in scalar_types] np_common_dtype = numpy.find_common_type(np_array_dtypes, np_scalar_dtypes) numba_common_dtype", "isinstance(ty_right, types.UnicodeType) if isinstance(ty_left, types.Boolean): return isinstance(ty_right, types.Boolean) if isinstance(ty_left,", "types.Omitted, types.UnicodeType, types.NumberClass) return isinstance(dtype, valid_dtype_types) or dtype is None", "# Redistribution and use in source and binary forms, with", "---------- func_name: :obj:`str` name of the function where types checking", "raise TypingError if the type is invalid, e.g.: Method nsmallest().", "the type is invalid, e.g.: Method nsmallest(). The object n", "SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS", "the above copyright notice, # this list of conditions and", "for .values sdc_indexes_wo_values_cache = ( EmptyIndexType, PositionalIndexType, RangeIndexType, ) sdc_pandas_df_column_types", "an implementation\"\"\" def _df_impl_generator(*args, **kwargs): func_text, global_vars = codegen(*args, **kwargs)", "\"\"\"Generate generator of an implementation\"\"\" def _df_impl_generator(*args, **kwargs): func_text, global_vars", "# Redistributions of source code must retain the above copyright", "data expected_types: :obj:`str` expected types inserting directly to the exception", "OUT OF THE USE OF THIS SOFTWARE, # EVEN IF", "function where types checking \"\"\" self.func_name = func_name def raise_exc(self,", "if hasattr(ty_right, 'dtype'): ty_right = ty_right.dtype # add the rest", "ty_left == ty_right return False def check_arrays_comparable(ty_left, ty_right): \"\"\"Used during", "= numpy_support.from_dtype(np_common_dtype) return numba_common_dtype def find_index_common_dtype(left, right): \"\"\"Used to find", "return 
ty_left == ty_right return False def check_arrays_comparable(ty_left, ty_right): \"\"\"Used", "return _impl return _df_impl_generator def check_signed_integer(ty): return isinstance(ty, types.Integer) and", "are equal\"\"\" left_index_dtype = left.dtype right_index_dtype = right.dtype index_dtypes_match =", "provided with the distribution. # # THIS SOFTWARE IS PROVIDED", "parameter \"\"\" msg = self.msg_template.format(self.func_name, name, data, expected_types) raise TypingError(msg)", "to be raised in case of SDC limitation\"\"\" pass def", "__init__(self, func_name): \"\"\" Parameters ---------- func_name: :obj:`str` name of the", "and the following disclaimer. # # Redistributions in binary form", "SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR", "in array_types] np_scalar_dtypes = [numpy_support.as_dtype(dtype) for dtype in scalar_types] np_common_dtype", "list of conditions and the following disclaimer in the documentation", "msg_template = '{} The object {}\\n given: {}\\n expected: {}'", "( EmptyIndexType, PositionalIndexType, RangeIndexType, Int64IndexType, MultiIndexType, ) + sdc_old_index_types sdc_indexes_range_like", "underlying arrays of specified types can be compared\"\"\" return ((ty_left", "check_is_array_of_dtype(type_var, types.Number) def check_index_is_numeric(ty_series): \"\"\"Used during typing to check that", "def check(self, data, accepted_type, name=''): \"\"\" Check data type belongs", "# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE", "type(bool)): return var.literal_value is value else: return var.literal_value == value", "IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR #", "PositionalIndexType, RangeIndexType, ) # TO-DO: support caching of data allocated", "# modification, are permitted provided that the following conditions are", "ty_right.dtype # add the rest of supported types here if", "dtype\"\"\" return isinstance(type_var, types.Array) and isinstance(type_var.dtype, dtype) def 
find_common_dtype_from_numpy_dtypes(array_types, scalar_types):", "unified message Parameters ---------- data: :obj:`any` real type of the", "USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED", "is a numeric numpy array of specific dtype\"\"\" return isinstance(type_var,", "import numba import sdc from numba import types from numba.core.errors", "index_dtypes_match: numba_index_common_dtype = find_common_dtype_from_numpy_dtypes( [left_index_dtype, right_index_dtype], []) else: numba_index_common_dtype =", "OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT", "---------- data: :obj:`any` real type of the data expected_types: :obj:`str`", "data: :obj:`any` real type of the data accepted_type: :obj:`type` accepted", "loc_vars) _impl = loc_vars[impl_name] return _impl return _df_impl_generator def check_signed_integer(ty):", "None or isinstance(value, type(bool)): return var is value else: return", "( EmptyIndexType, PositionalIndexType, RangeIndexType, ) sdc_pandas_df_column_types = ( types.Array, StringArrayType,", "specified types can be compared\"\"\" if hasattr(ty_left, 'dtype'): ty_left =", "param not in defaults else f'{param}={defaults[param]}') for param in param_names]", "\"\"\"Used to find common numba dtype for a sequences of", "following disclaimer in the documentation # and/or other materials provided", "SDC utility functions related to typing compilation phase \"\"\" import", "format 'key=value' from parameter names and default values\"\"\" return [(f'{param}'", "\"\"\"Used during typing to check that specified types can be", "to find common numba dtype for a sequences of numba", "params.items()] def sigparams2list(param_names, defaults): \"\"\"Creates a list of strings of", "specified types can be compared\"\"\" return ((ty_left == string_array_type and", "numeric index\"\"\" return isinstance(ty_series.index.dtype, types.Number) def check_types_comparable(ty_left, ty_right): \"\"\"Used during", "return 
isinstance(ty_right, types.Number) if isinstance(ty_left, types.UnicodeType): return isinstance(ty_right, types.UnicodeType) if", "values\"\"\" return [(f'{param}' if param not in defaults else f'{param}={defaults[param]}')", "return False if value is None: return isinstance(var, types.NoneType) or", "[numpy_support.as_dtype(dtype) for dtype in array_types] np_scalar_dtypes = [numpy_support.as_dtype(dtype) for dtype", "types.Boolean): return isinstance(ty_right, types.Boolean) if isinstance(ty_left, (types.Tuple, types.UniTuple)): # FIXME:", "in scalar_types] np_common_dtype = numpy.find_common_type(np_array_dtypes, np_scalar_dtypes) numba_common_dtype = numpy_support.from_dtype(np_common_dtype) return", "and check_is_numeric_array(ty_right))) def check_is_array_of_dtype(type_var, dtype): \"\"\"Used during typing to check", "numba.np import numpy_support from sdc.datatypes.indexes import * from sdc.str_arr_type import", "expected_types) raise TypingError(msg) def check(self, data, accepted_type, name=''): \"\"\" Check", "not index_dtypes_match: numba_index_common_dtype = find_common_dtype_from_numpy_dtypes( [left_index_dtype, right_index_dtype], []) else: numba_index_common_dtype", "isinstance(ty_left, types.Number): return isinstance(ty_right, types.Number) if isinstance(ty_left, types.UnicodeType): return isinstance(ty_right,", "has numeric index\"\"\" return isinstance(ty_series.index.dtype, types.Number) def check_types_comparable(ty_left, ty_right): \"\"\"Used", "Method nsmallest(). The object n given: bool expected: int \"\"\"", "def _df_impl_generator(*args, **kwargs): func_text, global_vars = codegen(*args, **kwargs) loc_vars =", "k, v in params.items()] def sigparams2list(param_names, defaults): \"\"\"Creates a list", "name of the function where types checking \"\"\" self.func_name =", "copyright notice, # this list of conditions and the following", "dtype parameter. 
\"\"\" valid_dtype_types = (types.NoneType, types.Omitted, types.UnicodeType, types.NumberClass) return", "variable var was resolved as Python type and has specific", "raise_exc(self, data, expected_types, name=''): \"\"\" Raise exception with unified message", "def check_index_is_numeric(ty_series): \"\"\"Used during typing to check that series has", "from sdc.datatypes.categorical.types import Categorical sdc_old_index_types = (types.Array, StringArrayType, ) sdc_pandas_index_types", "import sdc from numba import types from numba.core.errors import TypingError", "ANY WAY OUT OF THE USE OF THIS SOFTWARE, #", "or has_python_value(var, value) or isinstance(var, types.Omitted) def check_is_numeric_array(type_var): \"\"\"Used during", "loc_vars = {} exec(func_text, global_vars, loc_vars) _impl = loc_vars[impl_name] return", "series and verify if index dtypes are equal\"\"\" left_index_dtype =", "a format 'key=value'\"\"\" return ['{}={}'.format(k, v) for k, v in", "value) or has_python_value(var, value) or isinstance(var, types.Omitted) def check_is_numeric_array(type_var): \"\"\"Used", "func_name): \"\"\" Parameters ---------- func_name: :obj:`str` name of the function", "LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS", "numeric numpy array of specific dtype\"\"\" return isinstance(type_var, types.Array) and", "IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED", "list of conditions and the following disclaimer. # # Redistributions", "sdc_pandas_index_types = ( EmptyIndexType, PositionalIndexType, RangeIndexType, Int64IndexType, MultiIndexType, ) +", "for indexes of two series and verify if index dtypes", "and the following disclaimer in the documentation # and/or other", "the function where types checking \"\"\" self.func_name = func_name def", "types.Omitted) def check_is_numeric_array(type_var): \"\"\"Used during typing to check that type_var", "# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND", "the distribution. 
# # THIS SOFTWARE IS PROVIDED BY THE", "OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE", "has specific value\"\"\" if not isinstance(var, type(value)): return False if", "def check_arrays_comparable(ty_left, ty_right): \"\"\"Used during typing to check that underlying", "compilation phase \"\"\" import numpy import numba import sdc from", "== right_index_dtype if not index_dtypes_match: numba_index_common_dtype = find_common_dtype_from_numpy_dtypes( [left_index_dtype, right_index_dtype],", "INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF", "The object {}\\n given: {}\\n expected: {}' def __init__(self, func_name):", "add the rest of supported types here if isinstance(ty_left, types.Number):", "to specified type Parameters ---------- data: :obj:`any` real type of", "ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY,", "of supported types here if isinstance(ty_left, types.Number): return isinstance(ty_right, types.Number)", "= ( types.Array, StringArrayType, Categorical, ) class TypeChecker: \"\"\" Validate", "BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY,", "string_array_type) or (check_is_numeric_array(ty_left) and check_is_numeric_array(ty_right))) def check_is_array_of_dtype(type_var, dtype): \"\"\"Used during", "data: :obj:`any` real type of the data expected_types: :obj:`str` expected", "expected: int \"\"\" msg_template = '{} The object {}\\n given:", "name=name) class SDCLimitation(Exception): \"\"\"Exception to be raised in case of", "THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF", "dtype is a valid type for dtype parameter and False", "this list of conditions and the following disclaimer. 
# #", "return numba_common_dtype def find_index_common_dtype(left, right): \"\"\"Used to find common dtype", "def find_index_common_dtype(left, right): \"\"\"Used to find common dtype for indexes", "WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES", "check(self, data, accepted_type, name=''): \"\"\" Check data type belongs to", "data, accepted_type, name=''): \"\"\" Check data type belongs to specified", "IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,", "numba_index_common_dtype = left_index_dtype return index_dtypes_match, numba_index_common_dtype def gen_impl_generator(codegen, impl_name): \"\"\"Generate", "Used in RangeIndex ctor and other methods that take dtype", "other materials provided with the distribution. # # THIS SOFTWARE", "isinstance(var, types.Literal): return False if value is None: return isinstance(var,", "retain the above copyright notice, # this list of conditions", "types.Number) if isinstance(ty_left, types.UnicodeType): return isinstance(ty_right, types.UnicodeType) if isinstance(ty_left, types.Boolean):", "for dtype in array_types] np_scalar_dtypes = [numpy_support.as_dtype(dtype) for dtype in", "All rights reserved. # # Redistribution and use in source", "This file contains SDC utility functions related to typing compilation", "left.dtype right_index_dtype = right.dtype index_dtypes_match = left_index_dtype == right_index_dtype if", "left_index_dtype == right_index_dtype if not index_dtypes_match: numba_index_common_dtype = find_common_dtype_from_numpy_dtypes( [left_index_dtype,", "RangeIndex ctor and other methods that take dtype parameter. \"\"\"", "without # modification, are permitted provided that the following conditions", "( PositionalIndexType, RangeIndexType, ) # TO-DO: support caching of data", "pass def kwsparams2list(params): \"\"\"Convert parameters dict to a list of", "common numba dtype for a sequences of numba dtypes each", "if the type is invalid, e.g.: Method nsmallest(). 
The object", "isinstance(ty_left, (types.Tuple, types.UniTuple)): # FIXME: just for now to unblock", "def raise_exc(self, data, expected_types, name=''): \"\"\" Raise exception with unified", "THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR", "verify if index dtypes are equal\"\"\" left_index_dtype = left.dtype right_index_dtype", "file contains SDC utility functions related to typing compilation phase", "# TO-DO: support caching of data allocated for range indexes", "this list of conditions and the following disclaimer in the", "modification, are permitted provided that the following conditions are met:", "not isinstance(var, type(value)): return False if value is None or", "types.Number): return isinstance(ty_right, types.Number) if isinstance(ty_left, types.UnicodeType): return isinstance(ty_right, types.UnicodeType)", "during typing to check that underlying arrays of specified types", "a format 'key=value' from parameter names and default values\"\"\" return", "impl_name): \"\"\"Generate generator of an implementation\"\"\" def _df_impl_generator(*args, **kwargs): func_text,", "Parameters ---------- data: :obj:`any` real type of the data accepted_type:", "name of the parameter \"\"\" if not isinstance(data, accepted_type): self.raise_exc(data,", "class TypeChecker: \"\"\" Validate object type and raise TypingError if", "of a format 'key=value'\"\"\" return ['{}={}'.format(k, v) for k, v", "import TypingError from numba.np import numpy_support from sdc.datatypes.indexes import *", "typing to check that specified types can be compared\"\"\" if", "find common dtype for indexes of two series and verify", "TypeChecker: \"\"\" Validate object type and raise TypingError if the", "EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, #", "and ty_right == string_array_type) or (check_is_numeric_array(ty_left) and check_is_numeric_array(ty_right))) def check_is_array_of_dtype(type_var,", "can be compared\"\"\" if hasattr(ty_left, 'dtype'): 
ty_left = ty_left.dtype if", "is value else: return var == value def is_default(var, value):", "elif isinstance(value, type(bool)): return var.literal_value is value else: return var.literal_value", "import types from numba.core.errors import TypingError from numba.np import numpy_support", "ty_left.dtype if hasattr(ty_right, 'dtype'): ty_right = ty_right.dtype # add the", "self.func_name = func_name def raise_exc(self, data, expected_types, name=''): \"\"\" Raise", "reserved. # # Redistribution and use in source and binary", "real type of the data expected_types: :obj:`str` expected types inserting", "is a Numba literal value equal to value\"\"\" if not", "\"\"\"Used during typing to check that underlying arrays of specified", "value equal to value\"\"\" if not isinstance(var, types.Literal): return False", "OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR", "var.literal_value is value elif isinstance(value, type(bool)): return var.literal_value is value", "in param_names] def has_literal_value(var, value): \"\"\"Used during typing to check", "types can be compared\"\"\" if hasattr(ty_left, 'dtype'): ty_left = ty_left.dtype", "BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY", "\"\"\"Convert parameters dict to a list of string of a", "LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS", "= ty_left.dtype if hasattr(ty_right, 'dtype'): ty_right = ty_right.dtype # add", "'{} The object {}\\n given: {}\\n expected: {}' def __init__(self,", "value else: return var == value def is_default(var, value): return", "right_index_dtype], []) else: numba_index_common_dtype = left_index_dtype return index_dtypes_match, numba_index_common_dtype def", "['{}={}'.format(k, v) for k, v in params.items()] def sigparams2list(param_names, defaults):", "var.literal_value == value def has_python_value(var, value): \"\"\"Used during typing to", ") class TypeChecker: \"\"\" Validate object type and raise TypingError", "indexes at request for .values 
sdc_indexes_wo_values_cache = ( EmptyIndexType, PositionalIndexType,", "object n given: bool expected: int \"\"\" msg_template = '{}", "check that series has numeric index\"\"\" return isinstance(ty_series.index.dtype, types.Number) def", "PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER", "# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS", "# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.", "return var.literal_value == value def has_python_value(var, value): \"\"\"Used during typing", "[(f'{param}' if param not in defaults else f'{param}={defaults[param]}') for param", "EmptyIndexType, PositionalIndexType, RangeIndexType, ) sdc_pandas_df_column_types = ( types.Array, StringArrayType, Categorical,", "**kwargs) loc_vars = {} exec(func_text, global_vars, loc_vars) _impl = loc_vars[impl_name]", "== string_array_type and ty_right == string_array_type) or (check_is_numeric_array(ty_left) and check_is_numeric_array(ty_right)))", "Numba literal value equal to value\"\"\" if not isinstance(var, types.Literal):", "PositionalIndexType, RangeIndexType, Int64IndexType, MultiIndexType, ) + sdc_old_index_types sdc_indexes_range_like = (", "a numeric numpy arrays\"\"\" return check_is_array_of_dtype(type_var, types.Number) def check_index_is_numeric(ty_series): \"\"\"Used", "\"\"\" msg_template = '{} The object {}\\n given: {}\\n expected:", "numba import sdc from numba import types from numba.core.errors import", "LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION)", "IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"", "given: {}\\n expected: {}' def __init__(self, func_name): \"\"\" Parameters ----------", "series has numeric index\"\"\" return isinstance(ty_series.index.dtype, types.Number) def check_types_comparable(ty_left, ty_right):", "def is_default(var, value): return has_literal_value(var, value) or has_python_value(var, value) or", "for now to unblock compilation return ty_left == ty_right return", "typing to check that 
variable var was resolved as Python", "types.Array, StringArrayType, Categorical, ) class TypeChecker: \"\"\" Validate object type", "else: return var == value def is_default(var, value): return has_literal_value(var,", "to value\"\"\" if not isinstance(var, types.Literal): return False if value", "numba_common_dtype = numpy_support.from_dtype(np_common_dtype) return numba_common_dtype def find_index_common_dtype(left, right): \"\"\"Used to", "OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE", "Corporation All rights reserved. # # Redistribution and use in", "and isinstance(type_var.dtype, dtype) def find_common_dtype_from_numpy_dtypes(array_types, scalar_types): \"\"\"Used to find common", "sdc_indexes_range_like = ( PositionalIndexType, RangeIndexType, ) # TO-DO: support caching", "\"\"\"Used during typing to check that series has numeric index\"\"\"", "return isinstance(ty, types.Integer) and ty.signed def _check_dtype_param_type(dtype): \"\"\" Returns True", "for dtype parameter and False otherwise. 
Used in RangeIndex ctor", "= ( PositionalIndexType, RangeIndexType, ) # TO-DO: support caching of", "utility functions related to typing compilation phase \"\"\" import numpy", "\"\"\"Creates a list of strings of a format 'key=value' from", "not isinstance(data, accepted_type): self.raise_exc(data, accepted_type.__name__, name=name) class SDCLimitation(Exception): \"\"\"Exception to", "def find_common_dtype_from_numpy_dtypes(array_types, scalar_types): \"\"\"Used to find common numba dtype for", "exception with unified message Parameters ---------- data: :obj:`any` real type", "# Redistributions in binary form must reproduce the above copyright", "# ***************************************************************************** # Copyright (c) 2020, Intel Corporation All rights", "DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE", "has_python_value(var, value): \"\"\"Used during typing to check that variable var", "dtype): \"\"\"Used during typing to check that type_var is a", "scalar_types] np_common_dtype = numpy.find_common_type(np_array_dtypes, np_scalar_dtypes) numba_common_dtype = numpy_support.from_dtype(np_common_dtype) return numba_common_dtype", "MultiIndexType, ) + sdc_old_index_types sdc_indexes_range_like = ( PositionalIndexType, RangeIndexType, )", "a sequences of numba dtypes each representing some numpy dtype\"\"\"", ":obj:`any` real type of the data accepted_type: :obj:`type` accepted type", "check that specified types can be compared\"\"\" if hasattr(ty_left, 'dtype'):", "else: numba_index_common_dtype = left_index_dtype return index_dtypes_match, numba_index_common_dtype def gen_impl_generator(codegen, impl_name):", "isinstance(value, type(bool)): return var.literal_value is value else: return var.literal_value ==", "type belongs to specified type Parameters ---------- data: :obj:`any` real", "numpy array of specific dtype\"\"\" return isinstance(type_var, types.Array) and isinstance(type_var.dtype,", "SUBSTITUTE GOODS OR 
SERVICES; LOSS OF USE, DATA, OR PROFITS;", "value): return has_literal_value(var, value) or has_python_value(var, value) or isinstance(var, types.Omitted)", "ty_right): \"\"\"Used during typing to check that specified types can", "of the data accepted_type: :obj:`type` accepted type name: :obj:`str` name", "during typing to check that variable var is a Numba", "limitation\"\"\" pass def kwsparams2list(params): \"\"\"Convert parameters dict to a list", "value): \"\"\"Used during typing to check that variable var is", "dtype parameter and False otherwise. Used in RangeIndex ctor and", "right.dtype index_dtypes_match = left_index_dtype == right_index_dtype if not index_dtypes_match: numba_index_common_dtype", "value is None or isinstance(value, type(bool)): return var is value", "= '{} The object {}\\n given: {}\\n expected: {}' def", "\"\"\" self.func_name = func_name def raise_exc(self, data, expected_types, name=''): \"\"\"", "IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT", "bool expected: int \"\"\" msg_template = '{} The object {}\\n", "numeric numpy arrays\"\"\" return check_is_array_of_dtype(type_var, types.Number) def check_index_is_numeric(ty_series): \"\"\"Used during", "OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON", "functions related to typing compilation phase \"\"\" import numpy import", "SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS", "typing to check that variable var is a Numba literal", "import numpy_support from sdc.datatypes.indexes import * from sdc.str_arr_type import string_array_type,", "SUCH DAMAGE. # ***************************************************************************** \"\"\" | This file contains SDC", "is None: return isinstance(var, types.NoneType) or var.literal_value is value elif", "Intel Corporation All rights reserved. 
# # Redistribution and use", "of two series and verify if index dtypes are equal\"\"\"", "def check_signed_integer(ty): return isinstance(ty, types.Integer) and ty.signed def _check_dtype_param_type(dtype): \"\"\"", "string_array_type and ty_right == string_array_type) or (check_is_numeric_array(ty_left) and check_is_numeric_array(ty_right))) def", "n given: bool expected: int \"\"\" msg_template = '{} The", "CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, #", "numba.core.errors import TypingError from numba.np import numpy_support from sdc.datatypes.indexes import", "COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS", "compilation return ty_left == ty_right return False def check_arrays_comparable(ty_left, ty_right):", "type and raise TypingError if the type is invalid, e.g.:", "# Copyright (c) 2020, Intel Corporation All rights reserved. #", "INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, #", "data, expected_types) raise TypingError(msg) def check(self, data, accepted_type, name=''): \"\"\"", "type for dtype parameter and False otherwise. 
Used in RangeIndex", "HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER", "index_dtypes_match = left_index_dtype == right_index_dtype if not index_dtypes_match: numba_index_common_dtype =", "else f'{param}={defaults[param]}') for param in param_names] def has_literal_value(var, value): \"\"\"Used", "with unified message Parameters ---------- data: :obj:`any` real type of", "the rest of supported types here if isinstance(ty_left, types.Number): return", "**kwargs): func_text, global_vars = codegen(*args, **kwargs) loc_vars = {} exec(func_text,", "= ty_right.dtype # add the rest of supported types here", "types inserting directly to the exception name: :obj:`str` name of", "index_dtypes_match, numba_index_common_dtype def gen_impl_generator(codegen, impl_name): \"\"\"Generate generator of an implementation\"\"\"", "hasattr(ty_left, 'dtype'): ty_left = ty_left.dtype if hasattr(ty_right, 'dtype'): ty_right =", "LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN", "def check_is_numeric_array(type_var): \"\"\"Used during typing to check that type_var is", "real type of the data accepted_type: :obj:`type` accepted type name:", "numba dtype for a sequences of numba dtypes each representing", "literal value equal to value\"\"\" if not isinstance(var, types.Literal): return", "find_index_common_dtype(left, right): \"\"\"Used to find common dtype for indexes of", "is value elif isinstance(value, type(bool)): return var.literal_value is value else:", "= [numpy_support.as_dtype(dtype) for dtype in scalar_types] np_common_dtype = numpy.find_common_type(np_array_dtypes, np_scalar_dtypes)", "THE POSSIBILITY OF SUCH DAMAGE. # ***************************************************************************** \"\"\" | This", "must retain the above copyright notice, # this list of", "type and has specific value\"\"\" if not isinstance(var, type(value)): return", "DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR #", "check_arrays_comparable(ty_left, ty_right): \"\"\"Used during typing to check that underlying arrays", "type(value)): return False if value is None or isinstance(value, type(bool)):", "nsmallest(). The object n given: bool expected: int \"\"\" msg_template", "(INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS", "and has specific value\"\"\" if not isinstance(var, type(value)): return False", "types.Integer) and ty.signed def _check_dtype_param_type(dtype): \"\"\" Returns True is dtype", "def gen_impl_generator(codegen, impl_name): \"\"\"Generate generator of an implementation\"\"\" def _df_impl_generator(*args,", "CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE)", "False if value is None: return isinstance(var, types.NoneType) or var.literal_value", "\"\"\" msg = self.msg_template.format(self.func_name, name, data, expected_types) raise TypingError(msg) def", "SDCLimitation(Exception): \"\"\"Exception to be raised in case of SDC limitation\"\"\"", "IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS", "check_is_numeric_array(ty_right))) def check_is_array_of_dtype(type_var, dtype): \"\"\"Used during typing to check that", "of SDC limitation\"\"\" pass def kwsparams2list(params): \"\"\"Convert parameters dict to", "accepted_type): self.raise_exc(data, accepted_type.__name__, name=name) class SDCLimitation(Exception): \"\"\"Exception to be raised", "if isinstance(ty_left, types.UnicodeType): return isinstance(ty_right, types.UnicodeType) if isinstance(ty_left, types.Boolean): return", "== ty_right return False def check_arrays_comparable(ty_left, ty_right): \"\"\"Used during typing", "and default values\"\"\" return [(f'{param}' if param not in defaults", "BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND", "THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY", "= left_index_dtype return index_dtypes_match, numba_index_common_dtype def 
gen_impl_generator(codegen, impl_name): \"\"\"Generate generator", "compared\"\"\" if hasattr(ty_left, 'dtype'): ty_left = ty_left.dtype if hasattr(ty_right, 'dtype'):", "from numba.core.errors import TypingError from numba.np import numpy_support from sdc.datatypes.indexes", "StringArrayType from sdc.datatypes.categorical.types import Categorical sdc_old_index_types = (types.Array, StringArrayType, )", "resolved as Python type and has specific value\"\"\" if not" ]
[ "to sum. Time complexity is O(√n). Time of execution ~", "through N and check if every number is prime or", "= sieve[p-1] sieve[p+1] = sieve[p] p += 2 for _", "0] * 500000 sieve[0], sieve[1], sieve[2] = 0, 0, 2", "i in s if i <= n)) # Sieve implementation:", "+ 1, 6): if p % i == 0 or", "1000000 limit = 1000000 sieve = [0] + [1, 0]", "Time complexity of O(n*log(log(n))). Time of execution ~ 2sec for", "limit: if sieve[p]: sieve[p] = sieve[p-1] + p for i", "_ in range(int(input())): n = int(input()) print(sum(i for i in", "= 0 else: sieve[p] = sieve[p-1] sieve[p+1] = sieve[p] p", "p % (i + 2) == 0: break else: yield", "of O(n*log(log(n))). Time of execution ~ 2sec for n =", "p in range(5, n+1, 2): if p % 3 ==", "in range (5, int(sqrt(p)) + 1, 6): if p %", "of execution ~ 2sec for n = 1000000 limit =", "Time complexity is O(√n). Time of execution ~ 8sec for", "every number is prime or not. If prime add to", "Time of execution ~ 8sec for n = 1000000 def", "of execution ~ 8sec for n = 1000000 def prime(n):", "i == 0 or p % (i + 2) ==", "% 3 == 0: continue else: for i in range", "<= limit: if sieve[p]: sieve[p] = sieve[p-1] + p for", "if i <= n)) # Sieve implementation: Time complexity of", "import sqrt # Naive method: Loop through N and check", "+ [1, 0] * 500000 sieve[0], sieve[1], sieve[2] = 0,", "0, 2 p = 3 while p <= limit: if", "set(prime(1000000)) for _ in range(int(input())): n = int(input()) print(sum(i for", "for i in range (5, int(sqrt(p)) + 1, 6): if", "2): if p % 3 == 0: continue else: for", "implementation: Time complexity of O(n*log(log(n))). Time of execution ~ 2sec", "execution ~ 8sec for n = 1000000 def prime(n): yield", "limit+1, p): sieve[i] = 0 else: sieve[p] = sieve[p-1] sieve[p+1]", "else: yield p s = set(prime(1000000)) for _ in range(int(input())):", "(5, int(sqrt(p)) + 1, 6): if p % i ==", "from math import sqrt # Naive method: Loop through N", "method: Loop through N and check if every number is", "prime or not. If prime add to sum. 
Time complexity", "if p % 3 == 0: continue else: for i", "yield p s = set(prime(1000000)) for _ in range(int(input())): n", "N and check if every number is prime or not.", "sieve[1], sieve[2] = 0, 0, 2 p = 3 while", "= 1000000 def prime(n): yield 2 yield 3 for p", "def prime(n): yield 2 yield 3 for p in range(5,", "not. If prime add to sum. Time complexity is O(√n).", "n = 1000000 def prime(n): yield 2 yield 3 for", "int(sqrt(p)) + 1, 6): if p % i == 0", "i <= n)) # Sieve implementation: Time complexity of O(n*log(log(n))).", "for n = 1000000 def prime(n): yield 2 yield 3", "sieve[p-1] + p for i in range(p*p, limit+1, p): sieve[i]", "range(int(input())): n = int(input()) print(sum(i for i in s if", "or not. If prime add to sum. Time complexity is", "== 0 or p % (i + 2) == 0:", "0: break else: yield p s = set(prime(1000000)) for _", "sqrt # Naive method: Loop through N and check if", "1000000 def prime(n): yield 2 yield 3 for p in", "for _ in range(int(input())): n = int(input()) print(sum(i for i", "p): sieve[i] = 0 else: sieve[p] = sieve[p-1] sieve[p+1] =", "= 1000000 limit = 1000000 sieve = [0] + [1,", "p for i in range(p*p, limit+1, p): sieve[i] = 0", "3 == 0: continue else: for i in range (5,", "number is prime or not. If prime add to sum.", "sum. Time complexity is O(√n). Time of execution ~ 8sec", "in range(p*p, limit+1, p): sieve[i] = 0 else: sieve[p] =", "prime add to sum. Time complexity is O(√n). Time of", "O(n*log(log(n))). Time of execution ~ 2sec for n = 1000000", "else: for i in range (5, int(sqrt(p)) + 1, 6):", "break else: yield p s = set(prime(1000000)) for _ in", "s = set(prime(1000000)) for _ in range(int(input())): n = int(input())", "if p % i == 0 or p % (i", "<= n)) # Sieve implementation: Time complexity of O(n*log(log(n))). Time", "complexity of O(n*log(log(n))). 
Time of execution ~ 2sec for n", "2sec for n = 1000000 limit = 1000000 sieve =", "2 p = 3 while p <= limit: if sieve[p]:", "range (5, int(sqrt(p)) + 1, 6): if p % i", "sieve[p]: sieve[p] = sieve[p-1] + p for i in range(p*p,", "Time of execution ~ 2sec for n = 1000000 limit", "math import sqrt # Naive method: Loop through N and", "complexity is O(√n). Time of execution ~ 8sec for n", "% i == 0 or p % (i + 2)", "sieve[0], sieve[1], sieve[2] = 0, 0, 2 p = 3", "sieve[p-1] sieve[p+1] = sieve[p] p += 2 for _ in", "is prime or not. If prime add to sum. Time", "<reponame>PROxZIMA/Competitive-Coding from math import sqrt # Naive method: Loop through", "p % 3 == 0: continue else: for i in", "= set(prime(1000000)) for _ in range(int(input())): n = int(input()) print(sum(i", "= 0, 0, 2 p = 3 while p <=", "sieve[2] = 0, 0, 2 p = 3 while p", "in s if i <= n)) # Sieve implementation: Time", "(i + 2) == 0: break else: yield p s", "Loop through N and check if every number is prime", "== 0: continue else: for i in range (5, int(sqrt(p))", "3 while p <= limit: if sieve[p]: sieve[p] = sieve[p-1]", "i in range (5, int(sqrt(p)) + 1, 6): if p", "for i in s if i <= n)) # Sieve", "== 0: break else: yield p s = set(prime(1000000)) for", "3 for p in range(5, n+1, 2): if p %", "prime(n): yield 2 yield 3 for p in range(5, n+1,", "for i in range(p*p, limit+1, p): sieve[i] = 0 else:", "= 1000000 sieve = [0] + [1, 0] * 500000", "if every number is prime or not. If prime add", "Naive method: Loop through N and check if every number", "# Naive method: Loop through N and check if every", "+ 2) == 0: break else: yield p s =", "+ p for i in range(p*p, limit+1, p): sieve[i] =", "check if every number is prime or not. If prime", "O(√n). 
Time of execution ~ 8sec for n = 1000000", "int(input()) print(sum(i for i in s if i <= n))", "500000 sieve[0], sieve[1], sieve[2] = 0, 0, 2 p =", "yield 2 yield 3 for p in range(5, n+1, 2):", "~ 8sec for n = 1000000 def prime(n): yield 2", "limit = 1000000 sieve = [0] + [1, 0] *", "i in range(p*p, limit+1, p): sieve[i] = 0 else: sieve[p]", "print(sum(i for i in s if i <= n)) #", "# Sieve implementation: Time complexity of O(n*log(log(n))). Time of execution", "0: continue else: for i in range (5, int(sqrt(p)) +", "n = 1000000 limit = 1000000 sieve = [0] +", "If prime add to sum. Time complexity is O(√n). Time", "for p in range(5, n+1, 2): if p % 3", "for n = 1000000 limit = 1000000 sieve = [0]", "p = 3 while p <= limit: if sieve[p]: sieve[p]", "p <= limit: if sieve[p]: sieve[p] = sieve[p-1] + p", "sieve = [0] + [1, 0] * 500000 sieve[0], sieve[1],", "add to sum. Time complexity is O(√n). Time of execution", "else: sieve[p] = sieve[p-1] sieve[p+1] = sieve[p] p += 2", "continue else: for i in range (5, int(sqrt(p)) + 1,", "p % i == 0 or p % (i +", "0 or p % (i + 2) == 0: break", "yield 3 for p in range(5, n+1, 2): if p", "= sieve[p-1] + p for i in range(p*p, limit+1, p):", "Sieve implementation: Time complexity of O(n*log(log(n))). Time of execution ~", "range(p*p, limit+1, p): sieve[i] = 0 else: sieve[p] = sieve[p-1]", "8sec for n = 1000000 def prime(n): yield 2 yield", "2) == 0: break else: yield p s = set(prime(1000000))", "n)) # Sieve implementation: Time complexity of O(n*log(log(n))). 
Time of", "= [0] + [1, 0] * 500000 sieve[0], sieve[1], sieve[2]", "sieve[i] = 0 else: sieve[p] = sieve[p-1] sieve[p+1] = sieve[p]", "sieve[p] = sieve[p-1] sieve[p+1] = sieve[p] p += 2 for", "= 3 while p <= limit: if sieve[p]: sieve[p] =", "range(5, n+1, 2): if p % 3 == 0: continue", "n = int(input()) print(sum(i for i in s if i", "n+1, 2): if p % 3 == 0: continue else:", "2 yield 3 for p in range(5, n+1, 2): if", "6): if p % i == 0 or p %", "s if i <= n)) # Sieve implementation: Time complexity", "in range(int(input())): n = int(input()) print(sum(i for i in s", "and check if every number is prime or not. If", "% (i + 2) == 0: break else: yield p", "execution ~ 2sec for n = 1000000 limit = 1000000", "0, 0, 2 p = 3 while p <= limit:", "[1, 0] * 500000 sieve[0], sieve[1], sieve[2] = 0, 0,", "0 else: sieve[p] = sieve[p-1] sieve[p+1] = sieve[p] p +=", "1000000 sieve = [0] + [1, 0] * 500000 sieve[0],", "if sieve[p]: sieve[p] = sieve[p-1] + p for i in", "~ 2sec for n = 1000000 limit = 1000000 sieve", "or p % (i + 2) == 0: break else:", "in range(5, n+1, 2): if p % 3 == 0:", "sieve[p] = sieve[p-1] + p for i in range(p*p, limit+1,", "p s = set(prime(1000000)) for _ in range(int(input())): n =", "is O(√n). Time of execution ~ 8sec for n =", "sieve[p+1] = sieve[p] p += 2 for _ in range(int(input())):", "[0] + [1, 0] * 500000 sieve[0], sieve[1], sieve[2] =", "= int(input()) print(sum(i for i in s if i <=", "* 500000 sieve[0], sieve[1], sieve[2] = 0, 0, 2 p", "1, 6): if p % i == 0 or p", "= sieve[p] p += 2 for _ in range(int(input())): print(sieve[int(input())])", "while p <= limit: if sieve[p]: sieve[p] = sieve[p-1] +" ]
[ "the ErrorFlag histogram) # - SystemMask: system mask: if 1,", "), cms.PSet( SystemLabel = cms.string(\"DTTF\"), HwValLabel = cms.string(\"DTF\"), SystemMask =", "SystemFolder: the folder where the ErrorFlag histogram is looked for", "= cms.string(\"GT\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"L1TEMU/Stage1GTexpert\") ) )", "histogram) # - SystemMask: system mask: if 1, the system", "- SystemMask: system mask: if 1, the system is masked", "HwValLabel = cms.string(\"HTP\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"\") ),", "= cms.string(\"RPC\"), HwValLabel = cms.string(\"RPC\"), SystemMask = cms.uint32(0), SystemFolder =", "label as used in hardware validation package # (the package", "= cms.string(\"ECAL\"), HwValLabel = cms.string(\"ETP\"), SystemMask = cms.uint32(1), SystemFolder =", "cms.PSet( SystemLabel = cms.string(\"DTTF\"), HwValLabel = cms.string(\"DTF\"), SystemMask = cms.uint32(0),", "# # the position in the parameter set gives, in", "cms.PSet( SystemLabel = cms.string(\"GT\"), HwValLabel = cms.string(\"GT\"), SystemMask = cms.uint32(1),", "= cms.string(\"HCAL\"), HwValLabel = cms.string(\"HTP\"), SystemMask = cms.uint32(1), SystemFolder =", "- SystemLabel: system label # - HwValLabel: system label as", "= cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"RPC\"), HwValLabel = cms.string(\"RPC\"),", "), cms.PSet( SystemLabel = cms.string(\"CSCTF\"), HwValLabel = cms.string(\"CTF\"), SystemMask =", "summary plot # - SystemFolder: the folder where the ErrorFlag", "HwValLabel: system label as used in hardware validation package #", "in the parameter set gives, in reverse order, the position", "plot # - SystemFolder: the folder where the ErrorFlag histogram", "as cms from DQMServices.Core.DQMEDHarvester import DQMEDHarvester l1EmulatorErrorFlagClient = DQMEDHarvester(\"L1EmulatorErrorFlagClient\", #", "cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"RCT\"), HwValLabel", 
"cms.string(\"DTP\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel", "HwValLabel = cms.string(\"CTF\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"\") ),", "gives, in reverse order, the position in the reportSummaryMap #", "- SystemFolder: the folder where the ErrorFlag histogram is looked", "system is masked in the summary plot # - SystemFolder:", "), cms.PSet( SystemLabel = cms.string(\"RPC\"), HwValLabel = cms.string(\"RPC\"), SystemMask =", "give: # - SystemLabel: system label # - HwValLabel: system", "SystemLabel: system label # - HwValLabel: system label as used", "position in the reportSummaryMap # in the emulator column (left", "= cms.string(\"RPC\"), SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet(", "= cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"GT\"), HwValLabel = cms.string(\"GT\"),", "SystemMask: system mask: if 1, the system is masked in", "= cms.string(\"DTF\"), SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet(", "HwValLabel = cms.string(\"Stage1Layer2\"), SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\") ),", "parameter set gives, in reverse order, the position in the", "= cms.string(\"HTP\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet(", "FWCore.ParameterSet.Config as cms from DQMServices.Core.DQMEDHarvester import DQMEDHarvester l1EmulatorErrorFlagClient = DQMEDHarvester(\"L1EmulatorErrorFlagClient\",", "cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"Stage1Layer2\"), HwValLabel = cms.string(\"Stage1Layer2\"), SystemMask", "import DQMEDHarvester l1EmulatorErrorFlagClient = DQMEDHarvester(\"L1EmulatorErrorFlagClient\", # # for each L1", "l1EmulatorErrorFlagClient = DQMEDHarvester(\"L1EmulatorErrorFlagClient\", # # for each L1 system, give:", "package # (the package producing the ErrorFlag histogram) # -", "cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"GMT\"), HwValLabel = 
cms.string(\"GMT\"), SystemMask", "cms.string(\"GMT\"), SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel", "= cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"CSCTPG\"), HwValLabel = cms.string(\"CTP\"),", "= cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"DTTPG\"), HwValLabel = cms.string(\"DTP\"),", "= cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"GMT\"),", "), cms.PSet( SystemLabel = cms.string(\"GMT\"), HwValLabel = cms.string(\"GMT\"), SystemMask =", "# # for each L1 system, give: # - SystemLabel:", "cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"DTTPG\"), HwValLabel", "cms.PSet( SystemLabel = cms.string(\"RPC\"), HwValLabel = cms.string(\"RPC\"), SystemMask = cms.uint32(0),", "cms.PSet( SystemLabel = cms.string(\"DTTPG\"), HwValLabel = cms.string(\"DTP\"), SystemMask = cms.uint32(1),", "set gives, in reverse order, the position in the reportSummaryMap", "= cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"RCT\"), HwValLabel = cms.string(\"RCT\"),", "emulator column (left column) L1Systems = cms.VPSet( cms.PSet( SystemLabel =", "= cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"GT\"),", "looked for # # the position in the parameter set", "# for each L1 system, give: # - SystemLabel: system", "= cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"DTTF\"), HwValLabel = cms.string(\"DTF\"),", "position in the parameter set gives, in reverse order, the", "cms.string(\"Stage1Layer2\"), SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel", "cms.string(\"DTTPG\"), HwValLabel = cms.string(\"DTP\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"\")", "SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"GMT\"), HwValLabel =", "the parameter set gives, in reverse order, the position in", "= cms.VPSet( cms.PSet( SystemLabel = 
cms.string(\"ECAL\"), HwValLabel = cms.string(\"ETP\"), SystemMask", "is looked for # # the position in the parameter", "cms.VPSet( cms.PSet( SystemLabel = cms.string(\"ECAL\"), HwValLabel = cms.string(\"ETP\"), SystemMask =", "cms.string(\"GMT\"), HwValLabel = cms.string(\"GMT\"), SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\")", "= cms.string(\"GT\"), HwValLabel = cms.string(\"GT\"), SystemMask = cms.uint32(1), SystemFolder =", "), cms.PSet( SystemLabel = cms.string(\"DTTPG\"), HwValLabel = cms.string(\"DTP\"), SystemMask =", "label # - HwValLabel: system label as used in hardware", "cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"CSCTPG\"), HwValLabel", "cms.string(\"RPC\"), SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel", "cms.string(\"GT\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"L1TEMU/Stage1GTexpert\") ) ) )", "= cms.string(\"CTP\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet(", "SystemLabel = cms.string(\"GMT\"), HwValLabel = cms.string(\"GMT\"), SystemMask = cms.uint32(0), SystemFolder", "histogram is looked for # # the position in the", "= DQMEDHarvester(\"L1EmulatorErrorFlagClient\", # # for each L1 system, give: #", "cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"HCAL\"), HwValLabel", "= cms.string(\"CSCTF\"), HwValLabel = cms.string(\"CTF\"), SystemMask = cms.uint32(1), SystemFolder =", "# - SystemFolder: the folder where the ErrorFlag histogram is", "the folder where the ErrorFlag histogram is looked for #", "= cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"HCAL\"),", "= cms.string(\"DTTPG\"), HwValLabel = cms.string(\"DTP\"), SystemMask = cms.uint32(1), SystemFolder =", "# (the package producing the ErrorFlag histogram) # - SystemMask:", "SystemLabel = cms.string(\"RCT\"), HwValLabel = cms.string(\"RCT\"), SystemMask = cms.uint32(0), 
SystemFolder", "SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"RPC\"), HwValLabel =", "SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"DTTF\"), HwValLabel =", "folder where the ErrorFlag histogram is looked for # #", "# in the emulator column (left column) L1Systems = cms.VPSet(", "SystemLabel = cms.string(\"CSCTPG\"), HwValLabel = cms.string(\"CTP\"), SystemMask = cms.uint32(1), SystemFolder", "DQMServices.Core.DQMEDHarvester import DQMEDHarvester l1EmulatorErrorFlagClient = DQMEDHarvester(\"L1EmulatorErrorFlagClient\", # # for each", "= cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"HCAL\"), HwValLabel = cms.string(\"HTP\"),", "cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"CSCTPG\"), HwValLabel = cms.string(\"CTP\"), SystemMask", "HwValLabel = cms.string(\"DTF\"), SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\") ),", "= cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"GMT\"), HwValLabel = cms.string(\"GMT\"),", "SystemLabel = cms.string(\"DTTF\"), HwValLabel = cms.string(\"DTF\"), SystemMask = cms.uint32(0), SystemFolder", "= cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"RPC\"),", "- HwValLabel: system label as used in hardware validation package", "HwValLabel = cms.string(\"RPC\"), SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\") ),", "= cms.string(\"GMT\"), HwValLabel = cms.string(\"GMT\"), SystemMask = cms.uint32(0), SystemFolder =", "1, the system is masked in the summary plot #", "is masked in the summary plot # - SystemFolder: the", "DQMEDHarvester l1EmulatorErrorFlagClient = DQMEDHarvester(\"L1EmulatorErrorFlagClient\", # # for each L1 system,", "ErrorFlag histogram is looked for # # the position in", "= cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"DTTPG\"),", "from DQMServices.Core.DQMEDHarvester import DQMEDHarvester l1EmulatorErrorFlagClient = 
DQMEDHarvester(\"L1EmulatorErrorFlagClient\", # # for", "), cms.PSet( SystemLabel = cms.string(\"RCT\"), HwValLabel = cms.string(\"RCT\"), SystemMask =", "= cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"Stage1Layer2\"),", "= cms.string(\"RCT\"), SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet(", "cms.PSet( SystemLabel = cms.string(\"RCT\"), HwValLabel = cms.string(\"RCT\"), SystemMask = cms.uint32(0),", "), cms.PSet( SystemLabel = cms.string(\"Stage1Layer2\"), HwValLabel = cms.string(\"Stage1Layer2\"), SystemMask =", "), cms.PSet( SystemLabel = cms.string(\"CSCTPG\"), HwValLabel = cms.string(\"CTP\"), SystemMask =", "HwValLabel = cms.string(\"RCT\"), SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\") ),", "masked in the summary plot # - SystemFolder: the folder", "reverse order, the position in the reportSummaryMap # in the", "L1 system, give: # - SystemLabel: system label # -", "SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"CSCTPG\"), HwValLabel =", "= cms.string(\"GMT\"), SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet(", "HwValLabel = cms.string(\"ETP\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"\") ),", "= cms.string(\"ETP\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet(", "cms.PSet( SystemLabel = cms.string(\"Stage1Layer2\"), HwValLabel = cms.string(\"Stage1Layer2\"), SystemMask = cms.uint32(0),", "SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"RCT\"), HwValLabel =", "the reportSummaryMap # in the emulator column (left column) L1Systems", "in reverse order, the position in the reportSummaryMap # in", "cms.PSet( SystemLabel = cms.string(\"GMT\"), HwValLabel = cms.string(\"GMT\"), SystemMask = cms.uint32(0),", "cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"RPC\"), HwValLabel = cms.string(\"RPC\"), SystemMask", "), cms.PSet( SystemLabel = cms.string(\"HCAL\"), 
HwValLabel = cms.string(\"HTP\"), SystemMask =", "SystemLabel = cms.string(\"DTTPG\"), HwValLabel = cms.string(\"DTP\"), SystemMask = cms.uint32(1), SystemFolder", "# - SystemMask: system mask: if 1, the system is", "L1Systems = cms.VPSet( cms.PSet( SystemLabel = cms.string(\"ECAL\"), HwValLabel = cms.string(\"ETP\"),", "cms.string(\"CTP\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel", "cms.string(\"RCT\"), HwValLabel = cms.string(\"RCT\"), SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\")", "cms.string(\"RCT\"), SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel", "= cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"Stage1Layer2\"), HwValLabel = cms.string(\"Stage1Layer2\"),", "(left column) L1Systems = cms.VPSet( cms.PSet( SystemLabel = cms.string(\"ECAL\"), HwValLabel", "the system is masked in the summary plot # -", "= cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"DTTF\"),", "each L1 system, give: # - SystemLabel: system label #", "order, the position in the reportSummaryMap # in the emulator", "cms.string(\"GT\"), HwValLabel = cms.string(\"GT\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"L1TEMU/Stage1GTexpert\")", "if 1, the system is masked in the summary plot", "SystemLabel = cms.string(\"RPC\"), HwValLabel = cms.string(\"RPC\"), SystemMask = cms.uint32(0), SystemFolder", "cms.string(\"ECAL\"), HwValLabel = cms.string(\"ETP\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"\")", "cms.string(\"CTF\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel", "cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"GT\"), HwValLabel = cms.string(\"GT\"), SystemMask", "cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"HCAL\"), HwValLabel = cms.string(\"HTP\"), SystemMask", "the position in the reportSummaryMap # in the emulator column", "cms.string(\"HTP\"), SystemMask 
= cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel", "the summary plot # - SystemFolder: the folder where the", "cms.string(\"HCAL\"), HwValLabel = cms.string(\"HTP\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"\")", "cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"DTTF\"), HwValLabel", "= cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"CSCTF\"), HwValLabel = cms.string(\"CTF\"),", "mask: if 1, the system is masked in the summary", "# the position in the parameter set gives, in reverse", "cms.string(\"CSCTPG\"), HwValLabel = cms.string(\"CTP\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"\")", "cms from DQMServices.Core.DQMEDHarvester import DQMEDHarvester l1EmulatorErrorFlagClient = DQMEDHarvester(\"L1EmulatorErrorFlagClient\", # #", "for each L1 system, give: # - SystemLabel: system label", "the position in the parameter set gives, in reverse order,", "cms.PSet( SystemLabel = cms.string(\"HCAL\"), HwValLabel = cms.string(\"HTP\"), SystemMask = cms.uint32(1),", "cms.string(\"CSCTF\"), HwValLabel = cms.string(\"CTF\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"\")", "reportSummaryMap # in the emulator column (left column) L1Systems =", "SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"HCAL\"), HwValLabel =", "validation package # (the package producing the ErrorFlag histogram) #", "= cms.string(\"CTF\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet(", "in the summary plot # - SystemFolder: the folder where", "), cms.PSet( SystemLabel = cms.string(\"GT\"), HwValLabel = cms.string(\"GT\"), SystemMask =", "system mask: if 1, the system is masked in the", "in the emulator column (left column) L1Systems = cms.VPSet( cms.PSet(", "column (left column) L1Systems = cms.VPSet( cms.PSet( SystemLabel = cms.string(\"ECAL\"),", "cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"RCT\"), HwValLabel = 
cms.string(\"RCT\"), SystemMask", "HwValLabel = cms.string(\"GT\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"L1TEMU/Stage1GTexpert\") )", "DQMEDHarvester(\"L1EmulatorErrorFlagClient\", # # for each L1 system, give: # -", "cms.PSet( SystemLabel = cms.string(\"ECAL\"), HwValLabel = cms.string(\"ETP\"), SystemMask = cms.uint32(1),", "= cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"RCT\"),", "SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"GT\"), HwValLabel =", "cms.string(\"DTTF\"), HwValLabel = cms.string(\"DTF\"), SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\")", "SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"Stage1Layer2\"), HwValLabel =", "where the ErrorFlag histogram is looked for # # the", "cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"RPC\"), HwValLabel", "hardware validation package # (the package producing the ErrorFlag histogram)", "cms.string(\"Stage1Layer2\"), HwValLabel = cms.string(\"Stage1Layer2\"), SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\")", "cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"GT\"), HwValLabel", "in the reportSummaryMap # in the emulator column (left column)", "SystemMask = cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel =", "the emulator column (left column) L1Systems = cms.VPSet( cms.PSet( SystemLabel", "ErrorFlag histogram) # - SystemMask: system mask: if 1, the", "= cms.string(\"DTTF\"), HwValLabel = cms.string(\"DTF\"), SystemMask = cms.uint32(0), SystemFolder =", "= cms.string(\"Stage1Layer2\"), SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet(", "(the package producing the ErrorFlag histogram) # - SystemMask: system", "= cms.string(\"DTP\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet(", "SystemLabel = cms.string(\"CSCTF\"), HwValLabel = 
cms.string(\"CTF\"), SystemMask = cms.uint32(1), SystemFolder", "# - SystemLabel: system label # - HwValLabel: system label", "cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"Stage1Layer2\"), HwValLabel", "HwValLabel = cms.string(\"DTP\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"\") ),", "SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"DTTPG\"), HwValLabel =", "system label as used in hardware validation package # (the", "= cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"CSCTF\"),", "for # # the position in the parameter set gives,", "cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"GMT\"), HwValLabel", "SystemLabel = cms.string(\"GT\"), HwValLabel = cms.string(\"GT\"), SystemMask = cms.uint32(1), SystemFolder", "cms.string(\"ETP\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel", "used in hardware validation package # (the package producing the", "cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"CSCTF\"), HwValLabel = cms.string(\"CTF\"), SystemMask", "cms.string(\"RPC\"), HwValLabel = cms.string(\"RPC\"), SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\")", "# - HwValLabel: system label as used in hardware validation", "cms.PSet( SystemLabel = cms.string(\"CSCTPG\"), HwValLabel = cms.string(\"CTP\"), SystemMask = cms.uint32(1),", "cms.PSet( SystemLabel = cms.string(\"CSCTF\"), HwValLabel = cms.string(\"CTF\"), SystemMask = cms.uint32(1),", "SystemLabel = cms.string(\"ECAL\"), HwValLabel = cms.string(\"ETP\"), SystemMask = cms.uint32(1), SystemFolder", "in hardware validation package # (the package producing the ErrorFlag", "= cms.string(\"CSCTPG\"), HwValLabel = cms.string(\"CTP\"), SystemMask = cms.uint32(1), SystemFolder =", "system, give: # - SystemLabel: system label # - HwValLabel:", "= cms.string(\"Stage1Layer2\"), HwValLabel = 
cms.string(\"Stage1Layer2\"), SystemMask = cms.uint32(0), SystemFolder =", "HwValLabel = cms.string(\"CTP\"), SystemMask = cms.uint32(1), SystemFolder = cms.string(\"\") ),", "as used in hardware validation package # (the package producing", "cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"CSCTF\"), HwValLabel", "<filename>DQM/L1TMonitorClient/python/L1EmulatorErrorFlagClient_cfi.py<gh_stars>100-1000 import FWCore.ParameterSet.Config as cms from DQMServices.Core.DQMEDHarvester import DQMEDHarvester l1EmulatorErrorFlagClient", "= cms.string(\"RCT\"), HwValLabel = cms.string(\"RCT\"), SystemMask = cms.uint32(0), SystemFolder =", "package producing the ErrorFlag histogram) # - SystemMask: system mask:", "HwValLabel = cms.string(\"GMT\"), SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\") ),", "SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel =", "SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"CSCTF\"), HwValLabel =", "column) L1Systems = cms.VPSet( cms.PSet( SystemLabel = cms.string(\"ECAL\"), HwValLabel =", "cms.string(\"DTF\"), SystemMask = cms.uint32(0), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel", "cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"DTTPG\"), HwValLabel = cms.string(\"DTP\"), SystemMask", "the ErrorFlag histogram is looked for # # the position", "cms.string(\"\") ), cms.PSet( SystemLabel = cms.string(\"DTTF\"), HwValLabel = cms.string(\"DTF\"), SystemMask", "SystemLabel = cms.string(\"Stage1Layer2\"), HwValLabel = cms.string(\"Stage1Layer2\"), SystemMask = cms.uint32(0), SystemFolder", "system label # - HwValLabel: system label as used in", "import FWCore.ParameterSet.Config as cms from DQMServices.Core.DQMEDHarvester import DQMEDHarvester l1EmulatorErrorFlagClient =", "producing the ErrorFlag histogram) # - SystemMask: system mask: if", "= cms.uint32(1), SystemFolder = cms.string(\"\") ), cms.PSet( SystemLabel = 
cms.string(\"CSCTPG\"),", "SystemLabel = cms.string(\"HCAL\"), HwValLabel = cms.string(\"HTP\"), SystemMask = cms.uint32(1), SystemFolder" ]
[ "nullable=False, autoincrement=True, ), Column('client_id', String(63), nullable=False), Column('name', String(63), nullable=True), Column('secret',", "Column('tag', String(63), nullable=False), Index('worksheet_tag_worksheet_uuid_index', 'worksheet_uuid'), Index('worksheet_tag_tag_index', 'tag'), ) group =", "deleting users to maintain foreign key integrity Column('first_name', String(30, convert_unicode=True)),", "nullable=False), # Permissions encoded as integer (see below) Column('permission', Integer,", "on worker. Column( 'checkin_time', DateTime, nullable=False ), # When the", "= MetaData() bundle = Table( 'bundle', db_metadata, Column( 'id', BigInteger().with_variant(Integer,", "), # When the worker last checked in with the", "Column('state', String(63), nullable=False), Column('owner_id', String(255), nullable=True), Column('is_anonymous', Boolean, nullable=False, default=False),", "= Table( 'worker_dependency', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False), Column('worker_id',", "Column('date_joined', DateTime, nullable=False), Column('has_access', Boolean, default=False, nullable=True), Column('is_verified', Boolean, nullable=False,", "Deliberately omit ForeignKey(bundle.c.uuid), because bundles can have # dependencies to", "to bundles not (yet) in the system. Column('parent_uuid', String(63), nullable=False),", "of the bundle that the sender is on? 
) #", "Boolean, nullable=False, default=False), UniqueConstraint('uuid', name='uix_1'), Index('bundle_data_hash_index', 'data_hash'), Index('state_index', 'state'), #", "'checkin_time', DateTime, nullable=False ), # When the worker last checked", "Number of bytes allowed Column('disk_used', Float, nullable=False), # Number of", "Column('is_anonymous', Boolean, nullable=False, default=False), Column( 'date_created', DateTime ), # When", "Float, nullable=False), # Number of bytes already used Index('user_user_id_index', 'user_id'),", "Permissions for worksheets group_object_permission = Table( 'group_object_permission', db_metadata, Column( 'id',", "the id of the worksheet that the sender is on?", "LargeBinary, String, Text, Unicode, ) from sqlalchemy.sql.schema import ForeignKeyConstraint db_metadata", "'user_group', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ),", "default Column( 'date_last_modified', DateTime ), # When the worksheet was", "bundle (bundle_uuid != null) # - type = worksheet (subworksheet_uuid", "null) # Deliberately omit ForeignKey(bundle.c.uuid), because worksheets can contain #", "= Table( 'user_reset_code', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False,", "Boolean, nullable=False, default=True ), # Set to False instead of", "list of allowed redirect URIs UniqueConstraint('client_id', name='uix_1'), ) oauth2_token =", "), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('date_created', DateTime, nullable=False), Column('code', String(64),", "from sqlalchemy.types import ( BigInteger, Boolean, DateTime, Enum, Float, Integer,", "null if the worksheet created before v0.5.31; Set to current_timestamp", "UTF-8 # encoding, or use appropriate database engine-specific data types", "autoincrement=True, ), Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), 
nullable=False), # A worksheet item", "Boolean, DateTime, Enum, Float, Integer, LargeBinary, String, Text, Unicode, )", "nullable=False), Column('date_created', DateTime, nullable=False), Column('date_sent', DateTime, nullable=True), Column('key', String(64), nullable=False),", "\"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), # Basic information Column('user_id', String(63),", "default=False, nullable=True), Column('is_verified', Boolean, nullable=False, default=False), Column('is_superuser', Boolean, nullable=False, default=False),", "child_path, target = (parent_uuid, parent_path) bundle_dependency = Table( 'bundle_dependency', db_metadata,", "name='uix_1'), Index('group_name_index', 'name'), Index('group_owner_id_index', 'owner_id'), ) user_group = Table( 'user_group',", "jobs allowed to run on worker. Column('is_terminating', Boolean, nullable=False), )", "constraint on the worker table so that we can create", "the CodaLab bundle system tables. \"\"\" # TODO: Replace String", "SQLAlchemy will automatically perform conversions to and from UTF-8 #", "# No foreign key constraint on the worker table so", "# Short human-readable description of the worksheet Column( 'frozen', DateTime,", "(new features) # Store information about users. user = Table(", "'worksheet_uuid'), Index('worksheet_tag_tag_index', 'tag'), ) group = Table( 'group', db_metadata, Column(", "'data_hash'), Index('state_index', 'state'), # Needed for the bundle manager. )", "table objects for the CodaLab bundle system tables. 
\"\"\" #", "one of the following: none (0), read (1), or all", "nullable=False, default=False), Column('password', String(128), nullable=False), # Additional information Column('affiliation', String(255,", "of deleting users to maintain foreign key integrity Column('first_name', String(30,", "nullable=False, default=False), Column( 'date_created', DateTime ), # When the worksheet", "Column('name', String(255), nullable=False), Column('user_defined', Boolean), Column('owner_id', String(255), nullable=True), UniqueConstraint('uuid', name='uix_1'),", "Stores password reset codes user_reset_code = Table( 'user_reset_code', db_metadata, Column(", "Column('data_hash', String(63), nullable=True), Column('state', String(63), nullable=False), Column('owner_id', String(255), nullable=True), Column('is_anonymous',", "), Column('client_id', String(63), nullable=False), Column('name', String(63), nullable=True), Column('secret', String(255), nullable=True),", "db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False), Column('worker_id', String(127), primary_key=True, nullable=False),", "of 254 to be compliant with RFC3696/5321 Column( 'notifications', Integer,", "running on each worker. worker_run = Table( 'worker_run', db_metadata, Column('user_id',", "below) Column('permission', Integer, nullable=False), ) # A permission value is", "\"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('uuid', String(63), nullable=False), Column('bundle_type', String(63),", "nullable=True), Column('url', String(255, convert_unicode=True), nullable=True), # Quotas Column('time_quota', Float, nullable=False),", "nullable=False), # Number of seconds already used Column('disk_quota', Float, nullable=False),", "Column('expires', DateTime, nullable=False), ) oauth2_auth_code = Table( 'oauth2_auth_code', db_metadata, Column(", "on worker. 
Column('memory_bytes', BigInteger, nullable=False), # Total memory of worker.", "String(30, convert_unicode=True)), Column('date_joined', DateTime, nullable=False), Column('has_access', Boolean, default=False, nullable=True), Column('is_verified',", "bundles currently running on each worker. worker_run = Table( 'worker_run',", "the following: NOTIFICATIONS_NONE = 0x00 # Receive no notifications NOTIFICATIONS_IMPORTANT", "for worksheets group_object_permission = Table( 'group_object_permission', db_metadata, Column( 'id', BigInteger().with_variant(Integer,", "# What is the id of the bundle that the", "ForeignKey(user.c.user_id), nullable=True), Column( 'grant_type', Enum(\"authorization_code\", \"password\", \"client_credentials\", \"refresh_token\"), nullable=False, ),", "), # When the worksheet was frozen (forever immutable) if", "GROUP_OBJECT_PERMISSION_ALL = 0x02 # A notifications value is one of", "primary_key=True, nullable=False, autoincrement=True, ), Column('client_id', String(63), ForeignKey(oauth2_client.c.client_id), nullable=False), Column('user_id', String(63),", "on specific workers. Column('cpus', Integer, nullable=False), # Number of CPUs", "'notifications', Integer, nullable=False, default=NOTIFICATIONS_GENERAL ), # Which emails user wants", "Column, ForeignKey, Index, MetaData, Table, UniqueConstraint from sqlalchemy.types import (", ") worksheet_item = Table( 'worksheet_item', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"),", "information about all sockets currently allocated to each worker. worker_socket", "= 0x02 # A notifications value is one of the", "worksheet that the sender is on? Column( 'bundle_uuid', String(63), nullable=True", "to modify this group. 
Column('is_admin', Boolean), Index('group_uuid_index', 'group_uuid'), Index('user_id_index', 'user_id'),", "have many columns now, but it will eventually # include", "DateTime, nullable=False), Column('has_access', Boolean, default=False, nullable=True), Column('is_verified', Boolean, nullable=False, default=False),", "bundle manager. ) # Includes things like name, description, etc.", "'bundle_metadata', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ),", "Column('user_id', String(63), nullable=False), Column('user_name', String(63), nullable=False, unique=True), Column( 'email', String(254),", "# For each child_uuid, we have: key = child_path, target", "'worksheet_uuid', String(63), nullable=True ), # What is the id of", "'tag_exclusive', Boolean, nullable=False ), # Whether worker runs bundles if", "not (yet) in the system. Column('parent_uuid', String(63), nullable=False), Column('parent_path', Text,", "\"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('child_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Column('child_path',", "unique=True), Column('refresh_token', String(255), unique=True), Column('expires', DateTime, nullable=False), ) oauth2_auth_code =", "Column('is_anonymous', Boolean, nullable=False, default=False), UniqueConstraint('uuid', name='uix_1'), Index('bundle_data_hash_index', 'data_hash'), Index('state_index', 'state'),", "created before v0.5.31; Set to current_timestamp by default UniqueConstraint('uuid', name='uix_1'),", "because bundles can have # dependencies to bundles not (yet)", "bundles group_bundle_permission = Table( 'group_bundle_permission', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"),", "(1), or all (2). 
GROUP_OBJECT_PERMISSION_NONE = 0x00 GROUP_OBJECT_PERMISSION_READ = 0x01", "use appropriate database engine-specific data types for Unicode # data.", "key integrity Column('first_name', String(30, convert_unicode=True)), Column('last_name', String(30, convert_unicode=True)), Column('date_joined', DateTime,", "seconds already used Column('disk_quota', Float, nullable=False), # Number of bytes", "oauth2_client = Table( 'oauth2_client', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True,", "computed. Column('data_hash', String(63), nullable=True), Column('state', String(63), nullable=False), Column('owner_id', String(255), nullable=True),", "etc. bundle_metadata = Table( 'bundle_metadata', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"),", "current_timestamp by default UniqueConstraint('uuid', name='uix_1'), Index('worksheet_name_index', 'name'), Index('worksheet_owner_index', 'owner_id'), )", "String(63), ForeignKey(group.c.uuid), nullable=False), # Reference to a worksheet object Column('object_uuid',", "nullable=False), Column('worker_id', String(127), nullable=False), ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']), Column('run_uuid', String(63),", "'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('uuid', String(63), nullable=False),", "the bundle service. Column('socket_id', Integer, nullable=False), # Socket ID worker", "of seconds allowed Column('parallel_run_quota', Integer, nullable=False), # Number of parallel", "Unicode and UnicodeText as appropriate # This way, SQLAlchemy will", "space on worker. 
Column( 'checkin_time', DateTime, nullable=False ), # When", "String(127), primary_key=True, nullable=False), ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']), # Serialized list", "nullable=False), Column('owner_id', String(255), nullable=True), Column('is_anonymous', Boolean, nullable=False, default=False), UniqueConstraint('uuid', name='uix_1'),", ") # Stores (email) verification keys user_verification = Table( 'user_verification',", "# dependencies to bundles not (yet) in the system. Column('parent_uuid',", ") # For each child_uuid, we have: key = child_path,", "DateTime, nullable=False), Column('redirect_uri', String(255), nullable=False), ) # Store information about", "= 0x01 GROUP_OBJECT_PERMISSION_ALL = 0x02 # A notifications value is", "user = Table( 'user', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True,", "by default Column( 'date_last_modified', DateTime ), # When the worksheet", "SQLAlchemy table objects for the CodaLab bundle system tables. \"\"\"", "Integer, nullable=False), # Number of parallel jobs allowed Column('time_used', Float,", "of the chat? Column( 'worksheet_uuid', String(63), nullable=True ), # What", "foreign key constraint on the worker table so that we", "UniqueConstraint('uuid', name='uix_1'), Index('worksheet_name_index', 'name'), Index('worksheet_owner_index', 'owner_id'), ) worksheet_item = Table(", "'group', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ),", "to maintain foreign key integrity Column('first_name', String(30, convert_unicode=True)), Column('last_name', String(30,", "on? Column( 'bundle_uuid', String(63), nullable=True ), # What is the", "for bundles group_bundle_permission = Table( 'group_bundle_permission', db_metadata, Column( 'id', BigInteger().with_variant(Integer,", "nullable=True), # Who sent it? 
Column('recipient_user_id', String(63), nullable=True), # Who", "String(255), nullable=True), Column('is_anonymous', Boolean, nullable=False, default=False), UniqueConstraint('uuid', name='uix_1'), Index('bundle_data_hash_index', 'data_hash'),", "is the id of the bundle that the sender is", "String(255), unique=True), Column('refresh_token', String(255), unique=True), Column('expires', DateTime, nullable=False), ) oauth2_auth_code", "# Number of CPUs on worker. Column('gpus', Integer, nullable=False), #", "to a worksheet object Column('object_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False), # Permissions", "nullable=False), # TODO: make this nullable Column('type', String(20), nullable=False), Column('sort_key',", "Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('child_uuid', String(63),", "the worksheet was created; Set to null if the worksheet", "Integer, nullable=True), Index('worksheet_item_worksheet_uuid_index', 'worksheet_uuid'), Index('worksheet_item_bundle_uuid_index', 'bundle_uuid'), Index('worksheet_item_subworksheet_uuid_index', 'subworksheet_uuid'), ) #", "nullable=False, autoincrement=True, ), Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False), # A worksheet", "parent_path) bundle_dependency = Table( 'bundle_dependency', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"),", "nullable=False, autoincrement=True, ), Column('uuid', String(63), nullable=False), Column('bundle_type', String(63), nullable=False), #", "nullable=False), # Number of seconds allowed Column('parallel_run_quota', Integer, nullable=False), #", "sqlalchemy.types import ( BigInteger, Boolean, DateTime, Enum, Float, Integer, LargeBinary,", "did the user send this query? 
Column('sender_user_id', String(63), nullable=True), #", "Column('child_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Column('child_path', Text, nullable=False), # Deliberately omit", "worker. worker_socket = Table( 'worker_socket', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),", "messages on. Column( 'shared_file_system', Boolean, nullable=False ), # Whether the", "Column('uuid', String(63), nullable=False), Column('name', String(255), nullable=False), Column('owner_id', String(255), nullable=True), Column(", "null) # - type = worksheet (subworksheet_uuid != null) #", "DateTime, nullable=False), Column('date_sent', DateTime, nullable=True), Column('key', String(64), nullable=False), ) #", "Column('scopes', Text, nullable=False), Column('access_token', String(255), unique=True), Column('refresh_token', String(255), unique=True), Column('expires',", "Column( 'exit_after_num_runs', Integer, nullable=False ), # Number of jobs allowed", "# Total memory of worker. Column('free_disk_bytes', BigInteger, nullable=True), # Available", "'owner_id'), ) worksheet_item = Table( 'worksheet_item', db_metadata, Column( 'id', BigInteger().with_variant(Integer,", "Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), # Basic", "nullable=True), # Tag that allows for scheduling runs on specific", "only important notifications NOTIFICATIONS_GENERAL = 0x02 # Receive general notifications", "information about the bundles currently running on each worker. 
worker_run", "ForeignKey(user.c.user_id), nullable=False), Column('worker_id', String(127), nullable=False), # No foreign key constraint", "Number of seconds allowed Column('parallel_run_quota', Integer, nullable=False), # Number of", "Column('affiliation', String(255, convert_unicode=True), nullable=True), Column('url', String(255, convert_unicode=True), nullable=True), # Quotas", "nullable=False), Column('name', String(255), nullable=False), Column('user_defined', Boolean), Column('owner_id', String(255), nullable=True), UniqueConstraint('uuid',", "Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('scopes', Text, nullable=False), Column('access_token', String(255), unique=True),", "is able to modify this group. Column('is_admin', Boolean), Index('group_uuid_index', 'group_uuid'),", "user is able to modify this group. Column('is_admin', Boolean), Index('group_uuid_index',", "Number of bytes already used Index('user_user_id_index', 'user_id'), Index('user_user_name_index', 'user_name'), UniqueConstraint('user_id',", "because worksheets can contain # bundles and worksheets not (yet)", "each worker. worker_dependency = Table( 'worker_dependency', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id),", "NULL except for run bundles. Column('command', Text, nullable=True), # The", "except for run bundles. Column('command', Text, nullable=True), # The data_hash", "Set to current timestamp by default Column( 'date_last_modified', DateTime ),", "so that we can create a socket # for the", "Column('redirect_uris', Text, nullable=False), # comma-separated list of allowed redirect URIs", "# data. 
Currently, only worksheet.title uses the Unicode column type.", "nullable=False), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=True), Column('tag', Text, nullable=True), # Tag", "can create a socket # for the worker before adding", "String and Text columns with Unicode and UnicodeText as appropriate", "Boolean, nullable=False ), # Whether worker runs bundles if and", "'worker_socket', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('worker_id', String(127), nullable=False), #", "make this nullable Column('type', String(20), nullable=False), Column('sort_key', Integer, nullable=True), Index('worksheet_item_worksheet_uuid_index',", "# When the worksheet was frozen (forever immutable) if it", "ForeignKey(worksheet.c.uuid), nullable=False), # Permissions encoded as integer (see below) Column('permission',", "String(63), nullable=True), Column('subworksheet_uuid', String(63), nullable=True), Column('value', Text, nullable=False), # TODO:", "ForeignKey(user.c.user_id), nullable=False), Column('date_created', DateTime, nullable=False), Column('code', String(64), nullable=False), ) #", "have: key = child_path, target = (parent_uuid, parent_path) bundle_dependency =", "'user_verification', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ),", "A worksheet item is either: # - type = bundle", "DateTime, nullable=False), Column('code', String(64), nullable=False), ) # OAuth2 Tables oauth2_client", "the worker before adding the worker to the worker table.", "from sqlalchemy import Column, ForeignKey, Index, MetaData, Table, UniqueConstraint from", "still being computed. 
Column('data_hash', String(63), nullable=True), Column('state', String(63), nullable=False), Column('owner_id',", "logged in Column( 'is_active', Boolean, nullable=False, default=True ), # Set", "markup (value != null) # - type = directive (value", "description of the worksheet Column( 'frozen', DateTime, nullable=True ), #", "Whether a user is able to modify this group. Column('is_admin',", "nullable=False), Column('date_sent', DateTime, nullable=True), Column('key', String(64), nullable=False), ) # Stores", "# A notifications value is one of the following: NOTIFICATIONS_NONE", "Column('permission', Integer, nullable=False), ) # A permission value is one", "value is still being computed. Column('data_hash', String(63), nullable=True), Column('state', String(63),", "the sender is on? ) # Store information about workers.", "String(63), nullable=False), Column('metadata_value', Text, nullable=False), Index('metadata_kv_index', 'metadata_key', 'metadata_value', mysql_length=63), )", "Column('value', Text, nullable=False), # TODO: make this nullable Column('type', String(20),", "'name'), Index('group_owner_id_index', 'owner_id'), ) user_group = Table( 'user_group', db_metadata, Column(", "String(63), nullable=False, unique=True), Column( 'email', String(254), nullable=False, unique=True ), #", "omit ForeignKey(bundle.c.uuid), because worksheets can contain # bundles and worksheets", "'user', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ),", "of parallel jobs allowed Column('time_used', Float, nullable=False), # Number of", "sockets currently allocated to each worker. worker_socket = Table( 'worker_socket',", "worker and the server have a shared filesystem. 
Column( 'tag_exclusive',", "\"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('uuid', String(63), nullable=False), Column('name', String(255),", "the worksheet created before v0.5.31; Set to current timestamp by", ") group = Table( 'group', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"),", "import ForeignKeyConstraint db_metadata = MetaData() bundle = Table( 'bundle', db_metadata,", "service. Column('socket_id', Integer, nullable=False), # Socket ID worker listens for", "autoincrement=True, ), Column('client_id', String(63), nullable=False), Column('name', String(63), nullable=True), Column('secret', String(255),", "String(20), nullable=False), Column('sort_key', Integer, nullable=True), Index('worksheet_item_worksheet_uuid_index', 'worksheet_uuid'), Index('worksheet_item_bundle_uuid_index', 'bundle_uuid'), Index('worksheet_item_subworksheet_uuid_index',", "to current_timestamp by default UniqueConstraint('uuid', name='uix_1'), Index('worksheet_name_index', 'name'), Index('worksheet_owner_index', 'owner_id'),", "columns now, but it will eventually # include columns for", "0x01 # Receive only important notifications NOTIFICATIONS_GENERAL = 0x02 #", "Text, nullable=True), # The data_hash will be NULL if the", "Total memory of worker. Column('free_disk_bytes', BigInteger, nullable=True), # Available disk", "bundle that the sender is on? ) # Store information", "Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), # Primary", "# The command will be NULL except for run bundles.", "worksheet_tag = Table( 'worksheet_tag', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True,", "convert_unicode=True)), Column('last_name', String(30, convert_unicode=True)), Column('date_joined', DateTime, nullable=False), Column('has_access', Boolean, default=False,", "able to modify this group. 
Column('is_admin', Boolean), Index('group_uuid_index', 'group_uuid'), Index('user_id_index',", "unique=True ), # Length of 254 to be compliant with", "one of the following: NOTIFICATIONS_NONE = 0x00 # Receive no", "A permission value is one of the following: none (0),", "# When the worksheet was created; Set to null if", "worker table. Column('socket_id', Integer, primary_key=True, nullable=False), ) # Store information", "Index('group_owner_id_index', 'owner_id'), ) user_group = Table( 'user_group', db_metadata, Column( 'id',", "and UnicodeText as appropriate # This way, SQLAlchemy will automatically", "Column('disk_used', Float, nullable=False), # Number of bytes already used Index('user_user_id_index',", "include columns for owner, group, permissions, etc. worksheet = Table(", "Column( 'checkin_time', DateTime, nullable=False ), # When the worker last", "table. Column('socket_id', Integer, primary_key=True, nullable=False), ) # Store information about", "will eventually # include columns for owner, group, permissions, etc.", "Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('date_created', DateTime, nullable=False), Column('code', String(64), nullable=False),", "primary_key=True, nullable=False), Column('worker_id', String(127), primary_key=True, nullable=False), ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']),", "Column( 'date_last_modified', DateTime ), # When the worksheet was last", "Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=True), Column( 'grant_type', Enum(\"authorization_code\", \"password\", \"client_credentials\", \"refresh_token\"),", "- type = directive (value != null) # Deliberately omit", ") # Store information about users' questions or feedback. 
chat", "'bundle_uuid'), Index('worksheet_item_subworksheet_uuid_index', 'subworksheet_uuid'), ) # Worksheet tags worksheet_tag = Table(", "TODO: Replace String and Text columns with Unicode and UnicodeText", "DateTime ), # When the worksheet was last modified; Set", "String(63), ForeignKey(bundle.c.uuid), nullable=False), Column('child_path', Text, nullable=False), # Deliberately omit ForeignKey(bundle.c.uuid),", "it? Column('message', Text, nullable=False), # What's the content of the", "Column('sender_user_id', String(63), nullable=True), # Who sent it? Column('recipient_user_id', String(63), nullable=True),", "worker. Column('gpus', Integer, nullable=False), # Number of GPUs on worker.", "nullable=False), Column('code', String(64), nullable=False), ) # OAuth2 Tables oauth2_client =", "(value != null) # Deliberately omit ForeignKey(bundle.c.uuid), because worksheets can", "'chat', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ),", "for the worker before adding the worker to the worker", "Index('worksheet_item_bundle_uuid_index', 'bundle_uuid'), Index('worksheet_item_subworksheet_uuid_index', 'subworksheet_uuid'), ) # Worksheet tags worksheet_tag =", "the worksheet Column( 'frozen', DateTime, nullable=True ), # When the", "Deliberately omit ForeignKey(bundle.c.uuid), because worksheets can contain # bundles and", "db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('group_uuid',", "users' questions or feedback. chat = Table( 'chat', db_metadata, Column(", "chat? Column( 'worksheet_uuid', String(63), nullable=True ), # What is the", "permission value is one of the following: none (0), read", "NOTIFICATIONS_IMPORTANT = 0x01 # Receive only important notifications NOTIFICATIONS_GENERAL =", "a shared filesystem. 
Column( 'tag_exclusive', Boolean, nullable=False ), # Whether", "can have # dependencies to bundles not (yet) in the", "for Unicode # data. Currently, only worksheet.title uses the Unicode", "'worksheet_item', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ),", "Column('sort_key', Integer, nullable=True), Index('worksheet_item_worksheet_uuid_index', 'worksheet_uuid'), Index('worksheet_item_bundle_uuid_index', 'bundle_uuid'), Index('worksheet_item_subworksheet_uuid_index', 'subworksheet_uuid'), )", "Short human-readable description of the worksheet Column( 'frozen', DateTime, nullable=True", "maintain foreign key integrity Column('first_name', String(30, convert_unicode=True)), Column('last_name', String(30, convert_unicode=True)),", "Text, nullable=False), Column('access_token', String(255), unique=True), Column('refresh_token', String(255), unique=True), Column('expires', DateTime,", "column type. from sqlalchemy import Column, ForeignKey, Index, MetaData, Table,", "'metadata_value', mysql_length=63), ) # For each child_uuid, we have: key", "), Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False), # A worksheet item is", "worksheets not (yet) in the system. 
Column('bundle_uuid', String(63), nullable=True), Column('subworksheet_uuid',", "and from UTF-8 # encoding, or use appropriate database engine-specific", "Boolean, default=False, nullable=True), Column('is_verified', Boolean, nullable=False, default=False), Column('is_superuser', Boolean, nullable=False,", "String(255), nullable=True), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=True), Column( 'grant_type', Enum(\"authorization_code\", \"password\",", "Column( 'grant_type', Enum(\"authorization_code\", \"password\", \"client_credentials\", \"refresh_token\"), nullable=False, ), Column('response_type', Enum(\"code\",", "nullable=False ), # Whether worker runs bundles if and only", "from UTF-8 # encoding, or use appropriate database engine-specific data", "the Unicode column type. from sqlalchemy import Column, ForeignKey, Index,", "'grant_type', Enum(\"authorization_code\", \"password\", \"client_credentials\", \"refresh_token\"), nullable=False, ), Column('response_type', Enum(\"code\", \"token\"),", "worker. worker_run = Table( 'worker_run', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),", "worksheet table does not have many columns now, but it", "# Reference to a worksheet object Column('object_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False),", "# OAuth2 Tables oauth2_client = Table( 'oauth2_client', db_metadata, Column( 'id',", "mysql_length=63), ) # For each child_uuid, we have: key =", "DateTime), # Null if user has never logged in Column(", "for the user/worker combination. # See WorkerModel for the serialization", "the bundles currently running on each worker. worker_run = Table(", "Column('cpus', Integer, nullable=False), # Number of CPUs on worker. 
Column('gpus',", "Column('bundle_uuid', String(63), nullable=True), Column('subworksheet_uuid', String(63), nullable=True), Column('value', Text, nullable=False), #", "(parent_uuid, parent_path) bundle_dependency = Table( 'bundle_dependency', db_metadata, Column( 'id', BigInteger().with_variant(Integer,", "nullable=False), Column('date_created', DateTime, nullable=False), Column('code', String(64), nullable=False), ) # OAuth2", "uses the Unicode column type. from sqlalchemy import Column, ForeignKey,", "nullable=False), # The command will be NULL except for run", "String(63), nullable=True), # Who received it? Column('message', Text, nullable=False), #", "worksheet.title uses the Unicode column type. from sqlalchemy import Column,", "Store information about users. user = Table( 'user', db_metadata, Column(", "name='uix_1'), Index('bundle_data_hash_index', 'data_hash'), Index('state_index', 'state'), # Needed for the bundle", "String(127), primary_key=True, nullable=False), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=True), Column('tag', Text, nullable=True),", "of allowed redirect URIs UniqueConstraint('client_id', name='uix_1'), ) oauth2_token = Table(", "information about the dependencies available on each worker. worker_dependency =", "= Table( 'worksheet_tag', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False,", "), # When the worksheet was created; Set to null", "type = bundle (bundle_uuid != null) # - type =", "the worksheet was frozen (forever immutable) if it is. 
Column('is_anonymous',", "0x02 # A notifications value is one of the following:", "created before v0.5.31; Set to current timestamp by default Column(", "primary_key=True, nullable=False), Column('worker_id', String(127), primary_key=True, nullable=False), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=True),", "Column('first_name', String(30, convert_unicode=True)), Column('last_name', String(30, convert_unicode=True)), Column('date_joined', DateTime, nullable=False), Column('has_access',", "server have a shared filesystem. Column( 'tag_exclusive', Boolean, nullable=False ),", "Float, nullable=False), # Number of seconds allowed Column('parallel_run_quota', Integer, nullable=False),", "nullable=False), Column('metadata_key', String(63), nullable=False), Column('metadata_value', Text, nullable=False), Index('metadata_kv_index', 'metadata_key', 'metadata_value',", "encoding, or use appropriate database engine-specific data types for Unicode", "# Deliberately omit ForeignKey(bundle.c.uuid), because bundles can have # dependencies", "Column( 'date_created', DateTime ), # When the worksheet was created;", "adding the worker to the worker table. Column('socket_id', Integer, primary_key=True,", "parallel jobs allowed Column('time_used', Float, nullable=False), # Number of seconds", "nullable=False), Column('parent_path', Text, nullable=False), ) # The worksheet table does", "# See WorkerModel for the serialization method. Column('dependencies', LargeBinary, nullable=False),", "Text, nullable=False), # Deliberately omit ForeignKey(bundle.c.uuid), because bundles can have", "Column('scopes', Text, nullable=False), Column('code', String(100), nullable=False), Column('expires', DateTime, nullable=False), Column('redirect_uri',", "of jobs allowed to run on worker. 
Column('is_terminating', Boolean, nullable=False),", "primary_key=True, nullable=False, autoincrement=True, ), # Primary key Column('time', DateTime, nullable=False),", "DateTime, Enum, Float, Integer, LargeBinary, String, Text, Unicode, ) from", "worksheet item is either: # - type = bundle (bundle_uuid", "contain # bundles and worksheets not (yet) in the system.", "String(254), nullable=False, unique=True ), # Length of 254 to be", "nullable=False), Column('worker_id', String(127), primary_key=True, nullable=False), ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']), #", "primary_key=True, nullable=False, autoincrement=True, ), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False), # Reference", "tags. Column( 'exit_after_num_runs', Integer, nullable=False ), # Number of jobs", "nullable=False), ) # Stores password reset codes user_reset_code = Table(", "A notifications value is one of the following: NOTIFICATIONS_NONE =", "'worker.worker_id']), Column('run_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Index('uuid_index', 'run_uuid'), ) # Store", "is one of the following: none (0), read (1), or", "import ( BigInteger, Boolean, DateTime, Enum, Float, Integer, LargeBinary, String,", "before v0.5.31; Set to current_timestamp by default UniqueConstraint('uuid', name='uix_1'), Index('worksheet_name_index',", "String(63), nullable=False), Column('user_name', String(63), nullable=False, unique=True), Column( 'email', String(254), nullable=False,", "information about workers. worker = Table( 'worker', db_metadata, Column('user_id', String(63),", "permissions, etc. worksheet = Table( 'worksheet', db_metadata, Column( 'id', BigInteger().with_variant(Integer,", "bundle system tables. \"\"\" # TODO: Replace String and Text", "String(63), nullable=False), Column('bundle_type', String(63), nullable=False), # The command will be", "to the worker table. 
Column('socket_id', Integer, primary_key=True, nullable=False), ) #", "Column('expires', DateTime, nullable=False), Column('redirect_uri', String(255), nullable=False), ) # Store information", "primary_key=True, nullable=False), ) # Store information about the bundles currently", "Number of jobs allowed to run on worker. Column('is_terminating', Boolean,", "'worksheet_uuid'), Index('worksheet_item_bundle_uuid_index', 'bundle_uuid'), Index('worksheet_item_subworksheet_uuid_index', 'subworksheet_uuid'), ) # Worksheet tags worksheet_tag", "be NULL if the bundle's value is still being computed.", "), Column('client_id', String(63), ForeignKey(oauth2_client.c.client_id), nullable=False), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('scopes',", "Table( 'worker_socket', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('worker_id', String(127), nullable=False),", "What is the id of the bundle that the sender", "= Table( 'group', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False,", "nullable=False), # Socket ID worker listens for messages on. Column(", "bundle service. Column('socket_id', Integer, nullable=False), # Socket ID worker listens", "BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('uuid', String(63), nullable=False), Column('name',", "on worker. 
Column('is_terminating', Boolean, nullable=False), ) # Store information about", "Column('access_token', String(255), unique=True), Column('refresh_token', String(255), unique=True), Column('expires', DateTime, nullable=False), )", "The SQLAlchemy table objects for the CodaLab bundle system tables.", "String(255), nullable=True), UniqueConstraint('uuid', name='uix_1'), Index('group_name_index', 'name'), Index('group_owner_id_index', 'owner_id'), ) user_group", "Integer, primary_key=True, nullable=False), ) # Store information about the bundles", "nullable=False), ) # Store information about users' questions or feedback.", "with the bundle service. Column('socket_id', Integer, nullable=False), # Socket ID", "Index('worksheet_tag_worksheet_uuid_index', 'worksheet_uuid'), Index('worksheet_tag_tag_index', 'tag'), ) group = Table( 'group', db_metadata,", "be NULL except for run bundles. Column('command', Text, nullable=True), #", "notifications NOTIFICATIONS_IMPORTANT = 0x01 # Receive only important notifications NOTIFICATIONS_GENERAL", "This way, SQLAlchemy will automatically perform conversions to and from", "Column( 'is_active', Boolean, nullable=False, default=True ), # Set to False", "but it will eventually # include columns for owner, group,", "not have many columns now, but it will eventually #", "nullable=True), Column( 'grant_type', Enum(\"authorization_code\", \"password\", \"client_credentials\", \"refresh_token\"), nullable=False, ), Column('response_type',", "ForeignKey(user.c.user_id), nullable=False), Column('date_created', DateTime, nullable=False), Column('date_sent', DateTime, nullable=True), Column('key', String(64),", "that the sender is on? 
) # Store information about", "'bundle_dependency', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ),", "used Column('disk_quota', Float, nullable=False), # Number of bytes allowed Column('disk_used',", "on the worker table so that we can create a", "autoincrement=True, ), Column('client_id', String(63), ForeignKey(oauth2_client.c.client_id), nullable=False), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),", "nullable=False), ) # The worksheet table does not have many", "the worker table so that we can create a socket", "String(63), nullable=True), Column('state', String(63), nullable=False), Column('owner_id', String(255), nullable=True), Column('is_anonymous', Boolean,", "# Number of GPUs on worker. Column('memory_bytes', BigInteger, nullable=False), #", "DateTime, nullable=False ), # When the worker last checked in", "\"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('bundle_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Column('metadata_key',", "Table( 'worker_dependency', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False), Column('worker_id', String(127),", "Table( 'oauth2_client', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True,", "Text, Unicode, ) from sqlalchemy.sql.schema import ForeignKeyConstraint db_metadata = MetaData()", "redirect URIs UniqueConstraint('client_id', name='uix_1'), ) oauth2_token = Table( 'oauth2_token', db_metadata,", ") # Store information about all sockets currently allocated to", "nullable=True ), # What is the id of the bundle", "primary_key=True, nullable=False, autoincrement=True, ), Column('child_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Column('child_path', Text,", "nullable=False), Index('uuid_index', 'run_uuid'), ) # Store 
information about the dependencies", "instead of deleting users to maintain foreign key integrity Column('first_name',", "BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('bundle_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False),", "automatically perform conversions to and from UTF-8 # encoding, or", "# Quotas Column('time_quota', Float, nullable=False), # Number of seconds allowed", "nullable=True), Column('value', Text, nullable=False), # TODO: make this nullable Column('type',", "Integer, nullable=False, default=NOTIFICATIONS_GENERAL ), # Which emails user wants to", "nullable=False), Column('name', String(63), nullable=True), Column('secret', String(255), nullable=True), Column('user_id', String(63), ForeignKey(user.c.user_id),", "String(63), nullable=False), Column('name', String(63), nullable=True), Column('secret', String(255), nullable=True), Column('user_id', String(63),", "bundles not (yet) in the system. Column('parent_uuid', String(63), nullable=False), Column('parent_path',", "BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('uuid', String(63), nullable=False), Column('bundle_type',", "NOTIFICATIONS_NONE = 0x00 # Receive no notifications NOTIFICATIONS_IMPORTANT = 0x01", "Integer, nullable=False), ) # A permission value is one of", "The worksheet table does not have many columns now, but", ") from sqlalchemy.sql.schema import ForeignKeyConstraint db_metadata = MetaData() bundle =", "# A permission value is one of the following: none", "socket # for the worker before adding the worker to", "is on? Column( 'bundle_uuid', String(63), nullable=True ), # What is", "Boolean, nullable=False, default=False), Column( 'date_created', DateTime ), # When the", "dependencies for the user/worker combination. 
# See WorkerModel for the", "'bundle', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ),", "'date_last_modified', DateTime ), # When the worksheet was last modified;", "Column('tag', Text, nullable=True), # Tag that allows for scheduling runs", "UniqueConstraint('user_id', name='uix_1'), ) # Stores (email) verification keys user_verification =", "When did the user send this query? Column('sender_user_id', String(63), nullable=True),", "modified; Set to null if the worksheet created before v0.5.31;", "Table( 'worksheet', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True,", "nullable=True), Column('is_anonymous', Boolean, nullable=False, default=False), UniqueConstraint('uuid', name='uix_1'), Index('bundle_data_hash_index', 'data_hash'), Index('state_index',", "the dependencies available on each worker. worker_dependency = Table( 'worker_dependency',", "null if the worksheet created before v0.5.31; Set to current", "db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('worksheet_uuid',", "of CPUs on worker. Column('gpus', Integer, nullable=False), # Number of", "worker. Column('memory_bytes', BigInteger, nullable=False), # Total memory of worker. Column('free_disk_bytes',", "nullable=False), # No foreign key constraint on the worker table", "worker. Column('free_disk_bytes', BigInteger, nullable=True), # Available disk space on worker.", "(forever immutable) if it is. 
Column('is_anonymous', Boolean, nullable=False, default=False), Column(", "DateTime ), # When the worksheet was created; Set to", "Stores (email) verification keys user_verification = Table( 'user_verification', db_metadata, Column(", "URIs UniqueConstraint('client_id', name='uix_1'), ) oauth2_token = Table( 'oauth2_token', db_metadata, Column(", "Unicode, ) from sqlalchemy.sql.schema import ForeignKeyConstraint db_metadata = MetaData() bundle", "!= null) # - type = worksheet (subworksheet_uuid != null)", "# Length of 254 to be compliant with RFC3696/5321 Column(", "convert_unicode=True), nullable=True), # Quotas Column('time_quota', Float, nullable=False), # Number of", "Column('code', String(64), nullable=False), ) # OAuth2 Tables oauth2_client = Table(", "type. from sqlalchemy import Column, ForeignKey, Index, MetaData, Table, UniqueConstraint", "the bundle that the sender is on? ) # Store", "autoincrement=True, ), Column('uuid', String(63), nullable=False), Column('bundle_type', String(63), nullable=False), # The", "Whether worker runs bundles if and only if they match", "the worker to the worker table. 
Column('socket_id', Integer, primary_key=True, nullable=False),", "= Table( 'worker', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False), Column('worker_id',", "['worker.user_id', 'worker.worker_id']), # Serialized list of dependencies for the user/worker", "integer (see below) Column('permission', Integer, nullable=False), ) # Permissions for", "DateTime, nullable=False), # When did the user send this query?", "BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),", "if the worksheet created before v0.5.31; Set to current timestamp", "nullable=False), Column('tag', String(63), nullable=False), Index('worksheet_tag_worksheet_uuid_index', 'worksheet_uuid'), Index('worksheet_tag_tag_index', 'tag'), ) group", "Column('disk_quota', Float, nullable=False), # Number of bytes allowed Column('disk_used', Float,", "perform conversions to and from UTF-8 # encoding, or use", "can contain # bundles and worksheets not (yet) in the", "nullable=False), Column('scopes', Text, nullable=False), Column('code', String(100), nullable=False), Column('expires', DateTime, nullable=False),", "Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False), Column('tag', String(63), nullable=False), Index('worksheet_tag_worksheet_uuid_index', 'worksheet_uuid'), Index('worksheet_tag_tag_index',", "nullable=False), ) # Store information about all sockets currently allocated", "worksheet = Table( 'worksheet', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True,", "as appropriate # This way, SQLAlchemy will automatically perform conversions", "Index('state_index', 'state'), # Needed for the bundle manager. ) #", "about users' questions or feedback. 
chat = Table( 'chat', db_metadata,", "nullable=False), Index('metadata_kv_index', 'metadata_key', 'metadata_value', mysql_length=63), ) # For each child_uuid,", "v0.5.31; Set to current timestamp by default Column( 'date_last_modified', DateTime", "= Table( 'group_object_permission', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False,", "'group_object_permission', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ),", "timestamp by default Column( 'date_last_modified', DateTime ), # When the", "Column( 'shared_file_system', Boolean, nullable=False ), # Whether the worker and", "# Stores password reset codes user_reset_code = Table( 'user_reset_code', db_metadata,", "Column('worker_id', String(127), nullable=False), # No foreign key constraint on the", "nullable=True), Column('is_verified', Boolean, nullable=False, default=False), Column('is_superuser', Boolean, nullable=False, default=False), Column('password',", "was created; Set to null if the worksheet created before", "Column('is_verified', Boolean, nullable=False, default=False), Column('is_superuser', Boolean, nullable=False, default=False), Column('password', String(128),", "ForeignKey(oauth2_client.c.client_id), nullable=False), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('scopes', Text, nullable=False), Column('access_token',", "none (0), read (1), or all (2). GROUP_OBJECT_PERMISSION_NONE = 0x00", "as integer (see below) Column('permission', Integer, nullable=False), ) # Permissions", "no notifications NOTIFICATIONS_IMPORTANT = 0x01 # Receive only important notifications", "the worker table. Column('socket_id', Integer, primary_key=True, nullable=False), ) # Store", "workers. 
Column('cpus', Integer, nullable=False), # Number of CPUs on worker.", "Column('last_login', DateTime), # Null if user has never logged in", "\"\"\" The SQLAlchemy table objects for the CodaLab bundle system", "types for Unicode # data. Currently, only worksheet.title uses the", "ForeignKey(worksheet.c.uuid), nullable=False), # A worksheet item is either: # -", "Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('worker_id', String(127), nullable=False), ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id',", "String(63), ForeignKey(oauth2_client.c.client_id), nullable=False), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('scopes', Text, nullable=False),", "Column('is_terminating', Boolean, nullable=False), ) # Store information about all sockets", "data types for Unicode # data. Currently, only worksheet.title uses", "was frozen (forever immutable) if it is. Column('is_anonymous', Boolean, nullable=False,", "Unicode column type. from sqlalchemy import Column, ForeignKey, Index, MetaData,", "bundle_dependency = Table( 'bundle_dependency', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True,", "dependencies to bundles not (yet) in the system. Column('parent_uuid', String(63),", "(see below) Column('permission', Integer, nullable=False), ) # A permission value", "that the sender is on? Column( 'bundle_uuid', String(63), nullable=True ),", "'oauth2_client', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ),", "<filename>codalab/model/tables.py \"\"\" The SQLAlchemy table objects for the CodaLab bundle", "NULL if the bundle's value is still being computed. 
Column('data_hash',", "GROUP_OBJECT_PERMISSION_READ = 0x01 GROUP_OBJECT_PERMISSION_ALL = 0x02 # A notifications value", "db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), #", "Float, nullable=False), # Number of bytes allowed Column('disk_used', Float, nullable=False),", "if and only if they match tags. Column( 'exit_after_num_runs', Integer,", "), # Short human-readable description of the worksheet Column( 'frozen',", "When the worker last checked in with the bundle service.", "oauth2_auth_code = Table( 'oauth2_auth_code', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True,", "is. Column('is_anonymous', Boolean, nullable=False, default=False), Column( 'date_created', DateTime ), #", "String(63), ForeignKey(user.c.user_id), nullable=False), Column('scopes', Text, nullable=False), Column('code', String(100), nullable=False), Column('expires',", "), Column('uuid', String(63), nullable=False), Column('bundle_type', String(63), nullable=False), # The command", "nullable=False, default=True ), # Set to False instead of deleting", "is the id of the worksheet that the sender is", "wants to receive Column('last_login', DateTime), # Null if user has", "Index('uuid_index', 'run_uuid'), ) # Store information about the dependencies available", "Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False), Column('user_id', String(63), ForeignKey(\"user.user_id\"), nullable=False), # Whether", "if the worksheet created before v0.5.31; Set to current_timestamp by", "directive (value != null) # Deliberately omit ForeignKey(bundle.c.uuid), because worksheets", "DateTime, nullable=False), ) oauth2_auth_code = Table( 'oauth2_auth_code', db_metadata, Column( 'id',", "for the bundle manager. ) # Includes things like name,", "dependencies available on each worker. 
worker_dependency = Table( 'worker_dependency', db_metadata,", "Permissions for bundles group_bundle_permission = Table( 'group_bundle_permission', db_metadata, Column( 'id',", "Column('time_quota', Float, nullable=False), # Number of seconds allowed Column('parallel_run_quota', Integer,", "Currently, only worksheet.title uses the Unicode column type. from sqlalchemy", "verification keys user_verification = Table( 'user_verification', db_metadata, Column( 'id', BigInteger().with_variant(Integer,", "run on worker. Column('is_terminating', Boolean, nullable=False), ) # Store information", "Integer, nullable=False), ) # Permissions for worksheets group_object_permission = Table(", "allowed redirect URIs UniqueConstraint('client_id', name='uix_1'), ) oauth2_token = Table( 'oauth2_token',", "Boolean), Index('group_uuid_index', 'group_uuid'), Index('user_id_index', 'user_id'), ) # Permissions for bundles", "ForeignKey(user.c.user_id), primary_key=True, nullable=False), Column('worker_id', String(127), primary_key=True, nullable=False), Column('group_uuid', String(63), ForeignKey(group.c.uuid),", "worksheets can contain # bundles and worksheets not (yet) in", "following: none (0), read (1), or all (2). GROUP_OBJECT_PERMISSION_NONE =", "primary_key=True, nullable=False, autoincrement=True, ), Column('bundle_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Column('metadata_key', String(63),", "comma-separated list of allowed redirect URIs UniqueConstraint('client_id', name='uix_1'), ) oauth2_token", "Column('parent_uuid', String(63), nullable=False), Column('parent_path', Text, nullable=False), ) # The worksheet", ") # OAuth2 Tables oauth2_client = Table( 'oauth2_client', db_metadata, Column(", "Column('response_type', Enum(\"code\", \"token\"), nullable=False), Column('scopes', Text, nullable=False), # comma-separated list", "of dependencies for the user/worker combination. 
# See WorkerModel for", "'worker', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False), Column('worker_id', String(127), primary_key=True,", "'worker_id'], ['worker.user_id', 'worker.worker_id']), # Serialized list of dependencies for the", "Text, nullable=False), # comma-separated list of allowed scopes Column('redirect_uris', Text,", "= Table( 'user_verification', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False,", "'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), # Primary key", "nullable=False), # Number of GPUs on worker. Column('memory_bytes', BigInteger, nullable=False),", "String(63), ForeignKey(\"user.user_id\"), nullable=False), # Whether a user is able to", "have # dependencies to bundles not (yet) in the system.", "Store information about all sockets currently allocated to each worker.", "oauth2_token = Table( 'oauth2_token', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True,", "Column('socket_id', Integer, nullable=False), # Socket ID worker listens for messages", "if they match tags. Column( 'exit_after_num_runs', Integer, nullable=False ), #", "'bundle_uuid', String(63), nullable=True ), # What is the id of", "Table, UniqueConstraint from sqlalchemy.types import ( BigInteger, Boolean, DateTime, Enum,", "Whether the worker and the server have a shared filesystem.", "for owner, group, permissions, etc. 
worksheet = Table( 'worksheet', db_metadata,", "nullable=False), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('scopes', Text, nullable=False), Column('access_token', String(255),", "BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False),", "foreign key integrity Column('first_name', String(30, convert_unicode=True)), Column('last_name', String(30, convert_unicode=True)), Column('date_joined',", "about all sockets currently allocated to each worker. worker_socket =", "MetaData() bundle = Table( 'bundle', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"),", "nullable=False), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('scopes', Text, nullable=False), Column('code', String(100),", "nullable=False), # When did the user send this query? Column('sender_user_id',", "\"\"\" # TODO: Replace String and Text columns with Unicode", "nullable=True), # Quotas Column('time_quota', Float, nullable=False), # Number of seconds", "as integer (see below) Column('permission', Integer, nullable=False), ) # A", "Table( 'bundle_metadata', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True,", "'worker.worker_id']), # Serialized list of dependencies for the user/worker combination.", "worker. 
worker_dependency = Table( 'worker_dependency', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True,", "keys user_verification = Table( 'user_verification', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"),", "UniqueConstraint('uuid', name='uix_1'), Index('group_name_index', 'name'), Index('group_owner_id_index', 'owner_id'), ) user_group = Table(", "Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('uuid', String(63),", "autoincrement=True, ), # Basic information Column('user_id', String(63), nullable=False), Column('user_name', String(63),", "of bytes allowed Column('disk_used', Float, nullable=False), # Number of bytes", ") # Store information about the bundles currently running on", "convert_unicode=True), nullable=True), Column('url', String(255, convert_unicode=True), nullable=True), # Quotas Column('time_quota', Float,", "the server have a shared filesystem. Column( 'tag_exclusive', Boolean, nullable=False", "nullable=True), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=True), Column( 'grant_type', Enum(\"authorization_code\", \"password\", \"client_credentials\",", "worker listens for messages on. Column( 'shared_file_system', Boolean, nullable=False ),", "ForeignKey(group.c.uuid), nullable=False), # Reference to a bundle Column('object_uuid', String(63), ForeignKey(bundle.c.uuid),", "db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('worker_id', String(127), nullable=False), # No", "bundles. Column('command', Text, nullable=True), # The data_hash will be NULL", "Table( 'bundle_dependency', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True,", "of worker. 
Column('free_disk_bytes', BigInteger, nullable=True), # Available disk space on", "before v0.5.31; Set to current timestamp by default Column( 'date_last_modified',", "nullable=False, autoincrement=True, ), Column('bundle_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Column('metadata_key', String(63), nullable=False),", "), # What is the id of the worksheet that", "bundles if and only if they match tags. Column( 'exit_after_num_runs',", "Column('key', String(64), nullable=False), ) # Stores password reset codes user_reset_code", "= Table( 'oauth2_client', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False,", "), # Set to False instead of deleting users to", "Serialized list of dependencies for the user/worker combination. # See", "it will eventually # include columns for owner, group, permissions,", "Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False), # Reference to a worksheet object", "and Text columns with Unicode and UnicodeText as appropriate #", "Column( 'bundle_uuid', String(63), nullable=True ), # What is the id", "default=False), UniqueConstraint('uuid', name='uix_1'), Index('bundle_data_hash_index', 'data_hash'), Index('state_index', 'state'), # Needed for", "ForeignKey(group.c.uuid), nullable=False), Column('user_id', String(63), ForeignKey(\"user.user_id\"), nullable=False), # Whether a user", "nullable=False, default=False), Column('is_superuser', Boolean, nullable=False, default=False), Column('password', String(128), nullable=False), #", "sender is on? Column( 'bundle_uuid', String(63), nullable=True ), # What", "String(127), nullable=False), # No foreign key constraint on the worker", "\"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False), Column('user_id',", ") # Includes things like name, description, etc. 
bundle_metadata =", "Boolean), Column('owner_id', String(255), nullable=True), UniqueConstraint('uuid', name='uix_1'), Index('group_name_index', 'name'), Index('group_owner_id_index', 'owner_id'),", "nullable=True ), # What is the id of the worksheet", "The command will be NULL except for run bundles. Column('command',", "way, SQLAlchemy will automatically perform conversions to and from UTF-8", "# Worksheet tags worksheet_tag = Table( 'worksheet_tag', db_metadata, Column( 'id',", "of the worksheet Column( 'frozen', DateTime, nullable=True ), # When", "name='uix_1'), ) oauth2_token = Table( 'oauth2_token', db_metadata, Column( 'id', BigInteger().with_variant(Integer,", "Column('time', DateTime, nullable=False), # When did the user send this", "be compliant with RFC3696/5321 Column( 'notifications', Integer, nullable=False, default=NOTIFICATIONS_GENERAL ),", "nullable=True), Column( 'title', Unicode(255), nullable=True ), # Short human-readable description", "the content of the chat? Column( 'worksheet_uuid', String(63), nullable=True ),", "Index('user_id_index', 'user_id'), ) # Permissions for bundles group_bundle_permission = Table(", "codes user_reset_code = Table( 'user_reset_code', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"),", "nullable=False), ) # Store information about the bundles currently running", "), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False), # Reference to a worksheet", "integer (see below) Column('permission', Integer, nullable=False), ) # A permission", "Integer, nullable=False), # Number of CPUs on worker. Column('gpus', Integer,", "- type = worksheet (subworksheet_uuid != null) # - type", "this group. Column('is_admin', Boolean), Index('group_uuid_index', 'group_uuid'), Index('user_id_index', 'user_id'), ) #", "columns for owner, group, permissions, etc. 
worksheet = Table( 'worksheet',", "of the following: none (0), read (1), or all (2).", "Column('metadata_key', String(63), nullable=False), Column('metadata_value', Text, nullable=False), Index('metadata_kv_index', 'metadata_key', 'metadata_value', mysql_length=63),", "Store information about the bundles currently running on each worker.", "item is either: # - type = bundle (bundle_uuid !=", "sqlalchemy import Column, ForeignKey, Index, MetaData, Table, UniqueConstraint from sqlalchemy.types", "information Column('affiliation', String(255, convert_unicode=True), nullable=True), Column('url', String(255, convert_unicode=True), nullable=True), #", "before adding the worker to the worker table. Column('socket_id', Integer,", "0x01 GROUP_OBJECT_PERMISSION_ALL = 0x02 # A notifications value is one", "current timestamp by default Column( 'date_last_modified', DateTime ), # When", "# The worksheet table does not have many columns now,", "the system. Column('parent_uuid', String(63), nullable=False), Column('parent_path', Text, nullable=False), ) #", "= Table( 'chat', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False,", "unique=True), Column( 'email', String(254), nullable=False, unique=True ), # Length of", "checked in with the bundle service. Column('socket_id', Integer, nullable=False), #", "= 0x00 # Receive no notifications NOTIFICATIONS_IMPORTANT = 0x01 #", "available on each worker. 
worker_dependency = Table( 'worker_dependency', db_metadata, Column('user_id',", "# Permissions encoded as integer (see below) Column('permission', Integer, nullable=False),", "Table( 'group', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True,", "\"refresh_token\"), nullable=False, ), Column('response_type', Enum(\"code\", \"token\"), nullable=False), Column('scopes', Text, nullable=False),", "String(255), nullable=False), ) # Store information about users' questions or", "autoincrement=True, ), Column('uuid', String(63), nullable=False), Column('name', String(255), nullable=False), Column('user_defined', Boolean),", "GROUP_OBJECT_PERMISSION_NONE = 0x00 GROUP_OBJECT_PERMISSION_READ = 0x01 GROUP_OBJECT_PERMISSION_ALL = 0x02 #", "is either: # - type = bundle (bundle_uuid != null)", "Table( 'user_group', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True,", "many columns now, but it will eventually # include columns", "'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('child_uuid', String(63), ForeignKey(bundle.c.uuid),", "# When the worker last checked in with the bundle", "When the worksheet was last modified; Set to null if", "of the following: NOTIFICATIONS_NONE = 0x00 # Receive no notifications", "database engine-specific data types for Unicode # data. 
Currently, only", "nullable=False, unique=True), Column( 'email', String(254), nullable=False, unique=True ), # Length", "), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('date_created', DateTime, nullable=False), Column('date_sent', DateTime,", "nullable=True), Column('key', String(64), nullable=False), ) # Stores password reset codes", "is one of the following: NOTIFICATIONS_NONE = 0x00 # Receive", "String(63), nullable=False), Index('worksheet_tag_worksheet_uuid_index', 'worksheet_uuid'), Index('worksheet_tag_tag_index', 'tag'), ) group = Table(", "nullable=False), Column('user_name', String(63), nullable=False, unique=True), Column( 'email', String(254), nullable=False, unique=True", "Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False), # Reference to a bundle Column('object_uuid',", "allowed Column('time_used', Float, nullable=False), # Number of seconds already used", "String(64), nullable=False), ) # OAuth2 Tables oauth2_client = Table( 'oauth2_client',", "worksheet (subworksheet_uuid != null) # - type = markup (value", "nullable=False, autoincrement=True, ), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False), Column('user_id', String(63), ForeignKey(\"user.user_id\"),", "Column('user_name', String(63), nullable=False, unique=True), Column( 'email', String(254), nullable=False, unique=True ),", "important notifications NOTIFICATIONS_GENERAL = 0x02 # Receive general notifications (new", "things like name, description, etc. 
bundle_metadata = Table( 'bundle_metadata', db_metadata,", "Column('code', String(100), nullable=False), Column('expires', DateTime, nullable=False), Column('redirect_uri', String(255), nullable=False), )", "nullable=False, autoincrement=True, ), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False), # Reference to", "Worksheet tags worksheet_tag = Table( 'worksheet_tag', db_metadata, Column( 'id', BigInteger().with_variant(Integer,", "nullable=True), Column('secret', String(255), nullable=True), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=True), Column( 'grant_type',", "String(30, convert_unicode=True)), Column('last_name', String(30, convert_unicode=True)), Column('date_joined', DateTime, nullable=False), Column('has_access', Boolean,", "autoincrement=True, ), Column('child_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Column('child_path', Text, nullable=False), #", "in the system. Column('bundle_uuid', String(63), nullable=True), Column('subworksheet_uuid', String(63), nullable=True), Column('value',", "), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False), # Reference to a bundle", "send this query? Column('sender_user_id', String(63), nullable=True), # Who sent it?", "Tag that allows for scheduling runs on specific workers. Column('cpus',", "), # Length of 254 to be compliant with RFC3696/5321", "nullable=False), Column('redirect_uri', String(255), nullable=False), ) # Store information about users'", "the user/worker combination. # See WorkerModel for the serialization method.", "nullable=False), # Number of CPUs on worker. 
Column('gpus', Integer, nullable=False),", "Column('type', String(20), nullable=False), Column('sort_key', Integer, nullable=True), Index('worksheet_item_worksheet_uuid_index', 'worksheet_uuid'), Index('worksheet_item_bundle_uuid_index', 'bundle_uuid'),", "autoincrement=True, ), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False), Column('user_id', String(63), ForeignKey(\"user.user_id\"), nullable=False),", "with RFC3696/5321 Column( 'notifications', Integer, nullable=False, default=NOTIFICATIONS_GENERAL ), # Which", "tables. \"\"\" # TODO: Replace String and Text columns with", "user_reset_code = Table( 'user_reset_code', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True,", "When the worksheet was created; Set to null if the", "= Table( 'bundle_metadata', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False,", "autoincrement=True, ), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('date_created', DateTime, nullable=False), Column('date_sent',", "memory of worker. Column('free_disk_bytes', BigInteger, nullable=True), # Available disk space", "omit ForeignKey(bundle.c.uuid), because bundles can have # dependencies to bundles", "# Store information about all sockets currently allocated to each", "# include columns for owner, group, permissions, etc. 
worksheet =", "= Table( 'worker_socket', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('worker_id', String(127),", "allowed Column('parallel_run_quota', Integer, nullable=False), # Number of parallel jobs allowed", "String(63), ForeignKey(bundle.c.uuid), nullable=False), Index('uuid_index', 'run_uuid'), ) # Store information about", "scopes Column('redirect_uris', Text, nullable=False), # comma-separated list of allowed redirect", "), Column('child_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Column('child_path', Text, nullable=False), # Deliberately", "Boolean, nullable=False), ) # Store information about all sockets currently", "ForeignKey(worksheet.c.uuid), nullable=False), Column('tag', String(63), nullable=False), Index('worksheet_tag_worksheet_uuid_index', 'worksheet_uuid'), Index('worksheet_tag_tag_index', 'tag'), )", "nullable Column('type', String(20), nullable=False), Column('sort_key', Integer, nullable=True), Index('worksheet_item_worksheet_uuid_index', 'worksheet_uuid'), Index('worksheet_item_bundle_uuid_index',", "= markup (value != null) # - type = directive", "allowed Column('disk_used', Float, nullable=False), # Number of bytes already used", "Store information about the dependencies available on each worker. 
worker_dependency", ") oauth2_auth_code = Table( 'oauth2_auth_code', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"),", "user has never logged in Column( 'is_active', Boolean, nullable=False, default=True", "For each child_uuid, we have: key = child_path, target =", "nullable=False, ), Column('response_type', Enum(\"code\", \"token\"), nullable=False), Column('scopes', Text, nullable=False), #", "Unicode(255), nullable=True ), # Short human-readable description of the worksheet", "Table( 'chat', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True,", "# Receive no notifications NOTIFICATIONS_IMPORTANT = 0x01 # Receive only", "eventually # include columns for owner, group, permissions, etc. worksheet", "Column('free_disk_bytes', BigInteger, nullable=True), # Available disk space on worker. Column(", "a socket # for the worker before adding the worker", "data_hash will be NULL if the bundle's value is still", "String(63), ForeignKey(group.c.uuid), nullable=False), # Reference to a bundle Column('object_uuid', String(63),", "'group_bundle_permission', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ),", "for scheduling runs on specific workers. Column('cpus', Integer, nullable=False), #", "Column('gpus', Integer, nullable=False), # Number of GPUs on worker. Column('memory_bytes',", "Receive general notifications (new features) # Store information about users.", "ForeignKey(user.c.user_id), nullable=False), Column('worker_id', String(127), nullable=False), ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']), Column('run_uuid',", "Number of GPUs on worker. 
Column('memory_bytes', BigInteger, nullable=False), # Total", "or use appropriate database engine-specific data types for Unicode #", "Text, nullable=True), # Tag that allows for scheduling runs on", "String(63), ForeignKey(group.c.uuid), nullable=False), Column('user_id', String(63), ForeignKey(\"user.user_id\"), nullable=False), # Whether a", "key constraint on the worker table so that we can", "user/worker combination. # See WorkerModel for the serialization method. Column('dependencies',", "the worker last checked in with the bundle service. Column('socket_id',", "# comma-separated list of allowed redirect URIs UniqueConstraint('client_id', name='uix_1'), )", "has never logged in Column( 'is_active', Boolean, nullable=False, default=True ),", "null) # - type = markup (value != null) #", "content of the chat? Column( 'worksheet_uuid', String(63), nullable=True ), #", "Column('last_name', String(30, convert_unicode=True)), Column('date_joined', DateTime, nullable=False), Column('has_access', Boolean, default=False, nullable=True),", "is on? ) # Store information about workers. 
worker =", "Column('worker_id', String(127), primary_key=True, nullable=False), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=True), Column('tag', Text,", "- type = bundle (bundle_uuid != null) # - type", "convert_unicode=True)), Column('date_joined', DateTime, nullable=False), Column('has_access', Boolean, default=False, nullable=True), Column('is_verified', Boolean,", "we can create a socket # for the worker before", "Which emails user wants to receive Column('last_login', DateTime), # Null", "!= null) # - type = directive (value != null)", "child_uuid, we have: key = child_path, target = (parent_uuid, parent_path)", "Receive no notifications NOTIFICATIONS_IMPORTANT = 0x01 # Receive only important", "worker = Table( 'worker', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False),", "'user_id'), Index('user_user_name_index', 'user_name'), UniqueConstraint('user_id', name='uix_1'), ) # Stores (email) verification", "Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('user_id', String(63),", "type = markup (value != null) # - type =", "nullable=False), Column('scopes', Text, nullable=False), # comma-separated list of allowed scopes", "When the worksheet was frozen (forever immutable) if it is.", "# Number of seconds allowed Column('parallel_run_quota', Integer, nullable=False), # Number", "password reset codes user_reset_code = Table( 'user_reset_code', db_metadata, Column( 'id',", "engine-specific data types for Unicode # data. 
Currently, only worksheet.title", "used Index('user_user_id_index', 'user_id'), Index('user_user_name_index', 'user_name'), UniqueConstraint('user_id', name='uix_1'), ) # Stores", "nullable=False), Column('worker_id', String(127), nullable=False), # No foreign key constraint on", "'worker_dependency', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False), Column('worker_id', String(127), primary_key=True,", "db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('worker_id', String(127), nullable=False), ForeignKeyConstraint(['user_id', 'worker_id'],", "BigInteger, nullable=True), # Available disk space on worker. Column( 'checkin_time',", "nullable=False), Column('bundle_type', String(63), nullable=False), # The command will be NULL", "Column('password', String(128), nullable=False), # Additional information Column('affiliation', String(255, convert_unicode=True), nullable=True),", "seconds allowed Column('parallel_run_quota', Integer, nullable=False), # Number of parallel jobs", "each worker. 
worker_socket = Table( 'worker_socket', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id),", "receive Column('last_login', DateTime), # Null if user has never logged", "to current timestamp by default Column( 'date_last_modified', DateTime ), #", "worksheet created before v0.5.31; Set to current_timestamp by default UniqueConstraint('uuid',", "Column('worker_id', String(127), nullable=False), ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']), Column('run_uuid', String(63), ForeignKey(bundle.c.uuid),", "'worksheet_tag', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ),", "nullable=False, autoincrement=True, ), Column('uuid', String(63), nullable=False), Column('name', String(255), nullable=False), Column('user_defined',", "worksheet_item = Table( 'worksheet_item', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True,", "import Column, ForeignKey, Index, MetaData, Table, UniqueConstraint from sqlalchemy.types import", "# Who received it? Column('message', Text, nullable=False), # What's the", "nullable=False ), # Whether the worker and the server have", "Index('metadata_kv_index', 'metadata_key', 'metadata_value', mysql_length=63), ) # For each child_uuid, we", "autoincrement=True, ), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('date_created', DateTime, nullable=False), Column('code',", "Tables oauth2_client = Table( 'oauth2_client', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"),", "was last modified; Set to null if the worksheet created", "String(255), nullable=False), Column('user_defined', Boolean), Column('owner_id', String(255), nullable=True), UniqueConstraint('uuid', name='uix_1'), Index('group_name_index',", "combination. # See WorkerModel for the serialization method. 
Column('dependencies', LargeBinary,", "nullable=False), # Whether a user is able to modify this", "compliant with RFC3696/5321 Column( 'notifications', Integer, nullable=False, default=NOTIFICATIONS_GENERAL ), #", "Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('client_id', String(63),", "v0.5.31; Set to current_timestamp by default UniqueConstraint('uuid', name='uix_1'), Index('worksheet_name_index', 'name'),", "(0), read (1), or all (2). GROUP_OBJECT_PERMISSION_NONE = 0x00 GROUP_OBJECT_PERMISSION_READ", "for messages on. Column( 'shared_file_system', Boolean, nullable=False ), # Whether", "# When did the user send this query? Column('sender_user_id', String(63),", "String, Text, Unicode, ) from sqlalchemy.sql.schema import ForeignKeyConstraint db_metadata =", "MetaData, Table, UniqueConstraint from sqlalchemy.types import ( BigInteger, Boolean, DateTime,", "Index('group_uuid_index', 'group_uuid'), Index('user_id_index', 'user_id'), ) # Permissions for bundles group_bundle_permission", "of allowed scopes Column('redirect_uris', Text, nullable=False), # comma-separated list of", "ForeignKey(bundle.c.uuid), because bundles can have # dependencies to bundles not", "worker last checked in with the bundle service. Column('socket_id', Integer,", "ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']), Column('run_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Index('uuid_index', 'run_uuid'),", "worker before adding the worker to the worker table. 
Column('socket_id',", "Column('uuid', String(63), nullable=False), Column('name', String(255), nullable=False), Column('user_defined', Boolean), Column('owner_id', String(255),", "now, but it will eventually # include columns for owner,", "Column('date_created', DateTime, nullable=False), Column('date_sent', DateTime, nullable=True), Column('key', String(64), nullable=False), )", "list of dependencies for the user/worker combination. # See WorkerModel", "bundle's value is still being computed. Column('data_hash', String(63), nullable=True), Column('state',", "group_bundle_permission = Table( 'group_bundle_permission', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True,", "Index, MetaData, Table, UniqueConstraint from sqlalchemy.types import ( BigInteger, Boolean,", "\"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False), #", "only worksheet.title uses the Unicode column type. from sqlalchemy import", "String(255), unique=True), Column('expires', DateTime, nullable=False), ) oauth2_auth_code = Table( 'oauth2_auth_code',", "bundles can have # dependencies to bundles not (yet) in", "( BigInteger, Boolean, DateTime, Enum, Float, Integer, LargeBinary, String, Text,", "UnicodeText as appropriate # This way, SQLAlchemy will automatically perform", "on? ) # Store information about workers. worker = Table(", "in Column( 'is_active', Boolean, nullable=False, default=True ), # Set to", "Index('worksheet_tag_tag_index', 'tag'), ) group = Table( 'group', db_metadata, Column( 'id',", "a user is able to modify this group. 
Column('is_admin', Boolean),", "jobs allowed Column('time_used', Float, nullable=False), # Number of seconds already", "the worksheet was last modified; Set to null if the", "worker_run = Table( 'worker_run', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('worker_id',", "# Null if user has never logged in Column( 'is_active',", "unique=True), Column('expires', DateTime, nullable=False), ) oauth2_auth_code = Table( 'oauth2_auth_code', db_metadata,", "Float, Integer, LargeBinary, String, Text, Unicode, ) from sqlalchemy.sql.schema import", "Index('group_name_index', 'name'), Index('group_owner_id_index', 'owner_id'), ) user_group = Table( 'user_group', db_metadata,", "to be compliant with RFC3696/5321 Column( 'notifications', Integer, nullable=False, default=NOTIFICATIONS_GENERAL", "each worker. worker_run = Table( 'worker_run', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id),", "# comma-separated list of allowed scopes Column('redirect_uris', Text, nullable=False), #", "# Store information about the bundles currently running on each", "'metadata_key', 'metadata_value', mysql_length=63), ) # For each child_uuid, we have:", "Integer, nullable=False ), # Number of jobs allowed to run", "run bundles. Column('command', Text, nullable=True), # The data_hash will be", "UniqueConstraint('client_id', name='uix_1'), ) oauth2_token = Table( 'oauth2_token', db_metadata, Column( 'id',", "ForeignKey(bundle.c.uuid), nullable=False), Column('child_path', Text, nullable=False), # Deliberately omit ForeignKey(bundle.c.uuid), because", "Store information about users' questions or feedback. 
chat = Table(", "# Receive general notifications (new features) # Store information about", "# - type = markup (value != null) # -", "String(63), nullable=False), Column('owner_id', String(255), nullable=True), Column('is_anonymous', Boolean, nullable=False, default=False), UniqueConstraint('uuid',", "a bundle Column('object_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), # Permissions encoded as", "0x02 # Receive general notifications (new features) # Store information", "nullable=False), Column('code', String(100), nullable=False), Column('expires', DateTime, nullable=False), Column('redirect_uri', String(255), nullable=False),", "Number of seconds already used Column('disk_quota', Float, nullable=False), # Number", "Column('refresh_token', String(255), unique=True), Column('expires', DateTime, nullable=False), ) oauth2_auth_code = Table(", "to receive Column('last_login', DateTime), # Null if user has never", "allocated to each worker. worker_socket = Table( 'worker_socket', db_metadata, Column('user_id',", "to run on worker. Column('is_terminating', Boolean, nullable=False), ) # Store", "Boolean, nullable=False, default=False), Column('is_superuser', Boolean, nullable=False, default=False), Column('password', String(128), nullable=False),", "Column('bundle_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Column('metadata_key', String(63), nullable=False), Column('metadata_value', Text, nullable=False),", "we have: key = child_path, target = (parent_uuid, parent_path) bundle_dependency", "# Store information about users. user = Table( 'user', db_metadata,", "nullable=True ), # Short human-readable description of the worksheet Column(", "Column( 'title', Unicode(255), nullable=True ), # Short human-readable description of", "the sender is on? Column( 'bundle_uuid', String(63), nullable=True ), #", "# What's the content of the chat? 
Column( 'worksheet_uuid', String(63),", "# - type = directive (value != null) # Deliberately", "String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False), Column('worker_id', String(127), primary_key=True, nullable=False), ForeignKeyConstraint(['user_id', 'worker_id'],", "String(127), nullable=False), ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']), Column('run_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False),", "columns with Unicode and UnicodeText as appropriate # This way,", "primary_key=True, nullable=False, autoincrement=True, ), Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False), # A", "sender is on? ) # Store information about workers. worker", "OAuth2 Tables oauth2_client = Table( 'oauth2_client', db_metadata, Column( 'id', BigInteger().with_variant(Integer,", "Column('owner_id', String(255), nullable=True), Column( 'title', Unicode(255), nullable=True ), # Short", "of GPUs on worker. Column('memory_bytes', BigInteger, nullable=False), # Total memory", "sqlalchemy.sql.schema import ForeignKeyConstraint db_metadata = MetaData() bundle = Table( 'bundle',", "db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('bundle_uuid',", "), # Number of jobs allowed to run on worker.", "name, description, etc. bundle_metadata = Table( 'bundle_metadata', db_metadata, Column( 'id',", "manager. ) # Includes things like name, description, etc. bundle_metadata", "default=True ), # Set to False instead of deleting users", "Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('worksheet_uuid', String(63),", "shared filesystem. 
Column( 'tag_exclusive', Boolean, nullable=False ), # Whether worker", "= Table( 'worker_run', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('worker_id', String(127),", "Index('worksheet_owner_index', 'owner_id'), ) worksheet_item = Table( 'worksheet_item', db_metadata, Column( 'id',", "(email) verification keys user_verification = Table( 'user_verification', db_metadata, Column( 'id',", "Table( 'user_reset_code', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True,", "Column('has_access', Boolean, default=False, nullable=True), Column('is_verified', Boolean, nullable=False, default=False), Column('is_superuser', Boolean,", "Table( 'group_bundle_permission', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True,", "Column('name', String(63), nullable=True), Column('secret', String(255), nullable=True), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=True),", "appropriate database engine-specific data types for Unicode # data. Currently,", "Column('secret', String(255), nullable=True), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=True), Column( 'grant_type', Enum(\"authorization_code\",", "CPUs on worker. Column('gpus', Integer, nullable=False), # Number of GPUs", "in with the bundle service. Column('socket_id', Integer, nullable=False), # Socket", "(2). 
GROUP_OBJECT_PERMISSION_NONE = 0x00 GROUP_OBJECT_PERMISSION_READ = 0x01 GROUP_OBJECT_PERMISSION_ALL = 0x02", "The data_hash will be NULL if the bundle's value is", "nullable=False, autoincrement=True, ), Column('child_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Column('child_path', Text, nullable=False),", "created; Set to null if the worksheet created before v0.5.31;", "'title', Unicode(255), nullable=True ), # Short human-readable description of the", "Table( 'user_verification', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True,", "Basic information Column('user_id', String(63), nullable=False), Column('user_name', String(63), nullable=False, unique=True), Column(", "by default UniqueConstraint('uuid', name='uix_1'), Index('worksheet_name_index', 'name'), Index('worksheet_owner_index', 'owner_id'), ) worksheet_item", "Text, nullable=False), # TODO: make this nullable Column('type', String(20), nullable=False),", "Column( 'frozen', DateTime, nullable=True ), # When the worksheet was", "nullable=False, autoincrement=True, ), # Basic information Column('user_id', String(63), nullable=False), Column('user_name',", ") # Permissions for worksheets group_object_permission = Table( 'group_object_permission', db_metadata,", "String(100), nullable=False), Column('expires', DateTime, nullable=False), Column('redirect_uri', String(255), nullable=False), ) #", "= Table( 'oauth2_token', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False,", "Table( 'worksheet_tag', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True,", "nullable=False, autoincrement=True, ), # Primary key Column('time', DateTime, nullable=False), #", "Column('url', String(255, convert_unicode=True), nullable=True), # Quotas Column('time_quota', Float, nullable=False), #", 
"['worker.user_id', 'worker.worker_id']), Column('run_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Index('uuid_index', 'run_uuid'), ) #", "the bundle manager. ) # Includes things like name, description,", "nullable=False), # Number of bytes allowed Column('disk_used', Float, nullable=False), #", "# Who sent it? Column('recipient_user_id', String(63), nullable=True), # Who received", "nullable=False), # Reference to a worksheet object Column('object_uuid', String(63), ForeignKey(worksheet.c.uuid),", "), # Whether the worker and the server have a", "), # What is the id of the bundle that", "Table( 'oauth2_token', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True,", "nullable=False), ) # OAuth2 Tables oauth2_client = Table( 'oauth2_client', db_metadata,", "nullable=False ), # When the worker last checked in with", "'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('user_id', String(63), ForeignKey(user.c.user_id),", "immutable) if it is. Column('is_anonymous', Boolean, nullable=False, default=False), Column( 'date_created',", "Table( 'worker_run', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('worker_id', String(127), nullable=False),", "Null if user has never logged in Column( 'is_active', Boolean,", "BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False),", "Replace String and Text columns with Unicode and UnicodeText as", "for the CodaLab bundle system tables. \"\"\" # TODO: Replace", "Table( 'bundle', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True,", "String(63), nullable=True), # Who sent it? 
Column('recipient_user_id', String(63), nullable=True), #", "# Number of seconds already used Column('disk_quota', Float, nullable=False), #", ") # Worksheet tags worksheet_tag = Table( 'worksheet_tag', db_metadata, Column(", "system. Column('bundle_uuid', String(63), nullable=True), Column('subworksheet_uuid', String(63), nullable=True), Column('value', Text, nullable=False),", "), Column('response_type', Enum(\"code\", \"token\"), nullable=False), Column('scopes', Text, nullable=False), # comma-separated", "Index('worksheet_item_subworksheet_uuid_index', 'subworksheet_uuid'), ) # Worksheet tags worksheet_tag = Table( 'worksheet_tag',", "description, etc. bundle_metadata = Table( 'bundle_metadata', db_metadata, Column( 'id', BigInteger().with_variant(Integer,", "null) # - type = directive (value != null) #", "= Table( 'bundle_dependency', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False,", "Available disk space on worker. Column( 'checkin_time', DateTime, nullable=False ),", "nullable=False), Column('name', String(255), nullable=False), Column('owner_id', String(255), nullable=True), Column( 'title', Unicode(255),", "Column('date_sent', DateTime, nullable=True), Column('key', String(64), nullable=False), ) # Stores password", "Column('owner_id', String(255), nullable=True), UniqueConstraint('uuid', name='uix_1'), Index('group_name_index', 'name'), Index('group_owner_id_index', 'owner_id'), )", "Boolean, nullable=False, default=False), Column('password', String(128), nullable=False), # Additional information Column('affiliation',", "Text, nullable=False), Index('metadata_kv_index', 'metadata_key', 'metadata_value', mysql_length=63), ) # For each", "= 0x02 # Receive general notifications (new features) # Store", ") # A permission value is one of the following:", "information about users. 
user = Table( 'user', db_metadata, Column( 'id',", "- type = markup (value != null) # - type", "# Number of bytes allowed Column('disk_used', Float, nullable=False), # Number", "# Socket ID worker listens for messages on. Column( 'shared_file_system',", "Column('command', Text, nullable=True), # The data_hash will be NULL if", "Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False), # A worksheet item is either:", "BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), # Basic information Column('user_id',", "specific workers. Column('cpus', Integer, nullable=False), # Number of CPUs on", "and the server have a shared filesystem. Column( 'tag_exclusive', Boolean,", "), # Which emails user wants to receive Column('last_login', DateTime),", "Integer, LargeBinary, String, Text, Unicode, ) from sqlalchemy.sql.schema import ForeignKeyConstraint", "create a socket # for the worker before adding the", "the system. Column('bundle_uuid', String(63), nullable=True), Column('subworksheet_uuid', String(63), nullable=True), Column('value', Text,", "default=NOTIFICATIONS_GENERAL ), # Which emails user wants to receive Column('last_login',", "on worker. Column('gpus', Integer, nullable=False), # Number of GPUs on", "worker table so that we can create a socket #", "command will be NULL except for run bundles. 
Column('command', Text,", "'user_id'), ) # Permissions for bundles group_bundle_permission = Table( 'group_bundle_permission',", "ForeignKey(\"user.user_id\"), nullable=False), # Whether a user is able to modify", "Column('object_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False), # Permissions encoded as integer (see", "comma-separated list of allowed scopes Column('redirect_uris', Text, nullable=False), # comma-separated", "integrity Column('first_name', String(30, convert_unicode=True)), Column('last_name', String(30, convert_unicode=True)), Column('date_joined', DateTime, nullable=False),", "Table( 'worksheet_item', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True,", "Index('user_user_id_index', 'user_id'), Index('user_user_name_index', 'user_name'), UniqueConstraint('user_id', name='uix_1'), ) # Stores (email)", "String(63), ForeignKey(worksheet.c.uuid), nullable=False), Column('tag', String(63), nullable=False), Index('worksheet_tag_worksheet_uuid_index', 'worksheet_uuid'), Index('worksheet_tag_tag_index', 'tag'),", "bundle_metadata = Table( 'bundle_metadata', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True,", "'tag'), ) group = Table( 'group', db_metadata, Column( 'id', BigInteger().with_variant(Integer,", "autoincrement=True, ), # Primary key Column('time', DateTime, nullable=False), # When", "# Needed for the bundle manager. ) # Includes things", "= Table( 'bundle', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False,", "nullable=False), Column('access_token', String(255), unique=True), Column('refresh_token', String(255), unique=True), Column('expires', DateTime, nullable=False),", "# encoding, or use appropriate database engine-specific data types for", "for run bundles. 
Column('command', Text, nullable=True), # The data_hash will", "# The data_hash will be NULL if the bundle's value", "runs on specific workers. Column('cpus', Integer, nullable=False), # Number of", "the bundle's value is still being computed. Column('data_hash', String(63), nullable=True),", "'worksheet', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ),", "nullable=False), Column('expires', DateTime, nullable=False), Column('redirect_uri', String(255), nullable=False), ) # Store", "to False instead of deleting users to maintain foreign key", "\"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False), #", "notifications (new features) # Store information about users. user =", "Text columns with Unicode and UnicodeText as appropriate # This", "table does not have many columns now, but it will", "Column('subworksheet_uuid', String(63), nullable=True), Column('value', Text, nullable=False), # TODO: make this", "either: # - type = bundle (bundle_uuid != null) #", "nullable=False, autoincrement=True, ), Column('client_id', String(63), ForeignKey(oauth2_client.c.client_id), nullable=False), Column('user_id', String(63), ForeignKey(user.c.user_id),", "to a bundle Column('object_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), # Permissions encoded", "Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('date_created', DateTime, nullable=False), Column('date_sent', DateTime, nullable=True),", "primary_key=True, nullable=False), ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']), # Serialized list of", "0x00 # Receive no notifications NOTIFICATIONS_IMPORTANT = 0x01 # Receive", "of bytes already used Index('user_user_id_index', 'user_id'), Index('user_user_name_index', 'user_name'), UniqueConstraint('user_id', name='uix_1'),", 
"name='uix_1'), Index('worksheet_name_index', 'name'), Index('worksheet_owner_index', 'owner_id'), ) worksheet_item = Table( 'worksheet_item',", "), # Primary key Column('time', DateTime, nullable=False), # When did", "# Available disk space on worker. Column( 'checkin_time', DateTime, nullable=False", "Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('bundle_uuid', String(63),", "'is_active', Boolean, nullable=False, default=True ), # Set to False instead", "nullable=False), # Reference to a bundle Column('object_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False),", "Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('worker_id', String(127), nullable=False), # No foreign", "ForeignKey(bundle.c.uuid), nullable=False), Column('metadata_key', String(63), nullable=False), Column('metadata_value', Text, nullable=False), Index('metadata_kv_index', 'metadata_key',", "String(63), nullable=True), Column('value', Text, nullable=False), # TODO: make this nullable", "worksheet created before v0.5.31; Set to current timestamp by default", "on each worker. worker_dependency = Table( 'worker_dependency', db_metadata, Column('user_id', String(63),", "'oauth2_token', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ),", "owner, group, permissions, etc. worksheet = Table( 'worksheet', db_metadata, Column(", "key Column('time', DateTime, nullable=False), # When did the user send", "about workers. 
worker = Table( 'worker', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id),", "Set to null if the worksheet created before v0.5.31; Set", "db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('user_id',", "DateTime, nullable=True), Column('key', String(64), nullable=False), ) # Stores password reset", "Permissions encoded as integer (see below) Column('permission', Integer, nullable=False), )", "Additional information Column('affiliation', String(255, convert_unicode=True), nullable=True), Column('url', String(255, convert_unicode=True), nullable=True),", "Index('worksheet_name_index', 'name'), Index('worksheet_owner_index', 'owner_id'), ) worksheet_item = Table( 'worksheet_item', db_metadata,", "primary_key=True, nullable=False), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=True), Column('tag', Text, nullable=True), #", "'user_name'), UniqueConstraint('user_id', name='uix_1'), ) # Stores (email) verification keys user_verification", "general notifications (new features) # Store information about users. user", "Column( 'notifications', Integer, nullable=False, default=NOTIFICATIONS_GENERAL ), # Which emails user", "worksheet was frozen (forever immutable) if it is. Column('is_anonymous', Boolean,", "'worker_run', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('worker_id', String(127), nullable=False), ForeignKeyConstraint(['user_id',", "user_group = Table( 'user_group', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True,", "bundles and worksheets not (yet) in the system. Column('bundle_uuid', String(63),", "below) Column('permission', Integer, nullable=False), ) # Permissions for worksheets group_object_permission", "this query? Column('sender_user_id', String(63), nullable=True), # Who sent it? Column('recipient_user_id',", "listens for messages on. 
Column( 'shared_file_system', Boolean, nullable=False ), #", "# Receive only important notifications NOTIFICATIONS_GENERAL = 0x02 # Receive", "ForeignKey(user.c.user_id), nullable=False), Column('scopes', Text, nullable=False), Column('access_token', String(255), unique=True), Column('refresh_token', String(255),", "is still being computed. Column('data_hash', String(63), nullable=True), Column('state', String(63), nullable=False),", "nullable=True), Index('worksheet_item_worksheet_uuid_index', 'worksheet_uuid'), Index('worksheet_item_bundle_uuid_index', 'bundle_uuid'), Index('worksheet_item_subworksheet_uuid_index', 'subworksheet_uuid'), ) # Worksheet", "\"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('date_created',", "system. Column('parent_uuid', String(63), nullable=False), Column('parent_path', Text, nullable=False), ) # The", "if it is. Column('is_anonymous', Boolean, nullable=False, default=False), Column( 'date_created', DateTime", "name='uix_1'), ) # Stores (email) verification keys user_verification = Table(", "that allows for scheduling runs on specific workers. Column('cpus', Integer,", "notifications NOTIFICATIONS_GENERAL = 0x02 # Receive general notifications (new features)", "# Which emails user wants to receive Column('last_login', DateTime), #", "), Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False), Column('tag', String(63), nullable=False), Index('worksheet_tag_worksheet_uuid_index', 'worksheet_uuid'),", "worker. Column( 'checkin_time', DateTime, nullable=False ), # When the worker", "Set to False instead of deleting users to maintain foreign", "Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False), Column('worker_id', String(127), primary_key=True, nullable=False), ForeignKeyConstraint(['user_id',", "features) # Store information about users. 
user = Table( 'user',", "worksheet Column( 'frozen', DateTime, nullable=True ), # When the worksheet", "scheduling runs on specific workers. Column('cpus', Integer, nullable=False), # Number", "See WorkerModel for the serialization method. Column('dependencies', LargeBinary, nullable=False), )", "the user send this query? Column('sender_user_id', String(63), nullable=True), # Who", "'shared_file_system', Boolean, nullable=False ), # Whether the worker and the", "Reference to a worksheet object Column('object_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False), #", "each child_uuid, we have: key = child_path, target = (parent_uuid,", "Column('scopes', Text, nullable=False), # comma-separated list of allowed scopes Column('redirect_uris',", "id of the worksheet that the sender is on? Column(", "'email', String(254), nullable=False, unique=True ), # Length of 254 to", "notifications value is one of the following: NOTIFICATIONS_NONE = 0x00", "encoded as integer (see below) Column('permission', Integer, nullable=False), ) #", "tags worksheet_tag = Table( 'worksheet_tag', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"),", "# Store information about users' questions or feedback. chat =", "group_object_permission = Table( 'group_object_permission', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True,", "nullable=True), # Who received it? Column('message', Text, nullable=False), # What's", "value is one of the following: none (0), read (1),", "Integer, nullable=False), # Number of GPUs on worker. Column('memory_bytes', BigInteger,", "'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('bundle_uuid', String(63), ForeignKey(bundle.c.uuid),", "Column('recipient_user_id', String(63), nullable=True), # Who received it? 
Column('message', Text, nullable=False),", "that we can create a socket # for the worker", "primary_key=True, nullable=False, autoincrement=True, ), Column('client_id', String(63), nullable=False), Column('name', String(63), nullable=True),", "primary_key=True, nullable=False, autoincrement=True, ), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('date_created', DateTime,", "UniqueConstraint('uuid', name='uix_1'), Index('bundle_data_hash_index', 'data_hash'), Index('state_index', 'state'), # Needed for the", "String(63), nullable=True), Column('secret', String(255), nullable=True), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=True), Column(", "group. Column('is_admin', Boolean), Index('group_uuid_index', 'group_uuid'), Index('user_id_index', 'user_id'), ) # Permissions", "= 0x01 # Receive only important notifications NOTIFICATIONS_GENERAL = 0x02", "or all (2). GROUP_OBJECT_PERMISSION_NONE = 0x00 GROUP_OBJECT_PERMISSION_READ = 0x01 GROUP_OBJECT_PERMISSION_ALL", "worker_socket = Table( 'worker_socket', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('worker_id',", "nullable=False), Column('has_access', Boolean, default=False, nullable=True), Column('is_verified', Boolean, nullable=False, default=False), Column('is_superuser',", "modify this group. 
Column('is_admin', Boolean), Index('group_uuid_index', 'group_uuid'), Index('user_id_index', 'user_id'), )", "DateTime, nullable=True ), # When the worksheet was frozen (forever", "Boolean, nullable=False ), # Whether the worker and the server", "Primary key Column('time', DateTime, nullable=False), # When did the user", "'run_uuid'), ) # Store information about the dependencies available on", "with Unicode and UnicodeText as appropriate # This way, SQLAlchemy", "'subworksheet_uuid'), ) # Worksheet tags worksheet_tag = Table( 'worksheet_tag', db_metadata,", "BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('child_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False),", "worksheet object Column('object_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False), # Permissions encoded as", "all (2). GROUP_OBJECT_PERMISSION_NONE = 0x00 GROUP_OBJECT_PERMISSION_READ = 0x01 GROUP_OBJECT_PERMISSION_ALL =", "# Whether the worker and the server have a shared", "Table( 'group_object_permission', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True,", ") # Permissions for bundles group_bundle_permission = Table( 'group_bundle_permission', db_metadata,", "Who received it? Column('message', Text, nullable=False), # What's the content", "CodaLab bundle system tables. \"\"\" # TODO: Replace String and", "Includes things like name, description, etc. bundle_metadata = Table( 'bundle_metadata',", "Index('worksheet_item_worksheet_uuid_index', 'worksheet_uuid'), Index('worksheet_item_bundle_uuid_index', 'bundle_uuid'), Index('worksheet_item_subworksheet_uuid_index', 'subworksheet_uuid'), ) # Worksheet tags", "about the bundles currently running on each worker. 
worker_run =", "default=False), Column('password', String(128), nullable=False), # Additional information Column('affiliation', String(255, convert_unicode=True),", "Column('permission', Integer, nullable=False), ) # Permissions for worksheets group_object_permission =", "it? Column('recipient_user_id', String(63), nullable=True), # Who received it? Column('message', Text,", "in the system. Column('parent_uuid', String(63), nullable=False), Column('parent_path', Text, nullable=False), )", ") # The worksheet table does not have many columns", "Column('client_id', String(63), nullable=False), Column('name', String(63), nullable=True), Column('secret', String(255), nullable=True), Column('user_id',", "Column('uuid', String(63), nullable=False), Column('bundle_type', String(63), nullable=False), # The command will", "Text, nullable=False), ) # The worksheet table does not have", "# Reference to a bundle Column('object_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), #", "read (1), or all (2). GROUP_OBJECT_PERMISSION_NONE = 0x00 GROUP_OBJECT_PERMISSION_READ =", "if the bundle's value is still being computed. Column('data_hash', String(63),", "'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid),", "0x00 GROUP_OBJECT_PERMISSION_READ = 0x01 GROUP_OBJECT_PERMISSION_ALL = 0x02 # A notifications", "nullable=False), Index('worksheet_tag_worksheet_uuid_index', 'worksheet_uuid'), Index('worksheet_tag_tag_index', 'tag'), ) group = Table( 'group',", "Socket ID worker listens for messages on. 
Column( 'shared_file_system', Boolean,", "nullable=False), # Deliberately omit ForeignKey(bundle.c.uuid), because bundles can have #", "reset codes user_reset_code = Table( 'user_reset_code', db_metadata, Column( 'id', BigInteger().with_variant(Integer,", "String(63), ForeignKey(group.c.uuid), nullable=True), Column('tag', Text, nullable=True), # Tag that allows", "to each worker. worker_socket = Table( 'worker_socket', db_metadata, Column('user_id', String(63),", "'group_uuid'), Index('user_id_index', 'user_id'), ) # Permissions for bundles group_bundle_permission =", "user_verification = Table( 'user_verification', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True,", "to and from UTF-8 # encoding, or use appropriate database", "autoincrement=True, ), Column('bundle_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Column('metadata_key', String(63), nullable=False), Column('metadata_value',", "# TODO: make this nullable Column('type', String(20), nullable=False), Column('sort_key', Integer,", ") # Store information about the dependencies available on each", "and only if they match tags. Column( 'exit_after_num_runs', Integer, nullable=False", "String(63), ForeignKey(bundle.c.uuid), nullable=False), Column('metadata_key', String(63), nullable=False), Column('metadata_value', Text, nullable=False), Index('metadata_kv_index',", "on each worker. 
worker_run = Table( 'worker_run', db_metadata, Column('user_id', String(63),", "\"password\", \"client_credentials\", \"refresh_token\"), nullable=False, ), Column('response_type', Enum(\"code\", \"token\"), nullable=False), Column('scopes',", "table so that we can create a socket # for", "Table( 'user', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True,", ") user_group = Table( 'user_group', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"),", "information about users' questions or feedback. chat = Table( 'chat',", "= worksheet (subworksheet_uuid != null) # - type = markup", "being computed. Column('data_hash', String(63), nullable=True), Column('state', String(63), nullable=False), Column('owner_id', String(255),", "!= null) # Deliberately omit ForeignKey(bundle.c.uuid), because worksheets can contain", "Column('time_used', Float, nullable=False), # Number of seconds already used Column('disk_quota',", "= child_path, target = (parent_uuid, parent_path) bundle_dependency = Table( 'bundle_dependency',", "Index('user_user_name_index', 'user_name'), UniqueConstraint('user_id', name='uix_1'), ) # Stores (email) verification keys", "data. Currently, only worksheet.title uses the Unicode column type. from", "on. Column( 'shared_file_system', Boolean, nullable=False ), # Whether the worker", "sent it? Column('recipient_user_id', String(63), nullable=True), # Who received it? Column('message',", "and worksheets not (yet) in the system. 
Column('bundle_uuid', String(63), nullable=True),", "primary_key=True, nullable=False, autoincrement=True, ), # Basic information Column('user_id', String(63), nullable=False),", "# Set to False instead of deleting users to maintain", "nullable=False, autoincrement=True, ), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('date_created', DateTime, nullable=False),", "primary_key=True, nullable=False, autoincrement=True, ), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False), Column('user_id', String(63),", "= (parent_uuid, parent_path) bundle_dependency = Table( 'bundle_dependency', db_metadata, Column( 'id',", "Number of CPUs on worker. Column('gpus', Integer, nullable=False), # Number", "human-readable description of the worksheet Column( 'frozen', DateTime, nullable=True ),", "ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']), # Serialized list of dependencies for", "(value != null) # - type = directive (value !=", "String(128), nullable=False), # Additional information Column('affiliation', String(255, convert_unicode=True), nullable=True), Column('url',", "), Column('bundle_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Column('metadata_key', String(63), nullable=False), Column('metadata_value', Text,", "(bundle_uuid != null) # - type = worksheet (subworksheet_uuid !=", "this nullable Column('type', String(20), nullable=False), Column('sort_key', Integer, nullable=True), Index('worksheet_item_worksheet_uuid_index', 'worksheet_uuid'),", "String(255, convert_unicode=True), nullable=True), # Quotas Column('time_quota', Float, nullable=False), # Number", "list of allowed scopes Column('redirect_uris', Text, nullable=False), # comma-separated list", "nullable=False), Column('user_defined', Boolean), Column('owner_id', String(255), nullable=True), UniqueConstraint('uuid', name='uix_1'), Index('group_name_index', 'name'),", "allowed to run on worker. 
Column('is_terminating', Boolean, nullable=False), ) #", "Column('parent_path', Text, nullable=False), ) # The worksheet table does not", "nullable=False, default=False), UniqueConstraint('uuid', name='uix_1'), Index('bundle_data_hash_index', 'data_hash'), Index('state_index', 'state'), # Needed", "), # When the worksheet was last modified; Set to", "ForeignKey(group.c.uuid), nullable=False), # Reference to a worksheet object Column('object_uuid', String(63),", "Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False), Column('worker_id', String(127), primary_key=True, nullable=False), Column('group_uuid',", "bundle = Table( 'bundle', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True,", "\"token\"), nullable=False), Column('scopes', Text, nullable=False), # comma-separated list of allowed", "# This way, SQLAlchemy will automatically perform conversions to and", "String(63), nullable=False), Column('name', String(255), nullable=False), Column('owner_id', String(255), nullable=True), Column( 'title',", "key = child_path, target = (parent_uuid, parent_path) bundle_dependency = Table(", "Who sent it? Column('recipient_user_id', String(63), nullable=True), # Who received it?", "allows for scheduling runs on specific workers. Column('cpus', Integer, nullable=False),", "= Table( 'worksheet_item', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False,", "like name, description, etc. bundle_metadata = Table( 'bundle_metadata', db_metadata, Column(", "worker runs bundles if and only if they match tags.", "UniqueConstraint from sqlalchemy.types import ( BigInteger, Boolean, DateTime, Enum, Float,", "appropriate # This way, SQLAlchemy will automatically perform conversions to", "the worksheet that the sender is on? 
Column( 'bundle_uuid', String(63),", "'user_reset_code', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ),", "Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('scopes', Text, nullable=False), Column('code', String(100), nullable=False),", "Receive only important notifications NOTIFICATIONS_GENERAL = 0x02 # Receive general", "= directive (value != null) # Deliberately omit ForeignKey(bundle.c.uuid), because", "nullable=False), Column('metadata_value', Text, nullable=False), Index('metadata_kv_index', 'metadata_key', 'metadata_value', mysql_length=63), ) #", "# Deliberately omit ForeignKey(bundle.c.uuid), because worksheets can contain # bundles", "user wants to receive Column('last_login', DateTime), # Null if user", "= Table( 'user', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False,", "workers. worker = Table( 'worker', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True,", "Column('redirect_uri', String(255), nullable=False), ) # Store information about users' questions", "# Additional information Column('affiliation', String(255, convert_unicode=True), nullable=True), Column('url', String(255, convert_unicode=True),", "worker to the worker table. Column('socket_id', Integer, primary_key=True, nullable=False), )", "Needed for the bundle manager. ) # Includes things like", "nullable=True), # The data_hash will be NULL if the bundle's", "# TODO: Replace String and Text columns with Unicode and", "worksheet was created; Set to null if the worksheet created", "# A worksheet item is either: # - type =", "# - type = worksheet (subworksheet_uuid != null) # -", "group = Table( 'group', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True,", "'state'), # Needed for the bundle manager. 
) # Includes", "# Primary key Column('time', DateTime, nullable=False), # When did the", "nullable=False), Column('owner_id', String(255), nullable=True), Column( 'title', Unicode(255), nullable=True ), #", "only if they match tags. Column( 'exit_after_num_runs', Integer, nullable=False ),", "'exit_after_num_runs', Integer, nullable=False ), # Number of jobs allowed to", "String(63), nullable=False), Column('name', String(255), nullable=False), Column('user_defined', Boolean), Column('owner_id', String(255), nullable=True),", "autoincrement=True, ), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False), # Reference to a", "BigInteger, nullable=False), # Total memory of worker. Column('free_disk_bytes', BigInteger, nullable=True),", "following: NOTIFICATIONS_NONE = 0x00 # Receive no notifications NOTIFICATIONS_IMPORTANT =", "ForeignKey(group.c.uuid), nullable=True), Column('tag', Text, nullable=True), # Tag that allows for", "never logged in Column( 'is_active', Boolean, nullable=False, default=True ), #", "nullable=False), ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']), Column('run_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Index('uuid_index',", "# Serialized list of dependencies for the user/worker combination. #", "they match tags. Column( 'exit_after_num_runs', Integer, nullable=False ), # Number", "# Includes things like name, description, etc. 
bundle_metadata = Table(", "from sqlalchemy.sql.schema import ForeignKeyConstraint db_metadata = MetaData() bundle = Table(", "Column('bundle_type', String(63), nullable=False), # The command will be NULL except", "default=False), Column('is_superuser', Boolean, nullable=False, default=False), Column('password', String(128), nullable=False), # Additional", "Integer, nullable=False), # Socket ID worker listens for messages on.", "Float, nullable=False), # Number of seconds already used Column('disk_quota', Float,", "(subworksheet_uuid != null) # - type = markup (value !=", "'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('client_id', String(63), nullable=False),", "nullable=False), ) oauth2_auth_code = Table( 'oauth2_auth_code', db_metadata, Column( 'id', BigInteger().with_variant(Integer,", "'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('client_id', String(63), ForeignKey(oauth2_client.c.client_id),", "\"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False), Column('tag',", "String(255), nullable=False), Column('owner_id', String(255), nullable=True), Column( 'title', Unicode(255), nullable=True ),", "String(63), ForeignKey(user.c.user_id), nullable=False), Column('worker_id', String(127), nullable=False), # No foreign key", "# Permissions for bundles group_bundle_permission = Table( 'group_bundle_permission', db_metadata, Column(", "nullable=False), Column('scopes', Text, nullable=False), Column('access_token', String(255), unique=True), Column('refresh_token', String(255), unique=True),", "Column('child_path', Text, nullable=False), # Deliberately omit ForeignKey(bundle.c.uuid), because bundles can", "Column('name', String(255), nullable=False), Column('owner_id', String(255), nullable=True), Column( 'title', Unicode(255), 
nullable=True", "allowed scopes Column('redirect_uris', Text, nullable=False), # comma-separated list of allowed", "What's the content of the chat? Column( 'worksheet_uuid', String(63), nullable=True", "# Permissions for worksheets group_object_permission = Table( 'group_object_permission', db_metadata, Column(", "String(63), ForeignKey(user.c.user_id), nullable=False), Column('date_created', DateTime, nullable=False), Column('code', String(64), nullable=False), )", "Column('user_defined', Boolean), Column('owner_id', String(255), nullable=True), UniqueConstraint('uuid', name='uix_1'), Index('group_name_index', 'name'), Index('group_owner_id_index',", "default UniqueConstraint('uuid', name='uix_1'), Index('worksheet_name_index', 'name'), Index('worksheet_owner_index', 'owner_id'), ) worksheet_item =", "String(63), ForeignKey(user.c.user_id), nullable=False), Column('scopes', Text, nullable=False), Column('access_token', String(255), unique=True), Column('refresh_token',", ") # Store information about workers. worker = Table( 'worker',", "GPUs on worker. Column('memory_bytes', BigInteger, nullable=False), # Total memory of", "value is one of the following: NOTIFICATIONS_NONE = 0x00 #", "Text, nullable=False), # What's the content of the chat? Column(", "nullable=True), Column('state', String(63), nullable=False), Column('owner_id', String(255), nullable=True), Column('is_anonymous', Boolean, nullable=False,", "\"client_credentials\", \"refresh_token\"), nullable=False, ), Column('response_type', Enum(\"code\", \"token\"), nullable=False), Column('scopes', Text,", "String(63), nullable=True ), # What is the id of the", "nullable=False, autoincrement=True, ), Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False), Column('tag', String(63), nullable=False),", "ID worker listens for messages on. Column( 'shared_file_system', Boolean, nullable=False", "# for the worker before adding the worker to the", "feedback. 
chat = Table( 'chat', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"),", "ForeignKey(bundle.c.uuid), nullable=False), Index('uuid_index', 'run_uuid'), ) # Store information about the", "nullable=False), # Total memory of worker. Column('free_disk_bytes', BigInteger, nullable=True), #", "# Basic information Column('user_id', String(63), nullable=False), Column('user_name', String(63), nullable=False, unique=True),", "nullable=False), Column('child_path', Text, nullable=False), # Deliberately omit ForeignKey(bundle.c.uuid), because bundles", "String(63), nullable=False), # The command will be NULL except for", "type = worksheet (subworksheet_uuid != null) # - type =", "= Table( 'oauth2_auth_code', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False,", "'date_created', DateTime ), # When the worksheet was created; Set", "already used Index('user_user_id_index', 'user_id'), Index('user_user_name_index', 'user_name'), UniqueConstraint('user_id', name='uix_1'), ) #", "not (yet) in the system. 
Column('bundle_uuid', String(63), nullable=True), Column('subworksheet_uuid', String(63),", "String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False), Column('worker_id', String(127), primary_key=True, nullable=False), Column('group_uuid', String(63),", "ForeignKey(bundle.c.uuid), because worksheets can contain # bundles and worksheets not", "= 0x00 GROUP_OBJECT_PERMISSION_READ = 0x01 GROUP_OBJECT_PERMISSION_ALL = 0x02 # A", "= Table( 'user_group', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False,", "Table( 'worker', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False), Column('worker_id', String(127),", "nullable=False), Column('worker_id', String(127), primary_key=True, nullable=False), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=True), Column('tag',", "Column('date_created', DateTime, nullable=False), Column('code', String(64), nullable=False), ) # OAuth2 Tables", "currently running on each worker. worker_run = Table( 'worker_run', db_metadata,", "# Store information about workers. worker = Table( 'worker', db_metadata,", "\"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('client_id', String(63), nullable=False), Column('name', String(63),", "Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=True), Column('tag', Text, nullable=True), # Tag that", "about the dependencies available on each worker. 
worker_dependency = Table(", "nullable=False, autoincrement=True, ), Column('uuid', String(63), nullable=False), Column('name', String(255), nullable=False), Column('owner_id',", "Column( 'tag_exclusive', Boolean, nullable=False ), # Whether worker runs bundles", "\"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('client_id', String(63), ForeignKey(oauth2_client.c.client_id), nullable=False), Column('user_id',", "primary_key=True, nullable=False, autoincrement=True, ), Column('uuid', String(63), nullable=False), Column('name', String(255), nullable=False),", "group, permissions, etc. worksheet = Table( 'worksheet', db_metadata, Column( 'id',", "nullable=False), # Number of bytes already used Index('user_user_id_index', 'user_id'), Index('user_user_name_index',", "Column( 'email', String(254), nullable=False, unique=True ), # Length of 254", "does not have many columns now, but it will eventually", "'frozen', DateTime, nullable=True ), # When the worksheet was frozen", "db_metadata = MetaData() bundle = Table( 'bundle', db_metadata, Column( 'id',", "= Table( 'worksheet', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False,", "No foreign key constraint on the worker table so that", "# Tag that allows for scheduling runs on specific workers.", "the following: none (0), read (1), or all (2). GROUP_OBJECT_PERMISSION_NONE", "'name'), Index('worksheet_owner_index', 'owner_id'), ) worksheet_item = Table( 'worksheet_item', db_metadata, Column(", "default=False), Column( 'date_created', DateTime ), # When the worksheet was", "Text, nullable=False), Column('code', String(100), nullable=False), Column('expires', DateTime, nullable=False), Column('redirect_uri', String(255),", "nullable=True), # Available disk space on worker. Column( 'checkin_time', DateTime,", "BigInteger, Boolean, DateTime, Enum, Float, Integer, LargeBinary, String, Text, Unicode,", "have a shared filesystem. 
Column( 'tag_exclusive', Boolean, nullable=False ), #", "String(255), nullable=True), Column( 'title', Unicode(255), nullable=True ), # Short human-readable", ") # Stores password reset codes user_reset_code = Table( 'user_reset_code',", "runs bundles if and only if they match tags. Column(", "nullable=False, default=NOTIFICATIONS_GENERAL ), # Which emails user wants to receive", "the worker and the server have a shared filesystem. Column(", ") oauth2_token = Table( 'oauth2_token', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"),", "emails user wants to receive Column('last_login', DateTime), # Null if", "Column('user_id', String(63), ForeignKey(\"user.user_id\"), nullable=False), # Whether a user is able", "String(64), nullable=False), ) # Stores password reset codes user_reset_code =", "Enum(\"authorization_code\", \"password\", \"client_credentials\", \"refresh_token\"), nullable=False, ), Column('response_type', Enum(\"code\", \"token\"), nullable=False),", "db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('uuid',", "String(63), nullable=False), Column('parent_path', Text, nullable=False), ) # The worksheet table", "ForeignKey(bundle.c.uuid), nullable=False), # Permissions encoded as integer (see below) Column('permission',", "object Column('object_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False), # Permissions encoded as integer", "= Table( 'group_bundle_permission', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False,", "Unicode # data. Currently, only worksheet.title uses the Unicode column", "NOTIFICATIONS_GENERAL = 0x02 # Receive general notifications (new features) #", "worker. 
Column('is_terminating', Boolean, nullable=False), ) # Store information about all", "BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), # Primary key Column('time',", "(yet) in the system. Column('parent_uuid', String(63), nullable=False), Column('parent_path', Text, nullable=False),", "Column('owner_id', String(255), nullable=True), Column('is_anonymous', Boolean, nullable=False, default=False), UniqueConstraint('uuid', name='uix_1'), Index('bundle_data_hash_index',", "frozen (forever immutable) if it is. Column('is_anonymous', Boolean, nullable=False, default=False),", "id of the bundle that the sender is on? )", "autoincrement=True, ), Column('uuid', String(63), nullable=False), Column('name', String(255), nullable=False), Column('owner_id', String(255),", "# When the worksheet was last modified; Set to null", "), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False), Column('user_id', String(63), ForeignKey(\"user.user_id\"), nullable=False), #", "ForeignKeyConstraint db_metadata = MetaData() bundle = Table( 'bundle', db_metadata, Column(", "!= null) # - type = markup (value != null)", "users to maintain foreign key integrity Column('first_name', String(30, convert_unicode=True)), Column('last_name',", "received it? 
Column('message', Text, nullable=False), # What's the content of", "False instead of deleting users to maintain foreign key integrity", "nullable=False), Column('user_id', String(63), ForeignKey(\"user.user_id\"), nullable=False), # Whether a user is", "nullable=True), Column('tag', Text, nullable=True), # Tag that allows for scheduling", "worksheet was last modified; Set to null if the worksheet", "Column('is_admin', Boolean), Index('group_uuid_index', 'group_uuid'), Index('user_id_index', 'user_id'), ) # Permissions for", "bundle Column('object_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), # Permissions encoded as integer", "'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), # Basic information", "BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('client_id', String(63), nullable=False), Column('name',", "last modified; Set to null if the worksheet created before", "(see below) Column('permission', Integer, nullable=False), ) # Permissions for worksheets", "Column('worker_id', String(127), primary_key=True, nullable=False), ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']), # Serialized", "String(63), ForeignKey(user.c.user_id), nullable=False), Column('date_created', DateTime, nullable=False), Column('date_sent', DateTime, nullable=True), Column('key',", "autoincrement=True, ), Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False), Column('tag', String(63), nullable=False), Index('worksheet_tag_worksheet_uuid_index',", "254 to be compliant with RFC3696/5321 Column( 'notifications', Integer, nullable=False,", "chat = Table( 'chat', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True,", "nullable=False), # Additional information Column('affiliation', String(255, convert_unicode=True), nullable=True), Column('url', 
String(255,", "system tables. \"\"\" # TODO: Replace String and Text columns", "about users. user = Table( 'user', db_metadata, Column( 'id', BigInteger().with_variant(Integer,", "db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('client_id',", "bytes allowed Column('disk_used', Float, nullable=False), # Number of bytes already", "Enum, Float, Integer, LargeBinary, String, Text, Unicode, ) from sqlalchemy.sql.schema", "), Column('uuid', String(63), nullable=False), Column('name', String(255), nullable=False), Column('user_defined', Boolean), Column('owner_id',", "Column('object_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), # Permissions encoded as integer (see", "last checked in with the bundle service. Column('socket_id', Integer, nullable=False),", "'worker_id'], ['worker.user_id', 'worker.worker_id']), Column('run_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Index('uuid_index', 'run_uuid'), )", "What is the id of the worksheet that the sender", "match tags. 
Column( 'exit_after_num_runs', Integer, nullable=False ), # Number of", "type = directive (value != null) # Deliberately omit ForeignKey(bundle.c.uuid),", "worker_dependency = Table( 'worker_dependency', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False),", "# Number of parallel jobs allowed Column('time_used', Float, nullable=False), #", "db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('child_uuid',", "Column('parallel_run_quota', Integer, nullable=False), # Number of parallel jobs allowed Column('time_used',", "ForeignKey(user.c.user_id), nullable=False), Column('scopes', Text, nullable=False), Column('code', String(100), nullable=False), Column('expires', DateTime,", "ForeignKey, Index, MetaData, Table, UniqueConstraint from sqlalchemy.types import ( BigInteger,", "the worksheet created before v0.5.31; Set to current_timestamp by default", "will automatically perform conversions to and from UTF-8 # encoding,", "'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('group_uuid', String(63), ForeignKey(group.c.uuid),", "nullable=False), ) # A permission value is one of the", "all sockets currently allocated to each worker. worker_socket = Table(", "Enum(\"code\", \"token\"), nullable=False), Column('scopes', Text, nullable=False), # comma-separated list of", "etc. worksheet = Table( 'worksheet', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"),", "or feedback. chat = Table( 'chat', db_metadata, Column( 'id', BigInteger().with_variant(Integer,", "if user has never logged in Column( 'is_active', Boolean, nullable=False,", "nullable=False), # comma-separated list of allowed scopes Column('redirect_uris', Text, nullable=False),", "# Store information about the dependencies available on each worker.", "query? 
Column('sender_user_id', String(63), nullable=True), # Who sent it? Column('recipient_user_id', String(63),", "Column('metadata_value', Text, nullable=False), Index('metadata_kv_index', 'metadata_key', 'metadata_value', mysql_length=63), ) # For", "String(63), ForeignKey(bundle.c.uuid), nullable=False), # Permissions encoded as integer (see below)", "already used Column('disk_quota', Float, nullable=False), # Number of bytes allowed", "Store information about workers. worker = Table( 'worker', db_metadata, Column('user_id',", "String(255, convert_unicode=True), nullable=True), Column('url', String(255, convert_unicode=True), nullable=True), # Quotas Column('time_quota',", "nullable=False), # comma-separated list of allowed redirect URIs UniqueConstraint('client_id', name='uix_1'),", "String(63), ForeignKey(worksheet.c.uuid), nullable=False), # A worksheet item is either: #", "# bundles and worksheets not (yet) in the system. Column('bundle_uuid',", "nullable=False), ) # Permissions for worksheets group_object_permission = Table( 'group_object_permission',", "Number of parallel jobs allowed Column('time_used', Float, nullable=False), # Number", "will be NULL except for run bundles. Column('command', Text, nullable=True),", "Table( 'oauth2_auth_code', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True,", "the chat? Column( 'worksheet_uuid', String(63), nullable=True ), # What is", "), Column('uuid', String(63), nullable=False), Column('name', String(255), nullable=False), Column('owner_id', String(255), nullable=True),", "nullable=False), ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']), # Serialized list of dependencies", "Column('memory_bytes', BigInteger, nullable=False), # Total memory of worker. 
Column('free_disk_bytes', BigInteger,", "TODO: make this nullable Column('type', String(20), nullable=False), Column('sort_key', Integer, nullable=True),", "Column( 'worksheet_uuid', String(63), nullable=True ), # What is the id", "# What is the id of the worksheet that the", "Index('bundle_data_hash_index', 'data_hash'), Index('state_index', 'state'), # Needed for the bundle manager.", "it is. Column('is_anonymous', Boolean, nullable=False, default=False), Column( 'date_created', DateTime ),", "RFC3696/5321 Column( 'notifications', Integer, nullable=False, default=NOTIFICATIONS_GENERAL ), # Which emails", "Set to current_timestamp by default UniqueConstraint('uuid', name='uix_1'), Index('worksheet_name_index', 'name'), Index('worksheet_owner_index',", "# Number of bytes already used Index('user_user_id_index', 'user_id'), Index('user_user_name_index', 'user_name'),", "conversions to and from UTF-8 # encoding, or use appropriate", "of seconds already used Column('disk_quota', Float, nullable=False), # Number of", "nullable=False, unique=True ), # Length of 254 to be compliant", "nullable=False), Column('sort_key', Integer, nullable=True), Index('worksheet_item_worksheet_uuid_index', 'worksheet_uuid'), Index('worksheet_item_bundle_uuid_index', 'bundle_uuid'), Index('worksheet_item_subworksheet_uuid_index', 'subworksheet_uuid'),", "a worksheet object Column('object_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False), # Permissions encoded", "# Stores (email) verification keys user_verification = Table( 'user_verification', db_metadata,", "String(63), ForeignKey(user.c.user_id), nullable=False), Column('worker_id', String(127), nullable=False), ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']),", "String(63), ForeignKey(user.c.user_id), nullable=True), Column( 'grant_type', Enum(\"authorization_code\", \"password\", \"client_credentials\", \"refresh_token\"), nullable=False,", "nullable=False), # A worksheet item is 
either: # - type", "users. user = Table( 'user', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"),", "# Whether worker runs bundles if and only if they", "Column('run_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Index('uuid_index', 'run_uuid'), ) # Store information", "# Whether a user is able to modify this group.", "ForeignKey(oauth2_client.c.client_id), nullable=False), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('scopes', Text, nullable=False), Column('code',", "Column('client_id', String(63), ForeignKey(oauth2_client.c.client_id), nullable=False), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('scopes', Text,", "information Column('user_id', String(63), nullable=False), Column('user_name', String(63), nullable=False, unique=True), Column( 'email',", "disk space on worker. Column( 'checkin_time', DateTime, nullable=False ), #", "BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('client_id', String(63), ForeignKey(oauth2_client.c.client_id), nullable=False),", "primary_key=True, nullable=False, autoincrement=True, ), Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False), Column('tag', String(63),", "Column('is_superuser', Boolean, nullable=False, default=False), Column('password', String(128), nullable=False), # Additional information", "), # Whether worker runs bundles if and only if", "String(63), ForeignKey(worksheet.c.uuid), nullable=False), # Permissions encoded as integer (see below)", "nullable=False ), # Number of jobs allowed to run on", "(yet) in the system. Column('bundle_uuid', String(63), nullable=True), Column('subworksheet_uuid', String(63), nullable=True),", "nullable=False), # Number of parallel jobs allowed Column('time_used', Float, nullable=False),", "user send this query? 
Column('sender_user_id', String(63), nullable=True), # Who sent", "Column('message', Text, nullable=False), # What's the content of the chat?", "# - type = bundle (bundle_uuid != null) # -", "Quotas Column('time_quota', Float, nullable=False), # Number of seconds allowed Column('parallel_run_quota',", "'owner_id'), ) user_group = Table( 'user_group', db_metadata, Column( 'id', BigInteger().with_variant(Integer,", "), # Basic information Column('user_id', String(63), nullable=False), Column('user_name', String(63), nullable=False,", "\"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), # Primary key Column('time', DateTime,", "nullable=True), Column('subworksheet_uuid', String(63), nullable=True), Column('value', Text, nullable=False), # TODO: make", "bytes already used Index('user_user_id_index', 'user_id'), Index('user_user_name_index', 'user_name'), UniqueConstraint('user_id', name='uix_1'), )", "worksheets group_object_permission = Table( 'group_object_permission', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"),", "Text, nullable=False), # comma-separated list of allowed redirect URIs UniqueConstraint('client_id',", "filesystem. Column( 'tag_exclusive', Boolean, nullable=False ), # Whether worker runs", "'oauth2_auth_code', db_metadata, Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ),", "Reference to a bundle Column('object_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), # Permissions", "currently allocated to each worker. worker_socket = Table( 'worker_socket', db_metadata,", "objects for the CodaLab bundle system tables. \"\"\" # TODO:", "nullable=False), # What's the content of the chat? 
Column( 'worksheet_uuid',", "Column( 'id', BigInteger().with_variant(Integer, \"sqlite\"), primary_key=True, nullable=False, autoincrement=True, ), Column('group_uuid', String(63),", "target = (parent_uuid, parent_path) bundle_dependency = Table( 'bundle_dependency', db_metadata, Column(", "Column('socket_id', Integer, primary_key=True, nullable=False), ) # Store information about the", "= bundle (bundle_uuid != null) # - type = worksheet", "primary_key=True, nullable=False, autoincrement=True, ), Column('uuid', String(63), nullable=False), Column('bundle_type', String(63), nullable=False),", "Length of 254 to be compliant with RFC3696/5321 Column( 'notifications',", "nullable=True), UniqueConstraint('uuid', name='uix_1'), Index('group_name_index', 'name'), Index('group_owner_id_index', 'owner_id'), ) user_group =", "to null if the worksheet created before v0.5.31; Set to", "of the worksheet that the sender is on? Column( 'bundle_uuid',", "the id of the bundle that the sender is on?", "nullable=True ), # When the worksheet was frozen (forever immutable)", "# Number of jobs allowed to run on worker. Column('is_terminating',", "questions or feedback. chat = Table( 'chat', db_metadata, Column( 'id',", "ForeignKey(user.c.user_id), primary_key=True, nullable=False), Column('worker_id', String(127), primary_key=True, nullable=False), ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id',", "will be NULL if the bundle's value is still being" ]
[ "protobuf options: host: description: - Target host FQDN or IP", "not in cPath: # List entry does not exist =>", "seconds, the amount of time to wait when trying to", "input['subscription']: item['path'] = self._encodeXpath(item['path']) # Extract duration from input attributes", "TLS subject or subjectAltName (only in the case secure connections", "to interact with the gNMI service. - OpenConfig gNMI specification", "data that is requested: ALL, CONFIG, STATE prefix (str): Path", "cases, when the FQDN or IPv4 address that is used", "from the CLI via the C(--user) or C(-u) options. ini:", "This method will establish the persistent gRPC connection, if not", "vars: - name: ansible_root_certificates_file certificate_chain_file: description: - The PEM encoded", "connection to host %s already exist' % self._target) return grpcEnv", "if gnmi_pb2.Encoding.Value('JSON_IETF') in response.supported_encodings: self._encoding = 'JSON_IETF' elif gnmi_pb2.Encoding.Value('JSON') in", "= os.path.join(entry, filename) break if os.path.isfile(filename): try: with open(filename, 'rb')", "for gRPC To use gRPC connections in Ansible one (or", "dict) else val for val in aDict[key].values()] del aDict[key] else:", "without content (no value) => skip continue elif ('path' in", "be a dict\") for key in grpcEnv: if grpcEnv[key]: os.environ[key]", "as e: if e.code() == grpc.StatusCode.DEADLINE_EXCEEDED: if input['mode'] == 'ONCE':", "return before the timeout exceed, an error is generated and", "dKeys: entry['key'] = dKeys mypath.append(entry) return {'elem': mypath} return {}", "Reads a binary certificate/key file Parameters: optionName(str): used to read", "Configures the user password used to authenticate to the remote", "has_pipelining = True def __init__(self, play_context, new_stdin, *args, **kwargs): super(Connection,", "environment settings are not passed to the client process that", "excpetions \"\"\" path = self.get_option('certificate_path') if not path: path =", "\"\"\" Encodes 
XPATH to dict representation that allows conversion to", "has established successfully') def close(self): \"\"\" Closes the active gRPC", "self.queue_message('v', 'Starting secure gRPC connection') creds = grpc.ssl_channel_credentials(**certs) self._channel =", "\"\"\" Connection plugin for gRPC To use gRPC connections in", "x in eKeys) if dKeys: entry['key'] = dKeys mypath.append(entry) return", "to host %s already exist' % self._target) return grpcEnv =", "gnmi short_description: Provides a persistent gRPC connection for gNMI API", "update break elif 'update' not in entry: # Ignore: entry", "Path Element exists => Change Context cPath = cPath[eleName] if", "update (list): Path/Value pairs to be updated replace (list): Path/Value", "= response.gNMI_version self._yangModels = response.supported_models if gnmi_pb2.Encoding.Value('JSON_IETF') in response.supported_encodings: self._encoding", "in elem['key'].items(): tmp += \"[%s=%s]\" % (k, v) result.append(tmp) return", "(str): Mode of subscription (STREAM, ONCE) subscription (list of dict):", "by the gRPC server. vars: - name: ansible_grpc_environment persistent_connect_timeout: type:", "subject or subjectAltName (only in the case secure connections are", "exist or read excpetions \"\"\" path = self.get_option('certificate_path') if not", "the gNMI service. - OpenConfig gNMI specification https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md - gNMI", "(XPATH syntax) update (list): Path/Value pairs to be updated replace", "C(GRPC_VERBOSITY) and C(GRPC_TRACE) to setup gRPC logging. 
Need to add", "Must be either JSON or JSON_IETF - If not provided,", "else: if isinstance(aDict[key], dict): aDict[key] = self._dictToList(aDict[key]) return aDict def", "- section: grpc_connection key: private_key_file env: - name: ANSIBLE_PRIVATE_KEY_FILE vars:", "\"\" for elem in path['elem']: tmp = elem['name'] if 'key'", "(list): Paths (str) to be deleted Returns: str: SetResponse message", "raise AnsibleConnectionFailure(\"%s\" % e) output = json_format.MessageToDict(response) output['timestamp'] = datetime.datetime.fromtimestamp(float(output['timestamp'])/1000000000).isoformat()", "(fallback). Parameters: prefix (str): Path prefix that is added to", "eKeys) if dKeys: entry['key'] = dKeys mypath.append(entry) return {'elem': mypath}", "syntax) paths (list): List of paths (str) to be captured", "self._dictToList(aDict[key]) return aDict def _mergeToSingleDict(self, rawData): result = {} for", "- name: ANSIBLE_GNMI_ENCODING vars: - name: ansible_gnmi_encoding grpc_channel_options: description: -", "def _dictToList(self, aDict): for key in aDict.keys(): if key.startswith('___'): aDict[key[3:]]", "to dict representation that allows conversion to gnmi_pb.Path object Parameters:", "requirements: - grpcio - protobuf options: host: description: - Target", "(bool): Aggregate elements marked as eligible for aggregation Returns: str:", "parameters to match specification for gNMI SetRequest if 'prefix' in", "elif 'timestamp' not in entry: # Subscribe response, enter update", "= json_format.MessageToDict(msg) if 'syncResponse' in entry: # Ignore: SyncResponse is", "added to all paths (XPATH syntax) mode (str): Mode of", "e.code() == grpc.StatusCode.DEADLINE_EXCEEDED: if input['mode'] == 'ONCE': raise AnsibleConnectionFailure(\"gNMI ONCE", "has more than just a single entry, # we need", "filename.startswith('~'): filename = os.path.expanduser(filename) if not filename.startswith('/'): for entry in", "value result = self._decodeVal(_upd['val']) prfx = result continue # If", 
"if port is None else '%s:%d' % (host, port) self._timeout", "prfx_elements cPath = result else: # No path at all,", "name: ansible_host port: type: int description: - Specifies the port", "path string using XPATH syntax Returns: (dict): path dict using", "gnmi_pb2.CapabilityRequest() auth = self._login_credentials try: response = self._stub.Capabilities(request, metadata=auth) except", "section: persistent_connection key: log_messages env: - name: ANSIBLE_PERSISTENT_LOG_MESSAGES vars: -", "https://raw.githubusercontent.com/openconfig/gnmi/master/proto/gnmi/gnmi.proto - This connection plugin provides a persistent communication channel", "os.path.isfile(os.path.join(entry, filename)): filename = os.path.join(entry, filename) break if os.path.isfile(filename): try:", "Get, Set, Subscribe) requirements: - grpcio - protobuf options: host:", "\"\"\" Encodes value to dict representation that allows conversion to", "and ('elem' in entry['prefix']): prfx_elements = entry['prefix']['elem'] else: prfx_elements =", "'___'+eleName # Path Element has key => must be list()", "continue elif ('path' in _upd) and ('elem' in _upd['path']): path_elements", "in rawData: entry = json_format.MessageToDict(msg) if 'syncResponse' in entry: #", "gnmi_pb2.CapabilityRequest() response = self._stub.Capabilities(request, metadata=self._login_credentials) self.queue_message('v', 'CapabilityRequest() succeeded') self._gnmiVersion =", "json.loads(base64.b64decode(val['jsonVal'])) else: raise AnsibleConnectionFailure(\"Ansible gNMI plugin does not support encoding", "- Set C(HTTPS_PROXY) to specify your proxy settings (if needed).", "set input = dict(filter(lambda x: x[1], kwargs.items())) # Backup options", "if eleKey not in prfx: # List entry does not", "successfully') def close(self): \"\"\" Closes the active gRPC connection to", "return json.loads(base64.b64decode(val['jsonIetfVal'])) elif 'jsonVal' in val: return json.loads(base64.b64decode(val['jsonVal'])) else: raise", "path_elements is 
the leaf element # that needs to be", "username of the logged in user. - Can be configured", "gNMI stub will be created. To get visibility about gNMI", "allows conversion to gnmi_pb.Path object Parameters: xpath (str): path string", "established successfully') def close(self): \"\"\" Closes the active gRPC connection", "not exist => Create cPath[eleName] = {} cPath = cPath[eleName]", "gNMI specification https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md - gNMI API https://raw.githubusercontent.com/openconfig/gnmi/master/proto/gnmi/gnmi.proto - This connection", "entry: result = {} update = entry['update'] if 'prefix' in", "a SSL-enabled channel, if the value is None it reads", "self.get_option('password')) ] host = self.get_option('host') port = self.get_option('port') self._target =", "= self.get_option('host') port = self.get_option('port') self._target = host if port", "% (k, v) result.append(tmp) return '/'.join(result) def _encodeVal(self, data): \"\"\"", "converted into JSON format \"\"\" request = gnmi_pb2.CapabilityRequest() auth =", "filename from options Returns: File content Raises: AnsibleConnectionFailure: file does", "=> Create cPath[eleKey] = elem['key'] cPath = cPath[eleKey] else: #", "= self._login_credentials try: response = self._stub.Set(request, metadata=auth) except grpc.RpcError as", "that establishes the gRPC connection. 
- Set C(GRPC_VERBOSITY) and C(GRPC_TRACE)", "def _encodeVal(self, data): \"\"\" Encodes value to dict representation that", "not in entry: # Ignore: entry without updates break elif", "= cPath[eleName] else: # Path Element does not exist =>", "# that needs to be created/updated leaf_elem = path_elements[-1] if", "v) result.append(tmp) return '/'.join(result) def _encodeVal(self, data): \"\"\" Encodes value", "gRPC sub-plugins use the method `register_service()` with the name of", "input['type'].upper() input['encoding'] = self._encoding_value request = json_format.ParseDict(input, gnmi_pb2.GetRequest()) auth =", "it could create a security vulnerability by logging sensitive information", "# SPDX-License-Identifier: BSD-3-Clause # from __future__ import (absolute_import, division, print_function)", "filename = os.path.join(entry, filename) break if os.path.isfile(filename): try: with open(filename,", "return \"\" for elem in path['elem']: tmp = elem['name'] if", "elem['name'] if 'key' in elem: for k, v in elem['key'].items():", "device is completed, the connection will fail. default: 5 ini:", "= dict(x.split('=', 1) for x in eKeys) if dKeys: entry['key']", "is the leaf element # that needs to be created/updated", "in user. - Can be configured from the CLI via", "to the persistent messages log (see below). 
- Set C(HTTPS_PROXY)", "= elem['key'] cPath = cPath[eleKey] else: # Path Element does", "\"[%s=%s]\" % (k, v) result.append(tmp) return '/'.join(result) def _encodeVal(self, data):", "# Ignore: entry without updates break elif 'timestamp' not in", "x[1], kwargs.items())) # Adjust input parameters to match specification for", "= {} for entry in rawData: if 'syncResponse' in entry", "f.read() except Exception as exc: raise AnsibleConnectionFailure( 'Failed to read", "dict\") options = options.items() if certs['root_certificates'] or certs['private_key'] or certs['certificate_chain']:", "__init__(self, play_context, new_stdin, *args, **kwargs): super(Connection, self).__init__( play_context, new_stdin, *args,", "- section: grpc_connection key: certificate_chain_file env: - name: ANSIBLE_CERTIFICATE_CHAIN_FILE vars:", "'%s:%d' % (host, port) self._timeout = self.get_option('persistent_command_timeout') certs = {}", "# Licensed under the BSD 3 Clause license # SPDX-License-Identifier:", "device when first establishing the gRPC connection. 
vars: - name:", "root certificates from a default location chosen by gRPC at", "name}__{method name} to call a specific method of that sub-plugin.", "'jsonVal' in val: return json.loads(base64.b64decode(val['jsonVal'])) else: raise AnsibleConnectionFailure(\"Ansible gNMI plugin", "serialization is automatically determined based on the remote device capabilities.", "self._encodeVal(entry['val']) request = json_format.ParseDict(input, gnmi_pb2.SetRequest()) auth = self._login_credentials try: response", "\"gRPC connection established for user %s to %s\" % (self.get_option('remote_user'),", "as gnmi_pb.TypedValue object Returns: (dict): dict using gnmi_pb.TypedValue structure for", "prfx = prfx[eleName] prfx[eleKey] = elem['key'] prfx = prfx[eleKey] else:", "self.connected: self.queue_message('v', 'gRPC connection to host %s already exist' %", "gRPC connection to the target host Parameters: None Returns: None", "input['prefix'] = self._encodeXpath(input['prefix']) if 'path' in input: input['path'] = [self._encodeXpath(path)", "remote device is completed, the connection will fail. 
default: 5", "entry: # Subscribe response, enter update context entry = entry['update']", "ANSIBLE_PERSISTENT_COMMAND_TIMEOUT vars: - name: ansible_command_timeout persistent_log_messages: type: boolean description: -", "% self._encoding) self._encoding_value = gnmi_pb2.Encoding.Value(self._encoding) self._connected = True self.queue_message('v', 'gRPC/gNMI", "if eleKey not in cPath: # List entry does not", "to gnmi_pb.TypedValue object Parameters: data (ANY): data to be encoded", "XPATH syntax \"\"\" result = [] if 'elem' not in", "used for gNMI communication - Must be either JSON or", "- Target host FQDN or IP address to establish gRPC", "- name: ansible_command_timeout persistent_log_messages: type: boolean description: - This flag", "'Starting secure gRPC connection') creds = grpc.ssl_channel_credentials(**certs) self._channel = grpc.secure_channel(self._target,", "HAS_GRPC: raise AnsibleError( \"grpcio is required to use gRPC connection", "----------------------------------------------------------------------- def _encodeXpath(self, xpath='/'): \"\"\" Encodes XPATH to dict representation", "user password used to authenticate to the remote device when", "(no value) => skip continue elif ('path' in _upd) and", "if 'prefix' in input: input['prefix'] = self._encodeXpath(input['prefix']) if 'delete' in", "content Raises: AnsibleConnectionFailure: file does not exist or read excpetions", "input['backup_options'] # Adjust input parameters to match specification for gNMI", "skip continue elif ('path' in _upd) and ('elem' in _upd['path']):", "metadata=auth) if input['mode'] == 'ONCE': responses = [json_format.MessageToDict(response) for response", "specification https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md - gNMI API https://raw.githubusercontent.com/openconfig/gnmi/master/proto/gnmi/gnmi.proto - This connection plugin", "PEM encoded private key file used to authenticate to the", "gNMI API service description: - This gRPC plugin 
provides methods", "options=options) else: self.queue_message('v', 'Starting insecure gRPC connection') self._channel = grpc.insecure_channel(self._target,", "sub-plugin. There is a wrapper available that consumes the attribute", "does not exist or read excpetions \"\"\" path = self.get_option('certificate_path')", "\"\")) if not HAS_PROTOBUF: raise AnsibleError( \"protobuf is required to", "else: # No path at all, replace the objecttree with", "= prfx elif prfx_elements: path_elements = prfx_elements cPath = result", "ANSIBLE_CERTIFICATE_CHAIN_FILE vars: - name: ansible_certificate_chain_file certificate_path: description: - Folder to", "= self._stub.Capabilities(request, metadata=auth) except grpc.RpcError as e: raise AnsibleConnectionFailure(\"%s\" %", "kwargs.items())) # Backup options are not to be used in", "channel. If the value is None, no certificate chain is", "grpcio - protobuf options: host: description: - Target host FQDN", "the remote device when the gRPC connection is first established.", "- name: ansible_password - name: ansible_ssh_pass private_key_file: description: - The", "one (or more) sub-plugin(s) for the required gRPC service(s) must", "be captured Returns: str: GetResponse message converted into JSON format", "Executes a gNMI Get request Encoding that is used for", "remote device when first establishing the gRPC connection. vars: -", "certificate. 
This is needed, because the TLS validates hostname or", "= self._login_credentials try: response = self._stub.Capabilities(request, metadata=auth) except grpc.RpcError as", "input: del input['backup'] if 'backup_options' in input: del input['backup_options'] #", "_upd['path']): path_elements = _upd['path']['elem'] cPath = prfx elif prfx_elements: path_elements", "('prefix' in entry) and ('elem' in entry['prefix']): prfx_elements = entry['prefix']['elem']", "prfx[eleKey] else: # Path Element does not exist => Create", "authenticate to the remote device when first establishing the grpc", "section: grpc_connection key: gnmi_encoding env: - name: ANSIBLE_GNMI_ENCODING vars: -", "Closes the active gRPC connection to the target host Parameters:", "HAS_PROTOBUF = False from ansible.errors import AnsibleConnectionFailure, AnsibleError from ansible.plugins.connection", "error is generated and the connection is closed. default: 300", "gRPC logging. Need to add code for log forwarding of", "type: int description: - Configures the default timeout value (in", "listens for connections when establishing the gRPC connection. If None", "= False from ansible.errors import AnsibleConnectionFailure, AnsibleError from ansible.plugins.connection import", "Returns: None \"\"\" if self._connected: self.queue_message('v', \"Closing gRPC connection to", "- name: ansible_port remote_user: description: - The username used to", "dict): raise AnsibleConnectionFailure(\"grpc_channel_options must be a dict\") options = options.items()", "specification for gNMI SetRequest if 'prefix' in input: input['prefix'] =", "mode (str): Mode of subscription (STREAM, ONCE) subscription (list of", "gnmiSubscribe(self, *args, **kwargs): \"\"\" Executes a gNMI Subscribe request Encoding", "self.readFile('private_key_file') options = self.get_option('grpc_channel_options') if options: if not isinstance(options, dict):", "file used to create a SSL-enabled channel. 
If the value", "ansible.plugins.connection import ensure_connect from google.protobuf import json_format from ansible_collections.nokia.grpc.plugins.connection.pb import", "to define environment settings specific to gRPC - The standard", "wrappers for gNMI requests (Capabilities, Get, Set, Subscribe) requirements: -", "if not already done. After this, the gNMI stub will", "entry['prefix']): prfx_elements = entry['prefix']['elem'] else: prfx_elements = [] for elem", "input: input['prefix'] = self._encodeXpath(input['prefix']) if 'delete' in input: input['delete'] =", "just a single entry, # we need to create/navigate to", "Create prfx[eleKey] = elem['key'] prfx = prfx[eleKey] else: # Path", "- name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT vars: - name: ansible_command_timeout persistent_log_messages: type: boolean", "_connect(self): \"\"\" Establish gRPC connection to remote node and create", "RPC timed out, which is okay pass else: raise AnsibleConnectionFailure(\"%s\"", "device in the ansible log file. For this option to", "= self._stub.Capabilities(request, metadata=self._login_credentials) self.queue_message('v', 'CapabilityRequest() succeeded') self._gnmiVersion = response.gNMI_version self._yangModels", "response in responses] output = self._mergeToSingleDict(responses) else: for update in", "Type of data that is requested: ALL, CONFIG, STATE prefix", "self._connected = True self.queue_message('v', 'gRPC/gNMI connection has established successfully') def", "updates_only (bool): Send only updates to initial state allow_aggregation (bool):", "(bool): Send only updates to initial state allow_aggregation (bool): Aggregate", "**kwargs): \"\"\" Executes a gNMI Subscribe request Encoding that is", "remote devices using gRPC including the underlying transport (TLS). -", "when first establishing the gRPC connection. 
vars: - name: ansible_password", "response, enter update context entry = entry['update'] else: # Get", "- name: ansible_grpc_channel_options grpc_environment: description: - Key/Value pairs (dict) to", "call methods provided by that sub-plugin. There is a wrapper", "\"Please run 'pip install protobuf'\" ) if not HAS_GRPC: raise", "use gRPC connections in Ansible one (or more) sub-plugin(s) for", "if 'replace' in input: for entry in input['replace']: entry['path'] =", "key: certificate_chain_file env: - name: ANSIBLE_CERTIFICATE_CHAIN_FILE vars: - name: ansible_certificate_chain_file", "xpath.strip('\\t\\n\\r /') if xpath: path_elements = re.split('''/(?=(?:[^\\[\\]]|\\[[^\\[\\]]+\\])*$)''', xpath) for e", "used to connect to the device is different from the", "TLS ciphers do not match what is offered by the", "self._encodeXpath(entry['path']) entry['val'] = self._encodeVal(entry['val']) if 'replace' in input: for entry", "match specification for gNMI SubscribeRequest if 'mode' in input: input['mode']", "ansible_certificate_chain_file certificate_path: description: - Folder to search for certificate and", "be either JSON or JSON_IETF - If not provided, will", "to setup gRPC logging. 
Need to add code for log", "used - gRPC reference U(https://grpc.github.io/grpc/core/group__grpc__arg__keys.html) - Provide the I(ssl_target_name_override) option", "self._stub.Get(request, metadata=auth) except grpc.RpcError as e: raise AnsibleConnectionFailure(\"%s\" % e)", "entry = json_format.MessageToDict(msg) if 'syncResponse' in entry: # Ignore: SyncResponse", "connection plugin provides a persistent communication channel to remote devices", "[] xpath = xpath.strip('\\t\\n\\r /') if xpath: path_elements = re.split('''/(?=(?:[^\\[\\]]|\\[[^\\[\\]]+\\])*$)''',", "Extract duration from input attributes if 'duration' in input: duration", "\"Closing gRPC connection to target host\") self._channel.close() super(Connection, self).close() #", "of paths (str) to be captured Returns: str: GetResponse message", "all paths (XPATH syntax) update (list): Path/Value pairs to be", "is sent after initial update break elif 'update' not in", "type. \" + \"Please run 'pip install grpcio'\" ) self._connected", "can call methods provided by that sub-plugin. 
There is a", "[] if 'elem' not in path: return \"\" for elem", "dict): Subscription specification (path, interval, submode) duration (int): timeout, to", "\"\"\" mypath = [] xpath = xpath.strip('\\t\\n\\r /') if xpath:", "- section: grpc_connection key: root_certificates_file env: - name: ANSIBLE_ROOT_CERTIFICATES_FILE vars:", "or certs['certificate_chain']: self.queue_message('v', 'Starting secure gRPC connection') creds = grpc.ssl_channel_credentials(**certs)", "ansible_grpc_environment persistent_connect_timeout: type: int description: - Configures, in seconds, the", "certs['root_certificates'] = self.readFile('root_certificates_file') certs['certificate_chain'] = self.readFile('certificate_chain_file') certs['private_key'] = self.readFile('private_key_file') options", "gnmi_encoding: description: - Encoding used for gNMI communication - Must", "path (dict): decoded gnmi_pb2.Path object Returns: (str): path string using", "mypath.append(entry) return {'elem': mypath} return {} def _decodeXpath(self, path): \"\"\"", "at all, replace the objecttree with value result = self._decodeVal(_upd['val'])", "to specify your proxy settings (if needed). 
- Set C(GRPC_SSL_CIPHER_SUITES)", "will establish the persistent gRPC connection, if not already done.", "- section: defaults key: remote_user env: - name: ANSIBLE_REMOTE_USER vars:", "expires before the connection to the remote device is completed,", "env: - name: ANSIBLE_ROOT_CERTIFICATES_FILE vars: - name: ansible_root_certificates_file certificate_chain_file: description:", "vars: - name: ansible_grpc_environment persistent_connect_timeout: type: int description: - Configures,", "in _upd: # requested path without content (no value) =>", "options to be used - gRPC reference U(https://grpc.github.io/grpc/core/group__grpc__arg__keys.html) - Provide", "'ONCE': raise AnsibleConnectionFailure(\"gNMI ONCE Subscription timed out\") else: # RPC", "False from ansible.errors import AnsibleConnectionFailure, AnsibleError from ansible.plugins.connection import NetworkConnectionBase", "connections when establishing the gRPC connection. If None only the", "boolean description: - This flag will enable logging the command", "ini: - section: grpc_connection key: root_certificates_file env: - name: ANSIBLE_ROOT_CERTIFICATES_FILE", "understand the security implications of enabling this option as it", "re.split('''/(?=(?:[^\\[\\]]|\\[[^\\[\\]]+\\])*$)''', xpath) for e in path_elements: entry = {'name': e.split(\"[\",", "representation converted from gnmi_pb.TypedValue object Parameters: val (dict): decoded gnmi_pb.TypedValue", "input['path']] if 'type' in input: input['type'] = input['type'].upper() input['encoding'] =", "def _simplifyUpdates(self, rawData): for msg in rawData: entry = json_format.MessageToDict(msg)", "key => must be list() if eleName in prfx: #", "description: - Encoding used for gNMI communication - Must be", "to all paths (XPATH syntax) mode (str): Mode of subscription", "cPath = cPath[eleName] else: # Path Element does not exist", "metadata=auth) except grpc.RpcError as e: raise AnsibleConnectionFailure(\"%s\" % e) output", "'backup' in input: del 
input['backup'] if 'backup_options' in input: del", "file used to create a SSL-enabled channel, if the value", "content (no value) => skip continue elif ('path' in _upd)", "# Adjust input parameters to match specification for gNMI SubscribeRequest", "a RPC. If the RPC does not return before the", "connections are used). The option must be provided in cases,", "elem['key'] cPath = cPath[eleKey] else: # Path Element does not", "remote node and create gNMI stub. This method will establish", "from ansible.plugins.connection import NetworkConnectionBase from ansible.plugins.connection import ensure_connect from google.protobuf", "exist => Create prfx[eleName] = {} prfx = prfx[eleName] prfx[eleKey]", "in rawData: if 'syncResponse' in entry and entry['syncResponse']: # Ignore:", "created/updated leaf_elem = path_elements[-1] if 'key' in leaf_elem: eleKey =", "self._encodeVal(entry['val']) if 'replace' in input: for entry in input['replace']: entry['path']", "consumes the attribute name {sub-plugin name}__{method name} to call a", "**kwargs ) self._task_uuid = to_text(kwargs.get(\"task_uuid\", \"\")) if not HAS_PROTOBUF: raise", "'JSON_IETF': return {'jsonIetfVal': value} else: return {'jsonVal': value} def _decodeVal(self,", "else: # Path Element hasn't key => must be dict()", "e: raise AnsibleConnectionFailure(\"%s\" % e) output = json_format.MessageToDict(response) output['timestamp'] =", "ANSIBLE_GNMI_ENCODING vars: - name: ansible_gnmi_encoding grpc_channel_options: description: - Key/Value pairs", "for entry in input['replace']: entry['path'] = self._encodeXpath(entry['path']) entry['val'] = self._encodeVal(entry['val'])", "reads the root certificates from a default location chosen by", "sub-plugin(s) for the required gRPC service(s) must be loaded. To", "(list): Path/Value pairs to be replaced delete (list): Paths (str)", "match what is offered by the gRPC server. 
vars: -", "self.get_option(optionName) if filename: if filename.startswith('~'): filename = os.path.expanduser(filename) if not", "cPath[eleKey] = self._decodeVal(_upd['val']) else: cPath[leaf_elem['name']] = self._decodeVal(_upd['val']) return self._dictToList(result) def", "ansible.plugins.connection import NetworkConnectionBase from ansible.plugins.connection import ensure_connect from google.protobuf import", "\" + \"Please run 'pip install protobuf'\" ) if not", "'update' in update: result['values'] = {self._decodeXpath(u['path']): self._decodeVal(u['val']) for u in", "to be set to a file path with write access.", "okay pass else: raise AnsibleConnectionFailure(\"%s\" % e) return json.dumps(output, indent=4).encode()", "gRPC service(s) must be loaded. To load gRPC sub-plugins use", "dict(x.split('=', 1) for x in eKeys) if dKeys: entry['key'] =", "replaced delete (list): Paths (str) to be deleted Returns: str:", "OpenConfig gNMI specification https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md - gNMI API https://raw.githubusercontent.com/openconfig/gnmi/master/proto/gnmi/gnmi.proto - This", "_simplifyUpdates(self, rawData): for msg in rawData: entry = json_format.MessageToDict(msg) if", "ALL, CONFIG, STATE prefix (str): Path prefix that is added", "U(https://grpc.github.io/grpc/core/group__grpc__arg__keys.html) - Provide the I(ssl_target_name_override) option to override the TLS", "= json.dumps(leaf_elem['key']) eleName = '___'+leaf_elem['name'] if eleName not in cPath:", "gNMI plugin does not support encoding for value: %s\" %", "be used in gNMI SetRequest if 'backup' in input: del", "methods provided by that sub-plugin. 
There is a wrapper available", "grpc_connection key: certificate_chain_file env: - name: ANSIBLE_CERTIFICATE_CHAIN_FILE vars: - name:", "channel options to be used - gRPC reference U(https://grpc.github.io/grpc/core/group__grpc__arg__keys.html) -", "section: grpc_connection key: root_certificates_file env: - name: ANSIBLE_ROOT_CERTIFICATES_FILE vars: -", "= result else: # No path at all, replace the", "- Must be either JSON or JSON_IETF - If not", "_encodeXpath(self, xpath='/'): \"\"\" Encodes XPATH to dict representation that allows", "self._channel = grpc.secure_channel(self._target, creds, options=options) else: self.queue_message('v', 'Starting insecure gRPC", "for elem in path['elem']: tmp = elem['name'] if 'key' in", "- section: persistent_connection key: log_messages env: - name: ANSIBLE_PERSISTENT_LOG_MESSAGES vars:", "= path_elements[-1] if 'key' in leaf_elem: eleKey = json.dumps(leaf_elem['key']) eleName", "plugin has implemented suppport for JSON_IETF (preferred) and JSON (fallback).", "eleName not in cPath: cPath[eleName] = {} cPath = cPath[eleName]", "value expires before the connection to the remote device is", "certs['certificate_chain'] = self.readFile('certificate_chain_file') certs['private_key'] = self.readFile('private_key_file') options = self.get_option('grpc_channel_options') if", "log forwarding of gRPC related log messages to the persistent", "host %s already exist' % self._target) return grpcEnv = self.get_option('grpc_environment')", "if 'delete' in input: input['delete'] = [self._encodeXpath(entry) for entry in", "== 'ONCE': responses = [json_format.MessageToDict(response) for response in responses] output", "\"Please run 'pip install grpcio'\" ) self._connected = False def", "certificates from a default location chosen by gRPC at runtime.", "to wait when trying to initially establish a persistent connection.", "connection has established successfully') def close(self): \"\"\" Closes the active", "\"\"\" Executes a gNMI Capabilities 
request Parameters: None Returns: str:", "for elem in prfx_elements: eleName = elem['name'] if 'key' in", "only updates to initial state allow_aggregation (bool): Aggregate elements marked", "Capabilities request Parameters: None Returns: str: gNMI capabilities converted into", "input['encoding'] = self._encoding_value request = json_format.ParseDict(input, gnmi_pb2.GetRequest()) auth = self._login_credentials", "AnsibleConnectionFailure( 'Failed to read cert/keys file %s: %s' % (filename,", "Parameters: None Returns: str: gNMI capabilities converted into JSON format", "when first establishing the grpc connection. ini: - section: grpc_connection", "from kwargs that are not set input = dict(filter(lambda x:", "('path' in _upd) and ('elem' in _upd['path']): path_elements = _upd['path']['elem']", "option to work the 'log_path' ansible configuration option is required", "None Returns: None \"\"\" if self._connected: self.queue_message('v', \"Closing gRPC connection", "exists => Change Context prfx = prfx[eleName] if eleKey not", "ImportError: HAS_PROTOBUF = False from ansible.errors import AnsibleConnectionFailure, AnsibleError from", "play_context, new_stdin, *args, **kwargs): super(Connection, self).__init__( play_context, new_stdin, *args, **kwargs", "not HAS_GRPC: raise AnsibleError( \"grpcio is required to use gRPC", "Returns: (dict): path dict using gnmi_pb2.Path structure for easy conversion", "cPath = cPath[eleName] if eleKey not in cPath: # List", "for gNMI communication - Must be either JSON or JSON_IETF", "device when the gRPC connection is first established. If the", "completed, the connection will fail. 
default: 5 ini: - section:", "- The PEM encoded certificate chain file used to create", "response, keep context pass prfx = result if ('prefix' in", "conversion \"\"\" value = base64.b64encode(json.dumps(data).encode()) if self._encoding == 'JSON_IETF': return", "= json.dumps(elem['key']) eleName = '___'+eleName # Path Element has key", "inventory_hostname vars: - name: ansible_host port: type: int description: -", "paths (XPATH syntax) mode (str): Mode of subscription (STREAM, ONCE)", "section: persistent_connection key: connect_timeout env: - name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT vars: -", "sent after initial update pass elif 'update' in entry: result", "Clause license # SPDX-License-Identifier: BSD-3-Clause # from __future__ import (absolute_import,", "Context cPath = cPath[eleName] if eleKey not in cPath: #", "certs['root_certificates'] or certs['private_key'] or certs['certificate_chain']: self.queue_message('v', 'Starting secure gRPC connection')", "Path Element exists => Change Context cPath = cPath[eleName] else:", "vars: - name: ansible_connect_timeout persistent_command_timeout: type: int description: - Configures", "in cPath: cPath[eleName] = {} cPath = cPath[eleName] cPath[eleKey] =", "is provided in the host certificate. This is needed, because", "if 'prefix' in input: input['prefix'] = self._encodeXpath(input['prefix']) if 'subscription' in", "grpc HAS_GRPC = True except ImportError: HAS_GRPC = False try:", "dKeys = dict(x.split('=', 1) for x in eKeys) if dKeys:", "gNMI stub') self._stub = gnmi_pb2.gNMIStub(self._channel) self._encoding = self.get_option('gnmi_encoding') if not", "messages to the persistent messages log (see below). 
- Set", "in elem: eleKey = json.dumps(elem['key']) eleName = '___'+eleName # Path", "if 'key' in leaf_elem: eleKey = json.dumps(leaf_elem['key']) eleName = '___'+leaf_elem['name']", "- This connection plugin provides a persistent communication channel to", "- Be sure to fully understand the security implications of", "HAS_PROTOBUF: raise AnsibleError( \"protobuf is required to use gRPC connection", "\"\"\" import os import re import json import base64 import", "self._encoding) self._encoding_value = gnmi_pb2.Encoding.Value(self._encoding) self._connected = True self.queue_message('v', 'gRPC/gNMI connection", "Aggregate elements marked as eligible for aggregation Returns: str: Updates", "using gRPC including the underlying transport (TLS). - The plugin", "[self._dictToList(val) if isinstance(val, dict) else val for val in aDict[key].values()]", "- name: ANSIBLE_REMOTE_USER vars: - name: ansible_user password: description: -", "environment in Ansible cannot be used, because those environment settings", "ONCE) subscription (list of dict): Subscription specification (path, interval, submode)", "cPath[eleName] if eleKey not in cPath: # List entry does", "or {} if not isinstance(grpcEnv, dict): raise AnsibleConnectionFailure(\"grpc_environment must be", "C(--user) or C(-u) options. ini: - section: defaults key: remote_user", "Adjust input parameters to match specification for gNMI SetRequest if", "gRPC connection is first established. If the remote_user is not", "not in path: return \"\" for elem in path['elem']: tmp", "output = [] responses = self._stub.Subscribe(iter([request]), duration, metadata=auth) if input['mode']", "= {} cPath = cPath[eleName] # The last entry of", "from input attributes if 'duration' in input: duration = input['duration']", "the I(ssl_target_name_override) option to override the TLS subject or subjectAltName", "a persistent gRPC connection for gNMI API service description: -", "node and create gNMI stub. 
This method will establish the", "for entry in input['delete']] if 'update' in input: for entry", "import datetime try: import grpc HAS_GRPC = True except ImportError:", "= host if port is None else '%s:%d' % (host,", "Executes a gNMI Subscribe request Encoding that is used for", "=> Change Context prfx = prfx[eleName] else: # Path Element", "the connection will fail. default: 5 ini: - section: persistent_connection", "run 'pip install protobuf'\" ) if not HAS_GRPC: raise AnsibleError(", "[ ('username', self.get_option('remote_user')), ('password', self.get_option('password')) ] host = self.get_option('host') port", "ONCE Subscription timed out\") else: # RPC timed out, which", "description: - This flag will enable logging the command executed", "msg in rawData: entry = json_format.MessageToDict(msg) if 'syncResponse' in entry:", "'update' in entry: result = {} update = entry['update'] if", "if not path: path = '/etc/ssl:/etc/ssl/certs:/etc/ca-certificates' filename = self.get_option(optionName) if", "output['prefix'] = self._decodeXpath(output['prefix']) for item in output['response']: item['path'] = self._decodeXpath(item['path'])", "creds, options=options) else: self.queue_message('v', 'Starting insecure gRPC connection') self._channel =", "responses = [json_format.MessageToDict(response) for response in responses] output = self._mergeToSingleDict(responses)", "to create a SSL-enabled channel, if the value is None", "- Encoding used for gNMI communication - Must be either", "_upd in entry['update']: if 'val' not in _upd: # requested", "time to wait when trying to initially establish a persistent", "input['path'] = [self._encodeXpath(path) for path in input['path']] if 'type' in", "(filename, exc) ) else: raise AnsibleConnectionFailure( 'Cert/keys file %s does", "host = self.get_option('host') port = self.get_option('port') self._target = host if", "Parameters: xpath (str): path string using XPATH syntax Returns: (dict):", "paths (str) to be captured Returns: 
str: GetResponse message converted", "a dict\") options = options.items() if certs['root_certificates'] or certs['private_key'] or", "creds = grpc.ssl_channel_credentials(**certs) self._channel = grpc.secure_channel(self._target, creds, options=options) else: self.queue_message('v',", "= self.readFile('private_key_file') options = self.get_option('grpc_channel_options') if options: if not isinstance(options,", "must be loaded. To load gRPC sub-plugins use the method", "certificate/key file Parameters: optionName(str): used to read filename from options", "in cPath: # Path Element exists => Change Context cPath", "self._stub.Set(request, metadata=auth) except grpc.RpcError as e: raise AnsibleConnectionFailure(\"%s\" % e)", "is None, no certificate chain is used. ini: - section:", "name: ansible_port remote_user: description: - The username used to authenticate", "gRPC connections in Ansible one (or more) sub-plugin(s) for the", "self._encoding = 'JSON_IETF' elif gnmi_pb2.Encoding.Value('JSON') in response.supported_encodings: self._encoding = 'JSON'", "_upd['path']['elem'] cPath = prfx elif prfx_elements: path_elements = prfx_elements cPath", "to be captured Returns: str: GetResponse message converted into JSON", "cPath[eleName] cPath[eleKey] = self._decodeVal(_upd['val']) else: cPath[leaf_elem['name']] = self._decodeVal(_upd['val']) return self._dictToList(result)", "= entry['prefix']['elem'] else: prfx_elements = [] for elem in prfx_elements:", "paths (XPATH syntax) update (list): Path/Value pairs to be updated", "logging the command executed and response received from target device", "# List entry does not exist => Create cPath[eleKey] =", "issuing a call to a RPC. 
If the RPC does", "key in aDict.keys(): if key.startswith('___'): aDict[key[3:]] = [self._dictToList(val) if isinstance(val,", "val: return json.loads(base64.b64decode(val['jsonIetfVal'])) elif 'jsonVal' in val: return json.loads(base64.b64decode(val['jsonVal'])) else:", "= datetime.datetime.fromtimestamp(float(output['timestamp'])/1000000000).isoformat() if 'prefix' in output: output['prefix'] = self._decodeXpath(output['prefix']) for", "self._encoding = 'JSON' else: raise AnsibleConnectionFailure(\"No compatible supported encoding found", "Element does not exist => Create prfx[eleName] = {} prfx", "= entry['update'] if 'prefix' in update: result['prefix'] = '/'+self._decodeXpath(update['prefix']) if", "after initial update pass elif 'update' in entry: result =", "if xpath: path_elements = re.split('''/(?=(?:[^\\[\\]]|\\[[^\\[\\]]+\\])*$)''', xpath) for e in path_elements:", "response after issuing a call to a RPC. If the", "Returns: (str): path string using XPATH syntax \"\"\" result =", "chosen by gRPC at runtime. 
ini: - section: grpc_connection key:", "not in cPath: cPath[eleName] = {} cPath = cPath[eleName] cPath[eleKey]", "(JSON or JSON_IETF)\") else: if self._encoding not in ['JSON_IETF', 'JSON']:", "True def __init__(self, play_context, new_stdin, *args, **kwargs): super(Connection, self).__init__( play_context,", "sub-plugin, Ansible modules can call methods provided by that sub-plugin.", "using gnmi_pb.TypedValue structure for easy conversion \"\"\" value = base64.b64encode(json.dumps(data).encode())", "update: result['values'] = {self._decodeXpath(u['path']): self._decodeVal(u['val']) for u in update['update']} yield", "cPath = result else: # No path at all, replace", "this option to work the 'log_path' ansible configuration option is", "the target host Parameters: None Returns: None \"\"\" if self._connected:", "object Parameters: xpath (str): path string using XPATH syntax Returns:", "metadata=auth) except grpc.RpcError as e: raise AnsibleConnectionFailure(\"%s\" % e) return", "JSON_IETF)\") else: if self._encoding not in ['JSON_IETF', 'JSON']: raise AnsibleConnectionFailure(\"Incompatible", "cPath[eleName] # The last entry of path_elements is the leaf", "the logged in user. - Can be configured from the", "try: output = [] responses = self._stub.Subscribe(iter([request]), duration, metadata=auth) if", "\"\"\" path = self.get_option('certificate_path') if not path: path = '/etc/ssl:/etc/ssl/certs:/etc/ca-certificates'", "To get visibility about gNMI capabilities of the remote device,", "establish gRPC connection. default: inventory_hostname vars: - name: ansible_host port:", "After loading the sub-plugin, Ansible modules can call methods provided", "persisted. Parameters: None Returns: None \"\"\" if self.connected: self.queue_message('v', 'gRPC", "{'elem': mypath} return {} def _decodeXpath(self, path): \"\"\" Decodes XPATH", "CLI via the C(--user) or C(-u) options. ini: - section:", "No path at all, replace the objecttree with value result", "by gRPC at runtime. 
ini: - section: grpc_connection key: root_certificates_file", "as eligible for aggregation Returns: str: Updates received converted into", "binds to the gNMI gRPC service. It provide wrappers for", "remote_user is not specified, the connection will use the username", "the root certificates from a default location chosen by gRPC", "[] responses = self._stub.Subscribe(iter([request]), duration, metadata=auth) if input['mode'] == 'ONCE':", "grpc.secure_channel(self._target, creds, options=options) else: self.queue_message('v', 'Starting insecure gRPC connection') self._channel", "for entry in rawData: if 'syncResponse' in entry and entry['syncResponse']:", "exc: raise AnsibleConnectionFailure( 'Failed to read cert/keys file %s: %s'", "result.append(tmp) return '/'.join(result) def _encodeVal(self, data): \"\"\" Encodes value to", "certificate and key files ini: - section: grpc_connection key: certificate_path", "options are not to be used in gNMI SetRequest if", "loading the sub-plugin, Ansible modules can call methods provided by", "- section: defaults key: remote_port env: - name: ANSIBLE_REMOTE_PORT vars:", "JSON format \"\"\" request = gnmi_pb2.CapabilityRequest() auth = self._login_credentials try:", "key: log_messages env: - name: ANSIBLE_PERSISTENT_LOG_MESSAGES vars: - name: ansible_persistent_log_messages", "address to avoid man-in-the-middle attacks. 
vars: - name: ansible_grpc_channel_options grpc_environment:", "%s\" % json.dumps(val)) def _dictToList(self, aDict): for key in aDict.keys():", "not support encoding for value: %s\" % json.dumps(val)) def _dictToList(self,", "% (self.get_option('remote_user'), self._target)) self.queue_message('v', 'Creating gNMI stub') self._stub = gnmi_pb2.gNMIStub(self._channel)", "input parameters from kwargs that are not set input =", "to all paths (XPATH syntax) update (list): Path/Value pairs to", "gRPC plugin provides methods to interact with the gNMI service.", "path_elements[:-1]: eleName = elem['name'] if 'key' in elem: eleKey =", "for item in output['response']: item['path'] = self._decodeXpath(item['path']) return json.dumps(output, indent=4).encode()", "on the remote device that listens for connections when establishing", "timeout value (in seconds) when awaiting a response after issuing", "the leaf element # that needs to be created/updated leaf_elem", "AnsibleConnectionFailure(\"%s\" % e) output = json_format.MessageToDict(response) output['timestamp'] = datetime.datetime.fromtimestamp(float(output['timestamp'])/1000000000).isoformat() if", "gnmi_pb.TypedValue object Parameters: val (dict): decoded gnmi_pb.TypedValue object Returns: (ANY):", "value} def _decodeVal(self, val): \"\"\" Decodes value from dict representation", "for easy conversion \"\"\" mypath = [] xpath = xpath.strip('\\t\\n\\r", "[self._encodeXpath(entry) for entry in input['delete']] if 'update' in input: for", "try: response = self._stub.Capabilities(request, metadata=auth) except grpc.RpcError as e: raise", "STATE prefix (str): Path prefix that is added to all", "needed, because the TLS validates hostname or IP address to", "in prfx_elements: eleName = elem['name'] if 'key' in elem: eleKey", "# List entry does not exist => Create prfx[eleKey] =", "create/navigate to the specified subcontext for elem in path_elements[:-1]: eleName", "(XPATH syntax) mode (str): Mode of subscription (STREAM, ONCE) 
subscription", "that allows conversion to gnmi_pb.Path object Parameters: xpath (str): path", "marked as eligible for aggregation Returns: str: Updates received converted", "it reads the root certificates from a default location chosen", "gNMI gRPC service. It provide wrappers for gNMI requests (Capabilities,", "the gRPC connection. If None only the C(host) part will", "section: grpc_connection key: certificate_path env: - name: ANSIBLE_CERTIFICATE_PATH vars: -", "used to create a SSL-enabled channel, if the value is", "else: try: del os.environ[key] except KeyError: # no such setting", "read excpetions \"\"\" path = self.get_option('certificate_path') if not path: path", "is None else '%s:%d' % (host, port) self._timeout = self.get_option('persistent_command_timeout')", "SetRequest if 'prefix' in input: input['prefix'] = self._encodeXpath(input['prefix']) if 'delete'", "= 20 request = json_format.ParseDict({'subscribe': input}, gnmi_pb2.SubscribeRequest()) auth = self._login_credentials", "and result will be persisted. Parameters: None Returns: None \"\"\"", "auth = self._login_credentials try: response = self._stub.Capabilities(request, metadata=auth) except grpc.RpcError", "be registered. After loading the sub-plugin, Ansible modules can call", "Path/Value pairs to be updated replace (list): Path/Value pairs to", "else: raise AnsibleConnectionFailure( 'Cert/keys file %s does not exist' %", "file %s: %s' % (filename, exc) ) else: raise AnsibleConnectionFailure(", "deleted Returns: str: SetResponse message converted into JSON format \"\"\"", "persistent gRPC connection, if not already done. 
After this, the", "HAS_GRPC = True except ImportError: HAS_GRPC = False try: from", "dict\") for key in grpcEnv: if grpcEnv[key]: os.environ[key] = str(grpcEnv[key])", "in input: input['path'] = [self._encodeXpath(path) for path in input['path']] if", "Element exists => Change Context cPath = cPath[eleName] else: #", "else: return {'jsonVal': value} def _decodeVal(self, val): \"\"\" Decodes value", "in input: input['type'] = input['type'].upper() input['encoding'] = self._encoding_value request =", "the BSD 3 Clause license # SPDX-License-Identifier: BSD-3-Clause # from", "the security implications of enabling this option as it could", "to_text class Connection(NetworkConnectionBase): \"\"\" Connection plugin for gRPC To use", "pairs to be replaced delete (list): Paths (str) to be", "chain is used. ini: - section: grpc_connection key: certificate_chain_file env:", "gRPC To use gRPC connections in Ansible one (or more)", "x[1], kwargs.items())) # Backup options are not to be used", "JSON_IETF (preferred) and JSON (fallback). Parameters: type (str): Type of", "leaf_elem: eleKey = json.dumps(leaf_elem['key']) eleName = '___'+leaf_elem['name'] if eleName not", "persistent gRPC connection for gNMI API service description: - This", "JSON_IETF - If not provided, will run CapabilityRequest for auto-detection", "configuration option is required to be set to a file", "ansible_connect_timeout persistent_command_timeout: type: int description: - Configures the default timeout", "grpc_connection key: private_key_file env: - name: ANSIBLE_PRIVATE_KEY_FILE vars: - name:", "in cases, when the FQDN or IPv4 address that is", "underlying transport (TLS). - The plugin binds to the gNMI", "sent and result will be persisted. 
Parameters: None Returns: None", "gRPC connection') creds = grpc.ssl_channel_credentials(**certs) self._channel = grpc.secure_channel(self._target, creds, options=options)", "except Exception as exc: raise AnsibleConnectionFailure( 'Failed to read cert/keys", "options.items() if certs['root_certificates'] or certs['private_key'] or certs['certificate_chain']: self.queue_message('v', 'Starting secure", "duration = input['duration'] del input['duration'] else: duration = 20 request", "Ignore: SyncResponse is sent after initial update pass elif 'update'", "input: input['prefix'] = self._encodeXpath(input['prefix']) if 'path' in input: input['path'] =", "gNMI SetRequest if 'backup' in input: del input['backup'] if 'backup_options'", "= json_format.ParseDict(input, gnmi_pb2.SetRequest()) auth = self._login_credentials try: response = self._stub.Set(request,", "when awaiting a response after issuing a call to a", "the port on the remote device that listens for connections", "self.get_option('persistent_command_timeout') certs = {} certs['root_certificates'] = self.readFile('root_certificates_file') certs['certificate_chain'] = self.readFile('certificate_chain_file')", "description: - Configures, in seconds, the amount of time to", "- This flag will enable logging the command executed and", "yield result else: # Ignore: Invalid message format pass #", "secure connections are used). The option must be provided in", "JSON (fallback). 
Parameters: type (str): Type of data that is", "item in output['response']: item['path'] = self._decodeXpath(item['path']) return json.dumps(output, indent=4).encode() @ensure_connect", "break elif 'timestamp' not in entry: # Subscribe response, enter", "are not to be used in gNMI SetRequest if 'backup'", "import grpc HAS_GRPC = True except ImportError: HAS_GRPC = False", "elem['key'] prfx = prfx[eleKey] else: # Path Element does not", "gnmi_pb2.Encoding.Value('JSON') in response.supported_encodings: self._encoding = 'JSON' else: raise AnsibleConnectionFailure(\"No compatible", "def _connect(self): \"\"\" Establish gRPC connection to remote node and", "using gnmi_pb2.Path structure for easy conversion \"\"\" mypath = []", "that is added to all paths (XPATH syntax) update (list):", "self._encodeXpath(input['prefix']) if 'path' in input: input['path'] = [self._encodeXpath(path) for path", "be list() if eleName in cPath: # Path Element exists", "value = base64.b64encode(json.dumps(data).encode()) if self._encoding == 'JSON_IETF': return {'jsonIetfVal': value}", "ansible log file. 
For this option to work the 'log_path'", "for gNMI SubscribeRequest if 'mode' in input: input['mode'] = input['mode'].upper()", "be provided in cases, when the FQDN or IPv4 address", "None Returns: None \"\"\" if self.connected: self.queue_message('v', 'gRPC connection to", "Key/Value pairs (dict) to define environment settings specific to gRPC", "Path Element does not exist => Create cPath[eleName] = {}", "available that consumes the attribute name {sub-plugin name}__{method name} to", "path_elements: entry = {'name': e.split(\"[\", 1)[0]} eKeys = re.findall('\\[(.*?)\\]', e)", "# Path Element exists => Change Context cPath = cPath[eleName]", "gnmi_encoding env: - name: ANSIBLE_GNMI_ENCODING vars: - name: ansible_gnmi_encoding grpc_channel_options:", "initial state allow_aggregation (bool): Aggregate elements marked as eligible for", "gRPC channel options to be used - gRPC reference U(https://grpc.github.io/grpc/core/group__grpc__arg__keys.html)", "in prfx: # Path Element exists => Change Context prfx", "interval, submode) duration (int): timeout, to stop receiving qos (int):", "timeout exceed, an error is generated and the connection is", "if 'jsonIetfVal' in val: return json.loads(base64.b64decode(val['jsonIetfVal'])) elif 'jsonVal' in val:", "gnmi_pb2.GetRequest()) auth = self._login_credentials try: response = self._stub.Get(request, metadata=auth) except", "update context entry = entry['update'] else: # Get response, keep", "vars: - name: ansible_user password: description: - Configures the user", "when establishing the gRPC connection. 
If None only the C(host)", "def _mergeToSingleDict(self, rawData): result = {} for entry in rawData:", "filename.startswith('/'): for entry in path.split(':'): if os.path.isfile(os.path.join(entry, filename)): filename =", "flag will enable logging the command executed and response received", "for key in grpcEnv: if grpcEnv[key]: os.environ[key] = str(grpcEnv[key]) else:", "grpc.insecure_channel(self._target, options=options) self.queue_message('v', \"gRPC connection established for user %s to", "prefix (str): Path prefix that is added to all paths", "=> Create prfx[eleName] = {} prfx = prfx[eleName] for _upd", "is required to use gRPC connection type. \" + \"Please", "prfx[eleName] = {} prfx = prfx[eleName] prfx[eleKey] = elem['key'] prfx", "name: ansible_command_timeout persistent_log_messages: type: boolean description: - This flag will", "not in _upd: # requested path without content (no value)", "structure for easy conversion \"\"\" value = base64.b64encode(json.dumps(data).encode()) if self._encoding", "result else: # Ignore: Invalid message format pass # -----------------------------------------------------------------------", "entry['key'] = dKeys mypath.append(entry) return {'elem': mypath} return {} def", "private_key_file env: - name: ANSIBLE_PRIVATE_KEY_FILE vars: - name: ansible_private_key_file root_certificates_file:", "'Run CapabilityRequest()') request = gnmi_pb2.CapabilityRequest() response = self._stub.Capabilities(request, metadata=self._login_credentials) self.queue_message('v',", "input: input['type'] = input['type'].upper() input['encoding'] = self._encoding_value request = json_format.ParseDict(input,", "not filename.startswith('/'): for entry in path.split(':'): if os.path.isfile(os.path.join(entry, filename)): filename", "value: %s\" % json.dumps(val)) def _dictToList(self, aDict): for key in", "= input['type'].upper() input['encoding'] = self._encoding_value request = json_format.ParseDict(input, gnmi_pb2.GetRequest()) auth", 
"entry['val'] = self._encodeVal(entry['val']) request = json_format.ParseDict(input, gnmi_pb2.SetRequest()) auth = self._login_credentials", "dict(filter(lambda x: x[1], kwargs.items())) # Adjust input parameters to match", "chain file used to create a SSL-enabled channel. If the", "pairs (dict) to define gRPC channel options to be used", "prfx[eleKey] else: # Path Element hasn't key => must be", "super(Connection, self).__init__( play_context, new_stdin, *args, **kwargs ) self._task_uuid = to_text(kwargs.get(\"task_uuid\",", "SyncResponse is sent after initial update pass elif 'update' in", "a single entry, # we need to create/navigate to the", "is automatically determined based on the remote device capabilities. This", "play_context, new_stdin, *args, **kwargs ) self._task_uuid = to_text(kwargs.get(\"task_uuid\", \"\")) if", "for update in self._simplifyUpdates(responses): output.append(update) except grpc.RpcError as e: if", "response.supported_models if gnmi_pb2.Encoding.Value('JSON_IETF') in response.supported_encodings: self._encoding = 'JSON_IETF' elif gnmi_pb2.Encoding.Value('JSON')", "= self._encodeXpath(entry['path']) entry['val'] = self._encodeVal(entry['val']) if 'replace' in input: for", "sure to fully understand the security implications of enabling this", "use the username of the logged in user. 
- Can", "1) for x in eKeys) if dKeys: entry['key'] = dKeys", "description: - The username used to authenticate to the remote", "gnmi_pb.TypedValue structure for easy conversion \"\"\" value = base64.b64encode(json.dumps(data).encode()) if", "Executes a gNMI Set request Encoding that is used for", "= self._stub.Get(request, metadata=auth) except grpc.RpcError as e: raise AnsibleConnectionFailure(\"%s\" %", "input['delete']] if 'update' in input: for entry in input['update']: entry['path']", "with value result = self._decodeVal(_upd['val']) prfx = result continue #", "self._target)) self.queue_message('v', 'Creating gNMI stub') self._stub = gnmi_pb2.gNMIStub(self._channel) self._encoding =", "section: persistent_connection key: command_timeout env: - name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT vars: -", "in entry and entry['syncResponse']: # Ignore: SyncResponse is sent after", "ensure_connect from google.protobuf import json_format from ansible_collections.nokia.grpc.plugins.connection.pb import gnmi_pb2 from", "not isinstance(grpcEnv, dict): raise AnsibleConnectionFailure(\"grpc_environment must be a dict\") for", "vars: - name: ansible_port remote_user: description: - The username used", "google import protobuf HAS_PROTOBUF = True except ImportError: HAS_PROTOBUF =", "%s' % (filename, exc) ) else: raise AnsibleConnectionFailure( 'Cert/keys file", "hasn't key => must be dict() if eleName in prfx:", "gNMI plugin has implemented suppport for JSON_IETF (preferred) and JSON", "- name: ansible_private_key_file root_certificates_file: description: - The PEM encoded root", "If this value expires before the connection to the remote", "provided in cases, when the FQDN or IPv4 address that", "entry['update'] if 'prefix' in update: result['prefix'] = '/'+self._decodeXpath(update['prefix']) if 'timestamp'", "raise AnsibleConnectionFailure(\"No compatible supported encoding found (JSON or JSON_IETF)\") else:", "Subscription specification (path, interval, submode) duration (int): 
timeout, to stop", "NetworkConnectionBase from ansible.plugins.connection import ensure_connect from google.protobuf import json_format from", "{} cPath = cPath[eleName] cPath[eleKey] = self._decodeVal(_upd['val']) else: cPath[leaf_elem['name']] =", "the connection is closed. default: 300 ini: - section: persistent_connection", "format pass # ----------------------------------------------------------------------- @ensure_connect def gnmiCapabilities(self): \"\"\" Executes a", "raise AnsibleConnectionFailure( 'Cert/keys file %s does not exist' % filename", "value is None, no certificate chain is used. ini: -", "Can be configured from the CLI via the C(--user) or", "certs['private_key'] or certs['certificate_chain']: self.queue_message('v', 'Starting secure gRPC connection') creds =", "\"\"\" # Remove all input parameters from kwargs that are", "authenticate to the remote device when first establishing the gRPC", "Exception as exc: raise AnsibleConnectionFailure( 'Failed to read cert/keys file", "print_function) __metaclass__ = type DOCUMENTATION = \"\"\" --- author: -", "for entry in input['update']: entry['path'] = self._encodeXpath(entry['path']) entry['val'] = self._encodeVal(entry['val'])", "% (host, port) self._timeout = self.get_option('persistent_command_timeout') certs = {} certs['root_certificates']", "create gNMI stub. This method will establish the persistent gRPC", "= cPath[eleName] cPath[eleKey] = self._decodeVal(_upd['val']) else: cPath[leaf_elem['name']] = self._decodeVal(_upd['val']) return", "input}, gnmi_pb2.SubscribeRequest()) auth = self._login_credentials try: output = [] responses", "Be sure to fully understand the security implications of enabling", "the case secure connections are used). The option must be", "None, no certificate chain is used. 
ini: - section: grpc_connection", "self._gnmiVersion = response.gNMI_version self._yangModels = response.supported_models if gnmi_pb2.Encoding.Value('JSON_IETF') in response.supported_encodings:", "google.protobuf import json_format from ansible_collections.nokia.grpc.plugins.connection.pb import gnmi_pb2 from ansible.module_utils._text import", "return aDict def _mergeToSingleDict(self, rawData): result = {} for entry", "if not self._encoding: self.queue_message('v', 'Run CapabilityRequest()') request = gnmi_pb2.CapabilityRequest() response", "provides methods to interact with the gNMI service. - OpenConfig", "a SSL-enabled channel. If the value is None, no certificate", "type (str): Type of data that is requested: ALL, CONFIG,", "base64.b64encode(json.dumps(data).encode()) if self._encoding == 'JSON_IETF': return {'jsonIetfVal': value} else: return", "context pass prfx = result if ('prefix' in entry) and", "'subscription' in input: for item in input['subscription']: item['path'] = self._encodeXpath(item['path'])", "entry: # Ignore: SyncResponse is sent after initial update pass", "- If not provided, will run CapabilityRequest for auto-detection ini:", "in output['response']: item['path'] = self._decodeXpath(item['path']) return json.dumps(output, indent=4).encode() @ensure_connect def", "be set to a file path with write access. 
-", "name: ANSIBLE_GNMI_ENCODING vars: - name: ansible_gnmi_encoding grpc_channel_options: description: - Key/Value", "Encodes value to dict representation that allows conversion to gnmi_pb.TypedValue", "Raises: AnsibleConnectionFailure: file does not exist or read excpetions \"\"\"", "and ('elem' in _upd['path']): path_elements = _upd['path']['elem'] cPath = prfx", "for k, v in elem['key'].items(): tmp += \"[%s=%s]\" % (k,", "'mode' in input: input['mode'] = input['mode'].upper() input['encoding'] = self._encoding_value if", "if input['mode'] == 'ONCE': responses = [json_format.MessageToDict(response) for response in", "\"\"\" Decodes value from dict representation converted from gnmi_pb.TypedValue object", "readFile(self, optionName): \"\"\" Reads a binary certificate/key file Parameters: optionName(str):", "import NetworkConnectionBase from ansible.plugins.connection import ensure_connect from google.protobuf import json_format", "'prefix' in input: input['prefix'] = self._encodeXpath(input['prefix']) if 'path' in input:", "SetRequest if 'backup' in input: del input['backup'] if 'backup_options' in", "self._encoding = self.get_option('gnmi_encoding') if not self._encoding: self.queue_message('v', 'Run CapabilityRequest()') request", "'val' not in _upd: # requested path without content (no", "*args, **kwargs ) self._task_uuid = to_text(kwargs.get(\"task_uuid\", \"\")) if not HAS_PROTOBUF:", "trying to initially establish a persistent connection. If this value", "grpcEnv: if grpcEnv[key]: os.environ[key] = str(grpcEnv[key]) else: try: del os.environ[key]", "be configured from the CLI via the C(--user) or C(-u)", "part will be used. 
ini: - section: defaults key: remote_port", "extracted data \"\"\" if 'jsonIetfVal' in val: return json.loads(base64.b64decode(val['jsonIetfVal'])) elif", "val for val in aDict[key].values()] del aDict[key] else: if isinstance(aDict[key],", "name: ansible_grpc_channel_options grpc_environment: description: - Key/Value pairs (dict) to define", "RPC does not return before the timeout exceed, an error", "option is required to be set to a file path", "already exist' % self._target) return grpcEnv = self.get_option('grpc_environment') or {}", "path_elements has more than just a single entry, # we", "The last entry of path_elements is the leaf element #", "aDict): for key in aDict.keys(): if key.startswith('___'): aDict[key[3:]] = [self._dictToList(val)", "value is None it reads the root certificates from a", "self._decodeXpath(item['path']) return json.dumps(output, indent=4).encode() @ensure_connect def gnmiSubscribe(self, *args, **kwargs): \"\"\"", "fully understand the security implications of enabling this option as", "path at all, replace the objecttree with value result =", "entry in input['update']: entry['path'] = self._encodeXpath(entry['path']) entry['val'] = self._encodeVal(entry['val']) if", "{} prfx = prfx[eleName] for _upd in entry['update']: if 'val'", "to the remote device when the gRPC connection is first", "sub-plugin to be registered. After loading the sub-plugin, Ansible modules", "= self._decodeVal(_upd['val']) return self._dictToList(result) def _simplifyUpdates(self, rawData): for msg in", "= self._encodeXpath(input['prefix']) if 'path' in input: input['path'] = [self._encodeXpath(path) for", "path['elem']: tmp = elem['name'] if 'key' in elem: for k,", "(@wisotzky)\" connection: gnmi short_description: Provides a persistent gRPC connection for", "security vulnerability by logging sensitive information in log file. 
default:", "val): \"\"\" Decodes value from dict representation converted from gnmi_pb.TypedValue", "def __init__(self, play_context, new_stdin, *args, **kwargs): super(Connection, self).__init__( play_context, new_stdin,", "= dict(filter(lambda x: x[1], kwargs.items())) # Adjust input parameters to", "log messages to the persistent messages log (see below). -", "<reponame>hansthienpondt/ansible-networking-collections # (c) 2020 Nokia # # Licensed under the", "'type' in input: input['type'] = input['type'].upper() input['encoding'] = self._encoding_value request", "v in elem['key'].items(): tmp += \"[%s=%s]\" % (k, v) result.append(tmp)", "self._decodeVal(_upd['val']) prfx = result continue # If path_elements has more", "exists => Change Context cPath = cPath[eleName] else: # Path", "json.dumps(val)) def _dictToList(self, aDict): for key in aDict.keys(): if key.startswith('___'):", "dict representation converted from gnmi_pb.TypedValue object Parameters: val (dict): decoded", "self.queue_message('v', \"gRPC connection established for user %s to %s\" %", "BSD-3-Clause # from __future__ import (absolute_import, division, print_function) __metaclass__ =", "object Returns: (dict): dict using gnmi_pb.TypedValue structure for easy conversion", "for elem in path_elements[:-1]: eleName = elem['name'] if 'key' in", "if 'mode' in input: input['mode'] = input['mode'].upper() input['encoding'] = self._encoding_value", "del input['backup_options'] # Adjust input parameters to match specification for", "= [ ('username', self.get_option('remote_user')), ('password', self.get_option('password')) ] host = self.get_option('host')", "self._decodeVal(_upd['val']) else: cPath[leaf_elem['name']] = self._decodeVal(_upd['val']) return self._dictToList(result) def _simplifyUpdates(self, rawData):", "Establish gRPC connection to remote node and create gNMI stub.", "ok pass self._login_credentials = [ ('username', self.get_option('remote_user')), ('password', self.get_option('password')) ]", 
"in path_elements[:-1]: eleName = elem['name'] if 'key' in elem: eleKey", "options = options.items() if certs['root_certificates'] or certs['private_key'] or certs['certificate_chain']: self.queue_message('v',", "Parameters: None Returns: None \"\"\" if self._connected: self.queue_message('v', \"Closing gRPC", "does not exist => Create cPath[eleName] = {} cPath =", "eKeys = re.findall('\\[(.*?)\\]', e) dKeys = dict(x.split('=', 1) for x", "self._stub = gnmi_pb2.gNMIStub(self._channel) self._encoding = self.get_option('gnmi_encoding') if not self._encoding: self.queue_message('v',", "or subjectAltName (only in the case secure connections are used).", "e: if e.code() == grpc.StatusCode.DEADLINE_EXCEEDED: if input['mode'] == 'ONCE': raise", "that sub-plugin. \"\"\" transport = \"nokia.grpc.gnmi\" has_pipelining = True def", "= self._encodeXpath(entry['path']) entry['val'] = self._encodeVal(entry['val']) request = json_format.ParseDict(input, gnmi_pb2.SetRequest()) auth", "be a dict\") options = options.items() if certs['root_certificates'] or certs['private_key']", "exist => Create cPath[eleName] = {} cPath = cPath[eleName] #", "AnsibleConnectionFailure(\"grpc_channel_options must be a dict\") options = options.items() if certs['root_certificates']", "for key in aDict.keys(): if key.startswith('___'): aDict[key[3:]] = [self._dictToList(val) if", "{'name': e.split(\"[\", 1)[0]} eKeys = re.findall('\\[(.*?)\\]', e) dKeys = dict(x.split('=',", "if 'prefix' in update: result['prefix'] = '/'+self._decodeXpath(update['prefix']) if 'timestamp' in", "in path['elem']: tmp = elem['name'] if 'key' in elem: for", "name: ANSIBLE_CERTIFICATE_PATH vars: - name: ansible_certificate_path gnmi_encoding: description: - Encoding", "format \"\"\" request = gnmi_pb2.CapabilityRequest() auth = self._login_credentials try: response", "executed and response received from target device in the ansible", "raise AnsibleError( \"protobuf is required to use gRPC connection type.", "path.split(':'): 
if os.path.isfile(os.path.join(entry, filename)): filename = os.path.join(entry, filename) break if", "search for certificate and key files ini: - section: grpc_connection", "dict representation that allows conversion to gnmi_pb.Path object Parameters: xpath", "that sub-plugin. There is a wrapper available that consumes the", "name: ansible_private_key_file root_certificates_file: description: - The PEM encoded root certificate", "if 'val' not in _upd: # requested path without content", "= '___'+leaf_elem['name'] if eleName not in cPath: cPath[eleName] = {}", "to avoid man-in-the-middle attacks. vars: - name: ansible_grpc_channel_options grpc_environment: description:", "name: ansible_user password: description: - Configures the user password used", "or IP address to establish gRPC connection. default: inventory_hostname vars:", "The PEM encoded private key file used to authenticate to", "(int): DSCP marking that is used updates_only (bool): Send only", "and JSON (fallback). Parameters: prefix (str): Path prefix that is", "base64 import datetime try: import grpc HAS_GRPC = True except", "aDict[key] else: if isinstance(aDict[key], dict): aDict[key] = self._dictToList(aDict[key]) return aDict", "{} for entry in rawData: if 'syncResponse' in entry and", "under the BSD 3 Clause license # SPDX-License-Identifier: BSD-3-Clause #", "gRPC connection, if not already done. 
After this, the gNMI", "to the remote device when first establishing the gRPC connection.", "ini: - section: grpc_connection key: gnmi_encoding env: - name: ANSIBLE_GNMI_ENCODING", "self._connected = False def readFile(self, optionName): \"\"\" Reads a binary", "capabilities of the remote device, a gNM CapabilityRequest will be", "SetRequest if 'prefix' in input: input['prefix'] = self._encodeXpath(input['prefix']) if 'path'", "connection will use the username of the logged in user.", "= cPath[eleKey] else: # Path Element hasn't key => must", "List entry does not exist => Create prfx[eleKey] = elem['key']", "self._encodeXpath(item['path']) # Extract duration from input attributes if 'duration' in", "= prfx[eleName] for _upd in entry['update']: if 'val' not in", "capabilities. This gNMI plugin has implemented suppport for JSON_IETF (preferred)", "by that sub-plugin. There is a wrapper available that consumes", "host: description: - Target host FQDN or IP address to", "options: if not isinstance(options, dict): raise AnsibleConnectionFailure(\"grpc_channel_options must be a", "1)[0]} eKeys = re.findall('\\[(.*?)\\]', e) dKeys = dict(x.split('=', 1) for", "must be provided in cases, when the FQDN or IPv4", "path dict using gnmi_pb2.Path structure for easy conversion \"\"\" mypath", "result if ('prefix' in entry) and ('elem' in entry['prefix']): prfx_elements", "certificate chain is used. 
ini: - section: grpc_connection key: certificate_chain_file", "to define gRPC channel options to be used - gRPC", "compatible supported encoding found (JSON or JSON_IETF)\") else: if self._encoding", "isinstance(aDict[key], dict): aDict[key] = self._dictToList(aDict[key]) return aDict def _mergeToSingleDict(self, rawData):", "ansible_persistent_log_messages \"\"\" import os import re import json import base64", "vars: - name: ansible_password - name: ansible_ssh_pass private_key_file: description: -", "except ImportError: HAS_GRPC = False try: from google import protobuf", "be updated replace (list): Path/Value pairs to be replaced delete", "json.dumps(output, indent=4).encode() @ensure_connect def gnmiSubscribe(self, *args, **kwargs): \"\"\" Executes a", "Decodes value from dict representation converted from gnmi_pb.TypedValue object Parameters:", "@ensure_connect def gnmiGet(self, *args, **kwargs): \"\"\" Executes a gNMI Get", "aDict[key] = self._dictToList(aDict[key]) return aDict def _mergeToSingleDict(self, rawData): result =", "in output: output['prefix'] = self._decodeXpath(output['prefix']) for item in output['response']: item['path']", "- name: ANSIBLE_PERSISTENT_LOG_MESSAGES vars: - name: ansible_persistent_log_messages \"\"\" import os", "type. \" + \"Please run 'pip install protobuf'\" ) if", "self.queue_message('v', 'gRPC/gNMI connection has established successfully') def close(self): \"\"\" Closes", "name: ansible_connect_timeout persistent_command_timeout: type: int description: - Configures the default", "logging sensitive information in log file. 
default: False ini: -", "'JSON']: raise AnsibleConnectionFailure(\"Incompatible encoding '%s' requested (JSON or JSON_IETF)\" %", "if 'syncResponse' in entry: # Ignore: SyncResponse is sent after", "e) return json_format.MessageToJson(response) @ensure_connect def gnmiGet(self, *args, **kwargs): \"\"\" Executes", "options = self.get_option('grpc_channel_options') if options: if not isinstance(options, dict): raise", "'syncResponse' in entry and entry['syncResponse']: # Ignore: SyncResponse is sent", "establishing the gRPC connection. If None only the C(host) part", "= True self.queue_message('v', 'gRPC/gNMI connection has established successfully') def close(self):", "the remote device that listens for connections when establishing the", "host\") self._channel.close() super(Connection, self).close() # ----------------------------------------------------------------------- def _encodeXpath(self, xpath='/'): \"\"\"", "del input['duration'] else: duration = 20 request = json_format.ParseDict({'subscribe': input},", "Returns: str: SetResponse message converted into JSON format \"\"\" #", "below). 
- Set C(HTTPS_PROXY) to specify your proxy settings (if", "else '%s:%d' % (host, port) self._timeout = self.get_option('persistent_command_timeout') certs =", "# Remove all input parameters from kwargs that are not", "does not exist => Create cPath[eleKey] = elem['key'] cPath =", "input['prefix'] = self._encodeXpath(input['prefix']) if 'subscription' in input: for item in", "certificate_chain_file env: - name: ANSIBLE_CERTIFICATE_CHAIN_FILE vars: - name: ansible_certificate_chain_file certificate_path:", "remote_port env: - name: ANSIBLE_REMOTE_PORT vars: - name: ansible_port remote_user:", "be created/updated leaf_elem = path_elements[-1] if 'key' in leaf_elem: eleKey", "ini: - section: defaults key: remote_port env: - name: ANSIBLE_REMOTE_PORT", "\"\"\" transport = \"nokia.grpc.gnmi\" has_pipelining = True def __init__(self, play_context,", "'CapabilityRequest() succeeded') self._gnmiVersion = response.gNMI_version self._yangModels = response.supported_models if gnmi_pb2.Encoding.Value('JSON_IETF')", "\"\"\" Closes the active gRPC connection to the target host", "gRPC connection type. \" + \"Please run 'pip install grpcio'\"", "description: - Configures the default timeout value (in seconds) when", "remote device capabilities. This gNMI plugin has implemented suppport for", "{} cPath = cPath[eleName] # The last entry of path_elements", "['JSON_IETF', 'JSON']: raise AnsibleConnectionFailure(\"Incompatible encoding '%s' requested (JSON or JSON_IETF)\"", "else: self.queue_message('v', 'Starting insecure gRPC connection') self._channel = grpc.insecure_channel(self._target, options=options)", "json.dumps(elem['key']) eleName = '___'+eleName # Path Element has key =>", "suppport for JSON_IETF (preferred) and JSON (fallback). 
Parameters: type (str):", "Context prfx = prfx[eleName] else: # Path Element does not", "return {'elem': mypath} return {} def _decodeXpath(self, path): \"\"\" Decodes", "method will establish the persistent gRPC connection, if not already", "- gRPC reference U(https://grpc.github.io/grpc/core/group__grpc__arg__keys.html) - Provide the I(ssl_target_name_override) option to", "enter update context entry = entry['update'] else: # Get response,", "the user password used to authenticate to the remote device", "self.queue_message('v', 'gRPC connection to host %s already exist' % self._target)", "are not set input = dict(filter(lambda x: x[1], kwargs.items())) #", "gRPC - The standard mechanism to provide/set the environment in", "to gnmi_pb.Path object Parameters: xpath (str): path string using XPATH", "not provided, will run CapabilityRequest for auto-detection ini: - section:", "int description: - Configures, in seconds, the amount of time", "used for data serialization is automatically determined based on the", "= cPath[eleName] cPath[eleKey] = elem['key'] cPath = cPath[eleKey] else: #", "establish the persistent gRPC connection, if not already done. After", "that is added to all paths (XPATH syntax) mode (str):", "gNMI requests (Capabilities, Get, Set, Subscribe) requirements: - grpcio -", "elem['key'].items(): tmp += \"[%s=%s]\" % (k, v) result.append(tmp) return '/'.join(result)", "DOCUMENTATION = \"\"\" --- author: - \"<NAME> (@HansThienpondt)\" - \"<NAME>", "(or more) sub-plugin(s) for the required gRPC service(s) must be", "(in seconds) when awaiting a response after issuing a call", "XPATH to dict representation that allows conversion to gnmi_pb.Path object", "to be updated replace (list): Path/Value pairs to be replaced", "input['duration'] else: duration = 20 request = json_format.ParseDict({'subscribe': input}, gnmi_pb2.SubscribeRequest())", "of that sub-plugin. 
\"\"\" transport = \"nokia.grpc.gnmi\" has_pipelining = True", "in input: input['prefix'] = self._encodeXpath(input['prefix']) if 'subscription' in input: for", "- Can be configured from the CLI via the C(--user)", "objecttree with value result = self._decodeVal(_upd['val']) prfx = result continue", "(dict) to define environment settings specific to gRPC - The", "= dKeys mypath.append(entry) return {'elem': mypath} return {} def _decodeXpath(self,", "= self.readFile('root_certificates_file') certs['certificate_chain'] = self.readFile('certificate_chain_file') certs['private_key'] = self.readFile('private_key_file') options =", "conversion \"\"\" mypath = [] xpath = xpath.strip('\\t\\n\\r /') if", "to authenticate to the remote device when the gRPC connection", "decoded gnmi_pb.TypedValue object Returns: (ANY): extracted data \"\"\" if 'jsonIetfVal'", "C(GRPC_SSL_CIPHER_SUITES) in case the default TLS ciphers do not match", "AnsibleConnectionFailure(\"No compatible supported encoding found (JSON or JSON_IETF)\") else: if", "using XPATH syntax \"\"\" result = [] if 'elem' not", "used updates_only (bool): Send only updates to initial state allow_aggregation", "result['values'] = {self._decodeXpath(u['path']): self._decodeVal(u['val']) for u in update['update']} yield result", "'%s' requested (JSON or JSON_IETF)\" % self._encoding) self._encoding_value = gnmi_pb2.Encoding.Value(self._encoding)", "'Starting insecure gRPC connection') self._channel = grpc.insecure_channel(self._target, options=options) self.queue_message('v', \"gRPC", "is added to all paths (XPATH syntax) mode (str): Mode", "grpc_channel_options: description: - Key/Value pairs (dict) to define gRPC channel", "update in self._simplifyUpdates(responses): output.append(update) except grpc.RpcError as e: if e.code()", "import json import base64 import datetime try: import grpc HAS_GRPC", "entry['path'] = self._encodeXpath(entry['path']) entry['val'] = self._encodeVal(entry['val']) if 'replace' in input:", 
"entry does not exist => Create cPath[eleKey] = elem['key'] cPath", "JSON format \"\"\" # Remove all input parameters from kwargs", "to work the 'log_path' ansible configuration option is required to", "@ensure_connect def gnmiSubscribe(self, *args, **kwargs): \"\"\" Executes a gNMI Subscribe", "cPath = prfx elif prfx_elements: path_elements = prfx_elements cPath =", "not exist => Create prfx[eleKey] = elem['key'] prfx = prfx[eleKey]", "update = entry['update'] if 'prefix' in update: result['prefix'] = '/'+self._decodeXpath(update['prefix'])", "be sent and result will be persisted. Parameters: None Returns:", "dict representation that allows conversion to gnmi_pb.TypedValue object Parameters: data", "response = self._stub.Capabilities(request, metadata=auth) except grpc.RpcError as e: raise AnsibleConnectionFailure(\"%s\"", "log (see below). - Set C(HTTPS_PROXY) to specify your proxy", "key: remote_port env: - name: ANSIBLE_REMOTE_PORT vars: - name: ansible_port", "dict using gnmi_pb.TypedValue structure for easy conversion \"\"\" value =", "is used to connect to the device is different from", "process that establishes the gRPC connection. - Set C(GRPC_VERBOSITY) and", "If the RPC does not return before the timeout exceed,", "succeeded') self._gnmiVersion = response.gNMI_version self._yangModels = response.supported_models if gnmi_pb2.Encoding.Value('JSON_IETF') in", "not exist or read excpetions \"\"\" path = self.get_option('certificate_path') if", "prfx elif prfx_elements: path_elements = prfx_elements cPath = result else:", "representation converted from gnmi_pb.Path object Parameters: path (dict): decoded gnmi_pb2.Path", "Target host FQDN or IP address to establish gRPC connection.", "define environment settings specific to gRPC - The standard mechanism", "5 ini: - section: persistent_connection key: connect_timeout env: - name:", "(fallback). 
Parameters: type (str): Type of data that is requested:", "json_format.ParseDict(input, gnmi_pb2.GetRequest()) auth = self._login_credentials try: response = self._stub.Get(request, metadata=auth)", "visibility about gNMI capabilities of the remote device, a gNM", "prfx: # List entry does not exist => Create prfx[eleKey]", "type: int description: - Configures, in seconds, the amount of", "set to a file path with write access. - Be", "specification for gNMI SubscribeRequest if 'mode' in input: input['mode'] =", "= [] responses = self._stub.Subscribe(iter([request]), duration, metadata=auth) if input['mode'] ==", "the amount of time to wait when trying to initially", "or JSON_IETF)\") else: if self._encoding not in ['JSON_IETF', 'JSON']: raise", "auth = self._login_credentials try: output = [] responses = self._stub.Subscribe(iter([request]),", "Provides a persistent gRPC connection for gNMI API service description:", "private key file used to authenticate to the remote device", "try: response = self._stub.Set(request, metadata=auth) except grpc.RpcError as e: raise", "determined based on the remote device capabilities. This gNMI plugin", "Element exists => Change Context prfx = prfx[eleName] else: #", "\"\"\" result = [] if 'elem' not in path: return", "messages log (see below). 
- Set C(HTTPS_PROXY) to specify your", "not in ['JSON_IETF', 'JSON']: raise AnsibleConnectionFailure(\"Incompatible encoding '%s' requested (JSON", "if eleName in cPath: # Path Element exists => Change", "str: Updates received converted into JSON format \"\"\" # Remove", "Connection plugin for gRPC To use gRPC connections in Ansible", "port = self.get_option('port') self._target = host if port is None", "with open(filename, 'rb') as f: return f.read() except Exception as", "= entry['update'] else: # Get response, keep context pass prfx", "json import base64 import datetime try: import grpc HAS_GRPC =", "gnmi_pb2.gNMIStub(self._channel) self._encoding = self.get_option('gnmi_encoding') if not self._encoding: self.queue_message('v', 'Run CapabilityRequest()')", "this, the gNMI stub will be created. To get visibility", "prfx = result continue # If path_elements has more than", "in response.supported_encodings: self._encoding = 'JSON' else: raise AnsibleConnectionFailure(\"No compatible supported", "to remote node and create gNMI stub. 
This method will", "try: del os.environ[key] except KeyError: # no such setting in", "API https://raw.githubusercontent.com/openconfig/gnmi/master/proto/gnmi/gnmi.proto - This connection plugin provides a persistent communication", "supported encoding found (JSON or JSON_IETF)\") else: if self._encoding not", "'backup_options' in input: del input['backup_options'] # Adjust input parameters to", "passed to the client process that establishes the gRPC connection.", "- section: persistent_connection key: connect_timeout env: - name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT vars:", "aDict def _mergeToSingleDict(self, rawData): result = {} for entry in", "if self.connected: self.queue_message('v', 'gRPC connection to host %s already exist'", "'delete' in input: input['delete'] = [self._encodeXpath(entry) for entry in input['delete']]", "because those environment settings are not passed to the client", "plugin for gRPC To use gRPC connections in Ansible one", "binary certificate/key file Parameters: optionName(str): used to read filename from", "and create gNMI stub. 
This method will establish the persistent", "import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = \"\"\"", "in leaf_elem: eleKey = json.dumps(leaf_elem['key']) eleName = '___'+leaf_elem['name'] if eleName", "key => must be dict() if eleName in cPath: #", "updated replace (list): Path/Value pairs to be replaced delete (list):", "- Folder to search for certificate and key files ini:", "gnmi_pb.TypedValue object Parameters: data (ANY): data to be encoded as", "to the device is different from the subject name that", "name: ANSIBLE_ROOT_CERTIFICATES_FILE vars: - name: ansible_root_certificates_file certificate_chain_file: description: - The", "in input['delete']] if 'update' in input: for entry in input['update']:", "persistent_log_messages: type: boolean description: - This flag will enable logging", "\"\"\" Executes a gNMI Set request Encoding that is used", "=> must be list() if eleName in cPath: # Path", "gnmi_pb2.Encoding.Value(self._encoding) self._connected = True self.queue_message('v', 'gRPC/gNMI connection has established successfully')", "dict(filter(lambda x: x[1], kwargs.items())) # Backup options are not to", "return json_format.MessageToJson(response) @ensure_connect def gnmiGet(self, *args, **kwargs): \"\"\" Executes a", "in log file. default: False ini: - section: persistent_connection key:", "if eleName not in cPath: cPath[eleName] = {} cPath =", "provide/set the environment in Ansible cannot be used, because those", "= prfx[eleKey] else: # Path Element does not exist =>", "gRPC connection. 
If None only the C(host) part will be", "input['delete'] = [self._encodeXpath(entry) for entry in input['delete']] if 'update' in", "'log_path' ansible configuration option is required to be set to", "If the value is None, no certificate chain is used.", "self._stub.Capabilities(request, metadata=self._login_credentials) self.queue_message('v', 'CapabilityRequest() succeeded') self._gnmiVersion = response.gNMI_version self._yangModels =", "entry) and ('elem' in entry['prefix']): prfx_elements = entry['prefix']['elem'] else: prfx_elements", "is requested: ALL, CONFIG, STATE prefix (str): Path prefix that", "('username', self.get_option('remote_user')), ('password', self.get_option('password')) ] host = self.get_option('host') port =", "DSCP marking that is used updates_only (bool): Send only updates", "aDict[key].values()] del aDict[key] else: if isinstance(aDict[key], dict): aDict[key] = self._dictToList(aDict[key])", "Ignore: SyncResponse is sent after initial update break elif 'update'", "except grpc.RpcError as e: raise AnsibleConnectionFailure(\"%s\" % e) return json_format.MessageToJson(response)", "JSON_IETF)\" % self._encoding) self._encoding_value = gnmi_pb2.Encoding.Value(self._encoding) self._connected = True self.queue_message('v',", "allow_aggregation (bool): Aggregate elements marked as eligible for aggregation Returns:", "to call a specific method of that sub-plugin. \"\"\" transport", "open(filename, 'rb') as f: return f.read() except Exception as exc:", "SubscribeRequest if 'mode' in input: input['mode'] = input['mode'].upper() input['encoding'] =", "dKeys mypath.append(entry) return {'elem': mypath} return {} def _decodeXpath(self, path):", "for value: %s\" % json.dumps(val)) def _dictToList(self, aDict): for key", "the value is None it reads the root certificates from", "else val for val in aDict[key].values()] del aDict[key] else: if", "of path_elements is the leaf element # that needs to", "when the gRPC connection is first established. 
If the remote_user", "If not provided, will run CapabilityRequest for auto-detection ini: -", "for path in input['path']] if 'type' in input: input['type'] =", "gRPC connection to target host\") self._channel.close() super(Connection, self).close() # -----------------------------------------------------------------------", "def _decodeVal(self, val): \"\"\" Decodes value from dict representation converted", "gNMI Capabilities request Parameters: None Returns: str: gNMI capabilities converted", "'jsonIetfVal' in val: return json.loads(base64.b64decode(val['jsonIetfVal'])) elif 'jsonVal' in val: return", "in entry['update']: if 'val' not in _upd: # requested path", "from ansible.plugins.connection import ensure_connect from google.protobuf import json_format from ansible_collections.nokia.grpc.plugins.connection.pb", "as f: return f.read() except Exception as exc: raise AnsibleConnectionFailure(", "Parameters: None Returns: None \"\"\" if self.connected: self.queue_message('v', 'gRPC connection", "plugin does not support encoding for value: %s\" % json.dumps(val))", "CapabilityRequest will be sent and result will be persisted. Parameters:", "= {'name': e.split(\"[\", 1)[0]} eKeys = re.findall('\\[(.*?)\\]', e) dKeys =", "be dict() if eleName in cPath: # Path Element exists", "= options.items() if certs['root_certificates'] or certs['private_key'] or certs['certificate_chain']: self.queue_message('v', 'Starting", "install protobuf'\" ) if not HAS_GRPC: raise AnsibleError( \"grpcio is", "300 ini: - section: persistent_connection key: command_timeout env: - name:", "for msg in rawData: entry = json_format.MessageToDict(msg) if 'syncResponse' in", "first established. 
If the remote_user is not specified, the connection", "cert/keys file %s: %s' % (filename, exc) ) else: raise", "Decodes XPATH from dict representation converted from gnmi_pb.Path object Parameters:", "in input: input['prefix'] = self._encodeXpath(input['prefix']) if 'delete' in input: input['delete']", "cPath[eleName] = {} cPath = cPath[eleName] cPath[eleKey] = self._decodeVal(_upd['val']) else:", "= json_format.ParseDict({'subscribe': input}, gnmi_pb2.SubscribeRequest()) auth = self._login_credentials try: output =", "will use the username of the logged in user. -", "connection to the remote device is completed, the connection will", "updates break elif 'timestamp' not in entry: # Subscribe response,", "key => must be dict() if eleName in prfx: #", "exceed, an error is generated and the connection is closed.", "in input['update']: entry['path'] = self._encodeXpath(entry['path']) entry['val'] = self._encodeVal(entry['val']) if 'replace'", "The PEM encoded certificate chain file used to create a", "data (ANY): data to be encoded as gnmi_pb.TypedValue object Returns:", "# Ignore: SyncResponse is sent after initial update pass elif", "the FQDN or IPv4 address that is used to connect", "self._encoding_value = gnmi_pb2.Encoding.Value(self._encoding) self._connected = True self.queue_message('v', 'gRPC/gNMI connection has", "entry, # we need to create/navigate to the specified subcontext", "- name: ansible_certificate_path gnmi_encoding: description: - Encoding used for gNMI", "encoded root certificate file used to create a SSL-enabled channel,", "response received from target device in the ansible log file.", "input: input['mode'] = input['mode'].upper() input['encoding'] = self._encoding_value if 'prefix' in", "Set, Subscribe) requirements: - grpcio - protobuf options: host: description:", "the remote device when first establishing the grpc connection. 
ini:", "gNMI capabilities of the remote device, a gNM CapabilityRequest will", "attributes if 'duration' in input: duration = input['duration'] del input['duration']", "= result if ('prefix' in entry) and ('elem' in entry['prefix']):", "AnsibleConnectionFailure: file does not exist or read excpetions \"\"\" path", "syntax \"\"\" result = [] if 'elem' not in path:", "+= \"[%s=%s]\" % (k, v) result.append(tmp) return '/'.join(result) def _encodeVal(self,", "result continue # If path_elements has more than just a", "to be deleted Returns: str: SetResponse message converted into JSON", "code for log forwarding of gRPC related log messages to", "elif 'jsonVal' in val: return json.loads(base64.b64decode(val['jsonVal'])) else: raise AnsibleConnectionFailure(\"Ansible gNMI", "of enabling this option as it could create a security", "raise AnsibleConnectionFailure(\"%s\" % e) return json_format.MessageToJson(response) @ensure_connect def gnmiGet(self, *args,", "device is different from the subject name that is provided", "List of paths (str) to be captured Returns: str: GetResponse", "{} certs['root_certificates'] = self.readFile('root_certificates_file') certs['certificate_chain'] = self.readFile('certificate_chain_file') certs['private_key'] = self.readFile('private_key_file')", "# Path Element exists => Change Context prfx = prfx[eleName]", "pass prfx = result if ('prefix' in entry) and ('elem'", "cPath[eleName] = {} cPath = cPath[eleName] cPath[eleKey] = elem['key'] cPath", "GetResponse message converted into JSON format \"\"\" # Remove all", "import ensure_connect from google.protobuf import json_format from ansible_collections.nokia.grpc.plugins.connection.pb import gnmi_pb2", "input['encoding'] = self._encoding_value if 'prefix' in input: input['prefix'] = self._encodeXpath(input['prefix'])", "x: x[1], kwargs.items())) # Adjust input parameters to match specification", "= prfx_elements cPath = result else: # No path at", "generated and the connection is closed. 
default: 300 ini: -", "automatically determined based on the remote device capabilities. This gNMI", "device that listens for connections when establishing the gRPC connection.", "vulnerability by logging sensitive information in log file. default: False", "ansible.errors import AnsibleConnectionFailure, AnsibleError from ansible.plugins.connection import NetworkConnectionBase from ansible.plugins.connection", "in path_elements: entry = {'name': e.split(\"[\", 1)[0]} eKeys = re.findall('\\[(.*?)\\]',", "for gNMI SetRequest if 'prefix' in input: input['prefix'] = self._encodeXpath(input['prefix'])", "/') if xpath: path_elements = re.split('''/(?=(?:[^\\[\\]]|\\[[^\\[\\]]+\\])*$)''', xpath) for e in", "request Parameters: None Returns: str: gNMI capabilities converted into JSON", "# RPC timed out, which is okay pass else: raise", "- Configures the user password used to authenticate to the", "cPath[eleKey] = elem['key'] cPath = cPath[eleKey] else: # Path Element", "section: grpc_connection key: certificate_chain_file env: - name: ANSIBLE_CERTIFICATE_CHAIN_FILE vars: -", "establishes the gRPC connection. - Set C(GRPC_VERBOSITY) and C(GRPC_TRACE) to", "gNMI API https://raw.githubusercontent.com/openconfig/gnmi/master/proto/gnmi/gnmi.proto - This connection plugin provides a persistent", "service(s) must be loaded. To load gRPC sub-plugins use the", "gnmi_pb2.SetRequest()) auth = self._login_credentials try: response = self._stub.Set(request, metadata=auth) except", "input['prefix'] = self._encodeXpath(input['prefix']) if 'delete' in input: input['delete'] = [self._encodeXpath(entry)", "eleKey = json.dumps(elem['key']) eleName = '___'+eleName # Path Element has", "Send only updates to initial state allow_aggregation (bool): Aggregate elements", "be used. 
ini: - section: defaults key: remote_port env: -", "new_stdin, *args, **kwargs): super(Connection, self).__init__( play_context, new_stdin, *args, **kwargs )", "value} else: return {'jsonVal': value} def _decodeVal(self, val): \"\"\" Decodes", "{} if not isinstance(grpcEnv, dict): raise AnsibleConnectionFailure(\"grpc_environment must be a", "elem: eleKey = json.dumps(elem['key']) eleName = '___'+eleName # Path Element", "= {} cPath = cPath[eleName] cPath[eleKey] = self._decodeVal(_upd['val']) else: cPath[leaf_elem['name']]", "= self._encoding_value request = json_format.ParseDict(input, gnmi_pb2.GetRequest()) auth = self._login_credentials try:", "filename ) return None def _connect(self): \"\"\" Establish gRPC connection", "grpc.StatusCode.DEADLINE_EXCEEDED: if input['mode'] == 'ONCE': raise AnsibleConnectionFailure(\"gNMI ONCE Subscription timed", "new_stdin, *args, **kwargs ) self._task_uuid = to_text(kwargs.get(\"task_uuid\", \"\")) if not", "Configures the default timeout value (in seconds) when awaiting a", "is needed, because the TLS validates hostname or IP address", "to match specification for gNMI SubscribeRequest if 'mode' in input:", "to create/navigate to the specified subcontext for elem in path_elements[:-1]:", "output.append(update) except grpc.RpcError as e: if e.code() == grpc.StatusCode.DEADLINE_EXCEEDED: if", "a security vulnerability by logging sensitive information in log file.", "a default location chosen by gRPC at runtime. ini: -", "= self._encodeVal(entry['val']) request = json_format.ParseDict(input, gnmi_pb2.SetRequest()) auth = self._login_credentials try:", "in ['JSON_IETF', 'JSON']: raise AnsibleConnectionFailure(\"Incompatible encoding '%s' requested (JSON or", "get visibility about gNMI capabilities of the remote device, a", "Returns: str: gNMI capabilities converted into JSON format \"\"\" request", "interact with the gNMI service. 
- OpenConfig gNMI specification https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md", "% e) output = self._mergeToSingleDict(json_format.MessageToDict(response)['notification']) return json.dumps(output, indent=4).encode() @ensure_connect def", "enabling this option as it could create a security vulnerability", "'ONCE': responses = [json_format.MessageToDict(response) for response in responses] output =", "marking that is used updates_only (bool): Send only updates to", "Returns: (ANY): extracted data \"\"\" if 'jsonIetfVal' in val: return", "cPath: # List entry does not exist => Create cPath[eleKey]", "request = json_format.ParseDict(input, gnmi_pb2.GetRequest()) auth = self._login_credentials try: response =", "address that is used to connect to the device is", "object Parameters: path (dict): decoded gnmi_pb2.Path object Returns: (str): path", "This gNMI plugin has implemented suppport for JSON_IETF (preferred) and", "if 'syncResponse' in entry and entry['syncResponse']: # Ignore: SyncResponse is", "provided in the host certificate. This is needed, because the", "self._simplifyUpdates(responses): output.append(update) except grpc.RpcError as e: if e.code() == grpc.StatusCode.DEADLINE_EXCEEDED:", "ANSIBLE_PERSISTENT_CONNECT_TIMEOUT vars: - name: ansible_connect_timeout persistent_command_timeout: type: int description: -", "not set input = dict(filter(lambda x: x[1], kwargs.items())) # Backup", "not already done. After this, the gNMI stub will be", "the persistent messages log (see below). 
- Set C(HTTPS_PROXY) to", "\"<NAME> (@HansThienpondt)\" - \"<NAME> (@wisotzky)\" connection: gnmi short_description: Provides a", "return {} def _decodeXpath(self, path): \"\"\" Decodes XPATH from dict", "description: - The PEM encoded certificate chain file used to", "from ansible_collections.nokia.grpc.plugins.connection.pb import gnmi_pb2 from ansible.module_utils._text import to_text class Connection(NetworkConnectionBase):", "if e.code() == grpc.StatusCode.DEADLINE_EXCEEDED: if input['mode'] == 'ONCE': raise AnsibleConnectionFailure(\"gNMI", "port: type: int description: - Specifies the port on the", "import os import re import json import base64 import datetime", "= '___'+eleName # Path Element has key => must be", "not exist => Create cPath[eleKey] = elem['key'] cPath = cPath[eleKey]", "json_format from ansible_collections.nokia.grpc.plugins.connection.pb import gnmi_pb2 from ansible.module_utils._text import to_text class", "gRPC service. It provide wrappers for gNMI requests (Capabilities, Get,", "XPATH from dict representation converted from gnmi_pb.Path object Parameters: path", "to initially establish a persistent connection. If this value expires", "specific method of that sub-plugin. \"\"\" transport = \"nokia.grpc.gnmi\" has_pipelining", "raise AnsibleConnectionFailure(\"Ansible gNMI plugin does not support encoding for value:", "== 'JSON_IETF': return {'jsonIetfVal': value} else: return {'jsonVal': value} def", "*args, **kwargs): \"\"\" Executes a gNMI Subscribe request Encoding that", "'key' in elem: for k, v in elem['key'].items(): tmp +=", "self._login_credentials try: output = [] responses = self._stub.Subscribe(iter([request]), duration, metadata=auth)", "grpc connection. ini: - section: grpc_connection key: private_key_file env: -", "break elif 'update' not in entry: # Ignore: entry without", "updates to initial state allow_aggregation (bool): Aggregate elements marked as", "the host certificate. 
This is needed, because the TLS validates", "port) self._timeout = self.get_option('persistent_command_timeout') certs = {} certs['root_certificates'] = self.readFile('root_certificates_file')", "gNMI stub. This method will establish the persistent gRPC connection,", "output['response']: item['path'] = self._decodeXpath(item['path']) return json.dumps(output, indent=4).encode() @ensure_connect def gnmiSubscribe(self,", "leaf element # that needs to be created/updated leaf_elem =", "into JSON format \"\"\" # Remove all input parameters from", "call to a RPC. If the RPC does not return", "%s\" % (self.get_option('remote_user'), self._target)) self.queue_message('v', 'Creating gNMI stub') self._stub =", "json.loads(base64.b64decode(val['jsonIetfVal'])) elif 'jsonVal' in val: return json.loads(base64.b64decode(val['jsonVal'])) else: raise AnsibleConnectionFailure(\"Ansible", "(dict): dict using gnmi_pb.TypedValue structure for easy conversion \"\"\" value", "communication - Must be either JSON or JSON_IETF - If", "path_elements = _upd['path']['elem'] cPath = prfx elif prfx_elements: path_elements =", "name: ansible_password - name: ansible_ssh_pass private_key_file: description: - The PEM", "break if os.path.isfile(filename): try: with open(filename, 'rb') as f: return", "= json_format.ParseDict(input, gnmi_pb2.GetRequest()) auth = self._login_credentials try: response = self._stub.Get(request,", "gRPC connection type. \" + \"Please run 'pip install protobuf'\"", "datetime.datetime.fromtimestamp(float(update['timestamp'])/1000000000).isoformat() if 'update' in update: result['values'] = {self._decodeXpath(u['path']): self._decodeVal(u['val']) for", "a gNMI Capabilities request Parameters: None Returns: str: gNMI capabilities", "(absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = \"\"\" ---", "connection to remote node and create gNMI stub. This method", "be persisted. 
Parameters: None Returns: None \"\"\" if self.connected: self.queue_message('v',", "in path: return \"\" for elem in path['elem']: tmp =", "setup gRPC logging. Need to add code for log forwarding", "certs = {} certs['root_certificates'] = self.readFile('root_certificates_file') certs['certificate_chain'] = self.readFile('certificate_chain_file') certs['private_key']", "username used to authenticate to the remote device when the", "aDict[key[3:]] = [self._dictToList(val) if isinstance(val, dict) else val for val", "but thats ok pass self._login_credentials = [ ('username', self.get_option('remote_user')), ('password',", "C(HTTPS_PROXY) to specify your proxy settings (if needed). - Set", "added to all paths (XPATH syntax) paths (list): List of", "user %s to %s\" % (self.get_option('remote_user'), self._target)) self.queue_message('v', 'Creating gNMI", "root_certificates_file: description: - The PEM encoded root certificate file used", "RPC. If the RPC does not return before the timeout", "mechanism to provide/set the environment in Ansible cannot be used,", "target host\") self._channel.close() super(Connection, self).close() # ----------------------------------------------------------------------- def _encodeXpath(self, xpath='/'):", "run 'pip install grpcio'\" ) self._connected = False def readFile(self,", "prfx = prfx[eleKey] else: # Path Element does not exist", "of the sub-plugin to be registered. After loading the sub-plugin,", "gRPC connection. 
vars: - name: ansible_password - name: ansible_ssh_pass private_key_file:", "elif gnmi_pb2.Encoding.Value('JSON') in response.supported_encodings: self._encoding = 'JSON' else: raise AnsibleConnectionFailure(\"No", "return self._dictToList(result) def _simplifyUpdates(self, rawData): for msg in rawData: entry", "to add code for log forwarding of gRPC related log", "a gNM CapabilityRequest will be sent and result will be", "\"\"\" Establish gRPC connection to remote node and create gNMI", "- name: ansible_ssh_pass private_key_file: description: - The PEM encoded private", "duration (int): timeout, to stop receiving qos (int): DSCP marking", "input parameters to match specification for gNMI SubscribeRequest if 'mode'", "ansible_ssh_pass private_key_file: description: - The PEM encoded private key file", "'elem' not in path: return \"\" for elem in path['elem']:", "= self._decodeVal(_upd['val']) else: cPath[leaf_elem['name']] = self._decodeVal(_upd['val']) return self._dictToList(result) def _simplifyUpdates(self,", "the value is None, no certificate chain is used. ini:", "FQDN or IPv4 address that is used to connect to", "ini: - section: persistent_connection key: log_messages env: - name: ANSIBLE_PERSISTENT_LOG_MESSAGES", ") return None def _connect(self): \"\"\" Establish gRPC connection to", "(JSON or JSON_IETF)\" % self._encoding) self._encoding_value = gnmi_pb2.Encoding.Value(self._encoding) self._connected =", "k, v in elem['key'].items(): tmp += \"[%s=%s]\" % (k, v)", "established. 
If the remote_user is not specified, the connection will", "for certificate and key files ini: - section: grpc_connection key:", "if dKeys: entry['key'] = dKeys mypath.append(entry) return {'elem': mypath} return", "(Capabilities, Get, Set, Subscribe) requirements: - grpcio - protobuf options:", "The PEM encoded root certificate file used to create a", "will run CapabilityRequest for auto-detection ini: - section: grpc_connection key:", "is None it reads the root certificates from a default", "name: ansible_certificate_chain_file certificate_path: description: - Folder to search for certificate", "initially establish a persistent connection. If this value expires before", "# Ignore: Invalid message format pass # ----------------------------------------------------------------------- @ensure_connect def", "default: inventory_hostname vars: - name: ansible_host port: type: int description:", "I(ssl_target_name_override) option to override the TLS subject or subjectAltName (only", "environment settings specific to gRPC - The standard mechanism to", "will be sent and result will be persisted. 
Parameters: None", "Returns: str: Updates received converted into JSON format \"\"\" #", "input: for item in input['subscription']: item['path'] = self._encodeXpath(item['path']) # Extract", "continue # If path_elements has more than just a single", "item in input['subscription']: item['path'] = self._encodeXpath(item['path']) # Extract duration from", "service description: - This gRPC plugin provides methods to interact", "because the TLS validates hostname or IP address to avoid", "gnmi_pb.TypedValue object Returns: (dict): dict using gnmi_pb.TypedValue structure for easy", "re.findall('\\[(.*?)\\]', e) dKeys = dict(x.split('=', 1) for x in eKeys)", "self._login_credentials try: response = self._stub.Set(request, metadata=auth) except grpc.RpcError as e:", "and response received from target device in the ansible log", "if input['mode'] == 'ONCE': raise AnsibleConnectionFailure(\"gNMI ONCE Subscription timed out\")", "connection. If this value expires before the connection to the", "in entry: result = {} update = entry['update'] if 'prefix'", "f: return f.read() except Exception as exc: raise AnsibleConnectionFailure( 'Failed", "= self._login_credentials try: output = [] responses = self._stub.Subscribe(iter([request]), duration,", "format \"\"\" # Remove all input parameters from kwargs that", "without updates break elif 'timestamp' not in entry: # Subscribe", "{self._decodeXpath(u['path']): self._decodeVal(u['val']) for u in update['update']} yield result else: #", "= self._mergeToSingleDict(json_format.MessageToDict(response)['notification']) return json.dumps(output, indent=4).encode() @ensure_connect def gnmiSet(self, *args, **kwargs):", "Set request Encoding that is used for data serialization is", "- name: ANSIBLE_PRIVATE_KEY_FILE vars: - name: ansible_private_key_file root_certificates_file: description: -", "else: # Path Element does not exist => Create prfx[eleName]", "json_format.MessageToJson(response) @ensure_connect def gnmiGet(self, *args, 
**kwargs): \"\"\" Executes a gNMI", "('password', self.get_option('password')) ] host = self.get_option('host') port = self.get_option('port') self._target", "os.path.join(entry, filename) break if os.path.isfile(filename): try: with open(filename, 'rb') as", "= elem['key'] prfx = prfx[eleKey] else: # Path Element does", "not in entry: # Subscribe response, enter update context entry", "root_certificates_file env: - name: ANSIBLE_ROOT_CERTIFICATES_FILE vars: - name: ansible_root_certificates_file certificate_chain_file:", "self._encodeXpath(input['prefix']) if 'subscription' in input: for item in input['subscription']: item['path']", "else: # Path Element does not exist => Create cPath[eleName]", "entry['path'] = self._encodeXpath(entry['path']) entry['val'] = self._encodeVal(entry['val']) request = json_format.ParseDict(input, gnmi_pb2.SetRequest())", "if 'timestamp' in update: result['timestamp'] = datetime.datetime.fromtimestamp(float(update['timestamp'])/1000000000).isoformat() if 'update' in", "object Parameters: val (dict): decoded gnmi_pb.TypedValue object Returns: (ANY): extracted", "implemented suppport for JSON_IETF (preferred) and JSON (fallback). Parameters: prefix", "requested: ALL, CONFIG, STATE prefix (str): Path prefix that is", "if options: if not isinstance(options, dict): raise AnsibleConnectionFailure(\"grpc_channel_options must be", "Element hasn't key => must be dict() if eleName in", "password used to authenticate to the remote device when first", "define gRPC channel options to be used - gRPC reference", "import gnmi_pb2 from ansible.module_utils._text import to_text class Connection(NetworkConnectionBase): \"\"\" Connection", "= self._decodeXpath(item['path']) return json.dumps(output, indent=4).encode() @ensure_connect def gnmiSubscribe(self, *args, **kwargs):", "log file. 
default: False ini: - section: persistent_connection key: log_messages", "[self._encodeXpath(path) for path in input['path']] if 'type' in input: input['type']", "not HAS_PROTOBUF: raise AnsibleError( \"protobuf is required to use gRPC", "Ignore: entry without updates break elif 'timestamp' not in entry:", "in entry['prefix']): prfx_elements = entry['prefix']['elem'] else: prfx_elements = [] for", "request = json_format.ParseDict({'subscribe': input}, gnmi_pb2.SubscribeRequest()) auth = self._login_credentials try: output", "gNMI capabilities converted into JSON format \"\"\" request = gnmi_pb2.CapabilityRequest()", "input: input['path'] = [self._encodeXpath(path) for path in input['path']] if 'type'", "a persistent connection. If this value expires before the connection", "not in prfx: # List entry does not exist =>", "not exist' % filename ) return None def _connect(self): \"\"\"", "gnmi_pb2 from ansible.module_utils._text import to_text class Connection(NetworkConnectionBase): \"\"\" Connection plugin", "SyncResponse is sent after initial update break elif 'update' not", "message format pass # ----------------------------------------------------------------------- @ensure_connect def gnmiCapabilities(self): \"\"\" Executes", "cPath = cPath[eleName] cPath[eleKey] = elem['key'] cPath = cPath[eleKey] else:", "return json.dumps(output, indent=4).encode() @ensure_connect def gnmiSet(self, *args, **kwargs): \"\"\" Executes", "try: with open(filename, 'rb') as f: return f.read() except Exception", "or certs['private_key'] or certs['certificate_chain']: self.queue_message('v', 'Starting secure gRPC connection') creds", "del input['backup'] if 'backup_options' in input: del input['backup_options'] # Adjust", "def gnmiSet(self, *args, **kwargs): \"\"\" Executes a gNMI Set request", "xpath: path_elements = re.split('''/(?=(?:[^\\[\\]]|\\[[^\\[\\]]+\\])*$)''', xpath) for e in path_elements: entry", "self._channel = grpc.insecure_channel(self._target, options=options) 
self.queue_message('v', \"gRPC connection established for user", "keep context pass prfx = result if ('prefix' in entry)", "{sub-plugin name}__{method name} to call a specific method of that", "- The PEM encoded private key file used to authenticate", "# Extract duration from input attributes if 'duration' in input:", "options=options) self.queue_message('v', \"gRPC connection established for user %s to %s\"", "including the underlying transport (TLS). - The plugin binds to", "out, which is okay pass else: raise AnsibleConnectionFailure(\"%s\" % e)", "# Path Element does not exist => Create cPath[eleName] =", "Connection(NetworkConnectionBase): \"\"\" Connection plugin for gRPC To use gRPC connections", "in input['replace']: entry['path'] = self._encodeXpath(entry['path']) entry['val'] = self._encodeVal(entry['val']) request =", "connection to the target host Parameters: None Returns: None \"\"\"", "{} update = entry['update'] if 'prefix' in update: result['prefix'] =", "persistent messages log (see below). - Set C(HTTPS_PROXY) to specify", "raise AnsibleConnectionFailure(\"grpc_channel_options must be a dict\") options = options.items() if", "if 'key' in elem: eleKey = json.dumps(elem['key']) eleName = '___'+eleName", "Path Element hasn't key => must be dict() if eleName", "load gRPC sub-plugins use the method `register_service()` with the name", "access. - Be sure to fully understand the security implications", "replace the objecttree with value result = self._decodeVal(_upd['val']) prfx =", "persistent_command_timeout: type: int description: - Configures the default timeout value", "prfx: # Path Element exists => Change Context prfx =", "reference U(https://grpc.github.io/grpc/core/group__grpc__arg__keys.html) - Provide the I(ssl_target_name_override) option to override the", "requests (Capabilities, Get, Set, Subscribe) requirements: - grpcio - protobuf", "Change Context cPath = cPath[eleName] if eleKey not in cPath:", "to a RPC. 
If the RPC does not return before", "key: command_timeout env: - name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT vars: - name: ansible_command_timeout", "It provide wrappers for gNMI requests (Capabilities, Get, Set, Subscribe)", "ansible_grpc_channel_options grpc_environment: description: - Key/Value pairs (dict) to define environment", "more than just a single entry, # we need to", "input: input['delete'] = [self._encodeXpath(entry) for entry in input['delete']] if 'update'", "isinstance(grpcEnv, dict): raise AnsibleConnectionFailure(\"grpc_environment must be a dict\") for key", "already done. After this, the gNMI stub will be created.", "wrapper available that consumes the attribute name {sub-plugin name}__{method name}", "be deleted Returns: str: SetResponse message converted into JSON format", "elem['key'] cPath = cPath[eleKey] else: # Path Element hasn't key", "of dict): Subscription specification (path, interval, submode) duration (int): timeout,", "name: ANSIBLE_PERSISTENT_LOG_MESSAGES vars: - name: ansible_persistent_log_messages \"\"\" import os import", "env: - name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT vars: - name: ansible_connect_timeout persistent_command_timeout: type:", "in path.split(':'): if os.path.isfile(os.path.join(entry, filename)): filename = os.path.join(entry, filename) break", "= result continue # If path_elements has more than just", "exist => Create cPath[eleName] = {} cPath = cPath[eleName] cPath[eleKey]", "that needs to be created/updated leaf_elem = path_elements[-1] if 'key'", "closed. default: 300 ini: - section: persistent_connection key: command_timeout env:", "persistent_connection key: connect_timeout env: - name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT vars: - name:", "when trying to initially establish a persistent connection. 
If this", "Create cPath[eleName] = {} cPath = cPath[eleName] cPath[eleKey] = elem['key']", "(str): path string using XPATH syntax Returns: (dict): path dict", "= True except ImportError: HAS_GRPC = False try: from google", "('elem' in entry['prefix']): prfx_elements = entry['prefix']['elem'] else: prfx_elements = []", "response.supported_encodings: self._encoding = 'JSON_IETF' elif gnmi_pb2.Encoding.Value('JSON') in response.supported_encodings: self._encoding =", "self._decodeVal(_upd['val']) return self._dictToList(result) def _simplifyUpdates(self, rawData): for msg in rawData:", "= type DOCUMENTATION = \"\"\" --- author: - \"<NAME> (@HansThienpondt)\"", "be encoded as gnmi_pb.TypedValue object Returns: (dict): dict using gnmi_pb.TypedValue", "Returns: str: GetResponse message converted into JSON format \"\"\" #", "gRPC including the underlying transport (TLS). - The plugin binds", "CapabilityRequest for auto-detection ini: - section: grpc_connection key: gnmi_encoding env:", "for JSON_IETF (preferred) and JSON (fallback). Parameters: prefix (str): Path", "specification (path, interval, submode) duration (int): timeout, to stop receiving", "input parameters to match specification for gNMI SetRequest if 'prefix'", "json.dumps(leaf_elem['key']) eleName = '___'+leaf_elem['name'] if eleName not in cPath: cPath[eleName]", "path_elements = prfx_elements cPath = result else: # No path", "license # SPDX-License-Identifier: BSD-3-Clause # from __future__ import (absolute_import, division,", "used to create a SSL-enabled channel. If the value is", "is first established. 
If the remote_user is not specified, the", "if not filename.startswith('/'): for entry in path.split(':'): if os.path.isfile(os.path.join(entry, filename)):", "self._target = host if port is None else '%s:%d' %", "is okay pass else: raise AnsibleConnectionFailure(\"%s\" % e) return json.dumps(output,", "If path_elements has more than just a single entry, #", "pairs (dict) to define environment settings specific to gRPC -", "in case the default TLS ciphers do not match what", "name {sub-plugin name}__{method name} to call a specific method of", "Create prfx[eleName] = {} prfx = prfx[eleName] for _upd in", "gRPC connection to remote node and create gNMI stub. This", "= self.get_option('persistent_command_timeout') certs = {} certs['root_certificates'] = self.readFile('root_certificates_file') certs['certificate_chain'] =", "json_format.ParseDict(input, gnmi_pb2.SetRequest()) auth = self._login_credentials try: response = self._stub.Set(request, metadata=auth)", "%s does not exist' % filename ) return None def", "to be registered. After loading the sub-plugin, Ansible modules can", "Change Context prfx = prfx[eleName] if eleKey not in prfx:", "return None def _connect(self): \"\"\" Establish gRPC connection to remote", "add code for log forwarding of gRPC related log messages", "file %s does not exist' % filename ) return None", "= re.split('''/(?=(?:[^\\[\\]]|\\[[^\\[\\]]+\\])*$)''', xpath) for e in path_elements: entry = {'name':", "entry without updates break elif 'timestamp' not in entry: #", "False ini: - section: persistent_connection key: log_messages env: - name:", "Key/Value pairs (dict) to define gRPC channel options to be", "connection. 
If None only the C(host) part will be used.", "'timestamp' in update: result['timestamp'] = datetime.datetime.fromtimestamp(float(update['timestamp'])/1000000000).isoformat() if 'update' in update:", "optionName(str): used to read filename from options Returns: File content", "to read filename from options Returns: File content Raises: AnsibleConnectionFailure:", "specified, the connection will use the username of the logged", "Ansible modules can call methods provided by that sub-plugin. There", "(str): Type of data that is requested: ALL, CONFIG, STATE", "for response in responses] output = self._mergeToSingleDict(responses) else: for update", "for e in path_elements: entry = {'name': e.split(\"[\", 1)[0]} eKeys", "(list): List of paths (str) to be captured Returns: str:", "configured from the CLI via the C(--user) or C(-u) options.", "persistent connection. If this value expires before the connection to", "elif prfx_elements: path_elements = prfx_elements cPath = result else: #", "path_elements = re.split('''/(?=(?:[^\\[\\]]|\\[[^\\[\\]]+\\])*$)''', xpath) for e in path_elements: entry =", "the remote device capabilities. This gNMI plugin has implemented suppport", "Create cPath[eleName] = {} cPath = cPath[eleName] # The last", "= [self._encodeXpath(path) for path in input['path']] if 'type' in input:", "in responses] output = self._mergeToSingleDict(responses) else: for update in self._simplifyUpdates(responses):", "Provide the I(ssl_target_name_override) option to override the TLS subject or", "HAS_PROTOBUF = True except ImportError: HAS_PROTOBUF = False from ansible.errors", "the gNMI stub will be created. 
To get visibility about", "stub') self._stub = gnmi_pb2.gNMIStub(self._channel) self._encoding = self.get_option('gnmi_encoding') if not self._encoding:", "method `register_service()` with the name of the sub-plugin to be", "= base64.b64encode(json.dumps(data).encode()) if self._encoding == 'JSON_IETF': return {'jsonIetfVal': value} else:", "% e) return json_format.MessageToJson(response) @ensure_connect def gnmiGet(self, *args, **kwargs): \"\"\"", "# ----------------------------------------------------------------------- @ensure_connect def gnmiCapabilities(self): \"\"\" Executes a gNMI Capabilities", "the remote device, a gNM CapabilityRequest will be sent and", "encoding found (JSON or JSON_IETF)\") else: if self._encoding not in", "('elem' in _upd['path']): path_elements = _upd['path']['elem'] cPath = prfx elif", "current environment, but thats ok pass self._login_credentials = [ ('username',", "result = {} update = entry['update'] if 'prefix' in update:", "a dict\") for key in grpcEnv: if grpcEnv[key]: os.environ[key] =", "self.get_option('host') port = self.get_option('port') self._target = host if port is", "from dict representation converted from gnmi_pb.Path object Parameters: path (dict):", "host FQDN or IP address to establish gRPC connection. 
default:", "- name: ansible_root_certificates_file certificate_chain_file: description: - The PEM encoded certificate", "key: connect_timeout env: - name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT vars: - name: ansible_connect_timeout", "# Adjust input parameters to match specification for gNMI SetRequest", "e: raise AnsibleConnectionFailure(\"%s\" % e) output = self._mergeToSingleDict(json_format.MessageToDict(response)['notification']) return json.dumps(output,", "= {} prfx = prfx[eleName] prfx[eleKey] = elem['key'] prfx =", "to authenticate to the remote device when first establishing the", "prfx[eleName] if eleKey not in prfx: # List entry does", "the command executed and response received from target device in", "else: for update in self._simplifyUpdates(responses): output.append(update) except grpc.RpcError as e:", "must be a dict\") options = options.items() if certs['root_certificates'] or", "proxy settings (if needed). - Set C(GRPC_SSL_CIPHER_SUITES) in case the", "self._encoding == 'JSON_IETF': return {'jsonIetfVal': value} else: return {'jsonVal': value}", "the remote device is completed, the connection will fail. 
default:", "provides a persistent communication channel to remote devices using gRPC", "AnsibleConnectionFailure(\"Incompatible encoding '%s' requested (JSON or JSON_IETF)\" % self._encoding) self._encoding_value", "_decodeVal(self, val): \"\"\" Decodes value from dict representation converted from", "prfx[eleName] prfx[eleKey] = elem['key'] prfx = prfx[eleKey] else: # Path", "last entry of path_elements is the leaf element # that", "self._decodeXpath(output['prefix']) for item in output['response']: item['path'] = self._decodeXpath(item['path']) return json.dumps(output,", "port is None else '%s:%d' % (host, port) self._timeout =", "not passed to the client process that establishes the gRPC", "elem: for k, v in elem['key'].items(): tmp += \"[%s=%s]\" %", "+ \"Please run 'pip install protobuf'\" ) if not HAS_GRPC:", "methods to interact with the gNMI service. - OpenConfig gNMI", "# Backup options are not to be used in gNMI", "= [self._dictToList(val) if isinstance(val, dict) else val for val in", "= False try: from google import protobuf HAS_PROTOBUF = True", "= [json_format.MessageToDict(response) for response in responses] output = self._mergeToSingleDict(responses) else:", "grpc_connection key: root_certificates_file env: - name: ANSIBLE_ROOT_CERTIFICATES_FILE vars: - name:", "to remote devices using gRPC including the underlying transport (TLS).", "This gRPC plugin provides methods to interact with the gNMI", "data serialization is automatically determined based on the remote device", "(STREAM, ONCE) subscription (list of dict): Subscription specification (path, interval,", "default: 300 ini: - section: persistent_connection key: command_timeout env: -", "metadata=self._login_credentials) self.queue_message('v', 'CapabilityRequest() succeeded') self._gnmiVersion = response.gNMI_version self._yangModels = response.supported_models", "of subscription (STREAM, ONCE) subscription (list of dict): Subscription specification", "To load gRPC sub-plugins use the method 
`register_service()` with the", "be dict() if eleName in prfx: # Path Element exists", "None def _connect(self): \"\"\" Establish gRPC connection to remote node", "input attributes if 'duration' in input: duration = input['duration'] del", "xpath (str): path string using XPATH syntax Returns: (dict): path", "entry['update']: if 'val' not in _upd: # requested path without", "PEM encoded root certificate file used to create a SSL-enabled", "the specified subcontext for elem in path_elements[:-1]: eleName = elem['name']", "device when first establishing the grpc connection. ini: - section:", "gnmi_pb.TypedValue object Returns: (ANY): extracted data \"\"\" if 'jsonIetfVal' in", "gNMI Get request Encoding that is used for data serialization", "element # that needs to be created/updated leaf_elem = path_elements[-1]", "None else '%s:%d' % (host, port) self._timeout = self.get_option('persistent_command_timeout') certs", "in Ansible cannot be used, because those environment settings are", "from the subject name that is provided in the host", "- The plugin binds to the gNMI gRPC service. It", "exist' % filename ) return None def _connect(self): \"\"\" Establish", "Set C(HTTPS_PROXY) to specify your proxy settings (if needed). 
-", "input: for entry in input['replace']: entry['path'] = self._encodeXpath(entry['path']) entry['val'] =", "if 'prefix' in output: output['prefix'] = self._decodeXpath(output['prefix']) for item in", "elem in path['elem']: tmp = elem['name'] if 'key' in elem:", "=> Create prfx[eleName] = {} prfx = prfx[eleName] prfx[eleKey] =", "that is used updates_only (bool): Send only updates to initial", "entry['val'] = self._encodeVal(entry['val']) if 'replace' in input: for entry in", "if not isinstance(options, dict): raise AnsibleConnectionFailure(\"grpc_channel_options must be a dict\")", "env: - name: ANSIBLE_REMOTE_USER vars: - name: ansible_user password: description:", "vars: - name: ansible_certificate_path gnmi_encoding: description: - Encoding used for", "ansible_port remote_user: description: - The username used to authenticate to", "those environment settings are not passed to the client process", "after issuing a call to a RPC. If the RPC", "HAS_GRPC = False try: from google import protobuf HAS_PROTOBUF =", "connection. - Set C(GRPC_VERBOSITY) and C(GRPC_TRACE) to setup gRPC logging.", "host if port is None else '%s:%d' % (host, port)", "(list of dict): Subscription specification (path, interval, submode) duration (int):", "at runtime. ini: - section: grpc_connection key: root_certificates_file env: -", "self.get_option('certificate_path') if not path: path = '/etc/ssl:/etc/ssl/certs:/etc/ca-certificates' filename = self.get_option(optionName)", "command executed and response received from target device in the", "the device is different from the subject name that is", "_upd) and ('elem' in _upd['path']): path_elements = _upd['path']['elem'] cPath =", "self._yangModels = response.supported_models if gnmi_pb2.Encoding.Value('JSON_IETF') in response.supported_encodings: self._encoding = 'JSON_IETF'", "we need to create/navigate to the specified subcontext for elem", "the required gRPC service(s) must be loaded. 
To load gRPC", "response.gNMI_version self._yangModels = response.supported_models if gnmi_pb2.Encoding.Value('JSON_IETF') in response.supported_encodings: self._encoding =", "cPath[leaf_elem['name']] = self._decodeVal(_upd['val']) return self._dictToList(result) def _simplifyUpdates(self, rawData): for msg", "grpcio'\" ) self._connected = False def readFile(self, optionName): \"\"\" Reads", "= self._dictToList(aDict[key]) return aDict def _mergeToSingleDict(self, rawData): result = {}", "location chosen by gRPC at runtime. ini: - section: grpc_connection", "that listens for connections when establishing the gRPC connection. If", "to create a SSL-enabled channel. If the value is None,", "the gNMI gRPC service. It provide wrappers for gNMI requests", "for gNMI API service description: - This gRPC plugin provides", "else: if self._encoding not in ['JSON_IETF', 'JSON']: raise AnsibleConnectionFailure(\"Incompatible encoding", "of the logged in user. - Can be configured from", "object Parameters: data (ANY): data to be encoded as gnmi_pb.TypedValue", "awaiting a response after issuing a call to a RPC.", "persistent_connection key: command_timeout env: - name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT vars: - name:", "try: response = self._stub.Get(request, metadata=auth) except grpc.RpcError as e: raise", "name: ansible_root_certificates_file certificate_chain_file: description: - The PEM encoded certificate chain", "the C(--user) or C(-u) options. ini: - section: defaults key:", "(XPATH syntax) paths (list): List of paths (str) to be", "self._encodeXpath(entry['path']) entry['val'] = self._encodeVal(entry['val']) request = json_format.ParseDict(input, gnmi_pb2.SetRequest()) auth =", "= self._encodeXpath(input['prefix']) if 'delete' in input: input['delete'] = [self._encodeXpath(entry) for", "to the gNMI gRPC service. 
It provide wrappers for gNMI", "is required to be set to a file path with", "Encodes XPATH to dict representation that allows conversion to gnmi_pb.Path", "# Ignore: SyncResponse is sent after initial update break elif", "= self._decodeXpath(output['prefix']) for item in output['response']: item['path'] = self._decodeXpath(item['path']) return", "representation that allows conversion to gnmi_pb.TypedValue object Parameters: data (ANY):", "options: host: description: - Target host FQDN or IP address", "filename) break if os.path.isfile(filename): try: with open(filename, 'rb') as f:", "entry and entry['syncResponse']: # Ignore: SyncResponse is sent after initial", "entry of path_elements is the leaf element # that needs", "- name: ansible_connect_timeout persistent_command_timeout: type: int description: - Configures the", "does not exist => Create prfx[eleKey] = elem['key'] prfx =", "connection type. \" + \"Please run 'pip install protobuf'\" )", "to read cert/keys file %s: %s' % (filename, exc) )", "used). The option must be provided in cases, when the", "ANSIBLE_PRIVATE_KEY_FILE vars: - name: ansible_private_key_file root_certificates_file: description: - The PEM", "persistent_connect_timeout: type: int description: - Configures, in seconds, the amount", "'duration' in input: duration = input['duration'] del input['duration'] else: duration", "easy conversion \"\"\" value = base64.b64encode(json.dumps(data).encode()) if self._encoding == 'JSON_IETF':", "datetime try: import grpc HAS_GRPC = True except ImportError: HAS_GRPC", "Invalid message format pass # ----------------------------------------------------------------------- @ensure_connect def gnmiCapabilities(self): \"\"\"", "amount of time to wait when trying to initially establish", "target device in the ansible log file. For this option", "no certificate chain is used. 
ini: - section: grpc_connection key:", "log_messages env: - name: ANSIBLE_PERSISTENT_LOG_MESSAGES vars: - name: ansible_persistent_log_messages \"\"\"", "default location chosen by gRPC at runtime. ini: - section:", "else: raise AnsibleConnectionFailure(\"Ansible gNMI plugin does not support encoding for", "else: raise AnsibleConnectionFailure(\"No compatible supported encoding found (JSON or JSON_IETF)\")", "settings (if needed). - Set C(GRPC_SSL_CIPHER_SUITES) in case the default", "- Configures the default timeout value (in seconds) when awaiting", "stop receiving qos (int): DSCP marking that is used updates_only", "from google.protobuf import json_format from ansible_collections.nokia.grpc.plugins.connection.pb import gnmi_pb2 from ansible.module_utils._text", "os import re import json import base64 import datetime try:", "in val: return json.loads(base64.b64decode(val['jsonIetfVal'])) elif 'jsonVal' in val: return json.loads(base64.b64decode(val['jsonVal']))", "result else: # No path at all, replace the objecttree", "'timestamp' not in entry: # Subscribe response, enter update context", "grpc_connection key: certificate_path env: - name: ANSIBLE_CERTIFICATE_PATH vars: - name:", "as exc: raise AnsibleConnectionFailure( 'Failed to read cert/keys file %s:", "# ----------------------------------------------------------------------- def _encodeXpath(self, xpath='/'): \"\"\" Encodes XPATH to dict", "raise AnsibleConnectionFailure(\"%s\" % e) output = self._mergeToSingleDict(json_format.MessageToDict(response)['notification']) return json.dumps(output, indent=4).encode()", "CapabilityRequest()') request = gnmi_pb2.CapabilityRequest() response = self._stub.Capabilities(request, metadata=self._login_credentials) self.queue_message('v', 'CapabilityRequest()", "to dict representation that allows conversion to gnmi_pb.TypedValue object Parameters:", "specify your proxy settings (if needed). 
- Set C(GRPC_SSL_CIPHER_SUITES) in", "the sub-plugin, Ansible modules can call methods provided by that", "by logging sensitive information in log file. default: False ini:", "Get response, keep context pass prfx = result if ('prefix'", "input = dict(filter(lambda x: x[1], kwargs.items())) # Backup options are", "= self._encodeVal(entry['val']) if 'replace' in input: for entry in input['replace']:", "method of that sub-plugin. \"\"\" transport = \"nokia.grpc.gnmi\" has_pipelining =", "is added to all paths (XPATH syntax) update (list): Path/Value", "with the gNMI service. - OpenConfig gNMI specification https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md -", "= str(grpcEnv[key]) else: try: del os.environ[key] except KeyError: # no", "timeout, to stop receiving qos (int): DSCP marking that is", "AnsibleError from ansible.plugins.connection import NetworkConnectionBase from ansible.plugins.connection import ensure_connect from", "= cPath[eleKey] else: # Path Element does not exist =>", "prefix that is added to all paths (XPATH syntax) mode", "if not HAS_PROTOBUF: raise AnsibleError( \"protobuf is required to use", "self._stub.Subscribe(iter([request]), duration, metadata=auth) if input['mode'] == 'ONCE': responses = [json_format.MessageToDict(response)", "= to_text(kwargs.get(\"task_uuid\", \"\")) if not HAS_PROTOBUF: raise AnsibleError( \"protobuf is", "to override the TLS subject or subjectAltName (only in the", "= grpc.insecure_channel(self._target, options=options) self.queue_message('v', \"gRPC connection established for user %s", "% (filename, exc) ) else: raise AnsibleConnectionFailure( 'Cert/keys file %s", "to the remote device when first establishing the grpc connection.", "the gRPC connection. 
- Set C(GRPC_VERBOSITY) and C(GRPC_TRACE) to setup", "gnmi_pb2.Encoding.Value('JSON_IETF') in response.supported_encodings: self._encoding = 'JSON_IETF' elif gnmi_pb2.Encoding.Value('JSON') in response.supported_encodings:", "= cPath[eleName] # The last entry of path_elements is the", "datetime.datetime.fromtimestamp(float(output['timestamp'])/1000000000).isoformat() if 'prefix' in output: output['prefix'] = self._decodeXpath(output['prefix']) for item", "= json_format.MessageToDict(response) output['timestamp'] = datetime.datetime.fromtimestamp(float(output['timestamp'])/1000000000).isoformat() if 'prefix' in output: output['prefix']", "- grpcio - protobuf options: host: description: - Target host", "Parameters: prefix (str): Path prefix that is added to all", "'JSON_IETF' elif gnmi_pb2.Encoding.Value('JSON') in response.supported_encodings: self._encoding = 'JSON' else: raise", "cPath: cPath[eleName] = {} cPath = cPath[eleName] cPath[eleKey] = self._decodeVal(_upd['val'])", "if ('prefix' in entry) and ('elem' in entry['prefix']): prfx_elements =", "from ansible.module_utils._text import to_text class Connection(NetworkConnectionBase): \"\"\" Connection plugin for", "= prfx[eleKey] else: # Path Element hasn't key => must", "added to all paths (XPATH syntax) update (list): Path/Value pairs", "create a SSL-enabled channel. If the value is None, no", "in the host certificate. 
This is needed, because the TLS", "# No path at all, replace the objecttree with value", "subscription (list of dict): Subscription specification (path, interval, submode) duration", "grpcEnv[key]: os.environ[key] = str(grpcEnv[key]) else: try: del os.environ[key] except KeyError:", "=> Create cPath[eleName] = {} cPath = cPath[eleName] cPath[eleKey] =", "elif ('path' in _upd) and ('elem' in _upd['path']): path_elements =", "remote_user env: - name: ANSIBLE_REMOTE_USER vars: - name: ansible_user password:", "return grpcEnv = self.get_option('grpc_environment') or {} if not isinstance(grpcEnv, dict):", "gNMI SetRequest if 'prefix' in input: input['prefix'] = self._encodeXpath(input['prefix']) if", "- name: ansible_gnmi_encoding grpc_channel_options: description: - Key/Value pairs (dict) to", "prfx = prfx[eleName] for _upd in entry['update']: if 'val' not", "name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT vars: - name: ansible_command_timeout persistent_log_messages: type: boolean description:", "= {} prfx = prfx[eleName] for _upd in entry['update']: if", "= xpath.strip('\\t\\n\\r /') if xpath: path_elements = re.split('''/(?=(?:[^\\[\\]]|\\[[^\\[\\]]+\\])*$)''', xpath) for", "val (dict): decoded gnmi_pb.TypedValue object Returns: (ANY): extracted data \"\"\"", "the C(host) part will be used. ini: - section: defaults", "Returns: File content Raises: AnsibleConnectionFailure: file does not exist or", "% filename ) return None def _connect(self): \"\"\" Establish gRPC", "(dict) to define gRPC channel options to be used -", "parameters from kwargs that are not set input = dict(filter(lambda", "self._mergeToSingleDict(responses) else: for update in self._simplifyUpdates(responses): output.append(update) except grpc.RpcError as", "entry['update'] else: # Get response, keep context pass prfx =", "IP address to avoid man-in-the-middle attacks. vars: - name: ansible_grpc_channel_options", "(TLS). 
- The plugin binds to the gNMI gRPC service.", "name: ANSIBLE_REMOTE_USER vars: - name: ansible_user password: description: - Configures", "do not match what is offered by the gRPC server.", "parameters to match specification for gNMI SubscribeRequest if 'mode' in", "of gRPC related log messages to the persistent messages log", "'Failed to read cert/keys file %s: %s' % (filename, exc)", "TLS validates hostname or IP address to avoid man-in-the-middle attacks.", "True except ImportError: HAS_GRPC = False try: from google import", "- name: ansible_grpc_environment persistent_connect_timeout: type: int description: - Configures, in", "self._timeout = self.get_option('persistent_command_timeout') certs = {} certs['root_certificates'] = self.readFile('root_certificates_file') certs['certificate_chain']", "Create cPath[eleKey] = elem['key'] cPath = cPath[eleKey] else: # Path", "the timeout exceed, an error is generated and the connection", "all, replace the objecttree with value result = self._decodeVal(_upd['val']) prfx", "self.readFile('certificate_chain_file') certs['private_key'] = self.readFile('private_key_file') options = self.get_option('grpc_channel_options') if options: if", "case the default TLS ciphers do not match what is", "else: prfx_elements = [] for elem in prfx_elements: eleName =", "= [self._encodeXpath(entry) for entry in input['delete']] if 'update' in input:", "filename = self.get_option(optionName) if filename: if filename.startswith('~'): filename = os.path.expanduser(filename)", "path: path = '/etc/ssl:/etc/ssl/certs:/etc/ca-certificates' filename = self.get_option(optionName) if filename: if", "- section: persistent_connection key: command_timeout env: - name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT vars:", "--- author: - \"<NAME> (@HansThienpondt)\" - \"<NAME> (@wisotzky)\" connection: gnmi", "ini: - section: persistent_connection key: connect_timeout env: - name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT", "except KeyError: # no such setting in current 
environment, but", "name: ansible_grpc_environment persistent_connect_timeout: type: int description: - Configures, in seconds,", "CONFIG, STATE prefix (str): Path prefix that is added to", "update pass elif 'update' in entry: result = {} update", "self._dictToList(result) def _simplifyUpdates(self, rawData): for msg in rawData: entry =", "{} cPath = cPath[eleName] cPath[eleKey] = elem['key'] cPath = cPath[eleKey]", "not specified, the connection will use the username of the", "`register_service()` with the name of the sub-plugin to be registered.", "= dict(filter(lambda x: x[1], kwargs.items())) # Backup options are not", "no such setting in current environment, but thats ok pass", "as e: raise AnsibleConnectionFailure(\"%s\" % e) output = json_format.MessageToDict(response) output['timestamp']", "type: boolean description: - This flag will enable logging the", "is generated and the connection is closed. default: 300 ini:", "if not isinstance(grpcEnv, dict): raise AnsibleConnectionFailure(\"grpc_environment must be a dict\")", "(self.get_option('remote_user'), self._target)) self.queue_message('v', 'Creating gNMI stub') self._stub = gnmi_pb2.gNMIStub(self._channel) self._encoding", "that is requested: ALL, CONFIG, STATE prefix (str): Path prefix", "path: return \"\" for elem in path['elem']: tmp = elem['name']", "default: 5 ini: - section: persistent_connection key: connect_timeout env: -", "rawData: if 'syncResponse' in entry and entry['syncResponse']: # Ignore: SyncResponse", "provided, will run CapabilityRequest for auto-detection ini: - section: grpc_connection", "or read excpetions \"\"\" path = self.get_option('certificate_path') if not path:", "{} def _decodeXpath(self, path): \"\"\" Decodes XPATH from dict representation", "entry: # Ignore: entry without updates break elif 'timestamp' not", "log file. 
For this option to work the 'log_path' ansible", "required to be set to a file path with write", "in _upd['path']): path_elements = _upd['path']['elem'] cPath = prfx elif prfx_elements:", "before the connection to the remote device is completed, the", "device capabilities. This gNMI plugin has implemented suppport for JSON_IETF", "private_key_file: description: - The PEM encoded private key file used", "Encoding that is used for data serialization is automatically determined", "gRPC related log messages to the persistent messages log (see", "elem in prfx_elements: eleName = elem['name'] if 'key' in elem:", "(preferred) and JSON (fallback). Parameters: prefix (str): Path prefix that", "to be encoded as gnmi_pb.TypedValue object Returns: (dict): dict using", "3 Clause license # SPDX-License-Identifier: BSD-3-Clause # from __future__ import", "will fail. default: 5 ini: - section: persistent_connection key: connect_timeout", "server. vars: - name: ansible_grpc_environment persistent_connect_timeout: type: int description: -", "be created. To get visibility about gNMI capabilities of the", "for log forwarding of gRPC related log messages to the", "Subscription timed out\") else: # RPC timed out, which is", "(k, v) result.append(tmp) return '/'.join(result) def _encodeVal(self, data): \"\"\" Encodes", "super(Connection, self).close() # ----------------------------------------------------------------------- def _encodeXpath(self, xpath='/'): \"\"\" Encodes XPATH", "(ANY): extracted data \"\"\" if 'jsonIetfVal' in val: return json.loads(base64.b64decode(val['jsonIetfVal']))", "Path Element exists => Change Context prfx = prfx[eleName] if", "prfx[eleKey] = elem['key'] prfx = prfx[eleKey] else: # Path Element", "input['mode'] == 'ONCE': raise AnsibleConnectionFailure(\"gNMI ONCE Subscription timed out\") else:", "with write access. - Be sure to fully understand the", "required to use gRPC connection type. 
\" + \"Please run", "- name: ANSIBLE_REMOTE_PORT vars: - name: ansible_port remote_user: description: -", "= input['duration'] del input['duration'] else: duration = 20 request =", "in update['update']} yield result else: # Ignore: Invalid message format", "section: defaults key: remote_user env: - name: ANSIBLE_REMOTE_USER vars: -", "= gnmi_pb2.gNMIStub(self._channel) self._encoding = self.get_option('gnmi_encoding') if not self._encoding: self.queue_message('v', 'Run", "Path prefix that is added to all paths (XPATH syntax)", "with the name of the sub-plugin to be registered. After", "a file path with write access. - Be sure to", "= prfx[eleName] prfx[eleKey] = elem['key'] prfx = prfx[eleKey] else: #", "only the C(host) part will be used. ini: - section:", "certificate chain file used to create a SSL-enabled channel. If", "and the connection is closed. default: 300 ini: - section:", "grpcEnv = self.get_option('grpc_environment') or {} if not isinstance(grpcEnv, dict): raise", "into JSON format \"\"\" request = gnmi_pb2.CapabilityRequest() auth = self._login_credentials", "cPath: # Path Element exists => Change Context cPath =", "received converted into JSON format \"\"\" # Remove all input", "either JSON or JSON_IETF - If not provided, will run", "'syncResponse' in entry: # Ignore: SyncResponse is sent after initial", "in input: for item in input['subscription']: item['path'] = self._encodeXpath(item['path']) #", "duration, metadata=auth) if input['mode'] == 'ONCE': responses = [json_format.MessageToDict(response) for", "self._login_credentials = [ ('username', self.get_option('remote_user')), ('password', self.get_option('password')) ] host =", "on the remote device capabilities. 
This gNMI plugin has implemented", "kwargs.items())) # Adjust input parameters to match specification for gNMI", "= gnmi_pb2.CapabilityRequest() response = self._stub.Capabilities(request, metadata=self._login_credentials) self.queue_message('v', 'CapabilityRequest() succeeded') self._gnmiVersion", "created. To get visibility about gNMI capabilities of the remote", "key: gnmi_encoding env: - name: ANSIBLE_GNMI_ENCODING vars: - name: ansible_gnmi_encoding", "'JSON' else: raise AnsibleConnectionFailure(\"No compatible supported encoding found (JSON or", "path in input['path']] if 'type' in input: input['type'] = input['type'].upper()", "= self._stub.Set(request, metadata=auth) except grpc.RpcError as e: raise AnsibleConnectionFailure(\"%s\" %", "update: result['timestamp'] = datetime.datetime.fromtimestamp(float(update['timestamp'])/1000000000).isoformat() if 'update' in update: result['values'] =", "output = json_format.MessageToDict(response) output['timestamp'] = datetime.datetime.fromtimestamp(float(output['timestamp'])/1000000000).isoformat() if 'prefix' in output:", "default TLS ciphers do not match what is offered by", "if os.path.isfile(filename): try: with open(filename, 'rb') as f: return f.read()", "provided by that sub-plugin. There is a wrapper available that", "except ImportError: HAS_PROTOBUF = False from ansible.errors import AnsibleConnectionFailure, AnsibleError", "in seconds, the amount of time to wait when trying", "if 'prefix' in input: input['prefix'] = self._encodeXpath(input['prefix']) if 'path' in", "first establishing the grpc connection. ini: - section: grpc_connection key:", "attacks. 
vars: - name: ansible_grpc_channel_options grpc_environment: description: - Key/Value pairs", "the active gRPC connection to the target host Parameters: None", "entry['prefix']['elem'] else: prfx_elements = [] for elem in prfx_elements: eleName", "to the remote device is completed, the connection will fail.", "such setting in current environment, but thats ok pass self._login_credentials", "dict using gnmi_pb2.Path structure for easy conversion \"\"\" mypath =", "establishing the gRPC connection. vars: - name: ansible_password - name:", "- \"<NAME> (@wisotzky)\" connection: gnmi short_description: Provides a persistent gRPC", "input['mode'] == 'ONCE': responses = [json_format.MessageToDict(response) for response in responses]", "(preferred) and JSON (fallback). Parameters: type (str): Type of data", "gNMI communication - Must be either JSON or JSON_IETF -", "= self.get_option(optionName) if filename: if filename.startswith('~'): filename = os.path.expanduser(filename) if", "the name of the sub-plugin to be registered. After loading", "if 'type' in input: input['type'] = input['type'].upper() input['encoding'] = self._encoding_value", "default timeout value (in seconds) when awaiting a response after", "all input parameters from kwargs that are not set input", "converted from gnmi_pb.TypedValue object Parameters: val (dict): decoded gnmi_pb.TypedValue object", "- name: ansible_persistent_log_messages \"\"\" import os import re import json", "prfx[eleName] = {} prfx = prfx[eleName] for _upd in entry['update']:", "@ensure_connect def gnmiCapabilities(self): \"\"\" Executes a gNMI Capabilities request Parameters:", "the TLS validates hostname or IP address to avoid man-in-the-middle", "os.path.expanduser(filename) if not filename.startswith('/'): for entry in path.split(':'): if os.path.isfile(os.path.join(entry,", "will be persisted. 
Parameters: None Returns: None \"\"\" if self.connected:", "description: - The PEM encoded private key file used to", "self._encoding not in ['JSON_IETF', 'JSON']: raise AnsibleConnectionFailure(\"Incompatible encoding '%s' requested", "= input['mode'].upper() input['encoding'] = self._encoding_value if 'prefix' in input: input['prefix']", "- name: ANSIBLE_ROOT_CERTIFICATES_FILE vars: - name: ansible_root_certificates_file certificate_chain_file: description: -", "return json.dumps(output, indent=4).encode() @ensure_connect def gnmiSubscribe(self, *args, **kwargs): \"\"\" Executes", "not path: path = '/etc/ssl:/etc/ssl/certs:/etc/ca-certificates' filename = self.get_option(optionName) if filename:", "and JSON (fallback). Parameters: type (str): Type of data that", "Adjust input parameters to match specification for gNMI SubscribeRequest if", "json.dumps(output, indent=4).encode() @ensure_connect def gnmiSet(self, *args, **kwargs): \"\"\" Executes a", "auth = self._login_credentials try: response = self._stub.Set(request, metadata=auth) except grpc.RpcError", "try: from google import protobuf HAS_PROTOBUF = True except ImportError:", "for user %s to %s\" % (self.get_option('remote_user'), self._target)) self.queue_message('v', 'Creating", "grpc.RpcError as e: raise AnsibleConnectionFailure(\"%s\" % e) return json_format.MessageToJson(response) @ensure_connect", "Ansible one (or more) sub-plugin(s) for the required gRPC service(s)", "= self.get_option('grpc_environment') or {} if not isinstance(grpcEnv, dict): raise AnsibleConnectionFailure(\"grpc_environment", "%s: %s' % (filename, exc) ) else: raise AnsibleConnectionFailure( 'Cert/keys", "(ANY): data to be encoded as gnmi_pb.TypedValue object Returns: (dict):", "done. After this, the gNMI stub will be created. 
To", "ANSIBLE_PERSISTENT_LOG_MESSAGES vars: - name: ansible_persistent_log_messages \"\"\" import os import re", "command_timeout env: - name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT vars: - name: ansible_command_timeout persistent_log_messages:", "before the timeout exceed, an error is generated and the", "if filename: if filename.startswith('~'): filename = os.path.expanduser(filename) if not filename.startswith('/'):", "prefix that is added to all paths (XPATH syntax) paths", "rawData): for msg in rawData: entry = json_format.MessageToDict(msg) if 'syncResponse'", "is completed, the connection will fail. default: 5 ini: -", "in cPath: # List entry does not exist => Create", "connect to the device is different from the subject name", "\"\"\" value = base64.b64encode(json.dumps(data).encode()) if self._encoding == 'JSON_IETF': return {'jsonIetfVal':", "in response.supported_encodings: self._encoding = 'JSON_IETF' elif gnmi_pb2.Encoding.Value('JSON') in response.supported_encodings: self._encoding", "None only the C(host) part will be used. ini: -", "aDict.keys(): if key.startswith('___'): aDict[key[3:]] = [self._dictToList(val) if isinstance(val, dict) else", "timed out, which is okay pass else: raise AnsibleConnectionFailure(\"%s\" %", "implications of enabling this option as it could create a", "file. For this option to work the 'log_path' ansible configuration", "sub-plugins use the method `register_service()` with the name of the", "your proxy settings (if needed). - Set C(GRPC_SSL_CIPHER_SUITES) in case", "env: - name: ANSIBLE_PERSISTENT_LOG_MESSAGES vars: - name: ansible_persistent_log_messages \"\"\" import", "stub. 
This method will establish the persistent gRPC connection, if", "cPath[eleKey] else: # Path Element does not exist => Create", "self.queue_message('v', 'CapabilityRequest() succeeded') self._gnmiVersion = response.gNMI_version self._yangModels = response.supported_models if", "\"\"\" Reads a binary certificate/key file Parameters: optionName(str): used to", "key file used to authenticate to the remote device when", "support encoding for value: %s\" % json.dumps(val)) def _dictToList(self, aDict):", "plugin binds to the gNMI gRPC service. It provide wrappers", "in update: result['values'] = {self._decodeXpath(u['path']): self._decodeVal(u['val']) for u in update['update']}", "options. ini: - section: defaults key: remote_user env: - name:", "eleKey not in prfx: # List entry does not exist", "if eleName in prfx: # Path Element exists => Change", "gNMI service. - OpenConfig gNMI specification https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md - gNMI API", "For this option to work the 'log_path' ansible configuration option", "in Ansible one (or more) sub-plugin(s) for the required gRPC", "set input = dict(filter(lambda x: x[1], kwargs.items())) # Adjust input", "connection: gnmi short_description: Provides a persistent gRPC connection for gNMI", "- Set C(GRPC_VERBOSITY) and C(GRPC_TRACE) to setup gRPC logging. Need", "self).__init__( play_context, new_stdin, *args, **kwargs ) self._task_uuid = to_text(kwargs.get(\"task_uuid\", \"\"))", "paths (list): List of paths (str) to be captured Returns:", "for item in input['subscription']: item['path'] = self._encodeXpath(item['path']) # Extract duration", "implemented suppport for JSON_IETF (preferred) and JSON (fallback). 
Parameters: type", "eligible for aggregation Returns: str: Updates received converted into JSON", "eleKey = json.dumps(leaf_elem['key']) eleName = '___'+leaf_elem['name'] if eleName not in", "=> must be list() if eleName in prfx: # Path", "cPath = cPath[eleName] cPath[eleKey] = self._decodeVal(_upd['val']) else: cPath[leaf_elem['name']] = self._decodeVal(_upd['val'])", "__future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION =", "item['path'] = self._decodeXpath(item['path']) return json.dumps(output, indent=4).encode() @ensure_connect def gnmiSubscribe(self, *args,", "**kwargs): super(Connection, self).__init__( play_context, new_stdin, *args, **kwargs ) self._task_uuid =", "channel to remote devices using gRPC including the underlying transport", "def _encodeXpath(self, xpath='/'): \"\"\" Encodes XPATH to dict representation that", "request Encoding that is used for data serialization is automatically", "prfx_elements: path_elements = prfx_elements cPath = result else: # No", "the default timeout value (in seconds) when awaiting a response", "eleName = elem['name'] if 'key' in elem: eleKey = json.dumps(elem['key'])", "% json.dumps(val)) def _dictToList(self, aDict): for key in aDict.keys(): if", "gnmiCapabilities(self): \"\"\" Executes a gNMI Capabilities request Parameters: None Returns:", "subjectAltName (only in the case secure connections are used). The", "# Get response, keep context pass prfx = result if", "are not passed to the client process that establishes the", "the remote device when first establishing the gRPC connection. vars:", "description: - The PEM encoded root certificate file used to", "the username of the logged in user. 
- Can be", "The option must be provided in cases, when the FQDN", "if 'key' in elem: for k, v in elem['key'].items(): tmp", "result['prefix'] = '/'+self._decodeXpath(update['prefix']) if 'timestamp' in update: result['timestamp'] = datetime.datetime.fromtimestamp(float(update['timestamp'])/1000000000).isoformat()", "defaults key: remote_user env: - name: ANSIBLE_REMOTE_USER vars: - name:", "'prefix' in input: input['prefix'] = self._encodeXpath(input['prefix']) if 'delete' in input:", "modules can call methods provided by that sub-plugin. There is", "gnmiGet(self, *args, **kwargs): \"\"\" Executes a gNMI Get request Encoding", "the persistent gRPC connection, if not already done. After this,", "to %s\" % (self.get_option('remote_user'), self._target)) self.queue_message('v', 'Creating gNMI stub') self._stub", "= self._login_credentials try: response = self._stub.Get(request, metadata=auth) except grpc.RpcError as", "sub-plugin. \"\"\" transport = \"nokia.grpc.gnmi\" has_pipelining = True def __init__(self,", "to target host\") self._channel.close() super(Connection, self).close() # ----------------------------------------------------------------------- def _encodeXpath(self,", "Parameters: val (dict): decoded gnmi_pb.TypedValue object Returns: (ANY): extracted data", "The username used to authenticate to the remote device when", "int description: - Configures the default timeout value (in seconds)", "return f.read() except Exception as exc: raise AnsibleConnectionFailure( 'Failed to", "= self.get_option('certificate_path') if not path: path = '/etc/ssl:/etc/ssl/certs:/etc/ca-certificates' filename =", "this value expires before the connection to the remote device", "self.queue_message('v', 'Creating gNMI stub') self._stub = gnmi_pb2.gNMIStub(self._channel) self._encoding = self.get_option('gnmi_encoding')", "del aDict[key] else: if isinstance(aDict[key], dict): aDict[key] = self._dictToList(aDict[key]) return", "input: duration = input['duration'] del 
input['duration'] else: duration = 20", "syntax Returns: (dict): path dict using gnmi_pb2.Path structure for easy", "section: defaults key: remote_port env: - name: ANSIBLE_REMOTE_PORT vars: -", "and entry['syncResponse']: # Ignore: SyncResponse is sent after initial update", "elif 'update' not in entry: # Ignore: entry without updates", "entry in input['replace']: entry['path'] = self._encodeXpath(entry['path']) entry['val'] = self._encodeVal(entry['val']) request", "protobuf HAS_PROTOBUF = True except ImportError: HAS_PROTOBUF = False from", "- OpenConfig gNMI specification https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md - gNMI API https://raw.githubusercontent.com/openconfig/gnmi/master/proto/gnmi/gnmi.proto -", "(str): Path prefix that is added to all paths (XPATH", "in update: result['timestamp'] = datetime.datetime.fromtimestamp(float(update['timestamp'])/1000000000).isoformat() if 'update' in update: result['values']", "protobuf'\" ) if not HAS_GRPC: raise AnsibleError( \"grpcio is required", "security implications of enabling this option as it could create", "try: import grpc HAS_GRPC = True except ImportError: HAS_GRPC =", "If None only the C(host) part will be used. 
ini:", "description: - Target host FQDN or IP address to establish", "string using XPATH syntax \"\"\" result = [] if 'elem'", "to be used in gNMI SetRequest if 'backup' in input:", "input: del input['backup_options'] # Adjust input parameters to match specification", "*args, **kwargs): super(Connection, self).__init__( play_context, new_stdin, *args, **kwargs ) self._task_uuid", "key: certificate_path env: - name: ANSIBLE_CERTIFICATE_PATH vars: - name: ansible_certificate_path", "communication channel to remote devices using gRPC including the underlying", "request = gnmi_pb2.CapabilityRequest() auth = self._login_credentials try: response = self._stub.Capabilities(request,", "gnmi_pb.Path object Parameters: xpath (str): path string using XPATH syntax", "This is needed, because the TLS validates hostname or IP", "remote device that listens for connections when establishing the gRPC", "This connection plugin provides a persistent communication channel to remote", "eleName = '___'+leaf_elem['name'] if eleName not in cPath: cPath[eleName] =", "\"\"\" Executes a gNMI Subscribe request Encoding that is used", "ansible_collections.nokia.grpc.plugins.connection.pb import gnmi_pb2 from ansible.module_utils._text import to_text class Connection(NetworkConnectionBase): \"\"\"", "for auto-detection ini: - section: grpc_connection key: gnmi_encoding env: -", "or IP address to avoid man-in-the-middle attacks. vars: - name:", "= self.get_option('grpc_channel_options') if options: if not isinstance(options, dict): raise AnsibleConnectionFailure(\"grpc_channel_options", "= {} update = entry['update'] if 'prefix' in update: result['prefix']", "prefix that is added to all paths (XPATH syntax) update", "tmp += \"[%s=%s]\" % (k, v) result.append(tmp) return '/'.join(result) def", "if certs['root_certificates'] or certs['private_key'] or certs['certificate_chain']: self.queue_message('v', 'Starting secure gRPC", "JSON (fallback). 
Parameters: prefix (str): Path prefix that is added", "use the method `register_service()` with the name of the sub-plugin", "representation that allows conversion to gnmi_pb.Path object Parameters: xpath (str):", "JSON_IETF (preferred) and JSON (fallback). Parameters: prefix (str): Path prefix", "self._login_credentials try: response = self._stub.Capabilities(request, metadata=auth) except grpc.RpcError as e:", "exist => Create prfx[eleKey] = elem['key'] prfx = prfx[eleKey] else:", "return json.loads(base64.b64decode(val['jsonVal'])) else: raise AnsibleConnectionFailure(\"Ansible gNMI plugin does not support", "(str) to be captured Returns: str: GetResponse message converted into", "e) dKeys = dict(x.split('=', 1) for x in eKeys) if", "= self._encoding_value if 'prefix' in input: input['prefix'] = self._encodeXpath(input['prefix']) if", "\"nokia.grpc.gnmi\" has_pipelining = True def __init__(self, play_context, new_stdin, *args, **kwargs):", "def readFile(self, optionName): \"\"\" Reads a binary certificate/key file Parameters:", "return {'jsonIetfVal': value} else: return {'jsonVal': value} def _decodeVal(self, val):", "sent after initial update break elif 'update' not in entry:", "division, print_function) __metaclass__ = type DOCUMENTATION = \"\"\" --- author:", "is closed. 
default: 300 ini: - section: persistent_connection key: command_timeout", "elem in path_elements[:-1]: eleName = elem['name'] if 'key' in elem:", "settings are not passed to the client process that establishes", "if 'elem' not in path: return \"\" for elem in", "input = dict(filter(lambda x: x[1], kwargs.items())) # Adjust input parameters", "elem['name'] if 'key' in elem: eleKey = json.dumps(elem['key']) eleName =", "return '/'.join(result) def _encodeVal(self, data): \"\"\" Encodes value to dict", "that is used to connect to the device is different", "qos (int): DSCP marking that is used updates_only (bool): Send", "== grpc.StatusCode.DEADLINE_EXCEEDED: if input['mode'] == 'ONCE': raise AnsibleConnectionFailure(\"gNMI ONCE Subscription", "ansible_password - name: ansible_ssh_pass private_key_file: description: - The PEM encoded", "must be a dict\") for key in grpcEnv: if grpcEnv[key]:", "- name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT vars: - name: ansible_connect_timeout persistent_command_timeout: type: int", "None \"\"\" if self.connected: self.queue_message('v', 'gRPC connection to host %s", "used. ini: - section: defaults key: remote_port env: - name:", "create a SSL-enabled channel, if the value is None it", "%s to %s\" % (self.get_option('remote_user'), self._target)) self.queue_message('v', 'Creating gNMI stub')", "to use gRPC connection type. 
\" + \"Please run 'pip", "input['mode'].upper() input['encoding'] = self._encoding_value if 'prefix' in input: input['prefix'] =", "elem['key'] prfx = prfx[eleKey] else: # Path Element hasn't key", "entry = entry['update'] else: # Get response, keep context pass", "section: grpc_connection key: private_key_file env: - name: ANSIBLE_PRIVATE_KEY_FILE vars: -", ") self._connected = False def readFile(self, optionName): \"\"\" Reads a", "u in update['update']} yield result else: # Ignore: Invalid message", "- This gRPC plugin provides methods to interact with the", "does not exist => Create prfx[eleName] = {} prfx =", "(str) to be deleted Returns: str: SetResponse message converted into", "None it reads the root certificates from a default location", "from gnmi_pb.TypedValue object Parameters: val (dict): decoded gnmi_pb.TypedValue object Returns:", "responses] output = self._mergeToSingleDict(responses) else: for update in self._simplifyUpdates(responses): output.append(update)", "(list): Path/Value pairs to be updated replace (list): Path/Value pairs", "env: - name: ANSIBLE_CERTIFICATE_CHAIN_FILE vars: - name: ansible_certificate_chain_file certificate_path: description:", "isinstance(options, dict): raise AnsibleConnectionFailure(\"grpc_channel_options must be a dict\") options =", "dict() if eleName in prfx: # Path Element exists =>", "=> Change Context cPath = cPath[eleName] else: # Path Element", "key.startswith('___'): aDict[key[3:]] = [self._dictToList(val) if isinstance(val, dict) else val for", "True self.queue_message('v', 'gRPC/gNMI connection has established successfully') def close(self): \"\"\"", "to be created/updated leaf_elem = path_elements[-1] if 'key' in leaf_elem:", "env: - name: ANSIBLE_GNMI_ENCODING vars: - name: ansible_gnmi_encoding grpc_channel_options: description:", "\" + \"Please run 'pip install grpcio'\" ) self._connected =", "ANSIBLE_ROOT_CERTIFICATES_FILE vars: - name: ansible_root_certificates_file certificate_chain_file: description: 
- The PEM", "be replaced delete (list): Paths (str) to be deleted Returns:", "address to establish gRPC connection. default: inventory_hostname vars: - name:", "input['mode'] = input['mode'].upper() input['encoding'] = self._encoding_value if 'prefix' in input:", "else: cPath[leaf_elem['name']] = self._decodeVal(_upd['val']) return self._dictToList(result) def _simplifyUpdates(self, rawData): for", "'gRPC connection to host %s already exist' % self._target) return", "response = self._stub.Get(request, metadata=auth) except grpc.RpcError as e: raise AnsibleConnectionFailure(\"%s\"", "in aDict[key].values()] del aDict[key] else: if isinstance(aDict[key], dict): aDict[key] =", "'prefix' in input: input['prefix'] = self._encodeXpath(input['prefix']) if 'subscription' in input:", "xpath = xpath.strip('\\t\\n\\r /') if xpath: path_elements = re.split('''/(?=(?:[^\\[\\]]|\\[[^\\[\\]]+\\])*$)''', xpath)", "override the TLS subject or subjectAltName (only in the case", "**kwargs): \"\"\" Executes a gNMI Set request Encoding that is", "Parameters: type (str): Type of data that is requested: ALL,", "capabilities converted into JSON format \"\"\" request = gnmi_pb2.CapabilityRequest() auth", "fail. default: 5 ini: - section: persistent_connection key: connect_timeout env:", "to match specification for gNMI SetRequest if 'prefix' in input:", "self._login_credentials try: response = self._stub.Get(request, metadata=auth) except grpc.RpcError as e:", "from google import protobuf HAS_PROTOBUF = True except ImportError: HAS_PROTOBUF", "exists => Change Context prfx = prfx[eleName] else: # Path", "has key => must be list() if eleName in prfx:", "Specifies the port on the remote device that listens for", "in the ansible log file. For this option to work", "a persistent communication channel to remote devices using gRPC including", "entry in path.split(':'): if os.path.isfile(os.path.join(entry, filename)): filename = os.path.join(entry, filename)", "use gRPC connection type. 
\" + \"Please run 'pip install", "- Provide the I(ssl_target_name_override) option to override the TLS subject", "=> Create prfx[eleKey] = elem['key'] prfx = prfx[eleKey] else: #", "in input: input['prefix'] = self._encodeXpath(input['prefix']) if 'path' in input: input['path']", "prfx = prfx[eleName] if eleKey not in prfx: # List", "x: x[1], kwargs.items())) # Backup options are not to be", "encoding '%s' requested (JSON or JSON_IETF)\" % self._encoding) self._encoding_value =", "input: input['prefix'] = self._encodeXpath(input['prefix']) if 'subscription' in input: for item", "=> Change Context prfx = prfx[eleName] if eleKey not in", "response = self._stub.Capabilities(request, metadata=self._login_credentials) self.queue_message('v', 'CapabilityRequest() succeeded') self._gnmiVersion = response.gNMI_version", "persistent_connection key: log_messages env: - name: ANSIBLE_PERSISTENT_LOG_MESSAGES vars: - name:", "to stop receiving qos (int): DSCP marking that is used", "close(self): \"\"\" Closes the active gRPC connection to the target", "for x in eKeys) if dKeys: entry['key'] = dKeys mypath.append(entry)", "kwargs that are not set input = dict(filter(lambda x: x[1],", "an error is generated and the connection is closed. default:", "active gRPC connection to the target host Parameters: None Returns:", "the gRPC connection is first established. If the remote_user is", "exist => Create cPath[eleKey] = elem['key'] cPath = cPath[eleKey] else:", "e.split(\"[\", 1)[0]} eKeys = re.findall('\\[(.*?)\\]', e) dKeys = dict(x.split('=', 1)", "input['replace']: entry['path'] = self._encodeXpath(entry['path']) entry['val'] = self._encodeVal(entry['val']) request = json_format.ParseDict(input,", "not self._encoding: self.queue_message('v', 'Run CapabilityRequest()') request = gnmi_pb2.CapabilityRequest() response =", "name that is provided in the host certificate. 
This is", "raise AnsibleConnectionFailure(\"Incompatible encoding '%s' requested (JSON or JSON_IETF)\" % self._encoding)", "root certificate file used to create a SSL-enabled channel, if", "None Returns: str: gNMI capabilities converted into JSON format \"\"\"", "elements marked as eligible for aggregation Returns: str: Updates received", "pass # ----------------------------------------------------------------------- @ensure_connect def gnmiCapabilities(self): \"\"\" Executes a gNMI", "True except ImportError: HAS_PROTOBUF = False from ansible.errors import AnsibleConnectionFailure,", "JSON or JSON_IETF - If not provided, will run CapabilityRequest", "import base64 import datetime try: import grpc HAS_GRPC = True", "setting in current environment, but thats ok pass self._login_credentials =", "(dict): decoded gnmi_pb2.Path object Returns: (str): path string using XPATH", "- The username used to authenticate to the remote device", "cPath = cPath[eleName] # The last entry of path_elements is", "= self._mergeToSingleDict(responses) else: for update in self._simplifyUpdates(responses): output.append(update) except grpc.RpcError", "ini: - section: defaults key: remote_user env: - name: ANSIBLE_REMOTE_USER", "in self._simplifyUpdates(responses): output.append(update) except grpc.RpcError as e: if e.code() ==", "Subscribe) requirements: - grpcio - protobuf options: host: description: -", "target host Parameters: None Returns: None \"\"\" if self._connected: self.queue_message('v',", "API service description: - This gRPC plugin provides methods to", "for gNMI requests (Capabilities, Get, Set, Subscribe) requirements: - grpcio", "- protobuf options: host: description: - Target host FQDN or", "service. 
It provide wrappers for gNMI requests (Capabilities, Get, Set,", "in elem: for k, v in elem['key'].items(): tmp += \"[%s=%s]\"", "is used for data serialization is automatically determined based on", "Folder to search for certificate and key files ini: -", "PEM encoded certificate chain file used to create a SSL-enabled", "ansible_user password: description: - Configures the user password used to", "False def readFile(self, optionName): \"\"\" Reads a binary certificate/key file", "requested path without content (no value) => skip continue elif", "value) => skip continue elif ('path' in _upd) and ('elem'", "\"\"\" Executes a gNMI Get request Encoding that is used", "= {} certs['root_certificates'] = self.readFile('root_certificates_file') certs['certificate_chain'] = self.readFile('certificate_chain_file') certs['private_key'] =", "(str): path string using XPATH syntax \"\"\" result = []", "and C(GRPC_TRACE) to setup gRPC logging. Need to add code", "for u in update['update']} yield result else: # Ignore: Invalid", "captured Returns: str: GetResponse message converted into JSON format \"\"\"", "if isinstance(aDict[key], dict): aDict[key] = self._dictToList(aDict[key]) return aDict def _mergeToSingleDict(self,", "submode) duration (int): timeout, to stop receiving qos (int): DSCP", "FQDN or IP address to establish gRPC connection. 
default: inventory_hostname", "e in path_elements: entry = {'name': e.split(\"[\", 1)[0]} eKeys =", "using XPATH syntax Returns: (dict): path dict using gnmi_pb2.Path structure", "settings specific to gRPC - The standard mechanism to provide/set", "cPath[eleName] cPath[eleKey] = elem['key'] cPath = cPath[eleKey] else: # Path", "dict): aDict[key] = self._dictToList(aDict[key]) return aDict def _mergeToSingleDict(self, rawData): result", "in input: input['mode'] = input['mode'].upper() input['encoding'] = self._encoding_value if 'prefix'", "related log messages to the persistent messages log (see below).", "host Parameters: None Returns: None \"\"\" if self._connected: self.queue_message('v', \"Closing", "mypath} return {} def _decodeXpath(self, path): \"\"\" Decodes XPATH from", "%s already exist' % self._target) return grpcEnv = self.get_option('grpc_environment') or", "does not exist' % filename ) return None def _connect(self):", "option must be provided in cases, when the FQDN or", "connection type. \" + \"Please run 'pip install grpcio'\" )", "raise AnsibleConnectionFailure(\"gNMI ONCE Subscription timed out\") else: # RPC timed", "del os.environ[key] except KeyError: # no such setting in current", "**kwargs): \"\"\" Executes a gNMI Get request Encoding that is", "ansible_private_key_file root_certificates_file: description: - The PEM encoded root certificate file", "a specific method of that sub-plugin. \"\"\" transport = \"nokia.grpc.gnmi\"", "if self._encoding == 'JSON_IETF': return {'jsonIetfVal': value} else: return {'jsonVal':", "runtime. 
ini: - section: grpc_connection key: root_certificates_file env: - name:", "description: - Configures the user password used to authenticate to", "insecure gRPC connection') self._channel = grpc.insecure_channel(self._target, options=options) self.queue_message('v', \"gRPC connection", "gRPC connection') self._channel = grpc.insecure_channel(self._target, options=options) self.queue_message('v', \"gRPC connection established", "update['update']} yield result else: # Ignore: Invalid message format pass", "- Key/Value pairs (dict) to define environment settings specific to", "= prfx[eleName] else: # Path Element does not exist =>", "converted into JSON format \"\"\" # Remove all input parameters", "initial update pass elif 'update' in entry: result = {}", "prfx[eleName] else: # Path Element does not exist => Create", "in input: for entry in input['replace']: entry['path'] = self._encodeXpath(entry['path']) entry['val']", "replace (list): Path/Value pairs to be replaced delete (list): Paths", "= '/'+self._decodeXpath(update['prefix']) if 'timestamp' in update: result['timestamp'] = datetime.datetime.fromtimestamp(float(update['timestamp'])/1000000000).isoformat() if", "name: ansible_gnmi_encoding grpc_channel_options: description: - Key/Value pairs (dict) to define", ") if not HAS_GRPC: raise AnsibleError( \"grpcio is required to", "the connection will use the username of the logged in", "to provide/set the environment in Ansible cannot be used, because", "AnsibleError( \"protobuf is required to use gRPC connection type. \"", "information in log file. default: False ini: - section: persistent_connection", "is offered by the gRPC server. 
vars: - name: ansible_grpc_environment", "There is a wrapper available that consumes the attribute name", "AnsibleConnectionFailure(\"%s\" % e) return json_format.MessageToJson(response) @ensure_connect def gnmiGet(self, *args, **kwargs):", "in entry: # Ignore: entry without updates break elif 'timestamp'", "entry in rawData: if 'syncResponse' in entry and entry['syncResponse']: #", "str(grpcEnv[key]) else: try: del os.environ[key] except KeyError: # no such", "=> Change Context cPath = cPath[eleName] if eleKey not in", "indent=4).encode() @ensure_connect def gnmiSet(self, *args, **kwargs): \"\"\" Executes a gNMI", "'___'+leaf_elem['name'] if eleName not in cPath: cPath[eleName] = {} cPath", "e) output = self._mergeToSingleDict(json_format.MessageToDict(response)['notification']) return json.dumps(output, indent=4).encode() @ensure_connect def gnmiSet(self,", "user. - Can be configured from the CLI via the", "ini: - section: grpc_connection key: certificate_chain_file env: - name: ANSIBLE_CERTIFICATE_CHAIN_FILE", "This flag will enable logging the command executed and response", "else: # RPC timed out, which is okay pass else:", "write access. - Be sure to fully understand the security", "output: output['prefix'] = self._decodeXpath(output['prefix']) for item in output['response']: item['path'] =", "is used updates_only (bool): Send only updates to initial state", "remote device when the gRPC connection is first established. 
If", "if isinstance(val, dict) else val for val in aDict[key].values()] del", "path = self.get_option('certificate_path') if not path: path = '/etc/ssl:/etc/ssl/certs:/etc/ca-certificates' filename", "= re.findall('\\[(.*?)\\]', e) dKeys = dict(x.split('=', 1) for x in", "entry = {'name': e.split(\"[\", 1)[0]} eKeys = re.findall('\\[(.*?)\\]', e) dKeys", "'Cert/keys file %s does not exist' % filename ) return", "- name: ansible_user password: description: - Configures the user password", "'path' in input: input['path'] = [self._encodeXpath(path) for path in input['path']]", "raise AnsibleConnectionFailure(\"grpc_environment must be a dict\") for key in grpcEnv:", "result = self._decodeVal(_upd['val']) prfx = result continue # If path_elements", "else: # Ignore: Invalid message format pass # ----------------------------------------------------------------------- @ensure_connect", "thats ok pass self._login_credentials = [ ('username', self.get_option('remote_user')), ('password', self.get_option('password'))", "name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT vars: - name: ansible_connect_timeout persistent_command_timeout: type: int description:", "To use gRPC connections in Ansible one (or more) sub-plugin(s)", "persistent communication channel to remote devices using gRPC including the", "a gNMI Subscribe request Encoding that is used for data", "first establishing the gRPC connection. vars: - name: ansible_password -", "ansible_root_certificates_file certificate_chain_file: description: - The PEM encoded certificate chain file", "Ansible cannot be used, because those environment settings are not", "grpc.ssl_channel_credentials(**certs) self._channel = grpc.secure_channel(self._target, creds, options=options) else: self.queue_message('v', 'Starting insecure", "Mode of subscription (STREAM, ONCE) subscription (list of dict): Subscription", "file. 
default: False ini: - section: persistent_connection key: log_messages env:", "pairs to be updated replace (list): Path/Value pairs to be", "# Path Element does not exist => Create prfx[eleName] =", "KeyError: # no such setting in current environment, but thats", "certs['private_key'] = self.readFile('private_key_file') options = self.get_option('grpc_channel_options') if options: if not", "= {self._decodeXpath(u['path']): self._decodeVal(u['val']) for u in update['update']} yield result else:", "cPath[eleKey] else: # Path Element hasn't key => must be", "gNMI Set request Encoding that is used for data serialization", "except grpc.RpcError as e: if e.code() == grpc.StatusCode.DEADLINE_EXCEEDED: if input['mode']", "service. - OpenConfig gNMI specification https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md - gNMI API https://raw.githubusercontent.com/openconfig/gnmi/master/proto/gnmi/gnmi.proto", "prfx = result if ('prefix' in entry) and ('elem' in", "to fully understand the security implications of enabling this option", "encoded certificate chain file used to create a SSL-enabled channel.", "needed). 
- Set C(GRPC_SSL_CIPHER_SUITES) in case the default TLS ciphers", "Set C(GRPC_SSL_CIPHER_SUITES) in case the default TLS ciphers do not", "filename)): filename = os.path.join(entry, filename) break if os.path.isfile(filename): try: with", "forwarding of gRPC related log messages to the persistent messages", "tmp = elem['name'] if 'key' in elem: for k, v", "ansible.module_utils._text import to_text class Connection(NetworkConnectionBase): \"\"\" Connection plugin for gRPC", "prfx[eleName] for _upd in entry['update']: if 'val' not in _upd:", "standard mechanism to provide/set the environment in Ansible cannot be", "import to_text class Connection(NetworkConnectionBase): \"\"\" Connection plugin for gRPC To", "self.queue_message('v', \"Closing gRPC connection to target host\") self._channel.close() super(Connection, self).close()", "be list() if eleName in prfx: # Path Element exists", "as it could create a security vulnerability by logging sensitive", "BSD 3 Clause license # SPDX-License-Identifier: BSD-3-Clause # from __future__", "gRPC server. 
vars: - name: ansible_grpc_environment persistent_connect_timeout: type: int description:", "all paths (XPATH syntax) paths (list): List of paths (str)", "from gnmi_pb.Path object Parameters: path (dict): decoded gnmi_pb2.Path object Returns:", "= gnmi_pb2.CapabilityRequest() auth = self._login_credentials try: response = self._stub.Capabilities(request, metadata=auth)", "state allow_aggregation (bool): Aggregate elements marked as eligible for aggregation", "- \"<NAME> (@HansThienpondt)\" - \"<NAME> (@wisotzky)\" connection: gnmi short_description: Provides", "indent=4).encode() @ensure_connect def gnmiSubscribe(self, *args, **kwargs): \"\"\" Executes a gNMI", "that is added to all paths (XPATH syntax) paths (list):", "- Specifies the port on the remote device that listens", "used, because those environment settings are not passed to the", "prfx = prfx[eleName] else: # Path Element does not exist", "used to authenticate to the remote device when the gRPC", "path): \"\"\" Decodes XPATH from dict representation converted from gnmi_pb.Path", "= \"nokia.grpc.gnmi\" has_pipelining = True def __init__(self, play_context, new_stdin, *args,", "establishing the grpc connection. ini: - section: grpc_connection key: private_key_file", "(if needed). 
- Set C(GRPC_SSL_CIPHER_SUITES) in case the default TLS", "\"\"\" if self.connected: self.queue_message('v', 'gRPC connection to host %s already", "string using XPATH syntax Returns: (dict): path dict using gnmi_pb2.Path", "that allows conversion to gnmi_pb.TypedValue object Parameters: data (ANY): data", "gnmiSet(self, *args, **kwargs): \"\"\" Executes a gNMI Set request Encoding", "entry in input['delete']] if 'update' in input: for entry in", "key: private_key_file env: - name: ANSIBLE_PRIVATE_KEY_FILE vars: - name: ansible_private_key_file", "class Connection(NetworkConnectionBase): \"\"\" Connection plugin for gRPC To use gRPC", "# If path_elements has more than just a single entry,", "env: - name: ANSIBLE_REMOTE_PORT vars: - name: ansible_port remote_user: description:", "connection will fail. default: 5 ini: - section: persistent_connection key:", "def _decodeXpath(self, path): \"\"\" Decodes XPATH from dict representation converted", "vars: - name: ansible_persistent_log_messages \"\"\" import os import re import", "description: - Key/Value pairs (dict) to define gRPC channel options", "'prefix' in output: output['prefix'] = self._decodeXpath(output['prefix']) for item in output['response']:", "e: raise AnsibleConnectionFailure(\"%s\" % e) return json_format.MessageToJson(response) @ensure_connect def gnmiGet(self,", "= self.readFile('certificate_chain_file') certs['private_key'] = self.readFile('private_key_file') options = self.get_option('grpc_channel_options') if options:", "self.readFile('root_certificates_file') certs['certificate_chain'] = self.readFile('certificate_chain_file') certs['private_key'] = self.readFile('private_key_file') options = self.get_option('grpc_channel_options')", "channel, if the value is None it reads the root", "as e: raise AnsibleConnectionFailure(\"%s\" % e) output = self._mergeToSingleDict(json_format.MessageToDict(response)['notification']) return", "decoded gnmi_pb2.Path object Returns: (str): path string using XPATH 
syntax", "def gnmiGet(self, *args, **kwargs): \"\"\" Executes a gNMI Get request", "Subscribe request Encoding that is used for data serialization is", "[] for elem in prfx_elements: eleName = elem['name'] if 'key'", "and key files ini: - section: grpc_connection key: certificate_path env:", "has key => must be list() if eleName in cPath:", "hostname or IP address to avoid man-in-the-middle attacks. vars: -", "be used - gRPC reference U(https://grpc.github.io/grpc/core/group__grpc__arg__keys.html) - Provide the I(ssl_target_name_override)", "transport = \"nokia.grpc.gnmi\" has_pipelining = True def __init__(self, play_context, new_stdin,", "initial update break elif 'update' not in entry: # Ignore:", "establish a persistent connection. If this value expires before the", "env: - name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT vars: - name: ansible_command_timeout persistent_log_messages: type:", "for connections when establishing the gRPC connection. If None only", "in current environment, but thats ok pass self._login_credentials = [", "Get request Encoding that is used for data serialization is", "self._encoding_value if 'prefix' in input: input['prefix'] = self._encodeXpath(input['prefix']) if 'subscription'", "{} prfx = prfx[eleName] prfx[eleKey] = elem['key'] prfx = prfx[eleKey]", "# (c) 2020 Nokia # # Licensed under the BSD", "name: ansible_persistent_log_messages \"\"\" import os import re import json import", "_mergeToSingleDict(self, rawData): result = {} for entry in rawData: if", "if 'update' in update: result['values'] = {self._decodeXpath(u['path']): self._decodeVal(u['val']) for u", "(host, port) self._timeout = self.get_option('persistent_command_timeout') certs = {} certs['root_certificates'] =", "= _upd['path']['elem'] cPath = prfx elif prfx_elements: path_elements = prfx_elements", "if 'path' in input: input['path'] = [self._encodeXpath(path) for path in", "= [] xpath = xpath.strip('\\t\\n\\r /') if xpath: path_elements =", "the sub-plugin to be 
registered. After loading the sub-plugin, Ansible", "- The PEM encoded root certificate file used to create", "ciphers do not match what is offered by the gRPC", "Need to add code for log forwarding of gRPC related", "20 request = json_format.ParseDict({'subscribe': input}, gnmi_pb2.SubscribeRequest()) auth = self._login_credentials try:", "= elem['key'] cPath = cPath[eleKey] else: # Path Element hasn't", "all paths (XPATH syntax) mode (str): Mode of subscription (STREAM,", "optionName): \"\"\" Reads a binary certificate/key file Parameters: optionName(str): used", "result will be persisted. Parameters: None Returns: None \"\"\" if", "@ensure_connect def gnmiSet(self, *args, **kwargs): \"\"\" Executes a gNMI Set", "hasn't key => must be dict() if eleName in cPath:", "filename = os.path.expanduser(filename) if not filename.startswith('/'): for entry in path.split(':'):", "Element has key => must be list() if eleName in", "not exist => Create prfx[eleName] = {} prfx = prfx[eleName]", "=> skip continue elif ('path' in _upd) and ('elem' in", "a gNMI Get request Encoding that is used for data", "== 'ONCE': raise AnsibleConnectionFailure(\"gNMI ONCE Subscription timed out\") else: #", "plugin provides methods to interact with the gNMI service. -", "subject name that is provided in the host certificate. 
This", "prfx_elements: eleName = elem['name'] if 'key' in elem: eleKey =", "= False def readFile(self, optionName): \"\"\" Reads a binary certificate/key", "prfx_elements = [] for elem in prfx_elements: eleName = elem['name']", "value to dict representation that allows conversion to gnmi_pb.TypedValue object", "the method `register_service()` with the name of the sub-plugin to", "cPath[eleName] else: # Path Element does not exist => Create", "default: False ini: - section: persistent_connection key: log_messages env: -", "in entry: # Ignore: SyncResponse is sent after initial update", "syntax) update (list): Path/Value pairs to be updated replace (list):", "syntax) mode (str): Mode of subscription (STREAM, ONCE) subscription (list", "self.get_option('port') self._target = host if port is None else '%s:%d'", "if grpcEnv[key]: os.environ[key] = str(grpcEnv[key]) else: try: del os.environ[key] except", "connection, if not already done. After this, the gNMI stub", "after initial update break elif 'update' not in entry: #", "_encodeVal(self, data): \"\"\" Encodes value to dict representation that allows", "install grpcio'\" ) self._connected = False def readFile(self, optionName): \"\"\"", "gNMI Subscribe request Encoding that is used for data serialization", "=> must be dict() if eleName in prfx: # Path", "vars: - name: ansible_command_timeout persistent_log_messages: type: boolean description: - This", "= self._decodeVal(_upd['val']) prfx = result continue # If path_elements has", "self.get_option('remote_user')), ('password', self.get_option('password')) ] host = self.get_option('host') port = self.get_option('port')", "'update' in input: for entry in input['update']: entry['path'] = self._encodeXpath(entry['path'])", "raise AnsibleError( \"grpcio is required to use gRPC connection type.", "if os.path.isfile(os.path.join(entry, filename)): filename = os.path.join(entry, filename) break if os.path.isfile(filename):", "entry does not exist => Create prfx[eleKey] = 
elem['key'] prfx", "different from the subject name that is provided in the", "'prefix' in update: result['prefix'] = '/'+self._decodeXpath(update['prefix']) if 'timestamp' in update:", "IP address to establish gRPC connection. default: inventory_hostname vars: -", "# no such setting in current environment, but thats ok", "gRPC at runtime. ini: - section: grpc_connection key: root_certificates_file env:", "if 'update' in input: for entry in input['update']: entry['path'] =", "description: - This gRPC plugin provides methods to interact with", "call a specific method of that sub-plugin. \"\"\" transport =", "specified subcontext for elem in path_elements[:-1]: eleName = elem['name'] if", "the connection to the remote device is completed, the connection", ") else: raise AnsibleConnectionFailure( 'Cert/keys file %s does not exist'", "the gRPC server. vars: - name: ansible_grpc_environment persistent_connect_timeout: type: int", "grpc.RpcError as e: raise AnsibleConnectionFailure(\"%s\" % e) output = self._mergeToSingleDict(json_format.MessageToDict(response)['notification'])", "run CapabilityRequest for auto-detection ini: - section: grpc_connection key: gnmi_encoding", "output = self._mergeToSingleDict(json_format.MessageToDict(response)['notification']) return json.dumps(output, indent=4).encode() @ensure_connect def gnmiSet(self, *args,", "\"\"\" if 'jsonIetfVal' in val: return json.loads(base64.b64decode(val['jsonIetfVal'])) elif 'jsonVal' in", "_decodeXpath(self, path): \"\"\" Decodes XPATH from dict representation converted from", "file does not exist or read excpetions \"\"\" path =", "a wrapper available that consumes the attribute name {sub-plugin name}__{method", "vars: - name: ansible_host port: type: int description: - Specifies", "SSL-enabled channel. 
If the value is None, no certificate chain", "if not HAS_GRPC: raise AnsibleError( \"grpcio is required to use", "File content Raises: AnsibleConnectionFailure: file does not exist or read", "Backup options are not to be used in gNMI SetRequest", "cannot be used, because those environment settings are not passed", "that is used for data serialization is automatically determined based", "certs['certificate_chain']: self.queue_message('v', 'Starting secure gRPC connection') creds = grpc.ssl_channel_credentials(**certs) self._channel", "option as it could create a security vulnerability by logging", "Set C(GRPC_VERBOSITY) and C(GRPC_TRACE) to setup gRPC logging. Need to", "must be list() if eleName in cPath: # Path Element", "ANSIBLE_REMOTE_PORT vars: - name: ansible_port remote_user: description: - The username", "raise AnsibleConnectionFailure( 'Failed to read cert/keys file %s: %s' %", "eleName in prfx: # Path Element exists => Change Context", "'/'+self._decodeXpath(update['prefix']) if 'timestamp' in update: result['timestamp'] = datetime.datetime.fromtimestamp(float(update['timestamp'])/1000000000).isoformat() if 'update'", "which is okay pass else: raise AnsibleConnectionFailure(\"%s\" % e) return", "seconds) when awaiting a response after issuing a call to", "response.supported_encodings: self._encoding = 'JSON' else: raise AnsibleConnectionFailure(\"No compatible supported encoding", "cPath = cPath[eleKey] else: # Path Element hasn't key =>", "result = [] if 'elem' not in path: return \"\"", "= grpc.secure_channel(self._target, creds, options=options) else: self.queue_message('v', 'Starting insecure gRPC connection')", "connection') self._channel = grpc.insecure_channel(self._target, options=options) self.queue_message('v', \"gRPC connection established for", "the attribute name {sub-plugin name}__{method name} to call a specific", "env: - name: ANSIBLE_PRIVATE_KEY_FILE vars: - name: ansible_private_key_file root_certificates_file: description:", "the gRPC 
connection. vars: - name: ansible_password - name: ansible_ssh_pass", "prfx = prfx[eleKey] else: # Path Element hasn't key =>", "# we need to create/navigate to the specified subcontext for", "of data that is requested: ALL, CONFIG, STATE prefix (str):", "in aDict.keys(): if key.startswith('___'): aDict[key[3:]] = [self._dictToList(val) if isinstance(val, dict)", "= '/etc/ssl:/etc/ssl/certs:/etc/ca-certificates' filename = self.get_option(optionName) if filename: if filename.startswith('~'): filename", "required gRPC service(s) must be loaded. To load gRPC sub-plugins", "ANSIBLE_CERTIFICATE_PATH vars: - name: ansible_certificate_path gnmi_encoding: description: - Encoding used", "used in gNMI SetRequest if 'backup' in input: del input['backup']", "for easy conversion \"\"\" value = base64.b64encode(json.dumps(data).encode()) if self._encoding ==", "'replace' in input: for entry in input['replace']: entry['path'] = self._encodeXpath(entry['path'])", "def gnmiCapabilities(self): \"\"\" Executes a gNMI Capabilities request Parameters: None", "exist' % self._target) return grpcEnv = self.get_option('grpc_environment') or {} if", ") self._task_uuid = to_text(kwargs.get(\"task_uuid\", \"\")) if not HAS_PROTOBUF: raise AnsibleError(", "path = '/etc/ssl:/etc/ssl/certs:/etc/ca-certificates' filename = self.get_option(optionName) if filename: if filename.startswith('~'):", "a call to a RPC. If the RPC does not", "List entry does not exist => Create cPath[eleKey] = elem['key']", "the 'log_path' ansible configuration option is required to be set", "avoid man-in-the-middle attacks. 
vars: - name: ansible_grpc_channel_options grpc_environment: description: -", "= self.get_option('port') self._target = host if port is None else", "Returns: (dict): dict using gnmi_pb.TypedValue structure for easy conversion \"\"\"", "if self._connected: self.queue_message('v', \"Closing gRPC connection to target host\") self._channel.close()", "Subscribe response, enter update context entry = entry['update'] else: #", "= elem['name'] if 'key' in elem: eleKey = json.dumps(elem['key']) eleName", "= elem['key'] prfx = prfx[eleKey] else: # Path Element hasn't", "need to create/navigate to the specified subcontext for elem in", "not to be used in gNMI SetRequest if 'backup' in", "wait when trying to initially establish a persistent connection. If", "will enable logging the command executed and response received from", "from a default location chosen by gRPC at runtime. ini:", "is used. ini: - section: grpc_connection key: certificate_chain_file env: -", "via the C(--user) or C(-u) options. ini: - section: defaults", "the underlying transport (TLS). - The plugin binds to the", "in update: result['prefix'] = '/'+self._decodeXpath(update['prefix']) if 'timestamp' in update: result['timestamp']", "from target device in the ansible log file. 
For this", "(path, interval, submode) duration (int): timeout, to stop receiving qos", "AnsibleConnectionFailure(\"grpc_environment must be a dict\") for key in grpcEnv: if", "Create prfx[eleName] = {} prfx = prfx[eleName] prfx[eleKey] = elem['key']", "xpath='/'): \"\"\" Encodes XPATH to dict representation that allows conversion", "enable logging the command executed and response received from target", "file Parameters: optionName(str): used to read filename from options Returns:", "os.environ[key] = str(grpcEnv[key]) else: try: del os.environ[key] except KeyError: #", "filename: if filename.startswith('~'): filename = os.path.expanduser(filename) if not filename.startswith('/'): for", "subscription (STREAM, ONCE) subscription (list of dict): Subscription specification (path,", "the ansible log file. For this option to work the", "grpc_connection key: gnmi_encoding env: - name: ANSIBLE_GNMI_ENCODING vars: - name:", "ansible_gnmi_encoding grpc_channel_options: description: - Key/Value pairs (dict) to define gRPC", "name: ANSIBLE_REMOTE_PORT vars: - name: ansible_port remote_user: description: - The", "file path with write access. 
- Be sure to fully", "not return before the timeout exceed, an error is generated", "to all paths (XPATH syntax) paths (list): List of paths", "password: description: - Configures the user password used to authenticate", "connection established for user %s to %s\" % (self.get_option('remote_user'), self._target))", "match specification for gNMI SetRequest if 'prefix' in input: input['prefix']", "aggregation Returns: str: Updates received converted into JSON format \"\"\"", "AnsibleConnectionFailure( 'Cert/keys file %s does not exist' % filename )", "delete (list): Paths (str) to be deleted Returns: str: SetResponse", "'Creating gNMI stub') self._stub = gnmi_pb2.gNMIStub(self._channel) self._encoding = self.get_option('gnmi_encoding') if", "'gRPC/gNMI connection has established successfully') def close(self): \"\"\" Closes the", "= gnmi_pb2.Encoding.Value(self._encoding) self._connected = True self.queue_message('v', 'gRPC/gNMI connection has established", "from options Returns: File content Raises: AnsibleConnectionFailure: file does not", "C(GRPC_TRACE) to setup gRPC logging. Need to add code for", "is not specified, the connection will use the username of", "self._task_uuid = to_text(kwargs.get(\"task_uuid\", \"\")) if not HAS_PROTOBUF: raise AnsibleError( \"protobuf", "as e: raise AnsibleConnectionFailure(\"%s\" % e) return json_format.MessageToJson(response) @ensure_connect def", "path_elements[-1] if 'key' in leaf_elem: eleKey = json.dumps(leaf_elem['key']) eleName =", "request = json_format.ParseDict(input, gnmi_pb2.SetRequest()) auth = self._login_credentials try: response =", "specific to gRPC - The standard mechanism to provide/set the", "that consumes the attribute name {sub-plugin name}__{method name} to call", "easy conversion \"\"\" mypath = [] xpath = xpath.strip('\\t\\n\\r /')", "cPath = cPath[eleKey] else: # Path Element does not exist", "logged in user. 
- Can be configured from the CLI", "= response.supported_models if gnmi_pb2.Encoding.Value('JSON_IETF') in response.supported_encodings: self._encoding = 'JSON_IETF' elif", "connection is first established. If the remote_user is not specified,", "'rb') as f: return f.read() except Exception as exc: raise", "to initial state allow_aggregation (bool): Aggregate elements marked as eligible", "to a file path with write access. - Be sure", "json_format.MessageToDict(response) output['timestamp'] = datetime.datetime.fromtimestamp(float(output['timestamp'])/1000000000).isoformat() if 'prefix' in output: output['prefix'] =", "if 'subscription' in input: for item in input['subscription']: item['path'] =", "name of the sub-plugin to be registered. After loading the", "Encoding used for gNMI communication - Must be either JSON", "name: ANSIBLE_CERTIFICATE_CHAIN_FILE vars: - name: ansible_certificate_chain_file certificate_path: description: - Folder", "# Path Element hasn't key => must be dict() if", "description: - Folder to search for certificate and key files", "work the 'log_path' ansible configuration option is required to be", "update: result['prefix'] = '/'+self._decodeXpath(update['prefix']) if 'timestamp' in update: result['timestamp'] =", "in input: del input['backup'] if 'backup_options' in input: del input['backup_options']", "(c) 2020 Nokia # # Licensed under the BSD 3", "from dict representation converted from gnmi_pb.TypedValue object Parameters: val (dict):", "if key.startswith('___'): aDict[key[3:]] = [self._dictToList(val) if isinstance(val, dict) else val", "type DOCUMENTATION = \"\"\" --- author: - \"<NAME> (@HansThienpondt)\" -", "in the case secure connections are used). The option must", "connection is closed. 
default: 300 ini: - section: persistent_connection key:", "not set input = dict(filter(lambda x: x[1], kwargs.items())) # Adjust", "gnmi_pb.Path object Parameters: path (dict): decoded gnmi_pb2.Path object Returns: (str):", "ansible_host port: type: int description: - Specifies the port on", "connection. ini: - section: grpc_connection key: private_key_file env: - name:", "If the remote_user is not specified, the connection will use", "connection to target host\") self._channel.close() super(Connection, self).close() # ----------------------------------------------------------------------- def", "- Configures, in seconds, the amount of time to wait", "key => must be list() if eleName in cPath: #", "= \"\"\" --- author: - \"<NAME> (@HansThienpondt)\" - \"<NAME> (@wisotzky)\"", "= True def __init__(self, play_context, new_stdin, *args, **kwargs): super(Connection, self).__init__(", "if the value is None it reads the root certificates", "the TLS subject or subjectAltName (only in the case secure", "could create a security vulnerability by logging sensitive information in", "secure gRPC connection') creds = grpc.ssl_channel_credentials(**certs) self._channel = grpc.secure_channel(self._target, creds,", "except grpc.RpcError as e: raise AnsibleConnectionFailure(\"%s\" % e) output =", "has implemented suppport for JSON_IETF (preferred) and JSON (fallback). 
Parameters:", "is sent after initial update pass elif 'update' in entry:", "= grpc.ssl_channel_credentials(**certs) self._channel = grpc.secure_channel(self._target, creds, options=options) else: self.queue_message('v', 'Starting", "timed out\") else: # RPC timed out, which is okay", "environment, but thats ok pass self._login_credentials = [ ('username', self.get_option('remote_user')),", "a binary certificate/key file Parameters: optionName(str): used to read filename", "grpc_environment: description: - Key/Value pairs (dict) to define environment settings", "remote device, a gNM CapabilityRequest will be sent and result", "\"\"\" --- author: - \"<NAME> (@HansThienpondt)\" - \"<NAME> (@wisotzky)\" connection:", "gRPC reference U(https://grpc.github.io/grpc/core/group__grpc__arg__keys.html) - Provide the I(ssl_target_name_override) option to override", "message converted into JSON format \"\"\" # Remove all input", "__metaclass__ = type DOCUMENTATION = \"\"\" --- author: - \"<NAME>", "item['path'] = self._encodeXpath(item['path']) # Extract duration from input attributes if", "options Returns: File content Raises: AnsibleConnectionFailure: file does not exist", "- name: ANSIBLE_CERTIFICATE_PATH vars: - name: ansible_certificate_path gnmi_encoding: description: -", "# from __future__ import (absolute_import, division, print_function) __metaclass__ = type", "must be list() if eleName in prfx: # Path Element", "read cert/keys file %s: %s' % (filename, exc) ) else:", "for the required gRPC service(s) must be loaded. To load", "sensitive information in log file. 
default: False ini: - section:", "- section: grpc_connection key: gnmi_encoding env: - name: ANSIBLE_GNMI_ENCODING vars:", "xpath) for e in path_elements: entry = {'name': e.split(\"[\", 1)[0]}", "{'jsonVal': value} def _decodeVal(self, val): \"\"\" Decodes value from dict", "read filename from options Returns: File content Raises: AnsibleConnectionFailure: file", "object Returns: (ANY): extracted data \"\"\" if 'jsonIetfVal' in val:", "cPath[eleName] = {} cPath = cPath[eleName] # The last entry", "in grpcEnv: if grpcEnv[key]: os.environ[key] = str(grpcEnv[key]) else: try: del", "conversion to gnmi_pb.TypedValue object Parameters: data (ANY): data to be", "re import json import base64 import datetime try: import grpc", "mypath = [] xpath = xpath.strip('\\t\\n\\r /') if xpath: path_elements", "\"grpcio is required to use gRPC connection type. \" +", "= {} cPath = cPath[eleName] cPath[eleKey] = elem['key'] cPath =", "device, a gNM CapabilityRequest will be sent and result will", "used to authenticate to the remote device when first establishing", "Ignore: Invalid message format pass # ----------------------------------------------------------------------- @ensure_connect def gnmiCapabilities(self):", "= elem['name'] if 'key' in elem: for k, v in", "AnsibleError( \"grpcio is required to use gRPC connection type. 
\"", "file used to authenticate to the remote device when first", "in input: duration = input['duration'] del input['duration'] else: duration =", "AnsibleConnectionFailure(\"gNMI ONCE Subscription timed out\") else: # RPC timed out,", "= True except ImportError: HAS_PROTOBUF = False from ansible.errors import", "when the FQDN or IPv4 address that is used to", "% e) output = json_format.MessageToDict(response) output['timestamp'] = datetime.datetime.fromtimestamp(float(output['timestamp'])/1000000000).isoformat() if 'prefix'", "list() if eleName in prfx: # Path Element exists =>", "=> Create cPath[eleName] = {} cPath = cPath[eleName] # The", "for entry in path.split(':'): if os.path.isfile(os.path.join(entry, filename)): filename = os.path.join(entry,", "self._target) return grpcEnv = self.get_option('grpc_environment') or {} if not isinstance(grpcEnv,", "subcontext for elem in path_elements[:-1]: eleName = elem['name'] if 'key'", "json_format.MessageToDict(msg) if 'syncResponse' in entry: # Ignore: SyncResponse is sent", "ini: - section: persistent_connection key: command_timeout env: - name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT", "exist => Create prfx[eleName] = {} prfx = prfx[eleName] for", "eleKey not in cPath: # List entry does not exist", "gRPC connection. - Set C(GRPC_VERBOSITY) and C(GRPC_TRACE) to setup gRPC", "be used, because those environment settings are not passed to", "\"\"\" request = gnmi_pb2.CapabilityRequest() auth = self._login_credentials try: response =", "used. 
ini: - section: grpc_connection key: certificate_chain_file env: - name:", "= self.get_option('gnmi_encoding') if not self._encoding: self.queue_message('v', 'Run CapabilityRequest()') request =", "pass self._login_credentials = [ ('username', self.get_option('remote_user')), ('password', self.get_option('password')) ] host", "= self._stub.Subscribe(iter([request]), duration, metadata=auth) if input['mode'] == 'ONCE': responses =", "Nokia # # Licensed under the BSD 3 Clause license", "the CLI via the C(--user) or C(-u) options. ini: -", "= [] if 'elem' not in path: return \"\" for", "in input: for entry in input['update']: entry['path'] = self._encodeXpath(entry['path']) entry['val']", "or IPv4 address that is used to connect to the", "return {'jsonVal': value} def _decodeVal(self, val): \"\"\" Decodes value from", "result['timestamp'] = datetime.datetime.fromtimestamp(float(update['timestamp'])/1000000000).isoformat() if 'update' in update: result['values'] = {self._decodeXpath(u['path']):", "client process that establishes the gRPC connection. 
- Set C(GRPC_VERBOSITY)", "dict representation converted from gnmi_pb.Path object Parameters: path (dict): decoded", "self._mergeToSingleDict(json_format.MessageToDict(response)['notification']) return json.dumps(output, indent=4).encode() @ensure_connect def gnmiSet(self, *args, **kwargs): \"\"\"", "this option as it could create a security vulnerability by", "in input['path']] if 'type' in input: input['type'] = input['type'].upper() input['encoding']", "Path Element has key => must be list() if eleName", "does not support encoding for value: %s\" % json.dumps(val)) def", "str: GetResponse message converted into JSON format \"\"\" # Remove", "gnmi_pb2.SubscribeRequest()) auth = self._login_credentials try: output = [] responses =", "ansible_command_timeout persistent_log_messages: type: boolean description: - This flag will enable", "entry['syncResponse']: # Ignore: SyncResponse is sent after initial update break", "----------------------------------------------------------------------- @ensure_connect def gnmiCapabilities(self): \"\"\" Executes a gNMI Capabilities request", "Path Element exists => Change Context prfx = prfx[eleName] else:", "# requested path without content (no value) => skip continue", "will be created. To get visibility about gNMI capabilities of", "- Key/Value pairs (dict) to define gRPC channel options to", "is a wrapper available that consumes the attribute name {sub-plugin", "'/'.join(result) def _encodeVal(self, data): \"\"\" Encodes value to dict representation", "isinstance(val, dict) else val for val in aDict[key].values()] del aDict[key]", "Context prfx = prfx[eleName] if eleKey not in prfx: #", "env: - name: ANSIBLE_CERTIFICATE_PATH vars: - name: ansible_certificate_path gnmi_encoding: description:", "key in grpcEnv: if grpcEnv[key]: os.environ[key] = str(grpcEnv[key]) else: try:", "After this, the gNMI stub will be created. 
To get", "(dict): path dict using gnmi_pb2.Path structure for easy conversion \"\"\"", "else: # Get response, keep context pass prfx = result", "eleName in cPath: # Path Element exists => Change Context", "\"protobuf is required to use gRPC connection type. \" +", "(dict): decoded gnmi_pb.TypedValue object Returns: (ANY): extracted data \"\"\" if", "(only in the case secure connections are used). The option", "= prfx[eleName] if eleKey not in prfx: # List entry", "gRPC connection. default: inventory_hostname vars: - name: ansible_host port: type:", "encoded private key file used to authenticate to the remote", "val: return json.loads(base64.b64decode(val['jsonVal'])) else: raise AnsibleConnectionFailure(\"Ansible gNMI plugin does not", "grpc.RpcError as e: raise AnsibleConnectionFailure(\"%s\" % e) output = json_format.MessageToDict(response)", "+ \"Please run 'pip install grpcio'\" ) self._connected = False", "converted from gnmi_pb.Path object Parameters: path (dict): decoded gnmi_pb2.Path object", "key: root_certificates_file env: - name: ANSIBLE_ROOT_CERTIFICATES_FILE vars: - name: ansible_root_certificates_file", "list() if eleName in cPath: # Path Element exists =>", "port on the remote device that listens for connections when", "certificate_path: description: - Folder to search for certificate and key", "- The standard mechanism to provide/set the environment in Ansible", "from ansible.errors import AnsibleConnectionFailure, AnsibleError from ansible.plugins.connection import NetworkConnectionBase from", "request = gnmi_pb2.CapabilityRequest() response = self._stub.Capabilities(request, metadata=self._login_credentials) self.queue_message('v', 'CapabilityRequest() succeeded')", "leaf_elem = path_elements[-1] if 'key' in leaf_elem: eleKey = json.dumps(leaf_elem['key'])", "dict() if eleName in cPath: # Path Element exists =>", "Change Context prfx = prfx[eleName] else: # Path Element does", "suppport for JSON_IETF (preferred) and JSON (fallback). 
Parameters: prefix (str):", "def gnmiSubscribe(self, *args, **kwargs): \"\"\" Executes a gNMI Subscribe request", "encoding for value: %s\" % json.dumps(val)) def _dictToList(self, aDict): for", "*args, **kwargs): \"\"\" Executes a gNMI Get request Encoding that", "str: SetResponse message converted into JSON format \"\"\" # Remove", "SPDX-License-Identifier: BSD-3-Clause # from __future__ import (absolute_import, division, print_function) __metaclass__", "elif 'update' in entry: result = {} update = entry['update']", "encoded as gnmi_pb.TypedValue object Returns: (dict): dict using gnmi_pb.TypedValue structure", "auth = self._login_credentials try: response = self._stub.Get(request, metadata=auth) except grpc.RpcError", "Remove all input parameters from kwargs that are not set", "are used). The option must be provided in cases, when", "self._channel.close() super(Connection, self).close() # ----------------------------------------------------------------------- def _encodeXpath(self, xpath='/'): \"\"\" Encodes", "e) output = json_format.MessageToDict(response) output['timestamp'] = datetime.datetime.fromtimestamp(float(output['timestamp'])/1000000000).isoformat() if 'prefix' in", "self._stub.Capabilities(request, metadata=auth) except grpc.RpcError as e: raise AnsibleConnectionFailure(\"%s\" % e)", "vars: - name: ansible_grpc_channel_options grpc_environment: description: - Key/Value pairs (dict)", "author: - \"<NAME> (@HansThienpondt)\" - \"<NAME> (@wisotzky)\" connection: gnmi short_description:", "allows conversion to gnmi_pb.TypedValue object Parameters: data (ANY): data to", "gNMI SubscribeRequest if 'mode' in input: input['mode'] = input['mode'].upper() input['encoding']", "be loaded. 
To load gRPC sub-plugins use the method `register_service()`", "the objecttree with value result = self._decodeVal(_upd['val']) prfx = result", "- name: ansible_certificate_chain_file certificate_path: description: - Folder to search for", "self._decodeVal(u['val']) for u in update['update']} yield result else: # Ignore:", "conversion to gnmi_pb.Path object Parameters: xpath (str): path string using", "vars: - name: ansible_gnmi_encoding grpc_channel_options: description: - Key/Value pairs (dict)", "value (in seconds) when awaiting a response after issuing a", "(@HansThienpondt)\" - \"<NAME> (@wisotzky)\" connection: gnmi short_description: Provides a persistent", "'/etc/ssl:/etc/ssl/certs:/etc/ca-certificates' filename = self.get_option(optionName) if filename: if filename.startswith('~'): filename =", "# Subscribe response, enter update context entry = entry['update'] else:", "Path/Value pairs to be replaced delete (list): Paths (str) to", "more) sub-plugin(s) for the required gRPC service(s) must be loaded.", "type: int description: - Specifies the port on the remote", "(see below). 
- Set C(HTTPS_PROXY) to specify your proxy settings", "connections in Ansible one (or more) sub-plugin(s) for the required", "AnsibleConnectionFailure, AnsibleError from ansible.plugins.connection import NetworkConnectionBase from ansible.plugins.connection import ensure_connect", "The standard mechanism to provide/set the environment in Ansible cannot", "if self._encoding not in ['JSON_IETF', 'JSON']: raise AnsibleConnectionFailure(\"Incompatible encoding '%s'", "in input: del input['backup_options'] # Adjust input parameters to match", "'key' in leaf_elem: eleKey = json.dumps(leaf_elem['key']) eleName = '___'+leaf_elem['name'] if", "connect_timeout env: - name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT vars: - name: ansible_connect_timeout persistent_command_timeout:", "- gNMI API https://raw.githubusercontent.com/openconfig/gnmi/master/proto/gnmi/gnmi.proto - This connection plugin provides a", "self._connected: self.queue_message('v', \"Closing gRPC connection to target host\") self._channel.close() super(Connection,", "\"\"\" Decodes XPATH from dict representation converted from gnmi_pb.Path object", "of the remote device, a gNM CapabilityRequest will be sent", "the default TLS ciphers do not match what is offered", "self).close() # ----------------------------------------------------------------------- def _encodeXpath(self, xpath='/'): \"\"\" Encodes XPATH to", "is different from the subject name that is provided in", "create a security vulnerability by logging sensitive information in log", "- section: grpc_connection key: certificate_path env: - name: ANSIBLE_CERTIFICATE_PATH vars:", "ImportError: HAS_GRPC = False try: from google import protobuf HAS_PROTOBUF", "self._encoding: self.queue_message('v', 'Run CapabilityRequest()') request = gnmi_pb2.CapabilityRequest() response = self._stub.Capabilities(request,", "self._encoding_value request = json_format.ParseDict(input, gnmi_pb2.GetRequest()) auth = self._login_credentials try: response", "in _upd) and ('elem' 
in _upd['path']): path_elements = _upd['path']['elem'] cPath", "to establish gRPC connection. default: inventory_hostname vars: - name: ansible_host", "in gNMI SetRequest if 'backup' in input: del input['backup'] if", "result = {} for entry in rawData: if 'syncResponse' in", "path with write access. - Be sure to fully understand", "stub will be created. To get visibility about gNMI capabilities", "path without content (no value) => skip continue elif ('path'", "in input['subscription']: item['path'] = self._encodeXpath(item['path']) # Extract duration from input", "ANSIBLE_REMOTE_USER vars: - name: ansible_user password: description: - Configures the", "if 'backup' in input: del input['backup'] if 'backup_options' in input:", "'update' not in entry: # Ignore: entry without updates break", "Updates received converted into JSON format \"\"\" # Remove all", "import json_format from ansible_collections.nokia.grpc.plugins.connection.pb import gnmi_pb2 from ansible.module_utils._text import to_text", "Returns: None \"\"\" if self.connected: self.queue_message('v', 'gRPC connection to host", "in entry) and ('elem' in entry['prefix']): prfx_elements = entry['prefix']['elem'] else:", "Paths (str) to be deleted Returns: str: SetResponse message converted", "the RPC does not return before the timeout exceed, an", "data \"\"\" if 'jsonIetfVal' in val: return json.loads(base64.b64decode(val['jsonIetfVal'])) elif 'jsonVal'", "output = self._mergeToSingleDict(responses) else: for update in self._simplifyUpdates(responses): output.append(update) except", "Configures, in seconds, the amount of time to wait when", "case secure connections are used). 
The option must be provided", "self.queue_message('v', 'Run CapabilityRequest()') request = gnmi_pb2.CapabilityRequest() response = self._stub.Capabilities(request, metadata=self._login_credentials)", "object Returns: (str): path string using XPATH syntax \"\"\" result", "the remote_user is not specified, the connection will use the", "to the client process that establishes the gRPC connection. -", "exists => Change Context cPath = cPath[eleName] if eleKey not", "in prfx: # List entry does not exist => Create", "attribute name {sub-plugin name}__{method name} to call a specific method", "SetResponse message converted into JSON format \"\"\" # Remove all", "to the specified subcontext for elem in path_elements[:-1]: eleName =", "ini: - section: grpc_connection key: certificate_path env: - name: ANSIBLE_CERTIFICATE_PATH", "man-in-the-middle attacks. vars: - name: ansible_grpc_channel_options grpc_environment: description: - Key/Value", "remote_user: description: - The username used to authenticate to the", "gnmi_pb2.Path structure for easy conversion \"\"\" mypath = [] xpath", "context entry = entry['update'] else: # Get response, keep context", "grpc.RpcError as e: if e.code() == grpc.StatusCode.DEADLINE_EXCEEDED: if input['mode'] ==", "input['duration'] del input['duration'] else: duration = 20 request = json_format.ParseDict({'subscribe':", "ansible configuration option is required to be set to a", "files ini: - section: grpc_connection key: certificate_path env: - name:", "\"\"\" if self._connected: self.queue_message('v', \"Closing gRPC connection to target host\")", "self._encodeXpath(input['prefix']) if 'delete' in input: input['delete'] = [self._encodeXpath(entry) for entry", "must be dict() if eleName in prfx: # Path Element", "key: remote_user env: - name: ANSIBLE_REMOTE_USER vars: - name: ansible_user", "connection for gNMI API service description: - This gRPC plugin", "duration = 20 request = json_format.ParseDict({'subscribe': input}, 
gnmi_pb2.SubscribeRequest()) auth =", "option to override the TLS subject or subjectAltName (only in", "to_text(kwargs.get(\"task_uuid\", \"\")) if not HAS_PROTOBUF: raise AnsibleError( \"protobuf is required", "pass elif 'update' in entry: result = {} update =", "response = self._stub.Set(request, metadata=auth) except grpc.RpcError as e: raise AnsibleConnectionFailure(\"%s\"", "the grpc connection. ini: - section: grpc_connection key: private_key_file env:", "about gNMI capabilities of the remote device, a gNM CapabilityRequest", "authenticate to the remote device when the gRPC connection is", "connection. default: inventory_hostname vars: - name: ansible_host port: type: int", "C(-u) options. ini: - section: defaults key: remote_user env: -", "import AnsibleConnectionFailure, AnsibleError from ansible.plugins.connection import NetworkConnectionBase from ansible.plugins.connection import", "import protobuf HAS_PROTOBUF = True except ImportError: HAS_PROTOBUF = False", "of time to wait when trying to initially establish a", "os.path.isfile(filename): try: with open(filename, 'rb') as f: return f.read() except", "not match what is offered by the gRPC server. 
vars:", "vars: - name: ansible_certificate_chain_file certificate_path: description: - Folder to search", "- name: ansible_host port: type: int description: - Specifies the", "= os.path.expanduser(filename) if not filename.startswith('/'): for entry in path.split(':'): if", "'pip install grpcio'\" ) self._connected = False def readFile(self, optionName):", "receiving qos (int): DSCP marking that is used updates_only (bool):", "AnsibleConnectionFailure(\"%s\" % e) output = self._mergeToSingleDict(json_format.MessageToDict(response)['notification']) return json.dumps(output, indent=4).encode() @ensure_connect", "'key' in elem: eleKey = json.dumps(elem['key']) eleName = '___'+eleName #", "False try: from google import protobuf HAS_PROTOBUF = True except", "structure for easy conversion \"\"\" mypath = [] xpath =", "Parameters: optionName(str): used to read filename from options Returns: File", "eleName = '___'+eleName # Path Element has key => must", "for _upd in entry['update']: if 'val' not in _upd: #", "that are not set input = dict(filter(lambda x: x[1], kwargs.items()))", "self.get_option('grpc_environment') or {} if not isinstance(grpcEnv, dict): raise AnsibleConnectionFailure(\"grpc_environment must", "host certificate. This is needed, because the TLS validates hostname", "received from target device in the ansible log file. For", "registered. 
After loading the sub-plugin, Ansible modules can call methods", "% self._target) return grpcEnv = self.get_option('grpc_environment') or {} if not", "in entry: # Subscribe response, enter update context entry =", "XPATH syntax Returns: (dict): path dict using gnmi_pb2.Path structure for", "description: - Specifies the port on the remote device that", "to be replaced delete (list): Paths (str) to be deleted", "int description: - Specifies the port on the remote device", "rawData: entry = json_format.MessageToDict(msg) if 'syncResponse' in entry: # Ignore:", "dict): raise AnsibleConnectionFailure(\"grpc_environment must be a dict\") for key in", "Licensed under the BSD 3 Clause license # SPDX-License-Identifier: BSD-3-Clause", "os.environ[key] except KeyError: # no such setting in current environment,", "a response after issuing a call to a RPC. If", "must be dict() if eleName in cPath: # Path Element", "https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md - gNMI API https://raw.githubusercontent.com/openconfig/gnmi/master/proto/gnmi/gnmi.proto - This connection plugin provides", "Element does not exist => Create cPath[eleName] = {} cPath", "for data serialization is automatically determined based on the remote", "Element exists => Change Context prfx = prfx[eleName] if eleKey", "= self._encodeXpath(item['path']) # Extract duration from input attributes if 'duration'", "_upd: # requested path without content (no value) => skip", "input: for entry in input['update']: entry['path'] = self._encodeXpath(entry['path']) entry['val'] =", "name: ansible_ssh_pass private_key_file: description: - The PEM encoded private key", "exc) ) else: raise AnsibleConnectionFailure( 'Cert/keys file %s does not", "Element exists => Change Context cPath = cPath[eleName] if eleKey", "the client process that establishes the gRPC connection. 
- Set", "gnmi_pb2.Path object Returns: (str): path string using XPATH syntax \"\"\"", "rawData): result = {} for entry in rawData: if 'syncResponse'", "in input: input['delete'] = [self._encodeXpath(entry) for entry in input['delete']] if", "Path Element does not exist => Create prfx[eleName] = {}", "defaults key: remote_port env: - name: ANSIBLE_REMOTE_PORT vars: - name:", "loaded. To load gRPC sub-plugins use the method `register_service()` with", "requested (JSON or JSON_IETF)\" % self._encoding) self._encoding_value = gnmi_pb2.Encoding.Value(self._encoding) self._connected", "or JSON_IETF)\" % self._encoding) self._encoding_value = gnmi_pb2.Encoding.Value(self._encoding) self._connected = True", "short_description: Provides a persistent gRPC connection for gNMI API service", "to be used - gRPC reference U(https://grpc.github.io/grpc/core/group__grpc__arg__keys.html) - Provide the", "found (JSON or JSON_IETF)\") else: if self._encoding not in ['JSON_IETF',", "import re import json import base64 import datetime try: import", "vars: - name: ansible_private_key_file root_certificates_file: description: - The PEM encoded", "self.get_option('grpc_channel_options') if options: if not isinstance(options, dict): raise AnsibleConnectionFailure(\"grpc_channel_options must", "certificate_chain_file: description: - The PEM encoded certificate chain file used", "certificate_path env: - name: ANSIBLE_CERTIFICATE_PATH vars: - name: ansible_certificate_path gnmi_encoding:", "the subject name that is provided in the host certificate.", "devices using gRPC including the underlying transport (TLS). - The", "connection. 
vars: - name: ansible_password - name: ansible_ssh_pass private_key_file: description:", "= datetime.datetime.fromtimestamp(float(update['timestamp'])/1000000000).isoformat() if 'update' in update: result['values'] = {self._decodeXpath(u['path']): self._decodeVal(u['val'])", "used to read filename from options Returns: File content Raises:", "needs to be created/updated leaf_elem = path_elements[-1] if 'key' in", "Context cPath = cPath[eleName] else: # Path Element does not", "responses = self._stub.Subscribe(iter([request]), duration, metadata=auth) if input['mode'] == 'ONCE': responses", "= [] for elem in prfx_elements: eleName = elem['name'] if", "if 'duration' in input: duration = input['duration'] del input['duration'] else:", "for val in aDict[key].values()] del aDict[key] else: if isinstance(aDict[key], dict):", "\"<NAME> (@wisotzky)\" connection: gnmi short_description: Provides a persistent gRPC connection", "to connect to the device is different from the subject", "{'jsonIetfVal': value} else: return {'jsonVal': value} def _decodeVal(self, val): \"\"\"", "that is provided in the host certificate. This is needed,", "= 'JSON' else: raise AnsibleConnectionFailure(\"No compatible supported encoding found (JSON", "or JSON_IETF - If not provided, will run CapabilityRequest for", "description: - Key/Value pairs (dict) to define environment settings specific", "in val: return json.loads(base64.b64decode(val['jsonVal'])) else: raise AnsibleConnectionFailure(\"Ansible gNMI plugin does", "than just a single entry, # we need to create/navigate", "validates hostname or IP address to avoid man-in-the-middle attacks. vars:", "_dictToList(self, aDict): for key in aDict.keys(): if key.startswith('___'): aDict[key[3:]] =", "# # Licensed under the BSD 3 Clause license #", "plugin provides a persistent communication channel to remote devices using", "or C(-u) options. ini: - section: defaults key: remote_user env:", "will be used. 
ini: - section: defaults key: remote_port env:", "val in aDict[key].values()] del aDict[key] else: if isinstance(aDict[key], dict): aDict[key]", "else: duration = 20 request = json_format.ParseDict({'subscribe': input}, gnmi_pb2.SubscribeRequest()) auth", "out\") else: # RPC timed out, which is okay pass", "input['update']: entry['path'] = self._encodeXpath(entry['path']) entry['val'] = self._encodeVal(entry['val']) if 'replace' in", "ini: - section: grpc_connection key: private_key_file env: - name: ANSIBLE_PRIVATE_KEY_FILE", "prfx_elements = entry['prefix']['elem'] else: prfx_elements = [] for elem in", "connection') creds = grpc.ssl_channel_credentials(**certs) self._channel = grpc.secure_channel(self._target, creds, options=options) else:", "Parameters: data (ANY): data to be encoded as gnmi_pb.TypedValue object", "The plugin binds to the gNMI gRPC service. It provide", "offered by the gRPC server. vars: - name: ansible_grpc_environment persistent_connect_timeout:", "data): \"\"\" Encodes value to dict representation that allows conversion", "transport (TLS). 
- The plugin binds to the gNMI gRPC", "established for user %s to %s\" % (self.get_option('remote_user'), self._target)) self.queue_message('v',", "= 'JSON_IETF' elif gnmi_pb2.Encoding.Value('JSON') in response.supported_encodings: self._encoding = 'JSON' else:", "the environment in Ansible cannot be used, because those environment", "=> must be dict() if eleName in cPath: # Path", "json_format.ParseDict({'subscribe': input}, gnmi_pb2.SubscribeRequest()) auth = self._login_credentials try: output = []", "# Path Element has key => must be list() if", "to gRPC - The standard mechanism to provide/set the environment", "# The last entry of path_elements is the leaf element", "input['backup'] if 'backup_options' in input: del input['backup_options'] # Adjust input", "output['timestamp'] = datetime.datetime.fromtimestamp(float(output['timestamp'])/1000000000).isoformat() if 'prefix' in output: output['prefix'] = self._decodeXpath(output['prefix'])", "from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION", "] host = self.get_option('host') port = self.get_option('port') self._target = host", "AnsibleConnectionFailure(\"Ansible gNMI plugin does not support encoding for value: %s\"", "= cPath[eleName] if eleKey not in cPath: # List entry", "duration from input attributes if 'duration' in input: duration =", "if filename.startswith('~'): filename = os.path.expanduser(filename) if not filename.startswith('/'): for entry", "self.get_option('gnmi_encoding') if not self._encoding: self.queue_message('v', 'Run CapabilityRequest()') request = gnmi_pb2.CapabilityRequest()", "SSL-enabled channel, if the value is None it reads the", "Executes a gNMI Capabilities request Parameters: None Returns: str: gNMI", "- name: ANSIBLE_CERTIFICATE_CHAIN_FILE vars: - name: ansible_certificate_chain_file certificate_path: description: -", "does not return before the timeout exceed, an error is", "for JSON_IETF (preferred) and JSON (fallback). 
Parameters: type (str): Type", "for aggregation Returns: str: Updates received converted into JSON format", "paths (XPATH syntax) paths (list): List of paths (str) to", "- Set C(GRPC_SSL_CIPHER_SUITES) in case the default TLS ciphers do", "key files ini: - section: grpc_connection key: certificate_path env: -", "C(host) part will be used. ini: - section: defaults key:", "self.queue_message('v', 'Starting insecure gRPC connection') self._channel = grpc.insecure_channel(self._target, options=options) self.queue_message('v',", "data to be encoded as gnmi_pb.TypedValue object Returns: (dict): dict", "IPv4 address that is used to connect to the device", "certificate file used to create a SSL-enabled channel, if the", "name: ansible_certificate_path gnmi_encoding: description: - Encoding used for gNMI communication", "to the target host Parameters: None Returns: None \"\"\" if", "gNM CapabilityRequest will be sent and result will be persisted.", "a gNMI Set request Encoding that is used for data", "None \"\"\" if self._connected: self.queue_message('v', \"Closing gRPC connection to target", "input['type'] = input['type'].upper() input['encoding'] = self._encoding_value request = json_format.ParseDict(input, gnmi_pb2.GetRequest())", "provide wrappers for gNMI requests (Capabilities, Get, Set, Subscribe) requirements:", "logging. Need to add code for log forwarding of gRPC", "in eKeys) if dKeys: entry['key'] = dKeys mypath.append(entry) return {'elem':", "Parameters: path (dict): decoded gnmi_pb2.Path object Returns: (str): path string", "str: gNMI capabilities converted into JSON format \"\"\" request =", "Change Context cPath = cPath[eleName] else: # Path Element does", "single entry, # we need to create/navigate to the specified", "2020 Nokia # # Licensed under the BSD 3 Clause", "name} to call a specific method of that sub-plugin. 
\"\"\"", "'pip install protobuf'\" ) if not HAS_GRPC: raise AnsibleError( \"grpcio", "def close(self): \"\"\" Closes the active gRPC connection to the", "name: ANSIBLE_PRIVATE_KEY_FILE vars: - name: ansible_private_key_file root_certificates_file: description: - The", "= self._encodeXpath(input['prefix']) if 'subscription' in input: for item in input['subscription']:", "[json_format.MessageToDict(response) for response in responses] output = self._mergeToSingleDict(responses) else: for", "ansible_certificate_path gnmi_encoding: description: - Encoding used for gNMI communication -", "auto-detection ini: - section: grpc_connection key: gnmi_encoding env: - name:", "based on the remote device capabilities. This gNMI plugin has", "*args, **kwargs): \"\"\" Executes a gNMI Set request Encoding that", "is added to all paths (XPATH syntax) paths (list): List", "(int): timeout, to stop receiving qos (int): DSCP marking that", "what is offered by the gRPC server. vars: - name:", "not isinstance(options, dict): raise AnsibleConnectionFailure(\"grpc_channel_options must be a dict\") options", "gRPC connection for gNMI API service description: - This gRPC", "to search for certificate and key files ini: - section:", "value from dict representation converted from gnmi_pb.TypedValue object Parameters: val", "remote device when first establishing the grpc connection. ini: -", "path string using XPATH syntax \"\"\" result = [] if", "if 'backup_options' in input: del input['backup_options'] # Adjust input parameters" ]