hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7fd1105fe7302b867a2ec3c45f21c0dc13f8525 | 45,223 | py | Python | mysql-dst/mysql-cluster/libevent/event_rpcgen.py | SJTU-IPADS/dst | 897b929a692642cbf295c105d9d6e64090abb673 | [
"Apache-2.0"
] | 9 | 2020-12-17T01:59:13.000Z | 2022-03-30T16:25:08.000Z | mysql-dst/mysql-cluster/libevent/event_rpcgen.py | SJTU-IPADS/dst | 897b929a692642cbf295c105d9d6e64090abb673 | [
"Apache-2.0"
] | 1 | 2021-07-30T12:06:33.000Z | 2021-07-31T10:16:09.000Z | mysql-dst/mysql-cluster/libevent/event_rpcgen.py | SJTU-IPADS/dst | 897b929a692642cbf295c105d9d6e64090abb673 | [
"Apache-2.0"
] | 1 | 2021-08-01T13:47:07.000Z | 2021-08-01T13:47:07.000Z | #!/usr/bin/env python
#
# Copyright (c) 2005 Niels Provos <provos@citi.umich.edu>
# All rights reserved.
#
# Generates marshaling code based on libevent.
import sys
import re
#
_NAME = "event_rpcgen.py"
_VERSION = "0.1"
# Lexical shape of a struct/entry identifier in the .rpc input.
_STRUCT_RE = '[a-z][a-z_0-9]*'
# Globals
line_count = 0
# Collapses any run of whitespace to a single space.  NOTE: this was
# r'^\s+', which is dead code -- NormalizeLine() applies it *after*
# .strip(), so a leading-anchored pattern can never match and entries
# separated by tabs or multiple spaces failed to parse (split(' ')
# downstream produces empty tokens).  Upstream libevent uses r'\s+'.
white = re.compile(r'\s+')
# Strips a C++-style // comment to end of line.
cppcomment = re.compile(r'\/\/.*$')
# Preprocessor directives collected while scanning, emitted later into
# the generated header (#define) and source (#include/#if) files.
headerdirect = []
cppdirect = []
# Holds everything that makes a struct
# Holds everything that makes a struct
class Struct:
    """One struct parsed from the .rpc input.

    Collects Entry objects and knows how to emit the C header
    declarations and the C implementation (constructor, accessors,
    marshal/unmarshal/clear/free/complete) for the struct.
    """
    def __init__(self, name):
        self._name = name
        self._entries = []   # Entry objects in declaration order
        self._tags = {}      # tag number -> entry name, for duplicate check
        print >>sys.stderr, ' Created struct: %s' % name

    def AddEntry(self, entry):
        """Appends an entry; exits the program on a duplicate tag number."""
        if self._tags.has_key(entry.Tag()):
            print >>sys.stderr, ( 'Entry "%s" duplicates tag number '
                                  '%d from "%s" around line %d' ) % (
                entry.Name(), entry.Tag(),
                self._tags[entry.Tag()], line_count)
            sys.exit(1)
        self._entries.append(entry)
        self._tags[entry.Tag()] = entry.Name()
        print >>sys.stderr, ' Added entry: %s' % entry.Name()

    def Name(self):
        return self._name

    def EntryTagName(self, entry):
        """Creates the name inside an enumeration for distinguishing data
        types."""
        name = "%s_%s" % (self._name, entry.Name())
        return name.upper()

    def PrintIdented(self, file, ident, code):
        """Takes an array, add indentation to each entry and prints it."""
        for entry in code:
            print >>file, '%s%s' % (ident, entry)

    def PrintTags(self, file):
        """Prints the tag definitions for a structure."""
        print >>file, '/* Tag definition for %s */' % self._name
        print >>file, 'enum %s_ {' % self._name.lower()
        for entry in self._entries:
            print >>file, ' %s=%d,' % (self.EntryTagName(entry),
                                       entry.Tag())
        print >>file, ' %s_MAX_TAGS' % (self._name.upper())
        print >>file, '};\n'

    def PrintForwardDeclaration(self, file):
        print >>file, 'struct %s;' % self._name

    def PrintDeclaration(self, file):
        """Emits the accessor vtable, the struct definition and the
        prototypes of all generated functions to the header file."""
        print >>file, '/* Structure declaration for %s */' % self._name
        print >>file, 'struct %s_access_ {' % self._name
        for entry in self._entries:
            dcl = entry.AssignDeclaration('(*%s_assign)' % entry.Name())
            dcl.extend(
                entry.GetDeclaration('(*%s_get)' % entry.Name()))
            if entry.Array():
                dcl.extend(
                    entry.AddDeclaration('(*%s_add)' % entry.Name()))
            self.PrintIdented(file, ' ', dcl)
        print >>file, '};\n'
        print >>file, 'struct %s {' % self._name
        print >>file, ' struct %s_access_ *base;\n' % self._name
        for entry in self._entries:
            dcl = entry.Declaration()
            self.PrintIdented(file, ' ', dcl)
        print >>file, ''
        # One <name>_set flag per member records presence.
        for entry in self._entries:
            print >>file, ' uint8_t %s_set;' % entry.Name()
        print >>file, '};\n'
        print >>file, \
"""struct %(name)s *%(name)s_new(void);
void %(name)s_free(struct %(name)s *);
void %(name)s_clear(struct %(name)s *);
void %(name)s_marshal(struct evbuffer *, const struct %(name)s *);
int %(name)s_unmarshal(struct %(name)s *, struct evbuffer *);
int %(name)s_complete(struct %(name)s *);
void evtag_marshal_%(name)s(struct evbuffer *, uint32_t,
const struct %(name)s *);
int evtag_unmarshal_%(name)s(struct evbuffer *, uint32_t,
struct %(name)s *);""" % { 'name' : self._name }
        # Write a setting function of every variable
        for entry in self._entries:
            self.PrintIdented(file, '', entry.AssignDeclaration(
                entry.AssignFuncName()))
            self.PrintIdented(file, '', entry.GetDeclaration(
                entry.GetFuncName()))
            if entry.Array():
                self.PrintIdented(file, '', entry.AddDeclaration(
                    entry.AddFuncName()))
        print >>file, '/* --- %s done --- */\n' % self._name

    def PrintCode(self, file):
        """Emits the complete C implementation of the struct to *file*."""
        print >>file, ('/*\n'
                       ' * Implementation of %s\n'
                       ' */\n') % self._name
        print >>file, \
              'static struct %(name)s_access_ __%(name)s_base = {' % \
              { 'name' : self._name }
        for entry in self._entries:
            self.PrintIdented(file, ' ', entry.CodeBase())
        print >>file, '};\n'
        # Creation
        print >>file, (
            'struct %(name)s *\n'
            '%(name)s_new(void)\n'
            '{\n'
            ' struct %(name)s *tmp;\n'
            ' if ((tmp = malloc(sizeof(struct %(name)s))) == NULL) {\n'
            ' event_warn("%%s: malloc", __func__);\n'
            ' return (NULL);\n'
            ' }\n'
            ' tmp->base = &__%(name)s_base;\n') % { 'name' : self._name }
        for entry in self._entries:
            self.PrintIdented(file, ' ', entry.CodeNew('tmp'))
            print >>file, ' tmp->%s_set = 0;\n' % entry.Name()
        print >>file, (
            ' return (tmp);\n'
            '}\n')
        # Adding
        for entry in self._entries:
            if entry.Array():
                self.PrintIdented(file, '', entry.CodeAdd())
            print >>file, ''
        # Assigning
        for entry in self._entries:
            self.PrintIdented(file, '', entry.CodeAssign())
            print >>file, ''
        # Getting
        for entry in self._entries:
            self.PrintIdented(file, '', entry.CodeGet())
            print >>file, ''
        # Clearing
        print >>file, ( 'void\n'
                        '%(name)s_clear(struct %(name)s *tmp)\n'
                        '{'
                        ) % { 'name' : self._name }
        for entry in self._entries:
            self.PrintIdented(file, ' ', entry.CodeClear('tmp'))
        print >>file, '}\n'
        # Freeing
        print >>file, ( 'void\n'
                        '%(name)s_free(struct %(name)s *tmp)\n'
                        '{'
                        ) % { 'name' : self._name }
        for entry in self._entries:
            self.PrintIdented(file, ' ', entry.CodeFree('tmp'))
        print >>file, (' free(tmp);\n'
                       '}\n')
        # Marshaling
        print >>file, ('void\n'
                       '%(name)s_marshal(struct evbuffer *evbuf, '
                       'const struct %(name)s *tmp)'
                       '{') % { 'name' : self._name }
        for entry in self._entries:
            indent = ' '
            # Optional entries do not have to be set
            if entry.Optional():
                indent += ' '
                print >>file, ' if (tmp->%s_set) {' % entry.Name()
            self.PrintIdented(
                file, indent,
                entry.CodeMarshal('evbuf', self.EntryTagName(entry), 'tmp'))
            if entry.Optional():
                print >>file, ' }'
        print >>file, '}\n'
        # Unmarshaling
        print >>file, ('int\n'
                       '%(name)s_unmarshal(struct %(name)s *tmp, '
                       ' struct evbuffer *evbuf)\n'
                       '{\n'
                       ' uint32_t tag;\n'
                       ' while (EVBUFFER_LENGTH(evbuf) > 0) {\n'
                       ' if (evtag_peek(evbuf, &tag) == -1)\n'
                       ' return (-1);\n'
                       ' switch (tag) {\n'
                       ) % { 'name' : self._name }
        for entry in self._entries:
            print >>file, ' case %s:\n' % self.EntryTagName(entry)
            # Non-array members may appear at most once in the stream.
            if not entry.Array():
                print >>file, (
                    ' if (tmp->%s_set)\n'
                    ' return (-1);'
                    ) % (entry.Name())
            self.PrintIdented(
                file, ' ',
                entry.CodeUnmarshal('evbuf',
                                    self.EntryTagName(entry), 'tmp'))
            print >>file, ( ' tmp->%s_set = 1;\n' % entry.Name() +
                            ' break;\n' )
        print >>file, ( ' default:\n'
                        ' return -1;\n'
                        ' }\n'
                        ' }\n' )
        # Check if it was decoded completely
        print >>file, ( ' if (%(name)s_complete(tmp) == -1)\n'
                        ' return (-1);'
                        ) % { 'name' : self._name }
        # Successfully decoded
        print >>file, ( ' return (0);\n'
                        '}\n')
        # Checking if a structure has all the required data
        print >>file, (
            'int\n'
            '%(name)s_complete(struct %(name)s *msg)\n'
            '{' ) % { 'name' : self._name }
        for entry in self._entries:
            self.PrintIdented(
                file, ' ',
                entry.CodeComplete('msg'))
        print >>file, (
            ' return (0);\n'
            '}\n' )
        # Complete message unmarshaling
        print >>file, (
            'int\n'
            'evtag_unmarshal_%(name)s(struct evbuffer *evbuf, '
            'uint32_t need_tag, struct %(name)s *msg)\n'
            '{\n'
            ' uint32_t tag;\n'
            ' int res = -1;\n'
            '\n'
            ' struct evbuffer *tmp = evbuffer_new();\n'
            '\n'
            ' if (evtag_unmarshal(evbuf, &tag, tmp) == -1'
            ' || tag != need_tag)\n'
            ' goto error;\n'
            '\n'
            ' if (%(name)s_unmarshal(msg, tmp) == -1)\n'
            ' goto error;\n'
            '\n'
            ' res = 0;\n'
            '\n'
            ' error:\n'
            ' evbuffer_free(tmp);\n'
            ' return (res);\n'
            '}\n' ) % { 'name' : self._name }
        # Complete message marshaling
        print >>file, (
            'void\n'
            'evtag_marshal_%(name)s(struct evbuffer *evbuf, uint32_t tag, '
            'const struct %(name)s *msg)\n'
            '{\n'
            ' struct evbuffer *_buf = evbuffer_new();\n'
            ' assert(_buf != NULL);\n'
            ' evbuffer_drain(_buf, -1);\n'
            ' %(name)s_marshal(_buf, msg);\n'
            ' evtag_marshal(evbuf, tag, EVBUFFER_DATA(_buf), '
            'EVBUFFER_LENGTH(_buf));\n'
            ' evbuffer_free(_buf);\n'
            '}\n' ) % { 'name' : self._name }
class Entry:
    """Base class for one struct member.

    Holds the common metadata (type, name, tag number, owning struct)
    and emits the generated C code for a plain scalar member; the code
    generators are overridden by the typed subclasses.
    """
    def __init__(self, type, name, tag):
        self._type = type
        self._name = name
        self._tag = int(tag)
        self._ctype = type        # C type used in the generated code
        self._optional = 0
        self._can_be_array = 0    # subclasses set to 1 when array-capable
        self._array = 0
        self._line_count = -1     # input line number, for diagnostics
        self._struct = None       # owning Struct, set via SetStruct()
        self._refname = None      # referenced struct name, if any

    def GetTranslation(self):
        """Returns the dict used to expand the %(...)s code templates."""
        return { "parent_name" : self._struct.Name(),
                 "name" : self._name,
                 "ctype" : self._ctype,
                 "refname" : self._refname
                 }

    def SetStruct(self, struct):
        self._struct = struct

    def LineCount(self):
        assert self._line_count != -1
        return self._line_count

    def SetLineCount(self, number):
        self._line_count = number

    def Array(self):
        return self._array

    def Optional(self):
        return self._optional

    def Tag(self):
        return self._tag

    def Name(self):
        return self._name

    def Type(self):
        return self._type

    def MakeArray(self, yes=1):
        self._array = yes

    def MakeOptional(self):
        self._optional = 1

    def GetFuncName(self):
        return '%s_%s_get' % (self._struct.Name(), self._name)

    def GetDeclaration(self, funcname):
        code = [ 'int %s(struct %s *, %s *);' % (
            funcname, self._struct.Name(), self._ctype ) ]
        return code

    def CodeGet(self):
        """Emits the C getter; fails unless the member has been set."""
        code = (
            'int',
            '%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, '
            '%(ctype)s *value)',
            '{',
            ' if (msg->%(name)s_set != 1)',
            ' return (-1);',
            ' *value = msg->%(name)s_data;',
            ' return (0);',
            '}' )
        code = '\n'.join(code)
        code = code % self.GetTranslation()
        return code.split('\n')

    def AssignFuncName(self):
        return '%s_%s_assign' % (self._struct.Name(), self._name)

    def AddFuncName(self):
        return '%s_%s_add' % (self._struct.Name(), self._name)

    def AssignDeclaration(self, funcname):
        code = [ 'int %s(struct %s *, const %s);' % (
            funcname, self._struct.Name(), self._ctype ) ]
        return code

    def CodeAssign(self):
        """Emits the C setter; stores the value and marks it set."""
        code = [ 'int',
                 '%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,'
                 ' const %(ctype)s value)',
                 '{',
                 ' msg->%(name)s_set = 1;',
                 ' msg->%(name)s_data = value;',
                 ' return (0);',
                 '}' ]
        code = '\n'.join(code)
        code = code % self.GetTranslation()
        return code.split('\n')

    def CodeClear(self, structname):
        code = [ '%s->%s_set = 0;' % (structname, self.Name()) ]
        return code

    def CodeComplete(self, structname):
        """Emits the completeness check; optional members need none."""
        if self.Optional():
            return []
        code = [ 'if (!%s->%s_set)' % (structname, self.Name()),
                 ' return (-1);' ]
        return code

    def CodeFree(self, name):
        # Scalars own no heap memory; subclasses override as needed.
        return []

    def CodeBase(self):
        """Emits this member's slots of the accessor vtable initializer."""
        code = [
            '%(parent_name)s_%(name)s_assign,',
            '%(parent_name)s_%(name)s_get,'
            ]
        if self.Array():
            code.append('%(parent_name)s_%(name)s_add,')
        code = '\n'.join(code)
        code = code % self.GetTranslation()
        return code.split('\n')

    def Verify(self):
        """Sanity-checks the entry configuration; exits on error."""
        if self.Array() and not self._can_be_array:
            print >>sys.stderr, (
                'Entry "%s" cannot be created as an array '
                'around line %d' ) % (self._name, self.LineCount())
            sys.exit(1)
        if not self._struct:
            print >>sys.stderr, (
                'Entry "%s" does not know which struct it belongs to '
                'around line %d' ) % (self._name, self.LineCount())
            sys.exit(1)
        if self._optional and self._array:
            print >>sys.stderr, ( 'Entry "%s" has illegal combination of '
                                  'optional and array around line %d' ) % (
                self._name, self.LineCount() )
            sys.exit(1)
class EntryBytes(Entry):
    """Fixed-length byte-array member, e.g. 'bytes digest[16]'.

    Stored inline in the generated struct as uint8_t <name>_data[len].
    """
    def __init__(self, type, name, tag, length):
        # Init base class
        Entry.__init__(self, type, name, tag)
        self._length = length
        self._ctype = 'uint8_t'

    def GetDeclaration(self, funcname):
        # Getter hands out a pointer into the inline array.
        code = [ 'int %s(struct %s *, %s **);' % (
            funcname, self._struct.Name(), self._ctype ) ]
        return code

    def AssignDeclaration(self, funcname):
        code = [ 'int %s(struct %s *, const %s *);' % (
            funcname, self._struct.Name(), self._ctype ) ]
        return code

    def Declaration(self):
        dcl = ['uint8_t %s_data[%s];' % (self._name, self._length)]
        return dcl

    def CodeGet(self):
        name = self._name
        code = [ 'int',
                 '%s_%s_get(struct %s *msg, %s **value)' % (
                     self._struct.Name(), name,
                     self._struct.Name(), self._ctype),
                 '{',
                 ' if (msg->%s_set != 1)' % name,
                 ' return (-1);',
                 ' *value = msg->%s_data;' % name,
                 ' return (0);',
                 '}' ]
        return code

    def CodeAssign(self):
        # Copies exactly the declared number of bytes into the struct.
        name = self._name
        code = [ 'int',
                 '%s_%s_assign(struct %s *msg, const %s *value)' % (
                     self._struct.Name(), name,
                     self._struct.Name(), self._ctype),
                 '{',
                 ' msg->%s_set = 1;' % name,
                 ' memcpy(msg->%s_data, value, %s);' % (
                     name, self._length),
                 ' return (0);',
                 '}' ]
        return code

    def CodeUnmarshal(self, buf, tag_name, var_name):
        code = [ 'if (evtag_unmarshal_fixed(%s, %s, ' % (buf, tag_name) +
                 '%s->%s_data, ' % (var_name, self._name) +
                 'sizeof(%s->%s_data)) == -1) {' % (
                     var_name, self._name),
                 ' event_warnx("%%s: failed to unmarshal %s", __func__);' % (
                     self._name ),
                 ' return (-1);',
                 '}'
                 ]
        return code

    def CodeMarshal(self, buf, tag_name, var_name):
        code = ['evtag_marshal(%s, %s, %s->%s_data, sizeof(%s->%s_data));' % (
            buf, tag_name, var_name, self._name, var_name, self._name )]
        return code

    def CodeClear(self, structname):
        code = [ '%s->%s_set = 0;' % (structname, self.Name()),
                 'memset(%s->%s_data, 0, sizeof(%s->%s_data));' % (
                     structname, self._name, structname, self._name)]
        return code

    def CodeNew(self, name):
        code = ['memset(%s->%s_data, 0, sizeof(%s->%s_data));' % (
            name, self._name, name, self._name)]
        return code

    def Verify(self):
        """A fixed bytes entry must carry a length; exits otherwise."""
        if not self._length:
            print >>sys.stderr, 'Entry "%s" needs a length around line %d' % (
                self._name, self.LineCount() )
            sys.exit(1)
        Entry.Verify(self)
class EntryInt(Entry):
    """32-bit unsigned integer member ('int name = tag')."""
    def __init__(self, type, name, tag):
        # Init base class
        Entry.__init__(self, type, name, tag)
        self._ctype = 'uint32_t'

    def CodeUnmarshal(self, buf, tag_name, var_name):
        code = ['if (evtag_unmarshal_int(%s, %s, &%s->%s_data) == -1) {' % (
            buf, tag_name, var_name, self._name),
                ' event_warnx("%%s: failed to unmarshal %s", __func__);' % (
                    self._name ),
                ' return (-1);',
                '}' ]
        return code

    def CodeMarshal(self, buf, tag_name, var_name):
        code = ['evtag_marshal_int(%s, %s, %s->%s_data);' % (
            buf, tag_name, var_name, self._name)]
        return code

    def Declaration(self):
        dcl = ['uint32_t %s_data;' % self._name]
        return dcl

    def CodeNew(self, name):
        code = ['%s->%s_data = 0;' % (name, self._name)]
        return code
class EntryString(Entry):
    """NUL-terminated string member; the generated code owns a strdup'd
    heap copy in char *<name>_data."""
    def __init__(self, type, name, tag):
        # Init base class
        Entry.__init__(self, type, name, tag)
        self._ctype = 'char *'

    def CodeAssign(self):
        # Frees any previous value, then stores a private copy.
        name = self._name
        code = """int
%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
const %(ctype)s value)
{
if (msg->%(name)s_data != NULL)
free(msg->%(name)s_data);
if ((msg->%(name)s_data = strdup(value)) == NULL)
return (-1);
msg->%(name)s_set = 1;
return (0);
}""" % self.GetTranslation()
        return code.split('\n')

    def CodeUnmarshal(self, buf, tag_name, var_name):
        code = ['if (evtag_unmarshal_string(%s, %s, &%s->%s_data) == -1) {' % (
            buf, tag_name, var_name, self._name),
                ' event_warnx("%%s: failed to unmarshal %s", __func__);' % (
                    self._name ),
                ' return (-1);',
                '}'
                ]
        return code

    def CodeMarshal(self, buf, tag_name, var_name):
        code = ['evtag_marshal_string(%s, %s, %s->%s_data);' % (
            buf, tag_name, var_name, self._name)]
        return code

    def CodeClear(self, structname):
        # Releases the heap copy and resets the set flag.
        code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
                 ' free (%s->%s_data);' % (structname, self.Name()),
                 ' %s->%s_data = NULL;' % (structname, self.Name()),
                 ' %s->%s_set = 0;' % (structname, self.Name()),
                 '}'
                 ]
        return code

    def CodeNew(self, name):
        code = ['%s->%s_data = NULL;' % (name, self._name)]
        return code

    def CodeFree(self, name):
        code = ['if (%s->%s_data != NULL)' % (name, self._name),
                ' free (%s->%s_data); ' % (name, self._name)]
        return code

    def Declaration(self):
        dcl = ['char *%s_data;' % self._name]
        return dcl
class EntryStruct(Entry):
    """Member that references another generated struct
    ('struct[other] name = tag'); may also be used as an array element."""
    def __init__(self, type, name, tag, refname):
        # Init base class
        Entry.__init__(self, type, name, tag)
        self._can_be_array = 1
        self._refname = refname
        self._ctype = 'struct %s*' % refname

    def CodeGet(self):
        # Lazily allocates the sub-struct on first access.
        name = self._name
        code = [ 'int',
                 '%s_%s_get(struct %s *msg, %s *value)' % (
                     self._struct.Name(), name,
                     self._struct.Name(), self._ctype),
                 '{',
                 ' if (msg->%s_set != 1) {' % name,
                 ' msg->%s_data = %s_new();' % (name, self._refname),
                 ' if (msg->%s_data == NULL)' % name,
                 ' return (-1);',
                 ' msg->%s_set = 1;' % name,
                 ' }',
                 ' *value = msg->%s_data;' % name,
                 ' return (0);',
                 '}' ]
        return code

    def CodeAssign(self):
        # Deep-copies the value by marshaling it into a temporary
        # evbuffer and unmarshaling into our own sub-struct.
        name = self._name
        code = """int
%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
const %(ctype)s value)
{
struct evbuffer *tmp = NULL;
if (msg->%(name)s_set) {
%(refname)s_clear(msg->%(name)s_data);
msg->%(name)s_set = 0;
} else {
msg->%(name)s_data = %(refname)s_new();
if (msg->%(name)s_data == NULL) {
event_warn("%%s: %(refname)s_new()", __func__);
goto error;
}
}
if ((tmp = evbuffer_new()) == NULL) {
event_warn("%%s: evbuffer_new()", __func__);
goto error;
}
%(refname)s_marshal(tmp, value);
if (%(refname)s_unmarshal(msg->%(name)s_data, tmp) == -1) {
event_warnx("%%s: %(refname)s_unmarshal", __func__);
goto error;
}
msg->%(name)s_set = 1;
evbuffer_free(tmp);
return (0);
error:
if (tmp != NULL)
evbuffer_free(tmp);
if (msg->%(name)s_data != NULL) {
%(refname)s_free(msg->%(name)s_data);
msg->%(name)s_data = NULL;
}
return (-1);
}""" % self.GetTranslation()
        return code.split('\n')

    def CodeComplete(self, structname):
        """Recurses into the referenced struct's completeness check."""
        if self.Optional():
            code = [ 'if (%s->%s_set && %s_complete(%s->%s_data) == -1)' % (
                structname, self.Name(),
                self._refname, structname, self.Name()),
                     ' return (-1);' ]
        else:
            code = [ 'if (%s_complete(%s->%s_data) == -1)' % (
                self._refname, structname, self.Name()),
                     ' return (-1);' ]
        return code

    def CodeUnmarshal(self, buf, tag_name, var_name):
        code = ['%s->%s_data = %s_new();' % (
            var_name, self._name, self._refname),
                'if (%s->%s_data == NULL)' % (var_name, self._name),
                ' return (-1);',
                'if (evtag_unmarshal_%s(%s, %s, %s->%s_data) == -1) {' % (
                    self._refname, buf, tag_name, var_name, self._name),
                ' event_warnx("%%s: failed to unmarshal %s", __func__);' % (
                    self._name ),
                ' return (-1);',
                '}'
                ]
        return code

    def CodeMarshal(self, buf, tag_name, var_name):
        code = ['evtag_marshal_%s(%s, %s, %s->%s_data);' % (
            self._refname, buf, tag_name, var_name, self._name)]
        return code

    def CodeClear(self, structname):
        code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
                 ' %s_free(%s->%s_data);' % (
                     self._refname, structname, self.Name()),
                 ' %s->%s_data = NULL;' % (structname, self.Name()),
                 ' %s->%s_set = 0;' % (structname, self.Name()),
                 '}'
                 ]
        return code

    def CodeNew(self, name):
        code = ['%s->%s_data = NULL;' % (name, self._name)]
        return code

    def CodeFree(self, name):
        code = ['if (%s->%s_data != NULL)' % (name, self._name),
                ' %s_free(%s->%s_data); ' % (
                    self._refname, name, self._name)]
        return code

    def Declaration(self):
        dcl = ['%s %s_data;' % (self._ctype, self._name)]
        return dcl
class EntryVarBytes(Entry):
    """Variable-length byte-array member ('bytes name = tag' without a
    fixed length); owns a malloc'd buffer plus a uint32_t length field."""
    def __init__(self, type, name, tag):
        # Init base class
        Entry.__init__(self, type, name, tag)
        self._ctype = 'uint8_t *'

    def GetDeclaration(self, funcname):
        # Getter returns both the data pointer and its length.
        code = [ 'int %s(struct %s *, %s *, uint32_t *);' % (
            funcname, self._struct.Name(), self._ctype ) ]
        return code

    def AssignDeclaration(self, funcname):
        code = [ 'int %s(struct %s *, const %s, uint32_t);' % (
            funcname, self._struct.Name(), self._ctype ) ]
        return code

    def CodeAssign(self):
        name = self._name
        code = [ 'int',
                 '%s_%s_assign(struct %s *msg, '
                 'const %s value, uint32_t len)' % (
                     self._struct.Name(), name,
                     self._struct.Name(), self._ctype),
                 '{',
                 ' if (msg->%s_data != NULL)' % name,
                 ' free (msg->%s_data);' % name,
                 ' msg->%s_data = malloc(len);' % name,
                 ' if (msg->%s_data == NULL)' % name,
                 ' return (-1);',
                 ' msg->%s_set = 1;' % name,
                 ' msg->%s_length = len;' % name,
                 ' memcpy(msg->%s_data, value, len);' % name,
                 ' return (0);',
                 '}' ]
        return code

    def CodeGet(self):
        name = self._name
        code = [ 'int',
                 '%s_%s_get(struct %s *msg, %s *value, uint32_t *plen)' % (
                     self._struct.Name(), name,
                     self._struct.Name(), self._ctype),
                 '{',
                 ' if (msg->%s_set != 1)' % name,
                 ' return (-1);',
                 ' *value = msg->%s_data;' % name,
                 ' *plen = msg->%s_length;' % name,
                 ' return (0);',
                 '}' ]
        return code

    def CodeUnmarshal(self, buf, tag_name, var_name):
        code = ['if (evtag_payload_length(%s, &%s->%s_length) == -1)' % (
            buf, var_name, self._name),
                ' return (-1);',
                # We do not want DoS opportunities
                'if (%s->%s_length > EVBUFFER_LENGTH(%s))' % (
                    var_name, self._name, buf),
                ' return (-1);',
                'if ((%s->%s_data = malloc(%s->%s_length)) == NULL)' % (
                    var_name, self._name, var_name, self._name),
                ' return (-1);',
                'if (evtag_unmarshal_fixed(%s, %s, %s->%s_data, '
                '%s->%s_length) == -1) {' % (
                    buf, tag_name, var_name, self._name, var_name, self._name),
                ' event_warnx("%%s: failed to unmarshal %s", __func__);' % (
                    self._name ),
                ' return (-1);',
                '}'
                ]
        return code

    def CodeMarshal(self, buf, tag_name, var_name):
        code = ['evtag_marshal(%s, %s, %s->%s_data, %s->%s_length);' % (
            buf, tag_name, var_name, self._name, var_name, self._name)]
        return code

    def CodeClear(self, structname):
        code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
                 ' free (%s->%s_data);' % (structname, self.Name()),
                 ' %s->%s_data = NULL;' % (structname, self.Name()),
                 ' %s->%s_length = 0;' % (structname, self.Name()),
                 ' %s->%s_set = 0;' % (structname, self.Name()),
                 '}'
                 ]
        return code

    def CodeNew(self, name):
        code = ['%s->%s_data = NULL;' % (name, self._name),
                '%s->%s_length = 0;' % (name, self._name) ]
        return code

    def CodeFree(self, name):
        code = ['if (%s->%s_data != NULL)' % (name, self._name),
                ' free (%s->%s_data); ' % (name, self._name)]
        return code

    def Declaration(self):
        dcl = ['uint8_t *%s_data;' % self._name,
               'uint32_t %s_length;' % self._name]
        return dcl
class EntryArray(Entry):
    """Wraps another (struct-typed) entry as a growable array.

    The generated struct stores a pointer vector <name>_data plus
    <name>_length (elements in use) and <name>_num_allocated (capacity,
    doubled on demand by the generated _add function).

    Fix: CodeComplete() previously consulted translate["structname"]
    before inserting it into the dict, so an 'optional array' entry
    crashed with KeyError while generating the completeness check.
    """
    def __init__(self, entry):
        # Init base class with the wrapped entry's identity.
        Entry.__init__(self, entry._type, entry._name, entry._tag)
        self._entry = entry
        self._refname = entry._refname
        self._ctype = 'struct %s *' % self._refname

    def GetDeclaration(self, funcname):
        """Allows direct access to elements of the array."""
        translate = self.GetTranslation()
        translate["funcname"] = funcname
        code = [
            'int %(funcname)s(struct %(parent_name)s *, int, %(ctype)s *);' %
            translate ]
        return code

    def AssignDeclaration(self, funcname):
        code = [ 'int %s(struct %s *, int, const %s);' % (
            funcname, self._struct.Name(), self._ctype ) ]
        return code

    def AddDeclaration(self, funcname):
        # _add returns the freshly appended element (or NULL).
        code = [ '%s %s(struct %s *);' % (
            self._ctype, funcname, self._struct.Name() ) ]
        return code

    def CodeGet(self):
        """Emits the bounds-checked element getter."""
        code = """int
%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, int offset,
%(ctype)s *value)
{
if (!msg->%(name)s_set || offset < 0 || offset >= msg->%(name)s_length)
return (-1);
*value = msg->%(name)s_data[offset];
return (0);
}""" % self.GetTranslation()
        return code.split('\n')

    def CodeAssign(self):
        """Emits the element setter (deep copy via a temporary evbuffer)."""
        code = """int
%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg, int off,
const %(ctype)s value)
{
struct evbuffer *tmp = NULL;
if (!msg->%(name)s_set || off < 0 || off >= msg->%(name)s_length)
return (-1);
%(refname)s_clear(msg->%(name)s_data[off]);
if ((tmp = evbuffer_new()) == NULL) {
event_warn("%%s: evbuffer_new()", __func__);
goto error;
}
%(refname)s_marshal(tmp, value);
if (%(refname)s_unmarshal(msg->%(name)s_data[off], tmp) == -1) {
event_warnx("%%s: %(refname)s_unmarshal", __func__);
goto error;
}
evbuffer_free(tmp);
return (0);
error:
if (tmp != NULL)
evbuffer_free(tmp);
%(refname)s_clear(msg->%(name)s_data[off]);
return (-1);
}""" % self.GetTranslation()
        return code.split('\n')

    def CodeAdd(self):
        """Emits the _add function: grows the vector (doubling capacity)
        and appends a newly allocated element."""
        code = \
"""%(ctype)s
%(parent_name)s_%(name)s_add(struct %(parent_name)s *msg)
{
if (++msg->%(name)s_length >= msg->%(name)s_num_allocated) {
int tobe_allocated = msg->%(name)s_num_allocated;
%(ctype)s* new_data = NULL;
tobe_allocated = !tobe_allocated ? 1 : tobe_allocated << 1;
new_data = (%(ctype)s*) realloc(msg->%(name)s_data,
tobe_allocated * sizeof(%(ctype)s));
if (new_data == NULL)
goto error;
msg->%(name)s_data = new_data;
msg->%(name)s_num_allocated = tobe_allocated;
}
msg->%(name)s_data[msg->%(name)s_length - 1] = %(refname)s_new();
if (msg->%(name)s_data[msg->%(name)s_length - 1] == NULL)
goto error;
msg->%(name)s_set = 1;
return (msg->%(name)s_data[msg->%(name)s_length - 1]);
error:
--msg->%(name)s_length;
return (NULL);
}
""" % self.GetTranslation()
        return code.split('\n')

    def CodeComplete(self, structname):
        """Emits the completeness check, recursing into every element."""
        code = []
        translate = self.GetTranslation()
        # BUGFIX: 'structname' must be present in the translation dict
        # *before* it is used to format the optional guard below;
        # previously an optional array raised KeyError here.
        translate["structname"] = structname
        if self.Optional():
            code.append( 'if (%(structname)s->%(name)s_set)' % translate)
        tmp = """{
int i;
for (i = 0; i < %(structname)s->%(name)s_length; ++i) {
if (%(refname)s_complete(%(structname)s->%(name)s_data[i]) == -1)
return (-1);
}
}""" % translate
        code.extend(tmp.split('\n'))
        return code

    def CodeUnmarshal(self, buf, tag_name, var_name):
        # Appends a fresh element, then unmarshals into it; rolls the
        # length back if decoding fails.
        translate = self.GetTranslation()
        translate["var_name"] = var_name
        translate["buf"] = buf
        translate["tag_name"] = tag_name
        code = """if (%(parent_name)s_%(name)s_add(%(var_name)s) == NULL)
return (-1);
if (evtag_unmarshal_%(refname)s(%(buf)s, %(tag_name)s,
%(var_name)s->%(name)s_data[%(var_name)s->%(name)s_length - 1]) == -1) {
--%(var_name)s->%(name)s_length;
event_warnx("%%s: failed to unmarshal %(name)s", __func__);
return (-1);
}""" % translate
        return code.split('\n')

    def CodeMarshal(self, buf, tag_name, var_name):
        code = ['{',
                ' int i;',
                ' for (i = 0; i < %s->%s_length; ++i) {' % (
                    var_name, self._name),
                ' evtag_marshal_%s(%s, %s, %s->%s_data[i]);' % (
                    self._refname, buf, tag_name, var_name, self._name),
                ' }',
                '}'
                ]
        return code

    def CodeClear(self, structname):
        code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
                 ' int i;',
                 ' for (i = 0; i < %s->%s_length; ++i) {' % (
                     structname, self.Name()),
                 ' %s_free(%s->%s_data[i]);' % (
                     self._refname, structname, self.Name()),
                 ' }',
                 ' free(%s->%s_data);' % (structname, self.Name()),
                 ' %s->%s_data = NULL;' % (structname, self.Name()),
                 ' %s->%s_set = 0;' % (structname, self.Name()),
                 ' %s->%s_length = 0;' % (structname, self.Name()),
                 ' %s->%s_num_allocated = 0;' % (structname, self.Name()),
                 '}'
                 ]
        return code

    def CodeNew(self, name):
        code = ['%s->%s_data = NULL;' % (name, self._name),
                '%s->%s_length = 0;' % (name, self._name),
                '%s->%s_num_allocated = 0;' % (name, self._name)]
        return code

    def CodeFree(self, name):
        code = ['if (%s->%s_data != NULL) {' % (name, self._name),
                ' int i;',
                ' for (i = 0; i < %s->%s_length; ++i) {' % (
                    name, self._name),
                ' %s_free(%s->%s_data[i]); ' % (
                    self._refname, name, self._name),
                ' %s->%s_data[i] = NULL;' % (name, self._name),
                ' }',
                ' free(%s->%s_data);' % (name, self._name),
                ' %s->%s_data = NULL;' % (name, self._name),
                ' %s->%s_length = 0;' % (name, self._name),
                ' %s->%s_num_allocated = 0;' % (name, self._name),
                '}'
                ]
        return code

    def Declaration(self):
        dcl = ['struct %s **%s_data;' % (self._refname, self._name),
               'int %s_length;' % self._name,
               'int %s_num_allocated;' % self._name ]
        return dcl
def NormalizeLine(line):
    """Strips any trailing C++-style comment, trims surrounding
    whitespace and canonicalizes the remaining whitespace using the
    module-level patterns."""
    without_comment = cppcomment.sub('', line)
    trimmed = without_comment.strip()
    return white.sub(' ', trimmed)
def ProcessOneEntry(newstruct, entry):
    """Parses one member declaration of the form
    '[optional|array] type name[length] = tag' and attaches the
    resulting Entry object to *newstruct*.

    Exits the program on any parse error.  Returns a list of structs
    created as a side effect (currently always empty; kept for the
    caller's extend() protocol).
    """
    optional = 0
    array = 0
    entry_type = ''
    name = ''
    tag = ''
    tag_set = None
    separator = ''
    fixed_length = ''
    tokens = entry.split(' ')
    while tokens:
        token = tokens[0]
        tokens = tokens[1:]
        # Leading qualifiers, only valid before the type.
        if not entry_type:
            if not optional and token == 'optional':
                optional = 1
                continue
            if not array and token == 'array':
                array = 1
                continue
        if not entry_type:
            entry_type = token
            continue
        # The name may carry an optional fixed length: name[len]
        if not name:
            res = re.match(r'^([^\[\]]+)(\[.*\])?$', token)
            if not res:
                print >>sys.stderr, 'Cannot parse name: \"%s\" around %d' % (
                    entry, line_count)
                sys.exit(1)
            name = res.group(1)
            fixed_length = res.group(2)
            if fixed_length:
                fixed_length = fixed_length[1:-1]
            continue
        if not separator:
            separator = token
            if separator != '=':
                print >>sys.stderr, 'Expected "=" after name \"%s\" got %s' % (
                    name, token)
                sys.exit(1)
            continue
        if not tag_set:
            tag_set = 1
            # Accept a decimal or 0x-prefixed hex tag; int(token, 0)
            # below understands both.  BUGFIX: the old pattern
            # r'^(0x)?[0-9]+$' allowed the '0x' prefix but rejected the
            # hex digits a-f themselves, so e.g. 0x1A never parsed.
            if not re.match(r'^(0x[0-9a-fA-F]+|[0-9]+)$', token):
                print >>sys.stderr, 'Expected tag number: \"%s\"' % entry
                sys.exit(1)
            tag = int(token, 0)
            continue
        print >>sys.stderr, 'Cannot parse \"%s\"' % entry
        sys.exit(1)
    if not tag_set:
        print >>sys.stderr, 'Need tag number: \"%s\"' % entry
        sys.exit(1)
    # Create the right entry
    if entry_type == 'bytes':
        if fixed_length:
            newentry = EntryBytes(entry_type, name, tag, fixed_length)
        else:
            newentry = EntryVarBytes(entry_type, name, tag)
    elif entry_type == 'int' and not fixed_length:
        newentry = EntryInt(entry_type, name, tag)
    elif entry_type == 'string' and not fixed_length:
        newentry = EntryString(entry_type, name, tag)
    else:
        res = re.match(r'^struct\[(%s)\]$' % _STRUCT_RE,
                       entry_type, re.IGNORECASE)
        if res:
            # References another struct defined in our file
            newentry = EntryStruct(entry_type, name, tag, res.group(1))
        else:
            print >>sys.stderr, 'Bad type: "%s" in "%s"' % (entry_type, entry)
            sys.exit(1)
    structs = []
    if optional:
        newentry.MakeOptional()
    if array:
        newentry.MakeArray()
    newentry.SetStruct(newstruct)
    newentry.SetLineCount(line_count)
    newentry.Verify()
    if array:
        # We need to encapsulate this entry into a struct; borgify it.
        # (Removed the dead 'newname' local that was never used.)
        newentry = EntryArray(newentry)
        newentry.SetStruct(newstruct)
        newentry.SetLineCount(line_count)
        newentry.MakeArray()
    newstruct.AddEntry(newentry)
    return structs
def ProcessStruct(data):
    """Builds a Struct from one normalized 'struct name { ...; }' line
    (as produced by GetNextStruct) and returns the list of all structs
    created while processing it, the new struct last."""
    tokens = data.split(' ')
    # First three tokens are: 'struct' 'name' '{'
    newstruct = Struct(tokens[1])
    # Everything between the braces, one member per ';'.
    inside = ' '.join(tokens[3:-1])
    tokens = inside.split(';')
    structs = []
    for entry in tokens:
        entry = NormalizeLine(entry)
        if not entry:
            continue
        # It's possible that new structs get defined in here
        structs.extend(ProcessOneEntry(newstruct, entry))
    structs.append(newstruct)
    return structs
def GetNextStruct(file):
    """Scans *file* up to and including the next complete
    'struct X { ... }' block and returns it as one normalized line;
    returns '' at EOF.

    C and C++ comments are stripped.  Preprocessor directives seen
    between structs are collected into the cppdirect/headerdirect
    globals for later emission into the generated files.  Exits on
    malformed input.
    """
    global line_count
    global cppdirect
    got_struct = 0
    processed_lines = []   # NOTE(review): never used; kept as-is
    have_c_comment = 0
    data = ''
    while 1:
        line = file.readline()
        if not line:
            break
        line_count += 1
        line = line[:-1]   # drop the trailing newline
        # Strip /* ... */ comments, tracking multi-line state.
        if not have_c_comment and re.search(r'/\*', line):
            if re.search(r'/\*.*\*/', line):
                line = re.sub(r'/\*.*\*/', '', line)
            else:
                line = re.sub(r'/\*.*$', '', line)
                have_c_comment = 1
        if have_c_comment:
            if not re.search(r'\*/', line):
                continue
            have_c_comment = 0
            line = re.sub(r'^.*\*/', '', line)
        line = NormalizeLine(line)
        if not line:
            continue
        if not got_struct:
            # Between structs: collect preprocessor directives,
            # otherwise the line must open a struct.
            if re.match(r'#include ["<].*[>"]', line):
                cppdirect.append(line)
                continue
            if re.match(r'^#(if( |def)|endif)', line):
                cppdirect.append(line)
                continue
            if re.match(r'^#define', line):
                headerdirect.append(line)
                continue
            if not re.match(r'^struct %s {$' % _STRUCT_RE,
                            line, re.IGNORECASE):
                print >>sys.stderr, 'Missing struct on line %d: %s' % (
                    line_count, line)
                sys.exit(1)
            else:
                got_struct = 1
                data += line
            continue
        # We are inside the struct
        tokens = line.split('}')
        if len(tokens) == 1:
            data += ' ' + line
            continue
        if len(tokens[1]):
            print >>sys.stderr, 'Trailing garbage after struct on line %d' % (
                line_count )
            sys.exit(1)
        # We found the end of the struct
        data += ' %s}' % tokens[0]
        break
    # Remove any comments, that might be in there
    data = re.sub(r'/\*.*\*/', '', data)
    return data
def Parse(file):
    """Parse the whole input file.

    Repeatedly pulls normalized struct definitions from *file* via
    GetNextStruct and converts each into Struct objects; returns them
    all as a single list.
    """
    result = []
    # GetNextStruct returns '' at end of input, which ends the loop.
    data = GetNextStruct(file)
    while data:
        result.extend(ProcessStruct(data))
        data = GetNextStruct(file)
    return result
def GuardName(name):
    """Derive the C include-guard macro name for file *name*.

    '.' and '/' are mapped to '_', the result is upper-cased and
    wrapped in single underscores: 'dir/foo.rpc' -> '_DIR_FOO_RPC_'.
    (NOTE(review): a leading underscore followed by an upper-case
    letter is technically reserved in C — kept for output
    compatibility with previously generated headers.)
    """
    return '_' + name.replace('.', '_').replace('/', '_').upper() + '_'
def HeaderPreamble(name):
    """Return the boilerplate that opens the generated .gen.h file.

    Emits the include guard, the stdint.h availability shim, any
    preprocessor directives collected from the .rpc input, and the
    generic EVTAG_* accessor macros used by the generated code.
    """
    guard = GuardName(name)
    pre = (
        '/*\n'
        ' * Automatically generated from %s\n'
        ' */\n\n'
        '#ifndef %s\n'
        '#define %s\n\n' ) % (
        name, guard, guard)

    # insert stdint.h - let's hope everyone has it
    pre += (
        '#include <event-config.h>\n'
        '#ifdef _EVENT_HAVE_STDINT_H\n'
        '#include <stdint.h>\n'
        '#endif\n' )

    if headerdirect:
        # One collected directive per line, then a separating blank line.
        pre += '\n'.join(headerdirect) + '\n\n'

    pre += (
        '#define EVTAG_HAS(msg, member) ((msg)->member##_set == 1)\n'
        '#ifdef __GNUC__\n'
        '#define EVTAG_ASSIGN(msg, member, args...) '
        '(*(msg)->base->member##_assign)(msg, ## args)\n'
        '#define EVTAG_GET(msg, member, args...) '
        '(*(msg)->base->member##_get)(msg, ## args)\n'
        '#else\n'
        '#define EVTAG_ASSIGN(msg, member, ...) '
        '(*(msg)->base->member##_assign)(msg, ## __VA_ARGS__)\n'
        '#define EVTAG_GET(msg, member, ...) '
        '(*(msg)->base->member##_get)(msg, ## __VA_ARGS__)\n'
        '#endif\n'
        '#define EVTAG_ADD(msg, member) (*(msg)->base->member##_add)(msg)\n'
        '#define EVTAG_LEN(msg, member) ((msg)->member##_length)\n'
        )

    return pre
def HeaderPostamble(name):
    """Return the line that closes the include guard opened by HeaderPreamble."""
    return '#endif /* %s */' % GuardName(name)
def BodyPreamble(name):
    """Return the boilerplate that opens the generated .gen.c file.

    Emits the do-not-edit banner, the system includes the generated
    code needs, any collected preprocessor directives, the include of
    the matching .gen.h, and forward declarations of libevent's
    logging helpers.
    """
    global _NAME
    global _VERSION

    header_file = '.'.join(name.split('.')[:-1]) + '.gen.h'

    pieces = []
    pieces.append(( '/*\n'
                    ' * Automatically generated from %s\n'
                    ' * by %s/%s. DO NOT EDIT THIS FILE.\n'
                    ' */\n\n' ) % (name, _NAME, _VERSION))
    pieces.append(  '#include <sys/types.h>\n'
                    '#include <sys/time.h>\n'
                    '#include <stdlib.h>\n'
                    '#include <string.h>\n'
                    '#include <assert.h>\n'
                    '#include <event.h>\n\n' )

    for statement in cppdirect:
        pieces.append('%s\n' % statement)

    pieces.append('\n#include "%s"\n\n' % header_file)

    # Declare libevent's logging helpers so the generated code does
    # not depend on libevent-internal headers.
    pieces.append('void event_err(int eval, const char *fmt, ...);\n')
    pieces.append('void event_warn(const char *fmt, ...);\n')
    pieces.append('void event_errx(int eval, const char *fmt, ...);\n')
    pieces.append('void event_warnx(const char *fmt, ...);\n\n')

    return ''.join(pieces)
def main(argv):
    """Command-line driver.

    argv[1] must name a .rpc description file; the generated header
    and implementation are written beside it as <base>.gen.h and
    <base>.gen.c.  Exits with status 1 on usage or parse errors.
    """
    if len(argv) < 2 or not argv[1]:
        print >>sys.stderr, 'Need RPC description file as first argument.'
        sys.exit(1)

    filename = argv[1]

    ext = filename.split('.')[-1]
    if ext != 'rpc':
        print >>sys.stderr, 'Unrecognized file extension: %s' % ext
        sys.exit(1)

    print >>sys.stderr, 'Reading \"%s\"' % filename

    rpc_fp = open(filename, 'r')
    entities = Parse(rpc_fp)
    rpc_fp.close()

    base = '.'.join(filename.split('.')[:-1])
    header_file = base + '.gen.h'
    impl_file = base + '.gen.c'

    print >>sys.stderr, '... creating "%s"' % header_file
    header_fp = open(header_file, 'w')
    print >>header_fp, HeaderPreamble(filename)

    # Create forward declarations: allows other structs to reference
    # each other
    for ent in entities:
        ent.PrintForwardDeclaration(header_fp)
    print >>header_fp, ''

    for ent in entities:
        ent.PrintTags(header_fp)
        ent.PrintDeclaration(header_fp)
    print >>header_fp, HeaderPostamble(filename)
    header_fp.close()

    print >>sys.stderr, '... creating "%s"' % impl_file
    impl_fp = open(impl_file, 'w')
    print >>impl_fp, BodyPreamble(filename)
    for ent in entities:
        ent.PrintCode(impl_fp)
    impl_fp.close()
# Script entry point: generate .gen.h/.gen.c from the .rpc file named on
# the command line.
if __name__ == '__main__':
    main(sys.argv)
| 31.892102 | 79 | 0.481105 |
import sys
import re
_NAME = "event_rpcgen.py"
_VERSION = "0.1"
_STRUCT_RE = '[a-z][a-z_0-9]*'
line_count = 0
white = re.compile(r'^\s+')
cppcomment = re.compile(r'\/\/.*$')
headerdirect = []
cppdirect = []
class Struct:
def __init__(self, name):
self._name = name
self._entries = []
self._tags = {}
print >>sys.stderr, ' Created struct: %s' % name
def AddEntry(self, entry):
if self._tags.has_key(entry.Tag()):
print >>sys.stderr, ( 'Entry "%s" duplicates tag number '
'%d from "%s" around line %d' ) % (
entry.Name(), entry.Tag(),
self._tags[entry.Tag()], line_count)
sys.exit(1)
self._entries.append(entry)
self._tags[entry.Tag()] = entry.Name()
print >>sys.stderr, ' Added entry: %s' % entry.Name()
def Name(self):
return self._name
def EntryTagName(self, entry):
name = "%s_%s" % (self._name, entry.Name())
return name.upper()
def PrintIdented(self, file, ident, code):
for entry in code:
print >>file, '%s%s' % (ident, entry)
def PrintTags(self, file):
print >>file, '/* Tag definition for %s */' % self._name
print >>file, 'enum %s_ {' % self._name.lower()
for entry in self._entries:
print >>file, ' %s=%d,' % (self.EntryTagName(entry),
entry.Tag())
print >>file, ' %s_MAX_TAGS' % (self._name.upper())
print >>file, '};\n'
def PrintForwardDeclaration(self, file):
print >>file, 'struct %s;' % self._name
def PrintDeclaration(self, file):
print >>file, '/* Structure declaration for %s */' % self._name
print >>file, 'struct %s_access_ {' % self._name
for entry in self._entries:
dcl = entry.AssignDeclaration('(*%s_assign)' % entry.Name())
dcl.extend(
entry.GetDeclaration('(*%s_get)' % entry.Name()))
if entry.Array():
dcl.extend(
entry.AddDeclaration('(*%s_add)' % entry.Name()))
self.PrintIdented(file, ' ', dcl)
print >>file, '};\n'
print >>file, 'struct %s {' % self._name
print >>file, ' struct %s_access_ *base;\n' % self._name
for entry in self._entries:
dcl = entry.Declaration()
self.PrintIdented(file, ' ', dcl)
print >>file, ''
for entry in self._entries:
print >>file, ' uint8_t %s_set;' % entry.Name()
print >>file, '};\n'
print >>file, \
"""struct %(name)s *%(name)s_new(void);
void %(name)s_free(struct %(name)s *);
void %(name)s_clear(struct %(name)s *);
void %(name)s_marshal(struct evbuffer *, const struct %(name)s *);
int %(name)s_unmarshal(struct %(name)s *, struct evbuffer *);
int %(name)s_complete(struct %(name)s *);
void evtag_marshal_%(name)s(struct evbuffer *, uint32_t,
const struct %(name)s *);
int evtag_unmarshal_%(name)s(struct evbuffer *, uint32_t,
struct %(name)s *);""" % { 'name' : self._name }
for entry in self._entries:
self.PrintIdented(file, '', entry.AssignDeclaration(
entry.AssignFuncName()))
self.PrintIdented(file, '', entry.GetDeclaration(
entry.GetFuncName()))
if entry.Array():
self.PrintIdented(file, '', entry.AddDeclaration(
entry.AddFuncName()))
print >>file, '/* --- %s done --- */\n' % self._name
def PrintCode(self, file):
print >>file, ('/*\n'
' * Implementation of %s\n'
' */\n') % self._name
print >>file, \
'static struct %(name)s_access_ __%(name)s_base = {' % \
{ 'name' : self._name }
for entry in self._entries:
self.PrintIdented(file, ' ', entry.CodeBase())
print >>file, '};\n'
print >>file, (
'struct %(name)s *\n'
'%(name)s_new(void)\n'
'{\n'
' struct %(name)s *tmp;\n'
' if ((tmp = malloc(sizeof(struct %(name)s))) == NULL) {\n'
' event_warn("%%s: malloc", __func__);\n'
' return (NULL);\n'
' }\n'
' tmp->base = &__%(name)s_base;\n') % { 'name' : self._name }
for entry in self._entries:
self.PrintIdented(file, ' ', entry.CodeNew('tmp'))
print >>file, ' tmp->%s_set = 0;\n' % entry.Name()
print >>file, (
' return (tmp);\n'
'}\n')
for entry in self._entries:
if entry.Array():
self.PrintIdented(file, '', entry.CodeAdd())
print >>file, ''
for entry in self._entries:
self.PrintIdented(file, '', entry.CodeAssign())
print >>file, ''
for entry in self._entries:
self.PrintIdented(file, '', entry.CodeGet())
print >>file, ''
print >>file, ( 'void\n'
'%(name)s_clear(struct %(name)s *tmp)\n'
'{'
) % { 'name' : self._name }
for entry in self._entries:
self.PrintIdented(file, ' ', entry.CodeClear('tmp'))
print >>file, '}\n'
print >>file, ( 'void\n'
'%(name)s_free(struct %(name)s *tmp)\n'
'{'
) % { 'name' : self._name }
for entry in self._entries:
self.PrintIdented(file, ' ', entry.CodeFree('tmp'))
print >>file, (' free(tmp);\n'
'}\n')
print >>file, ('void\n'
'%(name)s_marshal(struct evbuffer *evbuf, '
'const struct %(name)s *tmp)'
'{') % { 'name' : self._name }
for entry in self._entries:
indent = ' '
if entry.Optional():
indent += ' '
print >>file, ' if (tmp->%s_set) {' % entry.Name()
self.PrintIdented(
file, indent,
entry.CodeMarshal('evbuf', self.EntryTagName(entry), 'tmp'))
if entry.Optional():
print >>file, ' }'
print >>file, '}\n'
print >>file, ('int\n'
'%(name)s_unmarshal(struct %(name)s *tmp, '
' struct evbuffer *evbuf)\n'
'{\n'
' uint32_t tag;\n'
' while (EVBUFFER_LENGTH(evbuf) > 0) {\n'
' if (evtag_peek(evbuf, &tag) == -1)\n'
' return (-1);\n'
' switch (tag) {\n'
) % { 'name' : self._name }
for entry in self._entries:
print >>file, ' case %s:\n' % self.EntryTagName(entry)
if not entry.Array():
print >>file, (
' if (tmp->%s_set)\n'
' return (-1);'
) % (entry.Name())
self.PrintIdented(
file, ' ',
entry.CodeUnmarshal('evbuf',
self.EntryTagName(entry), 'tmp'))
print >>file, ( ' tmp->%s_set = 1;\n' % entry.Name() +
' break;\n' )
print >>file, ( ' default:\n'
' return -1;\n'
' }\n'
' }\n' )
print >>file, ( ' if (%(name)s_complete(tmp) == -1)\n'
' return (-1);'
) % { 'name' : self._name }
print >>file, ( ' return (0);\n'
'}\n')
print >>file, (
'int\n'
'%(name)s_complete(struct %(name)s *msg)\n'
'{' ) % { 'name' : self._name }
for entry in self._entries:
self.PrintIdented(
file, ' ',
entry.CodeComplete('msg'))
print >>file, (
' return (0);\n'
'}\n' )
print >>file, (
'int\n'
'evtag_unmarshal_%(name)s(struct evbuffer *evbuf, '
'uint32_t need_tag, struct %(name)s *msg)\n'
'{\n'
' uint32_t tag;\n'
' int res = -1;\n'
'\n'
' struct evbuffer *tmp = evbuffer_new();\n'
'\n'
' if (evtag_unmarshal(evbuf, &tag, tmp) == -1'
' || tag != need_tag)\n'
' goto error;\n'
'\n'
' if (%(name)s_unmarshal(msg, tmp) == -1)\n'
' goto error;\n'
'\n'
' res = 0;\n'
'\n'
' error:\n'
' evbuffer_free(tmp);\n'
' return (res);\n'
'}\n' ) % { 'name' : self._name }
print >>file, (
'void\n'
'evtag_marshal_%(name)s(struct evbuffer *evbuf, uint32_t tag, '
'const struct %(name)s *msg)\n'
'{\n'
' struct evbuffer *_buf = evbuffer_new();\n'
' assert(_buf != NULL);\n'
' evbuffer_drain(_buf, -1);\n'
' %(name)s_marshal(_buf, msg);\n'
' evtag_marshal(evbuf, tag, EVBUFFER_DATA(_buf), '
'EVBUFFER_LENGTH(_buf));\n'
' evbuffer_free(_buf);\n'
'}\n' ) % { 'name' : self._name }
class Entry:
def __init__(self, type, name, tag):
self._type = type
self._name = name
self._tag = int(tag)
self._ctype = type
self._optional = 0
self._can_be_array = 0
self._array = 0
self._line_count = -1
self._struct = None
self._refname = None
def GetTranslation(self):
return { "parent_name" : self._struct.Name(),
"name" : self._name,
"ctype" : self._ctype,
"refname" : self._refname
}
def SetStruct(self, struct):
self._struct = struct
def LineCount(self):
assert self._line_count != -1
return self._line_count
def SetLineCount(self, number):
self._line_count = number
def Array(self):
return self._array
def Optional(self):
return self._optional
def Tag(self):
return self._tag
def Name(self):
return self._name
def Type(self):
return self._type
def MakeArray(self, yes=1):
self._array = yes
def MakeOptional(self):
self._optional = 1
def GetFuncName(self):
return '%s_%s_get' % (self._struct.Name(), self._name)
def GetDeclaration(self, funcname):
code = [ 'int %s(struct %s *, %s *);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def CodeGet(self):
code = (
'int',
'%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, '
'%(ctype)s *value)',
'{',
' if (msg->%(name)s_set != 1)',
' return (-1);',
' *value = msg->%(name)s_data;',
' return (0);',
'}' )
code = '\n'.join(code)
code = code % self.GetTranslation()
return code.split('\n')
def AssignFuncName(self):
return '%s_%s_assign' % (self._struct.Name(), self._name)
def AddFuncName(self):
return '%s_%s_add' % (self._struct.Name(), self._name)
def AssignDeclaration(self, funcname):
code = [ 'int %s(struct %s *, const %s);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def CodeAssign(self):
code = [ 'int',
'%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,'
' const %(ctype)s value)',
'{',
' msg->%(name)s_set = 1;',
' msg->%(name)s_data = value;',
' return (0);',
'}' ]
code = '\n'.join(code)
code = code % self.GetTranslation()
return code.split('\n')
def CodeClear(self, structname):
code = [ '%s->%s_set = 0;' % (structname, self.Name()) ]
return code
def CodeComplete(self, structname):
if self.Optional():
return []
code = [ 'if (!%s->%s_set)' % (structname, self.Name()),
' return (-1);' ]
return code
def CodeFree(self, name):
return []
def CodeBase(self):
code = [
'%(parent_name)s_%(name)s_assign,',
'%(parent_name)s_%(name)s_get,'
]
if self.Array():
code.append('%(parent_name)s_%(name)s_add,')
code = '\n'.join(code)
code = code % self.GetTranslation()
return code.split('\n')
def Verify(self):
if self.Array() and not self._can_be_array:
print >>sys.stderr, (
'Entry "%s" cannot be created as an array '
'around line %d' ) % (self._name, self.LineCount())
sys.exit(1)
if not self._struct:
print >>sys.stderr, (
'Entry "%s" does not know which struct it belongs to '
'around line %d' ) % (self._name, self.LineCount())
sys.exit(1)
if self._optional and self._array:
print >>sys.stderr, ( 'Entry "%s" has illegal combination of '
'optional and array around line %d' ) % (
self._name, self.LineCount() )
sys.exit(1)
class EntryBytes(Entry):
def __init__(self, type, name, tag, length):
Entry.__init__(self, type, name, tag)
self._length = length
self._ctype = 'uint8_t'
def GetDeclaration(self, funcname):
code = [ 'int %s(struct %s *, %s **);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def AssignDeclaration(self, funcname):
code = [ 'int %s(struct %s *, const %s *);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def Declaration(self):
dcl = ['uint8_t %s_data[%s];' % (self._name, self._length)]
return dcl
def CodeGet(self):
name = self._name
code = [ 'int',
'%s_%s_get(struct %s *msg, %s **value)' % (
self._struct.Name(), name,
self._struct.Name(), self._ctype),
'{',
' if (msg->%s_set != 1)' % name,
' return (-1);',
' *value = msg->%s_data;' % name,
' return (0);',
'}' ]
return code
def CodeAssign(self):
name = self._name
code = [ 'int',
'%s_%s_assign(struct %s *msg, const %s *value)' % (
self._struct.Name(), name,
self._struct.Name(), self._ctype),
'{',
' msg->%s_set = 1;' % name,
' memcpy(msg->%s_data, value, %s);' % (
name, self._length),
' return (0);',
'}' ]
return code
def CodeUnmarshal(self, buf, tag_name, var_name):
code = [ 'if (evtag_unmarshal_fixed(%s, %s, ' % (buf, tag_name) +
'%s->%s_data, ' % (var_name, self._name) +
'sizeof(%s->%s_data)) == -1) {' % (
var_name, self._name),
' event_warnx("%%s: failed to unmarshal %s", __func__);' % (
self._name ),
' return (-1);',
'}'
]
return code
def CodeMarshal(self, buf, tag_name, var_name):
code = ['evtag_marshal(%s, %s, %s->%s_data, sizeof(%s->%s_data));' % (
buf, tag_name, var_name, self._name, var_name, self._name )]
return code
def CodeClear(self, structname):
code = [ '%s->%s_set = 0;' % (structname, self.Name()),
'memset(%s->%s_data, 0, sizeof(%s->%s_data));' % (
structname, self._name, structname, self._name)]
return code
def CodeNew(self, name):
code = ['memset(%s->%s_data, 0, sizeof(%s->%s_data));' % (
name, self._name, name, self._name)]
return code
def Verify(self):
if not self._length:
print >>sys.stderr, 'Entry "%s" needs a length around line %d' % (
self._name, self.LineCount() )
sys.exit(1)
Entry.Verify(self)
class EntryInt(Entry):
def __init__(self, type, name, tag):
Entry.__init__(self, type, name, tag)
self._ctype = 'uint32_t'
def CodeUnmarshal(self, buf, tag_name, var_name):
code = ['if (evtag_unmarshal_int(%s, %s, &%s->%s_data) == -1) {' % (
buf, tag_name, var_name, self._name),
' event_warnx("%%s: failed to unmarshal %s", __func__);' % (
self._name ),
' return (-1);',
'}' ]
return code
def CodeMarshal(self, buf, tag_name, var_name):
code = ['evtag_marshal_int(%s, %s, %s->%s_data);' % (
buf, tag_name, var_name, self._name)]
return code
def Declaration(self):
dcl = ['uint32_t %s_data;' % self._name]
return dcl
def CodeNew(self, name):
code = ['%s->%s_data = 0;' % (name, self._name)]
return code
class EntryString(Entry):
def __init__(self, type, name, tag):
Entry.__init__(self, type, name, tag)
self._ctype = 'char *'
def CodeAssign(self):
name = self._name
code = """int
%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
const %(ctype)s value)
{
if (msg->%(name)s_data != NULL)
free(msg->%(name)s_data);
if ((msg->%(name)s_data = strdup(value)) == NULL)
return (-1);
msg->%(name)s_set = 1;
return (0);
}""" % self.GetTranslation()
return code.split('\n')
def CodeUnmarshal(self, buf, tag_name, var_name):
code = ['if (evtag_unmarshal_string(%s, %s, &%s->%s_data) == -1) {' % (
buf, tag_name, var_name, self._name),
' event_warnx("%%s: failed to unmarshal %s", __func__);' % (
self._name ),
' return (-1);',
'}'
]
return code
def CodeMarshal(self, buf, tag_name, var_name):
code = ['evtag_marshal_string(%s, %s, %s->%s_data);' % (
buf, tag_name, var_name, self._name)]
return code
def CodeClear(self, structname):
code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
' free (%s->%s_data);' % (structname, self.Name()),
' %s->%s_data = NULL;' % (structname, self.Name()),
' %s->%s_set = 0;' % (structname, self.Name()),
'}'
]
return code
def CodeNew(self, name):
code = ['%s->%s_data = NULL;' % (name, self._name)]
return code
def CodeFree(self, name):
code = ['if (%s->%s_data != NULL)' % (name, self._name),
' free (%s->%s_data); ' % (name, self._name)]
return code
def Declaration(self):
dcl = ['char *%s_data;' % self._name]
return dcl
class EntryStruct(Entry):
def __init__(self, type, name, tag, refname):
Entry.__init__(self, type, name, tag)
self._can_be_array = 1
self._refname = refname
self._ctype = 'struct %s*' % refname
def CodeGet(self):
name = self._name
code = [ 'int',
'%s_%s_get(struct %s *msg, %s *value)' % (
self._struct.Name(), name,
self._struct.Name(), self._ctype),
'{',
' if (msg->%s_set != 1) {' % name,
' msg->%s_data = %s_new();' % (name, self._refname),
' if (msg->%s_data == NULL)' % name,
' return (-1);',
' msg->%s_set = 1;' % name,
' }',
' *value = msg->%s_data;' % name,
' return (0);',
'}' ]
return code
def CodeAssign(self):
name = self._name
code = """int
%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
const %(ctype)s value)
{
struct evbuffer *tmp = NULL;
if (msg->%(name)s_set) {
%(refname)s_clear(msg->%(name)s_data);
msg->%(name)s_set = 0;
} else {
msg->%(name)s_data = %(refname)s_new();
if (msg->%(name)s_data == NULL) {
event_warn("%%s: %(refname)s_new()", __func__);
goto error;
}
}
if ((tmp = evbuffer_new()) == NULL) {
event_warn("%%s: evbuffer_new()", __func__);
goto error;
}
%(refname)s_marshal(tmp, value);
if (%(refname)s_unmarshal(msg->%(name)s_data, tmp) == -1) {
event_warnx("%%s: %(refname)s_unmarshal", __func__);
goto error;
}
msg->%(name)s_set = 1;
evbuffer_free(tmp);
return (0);
error:
if (tmp != NULL)
evbuffer_free(tmp);
if (msg->%(name)s_data != NULL) {
%(refname)s_free(msg->%(name)s_data);
msg->%(name)s_data = NULL;
}
return (-1);
}""" % self.GetTranslation()
return code.split('\n')
def CodeComplete(self, structname):
if self.Optional():
code = [ 'if (%s->%s_set && %s_complete(%s->%s_data) == -1)' % (
structname, self.Name(),
self._refname, structname, self.Name()),
' return (-1);' ]
else:
code = [ 'if (%s_complete(%s->%s_data) == -1)' % (
self._refname, structname, self.Name()),
' return (-1);' ]
return code
def CodeUnmarshal(self, buf, tag_name, var_name):
code = ['%s->%s_data = %s_new();' % (
var_name, self._name, self._refname),
'if (%s->%s_data == NULL)' % (var_name, self._name),
' return (-1);',
'if (evtag_unmarshal_%s(%s, %s, %s->%s_data) == -1) {' % (
self._refname, buf, tag_name, var_name, self._name),
' event_warnx("%%s: failed to unmarshal %s", __func__);' % (
self._name ),
' return (-1);',
'}'
]
return code
def CodeMarshal(self, buf, tag_name, var_name):
code = ['evtag_marshal_%s(%s, %s, %s->%s_data);' % (
self._refname, buf, tag_name, var_name, self._name)]
return code
def CodeClear(self, structname):
code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
' %s_free(%s->%s_data);' % (
self._refname, structname, self.Name()),
' %s->%s_data = NULL;' % (structname, self.Name()),
' %s->%s_set = 0;' % (structname, self.Name()),
'}'
]
return code
def CodeNew(self, name):
code = ['%s->%s_data = NULL;' % (name, self._name)]
return code
def CodeFree(self, name):
code = ['if (%s->%s_data != NULL)' % (name, self._name),
' %s_free(%s->%s_data); ' % (
self._refname, name, self._name)]
return code
def Declaration(self):
dcl = ['%s %s_data;' % (self._ctype, self._name)]
return dcl
class EntryVarBytes(Entry):
def __init__(self, type, name, tag):
Entry.__init__(self, type, name, tag)
self._ctype = 'uint8_t *'
def GetDeclaration(self, funcname):
code = [ 'int %s(struct %s *, %s *, uint32_t *);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def AssignDeclaration(self, funcname):
code = [ 'int %s(struct %s *, const %s, uint32_t);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def CodeAssign(self):
name = self._name
code = [ 'int',
'%s_%s_assign(struct %s *msg, '
'const %s value, uint32_t len)' % (
self._struct.Name(), name,
self._struct.Name(), self._ctype),
'{',
' if (msg->%s_data != NULL)' % name,
' free (msg->%s_data);' % name,
' msg->%s_data = malloc(len);' % name,
' if (msg->%s_data == NULL)' % name,
' return (-1);',
' msg->%s_set = 1;' % name,
' msg->%s_length = len;' % name,
' memcpy(msg->%s_data, value, len);' % name,
' return (0);',
'}' ]
return code
def CodeGet(self):
name = self._name
code = [ 'int',
'%s_%s_get(struct %s *msg, %s *value, uint32_t *plen)' % (
self._struct.Name(), name,
self._struct.Name(), self._ctype),
'{',
' if (msg->%s_set != 1)' % name,
' return (-1);',
' *value = msg->%s_data;' % name,
' *plen = msg->%s_length;' % name,
' return (0);',
'}' ]
return code
def CodeUnmarshal(self, buf, tag_name, var_name):
code = ['if (evtag_payload_length(%s, &%s->%s_length) == -1)' % (
buf, var_name, self._name),
' return (-1);',
'if (%s->%s_length > EVBUFFER_LENGTH(%s))' % (
var_name, self._name, buf),
' return (-1);',
'if ((%s->%s_data = malloc(%s->%s_length)) == NULL)' % (
var_name, self._name, var_name, self._name),
' return (-1);',
'if (evtag_unmarshal_fixed(%s, %s, %s->%s_data, '
'%s->%s_length) == -1) {' % (
buf, tag_name, var_name, self._name, var_name, self._name),
' event_warnx("%%s: failed to unmarshal %s", __func__);' % (
self._name ),
' return (-1);',
'}'
]
return code
def CodeMarshal(self, buf, tag_name, var_name):
code = ['evtag_marshal(%s, %s, %s->%s_data, %s->%s_length);' % (
buf, tag_name, var_name, self._name, var_name, self._name)]
return code
def CodeClear(self, structname):
code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
' free (%s->%s_data);' % (structname, self.Name()),
' %s->%s_data = NULL;' % (structname, self.Name()),
' %s->%s_length = 0;' % (structname, self.Name()),
' %s->%s_set = 0;' % (structname, self.Name()),
'}'
]
return code
def CodeNew(self, name):
code = ['%s->%s_data = NULL;' % (name, self._name),
'%s->%s_length = 0;' % (name, self._name) ]
return code
def CodeFree(self, name):
code = ['if (%s->%s_data != NULL)' % (name, self._name),
' free (%s->%s_data); ' % (name, self._name)]
return code
def Declaration(self):
dcl = ['uint8_t *%s_data;' % self._name,
'uint32_t %s_length;' % self._name]
return dcl
class EntryArray(Entry):
def __init__(self, entry):
Entry.__init__(self, entry._type, entry._name, entry._tag)
self._entry = entry
self._refname = entry._refname
self._ctype = 'struct %s *' % self._refname
def GetDeclaration(self, funcname):
translate = self.GetTranslation()
translate["funcname"] = funcname
code = [
'int %(funcname)s(struct %(parent_name)s *, int, %(ctype)s *);' %
translate ]
return code
def AssignDeclaration(self, funcname):
code = [ 'int %s(struct %s *, int, const %s);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def AddDeclaration(self, funcname):
code = [ '%s %s(struct %s *);' % (
self._ctype, funcname, self._struct.Name() ) ]
return code
def CodeGet(self):
code = """int
%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, int offset,
%(ctype)s *value)
{
if (!msg->%(name)s_set || offset < 0 || offset >= msg->%(name)s_length)
return (-1);
*value = msg->%(name)s_data[offset];
return (0);
}""" % self.GetTranslation()
return code.split('\n')
def CodeAssign(self):
code = """int
%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg, int off,
const %(ctype)s value)
{
struct evbuffer *tmp = NULL;
if (!msg->%(name)s_set || off < 0 || off >= msg->%(name)s_length)
return (-1);
%(refname)s_clear(msg->%(name)s_data[off]);
if ((tmp = evbuffer_new()) == NULL) {
event_warn("%%s: evbuffer_new()", __func__);
goto error;
}
%(refname)s_marshal(tmp, value);
if (%(refname)s_unmarshal(msg->%(name)s_data[off], tmp) == -1) {
event_warnx("%%s: %(refname)s_unmarshal", __func__);
goto error;
}
evbuffer_free(tmp);
return (0);
error:
if (tmp != NULL)
evbuffer_free(tmp);
%(refname)s_clear(msg->%(name)s_data[off]);
return (-1);
}""" % self.GetTranslation()
return code.split('\n')
def CodeAdd(self):
code = \
"""%(ctype)s
%(parent_name)s_%(name)s_add(struct %(parent_name)s *msg)
{
if (++msg->%(name)s_length >= msg->%(name)s_num_allocated) {
int tobe_allocated = msg->%(name)s_num_allocated;
%(ctype)s* new_data = NULL;
tobe_allocated = !tobe_allocated ? 1 : tobe_allocated << 1;
new_data = (%(ctype)s*) realloc(msg->%(name)s_data,
tobe_allocated * sizeof(%(ctype)s));
if (new_data == NULL)
goto error;
msg->%(name)s_data = new_data;
msg->%(name)s_num_allocated = tobe_allocated;
}
msg->%(name)s_data[msg->%(name)s_length - 1] = %(refname)s_new();
if (msg->%(name)s_data[msg->%(name)s_length - 1] == NULL)
goto error;
msg->%(name)s_set = 1;
return (msg->%(name)s_data[msg->%(name)s_length - 1]);
error:
--msg->%(name)s_length;
return (NULL);
}
""" % self.GetTranslation()
return code.split('\n')
def CodeComplete(self, structname):
code = []
translate = self.GetTranslation()
if self.Optional():
code.append( 'if (%(structname)s->%(name)s_set)' % translate)
translate["structname"] = structname
tmp = """{
int i;
for (i = 0; i < %(structname)s->%(name)s_length; ++i) {
if (%(refname)s_complete(%(structname)s->%(name)s_data[i]) == -1)
return (-1);
}
}""" % translate
code.extend(tmp.split('\n'))
return code
def CodeUnmarshal(self, buf, tag_name, var_name):
translate = self.GetTranslation()
translate["var_name"] = var_name
translate["buf"] = buf
translate["tag_name"] = tag_name
code = """if (%(parent_name)s_%(name)s_add(%(var_name)s) == NULL)
return (-1);
if (evtag_unmarshal_%(refname)s(%(buf)s, %(tag_name)s,
%(var_name)s->%(name)s_data[%(var_name)s->%(name)s_length - 1]) == -1) {
--%(var_name)s->%(name)s_length;
event_warnx("%%s: failed to unmarshal %(name)s", __func__);
return (-1);
}""" % translate
return code.split('\n')
def CodeMarshal(self, buf, tag_name, var_name):
code = ['{',
' int i;',
' for (i = 0; i < %s->%s_length; ++i) {' % (
var_name, self._name),
' evtag_marshal_%s(%s, %s, %s->%s_data[i]);' % (
self._refname, buf, tag_name, var_name, self._name),
' }',
'}'
]
return code
def CodeClear(self, structname):
code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
' int i;',
' for (i = 0; i < %s->%s_length; ++i) {' % (
structname, self.Name()),
' %s_free(%s->%s_data[i]);' % (
self._refname, structname, self.Name()),
' }',
' free(%s->%s_data);' % (structname, self.Name()),
' %s->%s_data = NULL;' % (structname, self.Name()),
' %s->%s_set = 0;' % (structname, self.Name()),
' %s->%s_length = 0;' % (structname, self.Name()),
' %s->%s_num_allocated = 0;' % (structname, self.Name()),
'}'
]
return code
def CodeNew(self, name):
code = ['%s->%s_data = NULL;' % (name, self._name),
'%s->%s_length = 0;' % (name, self._name),
'%s->%s_num_allocated = 0;' % (name, self._name)]
return code
def CodeFree(self, name):
code = ['if (%s->%s_data != NULL) {' % (name, self._name),
' int i;',
' for (i = 0; i < %s->%s_length; ++i) {' % (
name, self._name),
' %s_free(%s->%s_data[i]); ' % (
self._refname, name, self._name),
' %s->%s_data[i] = NULL;' % (name, self._name),
' }',
' free(%s->%s_data);' % (name, self._name),
' %s->%s_data = NULL;' % (name, self._name),
' %s->%s_length = 0;' % (name, self._name),
' %s->%s_num_allocated = 0;' % (name, self._name),
'}'
]
return code
def Declaration(self):
dcl = ['struct %s **%s_data;' % (self._refname, self._name),
'int %s_length;' % self._name,
'int %s_num_allocated;' % self._name ]
return dcl
def NormalizeLine(line):
    """Drop //-style comments, trim both ends, and squeeze internal
    whitespace runs down to single spaces (using the module-level
    compiled patterns)."""
    global white
    global cppcomment
    cleaned = cppcomment.sub('', line).strip()
    return white.sub(' ', cleaned)
def ProcessOneEntry(newstruct, entry):
    """Parse one normalized entry line (e.g. 'optional int x = 1') and
    attach the resulting Entry object to *newstruct*.

    Returns a list of any additional structs created while parsing
    (currently always empty).  Exits the program on a parse error.
    """
    optional = 0
    array = 0
    entry_type = ''
    name = ''
    tag = ''
    tag_set = None
    separator = ''
    fixed_length = ''
    # Consume the entry one whitespace-separated token at a time; each
    # 'if not <field>' branch fills in the next expected field.
    tokens = entry.split(' ')
    while tokens:
        token = tokens[0]
        tokens = tokens[1:]
        if not entry_type:
            # Leading modifiers may precede the type keyword.
            if not optional and token == 'optional':
                optional = 1
                continue
            if not array and token == 'array':
                array = 1
                continue
        if not entry_type:
            entry_type = token
            continue
        if not name:
            # The name may carry a fixed length in brackets: name[N].
            res = re.match(r'^([^\[\]]+)(\[.*\])?$', token)
            if not res:
                print >>sys.stderr, 'Cannot parse name: \"%s\" around %d' % (
                    entry, line_count)
                sys.exit(1)
            name = res.group(1)
            fixed_length = res.group(2)
            if fixed_length:
                # Strip the surrounding brackets.
                fixed_length = fixed_length[1:-1]
            continue
        if not separator:
            separator = token
            if separator != '=':
                print >>sys.stderr, 'Expected "=" after name \"%s\" got %s' % (
                    name, token)
                sys.exit(1)
            continue
        if not tag_set:
            tag_set = 1
            if not re.match(r'^(0x)?[0-9]+$', token):
                print >>sys.stderr, 'Expected tag number: \"%s\"' % entry
                sys.exit(1)
            # Base 0 accepts both decimal and 0x-prefixed hex tags.
            tag = int(token, 0)
            continue
        print >>sys.stderr, 'Cannot parse \"%s\"' % entry
        sys.exit(1)
    if not tag_set:
        print >>sys.stderr, 'Need tag number: \"%s\"' % entry
        sys.exit(1)
    # Create the Entry subclass matching the declared type.
    if entry_type == 'bytes':
        if fixed_length:
            newentry = EntryBytes(entry_type, name, tag, fixed_length)
        else:
            newentry = EntryVarBytes(entry_type, name, tag)
    elif entry_type == 'int' and not fixed_length:
        newentry = EntryInt(entry_type, name, tag)
    elif entry_type == 'string' and not fixed_length:
        newentry = EntryString(entry_type, name, tag)
    else:
        res = re.match(r'^struct\[(%s)\]$' % _STRUCT_RE,
                       entry_type, re.IGNORECASE)
        if res:
            newentry = EntryStruct(entry_type, name, tag, res.group(1))
        else:
            print >>sys.stderr, 'Bad type: "%s" in "%s"' % (entry_type, entry)
            sys.exit(1)
    structs = []
    if optional:
        newentry.MakeOptional()
    if array:
        newentry.MakeArray()
    newentry.SetStruct(newstruct)
    newentry.SetLineCount(line_count)
    newentry.Verify()
    if array:
        # Array entries are wrapped in an EntryArray that delegates to
        # the underlying element entry.
        newname = newentry.Name()+ '_array'  # NOTE(review): unused — presumably vestigial
        newentry = EntryArray(newentry)
        newentry.SetStruct(newstruct)
        newentry.SetLineCount(line_count)
        newentry.MakeArray()
    newstruct.AddEntry(newentry)
    return structs
def ProcessStruct(data):
    """Turn one normalized 'struct name { ... }' string into Struct objects;
    the struct described by *data* is the last element of the returned list."""
    parts = data.split(' ')
    # parts layout: 'struct' <name> '{' <entries...> '}'
    target = Struct(parts[1])
    collected = []
    for chunk in ' '.join(parts[3:-1]).split(';'):
        normalized = NormalizeLine(chunk)
        if normalized:
            collected.extend(ProcessOneEntry(target, normalized))
    collected.append(target)
    return collected
def GetNextStruct(file):
    """Read *file* up to the end of the next struct definition.

    Strips C and C++ comments, collects preprocessor directives seen
    before the struct (#include and #if/#ifdef/#endif into 'cppdirect',
    #define into 'headerdirect'), and returns the definition collapsed
    onto one normalized line.  Returns '' at end of input; exits the
    program on malformed input.

    Fix: restored the three directive-matching regex string literals
    that had been truncated (left as unterminated r' strings) when the
    '#...' text inside them was stripped as if it were a comment.
    """
    global line_count
    global cppdirect
    got_struct = 0
    have_c_comment = 0
    data = ''
    while 1:
        line = file.readline()
        if not line:
            break
        line_count += 1
        line = line[:-1]
        # C-style comments may span several input lines.
        if not have_c_comment and re.search(r'/\*', line):
            if re.search(r'/\*.*\*/', line):
                line = re.sub(r'/\*.*\*/', '', line)
            else:
                line = re.sub(r'/\*.*$', '', line)
                have_c_comment = 1
        if have_c_comment:
            if not re.search(r'\*/', line):
                continue
            have_c_comment = 0
            line = re.sub(r'^.*\*/', '', line)
        line = NormalizeLine(line)
        if not line:
            continue
        if not got_struct:
            # Directives before the struct are passed through to the
            # generated files.
            if re.match(r'#include ["<].*[>"]', line):
                cppdirect.append(line)
                continue
            if re.match(r'^#(if( |def)|endif)', line):
                cppdirect.append(line)
                continue
            if re.match(r'^#define', line):
                headerdirect.append(line)
                continue
            if not re.match(r'^struct %s {$' % _STRUCT_RE,
                            line, re.IGNORECASE):
                print >>sys.stderr, 'Missing struct on line %d: %s' % (
                    line_count, line)
                sys.exit(1)
            else:
                got_struct = 1
                data += line
            continue
        # We are inside the struct
        tokens = line.split('}')
        if len(tokens) == 1:
            data += ' ' + line
            continue
        if len(tokens[1]):
            print >>sys.stderr, 'Trailing garbage after struct on line %d' % (
                line_count )
            sys.exit(1)
        # We found the end of the struct
        data += ' %s}' % tokens[0]
        break
    # Remove any comments, that might be in there
    data = re.sub(r'/\*.*\*/', '', data)
    return data
def Parse(file):
    """Parse the whole RPC description *file* and return all entities."""
    entities = []
    # GetNextStruct returns '' once the input is exhausted.
    data = GetNextStruct(file)
    while data:
        entities.extend(ProcessStruct(data))
        data = GetNextStruct(file)
    return entities
def GuardName(name):
    """Derive a C include-guard macro name from a file name.

    Dots and slashes become underscores; the result is upper-cased and
    wrapped in single underscores (e.g. 'dir/file.rpc' -> '_DIR_FILE_RPC_').
    """
    sanitized = name.replace('.', '_').replace('/', '_')
    return '_' + sanitized.upper() + '_'
def HeaderPreamble(name):
    """Build the generated .gen.h header text: banner comment, include guard,
    stdint include, collected header directives, and the EVTAG_* macros.
    """
    guard = GuardName(name)
    # NOTE(review): the two bare quote lines below are truncated — extraction
    # stripped everything from '#' onward; the originals were the
    # '#ifndef <guard>' / '#define <guard>' lines and the ') % (' that closed
    # the format expression consuming (name, guard, guard).
    pre = (
        '/*\n'
        ' * Automatically generated from %s\n'
        ' */\n\n'
        '
        '
        name, guard, guard)
    # insert stdint.h - let's hope everyone has it
    pre += (
        '#include <event-config.h>\n'
        '#ifdef _EVENT_HAVE_STDINT_H\n'
        '#include <stdint.h>\n'
        '#endif\n' )
    # Pass through any directives collected from the .rpc file.
    for statement in headerdirect:
        pre += '%s\n' % statement
    if headerdirect:
        pre += '\n'
    # Accessor macros; GCC supports named variadic macro args, C99 uses
    # __VA_ARGS__, hence the two variants.
    pre += (
        '#define EVTAG_HAS(msg, member) ((msg)->member##_set == 1)\n'
        '#ifdef __GNUC__\n'
        '#define EVTAG_ASSIGN(msg, member, args...) '
        '(*(msg)->base->member##_assign)(msg, ## args)\n'
        '#define EVTAG_GET(msg, member, args...) '
        '(*(msg)->base->member##_get)(msg, ## args)\n'
        '#else\n'
        '#define EVTAG_ASSIGN(msg, member, ...) '
        '(*(msg)->base->member##_assign)(msg, ## __VA_ARGS__)\n'
        '#define EVTAG_GET(msg, member, ...) '
        '(*(msg)->base->member##_get)(msg, ## __VA_ARGS__)\n'
        '#endif\n'
        '#define EVTAG_ADD(msg, member) (*(msg)->base->member##_add)(msg)\n'
        '#define EVTAG_LEN(msg, member) ((msg)->member##_length)\n'
        )
    return pre
def HeaderPostamble(name):
    """Return the closing '#endif' line matching HeaderPreamble's guard."""
    return '#endif /* %s */' % GuardName(name)
def BodyPreamble(name):
    """Build the generated .gen.c preamble: banner, system includes, any
    collected C-preprocessor directives, the generated header include, and
    forward declarations for libevent's logging helpers.
    """
    global _NAME
    global _VERSION
    # The implementation file includes the matching generated header.
    header_file = '.'.join(name.split('.')[:-1]) + '.gen.h'
    pre = ( '/*\n'
            ' * Automatically generated from %s\n'
            ' * by %s/%s. DO NOT EDIT THIS FILE.\n'
            ' */\n\n' ) % (name, _NAME, _VERSION)
    pre += ( '#include <sys/types.h>\n'
             '#include <sys/time.h>\n'
             '#include <stdlib.h>\n'
             '#include <string.h>\n'
             '#include <assert.h>\n'
             '#include <event.h>\n\n' )
    # Directives collected from the .rpc source while scanning for structs.
    for statement in cppdirect:
        pre += '%s\n' % statement
    pre += '\n#include "%s"\n\n' % header_file
    pre += 'void event_err(int eval, const char *fmt, ...);\n'
    pre += 'void event_warn(const char *fmt, ...);\n'
    pre += 'void event_errx(int eval, const char *fmt, ...);\n'
    pre += 'void event_warnx(const char *fmt, ...);\n\n'
    return pre
def main(argv):
    """Entry point: read a .rpc description file (argv[1]) and emit the
    matching .gen.h and .gen.c files next to it.

    Exits with status 1 on a missing argument or a non-.rpc extension.
    (Python 2 print-statement syntax throughout.)
    """
    if len(argv) < 2 or not argv[1]:
        print >>sys.stderr, 'Need RPC description file as first argument.'
        sys.exit(1)
    filename = argv[1]
    ext = filename.split('.')[-1]
    if ext != 'rpc':
        print >>sys.stderr, 'Unrecognized file extension: %s' % ext
        sys.exit(1)
    print >>sys.stderr, 'Reading \"%s\"' % filename
    fp = open(filename, 'r')
    entities = Parse(fp)
    fp.close()
    # Output names: replace the .rpc suffix with .gen.h / .gen.c.
    header_file = '.'.join(filename.split('.')[:-1]) + '.gen.h'
    impl_file = '.'.join(filename.split('.')[:-1]) + '.gen.c'
    print >>sys.stderr, '... creating "%s"' % header_file
    header_fp = open(header_file, 'w')
    print >>header_fp, HeaderPreamble(filename)
    # Forward declarations first so structs can reference each other.
    for entry in entities:
        entry.PrintForwardDeclaration(header_fp)
    print >>header_fp, ''
    for entry in entities:
        entry.PrintTags(header_fp)
        entry.PrintDeclaration(header_fp)
    print >>header_fp, HeaderPostamble(filename)
    header_fp.close()
    print >>sys.stderr, '... creating "%s"' % impl_file
    impl_fp = open(impl_file, 'w')
    print >>impl_fp, BodyPreamble(filename)
    for entry in entities:
        entry.PrintCode(impl_fp)
    impl_fp.close()
if __name__ == '__main__':
    main(sys.argv)
| true | true |
f7fd1297806e29b2d84c56a985e31e25414f44c9 | 748 | py | Python | resources/textures/get_texture.py | liaojh1998/cross-modal-concept2robot | 2a00937eb2ac02cbe3d5d5fa0f5868e85d194f6e | [
"MIT"
] | 4 | 2021-08-04T08:14:36.000Z | 2022-03-14T05:59:46.000Z | resources/textures/get_texture.py | liaojh1998/cross-modal-concept2robot | 2a00937eb2ac02cbe3d5d5fa0f5868e85d194f6e | [
"MIT"
] | null | null | null | resources/textures/get_texture.py | liaojh1998/cross-modal-concept2robot | 2a00937eb2ac02cbe3d5d5fa0f5868e85d194f6e | [
"MIT"
] | 2 | 2021-08-28T13:19:31.000Z | 2021-09-17T17:48:41.000Z | import cv2
import glob
# Root of the SUN2012 image dataset mined for texture images.
sun_data_path = '/scr1/workspace/dataset/sun/SUN2012/Images'
def resize(filename='bg3.png', output_name=None):
    """Scale an image so its width becomes 256 px, preserving aspect ratio.

    The resized image is written to ``output_name`` if given, otherwise it
    overwrites ``filename`` in place.
    """
    target_width = 256.
    oriimg = cv2.imread(filename)
    height, width = oriimg.shape[:2]
    scale = target_width / width
    new_w, new_h = width * scale, height * scale
    # BUG FIX: the original passed int(newX) for *both* dimensions of
    # cv2.resize (dsize is (width, height)), producing a 256x256 square even
    # though the proportional height (newY) was computed and never used.
    newimg = cv2.resize(oriimg, (int(new_w), int(new_h)))
    cv2.imwrite(output_name if output_name is not None else filename, newimg)
if __name__ == '__main__':
    # Sample roughly 1-in-21 of all SUN2012 JPEGs and resize each into
    # ./sun_textures/, numbered sequentially by sample index.
    cut_ratio = 21
    for i,file in enumerate(glob.glob(sun_data_path+'/*/*/*.jpg')):
        if i%cut_ratio==1:
            output = './sun_textures/{}.jpg'.format (int(i/cut_ratio))
            resize(file,output)
print(i) | 31.166667 | 70 | 0.643048 | import cv2
import glob
sun_data_path = '/scr1/workspace/dataset/sun/SUN2012/Images'
def resize(filename='bg3.png',output_name=None):
W = 256.
oriimg = cv2.imread(filename)
height, width, depth = oriimg.shape
imgScale = W/width
newX,newY = oriimg.shape[1]*imgScale, oriimg.shape[0]*imgScale
newimg = cv2.resize(oriimg,(int(newX),int(newX)))
if output_name is not None:
cv2.imwrite(output_name,newimg)
else:
cv2.imwrite (filename, newimg)
if __name__ == '__main__':
cut_ratio = 21
for i,file in enumerate(glob.glob(sun_data_path+'/*/*/*.jpg')):
if i%cut_ratio==1:
output = './sun_textures/{}.jpg'.format (int(i/cut_ratio))
resize(file,output)
print(i) | true | true |
f7fd13c735953d3712d598b49e0672939502f80f | 8,340 | py | Python | src/discovergy/utils.py | a8/discovergy | 7766a6eb74e8c3cf9b09dfdac21d79b31f5922e5 | [
"MIT"
] | 4 | 2020-03-27T12:41:28.000Z | 2020-08-17T17:43:28.000Z | src/discovergy/utils.py | a8/discovergy | 7766a6eb74e8c3cf9b09dfdac21d79b31f5922e5 | [
"MIT"
] | null | null | null | src/discovergy/utils.py | a8/discovergy | 7766a6eb74e8c3cf9b09dfdac21d79b31f5922e5 | [
"MIT"
] | 1 | 2020-04-01T22:10:40.000Z | 2020-04-01T22:10:40.000Z | # -*- coding: utf-8 -*-
"""
Discovergy shared helper code
"""
__author__ = "Frank Becker <fb@alien8.de>"
__copyright__ = "Frank Becker"
__license__ = "mit"
import gzip
import json
import os
import re
import sys
from contextlib import ContextDecorator
from pathlib import Path
from timeit import default_timer
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Union
import pandas as pd # type: ignore
import pystore
from box import Box # type: ignore
from loguru import logger as log
from tenacity import _utils # type: ignore
class TimeStampedValue(NamedTuple):
    """A measurement value paired with the time it was observed."""
    # presumably seconds since the epoch (time.time()-style) — TODO confirm
    timestamp: float
    value: Any
class ValueUnit(NamedTuple):
    """A numeric measurement together with its unit label."""
    value: Union[float, int]
    # unit label string; exact vocabulary is defined by the producers
    unit: str
class measure_duration(ContextDecorator):
    """Context manager (usable as a decorator) that measures wall-clock time.

    After the block exits, the elapsed seconds are available as ``.duration``.
    """
    def __enter__(self):
        # Record when the managed block started.
        self.start = default_timer()
        return self
    def __exit__(self, *exc_info):
        # Elapsed wall-clock seconds; returning False never suppresses errors.
        self.duration = default_timer() - self.start
        return False
def start_logging(config: Box) -> None:
    """Start console and file logging.

    Creates ``config.file_location.log_dir`` if missing, then configures the
    loguru logger with a colorized stderr sink and a daily-rotated, gzipped
    file sink in that directory.
    """
    log_dir = Path(config.file_location.log_dir).expanduser()
    if not log_dir.is_dir():
        sys.stderr.write(f"Could not find the log dir {log_dir}. Creating it ...\n")
        os.makedirs(log_dir.as_posix())
    log_config = {
        "handlers": [
            {
                "sink": sys.stderr,
                "format": "{time:YYYY-MM-DD HH:mm:ss} | <level>{level}</level> | {message}",
                "colorize": True,
                "level": "DEBUG",
                "backtrace": True,
            },
            {
                # loguru expands {time} in the sink path per rotation.
                "sink": log_dir / "discovergy_{time}.log",
                "rotation": "1 day",
                "compression": "gz",
                "format": "{time:YYYY-MM-DDTHH:mm:ss} | {level} | {message}",
                "backtrace": True,
                "serialize": False,
            },
        ],
        "extra": {"user": "someone"},
    }
    log.configure(**log_config)  # type: ignore
def before_log(logger: Any, log_level: str) -> Callable:
    """Build a tenacity 'before' callback that logs each call attempt.

    NOTE: the ``logger`` parameter is unused — the callback resolves the
    level-named method on the module-level loguru ``log`` object instead.
    """
    def _log_attempt(retry_state):
        emit = getattr(log, log_level)
        emit(
            f"Starting call to '{_utils.get_callback_name(retry_state.fn)}', "
            f"this is the {_utils.to_ordinal(retry_state.attempt_number)} time calling it."
        )
    return _log_attempt
def split_df_by_month(*, df) -> List[pd.DataFrame]:
    """Return data frames split by month.

    Expects a datetime-indexed DataFrame; the index appears to need to be
    tz-aware UTC, since the month-end boundaries are built with tz="UTC" and
    compared directly against it — TODO confirm with callers.
    NOTE(review): date_range assumes the observed (year, month) pairs are
    contiguous; a gap month would shift the boundaries — verify inputs.
    """
    data_frames = []
    intervals = sorted(set([(e.year, e.month) for e in df.index.unique()]))
    if len(intervals) == 1:
        # One month only, early return
        data_frames.append(df)
        return data_frames
    # Month-end timestamps, one per observed month ('M' freq = month end;
    # deprecated in newer pandas in favor of 'ME').
    date_range = pd.date_range(
        "{}-{:02d}".format(intervals[0][0], intervals[0][1]),
        periods=len(intervals),
        freq="M",
        tz="UTC",
    )
    prev_month = date_range[0]
    # First frame: everything up to (and including) the first month end.
    data_frames.append(df[df.index <= date_range[0]])
    for date in date_range[1:]:
        df_per_month = df[(prev_month < df.index) & (df.index <= date)]
        data_frames.append(df_per_month)
        prev_month = date
    return data_frames
def split_df_by_day(*, df) -> List[pd.DataFrame]:
    """Split a datetime-indexed DataFrame into per-day frames.

    A single-day frame is returned unchanged. For multiple days, frames cover
    the half-open intervals (midnight_k, midnight_{k+1}]; rows exactly at the
    first midnight therefore fall outside every frame.
    """
    days = sorted({(ts.year, ts.month, ts.day) for ts in df.index.unique()})
    if len(days) == 1:
        # Only one calendar day present: nothing to split.
        return [df]
    # One midnight boundary per observed day, starting at the earliest day.
    boundaries = pd.date_range(
        "{}-{:02d}-{:02d}".format(days[0][0], days[0][1], days[0][2]),
        periods=len(days),
        freq="D",
    )
    frames = []
    lower = boundaries[0]
    for upper in boundaries[1:]:
        frames.append(df[(lower < df.index) & (df.index <= upper)])
        lower = upper
    return frames
def str2bool(value: str) -> bool:
    """Interpret common truthy strings (case-insensitive); all else is False."""
    truthy = ("true", "1", "t", "y", "yes", "yeah")
    return value.lower() in truthy
def verify_file_permissions(path: Path) -> bool:
    """Return True iff *path* is a regular file owned by the current user with
    permissions no broader than 0600, tightening them to 0600 when possible.

    Returns False for non-files, files owned by another user, and files whose
    permissions could not be changed. POSIX-only (os.getuid / chmod).
    """
    # BUG FIX: the original tested the bound method ``path.is_file`` (always
    # truthy) instead of calling it, so missing paths fell through to
    # path.stat() and raised FileNotFoundError instead of returning False.
    if path.is_file():
        file_stat = path.stat()
        if file_stat.st_uid != os.getuid():
            # Owned by another user; we cannot safely fix the permissions.
            return False
        if re.match(r"0o*100[0-6]00", oct(file_stat.st_mode)):
            # Regular file (0o100xxx) with owner-only bits <= 6 and no
            # group/other access: already secure.
            return True
        try:
            os.chmod(path, 0o600)
        except OSError:
            log.error(
                f"Tried to change the permissions of {path} but failed. "
                "Please fix the permissions to max. 0600 yourself!"
            )
            return False
        else:
            log.warning(
                "The file {} didn't have secure file permissions {}. "
                "The permissions were changed to -rw------- for you. ".format(
                    path, oct(file_stat.st_mode)
                )
            )
            return True
    return False
def write_data(*, data: List[Dict], file_path: Path) -> None:
    """Serialize *data* as JSON and write it gzip-compressed to *file_path*,
    creating the parent directory if it does not yet exist."""
    dst_dir = file_path.parent
    if not dst_dir.expanduser().is_dir():
        log.warning(f"Creating the data destination directory {dst_dir}.")
        os.makedirs(dst_dir.expanduser().as_posix())
    payload = json.dumps(data).encode("utf-8")
    with gzip.open(file_path.expanduser().as_posix(), "wb") as fh:
        fh.write(payload)
def write_data_frames(
    *, config: Box, data_frames: List[pd.DataFrame], name: str
) -> None:
    """Create or update the data as a Pandas DataFrame in hdf5 file.

    One HDF5 file per (name, year-month) is written under
    ``config.file_location.data_dir``; existing files are merged via
    combine_first so new rows win only where the old file had gaps.
    Requires the pytables backend for pd.read_hdf/to_hdf.
    """
    if not data_frames:
        log.debug(f"Did not receive any data for {name}.")
        return
    for df in data_frames:
        if not len(df):
            log.debug(f"Did not find any data in {df}. Skipping...")
            continue
        # File name is derived from the earliest timestamp in the frame.
        first_ts = min(df.index)
        file_name = f"{name}_{first_ts.year}-{first_ts.month:02d}.hdf5"
        file_path = Path(config.file_location.data_dir) / Path(file_name)
        file_path = file_path.expanduser()
        if file_path.is_file():
            # Merge with previously stored data; new values fill old gaps.
            df_prev = pd.read_hdf(file_path, name)
            df = df.combine_first(df_prev)
        df.to_hdf(file_path, key=name)
def write_data_to_pystore(
    *,
    config: Box,
    data_frames: List[pd.DataFrame],
    name: str,
    metadata: Optional[Dict] = None,
) -> None:
    """Create or update the pandas.DataFrames as Pystore collection.items.
    The DataFrames must contain time series data with the index of type datetime64.
    The lowest index (min(index)) will be converted as YYYY-MM string and set
    as the item name.
    Each dataframe must only contain data of one day! This function doesn't check max(df.index).
    Note, PyStore will make sure there is a unique index:
    ~/.../site-packages/pystore/collection.py in append(self, item, data, npartitions, epochdate, threaded, reload_items, **kwargs)
        183         # combined = current.data.append(new)
        184         combined = dd.concat([current.data, new]).drop_duplicates(keep="last")
    PyStore:
    https://medium.com/@aroussi/fast-data-store-for-pandas-time-series-data-using-pystore-89d9caeef4e2
    """
    if metadata is None:
        metadata = {}
    if not data_frames:
        log.debug(f"Did not receive any data for {name}.")
        return
    # One PyStore collection per resource name inside the 'discovergy' store.
    store = pystore.store("discovergy")
    collection = store.collection(name)
    item_names = collection.list_items()
    for df in data_frames:
        if not len(df):
            log.debug(f"Did not find any data in {df}. Skipping...")
            continue
        # Items are keyed by the month of the earliest timestamp.
        first_ts = min(df.index)
        item_name = f"{first_ts.year}-{first_ts.month:02d}"
        if item_name in item_names:
            # FIXME (a8): Create one partition per day. There must be a better way. Issue is that
            # pandas loads the full pd.DataFrame into memory. That requires memory.
            npartitions = first_ts.day
            log.debug(f"Appended to {item_name} {first_ts}.")
            collection.append(item_name, df, npartitions=npartitions)
        else:
            log.debug("Created new Dask DF.")
            collection.write(item_name, df, metadata=metadata, overwrite=False)
| 32.578125 | 131 | 0.606115 |
__author__ = "Frank Becker <fb@alien8.de>"
__copyright__ = "Frank Becker"
__license__ = "mit"
import gzip
import json
import os
import re
import sys
from contextlib import ContextDecorator
from pathlib import Path
from timeit import default_timer
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Union
import pandas as pd
import pystore
from box import Box
from loguru import logger as log
from tenacity import _utils
class TimeStampedValue(NamedTuple):
timestamp: float
value: Any
class ValueUnit(NamedTuple):
value: Union[float, int]
unit: str
class measure_duration(ContextDecorator):
def __enter__(self):
self.start = default_timer()
return self
def __exit__(self, *exc):
self.duration = default_timer() - self.start
return False
def start_logging(config: Box) -> None:
log_dir = Path(config.file_location.log_dir).expanduser()
if not log_dir.is_dir():
sys.stderr.write(f"Could not find the log dir {log_dir}. Creating it ...\n")
os.makedirs(log_dir.as_posix())
log_config = {
"handlers": [
{
"sink": sys.stderr,
"format": "{time:YYYY-MM-DD HH:mm:ss} | <level>{level}</level> | {message}",
"colorize": True,
"level": "DEBUG",
"backtrace": True,
},
{
"sink": log_dir / "discovergy_{time}.log",
"rotation": "1 day",
"compression": "gz",
"format": "{time:YYYY-MM-DDTHH:mm:ss} | {level} | {message}",
"backtrace": True,
"serialize": False,
},
],
"extra": {"user": "someone"},
}
log.configure(**log_config)
def before_log(logger: Any, log_level: str) -> Callable:
def log_it(retry_state):
logger = getattr(log, log_level)
logger(
f"Starting call to '{_utils.get_callback_name(retry_state.fn)}', "
f"this is the {_utils.to_ordinal(retry_state.attempt_number)} time calling it."
)
return log_it
def split_df_by_month(*, df) -> List[pd.DataFrame]:
data_frames = []
intervals = sorted(set([(e.year, e.month) for e in df.index.unique()]))
if len(intervals) == 1:
data_frames.append(df)
return data_frames
date_range = pd.date_range(
"{}-{:02d}".format(intervals[0][0], intervals[0][1]),
periods=len(intervals),
freq="M",
tz="UTC",
)
prev_month = date_range[0]
data_frames.append(df[df.index <= date_range[0]])
for date in date_range[1:]:
df_per_month = df[(prev_month < df.index) & (df.index <= date)]
data_frames.append(df_per_month)
prev_month = date
return data_frames
def split_df_by_day(*, df) -> List[pd.DataFrame]:
data_frames = []
intervals = sorted(set([(e.year, e.month, e.day) for e in df.index.unique()]))
if len(intervals) == 1:
data_frames.append(df)
return data_frames
date_range = pd.date_range(
"{}-{:02d}-{:02d}".format(intervals[0][0], intervals[0][1], intervals[0][2]),
periods=len(intervals),
freq="D",
)
prev_day = date_range[0]
for date in date_range[1:]:
df_per_day = df[(prev_day < df.index) & (df.index <= date)]
data_frames.append(df_per_day)
prev_day = date
return data_frames
def str2bool(value: str) -> bool:
if value.lower() in ["true", "1", "t", "y", "yes", "yeah"]:
return True
return False
def verify_file_permissions(path: Path) -> bool:
if path.is_file:
file_stat = path.stat()
if file_stat.st_uid != os.getuid():
return False
if re.match(r"0o*100[0-6]00", oct(file_stat.st_mode)):
return True
try:
os.chmod(path, 0o600)
except OSError:
log.error(
f"Tried to change the permissions of {path} but failed. "
"Please fix the permissions to max. 0600 yourself!"
)
return False
else:
log.warning(
"The file {} didn't have secure file permissions {}. "
"The permissions were changed to -rw------- for you. ".format(
path, oct(file_stat.st_mode)
)
)
return True
return False
def write_data(*, data: List[Dict], file_path: Path) -> None:
dst_dir = file_path.parent
if not dst_dir.expanduser().is_dir():
log.warning(f"Creating the data destination directory {dst_dir}.")
os.makedirs(dst_dir.expanduser().as_posix())
with gzip.open(file_path.expanduser().as_posix(), "wb") as fh:
fh.write(json.dumps(data).encode("utf-8"))
def write_data_frames(
*, config: Box, data_frames: List[pd.DataFrame], name: str
) -> None:
if not data_frames:
log.debug(f"Did not receive any data for {name}.")
return
for df in data_frames:
if not len(df):
log.debug(f"Did not find any data in {df}. Skipping...")
continue
first_ts = min(df.index)
file_name = f"{name}_{first_ts.year}-{first_ts.month:02d}.hdf5"
file_path = Path(config.file_location.data_dir) / Path(file_name)
file_path = file_path.expanduser()
if file_path.is_file():
df_prev = pd.read_hdf(file_path, name)
df = df.combine_first(df_prev)
df.to_hdf(file_path, key=name)
def write_data_to_pystore(
*,
config: Box,
data_frames: List[pd.DataFrame],
name: str,
metadata: Optional[Dict] = None,
) -> None:
if metadata is None:
metadata = {}
if not data_frames:
log.debug(f"Did not receive any data for {name}.")
return
store = pystore.store("discovergy")
collection = store.collection(name)
item_names = collection.list_items()
for df in data_frames:
if not len(df):
log.debug(f"Did not find any data in {df}. Skipping...")
continue
first_ts = min(df.index)
item_name = f"{first_ts.year}-{first_ts.month:02d}"
if item_name in item_names:
# FIXME (a8): Create one partition per day. There must be a better way. Issue is that
# pandas loads the full pd.DataFrame into memory. That requires memory.
npartitions = first_ts.day
log.debug(f"Appended to {item_name} {first_ts}.")
collection.append(item_name, df, npartitions=npartitions)
else:
log.debug("Created new Dask DF.")
collection.write(item_name, df, metadata=metadata, overwrite=False)
| true | true |
f7fd14ca425257edfcecad066cdc471c2fb58a9d | 2,009 | py | Python | lib/surface/datastore/databases/create.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/datastore/databases/create.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/surface/datastore/databases/create.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to create Google Cloud Firestore in Datastore Mode database."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.util import apis as core_apis
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.firestore import create_util
class Create(base.Command):
  """Create a Google Cloud Firestore in Datastore Mode database."""
  # Human-readable product name reused in help text and error messages.
  product_name = 'Google Cloud Firestore in Datastore Mode'
  # App Engine v1 database-type enum value selecting Datastore compatibility.
  enum_value = core_apis.GetMessagesModule(
      'appengine', 'v1'
  ).Application.DatabaseTypeValueValuesEnum.CLOUD_DATASTORE_COMPATIBILITY
  detailed_help = {
      'DESCRIPTION':
          """\
          {description}
      """,
      'EXAMPLES':
          """\
          To create Google Cloud Firestore in Datastore Mode database
              $ {command}
          To create an app in the us-central region, run:
              $ {command} --region=us-central
          """,
  }
  def Run(self, args):
    """Delegate the actual database creation to the shared helper."""
    create_util.create(args, self.product_name, self.enum_value)
  @staticmethod
  def Args(parser):
    """Register the --region flag on the gcloud argument parser."""
    parser.add_argument(
        '--region',
        help=(
            'The region to create the {product_name} database within. '
            'Use `gcloud app regions list` to list available regions.').format(
                product_name=Create.product_name))
| 32.934426 | 79 | 0.696864 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.util import apis as core_apis
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.firestore import create_util
class Create(base.Command):
product_name = 'Google Cloud Firestore in Datastore Mode'
enum_value = core_apis.GetMessagesModule(
'appengine', 'v1'
).Application.DatabaseTypeValueValuesEnum.CLOUD_DATASTORE_COMPATIBILITY
detailed_help = {
'DESCRIPTION':
"""\
{description}
""",
'EXAMPLES':
"""\
To create Google Cloud Firestore in Datastore Mode database
$ {command}
To create an app in the us-central region, run:
$ {command} --region=us-central
""",
}
def Run(self, args):
create_util.create(args, self.product_name, self.enum_value)
@staticmethod
def Args(parser):
parser.add_argument(
'--region',
help=(
'The region to create the {product_name} database within. '
'Use `gcloud app regions list` to list available regions.').format(
product_name=Create.product_name))
| true | true |
f7fd1553194bcf60136927d7be8666c5c7533973 | 46,142 | py | Python | obs-tn/generate_obs-tn_pdf.py | unfoldingWord-dev/tools | 7251d64b4750f1615125dab3c09d6d00a9c284b4 | [
"MIT"
] | 6 | 2015-07-27T21:50:39.000Z | 2020-06-25T14:32:35.000Z | obs-tn/generate_obs-tn_pdf.py | unfoldingWord-dev/tools | 7251d64b4750f1615125dab3c09d6d00a9c284b4 | [
"MIT"
] | 89 | 2015-06-24T09:35:40.000Z | 2022-02-13T14:40:31.000Z | obs-tn/generate_obs-tn_pdf.py | unfoldingWord-dev/tools | 7251d64b4750f1615125dab3c09d6d00a9c284b4 | [
"MIT"
] | 12 | 2015-07-13T17:31:04.000Z | 2021-08-06T06:50:21.000Z | #!/usr/bin/env python2
# -*- coding: utf8 -*-
#
# Copyright (c) 2019 unfoldingWord
# http://creativecommons.org/licenses/MIT/
# See LICENSE file for details.
#
# Contributors:
# Richard Mahn <rich.mahn@unfoldingword.org>
"""
This script generates the HTML and PDF OBS tN document
"""
from __future__ import unicode_literals, print_function
import os
import sys
import re
import logging
import argparse
import tempfile
import markdown2
import shutil
import subprocess
import json
import git
from glob import glob
from bs4 import BeautifulSoup
from ..general_tools.file_utils import write_file, read_file, load_json_object, unzip, load_yaml_object
# Keep a handle to the builtin print before it is shadowed below.
_print = print
DEFAULT_LANG = 'en'
DEFAULT_OWNER = 'unfoldingWord'
DEFAULT_TAG = 'master'
# DCS organizations to try, in order, when cloning resource repositories.
OWNERS = [DEFAULT_OWNER, 'STR', 'Door43-Catalog']
# Locale files available under ../locale, keyed by language code.
LANGUAGE_FILES = {
    'fr': 'French-fr_FR.json',
    'en': 'English-en_US.json'
}
def print(obj):
    """Shadow the builtin print: emit *obj* as pretty-printed UTF-8 JSON.

    Deliberate builtin shadowing (the original is kept as _print above).
    The .encode('utf-8') matches the file's Python 2 shebang; under Python 3
    it would print a bytes repr instead.
    """
    _print(json.dumps(obj, ensure_ascii=False, indent=2).encode('utf-8'))
class ObsTnConverter(object):
    def __init__(self, obs_tn_tag=None, obs_tag=None, tw_tag=None, ta_tag=None, working_dir=None, output_dir=None,
                 lang_code=DEFAULT_LANG, owner=DEFAULT_OWNER, regenerate=False, logger=None):
        """Configure paths and state for one OBS-tN PDF generation run.

        The *_tag arguments select git refs for the obs-tn/obs/tw/ta repos;
        working_dir defaults to a fresh temp dir and output_dir to working_dir.
        """
        self.obs_tn_tag = obs_tn_tag
        self.obs_tag = obs_tag
        self.tw_tag = tw_tag
        self.ta_tag = ta_tag
        self.working_dir = working_dir
        self.output_dir = output_dir
        self.lang_code = lang_code
        self.owner = owner
        self.regenerate = regenerate
        self.logger = logger
        if not self.working_dir:
            self.working_dir = tempfile.mkdtemp(prefix='obs-tn-')
        if not self.output_dir:
            self.output_dir = self.working_dir
        self.logger.info('WORKING DIR IS {0} FOR {1}'.format(self.working_dir, self.lang_code))
        # Per-resource clone locations under the working dir.
        self.obs_tn_dir = os.path.join(self.working_dir, '{0}_obs-tn'.format(lang_code))
        self.obs_dir = os.path.join(self.working_dir, '{0}_obs'.format(lang_code))
        self.tw_dir = os.path.join(self.working_dir, '{0}_tw'.format(lang_code))
        self.ta_dir = os.path.join(self.working_dir, '{0}_ta'.format(lang_code))
        self.html_dir = os.path.join(self.output_dir, 'html')
        if not os.path.isdir(self.html_dir):
            os.makedirs(self.html_dir)
        # Manifests and generated-text buffers, filled in by run().
        self.manifest = None
        self.tw_manifest = None
        self.ta_manifest = None
        self.obs_tn_text = ''
        self.tw_text = ''
        self.ta_text = ''
        # Caches for cross-reference resolution and problem reporting.
        self.tw_cat = {}
        self.bad_links = {}
        self.bad_notes = {}
        self.resource_data = {}
        self.rc_references = {}
        self.version = None
        self.publisher = None
        self.contributors = None
        self.issued = None
        self.file_id = None
        self.my_path = os.path.dirname(os.path.realpath(__file__))
        self.generation_info = {}
        # Default titles; overridden from the manifests in run().
        self.title = 'unfoldingWord® Open Bible Stories Translation Notes'
        self.tw_title = 'Translation Words'
        self.ta_title = 'Translation Academy'
        # Lazily-loaded locale strings (see translate()).
        self.translations = {}
def translate(self, key):
if not self.translations:
if self.lang_code not in LANGUAGE_FILES:
self.logger.error('No locale file for {0}.'.format(self.lang_code))
exit(1)
locale_file = os.path.join(self.my_path, '..', 'locale', LANGUAGE_FILES[self.lang_code])
if not os.path.isfile(locale_file):
self.logger.error('No locale file found at {0} for {1}.'.format(locale_file, self.lang_code))
exit(1)
self.translations = load_json_object(locale_file)
keys = key.split('.')
t = self.translations
for key in keys:
t = t.get(key, None)
if t is None:
# handle the case where the self.translations doesn't have that (sub)key
print("No translation for `{0}`".format(key))
break
return t
    def run(self):
        """Top-level pipeline: clone resources, read manifests, generate the
        HTML (when needed), persist caches/reports, and produce the PDF."""
        # self.load_resource_data()
        self.setup_resource_files()
        self.file_id = '{0}_obs-tn_{1}_{2}'.format(self.lang_code, self.obs_tn_tag, self.generation_info['obs-tn']['commit'])
        self.determine_if_regeneration_needed()
        self.manifest = load_yaml_object(os.path.join(self.obs_tn_dir, 'manifest.yaml'))
        self.tw_manifest = load_yaml_object(os.path.join(self.tw_dir, 'manifest.yaml'))
        self.ta_manifest = load_yaml_object(os.path.join(self.ta_dir, 'manifest.yaml'))
        # Pull publication metadata out of the obs-tn manifest.
        self.version = self.manifest['dublin_core']['version']
        self.title = self.manifest['dublin_core']['title']
        if 'subject' in self.tw_manifest['dublin_core']:
            self.tw_title = self.tw_manifest['dublin_core']['subject']
        if 'subject' in self.ta_manifest['dublin_core']:
            self.ta_title = self.ta_manifest['dublin_core']['subject']
        self.contributors = '<br/>'.join(self.manifest['dublin_core']['contributor'])
        self.publisher = self.manifest['dublin_core']['publisher']
        self.issued = self.manifest['dublin_core']['issued']
        # NOTE(review): no-op self-assignment; looks like leftover code.
        self.file_id = self.file_id
        self.load_tw_cat()
        self.logger.info('Creating OBS tN HTML files for {0}...'.format(self.file_id))
        # Regenerate HTML only when inputs changed or no cached HTML exists.
        if self.regenerate or not os.path.exists(os.path.join(self.output_dir, '{0}.html'.format(self.file_id))):
            self.generate_obs_tn_content()
            self.logger.info('Generating Body HTML for {0}...'.format(self.file_id))
            self.generate_body_html()
            self.logger.info('Generating Cover HTML for {0}...'.format(self.file_id))
            self.generate_cover_html()
            self.logger.info('Generating License HTML for {0}...'.format(self.file_id))
            self.generate_license_html()
            self.logger.info('Copying style sheet file for {0}...'.format(self.file_id))
            style_file = os.path.join(self.my_path, 'obs-tn_style.css')
            shutil.copy2(style_file, self.html_dir)
        self.save_resource_data()
        self.save_bad_links()
        self.save_bad_notes()
        self.logger.info('Generating PDF {0}/{1}.pdf...'.format(self.output_dir, self.file_id))
        self.generate_obs_tn_pdf()
        self.logger.info('PDF file can be found at {0}/{1}.pdf'.format(self.output_dir, self.file_id))
    def save_bad_links(self):
        """Write a plain-text report of all unresolved rc:// links collected
        during generation to <output_dir>/<file_id>_bad_links.txt."""
        bad_links = "BAD LINKS:\n"
        for source_rc in sorted(self.bad_links.keys()):
            for rc in sorted(self.bad_links[source_rc].keys()):
                # Strip the leading 'rc://' and split into path components.
                source = source_rc[5:].split('/')
                parts = rc[5:].split('/')
                if source[1] == 'obs-tn':
                    # NOTE: local variable shadows the builtin ``str``.
                    if parts[1] == 'tw':
                        str = ' tW'
                    else:
                        str = ' tN'
                    str += ' {0} {1}:{2}'.format(source[3].upper(), source[4], source[5])
                else:
                    str = ' {0}'.format(source_rc)
                str += ': BAD RC - `{0}`'.format(rc)
                if self.bad_links[source_rc][rc]:
                    # A suggested replacement was recorded for this link.
                    str += ' - change to `{0}`'.format(self.bad_links[source_rc][rc])
                bad_links += "{0}\n".format(str)
        save_file = os.path.join(self.output_dir, '{0}_bad_links.txt'.format(self.file_id))
        write_file(save_file, bad_links)
        self.logger.info('BAD LINKS file can be found at {0}'.format(save_file))
    def save_bad_notes(self):
        """Write an HTML report of notes whose quoted phrases were not found
        in their frame text, to <output_dir>/<file_id>_bad_notes.html.

        NOTE(review): the closing tag below is "</u>" although the list was
        opened with "<ul>" — presumably a typo in the generated HTML.
        """
        bad_notes = '<!DOCTYPE html><html lang="en-US"><head data-suburl=""><title>NON-MATCHING NOTES</title><meta charset="utf-8"></head><body><p>NON-MATCHING NOTES (i.e. not found in the frame text as written):</p><ul>'
        for cf in sorted(self.bad_notes.keys()):
            # cf is '<chapter>-<frame>'; link to the local HTML and to DCS.
            bad_notes += '<li><a href="{0}_html/{0}.html#obs-tn-{1}" title="See in the OBS tN Docs (HTML)" target="obs-tn-html">{1}</a><a href="https://git.door43.org/{6}/{2}_obs-tn/src/branch/{7}/content/{3}/{4}.md" style="text-decoration:none" target="obs-tn-git"><img src="http://www.myiconfinder.com/uploads/iconsets/16-16-65222a067a7152473c9cc51c05b85695-note.png" title="See OBS UTN note on DCS"></a><a href="https://git.door43.org/{6}/{2}_obs/src/branch/master/content/{3}.md" style="text-decoration:none" target="obs-git"><img src="https://cdn3.iconfinder.com/data/icons/linecons-free-vector-icons-pack/32/photo-16.png" title="See OBS story on DCS"></a>:<br/><i>{5}</i><br/><ul>'.format(
                self.file_id, cf, self.lang_code, cf.split('-')[0], cf.split('-')[1], self.bad_notes[cf]['text'], self.owner, DEFAULT_TAG)
            for note in self.bad_notes[cf]['notes']:
                for key in note.keys():
                    if note[key]:
                        bad_notes += '<li><b><i>{0}</i></b><br/>{1} (QUOTE ISSUE)</li>'.format(key, note[key])
                    else:
                        bad_notes += '<li><b><i>{0}</i></b></li>'.format(key)
            bad_notes += '</ul></li>'
        bad_notes += "</u></body></html>"
        save_file = os.path.join(self.output_dir, '{0}_bad_notes.html'.format(self.file_id))
        write_file(save_file, bad_notes)
        self.logger.info('BAD NOTES file can be found at {0}'.format(save_file))
@staticmethod
def get_resource_git_url(resource, lang, owner):
return 'https://git.door43.org/{0}/{1}_{2}.git'.format(owner, lang, resource)
def clone_resource(self, resource, tag=DEFAULT_TAG, url=None):
if not url:
url = self.get_resource_git_url(resource, self.lang_code, self.owner)
repo_dir = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource))
if not os.path.isdir(repo_dir):
try:
git.Repo.clone_from(url, repo_dir)
except git.GitCommandError:
owners = OWNERS
owners.insert(0, self.owner)
languages = [self.lang_code, DEFAULT_LANG]
if not os.path.isdir(repo_dir):
for lang in languages:
for owner in owners:
url = self.get_resource_git_url(resource, lang, owner)
try:
git.Repo.clone_from(url, repo_dir)
except git.GitCommandError:
continue
break
if os.path.isdir(repo_dir):
break
g = git.Git(repo_dir)
g.fetch()
g.checkout(tag)
if tag == DEFAULT_TAG:
g.pull()
commit = g.rev_parse('HEAD', short=10)
self.generation_info[resource] = {'tag': tag, 'commit': commit}
    def setup_resource_files(self):
        """Clone all four source repositories and fetch the OBS logo image
        into the HTML output directory if it is not already there."""
        self.clone_resource('obs-tn', self.obs_tn_tag)
        self.clone_resource('obs', self.obs_tag)
        self.clone_resource('tw', self.tw_tag)
        self.clone_resource('ta', self.ta_tag)
        if not os.path.isfile(os.path.join(self.html_dir, 'logo-obs-tn.png')):
            # Download the logo once via curl (shell=True with a fixed URL).
            command = 'curl -o {0}/logo-obs-tn.png https://cdn.door43.org/assets/uw-icons/logo-obs-256.png'.format(
                self.html_dir)
            subprocess.call(command, shell=True)
    def load_tw_cat(self):
        """Build self.tw_cat: chapter -> frame -> list of tw rc:// links.

        Downloads tw_cat.json (cached in the working dir), resolves each
        term to its tW category directory (kt/names/other), applies a few
        known term renames, and records unresolvable terms in bad_links.
        """
        # Legacy tw_cat.json term ids that were renamed in current tW.
        mapping = {
            'idol': 'falsegod',
            'witness': 'testimony',
            'newcovenant': 'covenant',
            'taxcollector': 'tax',
            'believer': 'believe'
        }
        tw_cat_file = os.path.join(self.working_dir, 'tw_cat.json')
        if not os.path.isfile(tw_cat_file):
            command = 'curl -o {0} https://cdn.door43.org/v2/ts/obs/en/tw_cat.json'.format(
                tw_cat_file)
            subprocess.call(command, shell=True)
        tw_cat = load_json_object(tw_cat_file)
        for chapter in tw_cat['chapters']:
            self.tw_cat[chapter['id']] = {}
            for frame in chapter['frames']:
                self.tw_cat[chapter['id']][frame['id']] = []
                for item in frame['items']:
                    term = item['id']
                    category = None
                    # Find which tW category directory contains this term.
                    for c in ['kt', 'names', 'other']:
                        if os.path.exists(os.path.join(self.tw_dir, 'bible', c, '{0}.md'.format(term))):
                            category = c
                            break
                    if not category and term in mapping:
                        # Retry under the renamed term id.
                        category = None
                        for c in ['kt', 'names', 'other']:
                            if os.path.exists(os.path.join(self.tw_dir, 'bible', c, '{0}.md'.format(mapping[term]))):
                                category = c
                                term = mapping[term]
                                break
                    if category:
                        self.tw_cat[chapter['id']][frame['id']].append('rc://{0}/tw/dict/bible/{1}/{2}'.format(
                            self.lang_code, category, term))
                    if not category or term != item['id']:
                        # Record unresolved/renamed terms so they show up in
                        # the bad-links report (fix holds the rename, if any).
                        fix = None
                        if term != item['id']:
                            fix = term
                        source_rc = 'tw_cat.json {0}/{1}'.format(chapter['id'], frame['id'])
                        if source_rc not in self.bad_links:
                            self.bad_links[source_rc] = {}
                        self.bad_links[source_rc][item['id']] = fix
def determine_if_regeneration_needed(self):
# check if any commit hashes have changed
old_info = self.get_previous_generation_info()
if not old_info:
self.logger.info('Looks like this is a new commit of {0}. Generating PDF.'.format(self.file_id))
self.regenerate = True
else:
for resource in self.generation_info:
if resource in old_info and resource in self.generation_info \
and (old_info[resource]['tag'] != self.generation_info[resource]['tag']
or old_info[resource]['commit'] != self.generation_info[resource]['commit']):
self.logger.info('Resource {0} has changed: {1} => {2}, {3} => {4}. REGENERATING PDF.'.format(
resource, old_info[resource]['tag'], self.generation_info[resource]['tag'],
old_info[resource]['commit'], self.generation_info[resource]['commit']
))
self.regenerate = True
    def get_contributors_html(self):
        """Return a 'Contributors' article div, or '' when the manifest listed none."""
        if self.contributors and len(self.contributors):
            return '''
            <div id="contributors" class="article">
              <h1 class="section-header">{0}</h1>
              <p>
                {1}
              </p>
            </div>
            '''.format(self.translate('contributors'), self.contributors)
        else:
            return ''
def save_resource_data(self):
save_dir = os.path.join(self.output_dir, 'save')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_file = os.path.join(save_dir, '{0}_resource_data.json'.format(self.file_id))
write_file(save_file, self.resource_data)
save_file = os.path.join(save_dir, '{0}_references.json'.format(self.file_id))
write_file(save_file, self.rc_references)
save_file = os.path.join(save_dir, '{0}_bad_links.json'.format(self.file_id))
write_file(save_file, self.bad_links)
save_file = os.path.join(save_dir, '{0}_bad_notes.json'.format(self.file_id))
write_file(save_file, self.bad_notes)
save_file = os.path.join(save_dir, '{0}_generation_info.json'.format(self.file_id))
write_file(save_file, self.generation_info)
def get_previous_generation_info(self):
save_dir = os.path.join(self.output_dir, 'save')
save_file = os.path.join(save_dir, '{0}_generation_info.json'.format(self.file_id))
if os.path.isfile(save_file):
return load_json_object(save_file)
else:
return {}
def load_resource_data(self):
save_dir = os.path.join(self.output_dir, 'save')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_file = os.path.join(save_dir, '{0}_resource_data.json'.format(self.file_id))
if os.path.isfile(save_file):
self.resource_data = load_json_object(save_file)
save_file = os.path.join(save_dir, '{0}_references.json'.format(self.file_id))
if os.path.isfile(save_file):
self.rc_references = load_json_object(save_file)
save_file = os.path.join(save_dir, '{0}_bad_links.json'.format(self.file_id))
if os.path.isfile(save_file):
self.bad_links = load_json_object(save_file)
    def generate_body_html(self):
        """Assemble the full document HTML (tN body + tW/tA appendices +
        contributors), resolve rc:// links, post-process headers with
        BeautifulSoup and write <output_dir>/<file_id>.html."""
        obs_tn_html = self.obs_tn_text
        ta_html = self.get_ta_html()
        tw_html = self.get_tw_html()
        contributors_html = self.get_contributors_html()
        html = '\n'.join([obs_tn_html, tw_html, ta_html, contributors_html])
        html = self.replace_rc_links(html)
        html = self.fix_links(html)
        html = '''<!DOCTYPE html>
        <html lang="en-US">
        <head data-suburl="">
            <meta charset="utf-8"/>
            <title>{0} - v{1}</title>
        </head>
        <body>
        {2}
        </body>
        </html>
        '''.format(self.title, self.version, html)
        soup = BeautifulSoup(html, 'html.parser')
        # Make all headers that have a header right before them non-break
        # (keeps a heading from being stranded at the bottom of a PDF page)
        for h in soup.find_all(['h2', 'h3', 'h4', 'h5', 'h6']):
            prev = h.find_previous_sibling()
            if prev and re.match('^h[2-6]$', prev.name):
                h['class'] = h.get('class', []) + ['no-break']
        # Make all headers within the page content to just be span tags with h# classes
        # so only real section headers feed the wkhtmltopdf outline/TOC
        for h in soup.find_all(['h3', 'h4', 'h5', 'h6']):
            if not h.get('class') or 'section-header' not in h['class']:
                h['class'] = h.get('class', []) + [h.name]
                h.name = 'span'
        soup.head.append(soup.new_tag('link', href="html/obs-tn_style.css", rel="stylesheet"))
        html_file = os.path.join(self.output_dir, '{0}.html'.format(self.file_id))
        # NOTE: unicode() — this module targets Python 2
        write_file(html_file, unicode(soup))
        self.logger.info('Wrote HTML to {0}'.format(html_file))
def generate_cover_html(self):
cover_html = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<link href="obs-tn_style.css" rel="stylesheet"/>
</head>
<body>
<div style="text-align:center;padding-top:200px" class="break" id="cover">
<img src="logo-obs-tn.png" width="120">
<span class="h1">{0}</span>
<span class="h3">{1} {2}</span>
</div>
</body>
</html>
'''.format(self.title, self.translate('license.version'), self.version)
html_file = os.path.join(self.html_dir, '{0}_cover.html'.format(self.file_id))
write_file(html_file, cover_html)
def generate_license_html(self):
license_file = os.path.join(self.obs_tn_dir, 'LICENSE.md')
license = markdown2.markdown_path(license_file)
license_html = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<link href="obs-tn_style.css" rel="stylesheet"/>
</head>
<body>
<div class="break">
<span class="h1">{4}</span>
<p>
<strong>{5}:</strong> {0}<br/>
<strong>{6}:</strong> {1}<br/>
<strong>{7}:</strong> {2}<br/>
</p>
{3}
</div>
</body>
</html>
'''.format(self.issued, self.version, self.publisher, license,
self.translate('license.copyrights_and_licensing'),
self.translate('license.date'),
self.translate('license.version'),
self.translate('license.published_by'))
html_file = os.path.join(self.html_dir, '{0}_license.html'.format(self.file_id))
write_file(html_file, license_html)
    def generate_obs_tn_pdf(self):
        """Run wkhtmltopdf over the generated HTML (cover, license, TOC, body)
        to produce <output_dir>/<file_id>.pdf."""
        cover_file = os.path.join(self.html_dir, '{0}_cover.html'.format(self.file_id))
        license_file = os.path.join(self.html_dir, '{0}_license.html'.format(self.file_id))
        header_file = os.path.join(self.my_path, 'obs-tn_header.html')
        footer_file = os.path.join(self.my_path, 'obs-tn_footer.html')
        body_file = os.path.join(self.output_dir, '{0}.html'.format(self.file_id))
        output_file = os.path.join(self.output_dir, '{0}.pdf'.format(self.file_id))
        template_file = os.path.join(self.my_path, '{0}_toc_template.xsl'.format(self.lang_code))
        # --run-script sets window.status='done' once the DOM is complete so
        # wkhtmltopdf waits for page javascript before rendering
        command = '''wkhtmltopdf
                        --javascript-delay 2000
                        --debug-javascript
                        --cache-dir "{6}"
                        --run-script "setInterval(function(){{if(document.readyState=='complete') setTimeout(function() {{window.status='done';}}, 100);}},200)"
                        --encoding utf-8
                        --outline-depth 3
                        -O portrait
                        -L 15 -R 15 -T 15 -B 15
                        --header-html "{0}"
                        --header-spacing 2
                        --footer-html "{7}"
                        cover "{1}"
                        cover "{2}"
                        toc
                        --disable-dotted-lines
                        --enable-external-links
                        --xsl-style-sheet "{3}"
                        --toc-header-text "{8}"
                        "{4}"
                        "{5}"
                        '''.format(header_file, cover_file, license_file, template_file, body_file, output_file,
                                   os.path.join(self.working_dir, 'wkhtmltopdf'), footer_file,
                                   self.translate('table_of_contents'))
        # collapse the multi-line template into a single shell command line
        command = re.sub(r'\s+', ' ', command, flags=re.MULTILINE)
        self.logger.info(command)
        subprocess.call(command, shell=True)
@staticmethod
def highlight_text(text, note):
parts = re.split(r"\s*…\s*|\s*\.\.\.\s*", note)
processed_text = ''
to_process_text = text
for idx, part in enumerate(parts):
split_pattern = re.escape(part)
if '<span' in text:
split_pattern = '({0})'.format(re.sub('(\\\\ )+', '(\s+|(\s*</*span[^>]*>\s*)+)', split_pattern))
else:
split_pattern = '({0})'.format(split_pattern)
splits = re.split(split_pattern, to_process_text, 1)
processed_text += splits[0]
if len(splits) > 1:
processed_text += '<span class="highlight{0}">{1}</span>'.format(' split' if len(parts) > 1 else '',
splits[1])
if len(splits) > 2:
to_process_text = splits[-1]
if to_process_text:
processed_text += to_process_text
return processed_text
def highlight_text_with_frame(self, orig_text, frame_html, cf):
ignore = ['A Bible story from', 'Connecting Statement', 'Connecting Statement:',
'General Information', 'General Note', 'Information générale',
'Termes Importants', 'Une histoire biblique tirée de', 'Une histoire de la Bible tirée de',
'Une histoire de la Bible à partir', 'Une histoire de la Bible à partir de',
'Mots de Traduction', 'Nota geral', 'Déclaration de connexion', 'Cette histoire biblique est tirée',
'Une histoire biblique tirée de:', 'Informations générales', 'Information Générale']
highlighted_text = orig_text
phrases = []
soup = BeautifulSoup(frame_html, 'html.parser')
headers = soup.find_all('h4')
for header in headers:
phrases.append(header.text)
phrases.sort(key=len, reverse=True)
for phrase in phrases:
new_highlighted_text = self.highlight_text(highlighted_text, phrase)
if new_highlighted_text != highlighted_text:
highlighted_text = new_highlighted_text
elif phrase not in ignore:
if cf not in self.bad_notes:
self.bad_notes[cf] = {
'text': orig_text,
'notes': []
}
bad_note = {phrase: None}
alt_notes = [
phrase.replace('‘', "'").replace('’', "'").replace('“', '"').replace('”', '"'),
phrase.replace("'", '’').replace('’', '‘', 1).replace('"', '”').replace('”', '“', 1),
phrase.replace('‘', "'").replace('’', "'").replace('“', '"').replace('”', '"'),
phrase.replace("'", '’').replace('’', '‘', 1).replace('"', '”').replace('”', '“', 1),
phrase.replace('“', '"').replace('”', '"'),
phrase.replace('"', '”').replace('”', '“', 1),
phrase.replace("'", '’').replace('’', '‘', 1),
phrase.replace("'", '’'),
phrase.replace('’', "'"),
phrase.replace('‘', "'")]
for alt_note in alt_notes:
if orig_text != self.highlight_text(orig_text, alt_note):
bad_note[phrase] = alt_note
break
self.bad_notes[cf]['notes'].append(bad_note)
return highlighted_text
    def generate_obs_tn_content(self):
        """Build the main tN body HTML (one div per chapter, one per frame)
        into self.obs_tn_text, highlight note phrases in the frame text,
        register rc:// metadata for every frame and crawl its links.
        Also writes a prettified copy to <html_dir> for inspection."""
        content = '''
        <div id="obs-tn" class="resource-title-page">
            <h1 class="section-header">{0}</h1>
        </div>
        '''.format(self.title.replace('unfoldingWord® ', ''))
        chapter_dirs = sorted(glob(os.path.join(self.obs_tn_dir, 'content', '*')))
        for chapter_dir in chapter_dirs:
            if os.path.isdir(chapter_dir):
                chapter = os.path.basename(chapter_dir)
                soup = BeautifulSoup(
                    markdown2.markdown_path(os.path.join(self.obs_dir, 'content', '{0}.md'.format(chapter))),
                    'html.parser')
                title = soup.h1.text
                paragraphs = soup.find_all('p')
                frames = []
                # odd-indexed <p>s hold the frame texts — assumes OBS chapter
                # markdown alternates image/text paragraphs (TODO confirm)
                for idx, p in enumerate(paragraphs):
                    if idx % 2:
                        frames.append(p.text)
                content += '<div id="chapter-{0}" class="chapter break">\n\n'.format(chapter)
                content += '<h2>{0}</h2>\n'.format(title)
                frame_files = sorted(glob(os.path.join(chapter_dir, '*.md')))
                for frame_file in frame_files:
                    frame = os.path.splitext(os.path.basename(frame_file))[0]
                    frame_idx = int(frame)
                    id = 'obs-tn-{0}-{1}'.format(chapter, frame)
                    content += '<div id="{0}" class="frame">\n'.format(id)
                    content += '<h3>{0}:{1}</h3>\n'.format(chapter, frame)
                    text = ''
                    if frame_idx > 0:
                        # frame "00" is the chapter intro with no story text
                        text = re.sub(r'[\n\s]+', ' ', frames[frame_idx - 1], flags=re.MULTILINE)
                    frame_html = markdown2.markdown_path(frame_file)
                    # demote the note headings below the frame's own <h3>
                    frame_html = frame_html.replace('h1>', 'h4>')
                    frame_html = frame_html.replace('h2>', 'h5>')
                    frame_html = frame_html.replace('h3>', 'h6>')
                    # turn relative chapter/frame links into in-document anchors
                    frame_html = re.sub(r'href="(\d+)/(\d+)"', r'href="#obs-tn-\1-\2"', frame_html)
                    if text:
                        text = self.highlight_text_with_frame(text, frame_html, '{0}:{1}'.format(chapter, frame))
                    # when the notes link no tW articles themselves, append the
                    # terms listed for this frame in tw_cat.json
                    if '/tw/' not in frame_html and chapter in self.tw_cat and frame in self.tw_cat[chapter]\
                            and len(self.tw_cat[chapter][frame]):
                        frame_html += "<h3>{0}</h3>\n<ul>".format(self.tw_title)
                        for rc in self.tw_cat[chapter][frame]:
                            frame_html += '<li>[[{0}]]</li>'.format(rc)
                        frame_html += '</ul>'
                    content += '<div id="{0}-text" class="frame-text">\n{1}\n</div>\n'.format(id, text)
                    content += frame_html
                    content += '</div>\n\n'
                    # HANDLE RC LINKS
                    rc = 'rc://{0}/obs-tn/help/{1}/{2}'.format(self.lang_code, chapter, frame)
                    self.resource_data[rc] = {
                        'rc': rc,
                        'id': id,
                        'link': '#' + id,
                        'title': title
                    }
                    self.get_resource_data_from_rc_links(frame_html, rc)
                content += '</div>\n\n'
        self.obs_tn_text = content
        write_file(os.path.join(self.html_dir, '{0}_obs-tn_content.html'.format(self.file_id)),
                   BeautifulSoup(content, 'html.parser').prettify())
def get_tw_html(self):
tw_html = ''
sorted_rcs = sorted(self.resource_data.keys(), key=lambda k: self.resource_data[k]['title'].lower())
for rc in sorted_rcs:
if '/tw/' not in rc:
continue
reference_text = self.get_reference_text(rc)
if not reference_text:
continue
html = self.resource_data[rc]['text']
html = self.increase_headers(html)
title = self.resource_data[rc]['title']
alt_title = self.resource_data[rc]['alt_title']
if alt_title:
html = '<h2 class="hidden">{0}11</h2><span class="h2 section-header">{1}</span>\n{2}'.\
format(alt_title, title, html)
else:
html = '<h2 class="section-header">{0}</h2>\n{1}'.format(title, html)
tw_html += '<div id="{0}" class="article">\n{1}\n{2}</div>\n\n'.format(self.resource_data[rc]['id'], html,
reference_text)
if tw_html:
tw_html = '<div id="tw" class="resource-title-page">\n<h1 class="section-header">{0}</h1>\n</div>\n\n{1}'.\
format(self.tw_title, tw_html)
return tw_html
def get_ta_html(self):
ta_html = ''
sorted_rcs = sorted(self.resource_data.keys(), key=lambda k: self.resource_data[k]['title'].lower())
for rc in sorted_rcs:
if '/ta/' not in rc:
continue
reference_text = self.get_reference_text(rc)
if not reference_text:
continue
if self.resource_data[rc]['text']:
ta_html += '''
<div id="{0}" class="article">
<h2 class="section-header">{1}</h2>
<div class="top-box box">
<div class="ta-question">
{2}: <em>{3}</em>
</div>
</div>
{4}
{5}
</div>
'''.format(self.resource_data[rc]['id'], self.resource_data[rc]['title'],
self.translate('this_page_answers_the_question'),
self.resource_data[rc]['alt_title'],
self.increase_headers(self.resource_data[rc]['text']), self.get_reference_text(rc))
if ta_html:
ta_html = '<div id="ta" class="resource-title-page">\n<h1 class="section-header">{0}</h1>\n</div>\n\n{1}'.\
format(self.ta_title, ta_html)
return ta_html
def has_tn_references(self, rc):
if rc not in self.rc_references:
return False
for reference in self.rc_references[rc]:
if '/obs-tn/' in reference:
return True
return False
def get_reference_text(self, rc):
if not self.has_tn_references(rc):
return ''
uses = ''
references = []
done = {}
for reference in self.rc_references[rc]:
if '/obs-tn/' in reference and reference not in done:
parts = reference[5:].split('/')
id = 'obs-tn-{0}-{1}'.format(parts[3], parts[4])
text = '{0}:{1}'.format(parts[3], parts[4])
references.append('<a href="#{0}">{1}</a>'.format(id, text))
done[reference] = True
if len(references):
uses = '<p class="go-back">\n(<b>{0}:</b> {1})\n</p>\n'.format(self.translate('go_back_to'),
'; '.join(references))
return uses
    def get_resource_data_from_rc_links(self, text, source_rc):
        """Crawl every rc:// link (and relative ../ markdown link) in `text`.

        For each resolvable tA/tW rc link: load the linked markdown, record
        its title/anchor/body in self.resource_data, note the back-reference
        in self.rc_references, and recurse into the loaded article.  Links
        that do not resolve to a file are recorded in
        self.bad_links[source_rc], with a suggested fix when one can be
        guessed.
        """
        if source_rc not in self.bad_links:
            self.bad_links[source_rc] = {}
        rcs = re.findall(r'rc://[A-Z0-9/_\*-]+', text, flags=re.IGNORECASE | re.MULTILINE)
        for rc in rcs:
            parts = rc[5:].split('/')
            resource = parts[1]
            path = '/'.join(parts[3:])
            if resource not in ['ta', 'tw']:
                continue
            if rc not in self.rc_references:
                self.rc_references[rc] = []
            if source_rc not in self.rc_references[rc]:
                self.rc_references[rc].append(source_rc)
            title = ''
            t = ''
            anchor_id = '{0}-{1}'.format(resource, path.replace('/', '-'))
            link = '#{0}'.format(anchor_id)
            # tW articles live at <path>.md, tA articles at <path>/01.md
            file_path = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource),
                                     '{0}.md'.format(path))
            if not os.path.isfile(file_path):
                file_path = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource),
                                         '{0}/01.md'.format(path))
            fix = None
            if not os.path.isfile(file_path):
                # file missing: try to guess a corrected link for common mistakes
                if resource == 'tw':
                    # a wrong category (kt/names/other) is the usual tW error
                    for category in ['kt', 'other', 'names']:
                        path2 = re.sub(r'^bible/([^/]+)/', r'bible/{0}/'.format(category), path.lower())
                        fix = 'rc://{0}/tw/dict/{1}'.format(self.lang_code, path2)
                        anchor_id = '{0}-{1}'.format(resource, path2.replace('/', '-'))
                        link = '#{0}'.format(anchor_id)
                        file_path = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource),
                                                 '{0}.md'.format(path2))
                        if os.path.isfile(file_path):
                            break
                elif resource == 'ta':
                    # known misspelled tA module names
                    bad_names = {
                        'figs-abstractnoun': 'translate/figs-abstractnouns'
                    }
                    if parts[3] in bad_names:
                        path2 = bad_names[parts[3]]
                    else:
                        path2 = path
                    fix = 'rc://{0}/ta/man/{1}'.format(self.lang_code, path2)
                    anchor_id = '{0}-{1}'.format(resource, path2.replace('/', '-'))
                    link = '#{0}'.format(anchor_id)
                    file_path = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource),
                                             '{0}/01.md'.format(path2))
            if os.path.isfile(file_path):
                if fix:
                    # resolved, but only after correction — report the fix
                    self.bad_links[source_rc][rc] = fix
                if not rc in self.resource_data:
                    t = markdown2.markdown_path(file_path)
                    alt_title = ''
                    if resource == 'ta':
                        title_file = os.path.join(os.path.dirname(file_path), 'title.md')
                        question_file = os.path.join(os.path.dirname(file_path), 'sub-title.md')
                        if os.path.isfile(title_file):
                            title = read_file(title_file)
                        else:
                            title = self.get_first_header(t)
                            t = re.sub(r'\s*\n*\s*<h\d>[^<]+</h\d>\s*\n*', r'', t, 1,
                                       flags=re.IGNORECASE | re.MULTILINE)  # removes the header
                        if os.path.isfile(question_file):
                            alt_title = read_file(question_file)
                        t = self.fix_ta_links(t, path.split('/')[0])
                    elif resource == 'tw':
                        title = self.get_first_header(t)
                        t = re.sub(r'\s*\n*\s*<h\d>[^<]+</h\d>\s*\n*', r'', t, 1,
                                   flags=re.IGNORECASE | re.MULTILINE)  # removes the header
                        if len(title) > 70:
                            # shorten very long tW titles (comma-separated synonym lists)
                            alt_title = ','.join(title[:70].split(',')[:-1]) + ', ...'
                        t = re.sub(r'\n*\s*\(See [^\n]*\)\s*\n*', '\n\n', t,
                                   flags=re.IGNORECASE | re.MULTILINE)  # removes the See also line
                        t = self.fix_tw_links(t, path.split('/')[1])
                    self.resource_data[rc] = {
                        'rc': rc,
                        'link': link,
                        'id': anchor_id,
                        'title': title,
                        'alt_title': alt_title,
                        'text': t,
                        'references': [source_rc]
                    }
                    # crawl the links inside the newly loaded article too
                    self.get_resource_data_from_rc_links(t, rc)
                else:
                    if source_rc not in self.resource_data[rc]['references']:
                        self.resource_data[rc]['references'].append(source_rc)
            else:
                if rc not in self.bad_links[source_rc]:
                    self.bad_links[source_rc][rc] = None
        # relative markdown links like (../kt/god.md) are always wrong here;
        # suggest the rc:// form when the path shape is recognizable
        rcs = re.findall(r'(?<=\()\.+/[^\)]+(?=\))', text, flags=re.IGNORECASE | re.MULTILINE)
        for rc in rcs:
            fix = re.sub(r'(\.\./)+(kt|names|other)/([^)]+?)(\.md)*', r'rc://{0}/tw/dict/bible/\2/\3'.
                         format(self.lang_code), rc, flags=re.IGNORECASE)
            if fix != rc:
                self.bad_links[source_rc][rc] = fix
            else:
                self.bad_links[source_rc][rc] = None
        rcs = re.findall(r'(?<=\()\.[^ \)]+(?=\))', text, flags=re.IGNORECASE | re.MULTILINE)
        for rc in rcs:
            fix = None
            if '/kt/' in rc or '/names/' in rc or '/other/' in rc:
                new_rc = re.sub(r'(\.\./)+(kt|names|other)/([^)]+?)(\.md)*', r'rc://{0}/tw/dict/bible/\2/\3'.
                                format(self.lang_code), rc, flags=re.IGNORECASE)
                if new_rc != rc:
                    fix = new_rc
            self.bad_links[source_rc][rc] = fix
@staticmethod
def increase_headers(text, increase_depth=1):
if text:
for num in range(5, 0, -1):
text = re.sub(r'<h{0}>\s*(.+?)\s*</h{0}>'.format(num), r'<h{0}>\1</h{0}>'.format(num + increase_depth),
text, flags=re.MULTILINE)
return text
@staticmethod
def decrease_headers(text, minimum_header=1, decrease=1):
if text:
for num in range(minimum_header, minimum_header + 10):
text = re.sub(r'<h{0}>\s*(.+?)\s*</h{0}>'.format(num),
r'<h{0}>\1</h{0}>'.format(num - decrease if (num - decrease) <= 5 else 5), text,
flags=re.MULTILINE)
return text
@staticmethod
def get_first_header(text):
lines = text.split('\n')
if len(lines):
for line in lines:
if re.match(r'<h1>', line):
return re.sub(r'<h1>(.*?)</h1>', r'\1', line)
return lines[0]
return "NO TITLE"
def fix_tw_links(self, text, group):
text = re.sub(r'href="\.\./([^/)]+?)(\.md)*"', r'href="rc://{0}/tw/dict/bible/{1}/\1"'.
format(self.lang_code, group), text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'href="\.\./([^)]+?)(\.md)*"', r'href="rc://{0}/tw/dict/bible/\1"'.format(self.lang_code),
text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'(\(|\[\[)(\.\./)*(kt|names|other)/([^)]+?)(\.md)*(\)|\]\])(?!\[)',
r'[[rc://{0}/tw/dict/bible/\3/\4]]'.format(self.lang_code), text,
flags=re.IGNORECASE | re.MULTILINE)
return text
def fix_ta_links(self, text, manual):
text = re.sub(r'href="\.\./([^/"]+)/01\.md"', r'href="rc://{0}/ta/man/{1}/\1"'.format(self.lang_code, manual),
text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'href="\.\./\.\./([^/"]+)/([^/"]+)/01\.md"', r'href="rc://{0}/ta/man/\1/\2"'.
format(self.lang_code), text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'href="([^# :/"]+)"', r'href="rc://{0}/ta/man/{1}/\1"'.format(self.lang_code, manual), text,
flags=re.IGNORECASE | re.MULTILINE)
return text
def replace(self, m):
before = m.group(1)
rc = m.group(2)
after = m.group(3)
if rc not in self.resource_data:
return m.group()
info = self.resource_data[rc]
if (before == '[[' and after == ']]') or (before == '(' and after == ')') or before == ' ' \
or (before == '>' and after == '<'):
return '<a href="{0}">{1}</a>'.format(info['link'], info['title'])
if (before == '"' and after == '"') or (before == "'" and after == "'"):
return info['link']
self.logger.error("FOUND SOME MALFORMED RC LINKS: {0}".format(m.group()))
return m.group()
    def replace_rc_links(self, text):
        """Replace every rc:// link in `text` with an in-document anchor.

        Handled forms:
          Case 1: [[rc://en/tw/help/bible/kt/word]] -> <a href="#tw-kt-word">Title</a>
          Case 2: bare rc://en/tw/help/bible/kt/word -> <a href="#tw-kt-word">Title</a>
          Case 3: <a href="rc://...">text</a>        -> href rewritten to the anchor
          Case 4: known rc never referenced from a tN note -> plain title text
          Case 5: rc unknown to this document        -> left as (or reduced to) its text
        """
        def replace_rc(match):
            left = match.group(1)
            rc = match.group(2)
            right = match.group(3)
            title = match.group(4)
            if rc in self.resource_data:
                info = self.resource_data[rc]
                if not self.has_tn_references(rc):
                    # Case 4
                    return info['title']
                if (left and right and left == '[[' and right == ']]') \
                        or (not left and not right):
                    # Case 1 and Case 2
                    return '<a href="{0}">{1}</a>'.format(info['link'], info['title'])
                else:
                    # Case 3
                    return (left if left else '') + info['link'] + (right if right else '')
            else:
                # Case 5
                return title if title else rc
        regex = re.compile(r'(\[\[|<a[^>]+href=")*(rc://[/A-Za-z0-9\*_-]+)(\]\]|"[^>]*>(.*?)</a>)*')
        text = regex.sub(replace_rc, text)
        return text
    @staticmethod
    def fix_links(text):
        """Convert leftover raw URLs in `text` to <a> tags and strip leading
        zeros from chapter:verse references."""
        # Change [[http.*]] to <a href="http\1">http\1</a>
        text = re.sub(r'\[\[http([^\]]+)\]\]', r'<a href="http\1">http\1</a>', text, flags=re.IGNORECASE)
        # convert URLs to links if not already (the [^">] guard skips URLs
        # already inside an attribute or tag)
        text = re.sub(r'([^">])((http|https|ftp)://[A-Za-z0-9\/\?&_\.:=#-]+[A-Za-z0-9\/\?&_:=#-])',
                      r'\1<a href="\2">\2</a>', text, flags=re.IGNORECASE)
        # URLS wth just www at the start, no http
        text = re.sub(r'([^\/])(www\.[A-Za-z0-9\/\?&_\.:=#-]+[A-Za-z0-9\/\?&_:=#-])', r'\1<a href="http://\2">\2</a>',
                      text, flags=re.IGNORECASE)
        # Removes leading 0s from verse references
        text = re.sub(r' 0*(\d+):0*(\d+)(-*)0*(\d*)', r' \1:\2\3\4', text, flags=re.IGNORECASE | re.MULTILINE)
        return text
def main(obs_tn_tag, obs_tag, tw_tag, ta_tag, lang_codes, working_dir, output_dir, owner, regenerate):
    """Entry point: run the OBS tN converter once per requested language.

    Empty working_dir / output_dir fall back to the WORKING_DIR / OUTPUT_DIR
    environment variables; an empty language list defaults to English.
    """
    if not obs_tag:
        # BUGFIX: this previously read the global `args.obs_sn` (copied from
        # the obs-sn converter), an attribute the parser here never defines,
        # raising AttributeError whenever the branch ran. Fall back to the
        # OBS tN tag instead.
        obs_tag = obs_tn_tag
    if not lang_codes:
        lang_codes = [DEFAULT_LANG]
    # configure the root logger to echo everything to the console
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    if not working_dir and 'WORKING_DIR' in os.environ:
        working_dir = os.environ['WORKING_DIR']
        print('Using env var WORKING_DIR: {0}'.format(working_dir))
    if not output_dir and 'OUTPUT_DIR' in os.environ:
        output_dir = os.environ['OUTPUT_DIR']
        print('Using env var OUTPUT_DIR: {0}'.format(output_dir))
    for lang_code in lang_codes:
        # _print is the real builtin; the module-level print() JSON-dumps
        _print('Starting OBS TN Converter for {0}...'.format(lang_code))
        obs_tn_converter = ObsTnConverter(obs_tn_tag, obs_tag, tw_tag, ta_tag, working_dir, output_dir, lang_code,
                                          owner, regenerate, logger)
        obs_tn_converter.run()
if __name__ == '__main__':
    # CLI entry point. Every tag defaults to the master branch; the language
    # list defaults inside main(). `args` stays module-global deliberately.
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-l', '--lang', dest='lang_codes', required=False, help='Language Code(s)', action='append')
    parser.add_argument('-w', '--working', dest='working_dir', default=False, required=False, help='Working Directory')
    parser.add_argument('-o', '--output', dest='output_dir', default=False, required=False, help='Output Directory')
    parser.add_argument('--owner', dest='owner', default=DEFAULT_OWNER, required=False, help='Owner')
    parser.add_argument('--obs-tn-tag', dest='obs_tn', default=DEFAULT_TAG, required=False, help='OBS tN Tag')
    parser.add_argument('--obs-tag', dest='obs', default=DEFAULT_TAG, required=False, help='OBS Tag')
    parser.add_argument('--ta-tag', dest='ta', default=DEFAULT_TAG, required=False, help='tA Tag')
    parser.add_argument('--tw-tag', dest='tw', default=DEFAULT_TAG, required=False, help='tW Tag')
    parser.add_argument('-r', '--regenerate', dest='regenerate', action='store_true',
                        help='Regenerate PDF even if exists')
    args = parser.parse_args(sys.argv[1:])
    main(args.obs_tn, args.obs, args.tw, args.ta, args.lang_codes, args.working_dir, args.output_dir, args.owner,
         args.regenerate)
| 48.570526 | 695 | 0.531446 |
from __future__ import unicode_literals, print_function
import os
import sys
import re
import logging
import argparse
import tempfile
import markdown2
import shutil
import subprocess
import json
import git
from glob import glob
from bs4 import BeautifulSoup
from ..general_tools.file_utils import write_file, read_file, load_json_object, unzip, load_yaml_object
_print = print  # keep a handle on the original print before it is shadowed below
DEFAULT_LANG = 'en'
DEFAULT_OWNER = 'unfoldingWord'
DEFAULT_TAG = 'master'  # 'master' tags are pulled rather than pinned (see clone_resource)
OWNERS = [DEFAULT_OWNER, 'STR', 'Door43-Catalog']  # fallback repo owners tried in order
# lang code -> locale file name under ../locale (see translate())
LANGUAGE_FILES = {
    'fr': 'French-fr_FR.json',
    'en': 'English-en_US.json'
}
def print(obj):
    """Debug print that deliberately shadows the builtin: JSON-dumps `obj`
    (UTF-8 encoded for Python 2 stdout) via the saved builtin `_print`."""
    _print(json.dumps(obj, ensure_ascii=False, indent=2).encode('utf-8'))
class ObsTnConverter(object):
def __init__(self, obs_tn_tag=None, obs_tag=None, tw_tag=None, ta_tag=None, working_dir=None, output_dir=None,
lang_code=DEFAULT_LANG, owner=DEFAULT_OWNER, regenerate=False, logger=None):
self.obs_tn_tag = obs_tn_tag
self.obs_tag = obs_tag
self.tw_tag = tw_tag
self.ta_tag = ta_tag
self.working_dir = working_dir
self.output_dir = output_dir
self.lang_code = lang_code
self.owner = owner
self.regenerate = regenerate
self.logger = logger
if not self.working_dir:
self.working_dir = tempfile.mkdtemp(prefix='obs-tn-')
if not self.output_dir:
self.output_dir = self.working_dir
self.logger.info('WORKING DIR IS {0} FOR {1}'.format(self.working_dir, self.lang_code))
self.obs_tn_dir = os.path.join(self.working_dir, '{0}_obs-tn'.format(lang_code))
self.obs_dir = os.path.join(self.working_dir, '{0}_obs'.format(lang_code))
self.tw_dir = os.path.join(self.working_dir, '{0}_tw'.format(lang_code))
self.ta_dir = os.path.join(self.working_dir, '{0}_ta'.format(lang_code))
self.html_dir = os.path.join(self.output_dir, 'html')
if not os.path.isdir(self.html_dir):
os.makedirs(self.html_dir)
self.manifest = None
self.tw_manifest = None
self.ta_manifest = None
self.obs_tn_text = ''
self.tw_text = ''
self.ta_text = ''
self.tw_cat = {}
self.bad_links = {}
self.bad_notes = {}
self.resource_data = {}
self.rc_references = {}
self.version = None
self.publisher = None
self.contributors = None
self.issued = None
self.file_id = None
self.my_path = os.path.dirname(os.path.realpath(__file__))
self.generation_info = {}
self.title = 'unfoldingWord® Open Bible Stories Translation Notes'
self.tw_title = 'Translation Words'
self.ta_title = 'Translation Academy'
self.translations = {}
def translate(self, key):
if not self.translations:
if self.lang_code not in LANGUAGE_FILES:
self.logger.error('No locale file for {0}.'.format(self.lang_code))
exit(1)
locale_file = os.path.join(self.my_path, '..', 'locale', LANGUAGE_FILES[self.lang_code])
if not os.path.isfile(locale_file):
self.logger.error('No locale file found at {0} for {1}.'.format(locale_file, self.lang_code))
exit(1)
self.translations = load_json_object(locale_file)
keys = key.split('.')
t = self.translations
for key in keys:
t = t.get(key, None)
if t is None:
print("No translation for `{0}`".format(key))
break
return t
def run(self):
# self.load_resource_data()
self.setup_resource_files()
self.file_id = '{0}_obs-tn_{1}_{2}'.format(self.lang_code, self.obs_tn_tag, self.generation_info['obs-tn']['commit'])
self.determine_if_regeneration_needed()
self.manifest = load_yaml_object(os.path.join(self.obs_tn_dir, 'manifest.yaml'))
self.tw_manifest = load_yaml_object(os.path.join(self.tw_dir, 'manifest.yaml'))
self.ta_manifest = load_yaml_object(os.path.join(self.ta_dir, 'manifest.yaml'))
self.version = self.manifest['dublin_core']['version']
self.title = self.manifest['dublin_core']['title']
if 'subject' in self.tw_manifest['dublin_core']:
self.tw_title = self.tw_manifest['dublin_core']['subject']
if 'subject' in self.ta_manifest['dublin_core']:
self.ta_title = self.ta_manifest['dublin_core']['subject']
self.contributors = '<br/>'.join(self.manifest['dublin_core']['contributor'])
self.publisher = self.manifest['dublin_core']['publisher']
self.issued = self.manifest['dublin_core']['issued']
self.file_id = self.file_id
self.load_tw_cat()
self.logger.info('Creating OBS tN HTML files for {0}...'.format(self.file_id))
if self.regenerate or not os.path.exists(os.path.join(self.output_dir, '{0}.html'.format(self.file_id))):
self.generate_obs_tn_content()
self.logger.info('Generating Body HTML for {0}...'.format(self.file_id))
self.generate_body_html()
self.logger.info('Generating Cover HTML for {0}...'.format(self.file_id))
self.generate_cover_html()
self.logger.info('Generating License HTML for {0}...'.format(self.file_id))
self.generate_license_html()
self.logger.info('Copying style sheet file for {0}...'.format(self.file_id))
style_file = os.path.join(self.my_path, 'obs-tn_style.css')
shutil.copy2(style_file, self.html_dir)
self.save_resource_data()
self.save_bad_links()
self.save_bad_notes()
self.logger.info('Generating PDF {0}/{1}.pdf...'.format(self.output_dir, self.file_id))
self.generate_obs_tn_pdf()
self.logger.info('PDF file can be found at {0}/{1}.pdf'.format(self.output_dir, self.file_id))
def save_bad_links(self):
bad_links = "BAD LINKS:\n"
for source_rc in sorted(self.bad_links.keys()):
for rc in sorted(self.bad_links[source_rc].keys()):
source = source_rc[5:].split('/')
parts = rc[5:].split('/')
if source[1] == 'obs-tn':
if parts[1] == 'tw':
str = ' tW'
else:
str = ' tN'
str += ' {0} {1}:{2}'.format(source[3].upper(), source[4], source[5])
else:
str = ' {0}'.format(source_rc)
str += ': BAD RC - `{0}`'.format(rc)
if self.bad_links[source_rc][rc]:
str += ' - change to `{0}`'.format(self.bad_links[source_rc][rc])
bad_links += "{0}\n".format(str)
save_file = os.path.join(self.output_dir, '{0}_bad_links.txt'.format(self.file_id))
write_file(save_file, bad_links)
self.logger.info('BAD LINKS file can be found at {0}'.format(save_file))
def save_bad_notes(self):
bad_notes = '<!DOCTYPE html><html lang="en-US"><head data-suburl=""><title>NON-MATCHING NOTES</title><meta charset="utf-8"></head><body><p>NON-MATCHING NOTES (i.e. not found in the frame text as written):</p><ul>'
for cf in sorted(self.bad_notes.keys()):
bad_notes += '<li><a href="{0}_html/{0}.html#obs-tn-{1}" title="See in the OBS tN Docs (HTML)" target="obs-tn-html">{1}</a><a href="https://git.door43.org/{6}/{2}_obs-tn/src/branch/{7}/content/{3}/{4}.md" style="text-decoration:none" target="obs-tn-git"><img src="http://www.myiconfinder.com/uploads/iconsets/16-16-65222a067a7152473c9cc51c05b85695-note.png" title="See OBS UTN note on DCS"></a><a href="https://git.door43.org/{6}/{2}_obs/src/branch/master/content/{3}.md" style="text-decoration:none" target="obs-git"><img src="https://cdn3.iconfinder.com/data/icons/linecons-free-vector-icons-pack/32/photo-16.png" title="See OBS story on DCS"></a>:<br/><i>{5}</i><br/><ul>'.format(
self.file_id, cf, self.lang_code, cf.split('-')[0], cf.split('-')[1], self.bad_notes[cf]['text'], self.owner, DEFAULT_TAG)
for note in self.bad_notes[cf]['notes']:
for key in note.keys():
if note[key]:
bad_notes += '<li><b><i>{0}</i></b><br/>{1} (QUOTE ISSUE)</li>'.format(key, note[key])
else:
bad_notes += '<li><b><i>{0}</i></b></li>'.format(key)
bad_notes += '</ul></li>'
bad_notes += "</u></body></html>"
save_file = os.path.join(self.output_dir, '{0}_bad_notes.html'.format(self.file_id))
write_file(save_file, bad_notes)
self.logger.info('BAD NOTES file can be found at {0}'.format(save_file))
@staticmethod
def get_resource_git_url(resource, lang, owner):
return 'https://git.door43.org/{0}/{1}_{2}.git'.format(owner, lang, resource)
def clone_resource(self, resource, tag=DEFAULT_TAG, url=None):
if not url:
url = self.get_resource_git_url(resource, self.lang_code, self.owner)
repo_dir = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource))
if not os.path.isdir(repo_dir):
try:
git.Repo.clone_from(url, repo_dir)
except git.GitCommandError:
owners = OWNERS
owners.insert(0, self.owner)
languages = [self.lang_code, DEFAULT_LANG]
if not os.path.isdir(repo_dir):
for lang in languages:
for owner in owners:
url = self.get_resource_git_url(resource, lang, owner)
try:
git.Repo.clone_from(url, repo_dir)
except git.GitCommandError:
continue
break
if os.path.isdir(repo_dir):
break
g = git.Git(repo_dir)
g.fetch()
g.checkout(tag)
if tag == DEFAULT_TAG:
g.pull()
commit = g.rev_parse('HEAD', short=10)
self.generation_info[resource] = {'tag': tag, 'commit': commit}
    def setup_resource_files(self):
        """Clone/update the four source repos (obs-tn, obs, tw, ta) and fetch the OBS logo."""
        self.clone_resource('obs-tn', self.obs_tn_tag)
        self.clone_resource('obs', self.obs_tag)
        self.clone_resource('tw', self.tw_tag)
        self.clone_resource('ta', self.ta_tag)
        # Download the logo only once; subsequent runs reuse the cached copy in html_dir
        if not os.path.isfile(os.path.join(self.html_dir, 'logo-obs-tn.png')):
            command = 'curl -o {0}/logo-obs-tn.png https://cdn.door43.org/assets/uw-icons/logo-obs-256.png'.format(
                self.html_dir)
            subprocess.call(command, shell=True)
    def load_tw_cat(self):
        """Populate ``self.tw_cat`` with tW rc:// links per OBS chapter/frame.

        Downloads the legacy tw_cat.json catalog (once, via curl) and, for each listed
        term, locates its markdown file under <tw_dir>/bible/{kt,names,other}. Terms
        renamed upstream are remapped via ``mapping``. Terms that cannot be resolved
        (or that needed remapping) are recorded in ``self.bad_links`` for the report.
        """
        # Legacy tw_cat term ids -> current tW article names
        mapping = {
            'idol': 'falsegod',
            'witness': 'testimony',
            'newcovenant': 'covenant',
            'taxcollector': 'tax',
            'believer': 'believe'
        }
        tw_cat_file = os.path.join(self.working_dir, 'tw_cat.json')
        if not os.path.isfile(tw_cat_file):
            command = 'curl -o {0} https://cdn.door43.org/v2/ts/obs/en/tw_cat.json'.format(
                tw_cat_file)
            subprocess.call(command, shell=True)
        tw_cat = load_json_object(tw_cat_file)
        for chapter in tw_cat['chapters']:
            self.tw_cat[chapter['id']] = {}
            for frame in chapter['frames']:
                self.tw_cat[chapter['id']][frame['id']] = []
                for item in frame['items']:
                    term = item['id']
                    category = None
                    # Find which tW category holds this term's article
                    for c in ['kt', 'names', 'other']:
                        if os.path.exists(os.path.join(self.tw_dir, 'bible', c, '{0}.md'.format(term))):
                            category = c
                            break
                    # Not found as-is: retry under the remapped term name
                    if not category and term in mapping:
                        category = None
                        for c in ['kt', 'names', 'other']:
                            if os.path.exists(os.path.join(self.tw_dir, 'bible', c, '{0}.md'.format(mapping[term]))):
                                category = c
                                term = mapping[term]
                                break
                    if category:
                        self.tw_cat[chapter['id']][frame['id']].append('rc://{0}/tw/dict/bible/{1}/{2}'.format(
                            self.lang_code, category, term))
                    # Record unresolved or remapped terms (fix is the new name, or None)
                    if not category or term != item['id']:
                        fix = None
                        if term != item['id']:
                            fix = term
                        source_rc = 'tw_cat.json {0}/{1}'.format(chapter['id'], frame['id'])
                        if source_rc not in self.bad_links:
                            self.bad_links[source_rc] = {}
                        self.bad_links[source_rc][item['id']] = fix
def determine_if_regeneration_needed(self):
# check if any commit hashes have changed
old_info = self.get_previous_generation_info()
if not old_info:
self.logger.info('Looks like this is a new commit of {0}. Generating PDF.'.format(self.file_id))
self.regenerate = True
else:
for resource in self.generation_info:
if resource in old_info and resource in self.generation_info \
and (old_info[resource]['tag'] != self.generation_info[resource]['tag']
or old_info[resource]['commit'] != self.generation_info[resource]['commit']):
self.logger.info('Resource {0} has changed: {1} => {2}, {3} => {4}. REGENERATING PDF.'.format(
resource, old_info[resource]['tag'], self.generation_info[resource]['tag'],
old_info[resource]['commit'], self.generation_info[resource]['commit']
))
self.regenerate = True
def get_contributors_html(self):
if self.contributors and len(self.contributors):
return '''
<div id="contributors" class="article">
<h1 class="section-header">{0}</h1>
<p>
{1}
</p>
</div>
'''.format(self.translate('contributors'), self.contributors)
else:
return ''
def save_resource_data(self):
save_dir = os.path.join(self.output_dir, 'save')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_file = os.path.join(save_dir, '{0}_resource_data.json'.format(self.file_id))
write_file(save_file, self.resource_data)
save_file = os.path.join(save_dir, '{0}_references.json'.format(self.file_id))
write_file(save_file, self.rc_references)
save_file = os.path.join(save_dir, '{0}_bad_links.json'.format(self.file_id))
write_file(save_file, self.bad_links)
save_file = os.path.join(save_dir, '{0}_bad_notes.json'.format(self.file_id))
write_file(save_file, self.bad_notes)
save_file = os.path.join(save_dir, '{0}_generation_info.json'.format(self.file_id))
write_file(save_file, self.generation_info)
def get_previous_generation_info(self):
save_dir = os.path.join(self.output_dir, 'save')
save_file = os.path.join(save_dir, '{0}_generation_info.json'.format(self.file_id))
if os.path.isfile(save_file):
return load_json_object(save_file)
else:
return {}
def load_resource_data(self):
save_dir = os.path.join(self.output_dir, 'save')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_file = os.path.join(save_dir, '{0}_resource_data.json'.format(self.file_id))
if os.path.isfile(save_file):
self.resource_data = load_json_object(save_file)
save_file = os.path.join(save_dir, '{0}_references.json'.format(self.file_id))
if os.path.isfile(save_file):
self.rc_references = load_json_object(save_file)
save_file = os.path.join(save_dir, '{0}_bad_links.json'.format(self.file_id))
if os.path.isfile(save_file):
self.bad_links = load_json_object(save_file)
    def generate_body_html(self):
        """Assemble the full document body (tN + tW + tA + contributors) and write it.

        Joins the pre-built sections, resolves rc:// links, wraps everything in a
        minimal HTML shell and post-processes headers with BeautifulSoup before
        writing <output_dir>/<file_id>.html.
        """
        obs_tn_html = self.obs_tn_text
        ta_html = self.get_ta_html()
        tw_html = self.get_tw_html()
        contributors_html = self.get_contributors_html()
        html = '\n'.join([obs_tn_html, tw_html, ta_html, contributors_html])
        html = self.replace_rc_links(html)
        html = self.fix_links(html)
        html = '''<!DOCTYPE html>
<html lang="en-US">
  <head data-suburl="">
    <meta charset="utf-8"/>
    <title>{0} - v{1}</title>
  </head>
  <body>
{2}
  </body>
</html>
'''.format(self.title, self.version, html)
        soup = BeautifulSoup(html, 'html.parser')
        # Make all headers that have a header right before them non-break
        for h in soup.find_all(['h2', 'h3', 'h4', 'h5', 'h6']):
            prev = h.find_previous_sibling()
            if prev and re.match('^h[2-6]$', prev.name):
                h['class'] = h.get('class', []) + ['no-break']
        # Make all headers within the page content to just be span tags with h# classes
        for h in soup.find_all(['h3', 'h4', 'h5', 'h6']):
            if not h.get('class') or 'section-header' not in h['class']:
                h['class'] = h.get('class', []) + [h.name]
                h.name = 'span'
        soup.head.append(soup.new_tag('link', href="html/obs-tn_style.css", rel="stylesheet"))
        html_file = os.path.join(self.output_dir, '{0}.html'.format(self.file_id))
        # NOTE: `unicode` is a Python 2 builtin — this module runs under Python 2
        write_file(html_file, unicode(soup))
        self.logger.info('Wrote HTML to {0}'.format(html_file))
    def generate_cover_html(self):
        """Write the cover page (logo, title, version) to <html_dir>/<file_id>_cover.html."""
        cover_html = '''
<!DOCTYPE html>
<html>
  <head>
    <meta charset="UTF-8" />
    <link href="obs-tn_style.css" rel="stylesheet"/>
  </head>
  <body>
    <div style="text-align:center;padding-top:200px" class="break" id="cover">
      <img src="logo-obs-tn.png" width="120">
      <span class="h1">{0}</span>
      <span class="h3">{1} {2}</span>
    </div>
  </body>
</html>
'''.format(self.title, self.translate('license.version'), self.version)
        html_file = os.path.join(self.html_dir, '{0}_cover.html'.format(self.file_id))
        write_file(html_file, cover_html)
def generate_license_html(self):
license_file = os.path.join(self.obs_tn_dir, 'LICENSE.md')
license = markdown2.markdown_path(license_file)
license_html = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<link href="obs-tn_style.css" rel="stylesheet"/>
</head>
<body>
<div class="break">
<span class="h1">{4}</span>
<p>
<strong>{5}:</strong> {0}<br/>
<strong>{6}:</strong> {1}<br/>
<strong>{7}:</strong> {2}<br/>
</p>
{3}
</div>
</body>
</html>
'''.format(self.issued, self.version, self.publisher, license,
self.translate('license.copyrights_and_licensing'),
self.translate('license.date'),
self.translate('license.version'),
self.translate('license.published_by'))
html_file = os.path.join(self.html_dir, '{0}_license.html'.format(self.file_id))
write_file(html_file, license_html)
    def generate_obs_tn_pdf(self):
        """Render the final PDF with wkhtmltopdf (cover, license, TOC, then the body).

        Builds the multi-line command, collapses it to one line and runs it through
        the shell. The run-script poll works around wkhtmltopdf rendering before the
        page's JavaScript has finished.
        """
        cover_file = os.path.join(self.html_dir, '{0}_cover.html'.format(self.file_id))
        license_file = os.path.join(self.html_dir, '{0}_license.html'.format(self.file_id))
        header_file = os.path.join(self.my_path, 'obs-tn_header.html')
        footer_file = os.path.join(self.my_path, 'obs-tn_footer.html')
        body_file = os.path.join(self.output_dir, '{0}.html'.format(self.file_id))
        output_file = os.path.join(self.output_dir, '{0}.pdf'.format(self.file_id))
        # Language-specific XSL controls the generated table of contents
        template_file = os.path.join(self.my_path, '{0}_toc_template.xsl'.format(self.lang_code))
        command = '''wkhtmltopdf 
                        --javascript-delay 2000 
                        --debug-javascript
                        --cache-dir "{6}"
                        --run-script "setInterval(function(){{if(document.readyState=='complete') setTimeout(function() {{window.status='done';}}, 100);}},200)"
                        --encoding utf-8 
                        --outline-depth 3 
                        -O portrait 
                        -L 15 -R 15 -T 15 -B 15 
                        --header-html "{0}" 
                        --header-spacing 2 
                        --footer-html "{7}" 
                        cover "{1}" 
                        cover "{2}" 
                        toc 
                        --disable-dotted-lines 
                        --enable-external-links 
                        --xsl-style-sheet "{3}" 
                        --toc-header-text "{8}"
                        "{4}" 
                        "{5}"
'''.format(header_file, cover_file, license_file, template_file, body_file, output_file,
           os.path.join(self.working_dir, 'wkhtmltopdf'), footer_file,
           self.translate('table_of_contents'))
        # Collapse the readable multi-line template into a single shell command line
        command = re.sub(r'\s+', ' ', command, flags=re.MULTILINE)
        self.logger.info(command)
        subprocess.call(command, shell=True)
@staticmethod
def highlight_text(text, note):
parts = re.split(r"\s*…\s*|\s*\.\.\.\s*", note)
processed_text = ''
to_process_text = text
for idx, part in enumerate(parts):
split_pattern = re.escape(part)
if '<span' in text:
split_pattern = '({0})'.format(re.sub('(\\\\ )+', '(\s+|(\s*</*span[^>]*>\s*)+)', split_pattern))
else:
split_pattern = '({0})'.format(split_pattern)
splits = re.split(split_pattern, to_process_text, 1)
processed_text += splits[0]
if len(splits) > 1:
processed_text += '<span class="highlight{0}">{1}</span>'.format(' split' if len(parts) > 1 else '',
splits[1])
if len(splits) > 2:
to_process_text = splits[-1]
if to_process_text:
processed_text += to_process_text
return processed_text
def highlight_text_with_frame(self, orig_text, frame_html, cf):
ignore = ['A Bible story from', 'Connecting Statement', 'Connecting Statement:',
'General Information', 'General Note', 'Information générale',
'Termes Importants', 'Une histoire biblique tirée de', 'Une histoire de la Bible tirée de',
'Une histoire de la Bible à partir', 'Une histoire de la Bible à partir de',
'Mots de Traduction', 'Nota geral', 'Déclaration de connexion', 'Cette histoire biblique est tirée',
'Une histoire biblique tirée de:', 'Informations générales', 'Information Générale']
highlighted_text = orig_text
phrases = []
soup = BeautifulSoup(frame_html, 'html.parser')
headers = soup.find_all('h4')
for header in headers:
phrases.append(header.text)
phrases.sort(key=len, reverse=True)
for phrase in phrases:
new_highlighted_text = self.highlight_text(highlighted_text, phrase)
if new_highlighted_text != highlighted_text:
highlighted_text = new_highlighted_text
elif phrase not in ignore:
if cf not in self.bad_notes:
self.bad_notes[cf] = {
'text': orig_text,
'notes': []
}
bad_note = {phrase: None}
alt_notes = [
phrase.replace('‘', "'").replace('’', "'").replace('“', '"').replace('”', '"'),
phrase.replace("'", '’').replace('’', '‘', 1).replace('"', '”').replace('”', '“', 1),
phrase.replace('‘', "'").replace('’', "'").replace('“', '"').replace('”', '"'),
phrase.replace("'", '’').replace('’', '‘', 1).replace('"', '”').replace('”', '“', 1),
phrase.replace('“', '"').replace('”', '"'),
phrase.replace('"', '”').replace('”', '“', 1),
phrase.replace("'", '’').replace('’', '‘', 1),
phrase.replace("'", '’'),
phrase.replace('’', "'"),
phrase.replace('‘', "'")]
for alt_note in alt_notes:
if orig_text != self.highlight_text(orig_text, alt_note):
bad_note[phrase] = alt_note
break
self.bad_notes[cf]['notes'].append(bad_note)
return highlighted_text
def generate_obs_tn_content(self):
content = '''
<div id="obs-tn" class="resource-title-page">
<h1 class="section-header">{0}</h1>
</div>
'''.format(self.title.replace('unfoldingWord® ', ''))
chapter_dirs = sorted(glob(os.path.join(self.obs_tn_dir, 'content', '*')))
for chapter_dir in chapter_dirs:
if os.path.isdir(chapter_dir):
chapter = os.path.basename(chapter_dir)
soup = BeautifulSoup(
markdown2.markdown_path(os.path.join(self.obs_dir, 'content', '{0}.md'.format(chapter))),
'html.parser')
title = soup.h1.text
paragraphs = soup.find_all('p')
frames = []
for idx, p in enumerate(paragraphs): # iterate over loop [above sections]
if idx % 2:
frames.append(p.text)
content += '<div id="chapter-{0}" class="chapter break">\n\n'.format(chapter)
content += '<h2>{0}</h2>\n'.format(title)
frame_files = sorted(glob(os.path.join(chapter_dir, '*.md')))
for frame_file in frame_files:
frame = os.path.splitext(os.path.basename(frame_file))[0]
frame_idx = int(frame)
id = 'obs-tn-{0}-{1}'.format(chapter, frame)
content += '<div id="{0}" class="frame">\n'.format(id)
content += '<h3>{0}:{1}</h3>\n'.format(chapter, frame)
text = ''
if frame_idx > 0:
text = re.sub(r'[\n\s]+', ' ', frames[frame_idx - 1], flags=re.MULTILINE)
frame_html = markdown2.markdown_path(frame_file)
frame_html = frame_html.replace('h1>', 'h4>')
frame_html = frame_html.replace('h2>', 'h5>')
frame_html = frame_html.replace('h3>', 'h6>')
frame_html = re.sub(r'href="(\d+)/(\d+)"', r'href="
if text:
text = self.highlight_text_with_frame(text, frame_html, '{0}:{1}'.format(chapter, frame))
if '/tw/' not in frame_html and chapter in self.tw_cat and frame in self.tw_cat[chapter]\
and len(self.tw_cat[chapter][frame]):
frame_html += "<h3>{0}</h3>\n<ul>".format(self.tw_title)
for rc in self.tw_cat[chapter][frame]:
frame_html += '<li>[[{0}]]</li>'.format(rc)
frame_html += '</ul>'
content += '<div id="{0}-text" class="frame-text">\n{1}\n</div>\n'.format(id, text)
content += frame_html
content += '</div>\n\n'
# HANDLE RC LINKS
rc = 'rc://{0}/obs-tn/help/{1}/{2}'.format(self.lang_code, chapter, frame)
self.resource_data[rc] = {
'rc': rc,
'id': id,
'link': '#' + id,
'title': title
}
self.get_resource_data_from_rc_links(frame_html, rc)
content += '</div>\n\n'
self.obs_tn_text = content
write_file(os.path.join(self.html_dir, '{0}_obs-tn_content.html'.format(self.file_id)),
BeautifulSoup(content, 'html.parser').prettify())
def get_tw_html(self):
tw_html = ''
sorted_rcs = sorted(self.resource_data.keys(), key=lambda k: self.resource_data[k]['title'].lower())
for rc in sorted_rcs:
if '/tw/' not in rc:
continue
reference_text = self.get_reference_text(rc)
if not reference_text:
continue
html = self.resource_data[rc]['text']
html = self.increase_headers(html)
title = self.resource_data[rc]['title']
alt_title = self.resource_data[rc]['alt_title']
if alt_title:
html = '<h2 class="hidden">{0}11</h2><span class="h2 section-header">{1}</span>\n{2}'.\
format(alt_title, title, html)
else:
html = '<h2 class="section-header">{0}</h2>\n{1}'.format(title, html)
tw_html += '<div id="{0}" class="article">\n{1}\n{2}</div>\n\n'.format(self.resource_data[rc]['id'], html,
reference_text)
if tw_html:
tw_html = '<div id="tw" class="resource-title-page">\n<h1 class="section-header">{0}</h1>\n</div>\n\n{1}'.\
format(self.tw_title, tw_html)
return tw_html
def get_ta_html(self):
ta_html = ''
sorted_rcs = sorted(self.resource_data.keys(), key=lambda k: self.resource_data[k]['title'].lower())
for rc in sorted_rcs:
if '/ta/' not in rc:
continue
reference_text = self.get_reference_text(rc)
if not reference_text:
continue
if self.resource_data[rc]['text']:
ta_html += '''
<div id="{0}" class="article">
<h2 class="section-header">{1}</h2>
<div class="top-box box">
<div class="ta-question">
{2}: <em>{3}</em>
</div>
</div>
{4}
{5}
</div>
'''.format(self.resource_data[rc]['id'], self.resource_data[rc]['title'],
self.translate('this_page_answers_the_question'),
self.resource_data[rc]['alt_title'],
self.increase_headers(self.resource_data[rc]['text']), self.get_reference_text(rc))
if ta_html:
ta_html = '<div id="ta" class="resource-title-page">\n<h1 class="section-header">{0}</h1>\n</div>\n\n{1}'.\
format(self.ta_title, ta_html)
return ta_html
def has_tn_references(self, rc):
if rc not in self.rc_references:
return False
for reference in self.rc_references[rc]:
if '/obs-tn/' in reference:
return True
return False
def get_reference_text(self, rc):
if not self.has_tn_references(rc):
return ''
uses = ''
references = []
done = {}
for reference in self.rc_references[rc]:
if '/obs-tn/' in reference and reference not in done:
parts = reference[5:].split('/')
id = 'obs-tn-{0}-{1}'.format(parts[3], parts[4])
text = '{0}:{1}'.format(parts[3], parts[4])
references.append('<a href="
done[reference] = True
if len(references):
uses = '<p class="go-back">\n(<b>{0}:</b> {1})\n</p>\n'.format(self.translate('go_back_to'),
'; '.join(references))
return uses
    def get_resource_data_from_rc_links(self, text, source_rc):
        """Crawl ``text`` for tA/tW links, loading each linked article recursively.

        Found rc:// links are resolved to markdown files in the cloned repos; each
        article is parsed once into ``self.resource_data`` (title, alt title, body,
        anchor) and its own links are crawled in turn. Back-references are tracked in
        ``self.rc_references``. Unresolvable or relative-path links are recorded in
        ``self.bad_links[source_rc]`` (value is a suggested fix, or None).
        """
        if source_rc not in self.bad_links:
            self.bad_links[source_rc] = {}
        rcs = re.findall(r'rc://[A-Z0-9/_\*-]+', text, flags=re.IGNORECASE | re.MULTILINE)
        for rc in rcs:
            parts = rc[5:].split('/')
            resource = parts[1]
            path = '/'.join(parts[3:])
            # Only tA and tW articles are pulled into the appendices
            if resource not in ['ta', 'tw']:
                continue
            if rc not in self.rc_references:
                self.rc_references[rc] = []
            if source_rc not in self.rc_references[rc]:
                self.rc_references[rc].append(source_rc)
            title = ''
            t = ''
            anchor_id = '{0}-{1}'.format(resource, path.replace('/', '-'))
            link = '#{0}'.format(anchor_id)
            # tW articles live at <path>.md, tA articles at <path>/01.md
            file_path = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource),
                                     '{0}.md'.format(path))
            if not os.path.isfile(file_path):
                file_path = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource),
                                         '{0}/01.md'.format(path))
            fix = None
            if not os.path.isfile(file_path):
                # Link target missing: try known corrections (wrong tW category,
                # known misspelled tA slugs) and remember the fix for the report
                if resource == 'tw':
                    for category in ['kt', 'other', 'names']:
                        path2 = re.sub(r'^bible/([^/]+)/', r'bible/{0}/'.format(category), path.lower())
                        fix = 'rc://{0}/tw/dict/{1}'.format(self.lang_code, path2)
                        anchor_id = '{0}-{1}'.format(resource, path2.replace('/', '-'))
                        link = '#{0}'.format(anchor_id)
                        file_path = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource),
                                                 '{0}.md'.format(path2))
                        if os.path.isfile(file_path):
                            break
                elif resource == 'ta':
                    bad_names = {
                        'figs-abstractnoun': 'translate/figs-abstractnouns'
                    }
                    if parts[3] in bad_names:
                        path2 = bad_names[parts[3]]
                    else:
                        path2 = path
                    fix = 'rc://{0}/ta/man/{1}'.format(self.lang_code, path2)
                    anchor_id = '{0}-{1}'.format(resource, path2.replace('/', '-'))
                    link = '#{0}'.format(anchor_id)
                    file_path = os.path.join(self.working_dir, '{0}_{1}'.format(self.lang_code, resource),
                                             '{0}/01.md'.format(path2))
            if os.path.isfile(file_path):
                if fix:
                    self.bad_links[source_rc][rc] = fix
                if not rc in self.resource_data:
                    t = markdown2.markdown_path(file_path)
                    alt_title = ''
                    if resource == 'ta':
                        # tA keeps title/sub-title in sibling files next to 01.md
                        title_file = os.path.join(os.path.dirname(file_path), 'title.md')
                        question_file = os.path.join(os.path.dirname(file_path), 'sub-title.md')
                        if os.path.isfile(title_file):
                            title = read_file(title_file)
                        else:
                            title = self.get_first_header(t)
                            t = re.sub(r'\s*\n*\s*<h\d>[^<]+</h\d>\s*\n*', r'', t, 1,
                                       flags=re.IGNORECASE | re.MULTILINE)  # removes the header
                        if os.path.isfile(question_file):
                            alt_title = read_file(question_file)
                        t = self.fix_ta_links(t, path.split('/')[0])
                    elif resource == 'tw':
                        title = self.get_first_header(t)
                        t = re.sub(r'\s*\n*\s*<h\d>[^<]+</h\d>\s*\n*', r'', t, 1,
                                   flags=re.IGNORECASE | re.MULTILINE)  # removes the header
                        if len(title) > 70:
                            # Long tW titles get a shortened alt title for the outline
                            alt_title = ','.join(title[:70].split(',')[:-1]) + ', ...'
                        t = re.sub(r'\n*\s*\(See [^\n]*\)\s*\n*', '\n\n', t,
                                   flags=re.IGNORECASE | re.MULTILINE)  # removes the See also line
                        t = self.fix_tw_links(t, path.split('/')[1])
                    self.resource_data[rc] = {
                        'rc': rc,
                        'link': link,
                        'id': anchor_id,
                        'title': title,
                        'alt_title': alt_title,
                        'text': t,
                        'references': [source_rc]
                    }
                    # Recurse into the article we just loaded
                    self.get_resource_data_from_rc_links(t, rc)
                else:
                    if source_rc not in self.resource_data[rc]['references']:
                        self.resource_data[rc]['references'].append(source_rc)
            else:
                if rc not in self.bad_links[source_rc]:
                    self.bad_links[source_rc][rc] = None
        # Relative markdown links like (../kt/term.md) that should have been rc:// links
        rcs = re.findall(r'(?<=\()\.+/[^\)]+(?=\))', text, flags=re.IGNORECASE | re.MULTILINE)
        for rc in rcs:
            fix = re.sub(r'(\.\./)+(kt|names|other)/([^)]+?)(\.md)*', r'rc://{0}/tw/dict/bible/\2/\3'.
                         format(self.lang_code), rc, flags=re.IGNORECASE)
            if fix != rc:
                self.bad_links[source_rc][rc] = fix
            else:
                self.bad_links[source_rc][rc] = None
        # Same, for dot-relative links without a trailing slash segment
        rcs = re.findall(r'(?<=\()\.[^ \)]+(?=\))', text, flags=re.IGNORECASE | re.MULTILINE)
        for rc in rcs:
            fix = None
            if '/kt/' in rc or '/names/' in rc or '/other/' in rc:
                new_rc = re.sub(r'(\.\./)+(kt|names|other)/([^)]+?)(\.md)*', r'rc://{0}/tw/dict/bible/\2/\3'.
                                format(self.lang_code), rc, flags=re.IGNORECASE)
                if new_rc != rc:
                    fix = new_rc
            self.bad_links[source_rc][rc] = fix
@staticmethod
def increase_headers(text, increase_depth=1):
if text:
for num in range(5, 0, -1):
text = re.sub(r'<h{0}>\s*(.+?)\s*</h{0}>'.format(num), r'<h{0}>\1</h{0}>'.format(num + increase_depth),
text, flags=re.MULTILINE)
return text
@staticmethod
def decrease_headers(text, minimum_header=1, decrease=1):
if text:
for num in range(minimum_header, minimum_header + 10):
text = re.sub(r'<h{0}>\s*(.+?)\s*</h{0}>'.format(num),
r'<h{0}>\1</h{0}>'.format(num - decrease if (num - decrease) <= 5 else 5), text,
flags=re.MULTILINE)
return text
@staticmethod
def get_first_header(text):
lines = text.split('\n')
if len(lines):
for line in lines:
if re.match(r'<h1>', line):
return re.sub(r'<h1>(.*?)</h1>', r'\1', line)
return lines[0]
return "NO TITLE"
def fix_tw_links(self, text, group):
text = re.sub(r'href="\.\./([^/)]+?)(\.md)*"', r'href="rc://{0}/tw/dict/bible/{1}/\1"'.
format(self.lang_code, group), text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'href="\.\./([^)]+?)(\.md)*"', r'href="rc://{0}/tw/dict/bible/\1"'.format(self.lang_code),
text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'(\(|\[\[)(\.\./)*(kt|names|other)/([^)]+?)(\.md)*(\)|\]\])(?!\[)',
r'[[rc://{0}/tw/dict/bible/\3/\4]]'.format(self.lang_code), text,
flags=re.IGNORECASE | re.MULTILINE)
return text
def fix_ta_links(self, text, manual):
text = re.sub(r'href="\.\./([^/"]+)/01\.md"', r'href="rc://{0}/ta/man/{1}/\1"'.format(self.lang_code, manual),
text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'href="\.\./\.\./([^/"]+)/([^/"]+)/01\.md"', r'href="rc://{0}/ta/man/\1/\2"'.
format(self.lang_code), text, flags=re.IGNORECASE | re.MULTILINE)
text = re.sub(r'href="([^# :/"]+)"', r'href="rc://{0}/ta/man/{1}/\1"'.format(self.lang_code, manual), text,
flags=re.IGNORECASE | re.MULTILINE)
return text
def replace(self, m):
before = m.group(1)
rc = m.group(2)
after = m.group(3)
if rc not in self.resource_data:
return m.group()
info = self.resource_data[rc]
if (before == '[[' and after == ']]') or (before == '(' and after == ')') or before == ' ' \
or (before == '>' and after == '<'):
return '<a href="{0}">{1}</a>'.format(info['link'], info['title'])
if (before == '"' and after == '"') or (before == "'" and after == "'"):
return info['link']
self.logger.error("FOUND SOME MALFORMED RC LINKS: {0}".format(m.group()))
return m.group()
    def replace_rc_links(self, text):
        """Replace every rc:// link in ``text`` with an in-document anchor or plain title.

        Uses a single regex pass with a closure so each match can be resolved against
        ``self.resource_data`` and ``self.has_tn_references``.
        """
        # Change rc://... rc links,
        # Case 1: [[rc://en/tw/help/bible/kt/word]] => <a href="#tw-kt-word">God's Word</a>
        # Case 2: rc://en/tw/help/bible/ht/word => <a href="#tw-kt-word">God's Word</a>
        # Case 3: <a href="rc://en/tw/help/bible/kt/word">text</a> => <a href="#tw-kt-word>Text</a> (used in links that are already formed)
        # Case 5: Link from a TA or TW article that was not referenced in a TN. Remove the link
        # Case 4: Remove other links to other resources not in this tN
        def replace_rc(match):
            left = match.group(1)
            rc = match.group(2)
            right = match.group(3)
            title = match.group(4)
            if rc in self.resource_data:
                info = self.resource_data[rc]
                if not self.has_tn_references(rc):
                    # Case 4
                    return info['title']
                if (left and right and left == '[[' and right == ']]') \
                        or (not left and not right):
                    # Case 1 and Case 2
                    return '<a href="{0}">{1}</a>'.format(info['link'], info['title'])
                else:
                    # Case 3
                    return (left if left else '') + info['link'] + (right if right else '')
            else:
                # Case 5
                return title if title else rc
        regex = re.compile(r'(\[\[|<a[^>]+href=")*(rc://[/A-Za-z0-9\*_-]+)(\]\]|"[^>]*>(.*?)</a>)*')
        text = regex.sub(replace_rc, text)
        return text
@staticmethod
def fix_links(text):
# Change [[http.*]] to <a href="http\1">http\1</a>
text = re.sub(r'\[\[http([^\]]+)\]\]', r'<a href="http\1">http\1</a>', text, flags=re.IGNORECASE)
# convert URLs to links if not already
text = re.sub(r'([^">])((http|https|ftp)://[A-Za-z0-9\/\?&_\.:=#-]+[A-Za-z0-9\/\?&_:=#-])',
r'\1<a href="\2">\2</a>', text, flags=re.IGNORECASE)
# URLS wth just www at the start, no http
text = re.sub(r'([^\/])(www\.[A-Za-z0-9\/\?&_\.:=#-]+[A-Za-z0-9\/\?&_:=#-])', r'\1<a href="http://\2">\2</a>',
text, flags=re.IGNORECASE)
# Removes leading 0s from verse references
text = re.sub(r' 0*(\d+):0*(\d+)(-*)0*(\d*)', r' \1:\2\3\4', text, flags=re.IGNORECASE | re.MULTILINE)
return text
def main(obs_tn_tag, obs_tag, tw_tag, ta_tag, lang_codes, working_dir, output_dir, owner, regenerate):
    """Run the OBS tN converter once per requested language.

    Sets up DEBUG console logging, resolves working/output dirs from the WORKING_DIR
    and OUTPUT_DIR environment variables when not given, and instantiates/runs an
    ObsTnConverter for each language code (defaulting to DEFAULT_LANG).
    """
    if not obs_tag:
        # Fall back to the default tag. (The original read `args.obs_sn` from the
        # module namespace — an argparse attribute that is never defined, so this
        # branch raised AttributeError and depended on a hidden global.)
        obs_tag = DEFAULT_TAG
    if not lang_codes:
        lang_codes = [DEFAULT_LANG]
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    if not working_dir and 'WORKING_DIR' in os.environ:
        working_dir = os.environ['WORKING_DIR']
        print('Using env var WORKING_DIR: {0}'.format(working_dir))
    if not output_dir and 'OUTPUT_DIR' in os.environ:
        output_dir = os.environ['OUTPUT_DIR']
        print('Using env var OUTPUT_DIR: {0}'.format(output_dir))
    for lang_code in lang_codes:
        _print('Starting OBS TN Converter for {0}...'.format(lang_code))
        obs_tn_converter = ObsTnConverter(obs_tn_tag, obs_tag, tw_tag, ta_tag, working_dir, output_dir, lang_code,
                                          owner, regenerate, logger)
        obs_tn_converter.run()
# Command-line entry point: parse the CLI flags and hand off to main()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    # -l may be given multiple times to build several languages in one run
    parser.add_argument('-l', '--lang', dest='lang_codes', required=False, help='Language Code(s)', action='append')
    parser.add_argument('-w', '--working', dest='working_dir', default=False, required=False, help='Working Directory')
    parser.add_argument('-o', '--output', dest='output_dir', default=False, required=False, help='Output Directory')
    parser.add_argument('--owner', dest='owner', default=DEFAULT_OWNER, required=False, help='Owner')
    # Per-resource git tags; all default to DEFAULT_TAG
    parser.add_argument('--obs-tn-tag', dest='obs_tn', default=DEFAULT_TAG, required=False, help='OBS tN Tag')
    parser.add_argument('--obs-tag', dest='obs', default=DEFAULT_TAG, required=False, help='OBS Tag')
    parser.add_argument('--ta-tag', dest='ta', default=DEFAULT_TAG, required=False, help='tA Tag')
    parser.add_argument('--tw-tag', dest='tw', default=DEFAULT_TAG, required=False, help='tW Tag')
    parser.add_argument('-r', '--regenerate', dest='regenerate', action='store_true',
                        help='Regenerate PDF even if exists')
    args = parser.parse_args(sys.argv[1:])
    main(args.obs_tn, args.obs, args.tw, args.ta, args.lang_codes, args.working_dir, args.output_dir, args.owner,
         args.regenerate)
| true | true |
f7fd1584fa1051d2a90cee0d89bad3976d5ce615 | 196 | py | Python | polyaxon_schemas/ops/tensorboard/__init__.py | orf/polyaxon-schemas | dce55df25ae752fc3fbf465ea53add126746d630 | [
"MIT"
] | null | null | null | polyaxon_schemas/ops/tensorboard/__init__.py | orf/polyaxon-schemas | dce55df25ae752fc3fbf465ea53add126746d630 | [
"MIT"
] | null | null | null | polyaxon_schemas/ops/tensorboard/__init__.py | orf/polyaxon-schemas | dce55df25ae752fc3fbf465ea53add126746d630 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from polyaxon_schemas.ops.tensorboard.op import ( # noqa
TensorboardConfig,
TensorboardSchema,
)
| 24.5 | 64 | 0.75 |
from __future__ import absolute_import, division, print_function
from polyaxon_schemas.ops.tensorboard.op import (
TensorboardConfig,
TensorboardSchema,
)
| true | true |
f7fd16037406f3f8a3401ff842b1c078646b9673 | 1,545 | py | Python | flexget/components/estimate_release/estimators/est_movies_bluray.py | metaMMA/Flexget | a38986422461d7935ead1e2b4ed4c88bcd0a90f5 | [
"MIT"
] | null | null | null | flexget/components/estimate_release/estimators/est_movies_bluray.py | metaMMA/Flexget | a38986422461d7935ead1e2b4ed4c88bcd0a90f5 | [
"MIT"
] | 1 | 2017-10-09T23:06:44.000Z | 2017-10-09T23:06:44.000Z | flexget/components/estimate_release/estimators/est_movies_bluray.py | metaMMA/Flexget | a38986422461d7935ead1e2b4ed4c88bcd0a90f5 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
import datetime
from flexget import plugin
from flexget.event import event
from flexget.utils.database import Session
log = logging.getLogger('est_movies_bluray')
class EstimatesMoviesBluray(object):
    """FlexGet release estimator that looks up movie release dates on Blu-ray.com."""

    @plugin.priority(2)
    def estimate(self, entry):
        """Return the Blu-ray.com release date for ``entry``'s movie, or None.

        Skips entries without a 'movie_name' and movies dated in the future
        (Blu-ray.com would have nothing useful). Lookup failures are logged at
        debug level and treated as "no estimate".
        """
        if 'movie_name' not in entry:
            return
        movie_name = entry['movie_name']
        movie_year = entry.get('movie_year')
        if movie_year is not None and movie_year > datetime.datetime.now().year:
            log.debug('Skipping Blu-ray.com lookup since movie year is %s', movie_year)
            return
        log.debug(
            'Searching Blu-ray.com for release date of {} ({})'.format(movie_name, movie_year)
        )
        release_date = None
        try:
            with Session() as session:
                # Delegate the actual scraping/caching to the api_bluray plugin
                lookup = plugin.get('api_bluray', self).lookup
                movie = lookup(title=movie_name, year=movie_year, session=session)
                if movie:
                    release_date = movie.release_date
        except LookupError as e:
            log.debug(e)
        if release_date:
            log.debug('received release date: {0}'.format(release_date))
        return release_date
@event('plugin.register')
def register_plugin():
    """Register this estimator with FlexGet under the 'estimate_release' interface."""
    plugin.register(
        EstimatesMoviesBluray, 'est_movies_bluray', interfaces=['estimate_release'], api_ver=2
    )
| 30.294118 | 94 | 0.649191 | from __future__ import unicode_literals, division, absolute_import
from builtins import *
import logging
import datetime
from flexget import plugin
from flexget.event import event
from flexget.utils.database import Session
log = logging.getLogger('est_movies_bluray')
class EstimatesMoviesBluray(object):
@plugin.priority(2)
def estimate(self, entry):
if 'movie_name' not in entry:
return
movie_name = entry['movie_name']
movie_year = entry.get('movie_year')
if movie_year is not None and movie_year > datetime.datetime.now().year:
log.debug('Skipping Blu-ray.com lookup since movie year is %s', movie_year)
return
log.debug(
'Searching Blu-ray.com for release date of {} ({})'.format(movie_name, movie_year)
)
release_date = None
try:
with Session() as session:
lookup = plugin.get('api_bluray', self).lookup
movie = lookup(title=movie_name, year=movie_year, session=session)
if movie:
release_date = movie.release_date
except LookupError as e:
log.debug(e)
if release_date:
log.debug('received release date: {0}'.format(release_date))
return release_date
@event('plugin.register')
def register_plugin():
plugin.register(
EstimatesMoviesBluray, 'est_movies_bluray', interfaces=['estimate_release'], api_ver=2
)
| true | true |
f7fd166abd3a03bac5909e498669b482447435cf | 2,178 | py | Python | utils/pitch_utils.py | ishine/DiffSinger-1 | 9a5baf553f635f088ca110aa22e87b67ece6e947 | [
"MIT"
] | 288 | 2021-12-19T04:02:00.000Z | 2022-03-27T16:13:44.000Z | utils/pitch_utils.py | ishine/DiffSinger-1 | 9a5baf553f635f088ca110aa22e87b67ece6e947 | [
"MIT"
] | 44 | 2021-12-27T07:11:20.000Z | 2022-03-29T08:39:41.000Z | utils/pitch_utils.py | ishine/DiffSinger-1 | 9a5baf553f635f088ca110aa22e87b67ece6e947 | [
"MIT"
] | 37 | 2021-12-19T16:51:34.000Z | 2022-03-23T09:22:31.000Z | #########
# world
##########
import librosa
import numpy as np
import torch
gamma = 0
mcepInput = 3 # 0 for dB, 3 for magnitude
alpha = 0.45
en_floor = 10 ** (-80 / 20)
FFT_SIZE = 2048
f0_bin = 256
f0_max = 1100.0
f0_min = 50.0
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
f0_mel_max = 1127 * np.log(1 + f0_max / 700)
def f0_to_coarse(f0):
    """Quantize F0 values (Hz) into `f0_bin` coarse mel-scale bins.

    Parameters
    ----------
    f0 : np.ndarray or torch.Tensor
        Fundamental frequency in Hz; 0 marks unvoiced frames.

    Returns
    -------
    Integer bin indices in [1, f0_bin - 1] (same container type as the
    input); unvoiced frames map to bin 1.
    """
    is_torch = isinstance(f0, torch.Tensor)
    # Convert Hz to the mel scale before binning.
    f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700)
    f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1
    f0_mel[f0_mel <= 1] = 1
    f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
    # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the documented replacement.
    f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(int)
    assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min())
    return f0_coarse
def norm_f0(f0, uv, hparams):
    """Normalize F0 according to ``hparams['pitch_norm']``.

    'standard' applies z-score normalization with ``f0_mean``/``f0_std``;
    'log' takes log2.  When ``hparams['use_uv']`` is set and ``uv`` is
    given, unvoiced frames (uv > 0) are zeroed in place.
    """
    use_torch = isinstance(f0, torch.Tensor)
    scheme = hparams['pitch_norm']
    if scheme == 'standard':
        f0 = (f0 - hparams['f0_mean']) / hparams['f0_std']
    if scheme == 'log':
        f0 = torch.log2(f0) if use_torch else np.log2(f0)
    if uv is not None and hparams['use_uv']:
        f0[uv > 0] = 0
    return f0
def norm_interp_f0(f0, hparams):
    """Normalize F0 and linearly interpolate across unvoiced regions.

    Returns a ``(f0, uv)`` pair of torch.FloatTensors, where ``uv`` flags
    the originally-unvoiced frames.  Accepts either a numpy array or a
    tensor; tensor input is returned on its original device.
    """
    came_from_torch = isinstance(f0, torch.Tensor)
    if came_from_torch:
        device = f0.device
        f0 = f0.data.cpu().numpy()
    uv = f0 == 0
    f0 = norm_f0(f0, uv, hparams)
    n_unvoiced = sum(uv)
    if n_unvoiced == len(f0):
        # Fully unvoiced: there is nothing to interpolate from.
        f0[uv] = 0
    elif n_unvoiced > 0:
        # Fill unvoiced frames from the surrounding voiced samples.
        f0[uv] = np.interp(np.where(uv)[0], np.where(~uv)[0], f0[~uv])
    uv = torch.FloatTensor(uv)
    f0 = torch.FloatTensor(f0)
    if came_from_torch:
        f0 = f0.to(device)
    return f0, uv
def denorm_f0(f0, uv, hparams, pitch_padding=None, min=None, max=None):
    """Invert :func:`norm_f0`, then clamp and re-mask the result.

    ``min``/``max`` (optional) clamp the denormalized F0.  When
    ``hparams['use_uv']`` is set, unvoiced frames (uv > 0) are zeroed, and
    ``pitch_padding`` positions are always zeroed.  The parameter names
    ``min``/``max`` shadow the builtins but are kept for API compatibility.
    """
    scheme = hparams['pitch_norm']
    if scheme == 'standard':
        f0 = f0 * hparams['f0_std'] + hparams['f0_mean']
    if scheme == 'log':
        f0 = 2 ** f0
    if min is not None:
        f0 = f0.clamp(min=min)
    if max is not None:
        f0 = f0.clamp(max=max)
    if uv is not None and hparams['use_uv']:
        f0[uv > 0] = 0
    if pitch_padding is not None:
        f0[pitch_padding] = 0
    return f0
| 28.285714 | 105 | 0.588154 | a = 0.45
en_floor = 10 ** (-80 / 20)
FFT_SIZE = 2048
f0_bin = 256
f0_max = 1100.0
f0_min = 50.0
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
f0_mel_max = 1127 * np.log(1 + f0_max / 700)
def f0_to_coarse(f0):
is_torch = isinstance(f0, torch.Tensor)
f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700)
f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1
f0_mel[f0_mel <= 1] = 1
f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(np.int)
assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min())
return f0_coarse
def norm_f0(f0, uv, hparams):
is_torch = isinstance(f0, torch.Tensor)
if hparams['pitch_norm'] == 'standard':
f0 = (f0 - hparams['f0_mean']) / hparams['f0_std']
if hparams['pitch_norm'] == 'log':
f0 = torch.log2(f0) if is_torch else np.log2(f0)
if uv is not None and hparams['use_uv']:
f0[uv > 0] = 0
return f0
def norm_interp_f0(f0, hparams):
is_torch = isinstance(f0, torch.Tensor)
if is_torch:
device = f0.device
f0 = f0.data.cpu().numpy()
uv = f0 == 0
f0 = norm_f0(f0, uv, hparams)
if sum(uv) == len(f0):
f0[uv] = 0
elif sum(uv) > 0:
f0[uv] = np.interp(np.where(uv)[0], np.where(~uv)[0], f0[~uv])
uv = torch.FloatTensor(uv)
f0 = torch.FloatTensor(f0)
if is_torch:
f0 = f0.to(device)
return f0, uv
def denorm_f0(f0, uv, hparams, pitch_padding=None, min=None, max=None):
if hparams['pitch_norm'] == 'standard':
f0 = f0 * hparams['f0_std'] + hparams['f0_mean']
if hparams['pitch_norm'] == 'log':
f0 = 2 ** f0
if min is not None:
f0 = f0.clamp(min=min)
if max is not None:
f0 = f0.clamp(max=max)
if uv is not None and hparams['use_uv']:
f0[uv > 0] = 0
if pitch_padding is not None:
f0[pitch_padding] = 0
return f0
| true | true |
f7fd16dd60e1619229bafbdfdda9f2ae7ffee305 | 2,734 | py | Python | docs/source/user_guide/get_started/examples/pwimmigrant_short_example.py | ramirezfranciscof/aiida-quantumespresso | cb32be5361afa05bad617f00f8b187c96eb365ec | [
"MIT"
] | 40 | 2017-09-25T20:22:43.000Z | 2022-02-21T02:53:41.000Z | docs/source/user_guide/get_started/examples/pwimmigrant_short_example.py | ramirezfranciscof/aiida-quantumespresso | cb32be5361afa05bad617f00f8b187c96eb365ec | [
"MIT"
] | 594 | 2017-08-08T17:28:52.000Z | 2022-03-28T13:38:10.000Z | docs/source/user_guide/get_started/examples/pwimmigrant_short_example.py | ramirezfranciscof/aiida-quantumespresso | cb32be5361afa05bad617f00f8b187c96eb365ec | [
"MIT"
] | 66 | 2017-08-08T16:58:56.000Z | 2022-03-17T10:18:43.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# Load the database environment.
from aiida import load_dbenv
load_dbenv()

from aiida.orm import Code
from aiida.plugins import CalculationFactory

# Load the PwimmigrantCalculation class.
PwimmigrantCalculation = CalculationFactory('quantumespresso.pwimmigrant')

# Load the Code node representative of the one used to perform the calculations.
code = Code.get('pw_on_TheHive')

# Get the Computer node representative of the one the calculations were run on.
computer = code.get_remote_computer()

# Define the computation resources used for the calculations.
resources = {'num_machines': 1, 'num_mpiprocs_per_machine': 1}

# Initialize the pw_job1 calculation node.
calc1 = PwimmigrantCalculation(
    computer=computer,
    resources=resources,
    remote_workdir='/scratch/',
    input_file_name='pw_job1.in',
    output_file_name='pw_job1.out'
)

# Initialize the pw_job2 calculation node.
calc2 = PwimmigrantCalculation(
    computer=computer,
    resources=resources,
    remote_workdir='/scratch/',
    input_file_name='pw_job2.in',
    output_file_name='pw_job2.out'
)

# Link the code that was used to run the calculations.
calc1.use_code(code)
calc2.use_code(code)

# Get the computer's transport and create an instance.
from aiida.backends.utils import get_authinfo, get_automatic_user
authinfo = get_authinfo(computer=computer, aiidauser=get_automatic_user())
# BUG FIX: the original read `a.get_transport()` where `a` was never
# defined (NameError at runtime); the AuthInfo created above is `authinfo`.
transport = authinfo.get_transport()

# Open the transport for the duration of the immigrations, so it's not
# reopened for each one. This is best performed using the transport's
# context guard through the ``with`` statement.
with transport as open_transport:
    # Parse the calculations' input files to automatically generate and link the
    # calculations' input nodes.
    calc1.create_input_nodes(open_transport)
    calc2.create_input_nodes(open_transport)
    # Store the calculations and their input nodes and tell the daeomon the output
    # is ready to be retrieved and parsed.
    calc1.prepare_for_retrieval_and_parsing(open_transport)
    calc2.prepare_for_retrieval_and_parsing(open_transport)
f7fd173e432d712db1bdcd59c7e0717652c20284 | 1,951 | py | Python | Python/flask/pr1/main.py | honchardev/Fun | ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc | [
"MIT"
] | null | null | null | Python/flask/pr1/main.py | honchardev/Fun | ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc | [
"MIT"
] | 3 | 2020-03-24T16:26:35.000Z | 2020-04-15T19:40:41.000Z | Python/flask/pr1/main.py | honchardev/Fun | ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc | [
"MIT"
] | null | null | null | from flask import Flask # flask server
from flask import request # how the user requested a resource
from flask import render_template # to use templates/layouts
app = Flask(__name__)

# Flask maps URLs to view functions through the @app.route decorator.
# Jinja templates may use {{ var }} interpolation and {% ... %} statements.


@app.route('/')
def index():
    return 'This is the homepage'


@app.route('/about')
def about():
    return 'This is the about page.<br />brrr'


# URL variables: angle brackets capture a path segment (string by default).
@app.route('/profile/<username>')
def profile(username):
    return 'Hello, %s' % username


# Converters such as <int:...> coerce the segment; non-integers 404.
@app.route('/post/<int:post_id>')
def post(post_id):
    return 'post_id is %d' % post_id


# `request.method` exposes the HTTP verb used for the request.
@app.route('/method')
def check_method():
    return 'Method used: %s' % request.method


@app.route('/add', methods=['GET', 'POST'])
def add():
    if request.method == 'POST':
        return 'You are using POST method'
    if request.method == 'GET':
        return 'You are using GET'
    return 'You are using something else...'


# render_template looks for files under the `templates/` folder.
@app.route('/tmp/<name>')
def tmp_name(name):
    return render_template("profile.html", name=name)


# Multiple routes may map onto one view; `user` defaults to None.
@app.route('/mult/')
@app.route('/mult/<user>')
def mult(user=None):
    return render_template("user.html", user=user)


# Lists passed to the template can be iterated from Jinja.
@app.route('/shopping')
def shopping():
    food = ["cheese", "eggs", "ham"]
    return render_template('shopping.html', food=food)


if __name__ == '__main__':
    app.run(debug=True)
from flask import request
from flask import render_template
app = Flask(__name__)
@app.route('/')
def index():
return 'This is the homepage'
@app.route('/about')
def about():
return 'This is the about page.<br />brrr'
@app.route('/profile/<username>')
def profile(username):
return 'Hello, %s' % username
@app.route('/post/<int:post_id>')
def post(post_id):
return 'post_id is %d' % post_id
@app.route('/method')
def check_method():
return 'Method used: %s' % request.method
@app.route('/add', methods=['GET', 'POST'])
def add():
if request.method == 'POST':
return 'You are using POST method'
elif request.method == 'GET':
return 'You are using GET'
return 'You are using something else...'
@app.route('/tmp/<name>')
def tmp_name(name):
return render_template("profile.html", name=name)
@app.route('/mult/')
@app.route('/mult/<user>')
def mult(user=None):
return render_template("user.html", user=user)
@app.route('/shopping')
def shopping():
food = ["cheese", "eggs", "ham"]
return render_template('shopping.html', food=food)
if __name__ == '__main__':
app.run(debug=True)
| true | true |
f7fd1811f21054458022c1c6a7013d152120b8ac | 27,372 | py | Python | deepchem/nn/model_ops.py | hssinejihene/deepchem-1.1.0 | 6efbe6b638b77bb2685ac617f4d6649755c01335 | [
"MIT"
] | null | null | null | deepchem/nn/model_ops.py | hssinejihene/deepchem-1.1.0 | 6efbe6b638b77bb2685ac617f4d6649755c01335 | [
"MIT"
] | null | null | null | deepchem/nn/model_ops.py | hssinejihene/deepchem-1.1.0 | 6efbe6b638b77bb2685ac617f4d6649755c01335 | [
"MIT"
] | null | null | null | """Ops for graph construction.
Large amounts of code borrowed from Keras. Will try to incorporate into
DeepChem properly.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import sys
import traceback
import numpy as np
import tensorflow as tf
from tensorflow.python.training import moving_averages
from collections import defaultdict
# TODO(rbharath): What does this line do?
py_all = all
# TODO(rbharath): REMOVE GLOBAL VARS! BREAKS DEEPCHEM STYLE!
_UID_PREFIXES = defaultdict(int)
# This dictionary holds a mapping {graph: learning_phase}.
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
_GRAPH_LEARNING_PHASES = {}
def _to_tensor(x, dtype):
  """Convert `x` to a tf tensor, casting to `dtype` when necessary."""
  tensor = tf.convert_to_tensor(x)
  if tensor.dtype != dtype:
    tensor = tf.cast(tensor, dtype)
  return tensor
def learning_phase():
  """Returns the learning phase flag.
  The learning phase flag is a bool tensor (0 = test, 1 = train)
  to be passed as input to any Keras function
  that uses a different behavior at train time and test time.
  """
  # One placeholder is cached per graph in the module-level
  # _GRAPH_LEARNING_PHASES dict, so repeated calls return the same tensor.
  graph = tf.get_default_graph()
  if graph not in _GRAPH_LEARNING_PHASES:
    phase = tf.placeholder(dtype='bool', name='keras_learning_phase')
    _GRAPH_LEARNING_PHASES[graph] = phase
  return _GRAPH_LEARNING_PHASES[graph]
def in_train_phase(x, alt):
  """Selects `x` in train phase, and `alt` otherwise.
  Note that `alt` should have the *same shape* as `x`.
  Returns
  -------
  Either `x` or `alt` based on `K.learning_phase`.
  """
  # NOTE(review): learning_phase() above always returns a placeholder
  # tensor, so these two `is` identity checks can never be true; they look
  # like dead code inherited from Keras (where the phase could be a plain
  # int).  Do not rewrite them as `==`: comparing a tensor with `==` would
  # build a new tensor rather than a Python bool.
  if learning_phase() is 1:
    return x
  elif learning_phase() is 0:
    return alt
  # else: assume learning phase is a placeholder tensor.
  x = switch(learning_phase(), x, alt)
  x._uses_learning_phase = True
  return x
def switch(condition, then_expression, else_expression):
  """Switches between two operations
  depending on a scalar value (`int` or `bool`).
  Note that both `then_expression` and `else_expression`
  should be symbolic tensors of the *same shape*.
  Parameters
  ----------
  condition: scalar tensor.
  then_expression: either a tensor, or a callable that returns a tensor.
  else_expression: either a tensor, or a callable that returns a tensor.
  Returns
  -------
  The selected tensor.
  """
  if condition.dtype != tf.bool:
    condition = tf.cast(condition, 'bool')
  # tf.cond expects zero-argument callables; wrap plain tensors in lambdas.
  then_fn = then_expression if callable(then_expression) else (lambda: then_expression)
  else_fn = else_expression if callable(else_expression) else (lambda: else_expression)
  return tf.cond(condition, then_fn, else_fn)
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
  """Computes mean and std for batch then apply batch_normalization on batch.
  Parameters
  ----------
  x: Input tensor.
  gamma: Tensor by which to scale the normalized input.
  beta: Tensor with which to center the normalized input.
  reduction_axes: Iterable of ints, axes over which to normalize.
  epsilon: Small float added to variance to avoid dividing by zero.
  Returns
  -------
  A tuple length of 3, (normalized_tensor, mean, variance).
  """
  mean, var = tf.nn.moments(
      x, reduction_axes, shift=None, name=None, keep_dims=False)
  # BUG FIX: the original compared a sorted list against a `range` slice
  # (never equal on Python 3) and called the undefined names `ndim` and
  # `stack`; use get_ndim/tf.stack and materialize the range as a list.
  if sorted(reduction_axes) == list(range(get_ndim(x)))[:-1]:
    normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
  else:
    # Normalizing over a non-trailing set of axes: reshape the moments so
    # they broadcast against `x` (size 1 on each reduced axis).
    target_shape = []
    for axis in range(get_ndim(x)):
      if axis in reduction_axes:
        target_shape.append(1)
      else:
        target_shape.append(tf.shape(x)[axis])
    target_shape = tf.stack(target_shape)
    broadcast_mean = tf.reshape(mean, target_shape)
    broadcast_var = tf.reshape(var, target_shape)
    broadcast_gamma = tf.reshape(gamma, target_shape)
    broadcast_beta = tf.reshape(beta, target_shape)
    normed = tf.nn.batch_normalization(x, broadcast_mean, broadcast_var,
                                       broadcast_beta, broadcast_gamma, epsilon)
  return normed, mean, var
def ones(shape, dtype=None, name=None):
  """Instantiates an all-ones tensor variable and returns it.
  Parameters
  ----------
  shape: Tuple of integers, shape of returned Keras variable.
  dtype: Tensorflow dtype
  name: String, name of returned Keras variable.
  Returns
  -------
  A Keras variable, filled with `1.0`.
  """
  if dtype is None:
    dtype = tf.float32
  shape = tuple(map(int, shape))
  # BUG FIX: `dtype` and `name` were previously passed positionally, which
  # bound them to tf.Variable's `trainable` and `collections` parameters.
  # Pass them by keyword, as random_uniform_variable below does.
  return tf.Variable(
      tf.constant_initializer(1., dtype=dtype)(shape), dtype=dtype, name=name)
def cast_to_floatx(x):
  """Cast a Numpy array to the default Keras float type (float32).
  Parameters
  ----------
  x: Numpy array.
  Returns
  -------
  The same Numpy array, cast to its new type.
  """
  # BUG FIX: `np.asarray` expects a numpy dtype; `tf.float32` is a
  # TensorFlow DType and is not a valid `dtype=` argument for numpy.
  return np.asarray(x, dtype=np.float32)
def moving_average_update(variable, value, momentum):
  """Update `variable` with an exponential moving average of `value`.
  The TypeError fallback supports older TensorFlow releases whose
  `assign_moving_average` does not accept the `zero_debias` keyword.
  """
  try:
    return moving_averages.assign_moving_average(
        variable, value, momentum, zero_debias=False)
  except TypeError:
    return moving_averages.assign_moving_average(variable, value, momentum)
def int_shape(x):
  """Returns the shape of a tensor or variable as a tuple of
  integers or None entries.
  Arguments
  ---------
  x: Tensor or variable.
  Returns
  -------
  A tuple of integers (or None entries).
  """
  # BUG FIX: the original called `__int__()` on every dimension, which
  # raises for unknown (None) dimensions even though the docstring
  # promises None entries; `as_list()` yields ints and Nones directly.
  return tuple(x.get_shape().as_list())
def get_uid(prefix=''):
  """Provides a unique UID given a string prefix.
  Parameters
  ----------
  prefix: string.
  Returns
  -------
  An integer.
  """
  # Per-prefix counters live in the module-level _UID_PREFIXES defaultdict.
  count = _UID_PREFIXES[prefix] + 1
  _UID_PREFIXES[prefix] = count
  return count
def concatenate(tensors, axis=-1):
  """Concatenates a list of tensors alongside the specified axis.
  Returns
  -------
  A tensor.
  """
  if axis < 0:
    rank = get_ndim(tensors[0])
    # Fall back to axis 0 when the rank is unknown or zero.
    axis = axis % rank if rank else 0
  values = [t for t in tensors]
  try:
    # Very old TF releases exposed concat as `concat_v2`.
    return tf.concat_v2(values, axis)
  except AttributeError:
    return tf.concat(axis=axis, values=values)
def _normalize_axis(axis, ndim):
if isinstance(axis, tuple):
axis = list(axis)
if isinstance(axis, list):
for i, a in enumerate(axis):
if a is not None and a < 0:
axis[i] = a % ndim
else:
if axis is not None and axis < 0:
axis = axis % ndim
return axis
def mean(x, axis=None, keepdims=False):
  """Mean of a tensor, alongside the specified axis.
  Parameters
  ----------
  x: A tensor or variable.
  axis: A list of integer. Axes to compute the mean.
  keepdims: A boolean, whether to keep the dimensions or not.
    If keepdims is False, the rank of the tensor is reduced
    by 1 for each entry in axis. If keep_dims is True,
    the reduced dimensions are retained with length 1.
  Returns
  -------
  A tensor with the mean of elements of x.
  """
  axis = _normalize_axis(axis, get_ndim(x))
  # Booleans have no mean; promote to float32 first.
  if x.dtype.base_dtype == tf.bool:
    x = tf.cast(x, tf.float32)
  return tf.reduce_mean(x, axis=axis, keep_dims=keepdims)
def dot(x, y):
  """Multiplies 2 tensors (and/or variables) and returns a *tensor*.
  When attempting to multiply a ND tensor
  with a ND tensor, it reproduces the Theano behavior.
  (e.g. (2, 3).(4, 3, 5) = (2, 4, 5))
  Parameters
  ----------
  x: Tensor or variable.
  y: Tensor or variable.
  Returns
  -------
  A tensor, dot product of x and y.
  """
  if get_ndim(x) is not None and (get_ndim(x) > 2 or get_ndim(y) > 2):
    # ND case: build each operand's shape, preferring static sizes where
    # known and falling back to the dynamic tf.shape() entry otherwise.
    x_shape = []
    for i, s in zip(int_shape(x), tf.unstack(tf.shape(x))):
      if i is not None:
        x_shape.append(i)
      else:
        x_shape.append(s)
    x_shape = tuple(x_shape)
    y_shape = []
    for i, s in zip(int_shape(y), tf.unstack(tf.shape(y))):
      if i is not None:
        y_shape.append(i)
      else:
        y_shape.append(s)
    y_shape = tuple(y_shape)
    # Move y's contraction axis (second-to-last) to the front, flatten
    # both operands to 2-D, matmul, then restore the combined shape.
    y_permute_dim = list(range(get_ndim(y)))
    y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
    xt = tf.reshape(x, [-1, x_shape[-1]])
    yt = tf.reshape(tf.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
    return tf.reshape(
        tf.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
  out = tf.matmul(x, y)
  return out
def get_ndim(x):
  """Returns the number of axes in a tensor, as an integer.
  Parameters
  ----------
  x: Tensor or variable.
  Returns
  -------
  Integer (scalar), number of axes, or None when the rank is unknown.
  """
  dims = x.get_shape()._dims
  return None if dims is None else len(dims)
def get_dtype(x):
  """Returns the dtype of a tensor or variable, as a string.
  Parameters
  ----------
  x: Tensor or variable.
  Returns
  -------
  String, dtype of `x` (e.g. 'float32').
  """
  dtype = x.dtype
  return dtype.name
def clip(x, min_value, max_value):
  """Element-wise value clipping.
  Returns
  -------
  A tensor.
  """
  # Guard against an inverted interval: collapse it to [min, min].
  if max_value is not None and max_value < min_value:
    max_value = min_value
  lo = _to_tensor(min_value, x.dtype.base_dtype)
  hi = _to_tensor(max_value, x.dtype.base_dtype)
  return tf.clip_by_value(x, lo, hi)
def epsilon():
  """Return the fuzz factor used to keep numeric expressions stable.
  Returns
  -------
  A float (1e-7).
  """
  return 1e-7
def random_uniform_variable(shape,
                            low,
                            high,
                            dtype=tf.float32,
                            name=None,
                            seed=None):
  """Instantiates an variable filled with
  samples drawn from a uniform distribution and returns it.
  Parameters
  ----------
  shape: Tuple of integers, shape of returned variable.
  low: Float, lower boundary of the output inteval.
  high: Float, upper boundary of the output interval.
  dtype: Tensorflow dtype
  name: String, name of returned variable.
  seed: Integer, random seed.
  Returns
  -------
  A tf.Variable, filled with drawn samples.
  """
  shape = tuple(map(int, shape))
  if seed is None:
    # ensure that randomness is conditioned by the Numpy RNG
    seed = np.random.randint(10e8)
  value = tf.random_uniform_initializer(
      low, high, dtype=dtype, seed=seed)(shape)
  return tf.Variable(value, dtype=dtype, name=name)
def random_normal_variable(shape,
                           mean,
                           scale,
                           dtype=tf.float32,
                           name=None,
                           seed=None):
  """Instantiates an Keras variable filled with
  samples drawn from a normal distribution and returns it.
  Parameters
  ----------
  shape: Tuple of integers, shape of returned Keras variable.
  mean: Float, mean of the normal distribution.
  scale: Float, standard deviation of the normal distribution.
  dtype: Tensorflow dtype
  name: String, name of returned Keras variable.
  seed: Integer, random seed.
  Returns
  -------
  A tf.Variable, filled with drawn samples.
  """
  shape = tuple(map(int, shape))
  if seed is None:
    # ensure that randomness is conditioned by the Numpy RNG
    seed = np.random.randint(10e8)
  value = tf.random_normal_initializer(
      mean, scale, dtype=dtype, seed=seed)(shape)
  return tf.Variable(value, dtype=dtype, name=name)
# NOTE: shadows the builtin max() within this module.
def max(x, axis=None, keepdims=False):
  """Maximum value in a tensor.
  Parameters
  ----------
  x: A tensor or variable.
  axis: An integer, the axis to find maximum values.
  keepdims: A boolean, whether to keep the dimensions or not.
    If `keepdims` is `False`, the rank of the tensor is reduced
    by 1. If `keepdims` is `True`,
    the reduced dimension is retained with length 1.
  Returns
  -------
  A tensor with maximum values of `x`.
  """
  axis = _normalize_axis(axis, get_ndim(x))
  return tf.reduce_max(x, axis=axis, keep_dims=keepdims)
def l2_normalize(x, axis):
  """Normalizes a tensor wrt the L2 norm alongside the specified axis.
  Parameters
  ----------
  x: input tensor.
  axis: axis along which to perform normalization.
  Returns
  -------
  A tensor.
  """
  if axis < 0:
    # Wrap a negative axis into the valid [0, rank) range.
    axis %= len(x.get_shape())
  return tf.nn.l2_normalize(x, dim=axis)
def categorical_crossentropy(output, target, from_logits=False):
  """Categorical crossentropy between an output tensor
  and a target tensor, where the target is a tensor of the same
  shape as the output.
  Parameters
  ----------
  output: Tensor of predicted class probabilities (or logits).
  target: Tensor of one-hot true labels, same shape as `output`.
  from_logits: Whether `output` is a logits tensor.
  """
  # Note: tf.nn.softmax_cross_entropy_with_logits
  # expects logits, Keras expects probabilities.
  if not from_logits:
    # scale preds so that the class probas of each sample sum to 1
    output /= tf.reduce_sum(
        output, axis=len(output.get_shape()) - 1, keep_dims=True)
    # manual computation of crossentropy
    # BUG FIX: `_EPSILON` was never defined in this module; use the
    # module's epsilon() fuzz factor (the same 1e-7 value Keras uses).
    eps = _to_tensor(epsilon(), output.dtype.base_dtype)
    output = tf.clip_by_value(output, eps, 1. - eps)
    return -tf.reduce_sum(
        target * tf.log(output), axis=len(output.get_shape()) - 1)
  else:
    # Keyword names flipped between TF releases; try both orders.
    try:
      return tf.nn.softmax_cross_entropy_with_logits(
          labels=target, logits=output)
    except TypeError:
      return tf.nn.softmax_cross_entropy_with_logits(
          logits=output, labels=target)
def sparse_categorical_crossentropy(output, target, from_logits=False):
  """Categorical crossentropy between an output tensor
  and a target tensor, where the target is an integer tensor.
  Parameters
  ----------
  output: Tensor of predicted class probabilities (or logits).
  target: Integer tensor of true class indices.
  from_logits: Whether `output` is a logits tensor.
  """
  # Note: tf.nn.softmax_cross_entropy_with_logits
  # expects logits, Keras expects probabilities.
  if not from_logits:
    # BUG FIX: `_EPSILON` was never defined in this module; use the
    # module's epsilon() fuzz factor instead.
    eps = _to_tensor(epsilon(), output.dtype.base_dtype)
    output = tf.clip_by_value(output, eps, 1 - eps)
    output = tf.log(output)
  output_shape = output.get_shape()
  # BUG FIX: the original called the undefined helpers `cast`/`flatten`;
  # inline their Keras definitions (reshape to 1-D, then cast to int64).
  targets = tf.cast(tf.reshape(target, [-1]), 'int64')
  logits = tf.reshape(output, [-1, int(output_shape[-1])])
  try:
    res = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=targets, logits=logits)
  except TypeError:
    res = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=targets)
  if len(output_shape) == 3:
    # if our output includes timesteps we need to reshape
    return tf.reshape(res, tf.shape(output)[:-1])
  else:
    return res
def binary_crossentropy(output, target, from_logits=False):
  """Binary crossentropy between an output tensor and a target tensor.
  Parameters
  ----------
  output: A tensor.
  target: A tensor with the same shape as `output`.
  from_logits: Whether `output` is expected to be a logits tensor.
      By default, we consider that `output`
      encodes a probability distribution.
  Returns
  -------
  A tensor.
  """
  # Note: tf.nn.sigmoid_cross_entropy_with_logits
  # expects logits, Keras expects probabilities.
  if not from_logits:
    # transform back to logits
    # BUG FIX: `_EPSILON` was never defined in this module; use the
    # module's epsilon() fuzz factor instead.
    eps = _to_tensor(epsilon(), output.dtype.base_dtype)
    output = tf.clip_by_value(output, eps, 1 - eps)
    output = tf.log(output / (1 - output))
  # Keyword names flipped between TF releases; try both orders.
  try:
    return tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
  except TypeError:
    return tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=target)
# NOTE: shadows the builtin sum() within this module.
def sum(x, axis=None, keepdims=False):
  """Sum of the values in a tensor, alongside the specified axis.
  Parameters
  ----------
  x: A tensor or variable.
  axis: An integer, the axis to sum over.
  keepdims: A boolean, whether to keep the dimensions or not.
    If keepdims is False, the rank of the tensor is reduced
    by 1. If keepdims is True,
    the reduced dimension is retained with length 1.
  Returns
  -------
  A tensor with sum of x.
  """
  axis = _normalize_axis(axis, get_ndim(x))
  return tf.reduce_sum(x, axis=axis, keep_dims=keepdims)
# TODO(rbharath): Need to rename this. This makes a variable, not just creates
# a tensor. Confusing with tf.zeros...
def zeros(shape, dtype=tf.float32, name=None):
  """Instantiates an all-zeros variable and returns it.
  Parameters
  ----------
  shape: Tuple of integers, shape of returned Keras variable
  dtype: Tensorflow dtype
  name: String, name of returned Keras variable
  Returns
  -------
  A variable (including Keras metadata), filled with `0.0`.
  """
  shape = tuple(map(int, shape))
  # BUG FIX: `dtype` and `name` were previously passed positionally, which
  # bound them to tf.Variable's `trainable` and `collections` parameters.
  # Pass them by keyword, as random_uniform_variable does.
  return tf.Variable(
      tf.constant_initializer(0., dtype=dtype)(shape), dtype=dtype, name=name)
def cosine_distances(test, support):
  """Computes pairwise cosine distances between provided tensors
  Parameters
  ----------
  test: tf.Tensor
    Of shape (n_test, n_feat)
  support: tf.Tensor
    Of shape (n_support, n_feat)
  Returns
  -------
  tf.Tensor:
    Of shape (n_test, n_support)
  """
  # The 1e-7 terms guard against division by zero for all-zero rows.
  rnorm_test = tf.rsqrt(
      tf.reduce_sum(tf.square(test), 1, keep_dims=True)) + 1e-7
  rnorm_support = tf.rsqrt(
      tf.reduce_sum(tf.square(support), 1, keep_dims=True)) + 1e-7
  test_normalized = test * rnorm_test
  support_normalized = support * rnorm_support
  # Transpose for mul
  support_normalized_t = tf.transpose(support_normalized, perm=[1, 0])
  g = tf.matmul(test_normalized, support_normalized_t)  # Gram matrix
  return g
def elu(x, alpha=1.):
  """Exponential linear unit.
  Parameters
  ----------
  x: A tensor or variable to compute the activation function for.
  alpha: A scalar, slope of positive section.
  Returns
  -------
  A tensor.
  """
  activated = tf.nn.elu(x)
  if alpha == 1:
    return activated
  # Scale only the negative (saturating) part by alpha.
  return tf.where(x > 0, activated, alpha * activated)
def relu(x, alpha=0., max_value=None):
  """Rectified linear unit.
  With default values, it returns element-wise `max(x, 0)`.
  Parameters
  ----------
  x: A tensor or variable.
  alpha: A scalar, slope of negative section (default=`0.`).
  max_value: Saturation threshold.
  Returns
  -------
  A tensor.
  """
  # For a leaky ReLU, remember the negative part before it is zeroed.
  if alpha != 0.:
    negative_part = tf.nn.relu(-x)
  x = tf.nn.relu(x)
  # Optionally saturate the positive side at max_value.
  if max_value is not None:
    max_value = _to_tensor(max_value, x.dtype.base_dtype)
    zero = _to_tensor(0., x.dtype.base_dtype)
    x = tf.clip_by_value(x, zero, max_value)
  # Re-introduce the (scaled) negative slope.
  if alpha != 0.:
    alpha = _to_tensor(alpha, x.dtype.base_dtype)
    x -= alpha * negative_part
  return x
def hard_sigmoid(x):
  """Segment-wise linear approximation of sigmoid.
  Faster than sigmoid.
  Returns 0. if x < -2.5, 1. if x > 2.5.
  In -2.5 <= x <= 2.5, returns 0.2 * x + 0.5.
  Parameters
  ----------
  x: A tensor or variable.
  Returns
  -------
  A tensor.
  """
  scaled = (0.2 * x) + 0.5
  lower = _to_tensor(0., scaled.dtype.base_dtype)
  upper = _to_tensor(1., scaled.dtype.base_dtype)
  return tf.clip_by_value(scaled, lower, upper)
def sqrt(x):
  """Element-wise square root.
  Negative inputs are clipped to zero first, so the result is never NaN.
  Parameters
  ----------
  x: input tensor.
  Returns
  -------
  A tensor.
  """
  lower = _to_tensor(0., x.dtype.base_dtype)
  upper = _to_tensor(np.inf, x.dtype.base_dtype)
  return tf.sqrt(tf.clip_by_value(x, lower, upper))
def var(x, axis=None, keepdims=False):
  """Variance of a tensor, alongside the specified axis.
  Parameters
  ----------
  x: A tensor or variable.
  axis: An integer, the axis to compute the variance.
  keepdims: A boolean, whether to keep the dimensions or not.
    If keepdims is False, the rank of the tensor is reduced
    by 1. If keepdims is True,
    the reduced dimension is retained with length 1.
  Returns
  -------
  A tensor with the variance of elements of `x`.
  """
  axis = _normalize_axis(axis, get_ndim(x))
  # Booleans have no variance; promote to float32 first.
  if x.dtype.base_dtype == tf.bool:
    x = tf.cast(x, tf.float32)
  # Population variance: mean of squared deviations from the mean.
  m = tf.reduce_mean(x, axis=axis, keep_dims=True)
  devs_squared = tf.square(x - m)
  return tf.reduce_mean(devs_squared, axis=axis, keep_dims=keepdims)
def euclidean_distance(test, support, max_dist_sq=20):
  """Computes pairwise negated, capped squared euclidean distances.
  Parameters
  ----------
  test: tf.Tensor
    Of shape (n_test, n_feat)
  support: tf.Tensor
    Of shape (n_support, n_feat)
  max_dist_sq: float, optional
    Maximum pairwise squared distance allowed.
  Returns
  -------
  tf.Tensor:
    Of shape (n_test, n_support)
  """
  test = tf.expand_dims(test, 1)
  support = tf.expand_dims(support, 0)
  # BUG FIX (the "BROKEN" TODO in the original): tf.maximum collapsed any
  # distance below the cap to the cap itself, making the output constant
  # -max_dist_sq for all nearby pairs.  Capping a distance from above
  # requires tf.minimum.
  g = -tf.minimum(tf.reduce_sum(tf.square(test - support), 2), max_dist_sq)
  return g
def add_bias(tensor, init=None, name=None):
  """Add a bias term to a tensor.
  Parameters
  ----------
  tensor: tf.Tensor
    Variable tensor.
  init: float
    Bias initializer. Defaults to zero.
  name: str
    Name for this op. Defaults to tensor.op.name.
  Returns
  -------
  tf.Tensor
    A biased tensor with the same shape as the input tensor.
  """
  # One bias value per unit along the last dimension.
  if init is None:
    init = tf.zeros([tensor.get_shape()[-1].value])
  # tf.name_scope(name, default_name, values): tensor.op.name is used when
  # no explicit name is given.
  with tf.name_scope(name, tensor.op.name, [tensor]):
    b = tf.Variable(init, name='b')
    return tf.nn.bias_add(tensor, b)
def dropout(tensor, dropout_prob, training=True, training_only=True):
  """Random dropout.
  This implementation supports "always-on" dropout (training_only=False), which
  can be used to calculate model uncertainty. See Gal and Ghahramani,
  http://arxiv.org/abs/1506.02142.
  NOTE(user): To simplify the implementation, I have chosen not to reverse
  the scaling that occurs in tf.nn.dropout when using dropout during
  inference. This shouldn't be an issue since the activations will be scaled
  by the same constant in both training and inference. This means that there
  are no training-time differences between networks that use dropout during
  inference and those that do not.
  Parameters
  ----------
  tensor: tf.Tensor
    Input tensor.
  dropout_prob: float
    Float giving dropout probability for weights (NOT keep probability).
  training_only: bool
    Boolean. If True (standard dropout), apply dropout only
    during training. If False, apply dropout during inference as well.
  Returns
  -------
  tf.Tensor:
    A tensor with the same shape as the input tensor.
  """
  # A falsy dropout_prob (0 or None) disables dropout entirely.
  if not dropout_prob:
    return tensor  # do nothing
  # tf.nn.dropout takes the KEEP probability, hence the complement.
  keep_prob = 1.0 - dropout_prob
  if training or not training_only:
    tensor = tf.nn.dropout(tensor, keep_prob)
  return tensor
def fully_connected_layer(tensor,
                          size=None,
                          weight_init=None,
                          bias_init=None,
                          name=None):
  """Fully connected layer.
  Parameters
  ----------
  tensor: tf.Tensor
    Input tensor.
  size: int
    Number of output nodes for this layer.
  weight_init: float
    Weight initializer.
  bias_init: float
    Bias initializer.
  name: str
    Name for this op. Defaults to 'fully_connected'.
  Returns
  -------
  tf.Tensor:
    A new tensor representing the output of the fully connected layer.
  Raises
  ------
  ValueError
    If input tensor is not 2D.
  """
  # NOTE(review): `size` defaults to None but is required below (it is
  # used as a shape dimension); callers must always pass it explicitly.
  if weight_init is None:
    num_features = tensor.get_shape()[-1].value
    weight_init = tf.truncated_normal([num_features, size], stddev=0.01)
  if bias_init is None:
    bias_init = tf.zeros([size])
  with tf.name_scope(name, 'fully_connected', [tensor]):
    w = tf.Variable(weight_init, name='w', dtype=tf.float32)
    b = tf.Variable(bias_init, name='b', dtype=tf.float32)
    return tf.nn.xw_plus_b(tensor, w, b)
def weight_decay(penalty_type, penalty):
  """Add weight decay.
  Args:
    penalty_type: str, either 'l1' or 'l2'.
    penalty: float, multiplier applied to the summed penalty.
  Returns:
    A scalar tensor containing the weight decay cost.
  Raises:
    NotImplementedError: If an unsupported penalty type is requested.
  """
  variables = []
  # exclude bias variables
  # (biases are 1-D; only rank-2 weight matrices are penalized)
  for v in tf.trainable_variables():
    if v.get_shape().ndims == 2:
      variables.append(v)
  with tf.name_scope('weight_decay'):
    if penalty_type == 'l1':
      cost = tf.add_n([tf.reduce_sum(tf.abs(v)) for v in variables])
    elif penalty_type == 'l2':
      cost = tf.add_n([tf.nn.l2_loss(v) for v in variables])
    else:
      raise NotImplementedError('Unsupported penalty_type %s' % penalty_type)
    cost *= penalty
    #tf.scalar_summary('Weight Decay Cost', cost)
  return cost
def multitask_logits(features,
                     num_tasks,
                     num_classes=2,
                     weight_init=None,
                     bias_init=None,
                     dropout_prob=None,
                     name=None):
  """Build one logits tensor per classification task.
  Args:
    features: A 2D tensor with dimensions batch_size x num_features.
    num_tasks: Number of classification tasks.
    num_classes: Number of classes for each task.
    weight_init: Weight initializer.
    bias_init: Bias initializer.
    dropout_prob: Float giving dropout probability for weights (NOT keep
      probability).
    name: Name for this op. Defaults to 'multitask_logits'.
  Returns:
    A list of logit tensors; one for each classification task.
  """
  task_logits = []
  with tf.name_scope('multitask_logits'):
    # Zero-pad the task index so scope names sort naturally (task00, task01, ...).
    width = len(str(num_tasks))
    for task in range(num_tasks):
      default_scope = 'task' + str(task).zfill(width)
      with tf.name_scope(name, default_scope, [features]):
        task_logits.append(
            logits(
                features,
                num_classes,
                weight_init=weight_init,
                bias_init=bias_init,
                dropout_prob=dropout_prob))
  return task_logits
def logits(features,
           num_classes=2,
           weight_init=None,
           bias_init=None,
           dropout_prob=None,
           name=None):
  """Create a logits tensor for a single classification task.
  Note: dropout_prob is applied to the logits themselves; you almost
  certainly don't want that -- it's like randomly setting the (unscaled)
  probability of a target class to 0.5.
  Args:
    features: A 2D tensor with dimensions batch_size x num_features.
    num_classes: Number of classes for each task.
    weight_init: Weight initializer.
    bias_init: Bias initializer.
    dropout_prob: Float giving dropout probability for weights (NOT keep
      probability).
    name: Name for this op.
  Returns:
    A logits tensor with shape batch_size x num_classes.
  """
  with tf.name_scope(name, 'logits', [features]) as scope:
    dense = fully_connected_layer(
        features,
        num_classes,
        weight_init=weight_init,
        bias_init=bias_init,
        name=scope)
    return dropout(dense, dropout_prob)
def softmax_N(tensor, name=None):
  """Apply softmax across the last dimension of a tensor.
  Args:
    tensor: Input tensor.
    name: Name for this op. If None, defaults to 'softmax_N'.
  Returns:
    A tensor with softmax-normalized values on the last dimension.
  """
  with tf.name_scope(name, 'softmax_N', [tensor]):
    last_axis = [tensor.get_shape().ndims - 1]
    # BUG FIX: the original exponentiated the raw tensor, which overflows to
    # inf/NaN for large logits. Subtracting the per-row max first is the
    # standard stabilization and leaves the mathematical result unchanged.
    shifted = tensor - tf.reduce_max(tensor, axis=last_axis, keep_dims=True)
    exp_tensor = tf.exp(shifted)
    return tf.div(exp_tensor,
                  tf.reduce_sum(
                      exp_tensor, axis=last_axis, keep_dims=True))
def optimizer(optimizer="adam", learning_rate=.001, momentum=.9):
  """Create a model optimizer by name.
  Parameters
  ----------
  optimizer: str, optional
    One of 'adagrad', 'adam', 'momentum', 'rmsprop', 'sgd'.
  learning_rate: float, optional
    Learning rate for the algorithm.
  momentum: float, optional
    Momentum rate (used by 'momentum' and 'rmsprop').
  Returns
  -------
  A training Optimizer.
  Raises:
    NotImplementedError: If an unsupported optimizer is requested.
  """
  # TODO(user): gradient clipping (see Minimize)
  # Dispatch table; lambdas defer construction so only the requested
  # optimizer object is ever instantiated.
  constructors = {
      'adagrad': lambda: tf.train.AdagradOptimizer(learning_rate),
      'adam': lambda: tf.train.AdamOptimizer(learning_rate),
      'momentum': lambda: tf.train.MomentumOptimizer(learning_rate, momentum),
      'rmsprop': lambda: tf.train.RMSPropOptimizer(learning_rate, momentum),
      'sgd': lambda: tf.train.GradientDescentOptimizer(learning_rate),
  }
  if optimizer not in constructors:
    raise NotImplementedError('Unsupported optimizer %s' % optimizer)
  return constructors[optimizer]()
| 27.648485 | 80 | 0.666192 | from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import sys
import traceback
import numpy as np
import tensorflow as tf
from tensorflow.python.training import moving_averages
from collections import defaultdict
py_all = all
_UID_PREFIXES = defaultdict(int)
_GRAPH_LEARNING_PHASES = {}
def _to_tensor(x, dtype):
x = tf.convert_to_tensor(x)
if x.dtype != dtype:
x = tf.cast(x, dtype)
return x
def learning_phase():
graph = tf.get_default_graph()
if graph not in _GRAPH_LEARNING_PHASES:
phase = tf.placeholder(dtype='bool', name='keras_learning_phase')
_GRAPH_LEARNING_PHASES[graph] = phase
return _GRAPH_LEARNING_PHASES[graph]
def in_train_phase(x, alt):
if learning_phase() is 1:
return x
elif learning_phase() is 0:
return alt
x = switch(learning_phase(), x, alt)
x._uses_learning_phase = True
return x
def switch(condition, then_expression, else_expression):
if condition.dtype != tf.bool:
condition = tf.cast(condition, 'bool')
if not callable(then_expression):
def then_expression_fn():
return then_expression
else:
then_expression_fn = then_expression
if not callable(else_expression):
def else_expression_fn():
return else_expression
else:
else_expression_fn = else_expression
x = tf.cond(condition, then_expression_fn, else_expression_fn)
return x
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
mean, var = tf.nn.moments(
x, reduction_axes, shift=None, name=None, keep_dims=False)
if sorted(reduction_axes) == range(ndim(x))[:-1]:
normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
else:
target_shape = []
for axis in range(get_ndim(x)):
if axis in reduction_axes:
target_shape.append(1)
else:
target_shape.append(tf.shape(x)[axis])
target_shape = stack(target_shape)
broadcast_mean = tf.reshape(mean, target_shape)
broadcast_var = tf.reshape(var, target_shape)
broadcast_gamma = tf.reshape(gamma, target_shape)
broadcast_beta = tf.reshape(beta, target_shape)
normed = tf.nn.batch_normalization(x, broadcast_mean, broadcast_var,
broadcast_beta, broadcast_gamma, epsilon)
return normed, mean, var
def ones(shape, dtype=None, name=None):
if dtype is None:
dtype = tf.float32
shape = tuple(map(int, shape))
return tf.Variable(
tf.constant_initializer(1., dtype=dtype)(shape), dtype, name)
def cast_to_floatx(x):
return np.asarray(x, dtype=tf.float32)
def moving_average_update(variable, value, momentum):
try:
return moving_averages.assign_moving_average(
variable, value, momentum, zero_debias=False)
except TypeError:
return moving_averages.assign_moving_average(variable, value, momentum)
def int_shape(x):
shape = x.get_shape()
return tuple([i.__int__() for i in shape])
def get_uid(prefix=''):
_UID_PREFIXES[prefix] += 1
return _UID_PREFIXES[prefix]
def concatenate(tensors, axis=-1):
if axis < 0:
dims = get_ndim(tensors[0])
if dims:
axis = axis % dims
else:
axis = 0
try:
return tf.concat_v2([x for x in tensors], axis)
except AttributeError:
return tf.concat(axis=axis, values=[x for x in tensors])
def _normalize_axis(axis, ndim):
  """Map negative axis indices to their non-negative equivalents.
  Args:
    axis: None, an int, or a list/tuple of ints (entries may be None).
    ndim: Rank of the tensor the axes refer to.
  Returns:
    The normalized axis spec: None, an int, or a list of ints (tuples are
    returned as lists, matching the original behavior).
  """
  if isinstance(axis, (list, tuple)):
    # BUG FIX: the original rewrote entries of a caller-supplied list in
    # place; build a new list instead so the argument is never mutated.
    return [a % ndim if (a is not None and a < 0) else a for a in axis]
  if axis is not None and axis < 0:
    axis = axis % ndim
  return axis
def mean(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, get_ndim(x))
if x.dtype.base_dtype == tf.bool:
x = tf.cast(x, tf.float32)
return tf.reduce_mean(x, axis=axis, keep_dims=keepdims)
def dot(x, y):
if get_ndim(x) is not None and (get_ndim(x) > 2 or get_ndim(y) > 2):
x_shape = []
for i, s in zip(int_shape(x), tf.unstack(tf.shape(x))):
if i is not None:
x_shape.append(i)
else:
x_shape.append(s)
x_shape = tuple(x_shape)
y_shape = []
for i, s in zip(int_shape(y), tf.unstack(tf.shape(y))):
if i is not None:
y_shape.append(i)
else:
y_shape.append(s)
y_shape = tuple(y_shape)
y_permute_dim = list(range(get_ndim(y)))
y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
xt = tf.reshape(x, [-1, x_shape[-1]])
yt = tf.reshape(tf.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
return tf.reshape(
tf.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
out = tf.matmul(x, y)
return out
def get_ndim(x):
dims = x.get_shape()._dims
if dims is not None:
return len(dims)
return None
def get_dtype(x):
return x.dtype.name
def clip(x, min_value, max_value):
if max_value is not None and max_value < min_value:
max_value = min_value
min_value = _to_tensor(min_value, x.dtype.base_dtype)
max_value = _to_tensor(max_value, x.dtype.base_dtype)
return tf.clip_by_value(x, min_value, max_value)
def epsilon():
  """Return the fuzz factor (1e-7) used to keep values away from 0/1 before log/div."""
  return 1e-7
def random_uniform_variable(shape,
low,
high,
dtype=tf.float32,
name=None,
seed=None):
shape = tuple(map(int, shape))
if seed is None:
seed = np.random.randint(10e8)
value = tf.random_uniform_initializer(
low, high, dtype=dtype, seed=seed)(shape)
return tf.Variable(value, dtype=dtype, name=name)
def random_normal_variable(shape,
mean,
scale,
dtype=tf.float32,
name=None,
seed=None):
shape = tuple(map(int, shape))
if seed is None:
seed = np.random.randint(10e8)
value = tf.random_normal_initializer(
mean, scale, dtype=dtype, seed=seed)(shape)
return tf.Variable(value, dtype=dtype, name=name)
def max(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, get_ndim(x))
return tf.reduce_max(x, axis=axis, keep_dims=keepdims)
def l2_normalize(x, axis):
if axis < 0:
axis = axis % len(x.get_shape())
return tf.nn.l2_normalize(x, dim=axis)
def categorical_crossentropy(output, target, from_logits=False):
  """Categorical cross-entropy between predictions and targets.
  Args:
    output: Probabilities over the last axis, or raw logits when
      from_logits=True.
    target: One-hot (or soft) target distribution, same shape as `output`.
    from_logits: Whether `output` holds unnormalized logits.
  Returns:
    Tensor of per-sample cross-entropy values (last axis reduced).
  """
  if not from_logits:
    # Renormalize so rows sum to 1, then clip away 0/1 before the log.
    output /= tf.reduce_sum(
        output, axis=len(output.get_shape()) - 1, keep_dims=True)
    # BUG FIX: the original referenced an undefined module global `_EPSILON`
    # (NameError at runtime); use the module's epsilon() fuzz factor. The
    # local is named `eps` so it does not shadow the epsilon() function.
    eps = _to_tensor(epsilon(), output.dtype.base_dtype)
    output = tf.clip_by_value(output, eps, 1. - eps)
    return -tf.reduce_sum(
        target * tf.log(output), axis=len(output.get_shape()) - 1)
  else:
    # TF changed this API to keyword-only arguments; keep the fallback for
    # older versions (matches the original behavior).
    try:
      return tf.nn.softmax_cross_entropy_with_logits(
          labels=target, logits=output)
    except TypeError:
      return tf.nn.softmax_cross_entropy_with_logits(
          logits=output, labels=target)
def sparse_categorical_crossentropy(output, target, from_logits=False):
  """Sparse categorical cross-entropy (integer class-index targets).
  Args:
    output: Probabilities over the last axis, or raw logits when
      from_logits=True.
    target: Integer class-index tensor.
    from_logits: Whether `output` holds unnormalized logits.
  Returns:
    Tensor of per-sample cross-entropy values.
  """
  if not from_logits:
    # Clip probabilities away from 0/1 and go back to log-space so the
    # logits-based TF op below can be reused.
    # BUG FIX: the original referenced an undefined module global `_EPSILON`
    # (NameError at runtime); use the module's epsilon() fuzz factor.
    eps = _to_tensor(epsilon(), output.dtype.base_dtype)
    output = tf.clip_by_value(output, eps, 1 - eps)
    output = tf.log(output)
  output_shape = output.get_shape()
  # BUG FIX: the original called undefined helpers cast()/flatten(); use the
  # equivalent TF ops directly.
  targets = tf.cast(tf.reshape(target, [-1]), 'int64')
  flat_logits = tf.reshape(output, [-1, int(output_shape[-1])])
  # TF changed this API to keyword-only arguments; keep the fallback.
  try:
    res = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=targets, logits=flat_logits)
  except TypeError:
    res = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=flat_logits, labels=targets)
  if len(output_shape) == 3:
    # Restore the (batch, time) shape that was flattened above.
    return tf.reshape(res, tf.shape(output)[:-1])
  else:
    return res
def binary_crossentropy(output, target, from_logits=False):
  """Element-wise binary cross-entropy.
  Args:
    output: Probabilities in (0, 1), or raw logits when from_logits=True.
    target: Tensor of 0/1 (or soft) targets, same shape as `output`.
    from_logits: Whether `output` holds unnormalized logits.
  Returns:
    Tensor of element-wise cross-entropy values.
  """
  if not from_logits:
    # Map probabilities back to logits via the logit function, clipping
    # first so the division and log stay finite.
    # BUG FIX: the original referenced an undefined module global `_EPSILON`
    # (NameError at runtime); use the module's epsilon() fuzz factor.
    eps = _to_tensor(epsilon(), output.dtype.base_dtype)
    output = tf.clip_by_value(output, eps, 1 - eps)
    output = tf.log(output / (1 - output))
  # TF changed this API to keyword-only arguments; keep the fallback.
  try:
    return tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
  except TypeError:
    return tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=target)
def sum(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, get_ndim(x))
return tf.reduce_sum(x, axis=axis, keep_dims=keepdims)
def zeros(shape, dtype=tf.float32, name=None):
shape = tuple(map(int, shape))
return tf.Variable(
tf.constant_initializer(0., dtype=dtype)(shape), dtype, name)
def cosine_distances(test, support):
rnorm_test = tf.rsqrt(
tf.reduce_sum(tf.square(test), 1, keep_dims=True)) + 1e-7
rnorm_support = tf.rsqrt(
tf.reduce_sum(tf.square(support), 1, keep_dims=True)) + 1e-7
test_normalized = test * rnorm_test
support_normalized = support * rnorm_support
support_normalized_t = tf.transpose(support_normalized, perm=[1, 0])
g = tf.matmul(test_normalized, support_normalized_t)
return g
def elu(x, alpha=1.):
res = tf.nn.elu(x)
if alpha == 1:
return res
else:
return tf.where(x > 0, res, alpha * res)
def relu(x, alpha=0., max_value=None):
if alpha != 0.:
negative_part = tf.nn.relu(-x)
x = tf.nn.relu(x)
if max_value is not None:
max_value = _to_tensor(max_value, x.dtype.base_dtype)
zero = _to_tensor(0., x.dtype.base_dtype)
x = tf.clip_by_value(x, zero, max_value)
if alpha != 0.:
alpha = _to_tensor(alpha, x.dtype.base_dtype)
x -= alpha * negative_part
return x
def hard_sigmoid(x):
x = (0.2 * x) + 0.5
zero = _to_tensor(0., x.dtype.base_dtype)
one = _to_tensor(1., x.dtype.base_dtype)
x = tf.clip_by_value(x, zero, one)
return x
def sqrt(x):
zero = _to_tensor(0., x.dtype.base_dtype)
inf = _to_tensor(np.inf, x.dtype.base_dtype)
x = tf.clip_by_value(x, zero, inf)
return tf.sqrt(x)
def var(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, get_ndim(x))
if x.dtype.base_dtype == tf.bool:
x = tf.cast(x, tf.float32)
m = tf.reduce_mean(x, axis=axis, keep_dims=True)
devs_squared = tf.square(x - m)
return tf.reduce_mean(devs_squared, axis=axis, keep_dims=keepdims)
def euclidean_distance(test, support, max_dist_sq=20):
test = tf.expand_dims(test, 1)
support = tf.expand_dims(support, 0)
g = -tf.maximum(tf.reduce_sum(tf.square(test - support), 2), max_dist_sq)
return g
def add_bias(tensor, init=None, name=None):
if init is None:
init = tf.zeros([tensor.get_shape()[-1].value])
with tf.name_scope(name, tensor.op.name, [tensor]):
b = tf.Variable(init, name='b')
return tf.nn.bias_add(tensor, b)
def dropout(tensor, dropout_prob, training=True, training_only=True):
  """Apply dropout with probability `dropout_prob` (NOT keep probability).
  When training_only is True (standard dropout) it is applied only while
  training; otherwise it is applied at inference time as well.
  """
  if not dropout_prob:
    return tensor  # dropout disabled; pass through unchanged
  apply_now = training or not training_only
  if apply_now:
    tensor = tf.nn.dropout(tensor, 1.0 - dropout_prob)
  return tensor
def fully_connected_layer(tensor,
size=None,
weight_init=None,
bias_init=None,
name=None):
if weight_init is None:
num_features = tensor.get_shape()[-1].value
weight_init = tf.truncated_normal([num_features, size], stddev=0.01)
if bias_init is None:
bias_init = tf.zeros([size])
with tf.name_scope(name, 'fully_connected', [tensor]):
w = tf.Variable(weight_init, name='w', dtype=tf.float32)
b = tf.Variable(bias_init, name='b', dtype=tf.float32)
return tf.nn.xw_plus_b(tensor, w, b)
def weight_decay(penalty_type, penalty):
variables = []
for v in tf.trainable_variables():
if v.get_shape().ndims == 2:
variables.append(v)
with tf.name_scope('weight_decay'):
if penalty_type == 'l1':
cost = tf.add_n([tf.reduce_sum(tf.abs(v)) for v in variables])
elif penalty_type == 'l2':
cost = tf.add_n([tf.nn.l2_loss(v) for v in variables])
else:
raise NotImplementedError('Unsupported penalty_type %s' % penalty_type)
cost *= penalty
return cost
def multitask_logits(features,
num_tasks,
num_classes=2,
weight_init=None,
bias_init=None,
dropout_prob=None,
name=None):
logits_list = []
with tf.name_scope('multitask_logits'):
for task_idx in range(num_tasks):
with tf.name_scope(name,
('task' + str(task_idx).zfill(len(str(num_tasks)))),
[features]):
logits_list.append(
logits(
features,
num_classes,
weight_init=weight_init,
bias_init=bias_init,
dropout_prob=dropout_prob))
return logits_list
def logits(features,
num_classes=2,
weight_init=None,
bias_init=None,
dropout_prob=None,
name=None):
with tf.name_scope(name, 'logits', [features]) as name:
return dropout(
fully_connected_layer(
features,
num_classes,
weight_init=weight_init,
bias_init=bias_init,
name=name), dropout_prob)
def softmax_N(tensor, name=None):
with tf.name_scope(name, 'softmax_N', [tensor]):
exp_tensor = tf.exp(tensor)
reduction_indices = [tensor.get_shape().ndims - 1]
return tf.div(exp_tensor,
tf.reduce_sum(
exp_tensor, axis=reduction_indices, keep_dims=True))
def optimizer(optimizer="adam", learning_rate=.001, momentum=.9):
if optimizer == 'adagrad':
train_op = tf.train.AdagradOptimizer(learning_rate)
elif optimizer == 'adam':
train_op = tf.train.AdamOptimizer(learning_rate)
elif optimizer == 'momentum':
train_op = tf.train.MomentumOptimizer(learning_rate, momentum)
elif optimizer == 'rmsprop':
train_op = tf.train.RMSPropOptimizer(learning_rate, momentum)
elif optimizer == 'sgd':
train_op = tf.train.GradientDescentOptimizer(learning_rate)
else:
raise NotImplementedError('Unsupported optimizer %s' % optimizer)
return train_op
| true | true |
f7fd182a41dd01b7734166c0b90a9b2327e95a01 | 896 | py | Python | 235-lowest-common-ancestor-of-a-binary-search-tree/235-lowest-common-ancestor-of-a-binary-search-tree.py | jurayev/data-structures-algorithms-solutions | 7103294bafb60117fc77efe4913edcffbeb1ac7a | [
"MIT"
] | null | null | null | 235-lowest-common-ancestor-of-a-binary-search-tree/235-lowest-common-ancestor-of-a-binary-search-tree.py | jurayev/data-structures-algorithms-solutions | 7103294bafb60117fc77efe4913edcffbeb1ac7a | [
"MIT"
] | null | null | null | 235-lowest-common-ancestor-of-a-binary-search-tree/235-lowest-common-ancestor-of-a-binary-search-tree.py | jurayev/data-structures-algorithms-solutions | 7103294bafb60117fc77efe4913edcffbeb1ac7a | [
"MIT"
] | null | null | null | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
        """Return the lowest common ancestor of p and q in a BST.

        Exploits the BST ordering: descending from the root, the first node
        whose value lies between p.val and q.val (inclusive) is the LCA.

        Time Complexity:
            O(h) where h is the tree height -- O(log N) average (balanced),
            O(N) worst case.
        Space Complexity:
            O(1) -- the iterative descent replaces the original recursion,
            removing its O(h) call-stack usage.
        """
        return self.search(root, p.val, q.val)

    def search(self, root, min_val, max_val):
        """Descend from `root` to the split point of min_val and max_val."""
        node = root
        while node:
            if min_val < node.val and max_val < node.val:
                node = node.left    # both targets lie in the left subtree
            elif min_val > node.val and max_val > node.val:
                node = node.right   # both targets lie in the right subtree
            else:
                return node         # values diverge here: this is the LCA
        return None
class Solution:
    def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
        """Return the lowest common ancestor of nodes p and q in a BST."""
        return self.search(root, p.val, q.val)

    def search(self, root, min_val, max_val):
        """Walk down from `root` to the node where min_val and max_val diverge."""
        cur = root
        while cur:
            both_left = min_val < cur.val and max_val < cur.val
            both_right = min_val > cur.val and max_val > cur.val
            if both_left:
                cur = cur.left
            elif both_right:
                cur = cur.right
            else:
                return cur
        return None
f7fd18c02af864706e31f178877e39b514fb78cc | 5,166 | py | Python | dns_check/check.py | chadharvey/integrations-core | 7f97b3f1b10f37c01cd598640145444072cd7aec | [
"BSD-3-Clause"
] | 1 | 2021-06-17T20:22:35.000Z | 2021-06-17T20:22:35.000Z | dns_check/check.py | chadharvey/integrations-core | 7f97b3f1b10f37c01cd598640145444072cd7aec | [
"BSD-3-Clause"
] | null | null | null | dns_check/check.py | chadharvey/integrations-core | 7f97b3f1b10f37c01cd598640145444072cd7aec | [
"BSD-3-Clause"
] | 1 | 2020-02-12T02:15:27.000Z | 2020-02-12T02:15:27.000Z | # (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import time
# 3p
import dns.resolver
# project
from utils.platform import Platform
from checks.network_checks import NetworkCheck, Status
# These imports are necessary because otherwise dynamic type
# resolution will fail on windows without it.
# See more here: https://github.com/rthalley/dnspython/issues/39.
if Platform.is_win32():
from dns.rdtypes.ANY import * # noqa
from dns.rdtypes.IN import * # noqa
# for tiny time deltas, time.time on Windows reports the same value
# of the clock more than once, causing the computation of response_time
# to be often 0; let's use time.clock that is more precise.
time_func = time.clock
else:
time_func = time.time
class BadConfException(Exception):
pass
class DNSCheck(NetworkCheck):
    """Agent check that resolves a hostname and reports timing and status."""

    SERVICE_CHECK_NAME = 'dns.can_resolve'
    DEFAULT_TIMEOUT = 5

    def __init__(self, name, init_config, agentConfig, instances):
        # Now that the DNS check is a Network check, we must provide a `name` for
        # each instance before calling NetworkCheck, to stay backwards compatible
        # with old yaml configurations.
        for idx, inst in enumerate(instances):
            inst.setdefault('name', 'dns-check-%s' % idx)

        NetworkCheck.__init__(self, name, init_config, agentConfig, instances)
        self.default_timeout = init_config.get('default_timeout', self.DEFAULT_TIMEOUT)

    def _load_conf(self, instance):
        """Extract and validate per-instance settings.

        Returns:
            (hostname, timeout, nameserver, record_type, resolver) tuple.
        Raises:
            BadConfException: if no hostname is configured.
        """
        hostname = instance.get('hostname')
        if not hostname:
            raise BadConfException('A valid "hostname" must be specified')

        resolver = dns.resolver.Resolver()

        # If a specific DNS server was defined use it, else use the system default
        nameserver = instance.get('nameserver')
        nameserver_port = instance.get('nameserver_port')
        if nameserver is not None:
            resolver.nameservers = [nameserver]
        if nameserver_port is not None:
            resolver.port = nameserver_port

        timeout = float(instance.get('timeout', self.default_timeout))
        resolver.lifetime = timeout
        record_type = instance.get('record_type', 'A')

        return hostname, timeout, nameserver, record_type, resolver

    def _check(self, instance):
        """Run one DNS query and report its duration as a gauge.

        Returns a (Status, message) tuple consumed by NetworkCheck.
        """
        hostname, timeout, nameserver, record_type, resolver = self._load_conf(instance)

        # Perform the DNS query, and report its duration as a gauge
        response_time = 0
        t0 = time_func()

        try:
            self.log.debug('Querying "{0}" record for hostname "{1}"...'.format(record_type, hostname))
            if record_type == "NXDOMAIN":
                # Pseudo record type: the check succeeds only when the
                # hostname does NOT resolve.
                try:
                    resolver.query(hostname)
                except dns.resolver.NXDOMAIN:
                    pass
                else:
                    raise AssertionError("Expected an NXDOMAIN, got a result.")
            else:
                answer = resolver.query(hostname, rdtype=record_type)
                assert(answer.rrset.items[0].to_text())
            response_time = time_func() - t0

        except dns.exception.Timeout:
            self.log.error('DNS resolution of {0} timed out'.format(hostname))
            return Status.CRITICAL, 'DNS resolution of {0} timed out'.format(hostname)

        except Exception:
            self.log.exception('DNS resolution of {0} has failed.'.format(hostname))
            return Status.CRITICAL, 'DNS resolution of {0} has failed'.format(hostname)

        else:
            tags = self._get_tags(instance)
            if response_time > 0:
                self.gauge('dns.response_time', response_time, tags=tags)
            self.log.debug('Resolved hostname: {0}'.format(hostname))
            return Status.UP, 'UP'

    def _get_tags(self, instance):
        """Build the tag list attached to metrics and service checks.

        BUG FIX: previously, when no nameserver was configured and the host
        resolver had an empty nameserver list, the caught IndexError left
        `nameserver` unbound and the tag construction below raised NameError.
        It now falls back to 'unknown'. The dead `tags.append(...)` that was
        immediately overwritten has also been removed.
        """
        hostname = instance.get('hostname')
        instance_name = instance.get('name', hostname)
        record_type = instance.get('record_type', 'A')
        custom_tags = instance.get('tags', [])

        nameserver = instance.get('nameserver')
        if not nameserver:
            try:
                nameserver = dns.resolver.Resolver().nameservers[0]
            except IndexError:
                self.log.error('No DNS server was found on this host.')
                nameserver = 'unknown'

        return custom_tags + ['nameserver:{0}'.format(nameserver),
                              'resolved_hostname:{0}'.format(hostname),
                              'instance:{0}'.format(instance_name),
                              'record_type:{0}'.format(record_type)]

    def report_as_service_check(self, sc_name, status, instance, msg=None):
        """Report the result as a 'dns.can_resolve' service check."""
        tags = self._get_tags(instance)
        instance['skip_event'] = True
        if status == Status.UP:
            msg = None

        self.service_check(self.SERVICE_CHECK_NAME,
                           NetworkCheck.STATUS_TO_SERVICE_CHECK[status],
                           tags=tags,
                           message=msg
                           )
| 36.638298 | 103 | 0.61595 |
import time
import dns.resolver
from utils.platform import Platform
from checks.network_checks import NetworkCheck, Status
if Platform.is_win32():
from dns.rdtypes.ANY import *
from dns.rdtypes.IN import *
time_func = time.clock
else:
time_func = time.time
class BadConfException(Exception):
pass
class DNSCheck(NetworkCheck):
SERVICE_CHECK_NAME = 'dns.can_resolve'
DEFAULT_TIMEOUT = 5
def __init__(self, name, init_config, agentConfig, instances):
# Now that the DNS check is a Network check, we must provide a `name` for each
# instance before calling NetworkCheck to make backwards compatible with old yaml.
for idx, inst in enumerate(instances):
try:
inst['name'] = inst['name']
except KeyError:
inst['name'] = 'dns-check-%s' % idx
NetworkCheck.__init__(self, name, init_config, agentConfig, instances)
self.default_timeout = init_config.get('default_timeout', self.DEFAULT_TIMEOUT)
def _load_conf(self, instance):
# Fetches the conf
hostname = instance.get('hostname')
if not hostname:
raise BadConfException('A valid "hostname" must be specified')
resolver = dns.resolver.Resolver()
# If a specific DNS server was defined use it, else use the system default
nameserver = instance.get('nameserver')
nameserver_port = instance.get('nameserver_port')
if nameserver is not None:
resolver.nameservers = [nameserver]
if nameserver_port is not None:
resolver.port = nameserver_port
timeout = float(instance.get('timeout', self.default_timeout))
resolver.lifetime = timeout
record_type = instance.get('record_type', 'A')
return hostname, timeout, nameserver, record_type, resolver
def _check(self, instance):
hostname, timeout, nameserver, record_type, resolver = self._load_conf(instance)
# Perform the DNS query, and report its duration as a gauge
response_time = 0
t0 = time_func()
try:
self.log.debug('Querying "{0}" record for hostname "{1}"...'.format(record_type, hostname))
if record_type == "NXDOMAIN":
try:
resolver.query(hostname)
except dns.resolver.NXDOMAIN:
pass
else:
raise AssertionError("Expected an NXDOMAIN, got a result.")
else:
answer = resolver.query(hostname, rdtype=record_type)
assert(answer.rrset.items[0].to_text())
response_time = time_func() - t0
except dns.exception.Timeout:
self.log.error('DNS resolution of {0} timed out'.format(hostname))
return Status.CRITICAL, 'DNS resolution of {0} timed out'.format(hostname)
except Exception:
self.log.exception('DNS resolution of {0} has failed.'.format(hostname))
return Status.CRITICAL, 'DNS resolution of {0} has failed'.format(hostname)
else:
tags = self._get_tags(instance)
if response_time > 0:
self.gauge('dns.response_time', response_time, tags=tags)
self.log.debug('Resolved hostname: {0}'.format(hostname))
return Status.UP, 'UP'
def _get_tags(self, instance):
hostname = instance.get('hostname')
instance_name = instance.get('name', hostname)
record_type = instance.get('record_type', 'A')
custom_tags = instance.get('tags', [])
tags = []
try:
nameserver = instance.get('nameserver') or dns.resolver.Resolver().nameservers[0]
tags.append('nameserver:{0}'.format(nameserver))
except IndexError:
self.log.error('No DNS server was found on this host.')
tags = custom_tags + ['nameserver:{0}'.format(nameserver),
'resolved_hostname:{0}'.format(hostname),
'instance:{0}'.format(instance_name),
'record_type:{0}'.format(record_type)]
return tags
def report_as_service_check(self, sc_name, status, instance, msg=None):
tags = self._get_tags(instance)
instance['skip_event'] = True
if status == Status.UP:
msg = None
self.service_check(self.SERVICE_CHECK_NAME,
NetworkCheck.STATUS_TO_SERVICE_CHECK[status],
tags=tags,
message=msg
)
| true | true |
f7fd18cf61a2824d85c7b91dde34e6e46449ba95 | 14,289 | py | Python | Hyperparameters.py | thiagofigcosta/stock-pred-v2 | a2905a2aaf87f083772c9c416aa755f7cf604319 | [
"MIT"
] | null | null | null | Hyperparameters.py | thiagofigcosta/stock-pred-v2 | a2905a2aaf87f083772c9c416aa755f7cf604319 | [
"MIT"
] | null | null | null | Hyperparameters.py | thiagofigcosta/stock-pred-v2 | a2905a2aaf87f083772c9c416aa755f7cf604319 | [
"MIT"
] | null | null | null | #!/bin/python3
# -*- coding: utf-8 -*-
import hashlib
import math
import json
from Enums import Features
from Utils import Utils
class Hyperparameters:
REGRESSION_OUTPUT_ACTIVATION_FUNCTION='linear'
BINARY_OUTPUT_ACTIVATION_FUNCTION='sigmoid'
def __init__(self,name='',input_features=['Close'],output_feature='Close',index_feature='Date',backwards_samples=20,forward_samples=7,lstm_layers=2,max_epochs=200,patience_epochs_stop=10,patience_epochs_reduce=10,reduce_factor=.1,batch_size=5,stateful=False,dropout_values=[0,0],layer_sizes=[25,15],normalize=True,optimizer='rmsprop',model_metrics=['R2','mean_squared_error','mean_absolute_error','accuracy','cosine_similarity'],loss='mean_squared_error',train_percent=.8,val_percent=.2,amount_companies=1,shuffle=True,activation_functions='tanh',recurrent_activation_functions='sigmoid',bias=True,use_dense_on_output=False,unit_forget_bias=True,go_backwards=False,recurrent_dropout_values=0,binary_classifier=False):
input_features=Utils.parseInputFeaturesForHyperparameters(input_features)
output_feature=Utils.parseOutputFeatureForHyperparameters(output_feature)
index_feature=Utils.parseIndexFeatureForHyperparameters(index_feature)
model_metrics=Utils.parseMetricsForHyperparameters(model_metrics)
loss=Utils.parseLossForHyperparameters(loss)
activation_functions=Utils.parseNodeTypeForHyperparameters(activation_functions)
recurrent_activation_functions=Utils.parseNodeTypeForHyperparameters(recurrent_activation_functions)
optimizer=Utils.parseOptimizerForHyperparameters(optimizer)
self.uuid=None
self.name=name
self.backwards_samples=backwards_samples # [5, 60]
self.forward_samples=forward_samples # [5, 14]
self.lstm_layers=lstm_layers # [1, 4]
self.max_epochs=max_epochs # [10, 200]
self.patience_epochs_stop=patience_epochs_stop # [10, 20]
self.patience_epochs_reduce=patience_epochs_reduce # [10, 20]
self.reduce_factor=reduce_factor # [0.0, 0.5]
self.batch_size=batch_size # [1, 22]
self.stateful=stateful # [False, True] or [0, 1]
self.dropout_values=dropout_values # [0.0, 1.0] * lstm_layers
self.layer_sizes=layer_sizes # [10, 80] * lstm_layers (two formulas to approximate that)
self.normalize=normalize # [False, True] or [0, 1] - or always True
self.optimizer=optimizer # 'adam' or 'sgd' or 'rmsprop'
self.model_metrics=model_metrics # always ['mean_squared_error','mean_absolute_error','accuracy','cosine_similarity']
self.loss=loss # always 'mean_squared_error'
self.activation_functions=activation_functions # ['tanh','relu','sigmoid'] * lstm_layers
self.recurrent_activation_functions=recurrent_activation_functions # ['tanh','relu','sigmoid'] * lstm_layers
self.train_percent=train_percent # [0.6, 0.9] - depends on dataset size
self.val_percent=val_percent # [0.1, 0.3] - depends on dataset size
self.amount_companies=amount_companies # depends on the problem to be solved
self.input_features=list(dict.fromkeys(input_features)) # depends on the features, but great chance of being ['Close']
self.output_feature=output_feature # depends on the features, but super huge great hyper chance of being 'Close'
self.index_feature=index_feature # depends on the features, but super huge great hyper chance of being 'Date'
self.shuffle=shuffle # [False, True] or [0, 1]
self.bias=bias # [False, True] * lstm_layers
self.use_dense_on_output=use_dense_on_output # [False, True]
self.unit_forget_bias=unit_forget_bias # [False, True] * lstm_layers
self.go_backwards=go_backwards # [False, True] * lstm_layers
self.recurrent_dropout_values=recurrent_dropout_values # [0.0, 1.0] * lstm_layers
self.binary_classifier=binary_classifier
if type(self.bias)==bool:
self.bias=[self.bias]*self.lstm_layers
if type(self.unit_forget_bias)==bool:
self.unit_forget_bias=[self.unit_forget_bias]*self.lstm_layers
if type(self.go_backwards)==bool:
self.go_backwards=[self.go_backwards]*self.lstm_layers
if type(self.dropout_values) in (int,float):
self.dropout_values=[self.dropout_values]*self.lstm_layers
if type(self.recurrent_dropout_values) in (int,float):
self.recurrent_dropout_values=[self.recurrent_dropout_values]*self.lstm_layers
if type(self.layer_sizes)==int:
self.layer_sizes=[self.layer_sizes]*self.lstm_layers
if type(self.activation_functions)==str:
self.activation_functions=[self.activation_functions]*self.lstm_layers
if type(self.recurrent_activation_functions)==str:
self.recurrent_activation_functions=[self.recurrent_activation_functions]*self.lstm_layers
if len(self.dropout_values)!=self.lstm_layers:
raise Exception('Wrong dropout_values array size, should be {} instead of {}'.format(self.lstm_layers,len(self.dropout_values)))
if len(self.layer_sizes)!=self.lstm_layers and not (self.layer_sizes[0]==backwards_samples and len(self.layer_sizes)==self.lstm_layers+1):
raise Exception('Wrong layer_sizes array size, should be {}'.format(self.lstm_layers))
if len(self.activation_functions)!=self.lstm_layers:
raise Exception('Wrong activation_functions array size, should be {}'.format(self.lstm_layers))
if len(self.recurrent_activation_functions)!=self.lstm_layers:
raise Exception('Wrong recurrent_activation_functions array size, should be {}'.format(self.lstm_layers))
if len(self.bias)!=self.lstm_layers:
raise Exception('Wrong bias array size, should be {}'.format(self.lstm_layers))
if len(self.unit_forget_bias)!=self.lstm_layers:
raise Exception('Wrong unit_forget_bias array size, should be {}'.format(self.lstm_layers))
if len(self.go_backwards)!=self.lstm_layers:
raise Exception('Wrong go_backwards array size, should be {}'.format(self.lstm_layers))
if len(self.recurrent_dropout_values)!=self.lstm_layers:
raise Exception('Wrong recurrent_dropout_values array size, should be {}'.format(self.lstm_layers))
if len(self.input_features)>1 and self.amount_companies>1:
raise Exception('Only input_features or amount_companies must be greater than 1')
if self.val_percent>1 or self.train_percent>1 or self.val_percent<0 or self.train_percent<0:
raise Exception('Train + validation percent must be smaller than 1 and bigger than 0')
# constraints
if self.batch_size%2!=0:
self.batch_size+=1 # only even batch_sizes
# patiences cannot be higher than epochs
self.patience_epochs_stop=max(self.patience_epochs_stop,self.max_epochs)
self.patience_epochs_reduce=max(self.patience_epochs_reduce,self.max_epochs)
if self.stateful and (self.batch_size is None or self.batch_size == 0):
self.batch_size=1 # batch size must be one for stateful
if len(self.layer_sizes)==self.lstm_layers:
self.layer_sizes.insert(0,self.backwards_samples)
self.genAndSetUuid()
def copy(self):
name=self.name
backwards_samples=self.backwards_samples
forward_samples=self.forward_samples
lstm_layers=self.lstm_layers
max_epochs=self.max_epochs
patience_epochs_stop=self.patience_epochs_stop
patience_epochs_reduce=self.patience_epochs_reduce
reduce_factor=self.reduce_factor
batch_size=self.batch_size
stateful=self.stateful
dropout_values=self.dropout_values.copy()
layer_sizes=self.layer_sizes.copy()
activation_functions=self.activation_functions.copy()
recurrent_activation_functions=self.recurrent_activation_functions.copy()
bias=self.bias.copy()
normalize=self.normalize
optimizer=self.optimizer
model_metrics=self.model_metrics.copy()
loss=self.loss
train_percent=self.train_percent
val_percent=self.val_percent
amount_companies=self.amount_companies
input_features=self.input_features.copy()
output_feature=self.output_feature
index_feature=self.index_feature
shuffle=self.shuffle
use_dense_on_output=self.use_dense_on_output
unit_forget_bias=self.unit_forget_bias.copy()
go_backwards=self.go_backwards.copy()
binary_classifier=self.binary_classifier
recurrent_dropout_values=self.recurrent_dropout_values.copy()
new_hyperparams=Hyperparameters(name=name,input_features=input_features,output_feature=output_feature,index_feature=index_feature,backwards_samples=backwards_samples,forward_samples=forward_samples,lstm_layers=lstm_layers,max_epochs=max_epochs,patience_epochs_stop=patience_epochs_stop,patience_epochs_reduce=patience_epochs_reduce,reduce_factor=reduce_factor,batch_size=batch_size,stateful=stateful,dropout_values=dropout_values,layer_sizes=layer_sizes,normalize=normalize,optimizer=optimizer,model_metrics=model_metrics,loss=loss,train_percent=train_percent,val_percent=val_percent,amount_companies=amount_companies,shuffle=shuffle,activation_functions=activation_functions,recurrent_activation_functions=recurrent_activation_functions,bias=bias,use_dense_on_output=use_dense_on_output,unit_forget_bias=unit_forget_bias,go_backwards=go_backwards,recurrent_dropout_values=recurrent_dropout_values,binary_classifier=binary_classifier)
return new_hyperparams
def toString(self):
string=''
string+='name: {}'.format(self.name)+', '
string+='use_dense_on_output: {}'.format(self.use_dense_on_output)+', '
string+='backwards_samples: {}'.format(self.backwards_samples)+', '
string+='forward_samples: {}'.format(self.forward_samples)+', '
string+='lstm_layers: {}'.format(self.lstm_layers)+', '
string+='max_epochs: {}'.format(self.max_epochs)+', '
string+='patience_epochs_stop: {}'.format(self.patience_epochs_stop)+', '
string+='patience_epochs_reduce: {}'.format(self.patience_epochs_reduce)+', '
string+='reduce_factor: {}'.format(self.reduce_factor)+', '
string+='batch_size: {}'.format(self.batch_size)+', '
string+='stateful: {}'.format(self.stateful)+', '
string+='dropout_values: {}'.format(self.dropout_values)+', '
string+='layer_sizes: {}'.format(self.layer_sizes)+', '
string+='activation_functions: {}'.format(self.activation_functions)+', '
string+='recurrent_activation_functions: {}'.format(self.recurrent_activation_functions)+', '
string+='unit_forget_bias: {}'.format(self.unit_forget_bias)+', '
string+='go_backwards: {}'.format(self.go_backwards)+', '
string+='recurrent_dropout_values: {}'.format(self.recurrent_dropout_values)+', '
string+='bias: {}'.format(self.bias)+', '
string+='normalize: {}'.format(self.normalize)+', '
string+='optimizer: {}'.format(self.optimizer)+', '
string+='model_metrics: {}'.format(self.model_metrics)+', '
string+='loss: {}'.format(self.loss)+', '
string+='train_percent: {}'.format(self.train_percent)+', '
string+='val_percent: {}'.format(self.val_percent)+', '
string+='amount_companies: {}'.format(self.amount_companies)+', '
string+='input_features: {}'.format(self.input_features)+', '
string+='output_feature: {}'.format(self.output_feature)+', '
string+='index_feature: {}'.format(self.index_feature)+', '
string+='shuffle: {}'.format(self.shuffle)
return string
    def genAndSetUuid(self,low_resolution=False):
        # Refresh self.uuid from the current field values (called after any
        # change that affects toString(), e.g. setName()).
        self.uuid=self.genAndGetUuid(low_resolution=low_resolution)
def genAndGetUuid(self,low_resolution=False):
to_hash=self.toString().encode('utf-8')
if low_resolution:
hash_object=hashlib.md5(to_hash)
else:
hash_object=hashlib.sha256(to_hash)
return hash_object.hexdigest()
@staticmethod
def jsonDecoder(obj):
if '__type__' in obj and obj['__type__'] == 'Hyperparameters':
return Hyperparameters(name=obj['name'],input_features=obj['input_features'],output_feature=obj['output_feature'],index_feature=obj['index_feature'],backwards_samples=obj['backwards_samples'],forward_samples=obj['forward_samples'],lstm_layers=obj['lstm_layers'],max_epochs=obj['max_epochs'],patience_epochs_stop=obj['patience_epochs_stop'],patience_epochs_reduce=obj['patience_epochs_reduce'],reduce_factor=obj['reduce_factor'],batch_size=obj['batch_size'],stateful=obj['stateful'],dropout_values=obj['dropout_values'],layer_sizes=obj['layer_sizes'],normalize=obj['normalize'],optimizer=obj['optimizer'],model_metrics=obj['model_metrics'],loss=obj['loss'],train_percent=obj['train_percent'],val_percent=obj['val_percent'],amount_companies=obj['amount_companies'],shuffle=obj['shuffle'],activation_functions=obj['activation_functions'],recurrent_activation_functions=obj['recurrent_activation_functions'],bias=obj['bias'],use_dense_on_output=obj['use_dense_on_output'],unit_forget_bias=obj['unit_forget_bias'],go_backwards=obj['go_backwards'],recurrent_dropout_values=obj['recurrent_dropout_values'],binary_classifier=obj['binary_classifier'])
return obj
    @staticmethod
    def loadJson(path):
        # Counterpart of saveJson(): jsonDecoder turns the tagged dict back
        # into a Hyperparameters instance during parsing.
        with open(path, 'r') as fp :
            return json.load(fp,object_hook=Hyperparameters.jsonDecoder)
def saveJson(self,path):
hyperparameters_dict=self.__dict__
hyperparameters_dict['__type__']='Hyperparameters'
Utils.saveJson(hyperparameters_dict,path)
@staticmethod
def estimateLayerOutputSize(layer_input_size,network_output_size,train_data_size=0,a=2,second_formula=False):
if not second_formula:
return int(math.ceil(train_data_size/(a*(layer_input_size+network_output_size))))
else:
return int(math.ceil(2/3*(layer_input_size+network_output_size)))
    def setName(self,name):
        # The name is part of toString(), so the uuid must be regenerated.
        self.name=name
        self.genAndSetUuid()
@staticmethod
def valueToClass(value):
threshold=.5
return 1 if float(value)>threshold else 0
    @staticmethod
    def getFeatureGroups():
        # Predefined bundles of input features used for experiments.
        # NOTE(review): index 2 is an empty group — presumably means "use the
        # caller's default features"; confirm against the experiment runner.
        return [
            [Features.OC,Features.OH,Features.OL,Features.CH,Features.CL,Features.LH,Features.VOLUME,Features.LOG_RETURN,Features.FAST_EXP_MOVING_AVG,Features.SLOW_EXP_MOVING_AVG],
            [Features.OC,Features.OH,Features.OL,Features.CH,Features.CL,Features.LH,Features.VOLUME,Features.LOG_RETURN,Features.FAST_EXP_MOVING_AVG,Features.SLOW_EXP_MOVING_AVG,Features.ADJ_CLOSE,Features.FAST_MOVING_AVG,Features.SLOW_MOVING_AVG],
            [],
            [Features.OPEN,Features.HIGH,Features.LOW,Features.ADJ_CLOSE,Features.VOLUME],
            [Features.OPEN,Features.HIGH,Features.LOW,Features.ADJ_CLOSE,Features.VOLUME,Features.OC,Features.OH,Features.OL,Features.CH,Features.CL,Features.LH,Features.FAST_EXP_MOVING_AVG,Features.SLOW_EXP_MOVING_AVG,Features.LOG_RETURN,Features.FAST_MOVING_AVG,Features.SLOW_MOVING_AVG]
        ]
| 62.39738 | 1,146 | 0.78284 |
import hashlib
import math
import json
from Enums import Features
from Utils import Utils
class Hyperparameters:
REGRESSION_OUTPUT_ACTIVATION_FUNCTION='linear'
BINARY_OUTPUT_ACTIVATION_FUNCTION='sigmoid'
def __init__(self,name='',input_features=['Close'],output_feature='Close',index_feature='Date',backwards_samples=20,forward_samples=7,lstm_layers=2,max_epochs=200,patience_epochs_stop=10,patience_epochs_reduce=10,reduce_factor=.1,batch_size=5,stateful=False,dropout_values=[0,0],layer_sizes=[25,15],normalize=True,optimizer='rmsprop',model_metrics=['R2','mean_squared_error','mean_absolute_error','accuracy','cosine_similarity'],loss='mean_squared_error',train_percent=.8,val_percent=.2,amount_companies=1,shuffle=True,activation_functions='tanh',recurrent_activation_functions='sigmoid',bias=True,use_dense_on_output=False,unit_forget_bias=True,go_backwards=False,recurrent_dropout_values=0,binary_classifier=False):
input_features=Utils.parseInputFeaturesForHyperparameters(input_features)
output_feature=Utils.parseOutputFeatureForHyperparameters(output_feature)
index_feature=Utils.parseIndexFeatureForHyperparameters(index_feature)
model_metrics=Utils.parseMetricsForHyperparameters(model_metrics)
loss=Utils.parseLossForHyperparameters(loss)
activation_functions=Utils.parseNodeTypeForHyperparameters(activation_functions)
recurrent_activation_functions=Utils.parseNodeTypeForHyperparameters(recurrent_activation_functions)
optimizer=Utils.parseOptimizerForHyperparameters(optimizer)
self.uuid=None
self.name=name
self.backwards_samples=backwards_samples
self.forward_samples=forward_samples
self.lstm_layers=lstm_layers
self.max_epochs=max_epochs
self.patience_epochs_stop=patience_epochs_stop
self.patience_epochs_reduce=patience_epochs_reduce
self.reduce_factor=reduce_factor
self.batch_size=batch_size
self.stateful=stateful
self.dropout_values=dropout_values
self.layer_sizes=layer_sizes
self.normalize=normalize
self.optimizer=optimizer
self.model_metrics=model_metrics
self.loss=loss
self.activation_functions=activation_functions
self.recurrent_activation_functions=recurrent_activation_functions
self.train_percent=train_percent
self.val_percent=val_percent
self.amount_companies=amount_companies
self.input_features=list(dict.fromkeys(input_features))
self.output_feature=output_feature
self.index_feature=index_feature
self.shuffle=shuffle
self.bias=bias
self.use_dense_on_output=use_dense_on_output
self.unit_forget_bias=unit_forget_bias
self.go_backwards=go_backwards
self.recurrent_dropout_values=recurrent_dropout_values
self.binary_classifier=binary_classifier
if type(self.bias)==bool:
self.bias=[self.bias]*self.lstm_layers
if type(self.unit_forget_bias)==bool:
self.unit_forget_bias=[self.unit_forget_bias]*self.lstm_layers
if type(self.go_backwards)==bool:
self.go_backwards=[self.go_backwards]*self.lstm_layers
if type(self.dropout_values) in (int,float):
self.dropout_values=[self.dropout_values]*self.lstm_layers
if type(self.recurrent_dropout_values) in (int,float):
self.recurrent_dropout_values=[self.recurrent_dropout_values]*self.lstm_layers
if type(self.layer_sizes)==int:
self.layer_sizes=[self.layer_sizes]*self.lstm_layers
if type(self.activation_functions)==str:
self.activation_functions=[self.activation_functions]*self.lstm_layers
if type(self.recurrent_activation_functions)==str:
self.recurrent_activation_functions=[self.recurrent_activation_functions]*self.lstm_layers
if len(self.dropout_values)!=self.lstm_layers:
raise Exception('Wrong dropout_values array size, should be {} instead of {}'.format(self.lstm_layers,len(self.dropout_values)))
if len(self.layer_sizes)!=self.lstm_layers and not (self.layer_sizes[0]==backwards_samples and len(self.layer_sizes)==self.lstm_layers+1):
raise Exception('Wrong layer_sizes array size, should be {}'.format(self.lstm_layers))
if len(self.activation_functions)!=self.lstm_layers:
raise Exception('Wrong activation_functions array size, should be {}'.format(self.lstm_layers))
if len(self.recurrent_activation_functions)!=self.lstm_layers:
raise Exception('Wrong recurrent_activation_functions array size, should be {}'.format(self.lstm_layers))
if len(self.bias)!=self.lstm_layers:
raise Exception('Wrong bias array size, should be {}'.format(self.lstm_layers))
if len(self.unit_forget_bias)!=self.lstm_layers:
raise Exception('Wrong unit_forget_bias array size, should be {}'.format(self.lstm_layers))
if len(self.go_backwards)!=self.lstm_layers:
raise Exception('Wrong go_backwards array size, should be {}'.format(self.lstm_layers))
if len(self.recurrent_dropout_values)!=self.lstm_layers:
raise Exception('Wrong recurrent_dropout_values array size, should be {}'.format(self.lstm_layers))
if len(self.input_features)>1 and self.amount_companies>1:
raise Exception('Only input_features or amount_companies must be greater than 1')
if self.val_percent>1 or self.train_percent>1 or self.val_percent<0 or self.train_percent<0:
raise Exception('Train + validation percent must be smaller than 1 and bigger than 0')
if self.batch_size%2!=0:
self.batch_size+=1
self.patience_epochs_stop=max(self.patience_epochs_stop,self.max_epochs)
self.patience_epochs_reduce=max(self.patience_epochs_reduce,self.max_epochs)
if self.stateful and (self.batch_size is None or self.batch_size == 0):
self.batch_size=1
if len(self.layer_sizes)==self.lstm_layers:
self.layer_sizes.insert(0,self.backwards_samples)
self.genAndSetUuid()
def copy(self):
name=self.name
backwards_samples=self.backwards_samples
forward_samples=self.forward_samples
lstm_layers=self.lstm_layers
max_epochs=self.max_epochs
patience_epochs_stop=self.patience_epochs_stop
patience_epochs_reduce=self.patience_epochs_reduce
reduce_factor=self.reduce_factor
batch_size=self.batch_size
stateful=self.stateful
dropout_values=self.dropout_values.copy()
layer_sizes=self.layer_sizes.copy()
activation_functions=self.activation_functions.copy()
recurrent_activation_functions=self.recurrent_activation_functions.copy()
bias=self.bias.copy()
normalize=self.normalize
optimizer=self.optimizer
model_metrics=self.model_metrics.copy()
loss=self.loss
train_percent=self.train_percent
val_percent=self.val_percent
amount_companies=self.amount_companies
input_features=self.input_features.copy()
output_feature=self.output_feature
index_feature=self.index_feature
shuffle=self.shuffle
use_dense_on_output=self.use_dense_on_output
unit_forget_bias=self.unit_forget_bias.copy()
go_backwards=self.go_backwards.copy()
binary_classifier=self.binary_classifier
recurrent_dropout_values=self.recurrent_dropout_values.copy()
new_hyperparams=Hyperparameters(name=name,input_features=input_features,output_feature=output_feature,index_feature=index_feature,backwards_samples=backwards_samples,forward_samples=forward_samples,lstm_layers=lstm_layers,max_epochs=max_epochs,patience_epochs_stop=patience_epochs_stop,patience_epochs_reduce=patience_epochs_reduce,reduce_factor=reduce_factor,batch_size=batch_size,stateful=stateful,dropout_values=dropout_values,layer_sizes=layer_sizes,normalize=normalize,optimizer=optimizer,model_metrics=model_metrics,loss=loss,train_percent=train_percent,val_percent=val_percent,amount_companies=amount_companies,shuffle=shuffle,activation_functions=activation_functions,recurrent_activation_functions=recurrent_activation_functions,bias=bias,use_dense_on_output=use_dense_on_output,unit_forget_bias=unit_forget_bias,go_backwards=go_backwards,recurrent_dropout_values=recurrent_dropout_values,binary_classifier=binary_classifier)
return new_hyperparams
def toString(self):
string=''
string+='name: {}'.format(self.name)+', '
string+='use_dense_on_output: {}'.format(self.use_dense_on_output)+', '
string+='backwards_samples: {}'.format(self.backwards_samples)+', '
string+='forward_samples: {}'.format(self.forward_samples)+', '
string+='lstm_layers: {}'.format(self.lstm_layers)+', '
string+='max_epochs: {}'.format(self.max_epochs)+', '
string+='patience_epochs_stop: {}'.format(self.patience_epochs_stop)+', '
string+='patience_epochs_reduce: {}'.format(self.patience_epochs_reduce)+', '
string+='reduce_factor: {}'.format(self.reduce_factor)+', '
string+='batch_size: {}'.format(self.batch_size)+', '
string+='stateful: {}'.format(self.stateful)+', '
string+='dropout_values: {}'.format(self.dropout_values)+', '
string+='layer_sizes: {}'.format(self.layer_sizes)+', '
string+='activation_functions: {}'.format(self.activation_functions)+', '
string+='recurrent_activation_functions: {}'.format(self.recurrent_activation_functions)+', '
string+='unit_forget_bias: {}'.format(self.unit_forget_bias)+', '
string+='go_backwards: {}'.format(self.go_backwards)+', '
string+='recurrent_dropout_values: {}'.format(self.recurrent_dropout_values)+', '
string+='bias: {}'.format(self.bias)+', '
string+='normalize: {}'.format(self.normalize)+', '
string+='optimizer: {}'.format(self.optimizer)+', '
string+='model_metrics: {}'.format(self.model_metrics)+', '
string+='loss: {}'.format(self.loss)+', '
string+='train_percent: {}'.format(self.train_percent)+', '
string+='val_percent: {}'.format(self.val_percent)+', '
string+='amount_companies: {}'.format(self.amount_companies)+', '
string+='input_features: {}'.format(self.input_features)+', '
string+='output_feature: {}'.format(self.output_feature)+', '
string+='index_feature: {}'.format(self.index_feature)+', '
string+='shuffle: {}'.format(self.shuffle)
return string
def genAndSetUuid(self,low_resolution=False):
self.uuid=self.genAndGetUuid(low_resolution=low_resolution)
def genAndGetUuid(self,low_resolution=False):
to_hash=self.toString().encode('utf-8')
if low_resolution:
hash_object=hashlib.md5(to_hash)
else:
hash_object=hashlib.sha256(to_hash)
return hash_object.hexdigest()
@staticmethod
def jsonDecoder(obj):
if '__type__' in obj and obj['__type__'] == 'Hyperparameters':
return Hyperparameters(name=obj['name'],input_features=obj['input_features'],output_feature=obj['output_feature'],index_feature=obj['index_feature'],backwards_samples=obj['backwards_samples'],forward_samples=obj['forward_samples'],lstm_layers=obj['lstm_layers'],max_epochs=obj['max_epochs'],patience_epochs_stop=obj['patience_epochs_stop'],patience_epochs_reduce=obj['patience_epochs_reduce'],reduce_factor=obj['reduce_factor'],batch_size=obj['batch_size'],stateful=obj['stateful'],dropout_values=obj['dropout_values'],layer_sizes=obj['layer_sizes'],normalize=obj['normalize'],optimizer=obj['optimizer'],model_metrics=obj['model_metrics'],loss=obj['loss'],train_percent=obj['train_percent'],val_percent=obj['val_percent'],amount_companies=obj['amount_companies'],shuffle=obj['shuffle'],activation_functions=obj['activation_functions'],recurrent_activation_functions=obj['recurrent_activation_functions'],bias=obj['bias'],use_dense_on_output=obj['use_dense_on_output'],unit_forget_bias=obj['unit_forget_bias'],go_backwards=obj['go_backwards'],recurrent_dropout_values=obj['recurrent_dropout_values'],binary_classifier=obj['binary_classifier'])
return obj
@staticmethod
def loadJson(path):
with open(path, 'r') as fp :
return json.load(fp,object_hook=Hyperparameters.jsonDecoder)
def saveJson(self,path):
hyperparameters_dict=self.__dict__
hyperparameters_dict['__type__']='Hyperparameters'
Utils.saveJson(hyperparameters_dict,path)
@staticmethod
def estimateLayerOutputSize(layer_input_size,network_output_size,train_data_size=0,a=2,second_formula=False):
if not second_formula:
return int(math.ceil(train_data_size/(a*(layer_input_size+network_output_size))))
else:
return int(math.ceil(2/3*(layer_input_size+network_output_size)))
def setName(self,name):
self.name=name
self.genAndSetUuid()
@staticmethod
def valueToClass(value):
threshold=.5
return 1 if float(value)>threshold else 0
@staticmethod
def getFeatureGroups():
return [
[Features.OC,Features.OH,Features.OL,Features.CH,Features.CL,Features.LH,Features.VOLUME,Features.LOG_RETURN,Features.FAST_EXP_MOVING_AVG,Features.SLOW_EXP_MOVING_AVG],
[Features.OC,Features.OH,Features.OL,Features.CH,Features.CL,Features.LH,Features.VOLUME,Features.LOG_RETURN,Features.FAST_EXP_MOVING_AVG,Features.SLOW_EXP_MOVING_AVG,Features.ADJ_CLOSE,Features.FAST_MOVING_AVG,Features.SLOW_MOVING_AVG],
[],
[Features.OPEN,Features.HIGH,Features.LOW,Features.ADJ_CLOSE,Features.VOLUME],
[Features.OPEN,Features.HIGH,Features.LOW,Features.ADJ_CLOSE,Features.VOLUME,Features.OC,Features.OH,Features.OL,Features.CH,Features.CL,Features.LH,Features.FAST_EXP_MOVING_AVG,Features.SLOW_EXP_MOVING_AVG,Features.LOG_RETURN,Features.FAST_MOVING_AVG,Features.SLOW_MOVING_AVG]
]
| true | true |
f7fd1a7c74e897ac68aa7ba5df6d5a96e7686531 | 5,979 | py | Python | backbones/aggregator/ECAPA-TDNN.py | gzhu06/TDspkr-mismatch-study | 1106a988e15a111646981c5b6fb30219d1ff6e8a | [
"MIT"
] | 3 | 2021-11-13T15:45:22.000Z | 2022-01-18T00:48:45.000Z | backbones/aggregator/ECAPA-TDNN.py | gzhu06/TDspkr-mismatch-study | 1106a988e15a111646981c5b6fb30219d1ff6e8a | [
"MIT"
] | null | null | null | backbones/aggregator/ECAPA-TDNN.py | gzhu06/TDspkr-mismatch-study | 1106a988e15a111646981c5b6fb30219d1ff6e8a | [
"MIT"
] | null | null | null | '''
Reference: https://github.com/lawlict/ECAPA-TDNN
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
''' Res2Conv1d + BatchNorm1d + ReLU
'''
class Res2Conv1dReluBn(nn.Module):
    '''
    Res2Net-style multi-scale block: per-split Conv1d + ReLU + BatchNorm1d.

    The input is split channel-wise into `scale` groups; each group after the
    first is summed with the previous group's output before its own conv,
    widening the effective receptive field at little extra cost.
    in_channels == out_channels == channels
    '''
    def __init__(self, channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False, scale=4):
        super().__init__()
        assert channels % scale == 0, "{} % {} != 0".format(channels, scale)
        self.scale = scale
        self.width = channels // scale
        # For scale > 1 the last split is passed through untouched, so only
        # scale - 1 conv/bn pairs are created.
        self.nums = scale if scale == 1 else scale - 1
        self.convs = []
        self.bns = []
        for i in range(self.nums):
            self.convs.append(nn.Conv1d(self.width, self.width, kernel_size, stride, padding, dilation, bias=bias))
            self.bns.append(nn.BatchNorm1d(self.width))
        # Rebind as ModuleList so the sub-modules are registered with the parent.
        self.convs = nn.ModuleList(self.convs)
        self.bns = nn.ModuleList(self.bns)
    def forward(self, x):
        # x: (batch, channels, time); output has the same shape.
        out = []
        spx = torch.split(x, self.width, 1)
        for i in range(self.nums):
            if i == 0:
                sp = spx[i]
            else:
                # Hierarchical residual: previous split's output feeds the next.
                sp = sp + spx[i]
            # Order: conv -> relu -> bn
            sp = self.convs[i](sp)
            sp = self.bns[i](F.relu(sp))
            out.append(sp)
        if self.scale != 1:
            # Concatenate the untouched last split back in.
            out.append(spx[self.nums])
        out = torch.cat(out, dim=1)
        return out
''' Conv1d + BatchNorm1d + ReLU
'''
class Conv1dReluBn(nn.Module):
    """Conv1d followed by ReLU then BatchNorm1d (note: relu *before* bn here)."""
    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False):
        super().__init__()
        self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias)
        self.bn = nn.BatchNorm1d(out_channels)
    def forward(self, x):
        activated = F.relu(self.conv(x))
        return self.bn(activated)
''' The SE connection of 1D case.
'''
class SE_Connect(nn.Module):
    """Squeeze-and-Excitation gate for 1D feature maps.

    Global-average-pools over time, squeezes channels by factor `s`, then
    rescales the input channel-wise with a sigmoid gate.
    """
    def __init__(self, channels, s=2):
        super().__init__()
        assert channels % s == 0, "{} % {} != 0".format(channels, s)
        self.linear1 = nn.Linear(channels, channels // s)
        self.linear2 = nn.Linear(channels // s, channels)
    def forward(self, x):
        squeezed = x.mean(dim=2)
        gate = torch.sigmoid(self.linear2(F.relu(self.linear1(squeezed))))
        return x * gate.unsqueeze(2)
''' SE-Res2Block.
Note: residual connection is implemented in the ECAPA_TDNN model, not here.
'''
def SE_Res2Block(channels, kernel_size, stride, padding, dilation, scale):
    # 1x1 conv -> multi-scale Res2 dilated conv -> 1x1 conv -> SE channel gate.
    # The residual connection is implemented in the ECAPA_TDNN model, not here.
    return nn.Sequential(
        Conv1dReluBn(channels, channels, kernel_size=1, stride=1, padding=0),
        Res2Conv1dReluBn(channels, kernel_size, stride, padding, dilation, scale=scale),
        Conv1dReluBn(channels, channels, kernel_size=1, stride=1, padding=0),
        SE_Connect(channels)
    )
''' Attentive weighted mean and standard deviation pooling.
'''
class AttentiveStatsPool(nn.Module):
    """Attentive statistics pooling: frame-level features -> (mean ++ std) vector.

    A small bottleneck network scores every frame; the softmax-normalized
    scores weight both the mean and the standard deviation over time, so the
    output has 2 * in_dim channels.
    """
    def __init__(self, in_dim, bottleneck_dim):
        super().__init__()
        # Conv1d with kernel 1 acts as a per-frame Linear, avoiding transposes.
        self.linear1 = nn.Conv1d(in_dim, bottleneck_dim, kernel_size=1)  # equals W and b in the paper
        self.linear2 = nn.Conv1d(bottleneck_dim, in_dim, kernel_size=1)  # equals V and k in the paper
    def forward(self, x):
        # tanh (not ReLU) on purpose: the original author found ReLU hard to converge.
        scores = torch.softmax(self.linear2(torch.tanh(self.linear1(x))), dim=2)
        weighted_mean = (scores * x).sum(dim=2)
        weighted_var = (scores * x ** 2).sum(dim=2) - weighted_mean ** 2
        # Clamp keeps the sqrt numerically safe when the variance underflows.
        weighted_std = weighted_var.clamp(min=1e-9).sqrt()
        return torch.cat([weighted_mean, weighted_std], dim=1)
''' Implementation of
"ECAPA-TDNN: Emphasized Channel Attention, Propagation and Aggregation in TDNN Based Speaker Verification".
Note that we DON'T concatenate the last frame-wise layer with non-weighted mean and standard deviation,
because it brings little improvment but significantly increases model parameters.
As a result, this implementation basically equals the A.2 of Table 2 in the paper.
'''
class ECAPA_TDNN(nn.Module):
    '''
    ECAPA-TDNN speaker-embedding network: frame-level features -> fixed-size embedding.

    NOTE(review): the input transpose in forward() is commented out, so callers
    must pass channel-first tensors of shape (batch, in_channels, time).
    '''
    def __init__(self, in_channels=80, channels=512, embd_dim=192):
        super().__init__()
        self.layer1 = Conv1dReluBn(in_channels, channels, kernel_size=5, padding=2)
        # Three SE-Res2 blocks with growing dilation; padding preserves time length.
        self.layer2 = SE_Res2Block(channels, kernel_size=3, stride=1, padding=2, dilation=2, scale=8)
        self.layer3 = SE_Res2Block(channels, kernel_size=3, stride=1, padding=3, dilation=3, scale=8)
        self.layer4 = SE_Res2Block(channels, kernel_size=3, stride=1, padding=4, dilation=4, scale=8)
        # Multi-layer feature aggregation: outputs of layers 2-4 are concatenated.
        cat_channels = channels * 3
        self.conv = nn.Conv1d(cat_channels, 1536, kernel_size=1)
        self.pooling = AttentiveStatsPool(1536, 128)
        self.bn1 = nn.BatchNorm1d(3072)  # 3072 = 2 * 1536 (mean ++ std from pooling)
        self.linear = nn.Linear(3072, embd_dim)
        self.bn2 = nn.BatchNorm1d(embd_dim)
    def forward(self, x):
        # x = x.transpose(1, 2)
        out1 = self.layer1(x)
        # Dense skip connections: each block also sees the sum of all earlier outputs.
        out2 = self.layer2(out1) + out1
        out3 = self.layer3(out1 + out2) + out1 + out2
        out4 = self.layer4(out1 + out2 + out3) + out1 + out2 + out3
        out = torch.cat([out2, out3, out4], dim=1)
        out = F.relu(self.conv(out))
        out = self.bn1(self.pooling(out))
        out = self.bn2(self.linear(out))
        return out
return out
class architecture(nn.Module):
    """Aggregator backbone wrapping ECAPA_TDNN over 512-dim frame-level features.

    forward() consumes (batch, 512, time) frame features and returns a
    (batch, embed_dim) utterance-level embedding.
    """
    def __init__(self, embed_dim=512):
        super(architecture, self).__init__()
        # Bug fix: embed_dim was accepted but silently ignored (embd_dim was
        # hard-coded to 512). Honor the parameter; the default is unchanged,
        # so existing callers get identical behavior.
        self.tdnn_aggregator = ECAPA_TDNN(in_channels=512, channels=512, embd_dim=embed_dim)
    def forward(self, x):
        out = self.tdnn_aggregator(x)
        return out
if __name__ == '__main__':
    # Smoke test. ECAPA_TDNN.forward expects channel-first input
    # (batch, feat_dim, seq_len) because the transpose inside forward() is
    # commented out; the old (2, 200, 80) tensor crashed layer1's Conv1d
    # with a channel mismatch (200 != 80). Build the tensor channel-first.
    x = torch.zeros(2, 80, 200)
    model = ECAPA_TDNN(in_channels=80, channels=512, embd_dim=192)
    out = model(x)
    print(model)
    print(out.shape)    # should be [2, 192]
import torch
import torch.nn as nn
import torch.nn.functional as F
class Res2Conv1dReluBn(nn.Module):
def __init__(self, channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False, scale=4):
super().__init__()
assert channels % scale == 0, "{} % {} != 0".format(channels, scale)
self.scale = scale
self.width = channels // scale
self.nums = scale if scale == 1 else scale - 1
self.convs = []
self.bns = []
for i in range(self.nums):
self.convs.append(nn.Conv1d(self.width, self.width, kernel_size, stride, padding, dilation, bias=bias))
self.bns.append(nn.BatchNorm1d(self.width))
self.convs = nn.ModuleList(self.convs)
self.bns = nn.ModuleList(self.bns)
def forward(self, x):
out = []
spx = torch.split(x, self.width, 1)
for i in range(self.nums):
if i == 0:
sp = spx[i]
else:
sp = sp + spx[i]
sp = self.convs[i](sp)
sp = self.bns[i](F.relu(sp))
out.append(sp)
if self.scale != 1:
out.append(spx[self.nums])
out = torch.cat(out, dim=1)
return out
class Conv1dReluBn(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False):
super().__init__()
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias)
self.bn = nn.BatchNorm1d(out_channels)
def forward(self, x):
return self.bn(F.relu(self.conv(x)))
class SE_Connect(nn.Module):
def __init__(self, channels, s=2):
super().__init__()
assert channels % s == 0, "{} % {} != 0".format(channels, s)
self.linear1 = nn.Linear(channels, channels // s)
self.linear2 = nn.Linear(channels // s, channels)
def forward(self, x):
out = x.mean(dim=2)
out = F.relu(self.linear1(out))
out = torch.sigmoid(self.linear2(out))
out = x * out.unsqueeze(2)
return out
def SE_Res2Block(channels, kernel_size, stride, padding, dilation, scale):
return nn.Sequential(
Conv1dReluBn(channels, channels, kernel_size=1, stride=1, padding=0),
Res2Conv1dReluBn(channels, kernel_size, stride, padding, dilation, scale=scale),
Conv1dReluBn(channels, channels, kernel_size=1, stride=1, padding=0),
SE_Connect(channels)
)
class AttentiveStatsPool(nn.Module):
def __init__(self, in_dim, bottleneck_dim):
super().__init__()
self.linear1 = nn.Conv1d(in_dim, bottleneck_dim, kernel_size=1) # equals W and b in the paper
self.linear2 = nn.Conv1d(bottleneck_dim, in_dim, kernel_size=1) # equals V and k in the paper
def forward(self, x):
# DON'T use ReLU here! In experiments, I find ReLU hard to converge.
alpha = torch.tanh(self.linear1(x))
alpha = torch.softmax(self.linear2(alpha), dim=2)
mean = torch.sum(alpha * x, dim=2)
residuals = torch.sum(alpha * x ** 2, dim=2) - mean ** 2
std = torch.sqrt(residuals.clamp(min=1e-9))
return torch.cat([mean, std], dim=1)
class ECAPA_TDNN(nn.Module):
def __init__(self, in_channels=80, channels=512, embd_dim=192):
super().__init__()
self.layer1 = Conv1dReluBn(in_channels, channels, kernel_size=5, padding=2)
self.layer2 = SE_Res2Block(channels, kernel_size=3, stride=1, padding=2, dilation=2, scale=8)
self.layer3 = SE_Res2Block(channels, kernel_size=3, stride=1, padding=3, dilation=3, scale=8)
self.layer4 = SE_Res2Block(channels, kernel_size=3, stride=1, padding=4, dilation=4, scale=8)
cat_channels = channels * 3
self.conv = nn.Conv1d(cat_channels, 1536, kernel_size=1)
self.pooling = AttentiveStatsPool(1536, 128)
self.bn1 = nn.BatchNorm1d(3072)
self.linear = nn.Linear(3072, embd_dim)
self.bn2 = nn.BatchNorm1d(embd_dim)
def forward(self, x):
out1 = self.layer1(x)
out2 = self.layer2(out1) + out1
out3 = self.layer3(out1 + out2) + out1 + out2
out4 = self.layer4(out1 + out2 + out3) + out1 + out2 + out3
out = torch.cat([out2, out3, out4], dim=1)
out = F.relu(self.conv(out))
out = self.bn1(self.pooling(out))
out = self.bn2(self.linear(out))
return out
class architecture(nn.Module):
def __init__(self, embed_dim=512):
super(architecture, self).__init__()
self.tdnn_aggregator = ECAPA_TDNN(in_channels=512, channels=512, embd_dim=512)
def forward(self, x):
out = self.tdnn_aggregator(x)
return out
if __name__ == '__main__':
x = torch.zeros(2, 200, 80)
model = ECAPA_TDNN(in_channels=80, channels=512, embd_dim=192)
out = model(x)
print(model)
print(out.shape) | true | true |
f7fd1b5bd5d34adf7d563ce08d8f4122fc67dbfc | 9,413 | py | Python | electrum/gui/qt/request_list.py | AvianNetwork/electrum | 36c99526a0caed5fadf49db25db94717f96e39c0 | [
"MIT"
] | 1 | 2022-02-21T03:17:41.000Z | 2022-02-21T03:17:41.000Z | electrum/gui/qt/request_list.py | AvianNetwork/electrum | 36c99526a0caed5fadf49db25db94717f96e39c0 | [
"MIT"
] | null | null | null | electrum/gui/qt/request_list.py | AvianNetwork/electrum | 36c99526a0caed5fadf49db25db94717f96e39c0 | [
"MIT"
] | 1 | 2022-02-28T19:42:24.000Z | 2022-02-28T19:42:24.000Z | #!/usr/bin/env python
#
# Electrum - lightweight Avian client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from enum import IntEnum
from typing import Optional, TYPE_CHECKING
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtWidgets import QMenu, QAbstractItemView
from PyQt5.QtCore import Qt, QItemSelectionModel, QModelIndex
from electrum.i18n import _
from electrum.util import format_time
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, LNInvoice, OnchainInvoice
from electrum.plugin import run_hook
from electrum.invoices import Invoice
from .util import MyTreeView, pr_icons, read_QIcon, webopen, MySortModel
if TYPE_CHECKING:
from .main_window import ElectrumWindow
# Custom Qt item-data roles stored on the request list's items.
ROLE_REQUEST_TYPE = Qt.UserRole      # request type; presumably PR_TYPE_ONCHAIN / PR_TYPE_LN — confirm where rows are built
ROLE_KEY = Qt.UserRole + 1           # wallet request key, read back in select_key()/item_changed()
ROLE_SORT_ORDER = Qt.UserRole + 2    # sort key consumed by MySortModel (see RequestList.__init__)
class RequestList(MyTreeView):
    class Columns(IntEnum):
        # View column indices of the request list.
        DATE = 0
        DESCRIPTION = 1
        AMOUNT = 2
        STATUS = 3
headers = {
Columns.DATE: _('Date'),
Columns.DESCRIPTION: _('Description'),
Columns.AMOUNT: _('Amount'),
Columns.STATUS: _('Status'),
}
filter_columns = [Columns.DATE, Columns.DESCRIPTION, Columns.AMOUNT]
def __init__(self, parent: 'ElectrumWindow'):
super().__init__(parent, self.create_menu,
stretch_column=self.Columns.DESCRIPTION,
editable_columns=[])
self.wallet = self.parent.wallet
self.std_model = QStandardItemModel(self)
self.proxy = MySortModel(self, sort_role=ROLE_SORT_ORDER)
self.proxy.setSourceModel(self.std_model)
self.setModel(self.proxy)
self.setSortingEnabled(True)
self.selectionModel().currentRowChanged.connect(self.item_changed)
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.update()
def select_key(self, key):
for i in range(self.model().rowCount()):
item = self.model().index(i, self.Columns.DATE)
row_key = item.data(ROLE_KEY)
if key == row_key:
self.selectionModel().setCurrentIndex(item, QItemSelectionModel.SelectCurrent | QItemSelectionModel.Rows)
break
def item_changed(self, idx: Optional[QModelIndex]):
if idx is None:
self.parent.receive_payreq_e.setText('')
self.parent.receive_address_e.setText('')
return
if not idx.isValid():
return
# TODO use siblingAtColumn when min Qt version is >=5.11
item = self.item_from_index(idx.sibling(idx.row(), self.Columns.DATE))
key = item.data(ROLE_KEY)
req = self.wallet.get_request(key)
if req is None:
self.update()
return
if req.is_lightning():
self.parent.receive_payreq_e.setText(req.invoice) # TODO maybe prepend "lightning:" ??
self.parent.receive_address_e.setText(req.invoice)
else:
self.parent.receive_payreq_e.setText(self.parent.wallet.get_request_URI(req))
self.parent.receive_address_e.setText(req.get_address())
self.parent.receive_payreq_e.repaint() # macOS hack (similar to #4777)
self.parent.receive_address_e.repaint() # macOS hack (similar to #4777)
def clearSelection(self):
super().clearSelection()
self.selectionModel().clearCurrentIndex()
def refresh_status(self):
m = self.std_model
for r in range(m.rowCount()):
idx = m.index(r, self.Columns.STATUS)
date_idx = idx.sibling(idx.row(), self.Columns.DATE)
date_item = m.itemFromIndex(date_idx)
status_item = m.itemFromIndex(idx)
key = date_item.data(ROLE_KEY)
req = self.wallet.get_request(key)
if req:
status = self.parent.wallet.get_request_status(key)
status_str = req.get_status_str(status)
status_item.setText(status_str)
status_item.setIcon(read_QIcon(pr_icons.get(status)))
def update_item(self, key, invoice: Invoice):
model = self.std_model
for row in range(0, model.rowCount()):
item = model.item(row, 0)
if item.data(ROLE_KEY) == key:
break
else:
return
status_item = model.item(row, self.Columns.STATUS)
status = self.parent.wallet.get_request_status(key)
status_str = invoice.get_status_str(status)
status_item.setText(status_str)
status_item.setIcon(read_QIcon(pr_icons.get(status)))
def update(self):
# not calling maybe_defer_update() as it interferes with conditional-visibility
self.parent.update_receive_address_styling()
self.proxy.setDynamicSortFilter(False) # temp. disable re-sorting after every change
self.std_model.clear()
self.update_headers(self.__class__.headers)
for req in self.wallet.get_unpaid_requests():
key = self.wallet.get_key_for_receive_request(req)
status = self.parent.wallet.get_request_status(key)
status_str = req.get_status_str(status)
request_type = req.type
timestamp = req.time
amount = req.get_amount_sat()
message = req.message
date = format_time(timestamp)
amount_str = self.parent.format_amount(amount) if amount else ""
labels = [date, message, amount_str, status_str]
if req.is_lightning():
icon = read_QIcon("lightning.png")
tooltip = 'lightning request'
else:
icon = read_QIcon("avian.png")
tooltip = 'onchain request'
items = [QStandardItem(e) for e in labels]
self.set_editability(items)
items[self.Columns.DATE].setData(request_type, ROLE_REQUEST_TYPE)
items[self.Columns.DATE].setData(key, ROLE_KEY)
items[self.Columns.DATE].setData(timestamp, ROLE_SORT_ORDER)
items[self.Columns.DATE].setIcon(icon)
items[self.Columns.STATUS].setIcon(read_QIcon(pr_icons.get(status)))
items[self.Columns.DATE].setToolTip(tooltip)
self.std_model.insertRow(self.std_model.rowCount(), items)
self.filter()
self.proxy.setDynamicSortFilter(True)
# sort requests by date
self.sortByColumn(self.Columns.DATE, Qt.DescendingOrder)
# hide list if empty
if self.parent.isVisible():
b = self.std_model.rowCount() > 0
self.setVisible(b)
self.parent.receive_requests_label.setVisible(b)
if not b:
# list got hidden, so selected item should also be cleared:
self.item_changed(None)
def create_menu(self, position):
items = self.selected_in_column(0)
if len(items)>1:
keys = [item.data(ROLE_KEY) for item in items]
menu = QMenu(self)
menu.addAction(_("Delete requests"), lambda: self.parent.delete_requests(keys))
menu.exec_(self.viewport().mapToGlobal(position))
return
idx = self.indexAt(position)
# TODO use siblingAtColumn when min Qt version is >=5.11
item = self.item_from_index(idx.sibling(idx.row(), self.Columns.DATE))
if not item:
return
key = item.data(ROLE_KEY)
req = self.wallet.get_request(key)
if req is None:
self.update()
return
menu = QMenu(self)
self.add_copy_menu(menu, idx)
if req.is_lightning():
menu.addAction(_("Copy Request"), lambda: self.parent.do_copy(req.invoice, title='Lightning Request'))
else:
URI = self.wallet.get_request_URI(req)
menu.addAction(_("Copy Request"), lambda: self.parent.do_copy(URI, title='Avian URI'))
menu.addAction(_("Copy Address"), lambda: self.parent.do_copy(req.get_address(), title='Avian Address'))
#if 'view_url' in req:
# menu.addAction(_("View in web browser"), lambda: webopen(req['view_url']))
menu.addAction(_("Delete"), lambda: self.parent.delete_requests([key]))
run_hook('receive_list_menu', self.parent, menu, key)
menu.exec_(self.viewport().mapToGlobal(position))
| 42.786364 | 121 | 0.651652 |
from enum import IntEnum
from typing import Optional, TYPE_CHECKING
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtWidgets import QMenu, QAbstractItemView
from PyQt5.QtCore import Qt, QItemSelectionModel, QModelIndex
from electrum.i18n import _
from electrum.util import format_time
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, LNInvoice, OnchainInvoice
from electrum.plugin import run_hook
from electrum.invoices import Invoice
from .util import MyTreeView, pr_icons, read_QIcon, webopen, MySortModel
if TYPE_CHECKING:
from .main_window import ElectrumWindow
ROLE_REQUEST_TYPE = Qt.UserRole
ROLE_KEY = Qt.UserRole + 1
ROLE_SORT_ORDER = Qt.UserRole + 2
class RequestList(MyTreeView):
class Columns(IntEnum):
DATE = 0
DESCRIPTION = 1
AMOUNT = 2
STATUS = 3
headers = {
Columns.DATE: _('Date'),
Columns.DESCRIPTION: _('Description'),
Columns.AMOUNT: _('Amount'),
Columns.STATUS: _('Status'),
}
filter_columns = [Columns.DATE, Columns.DESCRIPTION, Columns.AMOUNT]
def __init__(self, parent: 'ElectrumWindow'):
super().__init__(parent, self.create_menu,
stretch_column=self.Columns.DESCRIPTION,
editable_columns=[])
self.wallet = self.parent.wallet
self.std_model = QStandardItemModel(self)
self.proxy = MySortModel(self, sort_role=ROLE_SORT_ORDER)
self.proxy.setSourceModel(self.std_model)
self.setModel(self.proxy)
self.setSortingEnabled(True)
self.selectionModel().currentRowChanged.connect(self.item_changed)
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.update()
def select_key(self, key):
for i in range(self.model().rowCount()):
item = self.model().index(i, self.Columns.DATE)
row_key = item.data(ROLE_KEY)
if key == row_key:
self.selectionModel().setCurrentIndex(item, QItemSelectionModel.SelectCurrent | QItemSelectionModel.Rows)
break
def item_changed(self, idx: Optional[QModelIndex]):
if idx is None:
self.parent.receive_payreq_e.setText('')
self.parent.receive_address_e.setText('')
return
if not idx.isValid():
return
item = self.item_from_index(idx.sibling(idx.row(), self.Columns.DATE))
key = item.data(ROLE_KEY)
req = self.wallet.get_request(key)
if req is None:
self.update()
return
if req.is_lightning():
self.parent.receive_payreq_e.setText(req.invoice)
self.parent.receive_address_e.setText(req.invoice)
else:
self.parent.receive_payreq_e.setText(self.parent.wallet.get_request_URI(req))
self.parent.receive_address_e.setText(req.get_address())
self.parent.receive_payreq_e.repaint() self.parent.receive_address_e.repaint() def clearSelection(self):
super().clearSelection()
self.selectionModel().clearCurrentIndex()
def refresh_status(self):
m = self.std_model
for r in range(m.rowCount()):
idx = m.index(r, self.Columns.STATUS)
date_idx = idx.sibling(idx.row(), self.Columns.DATE)
date_item = m.itemFromIndex(date_idx)
status_item = m.itemFromIndex(idx)
key = date_item.data(ROLE_KEY)
req = self.wallet.get_request(key)
if req:
status = self.parent.wallet.get_request_status(key)
status_str = req.get_status_str(status)
status_item.setText(status_str)
status_item.setIcon(read_QIcon(pr_icons.get(status)))
def update_item(self, key, invoice: Invoice):
model = self.std_model
for row in range(0, model.rowCount()):
item = model.item(row, 0)
if item.data(ROLE_KEY) == key:
break
else:
return
status_item = model.item(row, self.Columns.STATUS)
status = self.parent.wallet.get_request_status(key)
status_str = invoice.get_status_str(status)
status_item.setText(status_str)
status_item.setIcon(read_QIcon(pr_icons.get(status)))
def update(self):
self.parent.update_receive_address_styling()
self.proxy.setDynamicSortFilter(False)
self.std_model.clear()
self.update_headers(self.__class__.headers)
for req in self.wallet.get_unpaid_requests():
key = self.wallet.get_key_for_receive_request(req)
status = self.parent.wallet.get_request_status(key)
status_str = req.get_status_str(status)
request_type = req.type
timestamp = req.time
amount = req.get_amount_sat()
message = req.message
date = format_time(timestamp)
amount_str = self.parent.format_amount(amount) if amount else ""
labels = [date, message, amount_str, status_str]
if req.is_lightning():
icon = read_QIcon("lightning.png")
tooltip = 'lightning request'
else:
icon = read_QIcon("avian.png")
tooltip = 'onchain request'
items = [QStandardItem(e) for e in labels]
self.set_editability(items)
items[self.Columns.DATE].setData(request_type, ROLE_REQUEST_TYPE)
items[self.Columns.DATE].setData(key, ROLE_KEY)
items[self.Columns.DATE].setData(timestamp, ROLE_SORT_ORDER)
items[self.Columns.DATE].setIcon(icon)
items[self.Columns.STATUS].setIcon(read_QIcon(pr_icons.get(status)))
items[self.Columns.DATE].setToolTip(tooltip)
self.std_model.insertRow(self.std_model.rowCount(), items)
self.filter()
self.proxy.setDynamicSortFilter(True)
self.sortByColumn(self.Columns.DATE, Qt.DescendingOrder)
if self.parent.isVisible():
b = self.std_model.rowCount() > 0
self.setVisible(b)
self.parent.receive_requests_label.setVisible(b)
if not b:
self.item_changed(None)
def create_menu(self, position):
items = self.selected_in_column(0)
if len(items)>1:
keys = [item.data(ROLE_KEY) for item in items]
menu = QMenu(self)
menu.addAction(_("Delete requests"), lambda: self.parent.delete_requests(keys))
menu.exec_(self.viewport().mapToGlobal(position))
return
idx = self.indexAt(position)
item = self.item_from_index(idx.sibling(idx.row(), self.Columns.DATE))
if not item:
return
key = item.data(ROLE_KEY)
req = self.wallet.get_request(key)
if req is None:
self.update()
return
menu = QMenu(self)
self.add_copy_menu(menu, idx)
if req.is_lightning():
menu.addAction(_("Copy Request"), lambda: self.parent.do_copy(req.invoice, title='Lightning Request'))
else:
URI = self.wallet.get_request_URI(req)
menu.addAction(_("Copy Request"), lambda: self.parent.do_copy(URI, title='Avian URI'))
menu.addAction(_("Copy Address"), lambda: self.parent.do_copy(req.get_address(), title='Avian Address'))
menu.addAction(_("Delete"), lambda: self.parent.delete_requests([key]))
run_hook('receive_list_menu', self.parent, menu, key)
menu.exec_(self.viewport().mapToGlobal(position))
| true | true |
f7fd1be48c1602a7c146c512000616d3ecceb815 | 603 | py | Python | main.py | akiraak/gae-twitter-bot | 420a752f3013ab42edccf7014df729f6e26c5bad | [
"MIT"
] | 1 | 2016-08-09T21:33:01.000Z | 2016-08-09T21:33:01.000Z | main.py | akiraak/gae-twitter-bot | 420a752f3013ab42edccf7014df729f6e26c5bad | [
"MIT"
] | null | null | null | main.py | akiraak/gae-twitter-bot | 420a752f3013ab42edccf7014df729f6e26c5bad | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
import wsgiref.handlers
from google.appengine.ext.webapp import template
import handler
def create_app(debug=True):
    """Build and return the WSGI application with the bot's URL routes.

    *debug* is forwarded to webapp.WSGIApplication (tracebacks in the
    browser when True).
    """
    routes = [
        ('/', handler.TopHandler),
        ('/jobs_local/cron-twitter', handler.CronTwitterHandler),
        ('/jobs_local/post-twitter', handler.PostTwitterHandler),
    ]
    return webapp.WSGIApplication(routes, debug=debug)
def main():
    """Entry point: build the app and hand it to the CGI/WSGI adapter."""
    application = create_app()
    util.run_wsgi_app(application)
# Run only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| 24.12 | 69 | 0.669983 |
import os
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
import wsgiref.handlers
from google.appengine.ext.webapp import template
import handler
def create_app(debug=True):
return webapp.WSGIApplication(
[
('/', handler.TopHandler),
('/jobs_local/cron-twitter', handler.CronTwitterHandler),
('/jobs_local/post-twitter', handler.PostTwitterHandler),
],
debug=debug)
def main():
util.run_wsgi_app(create_app())
if __name__ == '__main__':
main()
| true | true |
f7fd1c3f2cfb7cf338795157041178d144e1a5e6 | 60,752 | py | Python | matplotlib/collections.py | Solid-Mechanics/matplotlib-4-abaqus | 1117070fb824210c217c564ac36e69112ce70501 | [
"MIT"
] | 35 | 2015-10-23T08:15:36.000Z | 2022-02-03T10:17:15.000Z | site-packages/matplotlib-1.3.1/lib/matplotlib/collections.py | Nuevalgo/Feedbot | 96bdd150fcd92fa155dfc7b13d930bab394e8e47 | [
"BSD-3-Clause"
] | 3 | 2015-09-17T16:27:45.000Z | 2018-07-31T05:59:33.000Z | site-packages/matplotlib-1.3.1/lib/matplotlib/collections.py | Nuevalgo/Feedbot | 96bdd150fcd92fa155dfc7b13d930bab394e8e47 | [
"BSD-3-Clause"
] | 25 | 2016-01-18T12:19:11.000Z | 2021-12-11T15:45:17.000Z | """
Classes for the efficient drawing of large collections of objects that
share most properties, e.g., a large number of line segments or
polygons.
The classes are not meant to be as flexible as their single element
counterparts (e.g., you may not be able to select all line styles) but
they are meant to be fast for common use cases (e.g., a large set of solid
line segments)
"""
from __future__ import print_function
import warnings
import numpy as np
import numpy.ma as ma
import matplotlib as mpl
import matplotlib.cbook as cbook
import matplotlib.colors as mcolors
import matplotlib.cm as cm
from matplotlib import docstring
import matplotlib.transforms as transforms
import matplotlib.artist as artist
from matplotlib.artist import allow_rasterization
import matplotlib.backend_bases as backend_bases
import matplotlib.path as mpath
from matplotlib import _path
import matplotlib.mlab as mlab
class Collection(artist.Artist, cm.ScalarMappable):
    """
    Base class for Collections.  Must be subclassed to be usable.

    All properties in a collection must be sequences or scalars;
    if scalars, they will be converted to sequences.  The
    property of the ith element of the collection is::

      prop[i % len(props)]

    Keyword arguments and default values:

        * *edgecolors*: None
        * *facecolors*: None
        * *linewidths*: None
        * *antialiaseds*: None
        * *offsets*: None
        * *transOffset*: transforms.IdentityTransform()
        * *offset_position*: 'screen' (default) or 'data'
        * *norm*: None (optional for
          :class:`matplotlib.cm.ScalarMappable`)
        * *cmap*: None (optional for
          :class:`matplotlib.cm.ScalarMappable`)
        * *hatch*: None
        * *zorder*: 1

    *offsets* and *transOffset* are used to translate the patch after
    rendering (default no offsets).  If offset_position is 'screen'
    (default) the offset is applied after the master transform has
    been applied, that is, the offsets are in screen coordinates.  If
    offset_position is 'data', the offset is applied before the master
    transform, i.e., the offsets are in data coordinates.

    If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
    are None, they default to their :data:`matplotlib.rcParams` patch
    setting, in sequence form.

    The use of :class:`~matplotlib.cm.ScalarMappable` is optional.  If
    the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not None
    (ie a call to set_array has been made), at draw time a call to
    scalar mappable will be made to set the face colors.
    """
    # Class-level defaults; instances replace these in __init__.
    _offsets = np.array([], np.float_)
    # _offsets must be a Nx2 array!
    _offsets.shape = (0, 2)
    _transOffset = transforms.IdentityTransform()
    _transforms = []
    def __init__(self,
                 edgecolors=None,
                 facecolors=None,
                 linewidths=None,
                 linestyles='solid',
                 antialiaseds=None,
                 offsets=None,
                 transOffset=None,
                 norm=None,  # optional for ScalarMappable
                 cmap=None,  # ditto
                 pickradius=5.0,
                 hatch=None,
                 urls=None,
                 offset_position='screen',
                 zorder=1,
                 **kwargs
                 ):
        """
        Create a Collection

        %(Collection)s
        """
        artist.Artist.__init__(self)
        cm.ScalarMappable.__init__(self, norm, cmap)
        self.set_edgecolor(edgecolors)
        self.set_facecolor(facecolors)
        self.set_linewidth(linewidths)
        self.set_linestyle(linestyles)
        self.set_antialiased(antialiaseds)
        self.set_pickradius(pickradius)
        self.set_urls(urls)
        self.set_hatch(hatch)
        self.set_offset_position(offset_position)
        self.set_zorder(zorder)
        self._uniform_offsets = None
        self._offsets = np.array([[0, 0]], np.float_)
        if offsets is not None:
            offsets = np.asanyarray(offsets)
            offsets.shape = (-1, 2)  # Make it Nx2
            if transOffset is not None:
                self._offsets = offsets
                self._transOffset = transOffset
            else:
                # No offset transform supplied: keep the offsets aside;
                # subclasses may apply them uniformly themselves.
                self._uniform_offsets = offsets
        self._path_effects = None
        self.update(kwargs)
        self._paths = None
    @staticmethod
    def _get_value(val):
        """Coerce *val* into a sequence of floats (a scalar is wrapped)."""
        try:
            return (float(val), )
        except TypeError:
            if cbook.iterable(val) and len(val):
                try:
                    float(val[0])
                except (TypeError, ValueError):
                    pass  # raise below
                else:
                    return val
        raise TypeError('val must be a float or nonzero sequence of floats')
    @staticmethod
    def _get_bool(val):
        """Coerce *val* into a nonempty sequence of booleans."""
        if not cbook.iterable(val):
            val = (val,)
        try:
            bool(val[0])
        except (TypeError, IndexError):
            raise TypeError('val must be a bool or nonzero sequence of them')
        return val
    def get_paths(self):
        # The Path objects drawn by this collection; set by subclasses.
        return self._paths
    def set_paths(self):
        # Subclasses must override this to (re)build self._paths.
        raise NotImplementedError
    def get_transforms(self):
        # Per-path transforms applied on top of the master transform.
        return self._transforms
    def get_offset_transform(self):
        """Return the transform applied to the offsets, resolving lazy
        (non-Transform) objects against our axes."""
        t = self._transOffset
        if (not isinstance(t, transforms.Transform)
                and hasattr(t, '_as_mpl_transform')):
            t = t._as_mpl_transform(self.axes)
        return t
    def get_datalim(self, transData):
        """Return the bounding box of the collection in *transData* space."""
        transform = self.get_transform()
        transOffset = self.get_offset_transform()
        offsets = self._offsets
        paths = self.get_paths()
        if not transform.is_affine:
            # Apply the non-affine part up front so only the affine
            # remainder needs to be composed with the extents.
            paths = [transform.transform_path_non_affine(p) for p in paths]
            transform = transform.get_affine()
        if not transOffset.is_affine:
            offsets = transOffset.transform_non_affine(offsets)
            transOffset = transOffset.get_affine()
        offsets = np.asanyarray(offsets, np.float_)
        if np.ma.isMaskedArray(offsets):
            offsets = offsets.filled(np.nan)
            # get_path_collection_extents handles nan but not masked arrays
        offsets.shape = (-1, 2)  # Make it Nx2
        if paths:
            result = mpath.get_path_collection_extents(
                transform.frozen(), paths, self.get_transforms(),
                offsets, transOffset.frozen())
            result = result.inverse_transformed(transData)
        else:
            # No paths: degenerate bbox at the origin.
            result = transforms.Bbox([[0, 0], [0, 0]])
        return result
    def get_window_extent(self, renderer):
        # TODO:check to ensure that this does not fail for
        # cases other than scatter plot legend
        return self.get_datalim(transforms.IdentityTransform())
    def _prepare_points(self):
        """Point prep for drawing and hit testing"""
        transform = self.get_transform()
        transOffset = self.get_offset_transform()
        offsets = self._offsets
        paths = self.get_paths()
        if self.have_units():
            # Convert vertices and offsets through the unit framework.
            paths = []
            for path in self.get_paths():
                vertices = path.vertices
                xs, ys = vertices[:, 0], vertices[:, 1]
                xs = self.convert_xunits(xs)
                ys = self.convert_yunits(ys)
                paths.append(mpath.Path(zip(xs, ys), path.codes))
            if offsets.size > 0:
                xs = self.convert_xunits(offsets[:, 0])
                ys = self.convert_yunits(offsets[:, 1])
                offsets = zip(xs, ys)
        offsets = np.asanyarray(offsets, np.float_)
        offsets.shape = (-1, 2)  # Make it Nx2
        if not transform.is_affine:
            paths = [transform.transform_path_non_affine(path)
                     for path in paths]
            transform = transform.get_affine()
        if not transOffset.is_affine:
            offsets = transOffset.transform_non_affine(offsets)
            # This might have changed an ndarray into a masked array.
            transOffset = transOffset.get_affine()
        if np.ma.isMaskedArray(offsets):
            offsets = offsets.filled(np.nan)
            # Changing from a masked array to nan-filled ndarray
            # is probably most efficient at this point.
        return transform, transOffset, offsets, paths
    @allow_rasterization
    def draw(self, renderer):
        """Render the collection, updating mapped colors first."""
        if not self.get_visible():
            return
        renderer.open_group(self.__class__.__name__, self.get_gid())
        self.update_scalarmappable()
        transform, transOffset, offsets, paths = self._prepare_points()
        gc = renderer.new_gc()
        self._set_gc_clip(gc)
        gc.set_snap(self.get_snap())
        if self._hatch:
            gc.set_hatch(self._hatch)
        if self.get_sketch_params() is not None:
            gc.set_sketch_params(*self.get_sketch_params())
        if self.get_path_effects():
            # Path effects take over the actual drawing.
            for pe in self.get_path_effects():
                pe.draw_path_collection(renderer,
                    gc, transform.frozen(), paths, self.get_transforms(),
                    offsets, transOffset, self.get_facecolor(), self.get_edgecolor(),
                    self._linewidths, self._linestyles, self._antialiaseds, self._urls,
                    self._offset_position)
        else:
            renderer.draw_path_collection(
                gc, transform.frozen(), paths, self.get_transforms(),
                offsets, transOffset, self.get_facecolor(), self.get_edgecolor(),
                self._linewidths, self._linestyles, self._antialiaseds, self._urls,
                self._offset_position)
        gc.restore()
        renderer.close_group(self.__class__.__name__)
    def set_pickradius(self, pr):
        # Pick radius in points, used by contains() when picker is True.
        self._pickradius = pr
    def get_pickradius(self):
        return self._pickradius
    def contains(self, mouseevent):
        """
        Test whether the mouse event occurred in the collection.

        Returns True | False, ``dict(ind=itemlist)``, where every
        item in itemlist contains the event.
        """
        if callable(self._contains):
            return self._contains(self, mouseevent)
        if not self.get_visible():
            return False, {}
        if self._picker is True:  # the Boolean constant, not just nonzero or 1
            pickradius = self._pickradius
        else:
            try:
                pickradius = float(self._picker)
            except TypeError:
                # This should not happen if "contains" is called via
                # pick, the normal route; the check is here in case
                # it is called through some unanticipated route.
                warnings.warn(
                    "Collection picker %s could not be converted to float"
                    % self._picker)
                pickradius = self._pickradius
        transform, transOffset, offsets, paths = self._prepare_points()
        ind = _path.point_in_path_collection(
            mouseevent.x, mouseevent.y, pickradius,
            transform.frozen(), paths, self.get_transforms(),
            offsets, transOffset, pickradius <= 0,
            self.get_offset_position())
        return len(ind) > 0, dict(ind=ind)
    def set_urls(self, urls):
        # Per-element URLs (used by e.g. the SVG backend); None disables.
        if urls is None:
            self._urls = [None, ]
        else:
            self._urls = urls
    def get_urls(self):
        return self._urls
    def set_hatch(self, hatch):
        """
        Set the hatching pattern

        *hatch* can be one of::

          /   - diagonal hatching
          \   - back diagonal
          |   - vertical
          -   - horizontal
          +   - crossed
          x   - crossed diagonal
          o   - small circle
          O   - large circle
          .   - dots
          *   - stars

        Letters can be combined, in which case all the specified
        hatchings are done.  If same letter repeats, it increases the
        density of hatching of that pattern.

        Hatching is supported in the PostScript, PDF, SVG and Agg
        backends only.

        Unlike other properties such as linewidth and colors, hatching
        can only be specified for the collection as a whole, not separately
        for each member.

        ACCEPTS: [ '/' | '\\\\' | '|' | '-' | '+' | 'x' | 'o' | 'O' | '.' | '*' ]
        """
        self._hatch = hatch
    def get_hatch(self):
        'Return the current hatching pattern'
        return self._hatch
    def set_offsets(self, offsets):
        """
        Set the offsets for the collection.  *offsets* can be a scalar
        or a sequence.

        ACCEPTS: float or sequence of floats
        """
        offsets = np.asanyarray(offsets, np.float_)
        offsets.shape = (-1, 2)  # Make it Nx2
        #This decision is based on how they are initialized above
        if self._uniform_offsets is None:
            self._offsets = offsets
        else:
            self._uniform_offsets = offsets
    def get_offsets(self):
        """
        Return the offsets for the collection.
        """
        #This decision is based on how they are initialized above in __init__()
        if self._uniform_offsets is None:
            return self._offsets
        else:
            return self._uniform_offsets
    def set_offset_position(self, offset_position):
        """
        Set how offsets are applied.  If *offset_position* is 'screen'
        (default) the offset is applied after the master transform has
        been applied, that is, the offsets are in screen coordinates.
        If offset_position is 'data', the offset is applied before the
        master transform, i.e., the offsets are in data coordinates.
        """
        if offset_position not in ('screen', 'data'):
            raise ValueError("offset_position must be 'screen' or 'data'")
        self._offset_position = offset_position
    def get_offset_position(self):
        """
        Returns how offsets are applied for the collection.  If
        *offset_position* is 'screen', the offset is applied after the
        master transform has been applied, that is, the offsets are in
        screen coordinates.  If offset_position is 'data', the offset
        is applied before the master transform, i.e., the offsets are
        in data coordinates.
        """
        return self._offset_position
    def set_linewidth(self, lw):
        """
        Set the linewidth(s) for the collection.  *lw* can be a scalar
        or a sequence; if it is a sequence the patches will cycle
        through the sequence

        ACCEPTS: float or sequence of floats
        """
        if lw is None:
            lw = mpl.rcParams['patch.linewidth']
        self._linewidths = self._get_value(lw)
    def set_linewidths(self, lw):
        """alias for set_linewidth"""
        return self.set_linewidth(lw)
    def set_lw(self, lw):
        """alias for set_linewidth"""
        return self.set_linewidth(lw)
    def set_linestyle(self, ls):
        """
        Set the linestyle(s) for the collection.

        ACCEPTS: ['solid' | 'dashed', 'dashdot', 'dotted' |
                   (offset, on-off-dash-seq) ]
        """
        # Convert every accepted spelling into a list of dash patterns;
        # any failure falls through to the single ValueError below.
        try:
            dashd = backend_bases.GraphicsContextBase.dashd
            if cbook.is_string_like(ls):
                if ls in dashd:
                    dashes = [dashd[ls]]
                elif ls in cbook.ls_mapper:
                    dashes = [dashd[cbook.ls_mapper[ls]]]
                else:
                    raise ValueError()
            elif cbook.iterable(ls):
                try:
                    dashes = []
                    for x in ls:
                        if cbook.is_string_like(x):
                            if x in dashd:
                                dashes.append(dashd[x])
                            elif x in cbook.ls_mapper:
                                dashes.append(dashd[cbook.ls_mapper[x]])
                            else:
                                raise ValueError()
                        elif cbook.iterable(x) and len(x) == 2:
                            dashes.append(x)
                        else:
                            raise ValueError()
                except ValueError:
                    # Maybe *ls* itself is a single (offset, seq) pair.
                    if len(ls) == 2:
                        dashes = ls
                    else:
                        raise ValueError()
            else:
                raise ValueError()
        except ValueError:
            raise ValueError('Do not know how to convert %s to dashes' % ls)
        self._linestyles = dashes
    def set_linestyles(self, ls):
        """alias for set_linestyle"""
        return self.set_linestyle(ls)
    def set_dashes(self, ls):
        """alias for set_linestyle"""
        return self.set_linestyle(ls)
    def set_antialiased(self, aa):
        """
        Set the antialiasing state for rendering.

        ACCEPTS: Boolean or sequence of booleans
        """
        if aa is None:
            aa = mpl.rcParams['patch.antialiased']
        self._antialiaseds = self._get_bool(aa)
    def set_antialiaseds(self, aa):
        """alias for set_antialiased"""
        return self.set_antialiased(aa)
    def set_color(self, c):
        """
        Set both the edgecolor and the facecolor.

        ACCEPTS: matplotlib color arg or sequence of rgba tuples

        .. seealso::

            :meth:`set_facecolor`, :meth:`set_edgecolor`
               For setting the edge or face color individually.
        """
        self.set_facecolor(c)
        self.set_edgecolor(c)
    def set_facecolor(self, c):
        """
        Set the facecolor(s) of the collection.  *c* can be a
        matplotlib color arg (all patches have same color), or a
        sequence of rgba tuples; if it is a sequence the patches will
        cycle through the sequence.

        If *c* is 'none', the patch will not be filled.

        ACCEPTS: matplotlib color arg or sequence of rgba tuples
        """
        self._is_filled = True
        try:
            if c.lower() == 'none':
                self._is_filled = False
        except AttributeError:
            pass
        if c is None:
            c = mpl.rcParams['patch.facecolor']
        # Keep the original spec so alpha can be re-applied later.
        self._facecolors_original = c
        self._facecolors = mcolors.colorConverter.to_rgba_array(c, self._alpha)
    def set_facecolors(self, c):
        """alias for set_facecolor"""
        return self.set_facecolor(c)
    def get_facecolor(self):
        return self._facecolors
    get_facecolors = get_facecolor
    def get_edgecolor(self):
        if self._edgecolors == 'face':
            return self.get_facecolors()
        else:
            return self._edgecolors
    get_edgecolors = get_edgecolor
    def set_edgecolor(self, c):
        """
        Set the edgecolor(s) of the collection. *c* can be a
        matplotlib color arg (all patches have same color), or a
        sequence of rgba tuples; if it is a sequence the patches will
        cycle through the sequence.

        If *c* is 'face', the edge color will always be the same as
        the face color.  If it is 'none', the patch boundary will not
        be drawn.

        ACCEPTS: matplotlib color arg or sequence of rgba tuples
        """
        self._is_stroked = True
        try:
            if c.lower() == 'none':
                self._is_stroked = False
        except AttributeError:
            pass
        try:
            # 'face' is stored as a sentinel string; get_edgecolor
            # resolves it to the face colors at read time.
            if c.lower() == 'face':
                self._edgecolors = 'face'
                self._edgecolors_original = 'face'
                return
        except AttributeError:
            pass
        if c is None:
            c = mpl.rcParams['patch.edgecolor']
        self._edgecolors_original = c
        self._edgecolors = mcolors.colorConverter.to_rgba_array(c, self._alpha)
    def set_edgecolors(self, c):
        """alias for set_edgecolor"""
        return self.set_edgecolor(c)
    def set_alpha(self, alpha):
        """
        Set the alpha transparencies of the collection.  *alpha* must be
        a float or *None*.

        ACCEPTS: float or None
        """
        if alpha is not None:
            try:
                float(alpha)
            except TypeError:
                raise TypeError('alpha must be a float or None')
        artist.Artist.set_alpha(self, alpha)
        # Re-derive the rgba arrays from the original color specs so the
        # new alpha takes effect.
        try:
            self._facecolors = mcolors.colorConverter.to_rgba_array(
                self._facecolors_original, self._alpha)
        except (AttributeError, TypeError, IndexError):
            pass
        try:
            if self._edgecolors_original != 'face':
                self._edgecolors = mcolors.colorConverter.to_rgba_array(
                    self._edgecolors_original, self._alpha)
        except (AttributeError, TypeError, IndexError):
            pass
    def get_linewidths(self):
        return self._linewidths
    get_linewidth = get_linewidths
    def get_linestyles(self):
        return self._linestyles
    get_dashes = get_linestyle = get_linestyles
    def update_scalarmappable(self):
        """
        If the scalar mappable array is not none, update colors
        from scalar data
        """
        if self._A is None:
            return
        if self._A.ndim > 1:
            raise ValueError('Collections can only map rank 1 arrays')
        if not self.check_update("array"):
            return
        # Mapped colors go to whichever of face/edge is actually drawn.
        if self._is_filled:
            self._facecolors = self.to_rgba(self._A, self._alpha)
        elif self._is_stroked:
            self._edgecolors = self.to_rgba(self._A, self._alpha)
    def update_from(self, other):
        'copy properties from other to self'
        artist.Artist.update_from(self, other)
        self._antialiaseds = other._antialiaseds
        self._edgecolors_original = other._edgecolors_original
        self._edgecolors = other._edgecolors
        self._facecolors_original = other._facecolors_original
        self._facecolors = other._facecolors
        self._linewidths = other._linewidths
        self._linestyles = other._linestyles
        self._pickradius = other._pickradius
        self._hatch = other._hatch
        # update_from for scalarmappable
        self._A = other._A
        self.norm = other.norm
        self.cmap = other.cmap
        # self.update_dict = other.update_dict # do we need to copy this? -JJL
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object defn
docstring.interpd.update(Collection="""\
Valid Collection keyword arguments:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets)
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
""")
class PathCollection(Collection):
    """
    The simplest :class:`Collection` subclass: it draws an explicit
    sequence of :class:`~matplotlib.path.Path` objects, optionally
    scaled per-path via *sizes*.
    """
    @docstring.dedent_interpd
    def __init__(self, paths, sizes=None, **kwargs):
        """
        *paths* is a sequence of :class:`matplotlib.path.Path`
        instances.

        %(Collection)s
        """
        Collection.__init__(self, **kwargs)
        # Per-path area scalings (points^2); realized as transforms in draw().
        self._sizes = sizes
        self.set_paths(paths)

    def set_paths(self, paths):
        """Replace the sequence of paths drawn by this collection."""
        self._paths = paths

    def get_paths(self):
        """Return the sequence of paths being drawn."""
        return self._paths

    def get_sizes(self):
        """Return the per-path size scalings (or None)."""
        return self._sizes

    @allow_rasterization
    def draw(self, renderer):
        # Rebuild the per-path scaling transforms (dpi may have changed)
        # before delegating the actual rendering to the base class.
        if self._sizes is not None:
            self._transforms = []
            for size in self._sizes:
                self._transforms.append(
                    transforms.Affine2D().scale(
                        (np.sqrt(size) * self.figure.dpi / 72.0)))
        return Collection.draw(self, renderer)
class PolyCollection(Collection):
    @docstring.dedent_interpd
    def __init__(self, verts, sizes=None, closed=True, **kwargs):
        """
        *verts* is a sequence of ( *verts0*, *verts1*, ...) where
        *verts_i* is a sequence of *xy* tuples of vertices, or an
        equivalent :mod:`numpy` array of shape (*nv*, 2).

        *sizes* is *None* (default) or a sequence of floats that
        scale the corresponding *verts_i*.  The scaling is applied
        before the Artist master transform; if the latter is an identity
        transform, then the overall scaling is such that if
        *verts_i* specify a unit square, then *sizes_i* is the area
        of that square in points^2.
        If len(*sizes*) < *nv*, the additional values will be
        taken cyclically from the array.

        *closed*, when *True*, will explicitly close the polygon.

        %(Collection)s
        """
        Collection.__init__(self, **kwargs)
        self._sizes = sizes
        self.set_verts(verts, closed)

    def set_verts(self, verts, closed=True):
        '''This allows one to delay initialization of the vertices.'''
        if np.ma.isMaskedArray(verts):
            verts = verts.astype(np.float_).filled(np.nan)
        if not closed:
            self._paths = [mpath.Path(xy) for xy in verts]
            return
        # Close each polygon ourselves with an explicit CLOSEPOLY vertex;
        # this is much faster than having Path do it one at a time.
        paths = []
        for xy in verts:
            if not len(xy):
                paths.append(mpath.Path(xy))
                continue
            if np.ma.isMaskedArray(xy):
                xy = np.ma.concatenate([xy, np.zeros((1, 2))])
            else:
                xy = np.concatenate([np.asarray(xy), np.zeros((1, 2))])
            codes = np.empty(xy.shape[0], dtype=mpath.Path.code_type)
            codes.fill(mpath.Path.LINETO)
            codes[0] = mpath.Path.MOVETO
            codes[-1] = mpath.Path.CLOSEPOLY
            paths.append(mpath.Path(xy, codes))
        self._paths = paths

    set_paths = set_verts

    @allow_rasterization
    def draw(self, renderer):
        # Scale each polygon by sqrt(size) (sizes are areas in points^2).
        if self._sizes is not None:
            dpi = self.figure.dpi
            self._transforms = [
                transforms.Affine2D().scale(np.sqrt(sz) * dpi / 72.0)
                for sz in self._sizes]
        return Collection.draw(self, renderer)
class BrokenBarHCollection(PolyCollection):
    """
    A collection of horizontal bars spanning *yrange* with a sequence of
    *xranges*.
    """
    @docstring.dedent_interpd
    def __init__(self, xranges, yrange, **kwargs):
        """
        *xranges*
            sequence of (*xmin*, *xwidth*)

        *yrange*
            *ymin*, *ywidth*

        %(Collection)s
        """
        y0, ywidth = yrange
        y1 = y0 + ywidth
        # Each bar is a closed rectangle traced corner by corner.
        verts = [[(x0, y0), (x0, y1), (x0 + w, y1), (x0 + w, y0), (x0, y0)]
                 for x0, w in xranges]
        PolyCollection.__init__(self, verts, **kwargs)

    @staticmethod
    def span_where(x, ymin, ymax, where, **kwargs):
        """
        Create a BrokenBarHCollection to plot horizontal bars from
        over the regions in *x* where *where* is True.  The bars range
        on the y-axis from *ymin* to *ymax*.  *kwargs* are passed on to
        the collection.
        """
        xranges = []
        for start, stop in mlab.contiguous_regions(where):
            chunk = x[start:stop]
            if len(chunk):
                xranges.append((chunk[0], chunk[-1] - chunk[0]))
        return BrokenBarHCollection(xranges, [ymin, ymax - ymin], **kwargs)
class RegularPolyCollection(Collection):
    """Draw a collection of regular polygons with *numsides*."""

    # Overridden by subclasses to draw stars / asterisks instead.
    _path_generator = mpath.Path.unit_regular_polygon

    @docstring.dedent_interpd
    def __init__(self,
                 numsides,
                 rotation=0,
                 sizes=(1,),
                 **kwargs):
        """
        *numsides*
            the number of sides of the polygon

        *rotation*
            the rotation of the polygon in radians

        *sizes*
            gives the area of the circle circumscribing the
            regular polygon in points^2

        %(Collection)s

        Example: see :file:`examples/dynamic_collection.py` for
        complete example::

            offsets = np.random.rand(20,2)
            facecolors = [cm.jet(x) for x in np.random.rand(20)]
            black = (0,0,0,1)

            collection = RegularPolyCollection(
                numsides=5, # a pentagon
                rotation=0, sizes=(50,),
                facecolors = facecolors,
                edgecolors = (black,),
                linewidths = (1,),
                offsets = offsets,
                transOffset = ax.transData,
                )
        """
        Collection.__init__(self, **kwargs)
        self._sizes = sizes
        self._numsides = numsides
        self._paths = [self._path_generator(numsides)]
        self._rotation = rotation
        self.set_transform(transforms.IdentityTransform())

    @allow_rasterization
    def draw(self, renderer):
        # Rotate, then scale so that *sizes* gives the area (points^2) of
        # the circumscribing circle.
        dpi = self.figure.dpi
        self._transforms = [
            transforms.Affine2D().rotate(-self._rotation).scale(
                (np.sqrt(sz) * dpi / 72.0) / np.sqrt(np.pi))
            for sz in self._sizes]
        return Collection.draw(self, renderer)

    def get_numsides(self):
        """Return the number of polygon sides."""
        return self._numsides

    def get_rotation(self):
        """Return the rotation in radians."""
        return self._rotation

    def get_sizes(self):
        """Return the circumscribed-circle areas in points^2."""
        return self._sizes
class StarPolygonCollection(RegularPolyCollection):
    """Draw a collection of regular stars with *numsides* points."""
    # Same machinery as RegularPolyCollection, but generate star paths.
    _path_generator = mpath.Path.unit_regular_star
class AsteriskPolygonCollection(RegularPolyCollection):
    """Draw a collection of regular asterisks with *numsides* points."""
    # Same machinery as RegularPolyCollection, but generate asterisk paths.
    _path_generator = mpath.Path.unit_regular_asterisk
class LineCollection(Collection):
    """
    A collection of line segments.

    All parameters must be sequences or scalars; if scalars, they will
    be converted to sequences.  The property of the ith line
    segment is::

       prop[i % len(props)]

    i.e., the properties cycle if the ``len`` of props is less than the
    number of segments.
    """
    def __init__(self, segments,     # Can be None.
                 linewidths=None,
                 colors=None,
                 antialiaseds=None,
                 linestyles='solid',
                 offsets=None,
                 transOffset=None,
                 norm=None,
                 cmap=None,
                 pickradius=5,
                 zorder=2,
                 **kwargs
                 ):
        """
        *segments*
            a sequence of (*line0*, *line1*, *line2*), where::

                linen = (x0, y0), (x1, y1), ... (xm, ym)

            or the equivalent numpy array with two columns. Each line
            can be a different length.

        *colors*
            must be a sequence of RGBA tuples (eg arbitrary color
            strings, etc, not allowed).

        *antialiaseds*
            must be a sequence of ones or zeros

        *linestyles* [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
            a string or dash tuple. The dash tuple is::

                (offset, onoffseq),

            where *onoffseq* is an even length tuple of on and off ink
            in points.

        If *linewidths*, *colors*, or *antialiaseds* is None, they
        default to their rcParams setting, in sequence form.

        If *offsets* and *transOffset* are not None, then
        *offsets* are transformed by *transOffset* and applied after
        the segments have been transformed to display coordinates.

        If *offsets* is not None but *transOffset* is None, then the
        *offsets* are added to the segments before any transformation.
        In this case, a single offset can be specified as::

            offsets=(xo,yo)

        and this value will be added cumulatively to each successive
        segment, so as to produce a set of successively offset curves.

        *norm*
            None (optional for :class:`matplotlib.cm.ScalarMappable`)
        *cmap*
            None (optional for :class:`matplotlib.cm.ScalarMappable`)

        *pickradius* is the tolerance for mouse clicks picking a line.
        The default is 5 pt.

        *zorder*
            The zorder of the LineCollection.  Default is 2.

        The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
        If the :class:`~matplotlib.cm.ScalarMappable` array
        :attr:`~matplotlib.cm.ScalarMappable._A` is not None (ie a call to
        :meth:`~matplotlib.cm.ScalarMappable.set_array` has been made), at
        draw time a call to scalar mappable will be made to set the colors.
        """
        # Fall back on the rcParams line defaults where unspecified.
        if colors is None:
            colors = mpl.rcParams['lines.color']
        if linewidths is None:
            linewidths = (mpl.rcParams['lines.linewidth'],)
        if antialiaseds is None:
            antialiaseds = (mpl.rcParams['lines.antialiased'],)
        self.set_linestyles(linestyles)
        colors = mcolors.colorConverter.to_rgba_array(colors)
        # Lines are stroked, never filled: edge color carries the color.
        Collection.__init__(
            self,
            edgecolors=colors,
            facecolors='none',
            linewidths=linewidths,
            linestyles=linestyles,
            antialiaseds=antialiaseds,
            offsets=offsets,
            transOffset=transOffset,
            norm=norm,
            cmap=cmap,
            pickradius=pickradius,
            zorder=zorder,
            **kwargs)
        self.set_segments(segments)

    def set_segments(self, segments):
        """Replace the line segments; masked arrays pass through unchanged."""
        if segments is None:
            return
        converted = [seg if np.ma.isMaskedArray(seg)
                     else np.asarray(seg, np.float_)
                     for seg in segments]
        if self._uniform_offsets is not None:
            converted = self._add_offsets(converted)
        self._paths = [mpath.Path(seg) for seg in converted]

    set_verts = set_segments  # for compatibility with PolyCollection
    set_paths = set_segments

    def get_segments(self):
        """Return the segments as a list of Nx2 vertex arrays."""
        return [np.asarray([vertex for vertex, _ in path.iter_segments()])
                for path in self._paths]

    def _add_offsets(self, segs):
        """Apply the stored uniform offsets to *segs* and return them."""
        offsets = self._uniform_offsets
        noffs = offsets.shape[0]
        if noffs == 1:
            # A single offset is applied cumulatively, producing a set of
            # successively offset curves.
            return [seg + i * offsets for i, seg in enumerate(segs)]
        return [seg + offsets[(i % noffs):(i % noffs) + 1]
                for i, seg in enumerate(segs)]

    def set_color(self, c):
        """
        Set the color(s) of the line collection.  *c* can be a
        matplotlib color arg (all patches have same color), or a
        sequence or rgba tuples; if it is a sequence the patches will
        cycle through the sequence.

        ACCEPTS: matplotlib color arg or sequence of rgba tuples
        """
        self.set_edgecolor(c)

    def color(self, c):
        """
        Deprecated alias for :meth:`set_color`.

        ACCEPTS: matplotlib color arg or sequence of rgba tuples
        """
        warnings.warn('LineCollection.color deprecated; use set_color instead')
        return self.set_color(c)

    def get_color(self):
        """Return the edge colors, which carry the line colors."""
        return self._edgecolors

    get_colors = get_color  # for compatibility with old versions
class EventCollection(LineCollection):
    '''
    A collection of discrete events.

    An event is a 1-dimensional value, usually the position of something
    along an axis, such as time or length.  Events do not have an
    amplitude; they are displayed as a set of identical parallel line
    markers perpendicular to the event axis.
    '''

    def __init__(self,
                 positions,     # Can be None.
                 orientation=None,
                 lineoffset=0,
                 linelength=1,
                 linewidth=None,
                 color=None,
                 linestyle='solid',
                 antialiased=None,
                 **kwargs
                 ):
        """
        *positions*
            a sequence of numerical values or a 1D numpy array.  Can be None

        *orientation* [ 'horizontal' | 'vertical' | None ]
            defaults to 'horizontal' if not specified or None

        *lineoffset*
            a single numerical value, corresponding to the offset of the center
            of the markers from the origin

        *linelength*
            a single numerical value, corresponding to the total height of the
            marker (i.e. the marker stretches from lineoffset+linelength/2 to
            lineoffset-linelength/2).  Defaults to 1

        *linewidth*
            a single numerical value

        *color*
            must be a sequence of RGBA tuples (eg arbitrary color
            strings, etc, not allowed).

        *linestyle* [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]

        *antialiased*
            1 or 2

        If *linewidth*, *color*, or *antialiased* is None, they
        default to their rcParams setting, in sequence form.

        *norm*
            None (optional for :class:`matplotlib.cm.ScalarMappable`)
        *cmap*
            None (optional for :class:`matplotlib.cm.ScalarMappable`)

        *pickradius* is the tolerance for mouse clicks picking a line.
        The default is 5 pt.

        The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
        If the :class:`~matplotlib.cm.ScalarMappable` array
        :attr:`~matplotlib.cm.ScalarMappable._A` is not None (ie a call to
        :meth:`~matplotlib.cm.ScalarMappable.set_array` has been made), at
        draw time a call to scalar mappable will be made to set the colors.

        **Example:**

        .. plot:: mpl_examples/pylab_examples/eventcollection_demo.py
        """
        # Resolve the orientation first so self._is_horizontal is always
        # set, even when *positions* is empty or None.  (Previously an
        # empty *positions* left _is_horizontal undefined, so a later
        # call to is_horizontal() raised AttributeError.)
        if (orientation is None or orientation.lower() == 'none' or
                orientation.lower() == 'horizontal'):
            self._is_horizontal = True
        elif orientation.lower() == 'vertical':
            self._is_horizontal = False
        else:
            raise ValueError("orientation must be 'horizontal' or 'vertical'")

        segment = (lineoffset + linelength / 2.,
                   lineoffset - linelength / 2.)
        if positions is None or len(positions) == 0:
            segments = []
        elif hasattr(positions, 'ndim') and positions.ndim > 1:
            raise ValueError('if positions is an ndarray it cannot have '
                             'dimensionality greater than 1')
        elif self._is_horizontal:
            positions.sort()
            segments = [[(coord1, coord2) for coord2 in segment] for
                        coord1 in positions]
        else:
            positions.sort()
            segments = [[(coord2, coord1) for coord2 in segment] for
                        coord1 in positions]
        LineCollection.__init__(self,
                                segments,
                                linewidths=linewidth,
                                colors=color,
                                antialiaseds=antialiased,
                                linestyles=linestyle,
                                **kwargs)
        self._linelength = linelength
        self._lineoffset = lineoffset

    def get_positions(self):
        '''
        return an array containing the floating-point values of the positions
        '''
        segments = self.get_segments()
        # The event coordinate is x (index 0) when horizontal, else y.
        pos = 0 if self.is_horizontal() else 1
        positions = []
        for segment in segments:
            positions.append(segment[0, pos])
        return positions

    def set_positions(self, positions):
        '''
        set the positions of the events to the specified value
        '''
        # Bug fix: the original tested hasattr(positions, 'len'), which is
        # never true for sequences; test for __len__ instead so that empty
        # sequences are handled by the fast path.
        if positions is None or (hasattr(positions, '__len__') and
                                 len(positions) == 0):
            self.set_segments([])
            return
        lineoffset = self.get_lineoffset()
        linelength = self.get_linelength()
        segment = (lineoffset + linelength / 2.,
                   lineoffset - linelength / 2.)
        positions = np.asanyarray(positions)
        positions.sort()
        if self.is_horizontal():
            segments = [[(coord1, coord2) for coord2 in segment] for
                        coord1 in positions]
        else:
            segments = [[(coord2, coord1) for coord2 in segment] for
                        coord1 in positions]
        self.set_segments(segments)

    def add_positions(self, position):
        '''
        add one or more events at the specified positions
        '''
        # Bug fix: hasattr(position, 'len') -> hasattr(position, '__len__')
        # (see set_positions).
        if position is None or (hasattr(position, '__len__') and
                                len(position) == 0):
            return
        positions = self.get_positions()
        positions = np.hstack([positions, np.asanyarray(position)])
        self.set_positions(positions)
    extend_positions = append_positions = add_positions

    def is_horizontal(self):
        '''
        True if the eventcollection is horizontal, False if vertical
        '''
        return self._is_horizontal

    def get_orientation(self):
        '''
        get the orientation of the event line, may be:
        [ 'horizontal' | 'vertical' ]
        '''
        return 'horizontal' if self.is_horizontal() else 'vertical'

    def switch_orientation(self):
        '''
        switch the orientation of the event line, either from vertical to
        horizontal or vice versa
        '''
        # Swapping x and y of every segment flips the line orientation.
        segments = self.get_segments()
        for i, segment in enumerate(segments):
            segments[i] = np.fliplr(segment)
        self.set_segments(segments)
        self._is_horizontal = not self.is_horizontal()

    def set_orientation(self, orientation=None):
        '''
        set the orientation of the event line
        [ 'horizontal' | 'vertical' | None ]
        defaults to 'horizontal' if not specified or None
        '''
        if (orientation is None or orientation.lower() == 'none' or
                orientation.lower() == 'horizontal'):
            is_horizontal = True
        elif orientation.lower() == 'vertical':
            is_horizontal = False
        else:
            raise ValueError("orientation must be 'horizontal' or 'vertical'")
        if is_horizontal == self.is_horizontal():
            return
        self.switch_orientation()

    def _set_segment_extents(self, lineoffset, linelength):
        '''
        rewrite each segment to span lineoffset +/- linelength/2
        (shared implementation of set_linelength and set_lineoffset)
        '''
        segments = self.get_segments()
        pos = 1 if self.is_horizontal() else 0
        for segment in segments:
            segment[0, pos] = lineoffset + linelength / 2.
            segment[1, pos] = lineoffset - linelength / 2.
        self.set_segments(segments)

    def get_linelength(self):
        '''
        get the length of the lines used to mark each event
        '''
        return self._linelength

    def set_linelength(self, linelength):
        '''
        set the length of the lines used to mark each event
        '''
        if linelength == self.get_linelength():
            return
        self._set_segment_extents(self.get_lineoffset(), linelength)
        self._linelength = linelength

    def get_lineoffset(self):
        '''
        get the offset of the lines used to mark each event
        '''
        return self._lineoffset

    def set_lineoffset(self, lineoffset):
        '''
        set the offset of the lines used to mark each event
        '''
        if lineoffset == self.get_lineoffset():
            return
        self._set_segment_extents(lineoffset, self.get_linelength())
        self._lineoffset = lineoffset

    def get_linewidth(self):
        '''
        get the width of the lines used to mark each event
        '''
        return self.get_linewidths()[0]

    def get_linestyle(self):
        '''
        get the style of the lines used to mark each event
        [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
        '''
        return self.get_linestyles()

    def get_color(self):
        '''
        get the color of the lines used to mark each event
        '''
        return self.get_colors()[0]
class CircleCollection(Collection):
    """A collection of circles, drawn using splines."""

    @docstring.dedent_interpd
    def __init__(self, sizes, **kwargs):
        """
        *sizes*
            Gives the area of the circle in points^2

        %(Collection)s
        """
        Collection.__init__(self, **kwargs)
        self._sizes = sizes
        self.set_transform(transforms.IdentityTransform())
        self._paths = [mpath.Path.unit_circle()]

    def get_sizes(self):
        """Return the circle areas in points^2."""
        return self._sizes

    @allow_rasterization
    def draw(self, renderer):
        # *sizes* is the area of the circle circumscribing the polygon,
        # in points^2; convert each to a radius scale in pixels.
        dpi = self.figure.dpi
        self._transforms = [
            transforms.Affine2D().scale(
                (np.sqrt(sz) * dpi / 72.0) / np.sqrt(np.pi))
            for sz in self._sizes]
        return Collection.draw(self, renderer)
class EllipseCollection(Collection):
    """A collection of ellipses, drawn using splines."""

    @docstring.dedent_interpd
    def __init__(self, widths, heights, angles, units='points', **kwargs):
        """
        *widths*: sequence
            lengths of first axes (e.g., major axis lengths)

        *heights*: sequence
            lengths of second axes

        *angles*: sequence
            angles of first axes, degrees CCW from the X-axis

        *units*: ['points' | 'inches' | 'dots' | 'width' | 'height'
        | 'x' | 'y' | 'xy']
            units in which majors and minors are given; 'width' and
            'height' refer to the dimensions of the axes, while 'x'
            and 'y' refer to the *offsets* data units. 'xy' differs
            from all others in that the angle as plotted varies with
            the aspect ratio, and equals the specified angle only when
            the aspect ratio is unity.  Hence it behaves the same as
            the :class:`~matplotlib.patches.Ellipse` with
            axes.transData as its transform.

        Additional kwargs inherited from the base :class:`Collection`:

        %(Collection)s
        """
        Collection.__init__(self, **kwargs)
        # Store semi-axis lengths; angles are kept in radians.
        self._widths = 0.5 * np.asarray(widths).ravel()
        self._heights = 0.5 * np.asarray(heights).ravel()
        self._angles = np.asarray(angles).ravel() * (np.pi / 180.0)
        self._units = units
        self.set_transform(transforms.IdentityTransform())
        self._transforms = []
        self._paths = [mpath.Path.unit_circle()]

    def _set_transforms(self):
        """
        Calculate transforms immediately before drawing.
        """
        self._transforms = []
        ax = self.axes
        fig = self.figure

        # Map the unit string to the scale from that unit to pixels.
        scale_for = {
            'xy': lambda: 1,
            'x': lambda: ax.bbox.width / ax.viewLim.width,
            'y': lambda: ax.bbox.height / ax.viewLim.height,
            'inches': lambda: fig.dpi,
            'points': lambda: fig.dpi / 72.0,
            'width': lambda: ax.bbox.width,
            'height': lambda: ax.bbox.height,
            'dots': lambda: 1.0,
        }
        try:
            sc = scale_for[self._units]()
        except KeyError:
            raise ValueError('unrecognized units: %s' % self._units)

        make_affine = transforms.Affine2D
        for w, h, a in zip(self._widths, self._heights, self._angles):
            self._transforms.append(
                make_affine().scale(w * sc, h * sc).rotate(a))

        if self._units == 'xy':
            # Apply the data transform's scale/rotation, but not its
            # translation (translation is handled by the offsets).
            m = ax.transData.get_affine().get_matrix().copy()
            m[:2, 2:] = 0
            self.set_transform(make_affine(m))

    @allow_rasterization
    def draw(self, renderer):
        self._set_transforms()
        Collection.draw(self, renderer)
class PatchCollection(Collection):
    """
    A generic collection of patches.

    This makes it easier to assign a color map to a heterogeneous
    collection of patches.

    This also may improve plotting speed, since PatchCollection will
    draw faster than a large number of patches.
    """
    def __init__(self, patches, match_original=False, **kwargs):
        """
        *patches*
            a sequence of Patch objects.  This list may include
            a heterogeneous assortment of different patch types.

        *match_original*
            If True, use the colors and linewidths of the original
            patches.  If False, new colors may be assigned by
            providing the standard collection arguments, facecolor,
            edgecolor, linewidths, norm or cmap.

        If any of *edgecolors*, *facecolors*, *linewidths*,
        *antialiaseds* are None, they default to their
        :data:`matplotlib.rcParams` patch setting, in sequence form.

        The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
        If the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not
        None (ie a call to set_array has been made), at draw time a
        call to scalar mappable will be made to set the face colors.
        """
        if match_original:
            def face(patch):
                # Unfilled patches get a fully transparent face color.
                if patch.get_fill():
                    return patch.get_facecolor()
                return [0, 0, 0, 0]

            # NOTE: when matching the original patches, any extra kwargs
            # are not forwarded to Collection.__init__ (historical
            # behavior, preserved here).
            Collection.__init__(
                self,
                edgecolors=[p.get_edgecolor() for p in patches],
                facecolors=[face(p) for p in patches],
                linewidths=[p.get_linewidth() for p in patches],
                linestyles=[p.get_linestyle() for p in patches],
                antialiaseds=[p.get_antialiased() for p in patches])
        else:
            Collection.__init__(self, **kwargs)

        self.set_paths(patches)

    def set_paths(self, patches):
        """Store each patch's path, pre-transformed by the patch transform."""
        self._paths = [p.get_transform().transform_path(p.get_path())
                       for p in patches]
class TriMesh(Collection):
    """
    Class for the efficient drawing of a triangular mesh using
    Gouraud shading.

    A triangular mesh is a :class:`~matplotlib.tri.Triangulation`
    object.
    """
    def __init__(self, triangulation, **kwargs):
        # Triangular meshes are always Gouraud-shaded and filled.
        Collection.__init__(self, **kwargs)
        self._triangulation = triangulation
        self._shading = 'gouraud'
        self._is_filled = True
        self._bbox = transforms.Bbox.unit()
        # Unfortunately this requires a copy, unless Triangulation
        # was rewritten.
        xy = np.hstack((triangulation.x.reshape(-1, 1),
                        triangulation.y.reshape(-1, 1)))
        self._bbox.update_from_data_xy(xy)
    def get_paths(self):
        """Return the triangle paths, building them lazily on first use."""
        if self._paths is None:
            self.set_paths()
        return self._paths
    def set_paths(self):
        """Rebuild the paths from the stored triangulation."""
        self._paths = self.convert_mesh_to_paths(self._triangulation)
    @staticmethod
    def convert_mesh_to_paths(tri):
        """
        Converts a given mesh into a sequence of
        :class:`matplotlib.path.Path` objects for easier rendering by
        backends that do not directly support meshes.

        This function is primarily of use to backend implementers.
        """
        Path = mpath.Path
        triangles = tri.get_masked_triangles()
        # One (ntri, 3, 2) array of triangle vertex coordinates.
        verts = np.concatenate((tri.x[triangles][..., np.newaxis],
                                tri.y[triangles][..., np.newaxis]), axis=2)
        return [Path(x) for x in verts]
    @allow_rasterization
    def draw(self, renderer):
        """Draw the mesh with one Gouraud-shaded triangle per element."""
        if not self.get_visible():
            return
        renderer.open_group(self.__class__.__name__)
        transform = self.get_transform()
        # Get a list of triangles and the color at each vertex.
        tri = self._triangulation
        triangles = tri.get_masked_triangles()
        verts = np.concatenate((tri.x[triangles][..., np.newaxis],
                                tri.y[triangles][..., np.newaxis]), axis=2)
        # Map the scalar array (if set) through the colormap first, so the
        # per-vertex colors below are up to date.
        self.update_scalarmappable()
        colors = self._facecolors[triangles]
        gc = renderer.new_gc()
        self._set_gc_clip(gc)
        gc.set_linewidth(self.get_linewidth()[0])
        renderer.draw_gouraud_triangles(gc, verts, colors, transform.frozen())
        gc.restore()
        renderer.close_group(self.__class__.__name__)
class QuadMesh(Collection):
    """
    Class for the efficient drawing of a quadrilateral mesh.

    A quadrilateral mesh consists of a grid of vertices. The
    dimensions of this array are (*meshWidth* + 1, *meshHeight* +
    1). Each vertex in the mesh has a different set of "mesh
    coordinates" representing its position in the topology of the
    mesh. For any values (*m*, *n*) such that 0 <= *m* <= *meshWidth*
    and 0 <= *n* <= *meshHeight*, the vertices at mesh coordinates
    (*m*, *n*), (*m*, *n* + 1), (*m* + 1, *n* + 1), and (*m* + 1, *n*)
    form one of the quadrilaterals in the mesh. There are thus
    (*meshWidth* * *meshHeight*) quadrilaterals in the mesh. The mesh
    need not be regular and the polygons need not be convex.

    A quadrilateral mesh is represented by a (2 x ((*meshWidth* + 1) *
    (*meshHeight* + 1))) numpy array *coordinates*, where each row is
    the *x* and *y* coordinates of one of the vertices. To define the
    function that maps from a data point to its corresponding color,
    use the :meth:`set_cmap` method. Each of these arrays is indexed in
    row-major order by the mesh coordinates of the vertex (or the mesh
    coordinates of the lower left vertex, in the case of the
    colors).

    For example, the first entry in *coordinates* is the
    coordinates of the vertex at mesh coordinates (0, 0), then the one
    at (0, 1), then at (0, 2) .. (0, meshWidth), (1, 0), (1, 1), and
    so on.

    *shading* may be 'flat', or 'gouraud'
    """
    def __init__(self, meshWidth, meshHeight, coordinates,
                 antialiased=True, shading='flat', **kwargs):
        Collection.__init__(self, **kwargs)
        self._meshWidth = meshWidth
        self._meshHeight = meshHeight
        self._coordinates = coordinates
        self._antialiased = antialiased
        self._shading = shading
        self._bbox = transforms.Bbox.unit()
        self._bbox.update_from_data_xy(coordinates.reshape(
            ((meshWidth + 1) * (meshHeight + 1), 2)))
        # By converting to floats now, we can avoid that on every draw.
        self._coordinates = self._coordinates.reshape(
            (meshHeight + 1, meshWidth + 1, 2))
        self._coordinates = np.array(self._coordinates, np.float_)
    def get_paths(self):
        """Return the quad paths, building them lazily on first use."""
        if self._paths is None:
            self.set_paths()
        return self._paths
    def set_paths(self):
        """Rebuild the paths from the stored mesh coordinates."""
        self._paths = self.convert_mesh_to_paths(
            self._meshWidth, self._meshHeight, self._coordinates)
    @staticmethod
    def convert_mesh_to_paths(meshWidth, meshHeight, coordinates):
        """
        Converts a given mesh into a sequence of
        :class:`matplotlib.path.Path` objects for easier rendering by
        backends that do not directly support quadmeshes.

        This function is primarily of use to backend implementers.
        """
        Path = mpath.Path
        if ma.isMaskedArray(coordinates):
            c = coordinates.data
        else:
            c = coordinates
        # Stack the four corners (and the repeated first corner, to close
        # the quad) of every cell along a new trailing axis.
        points = np.concatenate((
            c[0:-1, 0:-1],
            c[0:-1, 1:],
            c[1:, 1:],
            c[1:, 0:-1],
            c[0:-1, 0:-1]
        ), axis=2)
        points = points.reshape((meshWidth * meshHeight, 5, 2))
        return [Path(x) for x in points]
    def convert_mesh_to_triangles(self, meshWidth, meshHeight, coordinates):
        """
        Converts a given mesh into a sequence of triangles, each point
        with its own color.  This is useful for experiments using
        `draw_gouraud_triangle`.
        """
        if ma.isMaskedArray(coordinates):
            p = coordinates.data
        else:
            p = coordinates
        # Split each quad into four triangles that share the quad's
        # centroid, so colors can be interpolated smoothly.
        p_a = p[:-1, :-1]
        p_b = p[:-1, 1:]
        p_c = p[1:, 1:]
        p_d = p[1:, :-1]
        p_center = (p_a + p_b + p_c + p_d) / 4.0
        triangles = np.concatenate((
            p_a, p_b, p_center,
            p_b, p_c, p_center,
            p_c, p_d, p_center,
            p_d, p_a, p_center,
        ), axis=2)
        triangles = triangles.reshape((meshWidth * meshHeight * 4, 3, 2))
        # The same subdivision applied to the per-vertex face colors.
        c = self.get_facecolor().reshape((meshHeight + 1, meshWidth + 1, 4))
        c_a = c[:-1, :-1]
        c_b = c[:-1, 1:]
        c_c = c[1:, 1:]
        c_d = c[1:, :-1]
        c_center = (c_a + c_b + c_c + c_d) / 4.0
        colors = np.concatenate((
            c_a, c_b, c_center,
            c_b, c_c, c_center,
            c_c, c_d, c_center,
            c_d, c_a, c_center,
        ), axis=2)
        colors = colors.reshape((meshWidth * meshHeight * 4, 3, 4))
        return triangles, colors
    def get_datalim(self, transData):
        """Return the bbox computed from the coordinates at init time."""
        return self._bbox
    @allow_rasterization
    def draw(self, renderer):
        """Draw the mesh, Gouraud-shaded or as a flat quad mesh."""
        if not self.get_visible():
            return
        renderer.open_group(self.__class__.__name__, self.get_gid())
        transform = self.get_transform()
        transOffset = self.get_offset_transform()
        offsets = self._offsets
        # Convert offsets through the unit machinery, if units are in play.
        if self.have_units():
            if len(self._offsets):
                xs = self.convert_xunits(self._offsets[:, 0])
                ys = self.convert_yunits(self._offsets[:, 1])
                offsets = zip(xs, ys)
        offsets = np.asarray(offsets, np.float_)
        offsets.shape = (-1, 2)  # Make it Nx2
        self.update_scalarmappable()
        if not transform.is_affine:
            # Apply the non-affine part of the transform to the coordinates
            # up front; only the identity remains for the renderer.
            coordinates = self._coordinates.reshape(
                (self._coordinates.shape[0] *
                 self._coordinates.shape[1],
                 2))
            coordinates = transform.transform(coordinates)
            coordinates = coordinates.reshape(self._coordinates.shape)
            transform = transforms.IdentityTransform()
        else:
            coordinates = self._coordinates
        if not transOffset.is_affine:
            offsets = transOffset.transform_non_affine(offsets)
            transOffset = transOffset.get_affine()
        gc = renderer.new_gc()
        self._set_gc_clip(gc)
        gc.set_linewidth(self.get_linewidth()[0])
        if self._shading == 'gouraud':
            triangles, colors = self.convert_mesh_to_triangles(
                self._meshWidth, self._meshHeight, coordinates)
            renderer.draw_gouraud_triangles(
                gc, triangles, colors, transform.frozen())
        else:
            renderer.draw_quad_mesh(
                gc, transform.frozen(), self._meshWidth, self._meshHeight,
                coordinates, offsets, transOffset, self.get_facecolor(),
                self._antialiased, self.get_edgecolors())
        gc.restore()
        renderer.close_group(self.__class__.__name__)
# Register the Collection kwarg documentation so %(ClassName)s can be
# interpolated into the docstrings above.
patchstr = artist.kwdoc(Collection)
docstring.interpd.update(
    dict((cls_name, patchstr)
         for cls_name in ('QuadMesh', 'TriMesh', 'PolyCollection',
                          'BrokenBarHCollection', 'RegularPolyCollection',
                          'PathCollection', 'StarPolygonCollection',
                          'PatchCollection', 'CircleCollection',
                          'Collection')))
docstring.interpd.update(LineCollection=artist.kwdoc(LineCollection))
| 34.207207 | 87 | 0.580491 | from __future__ import print_function
import warnings
import numpy as np
import numpy.ma as ma
import matplotlib as mpl
import matplotlib.cbook as cbook
import matplotlib.colors as mcolors
import matplotlib.cm as cm
from matplotlib import docstring
import matplotlib.transforms as transforms
import matplotlib.artist as artist
from matplotlib.artist import allow_rasterization
import matplotlib.backend_bases as backend_bases
import matplotlib.path as mpath
from matplotlib import _path
import matplotlib.mlab as mlab
class Collection(artist.Artist, cm.ScalarMappable):
_offsets = np.array([], np.float_)
_offsets.shape = (0, 2)
_transOffset = transforms.IdentityTransform()
_transforms = []
    def __init__(self,
                 edgecolors=None,
                 facecolors=None,
                 linewidths=None,
                 linestyles='solid',
                 antialiaseds=None,
                 offsets=None,
                 transOffset=None,
                 norm=None,
                 cmap=None,
                 pickradius=5.0,
                 hatch=None,
                 urls=None,
                 offset_position='screen',
                 zorder=1,
                 **kwargs
                 ):
        """
        Create a :class:`Collection`.

        The color/linewidth/linestyle/antialiased arguments are stored via
        the corresponding ``set_*`` methods and may be sequences.  *norm*
        and *cmap* are forwarded to
        :class:`~matplotlib.cm.ScalarMappable`.  *offsets*, if given, is
        reshaped to Nx2; when *transOffset* is also given they are kept as
        transformed offsets, otherwise they are stored as uniform offsets
        (applied by subclasses when paths are set — see
        ``LineCollection._add_offsets``).  Remaining *kwargs* are passed
        to :meth:`update`.
        """
        artist.Artist.__init__(self)
        cm.ScalarMappable.__init__(self, norm, cmap)
        self.set_edgecolor(edgecolors)
        self.set_facecolor(facecolors)
        self.set_linewidth(linewidths)
        self.set_linestyle(linestyles)
        self.set_antialiased(antialiaseds)
        self.set_pickradius(pickradius)
        self.set_urls(urls)
        self.set_hatch(hatch)
        self.set_offset_position(offset_position)
        self.set_zorder(zorder)
        self._uniform_offsets = None
        self._offsets = np.array([[0, 0]], np.float_)
        if offsets is not None:
            offsets = np.asanyarray(offsets)
            offsets.shape = (-1, 2)  # Make it Nx2.
            if transOffset is not None:
                self._offsets = offsets
                self._transOffset = transOffset
            else:
                # No offset transform given: treat them as uniform offsets.
                self._uniform_offsets = offsets
        self._path_effects = None
        self.update(kwargs)
        # Paths are built lazily by subclasses.
        self._paths = None
    @staticmethod
    def _get_value(val):
        """Coerce *val* to a sequence of floats, or raise TypeError."""
        try:
            return (float(val), )
        except TypeError:
            if cbook.iterable(val) and len(val):
                try:
                    float(val[0])
                except (TypeError, ValueError):
                    pass  # fall through to the TypeError below
                else:
                    return val
        raise TypeError('val must be a float or nonzero sequence of floats')
    @staticmethod
    def _get_bool(val):
        """Coerce *val* to a nonempty sequence of bools, or raise TypeError."""
        if not cbook.iterable(val):
            val = (val,)
        try:
            bool(val[0])
        except (TypeError, IndexError):
            raise TypeError('val must be a bool or nonzero sequence of them')
        return val
    def get_paths(self):
        """Return the stored paths (may be None until built by a subclass)."""
        return self._paths
    def set_paths(self):
        """Build the paths; subclasses must override this."""
        raise NotImplementedError
    def get_transforms(self):
        """Return the list of per-path transforms (may be empty)."""
        return self._transforms
    def get_offset_transform(self):
        """
        Return the offset transform, first resolving any object that is
        not a Transform but exposes ``_as_mpl_transform``.
        """
        t = self._transOffset
        if (not isinstance(t, transforms.Transform)
            and hasattr(t, '_as_mpl_transform')):
            t = t._as_mpl_transform(self.axes)
        return t
    def get_datalim(self, transData):
        """Return the collection's extents as a Bbox in data coordinates."""
        transform = self.get_transform()
        transOffset = self.get_offset_transform()
        offsets = self._offsets
        paths = self.get_paths()
        # Bake the non-affine parts into the paths/offsets so only affine
        # transforms remain for the extent computation.
        if not transform.is_affine:
            paths = [transform.transform_path_non_affine(p) for p in paths]
            transform = transform.get_affine()
        if not transOffset.is_affine:
            offsets = transOffset.transform_non_affine(offsets)
            transOffset = transOffset.get_affine()
        offsets = np.asanyarray(offsets, np.float_)
        if np.ma.isMaskedArray(offsets):
            # Masked entries become NaN and are ignored by the extents code.
            offsets = offsets.filled(np.nan)
        offsets.shape = (-1, 2)
        if paths:
            result = mpath.get_path_collection_extents(
                transform.frozen(), paths, self.get_transforms(),
                offsets, transOffset.frozen())
            result = result.inverse_transformed(transData)
        else:
            # No paths: degenerate bbox at the origin.
            result = transforms.Bbox([[0, 0], [0, 0]])
        return result
    def get_window_extent(self, renderer):
        """Return the extents; computed as get_datalim with identity transData."""
        return self.get_datalim(transforms.IdentityTransform())
    def _prepare_points(self):
        """Convert units, split off the non-affine transform parts, and
        return ``(transform, transOffset, offsets, paths)`` ready for the
        renderer."""
        transform = self.get_transform()
        transOffset = self.get_offset_transform()
        offsets = self._offsets
        paths = self.get_paths()
        if self.have_units():
            # Rebuild paths/offsets with unit-converted coordinates.
            # NOTE(review): ``mpath.Path(zip(...))`` and
            # ``np.asanyarray(zip(...))`` rely on zip() returning a list
            # (Python 2); confirm target Python version.
            paths = []
            for path in self.get_paths():
                vertices = path.vertices
                xs, ys = vertices[:, 0], vertices[:, 1]
                xs = self.convert_xunits(xs)
                ys = self.convert_yunits(ys)
                paths.append(mpath.Path(zip(xs, ys), path.codes))
            if offsets.size > 0:
                xs = self.convert_xunits(offsets[:, 0])
                ys = self.convert_yunits(offsets[:, 1])
                offsets = zip(xs, ys)
        offsets = np.asanyarray(offsets, np.float_)
        offsets.shape = (-1, 2)
        if not transform.is_affine:
            # Pre-apply the curved part; pass only the affine remainder on.
            paths = [transform.transform_path_non_affine(path)
                     for path in paths]
            transform = transform.get_affine()
        if not transOffset.is_affine:
            offsets = transOffset.transform_non_affine(offsets)
            transOffset = transOffset.get_affine()
        if np.ma.isMaskedArray(offsets):
            offsets = offsets.filled(np.nan)
        return transform, transOffset, offsets, paths
@allow_rasterization
    def draw(self, renderer):
        """Render the collection via the renderer's path-collection API."""
        if not self.get_visible():
            return
        renderer.open_group(self.__class__.__name__, self.get_gid())
        # Resolve mapped colors before reading face/edge colors below.
        self.update_scalarmappable()
        transform, transOffset, offsets, paths = self._prepare_points()
        gc = renderer.new_gc()
        self._set_gc_clip(gc)
        gc.set_snap(self.get_snap())
        if self._hatch:
            gc.set_hatch(self._hatch)
        if self.get_sketch_params() is not None:
            gc.set_sketch_params(*self.get_sketch_params())
        if self.get_path_effects():
            # Path effects take over the actual drawing.
            for pe in self.get_path_effects():
                pe.draw_path_collection(renderer,
                    gc, transform.frozen(), paths, self.get_transforms(),
                    offsets, transOffset, self.get_facecolor(), self.get_edgecolor(),
                    self._linewidths, self._linestyles, self._antialiaseds, self._urls,
                    self._offset_position)
        else:
            renderer.draw_path_collection(
                gc, transform.frozen(), paths, self.get_transforms(),
                offsets, transOffset, self.get_facecolor(), self.get_edgecolor(),
                self._linewidths, self._linestyles, self._antialiaseds, self._urls,
                self._offset_position)
        gc.restore()
        renderer.close_group(self.__class__.__name__)
    def set_pickradius(self, pr):
        """Set the pick radius (in points) used by :meth:`contains`."""
        self._pickradius = pr
    def get_pickradius(self):
        """Return the pick radius (in points)."""
        return self._pickradius
    def contains(self, mouseevent):
        """Test whether the mouse event occurred in the collection.

        Returns ``(bool, dict)`` where the dict carries ``ind``, the
        indices of the paths hit.
        """
        if callable(self._contains):
            # A user-supplied containment test overrides the default.
            return self._contains(self, mouseevent)
        if not self.get_visible():
            return False, {}
        if self._picker is True:
            pickradius = self._pickradius
        else:
            # A numeric picker value acts as the pick radius.
            try:
                pickradius = float(self._picker)
            except TypeError:
                warnings.warn(
                    "Collection picker %s could not be converted to float"
                    % self._picker)
                pickradius = self._pickradius
        transform, transOffset, offsets, paths = self._prepare_points()
        ind = _path.point_in_path_collection(
            mouseevent.x, mouseevent.y, pickradius,
            transform.frozen(), paths, self.get_transforms(),
            offsets, transOffset, pickradius <= 0,
            self.get_offset_position())
        return len(ind) > 0, dict(ind=ind)
    def set_urls(self, urls):
        """Set per-path URLs; ``None`` resets to a single-None list."""
        if urls is None:
            self._urls = [None, ]
        else:
            self._urls = urls
    def get_urls(self):
        """Return the per-path URL list."""
        return self._urls
    def set_hatch(self, hatch):
        """Set the hatching pattern string (e.g. '/', 'x', '|')."""
        self._hatch = hatch
    def get_hatch(self):
        """Return the current hatching pattern."""
        return self._hatch
    def set_offsets(self, offsets):
        """Set the (N, 2) offsets applied to the paths.

        When the collection was constructed with uniform offsets (no
        offset transform), those are updated instead.
        """
        offsets = np.asanyarray(offsets, np.float_)
        offsets.shape = (-1, 2)
        if self._uniform_offsets is None:
            self._offsets = offsets
        else:
            self._uniform_offsets = offsets
    def get_offsets(self):
        """Return the (N, 2) offsets (uniform ones when set)."""
        if self._uniform_offsets is None:
            return self._offsets
        else:
            return self._uniform_offsets
    def set_offset_position(self, offset_position):
        """Set whether offsets are applied in 'screen' or 'data' space."""
        if offset_position not in ('screen', 'data'):
            raise ValueError("offset_position must be 'screen' or 'data'")
        self._offset_position = offset_position
    def get_offset_position(self):
        """Return the offset position, 'screen' or 'data'."""
        return self._offset_position
    def set_linewidth(self, lw):
        """Set the linewidth(s); ``None`` means rcParams['patch.linewidth']."""
        if lw is None:
            lw = mpl.rcParams['patch.linewidth']
        self._linewidths = self._get_value(lw)
    def set_linewidths(self, lw):
        """Alias for :meth:`set_linewidth`."""
        return self.set_linewidth(lw)
    def set_lw(self, lw):
        """Alias for :meth:`set_linewidth`."""
        return self.set_linewidth(lw)
    def set_linestyle(self, ls):
        """Set the line style(s).

        Accepts a style name ('solid', 'dashed', ...), a short name via
        ``cbook.ls_mapper`` ('-', '--', ...), an (offset, on-off-seq)
        dash tuple, or a sequence mixing any of these.  Any
        unrecognized input is reported with a single ValueError.
        """
        try:
            dashd = backend_bases.GraphicsContextBase.dashd
            if cbook.is_string_like(ls):
                # Single named style.
                if ls in dashd:
                    dashes = [dashd[ls]]
                elif ls in cbook.ls_mapper:
                    dashes = [dashd[cbook.ls_mapper[ls]]]
                else:
                    raise ValueError()
            elif cbook.iterable(ls):
                try:
                    # Sequence of styles: names or (offset, seq) pairs.
                    dashes = []
                    for x in ls:
                        if cbook.is_string_like(x):
                            if x in dashd:
                                dashes.append(dashd[x])
                            elif x in cbook.ls_mapper:
                                dashes.append(dashd[cbook.ls_mapper[x]])
                            else:
                                raise ValueError()
                        elif cbook.iterable(x) and len(x) == 2:
                            dashes.append(x)
                        else:
                            raise ValueError()
                except ValueError:
                    # Fall back: the whole input may itself be one
                    # (offset, on-off-seq) pair.
                    if len(ls) == 2:
                        dashes = ls
                    else:
                        raise ValueError()
            else:
                raise ValueError()
        except ValueError:
            raise ValueError('Do not know how to convert %s to dashes' % ls)
        self._linestyles = dashes
    def set_linestyles(self, ls):
        """Alias for :meth:`set_linestyle`."""
        return self.set_linestyle(ls)
    def set_dashes(self, ls):
        """Alias for :meth:`set_linestyle`."""
        return self.set_linestyle(ls)
    def set_antialiased(self, aa):
        """Set antialiasing; ``None`` means rcParams['patch.antialiased']."""
        if aa is None:
            aa = mpl.rcParams['patch.antialiased']
        self._antialiaseds = self._get_bool(aa)
    def set_antialiaseds(self, aa):
        """Alias for :meth:`set_antialiased`."""
        return self.set_antialiased(aa)
    def set_color(self, c):
        """Set both the facecolor(s) and the edgecolor(s) to *c*."""
        self.set_facecolor(c)
        self.set_edgecolor(c)
    def set_facecolor(self, c):
        """Set the facecolor(s).

        'none' disables filling; ``None`` falls back to
        rcParams['patch.facecolor'].  The original spec is kept in
        ``_facecolors_original`` so alpha changes can re-resolve it.
        """
        self._is_filled = True
        try:
            if c.lower() == 'none':
                self._is_filled = False
        except AttributeError:
            # Non-string color specs have no .lower().
            pass
        if c is None:
            c = mpl.rcParams['patch.facecolor']
        self._facecolors_original = c
        self._facecolors = mcolors.colorConverter.to_rgba_array(c, self._alpha)
    def set_facecolors(self, c):
        """Alias for :meth:`set_facecolor`."""
        return self.set_facecolor(c)
    def get_facecolor(self):
        """Return the RGBA facecolor array."""
        return self._facecolors
    get_facecolors = get_facecolor
    def get_edgecolor(self):
        """Return the RGBA edgecolor array (facecolors when 'face')."""
        # NOTE(review): ``==`` against an ndarray yields an elementwise
        # array; this relies on _edgecolors being the string 'face' in
        # that branch -- confirm on newer numpy.
        if self._edgecolors == 'face':
            return self.get_facecolors()
        else:
            return self._edgecolors
    get_edgecolors = get_edgecolor
    def set_edgecolor(self, c):
        """Set the edgecolor(s).

        'none' disables stroking; 'face' mirrors the facecolor; ``None``
        falls back to rcParams['patch.edgecolor'].
        """
        self._is_stroked = True
        try:
            if c.lower() == 'none':
                self._is_stroked = False
        except AttributeError:
            pass
        try:
            if c.lower() == 'face':
                self._edgecolors = 'face'
                self._edgecolors_original = 'face'
                return
        except AttributeError:
            pass
        if c is None:
            c = mpl.rcParams['patch.edgecolor']
        self._edgecolors_original = c
        self._edgecolors = mcolors.colorConverter.to_rgba_array(c, self._alpha)
    def set_edgecolors(self, c):
        """Alias for :meth:`set_edgecolor`."""
        return self.set_edgecolor(c)
    def set_alpha(self, alpha):
        """Set the alpha and re-resolve face/edge colors with it.

        Raises TypeError if *alpha* is neither a float nor None.
        """
        if alpha is not None:
            try:
                float(alpha)
            except TypeError:
                raise TypeError('alpha must be a float or None')
        artist.Artist.set_alpha(self, alpha)
        # Re-convert from the original color specs so repeated alpha
        # changes do not compound.
        try:
            self._facecolors = mcolors.colorConverter.to_rgba_array(
                self._facecolors_original, self._alpha)
        except (AttributeError, TypeError, IndexError):
            pass
        try:
            if self._edgecolors_original != 'face':
                self._edgecolors = mcolors.colorConverter.to_rgba_array(
                    self._edgecolors_original, self._alpha)
        except (AttributeError, TypeError, IndexError):
            pass
    def get_linewidths(self):
        """Return the linewidth sequence."""
        return self._linewidths
    get_linewidth = get_linewidths
    def get_linestyles(self):
        """Return the dash-spec sequence."""
        return self._linestyles
    get_dashes = get_linestyle = get_linestyles
    def update_scalarmappable(self):
        """Update colors from the scalar-mappable array, if any.

        Filled collections map the array onto facecolors, otherwise
        (stroked only) onto edgecolors.
        """
        if self._A is None:
            return
        if self._A.ndim > 1:
            raise ValueError('Collections can only map rank 1 arrays')
        if not self.check_update("array"):
            # Array unchanged since the last mapping; nothing to do.
            return
        if self._is_filled:
            self._facecolors = self.to_rgba(self._A, self._alpha)
        elif self._is_stroked:
            self._edgecolors = self.to_rgba(self._A, self._alpha)
    def update_from(self, other):
        """Copy properties from *other* onto self."""
        artist.Artist.update_from(self, other)
        self._antialiaseds = other._antialiaseds
        self._edgecolors_original = other._edgecolors_original
        self._edgecolors = other._edgecolors
        self._facecolors_original = other._facecolors_original
        self._facecolors = other._facecolors
        self._linewidths = other._linewidths
        self._linestyles = other._linestyles
        self._pickradius = other._pickradius
        self._hatch = other._hatch
        # ScalarMappable state (update_from does not copy these).
        self._A = other._A
        self.norm = other.norm
        self.cmap = other.cmap
Collection="""\
Valid Collection keyword arguments:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets)
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
""")
class PathCollection(Collection):
    """A Collection holding an arbitrary sequence of Path objects."""

    @docstring.dedent_interpd
    def __init__(self, paths, sizes=None, **kwargs):
        """*paths* is a sequence of Path objects; *sizes*, when given,
        provides a per-path area (points**2) used to scale each path at
        draw time."""
        Collection.__init__(self, **kwargs)
        self.set_paths(paths)
        self._sizes = sizes

    def set_paths(self, paths):
        """Replace the stored paths."""
        self._paths = paths

    def get_paths(self):
        """Return the stored paths."""
        return self._paths

    def get_sizes(self):
        """Return the per-path sizes (or None)."""
        return self._sizes

    @allow_rasterization
    def draw(self, renderer):
        # Rebuild the per-path scale transforms on every draw since the
        # figure dpi may have changed.
        if self._sizes is not None:
            self._transforms = [
                transforms.Affine2D().scale(
                    (np.sqrt(area) * self.figure.dpi / 72.0))
                for area in self._sizes]
        return Collection.draw(self, renderer)
class PolyCollection(Collection):
    """A collection of (possibly unclosed) polygons given by vertex lists."""

    @docstring.dedent_interpd
    def __init__(self, verts, sizes=None, closed=True, **kwargs):
        """*verts*: sequence of (N, 2) vertex arrays; *closed*: append a
        CLOSEPOLY code so each polygon is explicitly closed."""
        Collection.__init__(self, **kwargs)
        self._sizes = sizes
        self.set_verts(verts, closed)

    def set_verts(self, verts, closed=True):
        """Rebuild the paths from *verts*."""
        if np.ma.isMaskedArray(verts):
            # Masked vertices become NaN so they are skipped when drawn.
            verts = verts.astype(np.float_).filled(np.nan)
        if closed:
            self._paths = []
            for xy in verts:
                if len(xy):
                    # Append a dummy vertex for the CLOSEPOLY code.
                    if np.ma.isMaskedArray(xy):
                        xy = np.ma.concatenate([xy, np.zeros((1, 2))])
                    else:
                        xy = np.asarray(xy)
                        xy = np.concatenate([xy, np.zeros((1, 2))])
                    codes = np.empty(xy.shape[0], dtype=mpath.Path.code_type)
                    codes[:] = mpath.Path.LINETO
                    codes[0] = mpath.Path.MOVETO
                    codes[-1] = mpath.Path.CLOSEPOLY
                    self._paths.append(mpath.Path(xy, codes))
                else:
                    self._paths.append(mpath.Path(xy))
        else:
            self._paths = [mpath.Path(xy) for xy in verts]
    set_paths = set_verts

    @allow_rasterization
    def draw(self, renderer):
        # Per-polygon scaling from sizes (points**2 -> display units).
        if self._sizes is not None:
            self._transforms = [
                transforms.Affine2D().scale(
                    (np.sqrt(x) * self.figure.dpi / 72.0))
                for x in self._sizes]
        return Collection.draw(self, renderer)
class BrokenBarHCollection(PolyCollection):
    """A PolyCollection of horizontal bars sharing one y range, each
    spanning an (xmin, xwidth) interval."""

    @docstring.dedent_interpd
    def __init__(self, xranges, yrange, **kwargs):
        """*xranges*: sequence of (xmin, xwidth); *yrange*: (ymin, ywidth)."""
        ymin, ywidth = yrange
        ymax = ymin + ywidth
        verts = []
        for xmin, xwidth in xranges:
            xmax = xmin + xwidth
            # Closed rectangle, counter-clockwise from the lower-left.
            verts.append([(xmin, ymin),
                          (xmin, ymax),
                          (xmax, ymax),
                          (xmax, ymin),
                          (xmin, ymin)])
        PolyCollection.__init__(self, verts, **kwargs)

    @staticmethod
    def span_where(x, ymin, ymax, where, **kwargs):
        """Return a collection of bars over [ymin, ymax] covering the
        contiguous regions of *x* where *where* is True."""
        xranges = []
        for start, stop in mlab.contiguous_regions(where):
            xslice = x[start:stop]
            if len(xslice):
                xranges.append((xslice[0], xslice[-1] - xslice[0]))
        return BrokenBarHCollection(
            xranges, [ymin, ymax - ymin], **kwargs)
class RegularPolyCollection(Collection):
    """A collection of regular polygons with *numsides* sides."""
    # Subclasses override this to change the path shape (star/asterisk).
    _path_generator = mpath.Path.unit_regular_polygon

    @docstring.dedent_interpd
    def __init__(self,
                 numsides,
                 rotation=0,
                 sizes=(1,),
                 **kwargs):
        """*numsides*: number of sides; *rotation*: rotation in radians;
        *sizes*: areas of the circumscribed circles in points**2."""
        Collection.__init__(self, **kwargs)
        self._sizes = sizes
        self._numsides = numsides
        self._paths = [self._path_generator(numsides)]
        self._rotation = rotation
        self.set_transform(transforms.IdentityTransform())

    @allow_rasterization
    def draw(self, renderer):
        # One rotate+scale transform per size; sqrt(pi) converts an
        # area into the equivalent circle radius.
        self._transforms = [
            transforms.Affine2D().rotate(-self._rotation).scale(
                (np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))
            for x in self._sizes]
        return Collection.draw(self, renderer)

    def get_numsides(self):
        """Return the number of sides."""
        return self._numsides

    def get_rotation(self):
        """Return the rotation in radians."""
        return self._rotation

    def get_sizes(self):
        """Return the size sequence."""
        return self._sizes
class StarPolygonCollection(RegularPolyCollection):
    """RegularPolyCollection drawing star-shaped polygons."""
    _path_generator = mpath.Path.unit_regular_star
class AsteriskPolygonCollection(RegularPolyCollection):
    """RegularPolyCollection drawing asterisk-shaped polygons."""
    _path_generator = mpath.Path.unit_regular_asterisk
class LineCollection(Collection):
    """A collection of line segments, each an (N, 2) vertex array.

    Colors map onto the edges; faces are always 'none'.
    """

    def __init__(self, segments,
                 linewidths=None,
                 colors=None,
                 antialiaseds=None,
                 linestyles='solid',
                 offsets=None,
                 transOffset=None,
                 norm=None,
                 cmap=None,
                 pickradius=5,
                 zorder=2,
                 **kwargs
                 ):
        """*segments*: sequence of (N, 2) arrays; remaining parameters
        default from the 'lines.*' rcParams."""
        if colors is None:
            colors = mpl.rcParams['lines.color']
        if linewidths is None:
            linewidths = (mpl.rcParams['lines.linewidth'],)
        if antialiaseds is None:
            antialiaseds = (mpl.rcParams['lines.antialiased'],)
        self.set_linestyles(linestyles)
        colors = mcolors.colorConverter.to_rgba_array(colors)
        Collection.__init__(
            self,
            edgecolors=colors,
            facecolors='none',
            linewidths=linewidths,
            linestyles=linestyles,
            antialiaseds=antialiaseds,
            offsets=offsets,
            transOffset=transOffset,
            norm=norm,
            cmap=cmap,
            pickradius=pickradius,
            zorder=zorder,
            **kwargs)
        self.set_segments(segments)

    def set_segments(self, segments):
        """Replace the segments, rebuilding the paths."""
        if segments is None:
            return
        _segments = []
        for seg in segments:
            # Keep masked arrays intact so masked points break lines.
            if not np.ma.isMaskedArray(seg):
                seg = np.asarray(seg, np.float_)
            _segments.append(seg)
        if self._uniform_offsets is not None:
            _segments = self._add_offsets(_segments)
        self._paths = [mpath.Path(seg) for seg in _segments]
    set_verts = set_segments
    set_paths = set_segments

    def get_segments(self):
        """Return the segments as a list of vertex arrays."""
        segments = []
        for path in self._paths:
            vertices = [vertex for vertex, _ in path.iter_segments()]
            vertices = np.asarray(vertices)
            segments.append(vertices)
        return segments

    def _add_offsets(self, segs):
        """Shift each segment by the uniform offsets (cycled)."""
        offsets = self._uniform_offsets
        Nsegs = len(segs)
        Noffs = offsets.shape[0]
        if Noffs == 1:
            # A single offset accumulates: segment i is shifted i times.
            for i in range(Nsegs):
                segs[i] = segs[i] + i * offsets
        else:
            for i in range(Nsegs):
                io = i % Noffs
                segs[i] = segs[i] + offsets[io:io + 1]
        return segs

    def set_color(self, c):
        """Set the edgecolor(s) (lines have no face)."""
        self.set_edgecolor(c)

    def color(self, c):
        """Deprecated alias for :meth:`set_color`."""
        warnings.warn('LineCollection.color deprecated; use set_color instead')
        return self.set_color(c)

    def get_color(self):
        """Return the RGBA edgecolor array."""
        return self._edgecolors
    get_colors = get_color
class EventCollection(LineCollection):
    """A collection of identical parallel short line segments ("events"),
    one per coordinate in *positions*, as drawn by e.g. ``eventplot``.
    """

    def __init__(self,
                 positions,
                 orientation=None,
                 lineoffset=0,
                 linelength=1,
                 linewidth=None,
                 color=None,
                 linestyle='solid',
                 antialiased=None,
                 **kwargs
                 ):
        """
        *positions*: 1-D sequence of event coordinates.
            NOTE(review): sorted *in place*, mutating the caller's data.
        *orientation*: None, 'none', 'horizontal' (default) or 'vertical'.
        *lineoffset*: center of each marker on the off-event axis.
        *linelength*: total marker length on the off-event axis.
        Remaining style arguments are forwarded to LineCollection.
        """
        # Endpoints of every marker along the off-event axis.
        segment = (lineoffset + linelength / 2.,
                   lineoffset - linelength / 2.)
        if len(positions) == 0:
            segments = []
        elif hasattr(positions, 'ndim') and positions.ndim > 1:
            raise ValueError('if positions is an ndarry it cannot have '
                             'dimensionality great than 1 ')
        elif (orientation is None or orientation.lower() == 'none' or
              orientation.lower() == 'horizontal'):
            positions.sort()
            segments = [[(coord1, coord2) for coord2 in segment] for
                        coord1 in positions]
            self._is_horizontal = True
        elif orientation.lower() == 'vertical':
            positions.sort()
            segments = [[(coord2, coord1) for coord2 in segment] for
                        coord1 in positions]
            self._is_horizontal = False
        else:
            raise ValueError("orientation must be 'horizontal' or 'vertical'")
        LineCollection.__init__(self,
                                segments,
                                linewidths=linewidth,
                                colors=color,
                                antialiaseds=antialiased,
                                linestyles=linestyle,
                                **kwargs)
        self._linelength = linelength
        self._lineoffset = lineoffset

    def get_positions(self):
        """Return the event positions, one per segment."""
        pos = 0 if self.is_horizontal() else 1
        return [segment[0, pos] for segment in self.get_segments()]

    def set_positions(self, positions):
        """Replace the event positions, rebuilding all segments."""
        # Bugfix: the empty guard previously tested hasattr(..., 'len'),
        # which is never true (the attribute is __len__), so it was dead.
        if positions is None or (hasattr(positions, '__len__') and
                                 len(positions) == 0):
            self.set_segments([])
            return
        lineoffset = self.get_lineoffset()
        linelength = self.get_linelength()
        segment = (lineoffset + linelength / 2.,
                   lineoffset - linelength / 2.)
        positions = np.asanyarray(positions)
        positions.sort()
        if self.is_horizontal():
            segments = [[(coord1, coord2) for coord2 in segment] for
                        coord1 in positions]
        else:
            segments = [[(coord2, coord1) for coord2 in segment] for
                        coord1 in positions]
        self.set_segments(segments)

    def add_positions(self, position):
        """Add one or more events at the given position(s)."""
        # Same hasattr fix as in set_positions.
        if position is None or (hasattr(position, '__len__') and
                                len(position) == 0):
            return
        positions = self.get_positions()
        positions = np.hstack([positions, np.asanyarray(position)])
        self.set_positions(positions)
    extend_positions = append_positions = add_positions

    def is_horizontal(self):
        """True if the events are distributed along the x axis."""
        return self._is_horizontal

    def get_orientation(self):
        """Return 'horizontal' or 'vertical'."""
        return 'horizontal' if self.is_horizontal() else 'vertical'

    def switch_orientation(self):
        """Swap the roles of the x and y axes for every segment."""
        segments = self.get_segments()
        for i, segment in enumerate(segments):
            segments[i] = np.fliplr(segment)
        self.set_segments(segments)
        self._is_horizontal = not self.is_horizontal()

    def set_orientation(self, orientation=None):
        """Set the orientation, switching axes only when it changes."""
        if (orientation is None or orientation.lower() == 'none' or
                orientation.lower() == 'horizontal'):
            is_horizontal = True
        elif orientation.lower() == 'vertical':
            is_horizontal = False
        else:
            raise ValueError("orientation must be 'horizontal' or 'vertical'")
        if is_horizontal == self.is_horizontal():
            return
        self.switch_orientation()

    def get_linelength(self):
        """Return the marker length."""
        return self._linelength

    def set_linelength(self, linelength):
        """Set the marker length, updating every segment in place."""
        if linelength == self.get_linelength():
            return
        lineoffset = self.get_lineoffset()
        segments = self.get_segments()
        pos = 1 if self.is_horizontal() else 0
        for segment in segments:
            segment[0, pos] = lineoffset + linelength / 2.
            segment[1, pos] = lineoffset - linelength / 2.
        self.set_segments(segments)
        self._linelength = linelength

    def get_lineoffset(self):
        """Return the marker center offset."""
        return self._lineoffset

    def set_lineoffset(self, lineoffset):
        """Set the marker center offset, updating every segment."""
        if lineoffset == self.get_lineoffset():
            return
        linelength = self.get_linelength()
        segments = self.get_segments()
        pos = 1 if self.is_horizontal() else 0
        for segment in segments:
            segment[0, pos] = lineoffset + linelength / 2.
            segment[1, pos] = lineoffset - linelength / 2.
        self.set_segments(segments)
        self._lineoffset = lineoffset

    def get_linewidth(self):
        """Return the (single) linewidth."""
        return self.get_linewidths()[0]

    def get_linestyle(self):
        """Return the linestyle(s)."""
        return self.get_linestyles()

    def get_color(self):
        """Return the (single) RGBA color."""
        return self.get_colors()[0]
class CircleCollection(Collection):
    """A collection of circles whose *sizes* are areas in points**2."""

    @docstring.dedent_interpd
    def __init__(self, sizes, **kwargs):
        """*sizes*: sequence of circle areas, in points**2."""
        Collection.__init__(self, **kwargs)
        self._sizes = sizes
        self.set_transform(transforms.IdentityTransform())
        self._paths = [mpath.Path.unit_circle()]

    def get_sizes(self):
        """Return the area sequence."""
        return self._sizes

    @allow_rasterization
    def draw(self, renderer):
        # Turn each area (points**2) into a pixel radius: sqrt(area/pi)
        # points scaled by dpi/72.
        self._transforms = [
            transforms.Affine2D().scale(
                (np.sqrt(area) * self.figure.dpi / 72.0) / np.sqrt(np.pi))
            for area in self._sizes]
        return Collection.draw(self, renderer)
class EllipseCollection(Collection):
    """A collection of ellipses with per-ellipse width, height and angle."""

    @docstring.dedent_interpd
    def __init__(self, widths, heights, angles, units='points', **kwargs):
        """*widths*/*heights*: full axes lengths; *angles*: rotation in
        degrees; *units*: coordinate system the sizes are given in."""
        Collection.__init__(self, **kwargs)
        # Store semi-axes; convert angles from degrees to radians.
        self._widths = 0.5 * np.asarray(widths).ravel()
        self._heights = 0.5 * np.asarray(heights).ravel()
        self._angles = np.asarray(angles).ravel() * (np.pi / 180.0)
        self._units = units
        self.set_transform(transforms.IdentityTransform())
        self._transforms = []
        self._paths = [mpath.Path.unit_circle()]

    def _set_transforms(self):
        """Rebuild the per-ellipse transforms from the current axes/figure
        geometry (must be deferred to draw time)."""
        self._transforms = []
        ax = self.axes
        fig = self.figure
        # Scale factor from the declared units into display units.
        if self._units == 'xy':
            sc = 1
        elif self._units == 'x':
            sc = ax.bbox.width / ax.viewLim.width
        elif self._units == 'y':
            sc = ax.bbox.height / ax.viewLim.height
        elif self._units == 'inches':
            sc = fig.dpi
        elif self._units == 'points':
            sc = fig.dpi / 72.0
        elif self._units == 'width':
            sc = ax.bbox.width
        elif self._units == 'height':
            sc = ax.bbox.height
        elif self._units == 'dots':
            sc = 1.0
        else:
            raise ValueError('unrecognized units: %s' % self._units)
        _affine = transforms.Affine2D
        for x, y, a in zip(self._widths, self._heights, self._angles):
            trans = _affine().scale(x * sc, y * sc).rotate(a)
            self._transforms.append(trans)
        if self._units == 'xy':
            # Data-space sizes: use the affine data transform with its
            # translation zeroed out.
            m = ax.transData.get_affine().get_matrix().copy()
            m[:2, 2:] = 0
            self.set_transform(_affine(m))

    @allow_rasterization
    def draw(self, renderer):
        self._set_transforms()
        Collection.draw(self, renderer)
class PatchCollection(Collection):
    """A collection built from a sequence of Patch objects."""

    def __init__(self, patches, match_original=False, **kwargs):
        """*match_original*: copy face/edge colors, linewidths, linestyles
        and antialiasing from each patch instead of taking **kwargs.

        NOTE(review): when *match_original* is True, any **kwargs are
        silently ignored -- confirm this is intended by callers.
        """
        if match_original:
            def determine_facecolor(patch):
                # Unfilled patches get fully transparent faces.
                if patch.get_fill():
                    return patch.get_facecolor()
                return [0, 0, 0, 0]
            facecolors = [determine_facecolor(p) for p in patches]
            edgecolors = [p.get_edgecolor() for p in patches]
            linewidths = [p.get_linewidth() for p in patches]
            linestyles = [p.get_linestyle() for p in patches]
            antialiaseds = [p.get_antialiased() for p in patches]
            Collection.__init__(
                self,
                edgecolors=edgecolors,
                facecolors=facecolors,
                linewidths=linewidths,
                linestyles=linestyles,
                antialiaseds=antialiaseds)
        else:
            Collection.__init__(self, **kwargs)
        self.set_paths(patches)

    def set_paths(self, patches):
        """Extract each patch's path, pre-transformed by its own transform."""
        paths = [p.get_transform().transform_path(p.get_path())
                 for p in patches]
        self._paths = paths
class TriMesh(Collection):
    """A Gouraud-shaded triangular mesh built from a Triangulation."""

    def __init__(self, triangulation, **kwargs):
        Collection.__init__(self, **kwargs)
        self._triangulation = triangulation
        self._shading = 'gouraud'
        self._is_filled = True
        # Cache the data bounding box of all mesh points.
        self._bbox = transforms.Bbox.unit()
        xy = np.hstack((triangulation.x.reshape(-1, 1),
                        triangulation.y.reshape(-1, 1)))
        self._bbox.update_from_data_xy(xy)

    def get_paths(self):
        # Paths are built lazily on first access.
        if self._paths is None:
            self.set_paths()
        return self._paths

    def set_paths(self):
        """Rebuild the per-triangle paths from the triangulation."""
        self._paths = self.convert_mesh_to_paths(self._triangulation)

    @staticmethod
    def convert_mesh_to_paths(tri):
        """Return one Path per unmasked triangle of *tri* (used e.g. by
        backends without native Gouraud support)."""
        Path = mpath.Path
        triangles = tri.get_masked_triangles()
        verts = np.concatenate((tri.x[triangles][..., np.newaxis],
                                tri.y[triangles][..., np.newaxis]), axis=2)
        return [Path(x) for x in verts]

    @allow_rasterization
    def draw(self, renderer):
        if not self.get_visible():
            return
        renderer.open_group(self.__class__.__name__)
        transform = self.get_transform()
        tri = self._triangulation
        triangles = tri.get_masked_triangles()
        # (ntri, 3, 2) vertex array for the unmasked triangles.
        verts = np.concatenate((tri.x[triangles][..., np.newaxis],
                                tri.y[triangles][..., np.newaxis]), axis=2)
        self.update_scalarmappable()
        colors = self._facecolors[triangles]
        gc = renderer.new_gc()
        self._set_gc_clip(gc)
        gc.set_linewidth(self.get_linewidth()[0])
        renderer.draw_gouraud_triangles(gc, verts, colors, transform.frozen())
        gc.restore()
        renderer.close_group(self.__class__.__name__)
class QuadMesh(Collection):
    """A mesh of (meshWidth x meshHeight) quadrilaterals defined by a
    (meshHeight+1, meshWidth+1, 2) grid of corner coordinates."""

    def __init__(self, meshWidth, meshHeight, coordinates,
                 antialiased=True, shading='flat', **kwargs):
        Collection.__init__(self, **kwargs)
        self._meshWidth = meshWidth
        self._meshHeight = meshHeight
        self._coordinates = coordinates
        self._antialiased = antialiased
        self._shading = shading
        # Cache the data bounding box of all mesh corners.
        self._bbox = transforms.Bbox.unit()
        self._bbox.update_from_data_xy(coordinates.reshape(
            ((meshWidth + 1) * (meshHeight + 1), 2)))
        self._coordinates = self._coordinates.reshape(
            (meshHeight + 1, meshWidth + 1, 2))
        self._coordinates = np.array(self._coordinates, np.float_)

    def get_paths(self):
        # Paths are built lazily on first access.
        if self._paths is None:
            self.set_paths()
        return self._paths

    def set_paths(self):
        """Rebuild the per-quad paths from the coordinate grid."""
        self._paths = self.convert_mesh_to_paths(
            self._meshWidth, self._meshHeight, self._coordinates)

    @staticmethod
    def convert_mesh_to_paths(meshWidth, meshHeight, coordinates):
        """Return one closed Path per quad (used e.g. by backends without
        native quadmesh support)."""
        Path = mpath.Path
        if ma.isMaskedArray(coordinates):
            c = coordinates.data
        else:
            c = coordinates
        # Corner order: ll, ul, ur, lr, back to ll.
        points = np.concatenate((
            c[0:-1, 0:-1],
            c[0:-1, 1:],
            c[1:, 1:],
            c[1:, 0:-1],
            c[0:-1, 0:-1]
        ), axis=2)
        points = points.reshape((meshWidth * meshHeight, 5, 2))
        return [Path(x) for x in points]

    def convert_mesh_to_triangles(self, meshWidth, meshHeight, coordinates):
        """Split each quad into four triangles sharing the quad center,
        returning (triangles, colors) for Gouraud rendering."""
        if ma.isMaskedArray(coordinates):
            p = coordinates.data
        else:
            p = coordinates
        # The four corners and the centroid of every quad.
        p_a = p[:-1, :-1]
        p_b = p[:-1, 1:]
        p_c = p[1:, 1:]
        p_d = p[1:, :-1]
        p_center = (p_a + p_b + p_c + p_d) / 4.0
        triangles = np.concatenate((
            p_a, p_b, p_center,
            p_b, p_c, p_center,
            p_c, p_d, p_center,
            p_d, p_a, p_center,
        ), axis=2)
        triangles = triangles.reshape((meshWidth * meshHeight * 4, 3, 2))
        # Matching per-vertex colors, with centers averaged from corners.
        c = self.get_facecolor().reshape((meshHeight + 1, meshWidth + 1, 4))
        c_a = c[:-1, :-1]
        c_b = c[:-1, 1:]
        c_c = c[1:, 1:]
        c_d = c[1:, :-1]
        c_center = (c_a + c_b + c_c + c_d) / 4.0
        colors = np.concatenate((
            c_a, c_b, c_center,
            c_b, c_c, c_center,
            c_c, c_d, c_center,
            c_d, c_a, c_center,
        ), axis=2)
        colors = colors.reshape((meshWidth * meshHeight * 4, 3, 4))
        return triangles, colors

    def get_datalim(self, transData):
        # The bbox was precomputed from the raw coordinates in __init__.
        return self._bbox

    @allow_rasterization
    def draw(self, renderer):
        if not self.get_visible():
            return
        renderer.open_group(self.__class__.__name__, self.get_gid())
        transform = self.get_transform()
        transOffset = self.get_offset_transform()
        offsets = self._offsets
        if self.have_units():
            if len(self._offsets):
                xs = self.convert_xunits(self._offsets[:, 0])
                ys = self.convert_yunits(self._offsets[:, 1])
                # NOTE(review): np.asarray(zip(...)) relies on zip()
                # returning a list (Python 2) -- confirm target version.
                offsets = zip(xs, ys)
                offsets = np.asarray(offsets, np.float_)
                offsets.shape = (-1, 2)
        self.update_scalarmappable()
        if not transform.is_affine:
            # Apply the curved part to the grid up front; only the
            # identity remains for the renderer.
            coordinates = self._coordinates.reshape(
                (self._coordinates.shape[0] *
                 self._coordinates.shape[1],
                 2))
            coordinates = transform.transform(coordinates)
            coordinates = coordinates.reshape(self._coordinates.shape)
            transform = transforms.IdentityTransform()
        else:
            coordinates = self._coordinates
        if not transOffset.is_affine:
            offsets = transOffset.transform_non_affine(offsets)
            transOffset = transOffset.get_affine()
        gc = renderer.new_gc()
        self._set_gc_clip(gc)
        gc.set_linewidth(self.get_linewidth()[0])
        if self._shading == 'gouraud':
            triangles, colors = self.convert_mesh_to_triangles(
                self._meshWidth, self._meshHeight, coordinates)
            renderer.draw_gouraud_triangles(
                gc, triangles, colors, transform.frozen())
        else:
            renderer.draw_quad_mesh(
                gc, transform.frozen(), self._meshWidth, self._meshHeight,
                coordinates, offsets, transOffset, self.get_facecolor(),
                self._antialiased, self.get_edgecolors())
        gc.restore()
        renderer.close_group(self.__class__.__name__)
# Register the Collection keyword documentation under each collection
# class name so %(ClassName)s substitutions in docstrings resolve.
patchstr = artist.kwdoc(Collection)
for k in ('QuadMesh', 'TriMesh', 'PolyCollection', 'BrokenBarHCollection',
          'RegularPolyCollection', 'PathCollection',
          'StarPolygonCollection', 'PatchCollection',
          'CircleCollection', 'Collection',):
    docstring.interpd.update({k: patchstr})
docstring.interpd.update(LineCollection=artist.kwdoc(LineCollection))
| true | true |
f7fd1ca7748dff860ba120b996166cf102457bfa | 72,663 | py | Python | sympy/sets/sets.py | SirAbhi13/sympy | 5868aa1cc649f048cdbddd0082be67f2b65f0d95 | [
"BSD-3-Clause"
] | null | null | null | sympy/sets/sets.py | SirAbhi13/sympy | 5868aa1cc649f048cdbddd0082be67f2b65f0d95 | [
"BSD-3-Clause"
] | null | null | null | sympy/sets/sets.py | SirAbhi13/sympy | 5868aa1cc649f048cdbddd0082be67f2b65f0d95 | [
"BSD-3-Clause"
] | null | null | null | from typing import Optional
from functools import reduce
from collections import defaultdict
import inspect
from sympy.core.basic import Basic
from sympy.core.compatibility import ordered
from sympy.core.containers import Tuple
from sympy.core.decorators import (deprecated, sympify_method_args,
sympify_return)
from sympy.core.evalf import EvalfMixin, prec_to_dps
from sympy.core.expr import Expr
from sympy.core.logic import (FuzzyBool, fuzzy_bool, fuzzy_or, fuzzy_and,
fuzzy_not)
from sympy.core.numbers import Float
from sympy.core.operations import LatticeOp
from sympy.core.parameters import global_parameters
from sympy.core.relational import Eq, Ne, is_lt
from sympy.core.singleton import Singleton, S
from sympy.core.symbol import symbols, Symbol, Dummy, uniquely_named_symbol
from sympy.core.sympify import _sympify, sympify, converter
from sympy.logic.boolalg import And, Or, Not, Xor, true, false
from sympy.sets.contains import Contains
from sympy.utilities import subsets
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import iproduct, sift, roundrobin, iterable
from sympy.utilities.misc import func_name, filldedent
from mpmath import mpi, mpf
# Normalize three-valued (fuzzy) bools to SymPy singletons: True/S.true
# -> S.true, False/S.false -> S.false; any other key yields None.
tfn = defaultdict(lambda: None, {
    True: S.true,
    S.true: S.true,
    False: S.false,
    S.false: S.false})
@sympify_method_args
class Set(Basic, EvalfMixin):
"""
The base class for any kind of set.
Explanation
===========
This is not meant to be used directly as a container of items. It does not
behave like the builtin ``set``; see :class:`FiniteSet` for that.
Real intervals are represented by the :class:`Interval` class and unions of
sets by the :class:`Union` class. The empty set is represented by the
:class:`EmptySet` class and available as a singleton as ``S.EmptySet``.
"""
is_number = False
is_iterable = False
is_interval = False
is_FiniteSet = False
is_Interval = False
is_ProductSet = False
is_Union = False
is_Intersection = None # type: Optional[bool]
is_UniversalSet = None # type: Optional[bool]
is_Complement = None # type: Optional[bool]
is_ComplexRegion = False
is_empty = None # type: FuzzyBool
is_finite_set = None # type: FuzzyBool
    @property # type: ignore
    @deprecated(useinstead="is S.EmptySet or is_empty",
            issue=16946, deprecated_since_version="1.5")
    def is_EmptySet(self):
        # Deprecated: always None; callers should compare with S.EmptySet
        # or use is_empty.
        return None
    @staticmethod
    def _infimum_key(expr):
        """
        Return infimum (if possible) else S.Infinity.
        """
        try:
            infimum = expr.inf
            # The assert is deliberate control flow: a non-comparable
            # infimum raises AssertionError, caught below.
            assert infimum.is_comparable
            infimum = infimum.evalf()  # issue #18505
        except (NotImplementedError,
                AttributeError, AssertionError, ValueError):
            infimum = S.Infinity
        return infimum
    def union(self, other):
        """
        Returns the union of ``self`` and ``other``.

        Examples
        ========

        As a shortcut it is possible to use the ``+`` operator:

        >>> from sympy import Interval, FiniteSet
        >>> Interval(0, 1).union(Interval(2, 3))
        Union(Interval(0, 1), Interval(2, 3))
        >>> Interval(0, 1) + Interval(2, 3)
        Union(Interval(0, 1), Interval(2, 3))
        >>> Interval(1, 2, True, True) + FiniteSet(2, 3)
        Union({3}, Interval.Lopen(1, 2))

        Similarly it is possible to use the ``-`` operator for set differences:

        >>> Interval(0, 2) - Interval(0, 1)
        Interval.Lopen(1, 2)
        >>> Interval(1, 3) - FiniteSet(2)
        Union(Interval.Ropen(1, 2), Interval.Lopen(2, 3))

        """
        # The Union constructor performs any possible simplification.
        return Union(self, other)
    def intersect(self, other):
        """
        Returns the intersection of 'self' and 'other'.

        Examples
        ========

        >>> from sympy import Interval
        >>> Interval(1, 3).intersect(Interval(1, 2))
        Interval(1, 2)

        >>> from sympy import imageset, Lambda, symbols, S
        >>> n, m = symbols('n m')
        >>> a = imageset(Lambda(n, 2*n), S.Integers)
        >>> a.intersect(imageset(Lambda(m, 2*m + 1), S.Integers))
        EmptySet

        """
        # The Intersection constructor performs any possible simplification.
        return Intersection(self, other)
    def intersection(self, other):
        """
        Alias for :meth:`intersect()`
        """
        return self.intersect(other)
    def is_disjoint(self, other):
        """
        Returns True if ``self`` and ``other`` are disjoint.

        Examples
        ========

        >>> from sympy import Interval
        >>> Interval(0, 2).is_disjoint(Interval(1, 2))
        False
        >>> Interval(0, 2).is_disjoint(Interval(3, 4))
        True

        References
        ==========

        .. [1] https://en.wikipedia.org/wiki/Disjoint_sets
        """
        # Disjoint iff the intersection simplifies to the empty set.
        return self.intersect(other) == S.EmptySet
    def isdisjoint(self, other):
        """
        Alias for :meth:`is_disjoint()`
        """
        return self.is_disjoint(other)
    def complement(self, universe):
        r"""
        The complement of 'self' w.r.t the given universe.

        Examples
        ========

        >>> from sympy import Interval, S
        >>> Interval(0, 1).complement(S.Reals)
        Union(Interval.open(-oo, 0), Interval.open(1, oo))

        >>> Interval(0, 1).complement(S.UniversalSet)
        Complement(UniversalSet, Interval(0, 1))

        """
        return Complement(universe, self)
    def _complement(self, other):
        # this behaves as other - self
        # Dispatches on the type of *other*; returns None (implicitly)
        # for unhandled types, letting Complement stay unevaluated.
        if isinstance(self, ProductSet) and isinstance(other, ProductSet):
            # If self and other are disjoint then other - self == self
            if len(self.sets) != len(other.sets):
                return other

            # There can be other ways to represent this but this gives:
            # (A x B) - (C x D) = ((A - C) x B) U (A x (B - D))
            overlaps = []
            pairs = list(zip(self.sets, other.sets))
            for n in range(len(pairs)):
                sets = (o if i != n else o-s for i, (s, o) in enumerate(pairs))
                overlaps.append(ProductSet(*sets))
            return Union(*overlaps)
        elif isinstance(other, Interval):
            if isinstance(self, (Interval, FiniteSet)):
                # Intersect with the real-line complement of self.
                return Intersection(other, self.complement(S.Reals))
        elif isinstance(other, Union):
            # Distribute the difference over the union's members.
            return Union(*(o - self for o in other.args))
        elif isinstance(other, Complement):
            return Complement(other.args[0], Union(other.args[1], self), evaluate=False)
        elif other is S.EmptySet:
            return S.EmptySet
        elif isinstance(other, FiniteSet):
            # Split elements by whether containment in self is known.
            sifted = sift(other, lambda x: fuzzy_bool(self.contains(x)))
            # ignore those that are contained in self
            return Union(FiniteSet(*(sifted[False])),
                Complement(FiniteSet(*(sifted[None])), self, evaluate=False)
                if sifted[None] else S.EmptySet)
    def symmetric_difference(self, other):
        """
        Returns symmetric difference of ``self`` and ``other``.

        Examples
        ========

        >>> from sympy import Interval, S
        >>> Interval(1, 3).symmetric_difference(S.Reals)
        Union(Interval.open(-oo, 1), Interval.open(3, oo))
        >>> Interval(1, 10).symmetric_difference(S.Reals)
        Union(Interval.open(-oo, 1), Interval.open(10, oo))

        >>> from sympy import S, EmptySet
        >>> S.Reals.symmetric_difference(EmptySet)
        Reals

        References
        ==========
        .. [1] https://en.wikipedia.org/wiki/Symmetric_difference

        """
        return SymmetricDifference(self, other)
    def _symmetric_difference(self, other):
        # Default evaluation: (self - other) U (other - self).
        return Union(Complement(self, other), Complement(other, self))
    @property
    def inf(self):
        """
        The infimum of ``self``.

        Examples
        ========

        >>> from sympy import Interval, Union
        >>> Interval(0, 1).inf
        0
        >>> Union(Interval(0, 1), Interval(2, 3)).inf
        0

        """
        return self._inf
    @property
    def _inf(self):
        # Abstract hook: concrete Set subclasses must override this to
        # supply the value returned by the public ``inf`` property.
        raise NotImplementedError("(%s)._inf" % self)
@property
def sup(self):
"""
The supremum of ``self``.
Examples
========
>>> from sympy import Interval, Union
>>> Interval(0, 1).sup
1
>>> Union(Interval(0, 1), Interval(2, 3)).sup
3
"""
return self._sup
    @property
    def _sup(self):
        # Abstract hook: concrete Set subclasses must override this to
        # supply the value returned by the public ``sup`` property.
        raise NotImplementedError("(%s)._sup" % self)
    def contains(self, other):
        """
        Returns a SymPy value indicating whether ``other`` is contained
        in ``self``: ``true`` if it is, ``false`` if it isn't, else
        an unevaluated ``Contains`` expression (or, as in the case of
        ConditionSet and a union of FiniteSet/Intervals, an expression
        indicating the conditions for containment).
        Examples
        ========
        >>> from sympy import Interval, S
        >>> from sympy.abc import x
        >>> Interval(0, 1).contains(0.5)
        True
        As a shortcut it is possible to use the ``in`` operator, but that
        will raise an error unless an affirmative true or false is not
        obtained.
        >>> Interval(0, 1).contains(x)
        (0 <= x) & (x <= 1)
        >>> x in Interval(0, 1)
        Traceback (most recent call last):
        ...
        TypeError: did not evaluate to a bool: None
        The result of 'in' is a bool, not a SymPy value
        >>> 1 in Interval(0, 2)
        True
        >>> _ is S.true
        False
        """
        # strict=True: reject non-SymPy objects rather than guessing.
        other = sympify(other, strict=True)
        c = self._contains(other)
        # A subclass hook may already return an (unevaluated) Contains.
        if isinstance(c, Contains):
            return c
        # None means "no rule applied": keep the query symbolic.
        if c is None:
            return Contains(other, self, evaluate=False)
        # ``tfn`` maps fuzzy values to S.true/S.false/None; anything it
        # cannot map (e.g. a relational condition) is returned as-is.
        b = tfn[c]
        if b is None:
            return c
        return b
    def _contains(self, other):
        # Abstract hook used by ``contains``/``__contains__``; subclasses
        # implement the actual membership rule.
        raise NotImplementedError(filldedent('''
            (%s)._contains(%s) is not defined. This method, when
            defined, will receive a sympified object. The method
            should return True, False, None or something that
            expresses what must be true for the containment of that
            object in self to be evaluated. If None is returned
            then a generic Contains object will be returned
            by the ``contains`` method.''' % (self, other)))
    def is_subset(self, other):
        """
        Returns True if ``self`` is a subset of ``other``.
        Examples
        ========
        >>> from sympy import Interval
        >>> Interval(0, 0.5).is_subset(Interval(0, 1))
        True
        >>> Interval(0, 1).is_subset(Interval(0, 1, left_open=True))
        False
        """
        if not isinstance(other, Set):
            raise ValueError("Unknown argument '%s'" % other)
        # Handle the trivial cases
        if self == other:
            return True
        is_empty = self.is_empty
        if is_empty is True:
            # The empty set is a subset of everything.
            return True
        elif fuzzy_not(is_empty) and other.is_empty:
            # A nonempty set cannot fit inside the empty set.
            return False
        if self.is_finite_set is False and other.is_finite_set:
            # An infinite set cannot fit inside a finite one.
            return False
        # Dispatch on subclass rules
        ret = self._eval_is_subset(other)
        if ret is not None:
            return ret
        ret = other._eval_is_superset(self)
        if ret is not None:
            return ret
        # Use pairwise rules from multiple dispatch
        from sympy.sets.handlers.issubset import is_subset_sets
        ret = is_subset_sets(self, other)
        if ret is not None:
            return ret
        # Fall back on computing the intersection
        # XXX: We shouldn't do this. A query like this should be handled
        # without evaluating new Set objects. It should be the other way round
        # so that the intersect method uses is_subset for evaluation.
        if self.intersect(other) == self:
            return True
def _eval_is_subset(self, other):
'''Returns a fuzzy bool for whether self is a subset of other.'''
return None
def _eval_is_superset(self, other):
'''Returns a fuzzy bool for whether self is a subset of other.'''
return None
# This should be deprecated:
def issubset(self, other):
"""
Alias for :meth:`is_subset()`
"""
return self.is_subset(other)
def is_proper_subset(self, other):
"""
Returns True if ``self`` is a proper subset of ``other``.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 0.5).is_proper_subset(Interval(0, 1))
True
>>> Interval(0, 1).is_proper_subset(Interval(0, 1))
False
"""
if isinstance(other, Set):
return self != other and self.is_subset(other)
else:
raise ValueError("Unknown argument '%s'" % other)
def is_superset(self, other):
"""
Returns True if ``self`` is a superset of ``other``.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 0.5).is_superset(Interval(0, 1))
False
>>> Interval(0, 1).is_superset(Interval(0, 1, left_open=True))
True
"""
if isinstance(other, Set):
return other.is_subset(self)
else:
raise ValueError("Unknown argument '%s'" % other)
# This should be deprecated:
def issuperset(self, other):
"""
Alias for :meth:`is_superset()`
"""
return self.is_superset(other)
def is_proper_superset(self, other):
"""
Returns True if ``self`` is a proper superset of ``other``.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).is_proper_superset(Interval(0, 0.5))
True
>>> Interval(0, 1).is_proper_superset(Interval(0, 1))
False
"""
if isinstance(other, Set):
return self != other and self.is_superset(other)
else:
raise ValueError("Unknown argument '%s'" % other)
    def _eval_powerset(self):
        # Default: wrap in an unevaluated PowerSet. Imported locally to
        # avoid a circular import with sympy.sets.powerset.
        from .powerset import PowerSet
        return PowerSet(self)
def powerset(self):
"""
Find the Power set of ``self``.
Examples
========
>>> from sympy import EmptySet, FiniteSet, Interval
A power set of an empty set:
>>> A = EmptySet
>>> A.powerset()
{EmptySet}
A power set of a finite set:
>>> A = FiniteSet(1, 2)
>>> a, b, c = FiniteSet(1), FiniteSet(2), FiniteSet(1, 2)
>>> A.powerset() == FiniteSet(a, b, c, EmptySet)
True
A power set of an interval:
>>> Interval(1, 2).powerset()
PowerSet(Interval(1, 2))
References
==========
.. [1] https://en.wikipedia.org/wiki/Power_set
"""
return self._eval_powerset()
@property
def measure(self):
"""
The (Lebesgue) measure of ``self``.
Examples
========
>>> from sympy import Interval, Union
>>> Interval(0, 1).measure
1
>>> Union(Interval(0, 1), Interval(2, 3)).measure
2
"""
return self._measure
@property
def boundary(self):
"""
The boundary or frontier of a set.
Explanation
===========
A point x is on the boundary of a set S if
1. x is in the closure of S.
I.e. Every neighborhood of x contains a point in S.
2. x is not in the interior of S.
I.e. There does not exist an open set centered on x contained
entirely within S.
There are the points on the outer rim of S. If S is open then these
points need not actually be contained within S.
For example, the boundary of an interval is its start and end points.
This is true regardless of whether or not the interval is open.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).boundary
{0, 1}
>>> Interval(0, 1, True, False).boundary
{0, 1}
"""
return self._boundary
@property
def is_open(self):
"""
Property method to check whether a set is open.
Explanation
===========
A set is open if and only if it has an empty intersection with its
boundary. In particular, a subset A of the reals is open if and only
if each one of its points is contained in an open interval that is a
subset of A.
Examples
========
>>> from sympy import S
>>> S.Reals.is_open
True
>>> S.Rationals.is_open
False
"""
return Intersection(self, self.boundary).is_empty
@property
def is_closed(self):
"""
A property method to check whether a set is closed.
Explanation
===========
A set is closed if its complement is an open set. The closedness of a
subset of the reals is determined with respect to R and its standard
topology.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).is_closed
True
"""
return self.boundary.is_subset(self)
@property
def closure(self):
"""
Property method which returns the closure of a set.
The closure is defined as the union of the set itself and its
boundary.
Examples
========
>>> from sympy import S, Interval
>>> S.Reals.closure
Reals
>>> Interval(0, 1).closure
Interval(0, 1)
"""
return self + self.boundary
@property
def interior(self):
"""
Property method which returns the interior of a set.
The interior of a set S consists all points of S that do not
belong to the boundary of S.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).interior
Interval.open(0, 1)
>>> Interval(0, 1).boundary.interior
EmptySet
"""
return self - self.boundary
    @property
    def _boundary(self):
        # Abstract hook: concrete Set subclasses must override this to
        # supply the value returned by the public ``boundary`` property.
        raise NotImplementedError()
    @property
    def _measure(self):
        # Abstract hook: concrete Set subclasses must override this to
        # supply the value returned by the public ``measure`` property.
        raise NotImplementedError("(%s)._measure" % self)
def _eval_evalf(self, prec):
dps = prec_to_dps(prec)
return self.func(*[arg.evalf(n=dps) for arg in self.args])
    @sympify_return([('other', 'Set')], NotImplemented)
    def __add__(self, other):
        # ``A + B`` is set union.
        return self.union(other)
    @sympify_return([('other', 'Set')], NotImplemented)
    def __or__(self, other):
        # ``A | B`` is set union (same as ``+``).
        return self.union(other)
    @sympify_return([('other', 'Set')], NotImplemented)
    def __and__(self, other):
        # ``A & B`` is set intersection.
        return self.intersect(other)
    @sympify_return([('other', 'Set')], NotImplemented)
    def __mul__(self, other):
        # ``A * B`` is the Cartesian product.
        return ProductSet(self, other)
    @sympify_return([('other', 'Set')], NotImplemented)
    def __xor__(self, other):
        # ``A ^ B`` is the symmetric difference.
        return SymmetricDifference(self, other)
@sympify_return([('exp', Expr)], NotImplemented)
def __pow__(self, exp):
if not (exp.is_Integer and exp >= 0):
raise ValueError("%s: Exponent must be a positive Integer" % exp)
return ProductSet(*[self]*exp)
    @sympify_return([('other', 'Set')], NotImplemented)
    def __sub__(self, other):
        # ``A - B`` is the relative complement (set difference).
        return Complement(self, other)
    def __contains__(self, other):
        # Python's ``in`` must yield a plain bool, so unlike ``contains``
        # this raises when membership cannot be decided.
        other = _sympify(other)
        c = self._contains(other)
        # ``tfn`` maps fuzzy True/False/None to S.true/S.false/None.
        b = tfn[c]
        if b is None:
            # x in y must evaluate to T or F; to entertain a None
            # result with Set use y.contains(x)
            raise TypeError('did not evaluate to a bool: %r' % c)
        return b
class ProductSet(Set):
    """
    Represents a Cartesian Product of Sets.
    Explanation
    ===========
    Returns a Cartesian product given several sets as either an iterable
    or individual arguments.
    Can use ``*`` operator on any sets for convenient shorthand.
    Examples
    ========
    >>> from sympy import Interval, FiniteSet, ProductSet
    >>> I = Interval(0, 5); S = FiniteSet(1, 2, 3)
    >>> ProductSet(I, S)
    ProductSet(Interval(0, 5), {1, 2, 3})
    >>> (2, 2) in ProductSet(I, S)
    True
    >>> Interval(0, 1) * Interval(0, 1) # The unit square
    ProductSet(Interval(0, 1), Interval(0, 1))
    >>> coin = FiniteSet('H', 'T')
    >>> set(coin**2)
    {(H, H), (H, T), (T, H), (T, T)}
    The Cartesian product is not commutative or associative e.g.:
    >>> I*S == S*I
    False
    >>> (I*I)*I == I*(I*I)
    False
    Notes
    =====
    - Passes most operations down to the argument sets
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Cartesian_product
    """
    is_ProductSet = True
    def __new__(cls, *sets, **assumptions):
        # Legacy calling convention ProductSet([A, B, ...]) is deprecated
        # in favour of ProductSet(A, B, ...).
        if len(sets) == 1 and iterable(sets[0]) and not isinstance(sets[0], (Set, set)):
            SymPyDeprecationWarning(
                feature="ProductSet(iterable)",
                useinstead="ProductSet(*iterable)",
                issue=17557,
                deprecated_since_version="1.5"
            ).warn()
            sets = tuple(sets[0])
        sets = [sympify(s) for s in sets]
        if not all(isinstance(s, Set) for s in sets):
            raise TypeError("Arguments to ProductSet should be of type Set")
        # Nullary product of sets is *not* the empty set
        if len(sets) == 0:
            return FiniteSet(())
        # A product with an empty factor is empty.
        if S.EmptySet in sets:
            return S.EmptySet
        return Basic.__new__(cls, *sets, **assumptions)
    @property
    def sets(self):
        # The factor sets, in order.
        return self.args
    def flatten(self):
        # Recursively splice nested ProductSet factors into a single,
        # flat product: (A x B) x C -> A x B x C.
        def _flatten(sets):
            for s in sets:
                if s.is_ProductSet:
                    yield from _flatten(s.sets)
                else:
                    yield s
        return ProductSet(*_flatten(self.sets))
    def _contains(self, element):
        """
        ``in`` operator for ProductSets.
        Examples
        ========
        >>> from sympy import Interval
        >>> (2, 3) in Interval(0, 5) * Interval(0, 5)
        True
        >>> (10, 10) in Interval(0, 5) * Interval(0, 5)
        False
        Passes operation on to constituent sets
        """
        # A bare symbol could stand for any tuple: undecidable.
        if element.is_Symbol:
            return None
        # Only same-length tuples can be members.
        if not isinstance(element, Tuple) or len(element) != len(self.sets):
            return False
        # Member iff every coordinate lies in the matching factor set.
        return fuzzy_and(s._contains(e) for s, e in zip(self.sets, element))
    def as_relational(self, *symbols):
        # One symbol per factor set; conjoin the factors' relationals.
        symbols = [_sympify(s) for s in symbols]
        if len(symbols) != len(self.sets) or not all(
                i.is_Symbol for i in symbols):
            raise ValueError(
                'number of symbols must match the number of sets')
        return And(*[s.as_relational(i) for s, i in zip(self.sets, symbols)])
    @property
    def _boundary(self):
        # boundary(A x B) = (bd(A) x cl(B)) U (cl(A) x bd(B)), generalized
        # to n factors: take each factor's boundary in turn, closing the rest.
        return Union(*(ProductSet(*(b + b.boundary if i != j else b.boundary
                                for j, b in enumerate(self.sets)))
                                for i, a in enumerate(self.sets)))
    @property
    def is_iterable(self):
        """
        A property method which tests whether a set is iterable or not.
        Returns True if set is iterable, otherwise returns False.
        Examples
        ========
        >>> from sympy import FiniteSet, Interval
        >>> I = Interval(0, 1)
        >>> A = FiniteSet(1, 2, 3, 4, 5)
        >>> I.is_iterable
        False
        >>> A.is_iterable
        True
        """
        return all(set.is_iterable for set in self.sets)
    def __iter__(self):
        """
        Iterate over the Cartesian product of the constituent sets.
        The iteration order is determined by ``iproduct``. Note that a
        non-iterable factor set is not rejected here eagerly; iteration
        will fail when ``iproduct`` attempts to consume it.
        """
        return iproduct(*self.sets)
    @property
    def is_empty(self):
        # Empty iff any factor is empty (fuzzy).
        return fuzzy_or(s.is_empty for s in self.sets)
    @property
    def is_finite_set(self):
        # Finite iff empty or every factor is finite (fuzzy).
        all_finite = fuzzy_and(s.is_finite_set for s in self.sets)
        return fuzzy_or([self.is_empty, all_finite])
    @property
    def _measure(self):
        # Product measure: multiply factor measures.
        measure = 1
        for s in self.sets:
            measure *= s.measure
        return measure
    def __len__(self):
        # Cardinality of a finite product: product of factor lengths.
        # Raises TypeError (via len) if any factor has no length.
        return reduce(lambda a, b: a*b, (len(s) for s in self.args))
    def __bool__(self):
        # Truthy iff every factor set is truthy.
        return all(self.sets)
class Interval(Set):
    """
    Represents a real interval as a Set.
    Usage:
        Returns an interval with end points ``start`` and ``end``.
        For ``left_open=True`` (default ``left_open`` is ``False``) the interval
        will be open on the left. Similarly, for ``right_open=True`` the interval
        will be open on the right.
    Examples
    ========
    >>> from sympy import Symbol, Interval
    >>> Interval(0, 1)
    Interval(0, 1)
    >>> Interval.Ropen(0, 1)
    Interval.Ropen(0, 1)
    >>> Interval.Ropen(0, 1)
    Interval.Ropen(0, 1)
    >>> Interval.Lopen(0, 1)
    Interval.Lopen(0, 1)
    >>> Interval.open(0, 1)
    Interval.open(0, 1)
    >>> a = Symbol('a', real=True)
    >>> Interval(0, a)
    Interval(0, a)
    Notes
    =====
    - Only real end points are supported
    - ``Interval(a, b)`` with $a > b$ will return the empty set
    - Use the ``evalf()`` method to turn an Interval into an mpmath
      ``mpi`` interval instance
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Interval_%28mathematics%29
    """
    is_Interval = True
    def __new__(cls, start, end, left_open=False, right_open=False):
        # Canonicalize arguments; openness flags must sympify to the
        # boolean singletons true/false, not arbitrary expressions.
        start = _sympify(start)
        end = _sympify(end)
        left_open = _sympify(left_open)
        right_open = _sympify(right_open)
        if not all(isinstance(a, (type(true), type(false)))
                for a in [left_open, right_open]):
            raise NotImplementedError(
                "left_open and right_open can have only true/false values, "
                "got %s and %s" % (left_open, right_open))
        # Only allow real intervals
        if fuzzy_not(fuzzy_and(i.is_extended_real for i in (start, end, end-start))):
            raise ValueError("Non-real intervals are not supported")
        # evaluate if possible
        # (two distinct checks: is_lt may decide symbolically, while the
        # difference's sign covers cases is_lt cannot.)
        if is_lt(end, start):
            return S.EmptySet
        elif (end - start).is_negative:
            return S.EmptySet
        # Degenerate interval [a, a): empty if either side is open,
        # otherwise a single point (but no finite point at +/-oo).
        if end == start and (left_open or right_open):
            return S.EmptySet
        if end == start and not (left_open or right_open):
            if start is S.Infinity or start is S.NegativeInfinity:
                return S.EmptySet
            return FiniteSet(end)
        # Make sure infinite interval end points are open.
        if start is S.NegativeInfinity:
            left_open = true
        if end is S.Infinity:
            right_open = true
        if start == S.Infinity or end == S.NegativeInfinity:
            return S.EmptySet
        return Basic.__new__(cls, start, end, left_open, right_open)
    @property
    def start(self):
        """
        The left end point of the interval.
        This property takes the same value as the ``inf`` property.
        Examples
        ========
        >>> from sympy import Interval
        >>> Interval(0, 1).start
        0
        """
        return self._args[0]
    @property
    def end(self):
        """
        The right end point of the interval.
        This property takes the same value as the ``sup`` property.
        Examples
        ========
        >>> from sympy import Interval
        >>> Interval(0, 1).end
        1
        """
        return self._args[1]
    @property
    def left_open(self):
        """
        True if interval is left-open.
        Examples
        ========
        >>> from sympy import Interval
        >>> Interval(0, 1, left_open=True).left_open
        True
        >>> Interval(0, 1, left_open=False).left_open
        False
        """
        return self._args[2]
    @property
    def right_open(self):
        """
        True if interval is right-open.
        Examples
        ========
        >>> from sympy import Interval
        >>> Interval(0, 1, right_open=True).right_open
        True
        >>> Interval(0, 1, right_open=False).right_open
        False
        """
        return self._args[3]
    @classmethod
    def open(cls, a, b):
        """Return an interval including neither boundary."""
        return cls(a, b, True, True)
    @classmethod
    def Lopen(cls, a, b):
        """Return an interval not including the left boundary."""
        return cls(a, b, True, False)
    @classmethod
    def Ropen(cls, a, b):
        """Return an interval not including the right boundary."""
        return cls(a, b, False, True)
    @property
    def _inf(self):
        return self.start
    @property
    def _sup(self):
        return self.end
    @property
    def left(self):
        # Alias for ``start``.
        return self.start
    @property
    def right(self):
        # Alias for ``end``.
        return self.end
    @property
    def is_empty(self):
        if self.left_open or self.right_open:
            cond = self.start >= self.end  # One/both bounds open
        else:
            cond = self.start > self.end  # Both bounds closed
        return fuzzy_bool(cond)
    @property
    def is_finite_set(self):
        # An interval is a finite set only when it has measure zero
        # (fuzzy: None if the measure's sign is unknown).
        return self.measure.is_zero
    def _complement(self, other):
        # Complement in the reals: the two unbounded pieces on either
        # side, with openness flipped at the shared end points.
        if other == S.Reals:
            a = Interval(S.NegativeInfinity, self.start,
                         True, not self.left_open)
            b = Interval(self.end, S.Infinity, not self.right_open, True)
            return Union(a, b)
        if isinstance(other, FiniteSet):
            nums = [m for m in other.args if m.is_number]
            if nums == []:
                # All elements are symbolic: leave unevaluated.
                return None
        return Set._complement(self, other)
    @property
    def _boundary(self):
        # End points, excluding infinities (which are not boundary points).
        finite_points = [p for p in (self.start, self.end)
                         if abs(p) != S.Infinity]
        return FiniteSet(*finite_points)
    def _contains(self, other):
        if (not isinstance(other, Expr) or other is S.NaN
            or other.is_real is False or other.has(S.ComplexInfinity)):
                # if an expression has zoo it will be zoo or nan
                # and neither of those is real
                return false
        if self.start is S.NegativeInfinity and self.end is S.Infinity:
            # (-oo, oo) contains exactly the real numbers.
            if other.is_real is not None:
                return other.is_real
        # Fall back to the interval inequalities with a Dummy to avoid
        # substitution clashes when ``other`` contains free symbols.
        d = Dummy()
        return self.as_relational(d).subs(d, other)
    def as_relational(self, x):
        """Rewrite an interval in terms of inequalities and logic operators."""
        x = sympify(x)
        if self.right_open:
            right = x < self.end
        else:
            right = x <= self.end
        if self.left_open:
            left = self.start < x
        else:
            left = self.start <= x
        return And(left, right)
    @property
    def _measure(self):
        # Lebesgue measure: the length of the interval.
        return self.end - self.start
    def to_mpi(self, prec=53):
        # Convert to an mpmath mpi interval at the given binary precision.
        return mpi(mpf(self.start._eval_evalf(prec)),
                   mpf(self.end._eval_evalf(prec)))
    def _eval_evalf(self, prec):
        return Interval(self.left._evalf(prec), self.right._evalf(prec),
                        left_open=self.left_open, right_open=self.right_open)
    def _is_comparable(self, other):
        # True when all four end points are comparable numbers.
        is_comparable = self.start.is_comparable
        is_comparable &= self.end.is_comparable
        is_comparable &= other.start.is_comparable
        is_comparable &= other.end.is_comparable
        return is_comparable
    @property
    def is_left_unbounded(self):
        """Return ``True`` if the left endpoint is negative infinity. """
        return self.left is S.NegativeInfinity or self.left == Float("-inf")
    @property
    def is_right_unbounded(self):
        """Return ``True`` if the right endpoint is positive infinity. """
        return self.right is S.Infinity or self.right == Float("+inf")
    def _eval_Eq(self, other):
        # An Interval can only equal another Interval (handled by the
        # default structural comparison); it is never equal to a
        # FiniteSet, and comparison with other Set types is left open.
        if not isinstance(other, Interval):
            if isinstance(other, FiniteSet):
                return false
            elif isinstance(other, Set):
                return None
            return false
class Union(Set, LatticeOp):
    """
    Represents a union of sets as a :class:`Set`.
    Examples
    ========
    >>> from sympy import Union, Interval
    >>> Union(Interval(1, 2), Interval(3, 4))
    Union(Interval(1, 2), Interval(3, 4))
    The Union constructor will always try to merge overlapping intervals,
    if possible. For example:
    >>> Union(Interval(1, 2), Interval(2, 3))
    Interval(1, 3)
    See Also
    ========
    Intersection
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Union_%28set_theory%29
    """
    is_Union = True
    @property
    def identity(self):
        # LatticeOp identity: A | EmptySet == A.
        return S.EmptySet
    @property
    def zero(self):
        # LatticeOp absorbing element: A | UniversalSet == UniversalSet.
        return S.UniversalSet
    def __new__(cls, *args, **kwargs):
        evaluate = kwargs.get('evaluate', global_parameters.evaluate)
        # flatten inputs to merge intersections and iterables
        args = _sympify(args)
        # Reduce sets using known rules
        if evaluate:
            args = list(cls._new_args_filter(args))
            return simplify_union(args)
        # Unevaluated: store args in canonical order for stable hashing.
        args = list(ordered(args, Set._infimum_key))
        obj = Basic.__new__(cls, *args)
        obj._argset = frozenset(args)
        return obj
    @property
    def args(self):
        return self._args
    def _complement(self, universe):
        # DeMorgan's Law
        return Intersection(s.complement(universe) for s in self.args)
    @property
    def _inf(self):
        # We use Min so that sup is meaningful in combination with symbolic
        # interval end points.
        from sympy.functions.elementary.miscellaneous import Min
        return Min(*[set.inf for set in self.args])
    @property
    def _sup(self):
        # We use Max so that sup is meaningful in combination with symbolic
        # end points.
        from sympy.functions.elementary.miscellaneous import Max
        return Max(*[set.sup for set in self.args])
    @property
    def is_empty(self):
        # Empty iff every member set is empty (fuzzy).
        return fuzzy_and(set.is_empty for set in self.args)
    @property
    def is_finite_set(self):
        # Finite iff every member set is finite (fuzzy).
        return fuzzy_and(set.is_finite_set for set in self.args)
    @property
    def _measure(self):
        # Measure of a union is the sum of the measures of the sets minus
        # the sum of their pairwise intersections plus the sum of their
        # triple-wise intersections minus ... etc...
        # (inclusion-exclusion principle)
        # Sets is a collection of intersections and a set of elementary
        # sets which made up those intersections (called "sos" for set of sets)
        # An example element might of this list might be:
        #    ( {A,B,C}, A.intersect(B).intersect(C) )
        # Start with just elementary sets (  ({A}, A), ({B}, B), ... )
        # Then get and subtract (  ({A,B}, (A int B), ... ) while non-zero
        sets = [(FiniteSet(s), s) for s in self.args]
        measure = 0
        parity = 1
        while sets:
            # Add up the measure of these sets and add or subtract it to total
            measure += parity * sum(inter.measure for sos, inter in sets)
            # For each intersection in sets, compute the intersection with every
            # other set not already part of the intersection.
            sets = ((sos + FiniteSet(newset), newset.intersect(intersection))
                    for sos, intersection in sets for newset in self.args
                    if newset not in sos)
            # Clear out sets with no measure
            sets = [(sos, inter) for sos, inter in sets if inter.measure != 0]
            # Clear out duplicates
            sos_list = []
            sets_list = []
            for _set in sets:
                if _set[0] in sos_list:
                    continue
                else:
                    sos_list.append(_set[0])
                    sets_list.append(_set)
            sets = sets_list
            # Flip Parity - next time subtract/add if we added/subtracted here
            parity *= -1
        return measure
    @property
    def _boundary(self):
        def boundary_of_set(i):
            """ The boundary of set i minus interior of all other sets """
            b = self.args[i].boundary
            for j, a in enumerate(self.args):
                if j != i:
                    b = b - a.interior
            return b
        return Union(*map(boundary_of_set, range(len(self.args))))
    def _contains(self, other):
        # Member of the union iff member of any constituent.
        return Or(*[s.contains(other) for s in self.args])
    def is_subset(self, other):
        # A union is a subset of ``other`` iff each part is (fuzzy).
        return fuzzy_and(s.is_subset(other) for s in self.args)
    def as_relational(self, symbol):
        """Rewrite a Union in terms of equalities and logic operators. """
        if (len(self.args) == 2 and
                all(isinstance(i, Interval) for i in self.args)):
            # optimization to give 3 args as (x > 1) & (x < 5) & Ne(x, 3)
            # instead of as 4, ((1 <= x) & (x < 3)) | ((x <= 5) & (3 < x))
            a, b = self.args
            if (a.sup == b.inf and
                    not any(a.sup in i for i in self.args)):
                return And(Ne(symbol, a.sup), symbol < b.sup, symbol > a.inf)
        return Or(*[i.as_relational(symbol) for i in self.args])
    @property
    def is_iterable(self):
        return all(arg.is_iterable for arg in self.args)
    def __iter__(self):
        # Interleave the member iterators so that infinite members
        # do not starve the others.
        return roundrobin(*(iter(arg) for arg in self.args))
class Intersection(Set, LatticeOp):
    """
    Represents an intersection of sets as a :class:`Set`.
    Examples
    ========
    >>> from sympy import Intersection, Interval
    >>> Intersection(Interval(1, 3), Interval(2, 4))
    Interval(2, 3)
    We often use the .intersect method
    >>> Interval(1,3).intersect(Interval(2,4))
    Interval(2, 3)
    See Also
    ========
    Union
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Intersection_%28set_theory%29
    """
    is_Intersection = True
    @property
    def identity(self):
        # LatticeOp identity: A & UniversalSet == A.
        return S.UniversalSet
    @property
    def zero(self):
        # LatticeOp absorbing element: A & EmptySet == EmptySet.
        return S.EmptySet
    def __new__(cls, *args, **kwargs):
        evaluate = kwargs.get('evaluate', global_parameters.evaluate)
        # flatten inputs to merge intersections and iterables
        args = list(ordered(set(_sympify(args))))
        # Reduce sets using known rules
        if evaluate:
            args = list(cls._new_args_filter(args))
            return simplify_intersection(args)
        # Unevaluated: store args in canonical order for stable hashing.
        args = list(ordered(args, Set._infimum_key))
        obj = Basic.__new__(cls, *args)
        obj._argset = frozenset(args)
        return obj
    @property
    def args(self):
        return self._args
    @property
    def is_iterable(self):
        # Iterable if *any* member is iterable: we can enumerate that
        # member and filter by membership in the rest.
        return any(arg.is_iterable for arg in self.args)
    @property
    def is_finite_set(self):
        # Finite if any member is finite; an infinite answer cannot be
        # concluded from the members alone, hence the implicit None.
        if fuzzy_or(arg.is_finite_set for arg in self.args):
            return True
    @property
    def _inf(self):
        raise NotImplementedError()
    @property
    def _sup(self):
        raise NotImplementedError()
    def _contains(self, other):
        # Member of the intersection iff member of every constituent.
        return And(*[set.contains(other) for set in self.args])
    def __iter__(self):
        # Strategy: iterate one member set and keep the elements that
        # belong to the intersection of the others. Prefer candidates of
        # known finite length (smallest first) to maximize the chance of
        # completing the enumeration.
        sets_sift = sift(self.args, lambda x: x.is_iterable)
        completed = False
        candidates = sets_sift[True] + sets_sift[None]
        finite_candidates, others = [], []
        for candidate in candidates:
            length = None
            try:
                length = len(candidate)
            except TypeError:
                others.append(candidate)
            if length is not None:
                finite_candidates.append(candidate)
        finite_candidates.sort(key=len)
        for s in finite_candidates + others:
            other_sets = set(self.args) - {s}
            other = Intersection(*other_sets, evaluate=False)
            completed = True
            for x in s:
                try:
                    if x in other:
                        yield x
                except TypeError:
                    # Membership undecidable for x: this candidate cannot
                    # fully enumerate the intersection.
                    completed = False
            if completed:
                return
        if not completed:
            if not candidates:
                raise TypeError("None of the constituent sets are iterable")
            raise TypeError(
                "The computation had not completed because of the "
                "undecidable set membership is found in every candidates.")
    @staticmethod
    def _handle_finite_sets(args):
        '''Simplify intersection of one or more FiniteSets and other sets'''
        # First separate the FiniteSets from the others
        fs_args, others = sift(args, lambda x: x.is_FiniteSet, binary=True)
        # Let the caller handle intersection of non-FiniteSets
        if not fs_args:
            return
        # Convert to Python sets and build the set of all elements
        fs_sets = [set(fs) for fs in fs_args]
        all_elements = reduce(lambda a, b: a | b, fs_sets, set())
        # Extract elements that are definitely in or definitely not in the
        # intersection. Here we check contains for all of args.
        definite = set()
        for e in all_elements:
            inall = fuzzy_and(s.contains(e) for s in args)
            if inall is True:
                definite.add(e)
            if inall is not None:
                for s in fs_sets:
                    s.discard(e)
        # At this point all elements in all of fs_sets are possibly in the
        # intersection. In some cases this is because they are definitely in
        # the intersection of the finite sets but it's not clear if they are
        # members of others. We might have {m, n}, {m}, and Reals where we
        # don't know if m or n is real. We want to remove n here but it is
        # possibly in because it might be equal to m. So what we do now is
        # extract the elements that are definitely in the remaining finite
        # sets iteratively until we end up with {n}, {}. At that point if we
        # get any empty set all remaining elements are discarded.
        fs_elements = reduce(lambda a, b: a | b, fs_sets, set())
        # Need fuzzy containment testing
        fs_symsets = [FiniteSet(*s) for s in fs_sets]
        while fs_elements:
            for e in fs_elements:
                infs = fuzzy_and(s.contains(e) for s in fs_symsets)
                if infs is True:
                    definite.add(e)
                if infs is not None:
                    for n, s in enumerate(fs_sets):
                        # Update Python set and FiniteSet
                        if e in s:
                            s.remove(e)
                            fs_symsets[n] = FiniteSet(*s)
                    fs_elements.remove(e)
                    break
            # If we completed the for loop without removing anything we are
            # done so quit the outer while loop
            else:
                break
        # If any of the sets of remainder elements is empty then we discard
        # all of them for the intersection.
        if not all(fs_sets):
            fs_sets = [set()]
        # Here we fold back the definitely included elements into each fs.
        # Since they are definitely included they must have been members of
        # each FiniteSet to begin with. We could instead fold these in with a
        # Union at the end to get e.g. {3}|({x}&{y}) rather than {3,x}&{3,y}.
        if definite:
            fs_sets = [fs | definite for fs in fs_sets]
        if fs_sets == [set()]:
            return S.EmptySet
        sets = [FiniteSet(*s) for s in fs_sets]
        # Any set in others is redundant if it contains all the elements that
        # are in the finite sets so we don't need it in the Intersection
        all_elements = reduce(lambda a, b: a | b, fs_sets, set())
        is_redundant = lambda o: all(fuzzy_bool(o.contains(e)) for e in all_elements)
        others = [o for o in others if not is_redundant(o)]
        if others:
            rest = Intersection(*others)
            # XXX: Maybe this shortcut should be at the beginning. For large
            # FiniteSets it could much more efficient to process the other
            # sets first...
            if rest is S.EmptySet:
                return S.EmptySet
            # Flatten the Intersection
            if rest.is_Intersection:
                sets.extend(rest.args)
            else:
                sets.append(rest)
        if len(sets) == 1:
            return sets[0]
        else:
            return Intersection(*sets, evaluate=False)
    def as_relational(self, symbol):
        """Rewrite an Intersection in terms of equalities and logic operators"""
        return And(*[set.as_relational(symbol) for set in self.args])
class Complement(Set):
r"""Represents the set difference or relative complement of a set with
another set.
$$A - B = \{x \in A \mid x \notin B\}$$
Examples
========
>>> from sympy import Complement, FiniteSet
>>> Complement(FiniteSet(0, 1, 2), FiniteSet(1))
{0, 2}
See Also
=========
Intersection, Union
References
==========
.. [1] http://mathworld.wolfram.com/ComplementSet.html
"""
is_Complement = True
def __new__(cls, a, b, evaluate=True):
a, b = map(_sympify, (a, b))
if evaluate:
return Complement.reduce(a, b)
return Basic.__new__(cls, a, b)
@staticmethod
def reduce(A, B):
"""
Simplify a :class:`Complement`.
"""
if B == S.UniversalSet or A.is_subset(B):
return S.EmptySet
if isinstance(B, Union):
return Intersection(*(s.complement(A) for s in B.args))
result = B._complement(A)
if result is not None:
return result
else:
return Complement(A, B, evaluate=False)
def _contains(self, other):
A = self.args[0]
B = self.args[1]
return And(A.contains(other), Not(B.contains(other)))
def as_relational(self, symbol):
"""Rewrite a complement in terms of equalities and logic
operators"""
A, B = self.args
A_rel = A.as_relational(symbol)
B_rel = Not(B.as_relational(symbol))
return And(A_rel, B_rel)
@property
def is_iterable(self):
if self.args[0].is_iterable:
return True
@property
def is_finite_set(self):
A, B = self.args
a_finite = A.is_finite_set
if a_finite is True:
return True
elif a_finite is False and B.is_finite_set:
return False
def __iter__(self):
A, B = self.args
for a in A:
if a not in B:
yield a
else:
continue
class EmptySet(Set, metaclass=Singleton):
    """
    Represents the empty set. The empty set is available as a singleton
    as ``S.EmptySet``.
    Examples
    ========
    >>> from sympy import S, Interval
    >>> S.EmptySet
    EmptySet
    >>> Interval(1, 2).intersect(S.EmptySet)
    EmptySet
    See Also
    ========
    UniversalSet
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Empty_set
    """
    is_empty = True
    is_finite_set = True
    is_FiniteSet = True
    @property # type: ignore
    @deprecated(useinstead="is S.EmptySet or is_empty",
            issue=16946, deprecated_since_version="1.5")
    def is_EmptySet(self):
        return True
    @property
    def _measure(self):
        # The empty set has Lebesgue measure zero.
        return 0
    def _contains(self, other):
        # Nothing is a member of the empty set.
        return false
    def as_relational(self, symbol):
        return false
    def __len__(self):
        return 0
    def __iter__(self):
        return iter([])
    def _eval_powerset(self):
        # P(EmptySet) == {EmptySet}.
        return FiniteSet(self)
    @property
    def _boundary(self):
        # The empty set is its own boundary.
        return self
    def _complement(self, other):
        # other - EmptySet == other.
        return other
    def _symmetric_difference(self, other):
        # EmptySet ^ other == other.
        return other
class UniversalSet(Set, metaclass=Singleton):
    """
    Represents the set of all things.
    The universal set is available as a singleton as ``S.UniversalSet``.
    Examples
    ========
    >>> from sympy import S, Interval
    >>> S.UniversalSet
    UniversalSet
    >>> Interval(1, 2).intersect(S.UniversalSet)
    Interval(1, 2)
    See Also
    ========
    EmptySet
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Universal_set
    """
    is_UniversalSet = True
    is_empty = False
    is_finite_set = False
    def _complement(self, other):
        # other - UniversalSet == EmptySet.
        return S.EmptySet
    def _symmetric_difference(self, other):
        # UniversalSet ^ other == UniversalSet - other; returning
        # ``other`` matches the Complement convention used elsewhere.
        return other
    @property
    def _measure(self):
        return S.Infinity
    def _contains(self, other):
        # Everything is a member of the universal set.
        return true
    def as_relational(self, symbol):
        return true
    @property
    def _boundary(self):
        # The universal set has no boundary points.
        return S.EmptySet
class FiniteSet(Set):
    """
    Represents a finite set of discrete numbers.
    Examples
    ========
    >>> from sympy import FiniteSet
    >>> FiniteSet(1, 2, 3, 4)
    {1, 2, 3, 4}
    >>> 3 in FiniteSet(1, 2, 3, 4)
    True
    >>> members = [1, 2, 3, 4]
    >>> f = FiniteSet(*members)
    >>> f
    {1, 2, 3, 4}
    >>> f - FiniteSet(2)
    {1, 3, 4}
    >>> f + FiniteSet(2, 5)
    {1, 2, 3, 4, 5}
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Finite_set
    """
    is_FiniteSet = True
    is_iterable = True
    is_empty = False
    is_finite_set = True
    def __new__(cls, *args, **kwargs):
        evaluate = kwargs.get('evaluate', global_parameters.evaluate)
        if evaluate:
            args = list(map(sympify, args))
            # An evaluated FiniteSet with no elements collapses to EmptySet.
            if len(args) == 0:
                return S.EmptySet
        else:
            args = list(map(sympify, args))
        # keep the form of the first canonical arg
        # Deduplicate elements up to dummy-renaming: iterating in reversed
        # canonical order means the canonically-first form ends up stored.
        dargs = {}
        for i in reversed(list(ordered(args))):
            if i.is_Symbol:
                dargs[i] = i
            else:
                try:
                    dargs[i.as_dummy()] = i
                except TypeError:
                    # e.g. i = class without args like `Interval`
                    dargs[i] = i
        _args_set = set(dargs.values())
        args = list(ordered(_args_set, Set._infimum_key))
        obj = Basic.__new__(cls, *args)
        # Cache a hash-set of the elements for O(1) syntactic membership.
        obj._args_set = _args_set
        return obj
    def __iter__(self):
        return iter(self.args)
    def _complement(self, other):
        # Compute ``other - self`` for the cases this class knows about.
        if isinstance(other, Interval):
            # Splitting in sub-intervals is only done for S.Reals;
            # other cases that need splitting will first pass through
            # Set._complement().
            nums, syms = [], []
            for m in self.args:
                if m.is_number and m.is_real:
                    nums.append(m)
                elif m.is_real == False:
                    pass  # drop non-reals
                else:
                    syms.append(m)  # various symbolic expressions
            if other == S.Reals and nums != []:
                nums.sort()
                intervals = []  # Build up a list of intervals between the elements
                intervals += [Interval(S.NegativeInfinity, nums[0], True, True)]
                for a, b in zip(nums[:-1], nums[1:]):
                    intervals.append(Interval(a, b, True, True))  # both open
                intervals.append(Interval(nums[-1], S.Infinity, True, True))
                if syms != []:
                    # Symbolic members may or may not equal the removed
                    # numbers, so keep them in an unevaluated Complement.
                    return Complement(Union(*intervals, evaluate=False),
                                      FiniteSet(*syms), evaluate=False)
                else:
                    return Union(*intervals, evaluate=False)
            elif nums == []:  # no splitting necessary or possible:
                if syms:
                    return Complement(other, FiniteSet(*syms), evaluate=False)
                else:
                    return other
        elif isinstance(other, FiniteSet):
            # Collect elements of self whose membership in other is undecided.
            unk = []
            for i in self:
                c = sympify(other.contains(i))
                if c is not S.true and c is not S.false:
                    unk.append(i)
            unk = FiniteSet(*unk)
            if unk == self:
                return
            # Keep elements of other not definitely contained in self, minus
            # the undecided ones.
            not_true = []
            for i in other:
                c = sympify(self.contains(i))
                if c is not S.true:
                    not_true.append(i)
            return Complement(FiniteSet(*not_true), unk)
        return Set._complement(self, other)
    def _contains(self, other):
        """
        Tests whether an element, other, is in the set.
        Explanation
        ===========
        The actual test is for mathematical equality (as opposed to
        syntactical equality). In the worst case all elements of the
        set must be checked.
        Examples
        ========
        >>> from sympy import FiniteSet
        >>> 1 in FiniteSet(1, 2)
        True
        >>> 5 in FiniteSet(1, 2)
        False
        """
        if other in self._args_set:
            # Fast path: exact (syntactic) member, via the cached hash-set.
            return True
        else:
            # evaluate=True is needed to override evaluate=False context;
            # we need Eq to do the evaluation
            return fuzzy_or(fuzzy_bool(Eq(e, other, evaluate=True))
                            for e in self.args)
    def _eval_is_subset(self, other):
        # Subset iff every element is (fuzzily) contained in ``other``.
        return fuzzy_and(other._contains(e) for e in self.args)
    @property
    def _boundary(self):
        # Finitely many points have empty interior: the set is its own boundary.
        return self
    @property
    def _inf(self):
        from sympy.functions.elementary.miscellaneous import Min
        return Min(*self)
    @property
    def _sup(self):
        from sympy.functions.elementary.miscellaneous import Max
        return Max(*self)
    @property
    def measure(self):
        # A finite point set has measure zero.
        return 0
    def __len__(self):
        return len(self.args)
    def as_relational(self, symbol):
        """Rewrite a FiniteSet in terms of equalities and logic operators. """
        return Or(*[Eq(symbol, elem) for elem in self])
    def compare(self, other):
        return (hash(self) - hash(other))
    def _eval_evalf(self, prec):
        # ``prec`` is binary precision; evalf wants decimal digits.
        dps = prec_to_dps(prec)
        return FiniteSet(*[elem.evalf(n=dps) for elem in self])
    def _eval_simplify(self, **kwargs):
        from sympy.simplify import simplify
        return FiniteSet(*[simplify(elem, **kwargs) for elem in self])
    @property
    def _sorted_args(self):
        return self.args
    def _eval_powerset(self):
        return self.func(*[self.func(*s) for s in subsets(self.args)])
    def _eval_rewrite_as_PowerSet(self, *args, **kwargs):
        """Rewriting method for a finite set to a power set."""
        from .powerset import PowerSet
        # A power set has exactly 2**n members; reject other cardinalities.
        is2pow = lambda n: bool(n and not n & (n - 1))
        if not is2pow(len(self)):
            return None
        fs_test = lambda arg: isinstance(arg, Set) and arg.is_FiniteSet
        if not all(fs_test(arg) for arg in args):
            return None
        # The candidate base set is the largest member; every subset of it
        # must itself appear among the members.
        biggest = max(args, key=len)
        for arg in subsets(biggest.args):
            arg_set = FiniteSet(*arg)
            if arg_set not in args:
                return None
        return PowerSet(biggest)
    def __ge__(self, other):
        if not isinstance(other, Set):
            raise TypeError("Invalid comparison of set with %s" % func_name(other))
        return other.is_subset(self)
    def __gt__(self, other):
        if not isinstance(other, Set):
            raise TypeError("Invalid comparison of set with %s" % func_name(other))
        return self.is_proper_superset(other)
    def __le__(self, other):
        if not isinstance(other, Set):
            raise TypeError("Invalid comparison of set with %s" % func_name(other))
        return self.is_subset(other)
    def __lt__(self, other):
        if not isinstance(other, Set):
            raise TypeError("Invalid comparison of set with %s" % func_name(other))
        return self.is_proper_subset(other)
# Register sympify converters so builtin ``set``/``frozenset`` values
# sympify to FiniteSet (e.g. ``sympify({1, 2})`` -> ``FiniteSet(1, 2)``).
converter[set] = lambda x: FiniteSet(*x)
converter[frozenset] = lambda x: FiniteSet(*x)
class SymmetricDifference(Set):
    """Represents the set of elements which are in either of the
    sets and not in their intersection.
    Examples
    ========
    >>> from sympy import SymmetricDifference, FiniteSet
    >>> SymmetricDifference(FiniteSet(1, 2, 3), FiniteSet(3, 4, 5))
    {1, 2, 4, 5}
    See Also
    ========
    Complement, Union
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Symmetric_difference
    """
    is_SymmetricDifference = True
    def __new__(cls, a, b, evaluate=True):
        if evaluate:
            return SymmetricDifference.reduce(a, b)
        return Basic.__new__(cls, a, b)
    @staticmethod
    def reduce(A, B):
        # Let B simplify the symmetric difference (note the swapped order);
        # fall back to an unevaluated node when no rule applies.
        result = B._symmetric_difference(A)
        if result is not None:
            return result
        else:
            return SymmetricDifference(A, B, evaluate=False)
    def as_relational(self, symbol):
        """Rewrite a symmetric_difference in terms of equalities and
        logic operators"""
        A, B = self.args
        A_rel = A.as_relational(symbol)
        B_rel = B.as_relational(symbol)
        # Membership in exactly one of A, B == Xor of the two relations.
        return Xor(A_rel, B_rel)
    @property
    def is_iterable(self):
        if all(arg.is_iterable for arg in self.args):
            return True
    def __iter__(self):
        args = self.args
        # Interleave the member iterators, then keep each item that occurs
        # in an odd number of the argument sets.
        union = roundrobin(*(iter(arg) for arg in args))
        for item in union:
            count = 0
            for s in args:
                if item in s:
                    count += 1
            if count % 2 == 1:
                yield item
class DisjointUnion(Set):
    """ Represents the disjoint union (also known as the external disjoint union)
    of a finite number of sets.
    Examples
    ========
    >>> from sympy import DisjointUnion, FiniteSet, Interval, Union, Symbol
    >>> A = FiniteSet(1, 2, 3)
    >>> B = Interval(0, 5)
    >>> DisjointUnion(A, B)
    DisjointUnion({1, 2, 3}, Interval(0, 5))
    >>> DisjointUnion(A, B).rewrite(Union)
    Union(ProductSet({1, 2, 3}, {0}), ProductSet(Interval(0, 5), {1}))
    >>> C = FiniteSet(Symbol('x'), Symbol('y'), Symbol('z'))
    >>> DisjointUnion(C, C)
    DisjointUnion({x, y, z}, {x, y, z})
    >>> DisjointUnion(C, C).rewrite(Union)
    ProductSet({x, y, z}, {0, 1})
    References
    ==========
    https://en.wikipedia.org/wiki/Disjoint_union
    """
    def __new__(cls, *sets):
        # Validate that every argument is a Set before constructing the node.
        dj_collection = []
        for set_i in sets:
            if isinstance(set_i, Set):
                dj_collection.append(set_i)
            else:
                raise TypeError("Invalid input: '%s', input args \
                    to DisjointUnion must be Sets" % set_i)
        obj = Basic.__new__(cls, *dj_collection)
        return obj
    @property
    def sets(self):
        return self.args
    @property
    def is_empty(self):
        # Empty iff every constituent is empty (fuzzy three-valued logic).
        return fuzzy_and(s.is_empty for s in self.sets)
    @property
    def is_finite_set(self):
        all_finite = fuzzy_and(s.is_finite_set for s in self.sets)
        return fuzzy_or([self.is_empty, all_finite])
    @property
    def is_iterable(self):
        if self.is_empty:
            return False
        # Iterable when every non-empty constituent is iterable.
        iter_flag = True
        for set_i in self.sets:
            if not set_i.is_empty:
                iter_flag = iter_flag and set_i.is_iterable
        return iter_flag
    def _eval_rewrite_as_Union(self, *sets):
        """
        Rewrites the disjoint union as the union of (``set`` x {``i``})
        where ``set`` is the element in ``sets`` at index = ``i``
        """
        dj_union = S.EmptySet
        index = 0
        for set_i in sets:
            if isinstance(set_i, Set):
                # Tag each constituent with its index so copies stay disjoint.
                cross = ProductSet(set_i, FiniteSet(index))
                dj_union = Union(dj_union, cross)
                index = index + 1
        return dj_union
    def _contains(self, element):
        """
        ``in`` operator for DisjointUnion
        Examples
        ========
        >>> from sympy import Interval, DisjointUnion
        >>> D = DisjointUnion(Interval(0, 1), Interval(0, 2))
        >>> (0.5, 0) in D
        True
        >>> (0.5, 1) in D
        True
        >>> (1.5, 0) in D
        False
        >>> (1.5, 1) in D
        True
        Passes operation on to constituent sets
        """
        # Elements are (value, index) pairs; reject anything else.
        if not isinstance(element, Tuple) or len(element) != 2:
            return False
        if not element[1].is_Integer:
            return False
        if element[1] >= len(self.sets) or element[1] < 0:
            return False
        return element[0] in self.sets[element[1]]
    def __iter__(self):
        if self.is_iterable:
            from sympy.core.numbers import Integer
            # Pair every element with its constituent's index and interleave.
            iters = []
            for i, s in enumerate(self.sets):
                iters.append(iproduct(s, {Integer(i)}))
            return iter(roundrobin(*iters))
        else:
            raise ValueError("'%s' is not iterable." % self)
    def __len__(self):
        """
        Returns the length of the disjoint union, i.e., the number of elements in the set.
        Examples
        ========
        >>> from sympy import FiniteSet, DisjointUnion, EmptySet
        >>> D1 = DisjointUnion(FiniteSet(1, 2, 3, 4), EmptySet, FiniteSet(3, 4, 5))
        >>> len(D1)
        7
        >>> D2 = DisjointUnion(FiniteSet(3, 5, 7), EmptySet, FiniteSet(3, 5, 7))
        >>> len(D2)
        6
        >>> D3 = DisjointUnion(EmptySet, EmptySet)
        >>> len(D3)
        0
        Adds up the lengths of the constituent sets.
        """
        if self.is_finite_set:
            size = 0
            for set in self.sets:
                size += len(set)
            return size
        else:
            raise ValueError("'%s' is not a finite set." % self)
def imageset(*args):
    r"""
    Return an image of the set under transformation ``f``.
    Explanation
    ===========
    If this function cannot compute the image, it returns an
    unevaluated ImageSet object.
    .. math::
        \{ f(x) \mid x \in \mathrm{self} \}
    Examples
    ========
    >>> from sympy import S, Interval, imageset, sin, Lambda
    >>> from sympy.abc import x
    >>> imageset(x, 2*x, Interval(0, 2))
    Interval(0, 4)
    >>> imageset(lambda x: 2*x, Interval(0, 2))
    Interval(0, 4)
    >>> imageset(Lambda(x, sin(x)), Interval(-2, 1))
    ImageSet(Lambda(x, sin(x)), Interval(-2, 1))
    >>> imageset(sin, Interval(-2, 1))
    ImageSet(Lambda(x, sin(x)), Interval(-2, 1))
    >>> imageset(lambda y: x + y, Interval(-2, 1))
    ImageSet(Lambda(y, x + y), Interval(-2, 1))
    Expressions applied to the set of Integers are simplified
    to show as few negatives as possible and linear expressions
    are converted to a canonical form. If this is not desirable
    then the unevaluated ImageSet should be used.
    >>> imageset(x, -2*x + 5, S.Integers)
    ImageSet(Lambda(x, 2*x + 1), Integers)
    See Also
    ========
    sympy.sets.fancysets.ImageSet
    """
    from sympy.core.function import Lambda
    from sympy.sets.fancysets import ImageSet
    from sympy.sets.setexpr import set_function
    if len(args) < 2:
        raise ValueError('imageset expects at least 2 args, got: %s' % len(args))
    # Calling convention 1: imageset(sym_or_tuple, expr, set, ...) -> Lambda.
    if isinstance(args[0], (Symbol, tuple)) and len(args) > 2:
        f = Lambda(args[0], args[1])
        set_list = args[2:]
    else:
        # Calling convention 2: imageset(callable, set, ...).
        f = args[0]
        set_list = args[1:]
    if isinstance(f, Lambda):
        pass
    elif callable(f):
        # Turn a plain callable / FunctionClass into a Lambda.
        nargs = getattr(f, 'nargs', {})
        if nargs:
            if len(nargs) != 1:
                raise NotImplementedError(filldedent('''
                    This function can take more than 1 arg
                    but the potentially complicated set input
                    has not been analyzed at this point to
                    know its dimensions. TODO
                    '''))
            N = nargs.args[0]
            if N == 1:
                s = 'x'
            else:
                s = [Symbol('x%i' % i) for i in range(1, N + 1)]
        else:
            s = inspect.signature(f).parameters
        # Probe with Dummies so chosen variable names cannot collide with
        # free symbols of the resulting expression.
        dexpr = _sympify(f(*[Dummy() for i in s]))
        var = tuple(uniquely_named_symbol(
            Symbol(i), dexpr) for i in s)
        f = Lambda(var, f(*var))
    else:
        raise TypeError(filldedent('''
            expecting lambda, Lambda, or FunctionClass,
            not \'%s\'.''' % func_name(f)))
    if any(not isinstance(s, Set) for s in set_list):
        name = [func_name(s) for s in set_list]
        raise ValueError(
            'arguments after mapping should be sets, not %s' % name)
    if len(set_list) == 1:
        set = set_list[0]
        try:
            # TypeError if arg count != set dimensions
            r = set_function(f, set)
            if r is None:
                raise TypeError
            if not r:
                return r
        except TypeError:
            r = ImageSet(f, set)
        if isinstance(r, ImageSet):
            f, set = r.args
        if f.variables[0] == f.expr:
            # Identity mapping: the image is the set itself.
            return set
        if isinstance(set, ImageSet):
            # XXX: Maybe this should just be:
            # f2 = set.lambda
            # fun = Lambda(f2.signature, f(*f2.expr))
            # return imageset(fun, *set.base_sets)
            if len(set.lamda.variables) == 1 and len(f.variables) == 1:
                x = set.lamda.variables[0]
                y = f.variables[0]
                # Compose the two single-variable maps into one ImageSet.
                return imageset(
                    Lambda(x, f.expr.subs(y, set.lamda.expr)), *set.base_sets)
        if r is not None:
            return r
    return ImageSet(f, *set_list)
def is_function_invertible_in_set(func, setv):
    """
    Return True when ``func`` is known to be invertible on the domain
    ``setv``; return None when invertibility cannot be decided.
    """
    from sympy.functions.elementary.exponential import (exp, log)
    # exp and log are globally injective, so no further analysis is needed.
    if func in (exp, log):
        return True
    # A derivative of constant sign means the function is strictly
    # monotone, hence invertible.
    # TODO: check subsets (`func` in `setv`)
    var = Dummy("u")
    derivative = func(var).diff(var)
    increasing = (derivative > 0) == True
    decreasing = (derivative < 0) == True
    if increasing or decreasing:
        return True
    # TODO: support more
    return None
def simplify_union(args):
    """
    Simplify a :class:`Union` using known rules.
    Explanation
    ===========
    We first start with global rules like 'Merge all FiniteSets'
    Then we iterate through all pairs and ask the constituent sets if they
    can simplify themselves with any other constituent. This process depends
    on ``union_sets(a, b)`` functions.
    """
    from sympy.sets.handlers.union import union_sets
    # ===== Global Rules =====
    if not args:
        return S.EmptySet
    for arg in args:
        if not isinstance(arg, Set):
            raise TypeError("Input args to Union must be Sets")
    # Merge all finite sets
    finite_sets = [x for x in args if x.is_FiniteSet]
    if len(finite_sets) > 1:
        a = (x for set in finite_sets for x in set)
        finite_set = FiniteSet(*a)
        args = [finite_set] + [x for x in args if not x.is_FiniteSet]
    # ===== Pair-wise Rules =====
    # Here we depend on rules built into the constituent sets
    args = set(args)
    new_args = True
    # Fixed-point loop: restart the pair scan whenever a pair merges.
    while new_args:
        for s in args:
            new_args = False
            for t in args - {s}:
                new_set = union_sets(s, t)
                # This returns None if s does not know how to intersect
                # with t. Returns the newly intersected set otherwise
                if new_set is not None:
                    if not isinstance(new_set, set):
                        new_set = {new_set}
                    new_args = (args - {s, t}).union(new_set)
                    break
            if new_args:
                args = new_args
                break
    if len(args) == 1:
        return args.pop()
    else:
        # No more rules apply: return an unevaluated Union.
        return Union(*args, evaluate=False)
def simplify_intersection(args):
    """
    Simplify an intersection using known rules.
    Explanation
    ===========
    We first start with global rules like
    'if any empty sets return empty set' and 'distribute any unions'
    Then we iterate through all pairs and ask the constituent sets if they
    can simplify themselves with any other constituent
    """
    # ===== Global Rules =====
    if not args:
        return S.UniversalSet
    for arg in args:
        if not isinstance(arg, Set):
            raise TypeError("Input args to Union must be Sets")
    # If any EmptySets return EmptySet
    if S.EmptySet in args:
        return S.EmptySet
    # Handle Finite sets
    rv = Intersection._handle_finite_sets(args)
    if rv is not None:
        return rv
    # If any of the sets are unions, return a Union of Intersections
    for s in args:
        if s.is_Union:
            other_sets = set(args) - {s}
            if len(other_sets) > 0:
                other = Intersection(*other_sets)
                return Union(*(Intersection(arg, other) for arg in s.args))
            else:
                return Union(*[arg for arg in s.args])
    for s in args:
        if s.is_Complement:
            # NOTE(review): assumes ``args`` is a mutable list here (callers
            # pass a list); the in-place removal during iteration is safe
            # only because we return immediately afterwards.
            args.remove(s)
            other_sets = args + [s.args[0]]
            return Complement(Intersection(*other_sets), s.args[1])
    from sympy.sets.handlers.intersection import intersection_sets
    # At this stage we are guaranteed not to have any
    # EmptySets, FiniteSets, or Unions in the intersection
    # ===== Pair-wise Rules =====
    # Here we depend on rules built into the constituent sets
    args = set(args)
    new_args = True
    # Fixed-point loop: restart the pair scan whenever a pair simplifies.
    while new_args:
        for s in args:
            new_args = False
            for t in args - {s}:
                new_set = intersection_sets(s, t)
                # This returns None if s does not know how to intersect
                # with t. Returns the newly intersected set otherwise
                if new_set is not None:
                    new_args = (args - {s, t}).union({new_set})
                    break
            if new_args:
                args = new_args
                break
    if len(args) == 1:
        return args.pop()
    else:
        return Intersection(*args, evaluate=False)
def _handle_finite_sets(op, x, y, commutative):
    """Apply the binary set-operation ``op`` when one or both operands are
    FiniteSets; return None when neither operand is a FiniteSet."""
    finite, rest = sift([x, y], lambda arg: isinstance(arg, FiniteSet), binary=True)
    n_finite = len(finite)
    if n_finite == 2:
        # Both finite: combine every pair of elements directly.
        pairs = (op(a, b) for a in finite[0] for b in finite[1])
        return FiniteSet(*pairs)
    if n_finite == 1:
        # One finite: distribute the operation over its elements.
        pieces = [_apply_operation(op, rest[0], elem, commutative)
                  for elem in finite[0]]
        return Union(*pieces)
    return None
def _apply_operation(op, x, y, commutative):
    """Apply the binary set operation ``op`` to ``x`` and ``y``, falling
    back to an ImageSet construction when no direct rule produces a result.
    ``commutative=True`` allows a second attempt with swapped operands."""
    from sympy.sets import ImageSet
    from sympy.core.function import Lambda
    d = Dummy('d')
    # First try elementwise handling of FiniteSet operands.
    out = _handle_finite_sets(op, x, y, commutative)
    if out is None:
        out = op(x, y)
    if out is None and commutative:
        out = op(y, x)
    if out is None:
        _x, _y = symbols("x y")
        if isinstance(x, Set) and not isinstance(y, Set):
            # Map the scalar ``y`` over the set ``x``.
            out = ImageSet(Lambda(d, op(d, y)), x).doit()
        elif not isinstance(x, Set) and isinstance(y, Set):
            # Map the scalar ``x`` over the set ``y``.
            out = ImageSet(Lambda(d, op(x, d)), y).doit()
        else:
            # Both are sets: leave a two-variable ImageSet unevaluated.
            out = ImageSet(Lambda((_x, _y), op(_x, _y)), x, y)
    return out
def set_add(x, y):
    """Apply addition elementwise to the set expressions ``x`` and ``y``."""
    from sympy.sets.handlers.add import _set_add
    return _apply_operation(_set_add, x, y, commutative=True)
def set_sub(x, y):
    """Apply subtraction elementwise; not commutative, so no operand swap."""
    from sympy.sets.handlers.add import _set_sub
    return _apply_operation(_set_sub, x, y, commutative=False)
def set_mul(x, y):
    """Apply multiplication elementwise to the set expressions ``x`` and ``y``."""
    from sympy.sets.handlers.mul import _set_mul
    return _apply_operation(_set_mul, x, y, commutative=True)
def set_div(x, y):
    """Apply division elementwise; not commutative, so no operand swap."""
    from sympy.sets.handlers.mul import _set_div
    return _apply_operation(_set_div, x, y, commutative=False)
def set_pow(x, y):
    """Apply exponentiation elementwise; not commutative, so no operand swap."""
    from sympy.sets.handlers.power import _set_pow
    return _apply_operation(_set_pow, x, y, commutative=False)
def set_function(f, x):
    """Apply the callable ``f`` to the set expression ``x`` via the
    dispatch table in ``sympy.sets.handlers.functions``."""
    from sympy.sets.handlers.functions import _set_function
    return _set_function(f, x)
from typing import Optional
from functools import reduce
from collections import defaultdict
import inspect
from sympy.core.basic import Basic
from sympy.core.compatibility import ordered
from sympy.core.containers import Tuple
from sympy.core.decorators import (deprecated, sympify_method_args,
sympify_return)
from sympy.core.evalf import EvalfMixin, prec_to_dps
from sympy.core.expr import Expr
from sympy.core.logic import (FuzzyBool, fuzzy_bool, fuzzy_or, fuzzy_and,
fuzzy_not)
from sympy.core.numbers import Float
from sympy.core.operations import LatticeOp
from sympy.core.parameters import global_parameters
from sympy.core.relational import Eq, Ne, is_lt
from sympy.core.singleton import Singleton, S
from sympy.core.symbol import symbols, Symbol, Dummy, uniquely_named_symbol
from sympy.core.sympify import _sympify, sympify, converter
from sympy.logic.boolalg import And, Or, Not, Xor, true, false
from sympy.sets.contains import Contains
from sympy.utilities import subsets
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import iproduct, sift, roundrobin, iterable
from sympy.utilities.misc import func_name, filldedent
from mpmath import mpi, mpf
# Normalize three-valued (fuzzy) results onto SymPy booleans: Python/SymPy
# True maps to S.true, False to S.false, and any other key yields None.
tfn = defaultdict(lambda: None, {
    True: S.true,
    S.true: S.true,
    False: S.false,
    S.false: S.false})
@sympify_method_args
class Set(Basic, EvalfMixin):
is_number = False
is_iterable = False
is_interval = False
is_FiniteSet = False
is_Interval = False
is_ProductSet = False
is_Union = False
is_Intersection = None
is_UniversalSet = None
is_Complement = None
is_ComplexRegion = False
is_empty = None
is_finite_set = None
@property
@deprecated(useinstead="is S.EmptySet or is_empty",
issue=16946, deprecated_since_version="1.5")
def is_EmptySet(self):
return None
@staticmethod
def _infimum_key(expr):
try:
infimum = expr.inf
assert infimum.is_comparable
infimum = infimum.evalf() except (NotImplementedError,
AttributeError, AssertionError, ValueError):
infimum = S.Infinity
return infimum
def union(self, other):
return Union(self, other)
def intersect(self, other):
return Intersection(self, other)
def intersection(self, other):
return self.intersect(other)
def is_disjoint(self, other):
return self.intersect(other) == S.EmptySet
def isdisjoint(self, other):
return self.is_disjoint(other)
def complement(self, universe):
return Complement(universe, self)
def _complement(self, other):
if isinstance(self, ProductSet) and isinstance(other, ProductSet):
if len(self.sets) != len(other.sets):
return other
overlaps = []
pairs = list(zip(self.sets, other.sets))
for n in range(len(pairs)):
sets = (o if i != n else o-s for i, (s, o) in enumerate(pairs))
overlaps.append(ProductSet(*sets))
return Union(*overlaps)
elif isinstance(other, Interval):
if isinstance(self, (Interval, FiniteSet)):
return Intersection(other, self.complement(S.Reals))
elif isinstance(other, Union):
return Union(*(o - self for o in other.args))
elif isinstance(other, Complement):
return Complement(other.args[0], Union(other.args[1], self), evaluate=False)
elif other is S.EmptySet:
return S.EmptySet
elif isinstance(other, FiniteSet):
sifted = sift(other, lambda x: fuzzy_bool(self.contains(x)))
return Union(FiniteSet(*(sifted[False])),
Complement(FiniteSet(*(sifted[None])), self, evaluate=False)
if sifted[None] else S.EmptySet)
def symmetric_difference(self, other):
return SymmetricDifference(self, other)
def _symmetric_difference(self, other):
return Union(Complement(self, other), Complement(other, self))
@property
def inf(self):
return self._inf
@property
def _inf(self):
raise NotImplementedError("(%s)._inf" % self)
@property
def sup(self):
return self._sup
@property
def _sup(self):
raise NotImplementedError("(%s)._sup" % self)
def contains(self, other):
other = sympify(other, strict=True)
c = self._contains(other)
if isinstance(c, Contains):
return c
if c is None:
return Contains(other, self, evaluate=False)
b = tfn[c]
if b is None:
return c
return b
def _contains(self, other):
raise NotImplementedError(filldedent('''
(%s)._contains(%s) is not defined. This method, when
defined, will receive a sympified object. The method
should return True, False, None or something that
expresses what must be true for the containment of that
object in self to be evaluated. If None is returned
then a generic Contains object will be returned
by the ``contains`` method.''' % (self, other)))
def is_subset(self, other):
if not isinstance(other, Set):
raise ValueError("Unknown argument '%s'" % other)
if self == other:
return True
is_empty = self.is_empty
if is_empty is True:
return True
elif fuzzy_not(is_empty) and other.is_empty:
return False
if self.is_finite_set is False and other.is_finite_set:
return False
ret = self._eval_is_subset(other)
if ret is not None:
return ret
ret = other._eval_is_superset(self)
if ret is not None:
return ret
from sympy.sets.handlers.issubset import is_subset_sets
ret = is_subset_sets(self, other)
if ret is not None:
return ret
# without evaluating new Set objects. It should be the other way round
# so that the intersect method uses is_subset for evaluation.
if self.intersect(other) == self:
return True
def _eval_is_subset(self, other):
return None
def _eval_is_superset(self, other):
return None
# This should be deprecated:
def issubset(self, other):
return self.is_subset(other)
def is_proper_subset(self, other):
if isinstance(other, Set):
return self != other and self.is_subset(other)
else:
raise ValueError("Unknown argument '%s'" % other)
def is_superset(self, other):
if isinstance(other, Set):
return other.is_subset(self)
else:
raise ValueError("Unknown argument '%s'" % other)
# This should be deprecated:
def issuperset(self, other):
return self.is_superset(other)
def is_proper_superset(self, other):
if isinstance(other, Set):
return self != other and self.is_superset(other)
else:
raise ValueError("Unknown argument '%s'" % other)
def _eval_powerset(self):
from .powerset import PowerSet
return PowerSet(self)
def powerset(self):
return self._eval_powerset()
@property
def measure(self):
return self._measure
@property
def boundary(self):
return self._boundary
@property
def is_open(self):
return Intersection(self, self.boundary).is_empty
@property
def is_closed(self):
return self.boundary.is_subset(self)
@property
def closure(self):
return self + self.boundary
@property
def interior(self):
return self - self.boundary
@property
def _boundary(self):
raise NotImplementedError()
@property
def _measure(self):
raise NotImplementedError("(%s)._measure" % self)
def _eval_evalf(self, prec):
dps = prec_to_dps(prec)
return self.func(*[arg.evalf(n=dps) for arg in self.args])
@sympify_return([('other', 'Set')], NotImplemented)
def __add__(self, other):
return self.union(other)
@sympify_return([('other', 'Set')], NotImplemented)
def __or__(self, other):
return self.union(other)
@sympify_return([('other', 'Set')], NotImplemented)
def __and__(self, other):
return self.intersect(other)
@sympify_return([('other', 'Set')], NotImplemented)
def __mul__(self, other):
return ProductSet(self, other)
@sympify_return([('other', 'Set')], NotImplemented)
def __xor__(self, other):
return SymmetricDifference(self, other)
@sympify_return([('exp', Expr)], NotImplemented)
def __pow__(self, exp):
if not (exp.is_Integer and exp >= 0):
raise ValueError("%s: Exponent must be a positive Integer" % exp)
return ProductSet(*[self]*exp)
@sympify_return([('other', 'Set')], NotImplemented)
def __sub__(self, other):
return Complement(self, other)
def __contains__(self, other):
other = _sympify(other)
c = self._contains(other)
b = tfn[c]
if b is None:
# x in y must evaluate to T or F; to entertain a None
# result with Set use y.contains(x)
raise TypeError('did not evaluate to a bool: %r' % c)
return b
class ProductSet(Set):
is_ProductSet = True
def __new__(cls, *sets, **assumptions):
if len(sets) == 1 and iterable(sets[0]) and not isinstance(sets[0], (Set, set)):
SymPyDeprecationWarning(
feature="ProductSet(iterable)",
useinstead="ProductSet(*iterable)",
issue=17557,
deprecated_since_version="1.5"
).warn()
sets = tuple(sets[0])
sets = [sympify(s) for s in sets]
if not all(isinstance(s, Set) for s in sets):
raise TypeError("Arguments to ProductSet should be of type Set")
# Nullary product of sets is *not* the empty set
if len(sets) == 0:
return FiniteSet(())
if S.EmptySet in sets:
return S.EmptySet
return Basic.__new__(cls, *sets, **assumptions)
@property
def sets(self):
return self.args
def flatten(self):
def _flatten(sets):
for s in sets:
if s.is_ProductSet:
yield from _flatten(s.sets)
else:
yield s
return ProductSet(*_flatten(self.sets))
def _contains(self, element):
if element.is_Symbol:
return None
if not isinstance(element, Tuple) or len(element) != len(self.sets):
return False
return fuzzy_and(s._contains(e) for s, e in zip(self.sets, element))
def as_relational(self, *symbols):
symbols = [_sympify(s) for s in symbols]
if len(symbols) != len(self.sets) or not all(
i.is_Symbol for i in symbols):
raise ValueError(
'number of symbols must match the number of sets')
return And(*[s.as_relational(i) for s, i in zip(self.sets, symbols)])
@property
def _boundary(self):
return Union(*(ProductSet(*(b + b.boundary if i != j else b.boundary
for j, b in enumerate(self.sets)))
for i, a in enumerate(self.sets)))
@property
def is_iterable(self):
return all(set.is_iterable for set in self.sets)
def __iter__(self):
return iproduct(*self.sets)
@property
def is_empty(self):
return fuzzy_or(s.is_empty for s in self.sets)
@property
def is_finite_set(self):
all_finite = fuzzy_and(s.is_finite_set for s in self.sets)
return fuzzy_or([self.is_empty, all_finite])
@property
def _measure(self):
measure = 1
for s in self.sets:
measure *= s.measure
return measure
def __len__(self):
return reduce(lambda a, b: a*b, (len(s) for s in self.args))
def __bool__(self):
return all(self.sets)
class Interval(Set):
is_Interval = True
def __new__(cls, start, end, left_open=False, right_open=False):
start = _sympify(start)
end = _sympify(end)
left_open = _sympify(left_open)
right_open = _sympify(right_open)
if not all(isinstance(a, (type(true), type(false)))
for a in [left_open, right_open]):
raise NotImplementedError(
"left_open and right_open can have only true/false values, "
"got %s and %s" % (left_open, right_open))
# Only allow real intervals
if fuzzy_not(fuzzy_and(i.is_extended_real for i in (start, end, end-start))):
raise ValueError("Non-real intervals are not supported")
# evaluate if possible
if is_lt(end, start):
return S.EmptySet
elif (end - start).is_negative:
return S.EmptySet
if end == start and (left_open or right_open):
return S.EmptySet
if end == start and not (left_open or right_open):
if start is S.Infinity or start is S.NegativeInfinity:
return S.EmptySet
return FiniteSet(end)
# Make sure infinite interval end points are open.
if start is S.NegativeInfinity:
left_open = true
if end is S.Infinity:
right_open = true
if start == S.Infinity or end == S.NegativeInfinity:
return S.EmptySet
return Basic.__new__(cls, start, end, left_open, right_open)
@property
def start(self):
return self._args[0]
@property
def end(self):
return self._args[1]
@property
def left_open(self):
return self._args[2]
@property
def right_open(self):
return self._args[3]
@classmethod
def open(cls, a, b):
return cls(a, b, True, True)
@classmethod
def Lopen(cls, a, b):
return cls(a, b, True, False)
@classmethod
def Ropen(cls, a, b):
return cls(a, b, False, True)
@property
def _inf(self):
return self.start
@property
def _sup(self):
return self.end
@property
def left(self):
return self.start
@property
def right(self):
return self.end
@property
def is_empty(self):
if self.left_open or self.right_open:
cond = self.start >= self.end # One/both bounds open
else:
cond = self.start > self.end # Both bounds closed
return fuzzy_bool(cond)
@property
def is_finite_set(self):
return self.measure.is_zero
def _complement(self, other):
if other == S.Reals:
a = Interval(S.NegativeInfinity, self.start,
True, not self.left_open)
b = Interval(self.end, S.Infinity, not self.right_open, True)
return Union(a, b)
if isinstance(other, FiniteSet):
nums = [m for m in other.args if m.is_number]
if nums == []:
return None
return Set._complement(self, other)
@property
def _boundary(self):
finite_points = [p for p in (self.start, self.end)
if abs(p) != S.Infinity]
return FiniteSet(*finite_points)
def _contains(self, other):
if (not isinstance(other, Expr) or other is S.NaN
or other.is_real is False or other.has(S.ComplexInfinity)):
# if an expression has zoo it will be zoo or nan
# and neither of those is real
return false
if self.start is S.NegativeInfinity and self.end is S.Infinity:
if other.is_real is not None:
return other.is_real
d = Dummy()
return self.as_relational(d).subs(d, other)
def as_relational(self, x):
x = sympify(x)
if self.right_open:
right = x < self.end
else:
right = x <= self.end
if self.left_open:
left = self.start < x
else:
left = self.start <= x
return And(left, right)
@property
def _measure(self):
return self.end - self.start
def to_mpi(self, prec=53):
return mpi(mpf(self.start._eval_evalf(prec)),
mpf(self.end._eval_evalf(prec)))
def _eval_evalf(self, prec):
return Interval(self.left._evalf(prec), self.right._evalf(prec),
left_open=self.left_open, right_open=self.right_open)
def _is_comparable(self, other):
is_comparable = self.start.is_comparable
is_comparable &= self.end.is_comparable
is_comparable &= other.start.is_comparable
is_comparable &= other.end.is_comparable
return is_comparable
@property
def is_left_unbounded(self):
return self.left is S.NegativeInfinity or self.left == Float("-inf")
@property
def is_right_unbounded(self):
return self.right is S.Infinity or self.right == Float("+inf")
def _eval_Eq(self, other):
if not isinstance(other, Interval):
if isinstance(other, FiniteSet):
return false
elif isinstance(other, Set):
return None
return false
class Union(Set, LatticeOp):
    """Represents a union of sets as a single ``Set`` (the lattice join)."""
    is_Union = True
    @property
    def identity(self):
        # Identity element of union: EmptySet | X == X
        return S.EmptySet
    @property
    def zero(self):
        # Absorbing element of union: UniversalSet | X == UniversalSet
        return S.UniversalSet
    def __new__(cls, *args, **kwargs):
        evaluate = kwargs.get('evaluate', global_parameters.evaluate)
        # flatten inputs to merge intersections and iterables
        args = _sympify(args)
        # Reduce sets using known rules
        if evaluate:
            args = list(cls._new_args_filter(args))
            return simplify_union(args)
        # Unevaluated form: canonically order the args and cache a frozenset
        # of them for fast membership checks.
        args = list(ordered(args, Set._infimum_key))
        obj = Basic.__new__(cls, *args)
        obj._argset = frozenset(args)
        return obj
    @property
    def args(self):
        return self._args
    def _complement(self, universe):
        # DeMorgan's Law
        return Intersection(s.complement(universe) for s in self.args)
    @property
    def _inf(self):
        # Infimum of a union is the minimum of the constituents' infima.
        from sympy.functions.elementary.miscellaneous import Min
        return Min(*[set.inf for set in self.args])
    @property
    def _sup(self):
        # Supremum of a union is the maximum of the constituents' suprema.
        from sympy.functions.elementary.miscellaneous import Max
        return Max(*[set.sup for set in self.args])
    @property
    def is_empty(self):
        # Empty iff every constituent is empty (fuzzy three-valued).
        return fuzzy_and(set.is_empty for set in self.args)
    @property
    def is_finite_set(self):
        # Finite iff every constituent is finite (fuzzy three-valued).
        return fuzzy_and(set.is_finite_set for set in self.args)
    @property
    def _measure(self):
        # Inclusion-exclusion principle: alternately add/subtract the
        # measures of intersections of k constituents; each entry pairs the
        # set-of-sets already intersected with the resulting intersection.
        sets = [(FiniteSet(s), s) for s in self.args]
        measure = 0
        parity = 1
        while sets:
            measure += parity * sum(inter.measure for sos, inter in sets)
            sets = ((sos + FiniteSet(newset), newset.intersect(intersection))
                    for sos, intersection in sets for newset in self.args
                    if newset not in sos)
            # Drop zero-measure intersections and duplicate sets-of-sets.
            sets = [(sos, inter) for sos, inter in sets if inter.measure != 0]
            sos_list = []
            sets_list = []
            for _set in sets:
                if _set[0] in sos_list:
                    continue
                else:
                    sos_list.append(_set[0])
                    sets_list.append(_set)
            sets = sets_list
            parity *= -1
        return measure
    @property
    def _boundary(self):
        def boundary_of_set(i):
            # Boundary of constituent i minus the interiors of all others.
            b = self.args[i].boundary
            for j, a in enumerate(self.args):
                if j != i:
                    b = b - a.interior
            return b
        return Union(*map(boundary_of_set, range(len(self.args))))
    def _contains(self, other):
        # Membership in a union: Or of membership in each constituent.
        return Or(*[s.contains(other) for s in self.args])
    def is_subset(self, other):
        # A union is a subset of ``other`` iff every constituent is.
        return fuzzy_and(s.is_subset(other) for s in self.args)
    def as_relational(self, symbol):
        """Rewrite the Union in terms of equalities and logic operators."""
        if (len(self.args) == 2 and
                all(isinstance(i, Interval) for i in self.args)):
            # Two intervals touching at one excluded point: express as a
            # single range with that point removed.
            a, b = self.args
            if (a.sup == b.inf and
                    not any(a.sup in i for i in self.args)):
                return And(Ne(symbol, a.sup), symbol < b.sup, symbol > a.inf)
        return Or(*[i.as_relational(symbol) for i in self.args])
    @property
    def is_iterable(self):
        return all(arg.is_iterable for arg in self.args)
    def __iter__(self):
        # Interleave elements from each constituent so an infinite member
        # does not starve the others.
        return roundrobin(*(iter(arg) for arg in self.args))
class Intersection(Set, LatticeOp):
    """Represents an intersection of sets as a single ``Set`` (the lattice meet)."""
    is_Intersection = True
    @property
    def identity(self):
        # Identity element of intersection: UniversalSet & X == X
        return S.UniversalSet
    @property
    def zero(self):
        # Absorbing element of intersection: EmptySet & X == EmptySet
        return S.EmptySet
    def __new__(cls, *args, **kwargs):
        evaluate = kwargs.get('evaluate', global_parameters.evaluate)
        # Deduplicate and canonically order the inputs.
        args = list(ordered(set(_sympify(args))))
        # Reduce sets using known rules
        if evaluate:
            args = list(cls._new_args_filter(args))
            return simplify_intersection(args)
        args = list(ordered(args, Set._infimum_key))
        obj = Basic.__new__(cls, *args)
        obj._argset = frozenset(args)
        return obj
    @property
    def args(self):
        return self._args
    @property
    def is_iterable(self):
        # Enumerable when at least one constituent can be iterated; the
        # remaining sets only need to answer membership queries.
        return any(arg.is_iterable for arg in self.args)
    @property
    def is_finite_set(self):
        # Finite if any constituent is finite; otherwise unknown (None).
        if fuzzy_or(arg.is_finite_set for arg in self.args):
            return True
    @property
    def _inf(self):
        raise NotImplementedError()
    @property
    def _sup(self):
        raise NotImplementedError()
    def _contains(self, other):
        # Membership: And of membership in every constituent.
        return And(*[set.contains(other) for set in self.args])
    def __iter__(self):
        # Enumerate one iterable constituent, keeping the elements that lie
        # in the intersection of all the others. Finite candidates are tried
        # first (smallest first) to maximize the chance of termination.
        sets_sift = sift(self.args, lambda x: x.is_iterable)
        completed = False
        candidates = sets_sift[True] + sets_sift[None]
        finite_candidates, others = [], []
        for candidate in candidates:
            length = None
            try:
                length = len(candidate)
            except TypeError:
                others.append(candidate)
            if length is not None:
                finite_candidates.append(candidate)
        finite_candidates.sort(key=len)
        for s in finite_candidates + others:
            other_sets = set(self.args) - {s}
            other = Intersection(*other_sets, evaluate=False)
            completed = True
            for x in s:
                try:
                    if x in other:
                        yield x
                except TypeError:
                    # Membership in ``other`` was undecidable for this x.
                    completed = False
            if completed:
                return
        if not completed:
            if not candidates:
                raise TypeError("None of the constituent sets are iterable")
            raise TypeError(
                "The computation had not completed because of the "
                "undecidable set membership is found in every candidates.")
    @staticmethod
    def _handle_finite_sets(args):
        """Simplify an intersection involving FiniteSets: keep elements whose
        membership in every arg is decided, defer the undecidable rest."""
        fs_args, others = sift(args, lambda x: x.is_FiniteSet, binary=True)
        if not fs_args:
            return
        fs_sets = [set(fs) for fs in fs_args]
        all_elements = reduce(lambda a, b: a | b, fs_sets, set())
        definite = set()
        for e in all_elements:
            inall = fuzzy_and(s.contains(e) for s in args)
            if inall is True:
                definite.add(e)
            if inall is not None:
                # Membership decided either way: e no longer needs tracking.
                for s in fs_sets:
                    s.discard(e)
        # Now handle elements whose membership in the remaining finite sets
        # is only decidable against the symbolic FiniteSets themselves; e.g.
        # members of others. We might have {m, n}, {m}, and Reals where we
        # don't know if m or n is real. We want to remove n here but it is
        # only decidable via FiniteSet containment.
        fs_elements = reduce(lambda a, b: a | b, fs_sets, set())
        fs_symsets = [FiniteSet(*s) for s in fs_sets]
        while fs_elements:
            for e in fs_elements:
                infs = fuzzy_and(s.contains(e) for s in fs_symsets)
                if infs is True:
                    definite.add(e)
                if infs is not None:
                    for n, s in enumerate(fs_sets):
                        if e in s:
                            s.remove(e)
                            fs_symsets[n] = FiniteSet(*s)
                    fs_elements.remove(e)
                    break
            # Only undecidable elements remain; stop iterating.
            else:
                break
        if not all(fs_sets):
            fs_sets = [set()]
        if definite:
            fs_sets = [fs | definite for fs in fs_sets]
        if fs_sets == [set()]:
            return S.EmptySet
        sets = [FiniteSet(*s) for s in fs_sets]
        all_elements = reduce(lambda a, b: a | b, fs_sets, set())
        # A non-finite set is redundant when it is known to contain every
        # remaining finite element.
        is_redundant = lambda o: all(fuzzy_bool(o.contains(e)) for e in all_elements)
        others = [o for o in others if not is_redundant(o)]
        if others:
            rest = Intersection(*others)
            # XXX: Maybe this shortcut should be at the beginning. For large
            # FiniteSets it could much more efficient to process the other
            # sets first...
            if rest is S.EmptySet:
                return S.EmptySet
            # Flatten the Intersection
            if rest.is_Intersection:
                sets.extend(rest.args)
            else:
                sets.append(rest)
        if len(sets) == 1:
            return sets[0]
        else:
            return Intersection(*sets, evaluate=False)
    def as_relational(self, symbol):
        """Rewrite an Intersection in terms of equalities and logic operators."""
        return And(*[set.as_relational(symbol) for set in self.args])
class Complement(Set):
    """Represents the set difference (relative complement):
    ``Complement(A, B)`` is the set of elements of A not in B."""
    is_Complement = True
    def __new__(cls, a, b, evaluate=True):
        a, b = map(_sympify, (a, b))
        if evaluate:
            return Complement.reduce(a, b)
        return Basic.__new__(cls, a, b)
    @staticmethod
    def reduce(A, B):
        """Simplify A - B, delegating to B's own ``_complement`` hook."""
        if B == S.UniversalSet or A.is_subset(B):
            return S.EmptySet
        if isinstance(B, Union):
            # A - (X U Y) == (A - X) & (A - Y)
            return Intersection(*(s.complement(A) for s in B.args))
        result = B._complement(A)
        if result is not None:
            return result
        else:
            # No simplification known; keep the unevaluated form.
            return Complement(A, B, evaluate=False)
    def _contains(self, other):
        # other in A - B  <=>  other in A and other not in B
        A = self.args[0]
        B = self.args[1]
        return And(A.contains(other), Not(B.contains(other)))
    def as_relational(self, symbol):
        """Rewrite a complement in terms of equalities and logic operators."""
        A, B = self.args
        A_rel = A.as_relational(symbol)
        B_rel = Not(B.as_relational(symbol))
        return And(A_rel, B_rel)
    @property
    def is_iterable(self):
        # Iterable when A is (B only needs membership tests); else unknown.
        if self.args[0].is_iterable:
            return True
    @property
    def is_finite_set(self):
        # A - B is finite when A is; infinite when A is infinite and B finite.
        A, B = self.args
        a_finite = A.is_finite_set
        if a_finite is True:
            return True
        elif a_finite is False and B.is_finite_set:
            return False
    def __iter__(self):
        A, B = self.args
        for a in A:
            if a not in B:
                yield a
            else:
                continue
class EmptySet(Set, metaclass=Singleton):
    """Represents the empty set, available as the singleton ``S.EmptySet``."""
    is_empty = True
    is_finite_set = True
    is_FiniteSet = True
    @property  # type: ignore
    @deprecated(useinstead="is S.EmptySet or is_empty",
                issue=16946, deprecated_since_version="1.5")
    def is_EmptySet(self):
        return True
    @property
    def _measure(self):
        return 0
    def _contains(self, other):
        # Nothing is a member of the empty set.
        return false
    def as_relational(self, symbol):
        return false
    def __len__(self):
        return 0
    def __iter__(self):
        return iter([])
    def _eval_powerset(self):
        # The power set of {} is {{}}.
        return FiniteSet(self)
    @property
    def _boundary(self):
        return self
    def _complement(self, other):
        # other - {} == other
        return other
    def _symmetric_difference(self, other):
        # {} ^ other == other
        return other
class UniversalSet(Set, metaclass=Singleton):
    """Represents the set of all things, available as ``S.UniversalSet``."""
    is_UniversalSet = True
    is_empty = False
    is_finite_set = False
    def _complement(self, other):
        # other - UniversalSet == {}
        return S.EmptySet
    def _symmetric_difference(self, other):
        return other
    @property
    def _measure(self):
        return S.Infinity
    def _contains(self, other):
        # Everything is a member of the universal set.
        return true
    def as_relational(self, symbol):
        return true
    @property
    def _boundary(self):
        return S.EmptySet
class FiniteSet(Set):
    """Represents a finite set of discrete elements."""
    is_FiniteSet = True
    is_iterable = True
    is_empty = False
    is_finite_set = True
    def __new__(cls, *args, **kwargs):
        evaluate = kwargs.get('evaluate', global_parameters.evaluate)
        if evaluate:
            args = list(map(sympify, args))
            if len(args) == 0:
                return S.EmptySet
        else:
            args = list(map(sympify, args))
        # keep the form of the first canonical arg
        dargs = {}
        for i in reversed(list(ordered(args))):
            if i.is_Symbol:
                dargs[i] = i
            else:
                try:
                    dargs[i.as_dummy()] = i
                except TypeError:
                    # e.g. i = class without args like `Interval`
                    dargs[i] = i
        _args_set = set(dargs.values())
        args = list(ordered(_args_set, Set._infimum_key))
        obj = Basic.__new__(cls, *args)
        # Cache a plain set of the args for O(1) syntactic membership tests.
        obj._args_set = _args_set
        return obj
    def __iter__(self):
        return iter(self.args)
    def _complement(self, other):
        if isinstance(other, Interval):
            # Splitting in sub-intervals is only done for S.Reals;
            # other cases that need splitting will first pass through
            # Set._complement().
            nums, syms = [], []
            for m in self.args:
                if m.is_number and m.is_real:
                    nums.append(m)
                elif m.is_real == False:
                    pass  # drop non-reals
                else:
                    syms.append(m)  # various symbolic expressions
            if other == S.Reals and nums != []:
                nums.sort()
                intervals = []  # Build up a list of intervals between the elements
                intervals += [Interval(S.NegativeInfinity, nums[0], True, True)]
                for a, b in zip(nums[:-1], nums[1:]):
                    intervals.append(Interval(a, b, True, True))  # both open
                intervals.append(Interval(nums[-1], S.Infinity, True, True))
                if syms != []:
                    # Symbolic members may or may not be real; keep them in
                    # an unevaluated Complement.
                    return Complement(Union(*intervals, evaluate=False),
                                      FiniteSet(*syms), evaluate=False)
                else:
                    return Union(*intervals, evaluate=False)
            elif nums == []:  # no splitting necessary or possible:
                if syms:
                    return Complement(other, FiniteSet(*syms), evaluate=False)
                else:
                    return other
        elif isinstance(other, FiniteSet):
            # Collect self's members whose membership in ``other`` cannot be
            # decided; if nothing is decidable, give up (return None).
            unk = []
            for i in self:
                c = sympify(other.contains(i))
                if c is not S.true and c is not S.false:
                    unk.append(i)
            unk = FiniteSet(*unk)
            if unk == self:
                return
            not_true = []
            for i in other:
                c = sympify(self.contains(i))
                if c is not S.true:
                    not_true.append(i)
            return Complement(FiniteSet(*not_true), unk)
        return Set._complement(self, other)
    def _contains(self, other):
        """Test whether ``other`` is an element of the set.

        A fast syntactic check against the cached ``_args_set`` is tried
        first; otherwise fall back to fuzzy mathematical equality.
        """
        if other in self._args_set:
            return True
        else:
            # evaluate=True is needed to override evaluate=False context;
            # we need Eq to do the evaluation
            return fuzzy_or(fuzzy_bool(Eq(e, other, evaluate=True))
                            for e in self.args)
    def _eval_is_subset(self, other):
        return fuzzy_and(other._contains(e) for e in self.args)
    @property
    def _boundary(self):
        return self
    @property
    def _inf(self):
        from sympy.functions.elementary.miscellaneous import Min
        return Min(*self)
    @property
    def _sup(self):
        from sympy.functions.elementary.miscellaneous import Max
        return Max(*self)
    @property
    def measure(self):
        # A finite collection of points has measure zero.
        return 0
    def __len__(self):
        return len(self.args)
    def as_relational(self, symbol):
        """Rewrite a FiniteSet in terms of equalities and logic operators."""
        return Or(*[Eq(symbol, elem) for elem in self])
    def compare(self, other):
        return (hash(self) - hash(other))
    def _eval_evalf(self, prec):
        dps = prec_to_dps(prec)
        return FiniteSet(*[elem.evalf(n=dps) for elem in self])
    def _eval_simplify(self, **kwargs):
        from sympy.simplify import simplify
        return FiniteSet(*[simplify(elem, **kwargs) for elem in self])
    @property
    def _sorted_args(self):
        return self.args
    def _eval_powerset(self):
        return self.func(*[self.func(*s) for s in subsets(self.args)])
    def _eval_rewrite_as_PowerSet(self, *args, **kwargs):
        """Rewrite as PowerSet(biggest) when the args are exactly the family
        of all subsets of the biggest member (requires len == power of 2)."""
        from .powerset import PowerSet
        is2pow = lambda n: bool(n and not n & (n - 1))
        if not is2pow(len(self)):
            return None
        fs_test = lambda arg: isinstance(arg, Set) and arg.is_FiniteSet
        if not all(fs_test(arg) for arg in args):
            return None
        biggest = max(args, key=len)
        for arg in subsets(biggest.args):
            arg_set = FiniteSet(*arg)
            if arg_set not in args:
                return None
        return PowerSet(biggest)
    def __ge__(self, other):
        if not isinstance(other, Set):
            raise TypeError("Invalid comparison of set with %s" % func_name(other))
        return other.is_subset(self)
    def __gt__(self, other):
        if not isinstance(other, Set):
            raise TypeError("Invalid comparison of set with %s" % func_name(other))
        return self.is_proper_superset(other)
    def __le__(self, other):
        if not isinstance(other, Set):
            raise TypeError("Invalid comparison of set with %s" % func_name(other))
        return self.is_subset(other)
    def __lt__(self, other):
        if not isinstance(other, Set):
            raise TypeError("Invalid comparison of set with %s" % func_name(other))
        return self.is_proper_subset(other)
# Register sympify converters so builtin set/frozenset become FiniteSets.
converter[set] = lambda x: FiniteSet(*x)
converter[frozenset] = lambda x: FiniteSet(*x)
class SymmetricDifference(Set):
    """Represents the symmetric difference of two sets: the elements that
    belong to exactly one of them."""
    is_SymmetricDifference = True
    def __new__(cls, a, b, evaluate=True):
        if evaluate:
            return SymmetricDifference.reduce(a, b)
        return Basic.__new__(cls, a, b)
    @staticmethod
    def reduce(A, B):
        # Delegate to the operand's own simplification hook when available.
        result = B._symmetric_difference(A)
        if result is not None:
            return result
        else:
            return SymmetricDifference(A, B, evaluate=False)
    def as_relational(self, symbol):
        """Rewrite a symmetric difference in terms of equalities and logic
        operators (Xor of the operands' relational forms)."""
        A, B = self.args
        A_rel = A.as_relational(symbol)
        B_rel = B.as_relational(symbol)
        return Xor(A_rel, B_rel)
    @property
    def is_iterable(self):
        if all(arg.is_iterable for arg in self.args):
            return True
    def __iter__(self):
        args = self.args
        union = roundrobin(*(iter(arg) for arg in args))
        for item in union:
            count = 0
            for s in args:
                if item in s:
                    count += 1
            # Yield the items contained in an odd number of the args.
            if count % 2 == 1:
                yield item
class DisjointUnion(Set):
    """Represents the disjoint union of a finite number of sets; elements
    are (value, index) pairs tagging each value with its source set."""
    def __new__(cls, *sets):
        dj_collection = []
        for set_i in sets:
            if isinstance(set_i, Set):
                dj_collection.append(set_i)
            else:
                raise TypeError("Invalid input: '%s', input args \
                    to DisjointUnion must be Sets" % set_i)
        obj = Basic.__new__(cls, *dj_collection)
        return obj
    @property
    def sets(self):
        return self.args
    @property
    def is_empty(self):
        return fuzzy_and(s.is_empty for s in self.sets)
    @property
    def is_finite_set(self):
        all_finite = fuzzy_and(s.is_finite_set for s in self.sets)
        # Empty unions are finite even if finiteness is otherwise unknown.
        return fuzzy_or([self.is_empty, all_finite])
    @property
    def is_iterable(self):
        if self.is_empty:
            return False
        # Iterable iff every non-empty constituent is iterable.
        iter_flag = True
        for set_i in self.sets:
            if not set_i.is_empty:
                iter_flag = iter_flag and set_i.is_iterable
        return iter_flag
    def _eval_rewrite_as_Union(self, *sets):
        """Rewrite the disjoint union as Union of (set_i x {i}) products."""
        dj_union = S.EmptySet
        index = 0
        for set_i in sets:
            if isinstance(set_i, Set):
                cross = ProductSet(set_i, FiniteSet(index))
                dj_union = Union(dj_union, cross)
                index = index + 1
        return dj_union
    def _contains(self, element):
        # Membership requires a 2-tuple (value, i) with a valid integer
        # index i and value contained in the i-th constituent set.
        if not isinstance(element, Tuple) or len(element) != 2:
            return False
        if not element[1].is_Integer:
            return False
        if element[1] >= len(self.sets) or element[1] < 0:
            return False
        return element[0] in self.sets[element[1]]
    def __iter__(self):
        if self.is_iterable:
            from sympy.core.numbers import Integer
            # Pair every element of set i with its index i, interleaving the
            # constituents round-robin.
            iters = []
            for i, s in enumerate(self.sets):
                iters.append(iproduct(s, {Integer(i)}))
            return iter(roundrobin(*iters))
        else:
            raise ValueError("'%s' is not iterable." % self)
    def __len__(self):
        """Return the number of elements; raises ValueError for infinite sets."""
        if self.is_finite_set:
            size = 0
            for set in self.sets:
                size += len(set)
            return size
        else:
            raise ValueError("'%s' is not a finite set." % self)
def imageset(*args):
    r"""Return the image of a set under a transformation ``f``:
    ``{f(x) | x in set}``.

    Accepts either ``imageset(f, set...)`` with f a Lambda/callable, or
    ``imageset(var(s), expr, set...)``.
    """
    from sympy.core.function import Lambda
    from sympy.sets.fancysets import ImageSet
    from sympy.sets.setexpr import set_function
    if len(args) < 2:
        raise ValueError('imageset expects at least 2 args, got: %s' % len(args))
    if isinstance(args[0], (Symbol, tuple)) and len(args) > 2:
        # (variable(s), expression, set...) form: build the Lambda ourselves.
        f = Lambda(args[0], args[1])
        set_list = args[2:]
    else:
        f = args[0]
        set_list = args[1:]
    if isinstance(f, Lambda):
        pass
    elif callable(f):
        # Wrap a plain callable/FunctionClass as a Lambda over fresh symbols
        # chosen not to collide with symbols in the resulting expression.
        nargs = getattr(f, 'nargs', {})
        if nargs:
            if len(nargs) != 1:
                raise NotImplementedError(filldedent('''
                    This function can take more than 1 arg
                    but the potentially complicated set input
                    has not been analyzed at this point to
                    know its dimensions. TODO
                    '''))
            N = nargs.args[0]
            if N == 1:
                s = 'x'
            else:
                s = [Symbol('x%i' % i) for i in range(1, N + 1)]
        else:
            s = inspect.signature(f).parameters
        dexpr = _sympify(f(*[Dummy() for i in s]))
        var = tuple(uniquely_named_symbol(
            Symbol(i), dexpr) for i in s)
        f = Lambda(var, f(*var))
    else:
        raise TypeError(filldedent('''
            expecting lambda, Lambda, or FunctionClass,
            not \'%s\'.''' % func_name(f)))
    if any(not isinstance(s, Set) for s in set_list):
        name = [func_name(s) for s in set_list]
        raise ValueError(
            'arguments after mapping should be sets, not %s' % name)
    if len(set_list) == 1:
        set = set_list[0]
        try:
            # TypeError if arg count != set dimensions
            r = set_function(f, set)
            if r is None:
                raise TypeError
            if not r:
                return r
        except TypeError:
            r = ImageSet(f, set)
        if isinstance(r, ImageSet):
            f, set = r.args
            if f.variables[0] == f.expr:
                # Identity mapping: the image is the base set itself.
                return set
            if isinstance(set, ImageSet):
                # XXX: Maybe this should just be:
                # f2 = set.lambda
                # fun = Lambda(f2.signature, f(*f2.expr))
                # return imageset(fun, *set.base_sets)
                if len(set.lamda.variables) == 1 and len(f.variables) == 1:
                    # Compose the two single-variable mappings.
                    x = set.lamda.variables[0]
                    y = f.variables[0]
                    return imageset(
                        Lambda(x, f.expr.subs(y, set.lamda.expr)), *set.base_sets)
        if r is not None:
            return r
    return ImageSet(f, *set_list)
def is_function_invertible_in_set(func, setv):
    """Heuristic check whether ``func`` is invertible on the set ``setv``.

    Returns True when invertibility can be established, otherwise None
    (undecided). ``setv`` is currently unused beyond the TODO below.
    """
    from sympy.functions.elementary.exponential import (exp, log)
    # Functions known to always be invertible:
    if func in (exp, log):
        return True
    u = Dummy("u")
    fdiff = func(u).diff(u)
    # monotonous functions:
    # TODO: check subsets (`func` in `setv`)
    if (fdiff > 0) == True or (fdiff < 0) == True:
        return True
    # TODO: support more
    return None
def simplify_union(args):
    """Simplify a union of Sets using known rules.

    Merges all FiniteSets first, then repeatedly applies pairwise
    ``union_sets`` rules until no further simplification is possible.
    """
    from sympy.sets.handlers.union import union_sets
    # ===== Global Rules =====
    if not args:
        return S.EmptySet
    for arg in args:
        if not isinstance(arg, Set):
            raise TypeError("Input args to Union must be Sets")
    # Merge all finite sets
    finite_sets = [x for x in args if x.is_FiniteSet]
    if len(finite_sets) > 1:
        a = (x for set in finite_sets for x in set)
        finite_set = FiniteSet(*a)
        args = [finite_set] + [x for x in args if not x.is_FiniteSet]
    # ===== Pair-wise Rules =====
    # Here we depend on rules built into the constituent sets
    args = set(args)
    new_args = True
    while new_args:
        for s in args:
            new_args = False
            for t in args - {s}:
                new_set = union_sets(s, t)
                # This returns None if s does not know how to intersect
                # with t. Returns the newly intersected set otherwise
                if new_set is not None:
                    if not isinstance(new_set, set):
                        new_set = {new_set}
                    new_args = (args - {s, t}).union(new_set)
                    break
            if new_args:
                # A pair merged; restart the scan with the reduced args.
                args = new_args
                break
    if len(args) == 1:
        return args.pop()
    else:
        return Union(*args, evaluate=False)
def simplify_intersection(args):
    """Simplify an intersection of Sets using known rules.

    Applies global rules (empty set, finite sets, distribution over
    unions, complements) and then pairwise ``intersection_sets`` rules.
    """
    # ===== Global Rules =====
    if not args:
        return S.UniversalSet
    for arg in args:
        if not isinstance(arg, Set):
            raise TypeError("Input args to Union must be Sets")
    # If any EmptySets return EmptySet
    if S.EmptySet in args:
        return S.EmptySet
    # Handle Finite sets
    rv = Intersection._handle_finite_sets(args)
    if rv is not None:
        return rv
    # If any of the sets are unions, return a Union of Intersections
    for s in args:
        if s.is_Union:
            other_sets = set(args) - {s}
            if len(other_sets) > 0:
                other = Intersection(*other_sets)
                return Union(*(Intersection(arg, other) for arg in s.args))
            else:
                return Union(*[arg for arg in s.args])
    # Pull a complement out: (A - B) & rest == (A & rest) - B
    for s in args:
        if s.is_Complement:
            args.remove(s)
            other_sets = args + [s.args[0]]
            return Complement(Intersection(*other_sets), s.args[1])
    from sympy.sets.handlers.intersection import intersection_sets
    # At this stage we are guaranteed not to have any
    # EmptySets, FiniteSets, or Unions in the intersection
    # ===== Pair-wise Rules =====
    # Here we depend on rules built into the constituent sets
    args = set(args)
    new_args = True
    while new_args:
        for s in args:
            new_args = False
            for t in args - {s}:
                new_set = intersection_sets(s, t)
                # This returns None if s does not know how to intersect
                # with t. Returns the newly intersected set otherwise
                if new_set is not None:
                    new_args = (args - {s, t}).union({new_set})
                    break
            if new_args:
                # A pair merged; restart the scan with the reduced args.
                args = new_args
                break
    if len(args) == 1:
        return args.pop()
    else:
        return Intersection(*args, evaluate=False)
def _handle_finite_sets(op, x, y, commutative):
    """Apply binary operation ``op`` when one or both operands are FiniteSets.

    Returns the resulting Set, or None when neither operand is a
    FiniteSet (caller falls back to other strategies).
    """
    fs_args, other = sift([x, y], lambda v: isinstance(v, FiniteSet), binary=True)
    if len(fs_args) == 2:
        # Both finite: combine every element of one with every element
        # of the other.
        pairs = [op(a, b) for a in fs_args[0] for b in fs_args[1]]
        return FiniteSet(*pairs)
    if len(fs_args) == 1:
        # One finite operand: distribute the operation over its elements.
        results = [_apply_operation(op, other[0], elem, commutative)
                   for elem in fs_args[0]]
        return Union(*results)
    return None
def _apply_operation(op, x, y, commutative):
    """Apply binary handler ``op`` to operands that may be Sets or exprs.

    Tries, in order: FiniteSet distribution, the handler itself, the
    handler with swapped operands (when commutative), and finally an
    unevaluated ImageSet.
    """
    from sympy.sets import ImageSet
    from sympy.core.function import Lambda
    d = Dummy('d')
    out = _handle_finite_sets(op, x, y, commutative)
    if out is None:
        out = op(x, y)
    if out is None and commutative:
        out = op(y, x)
    if out is None:
        _x, _y = symbols("x y")
        if isinstance(x, Set) and not isinstance(y, Set):
            # Map d -> op(d, y) over the set x.
            out = ImageSet(Lambda(d, op(d, y)), x).doit()
        elif not isinstance(x, Set) and isinstance(y, Set):
            # Map d -> op(x, d) over the set y.
            out = ImageSet(Lambda(d, op(x, d)), y).doit()
        else:
            out = ImageSet(Lambda((_x, _y), op(_x, _y)), x, y)
    return out
def set_add(x, y):
    """Lift addition to Sets: the set of sums of elements of x and y."""
    from sympy.sets.handlers.add import _set_add
    return _apply_operation(_set_add, x, y, True)
def set_sub(x, y):
    """Lift subtraction to Sets (non-commutative)."""
    from sympy.sets.handlers.add import _set_sub
    return _apply_operation(_set_sub, x, y, False)
def set_mul(x, y):
    """Lift multiplication to Sets: the set of products of elements."""
    from sympy.sets.handlers.mul import _set_mul
    return _apply_operation(_set_mul, x, y, True)
def set_div(x, y):
    """Lift division to Sets (non-commutative)."""
    from sympy.sets.handlers.mul import _set_div
    return _apply_operation(_set_div, x, y, False)
def set_pow(x, y):
    """Lift exponentiation to Sets (non-commutative)."""
    from sympy.sets.handlers.power import _set_pow
    return _apply_operation(_set_pow, x, y, False)
def set_function(f, x):
    """Apply the function ``f`` to the set ``x`` via the registered handler."""
    from sympy.sets.handlers.functions import _set_function
    return _set_function(f, x)
| true | true |
f7fd1cc145f862634e937327d3fdb3afc0ebd5c5 | 841 | py | Python | main.py | ak64th/weather_record | 4724dad297244de6eca20b8944335efe086d2107 | [
"Unlicense"
] | null | null | null | main.py | ak64th/weather_record | 4724dad297244de6eca20b8944335efe086d2107 | [
"Unlicense"
] | null | null | null | main.py | ak64th/weather_record | 4724dad297244de6eca20b8944335efe086d2107 | [
"Unlicense"
] | null | null | null | # coding=utf-8
import logging
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.executors.pool import ThreadPoolExecutor
from pytz import timezone
from task import get_weathers
from db import db
if __name__ == '__main__':
    # Verbose console logging so scheduler activity is visible.
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger('weather_record')
    logger.addHandler(logging.StreamHandler())
    executors = {'default': ThreadPoolExecutor(20)}
    # Make sure the database tables have already been created.
    # Set up the scheduler; cron jobs only need to be kept in memory. If a
    # job must run at fixed intervals instead, it would need to be stored
    # in the database.
    scheduler = BlockingScheduler(logger=logger, executors=executors, timezone=timezone('Asia/Shanghai'))
    # Fetch the weather data once a day at 10:00 (Asia/Shanghai).
    scheduler.add_job(get_weathers, trigger='cron', hour='10')
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
import logging
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.executors.pool import ThreadPoolExecutor
from pytz import timezone
from task import get_weathers
from db import db
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('weather_record')
logger.addHandler(logging.StreamHandler())
executors = {'default': ThreadPoolExecutor(20)}
scheduler = BlockingScheduler(logger=logger, executors=executors, timezone=timezone('Asia/Shanghai'))
scheduler.add_job(get_weathers, trigger='cron', hour='10')
try:
scheduler.start()
except (KeyboardInterrupt, SystemExit):
pass | true | true |
f7fd1e37f1fb07cbe6b06acddff1f9f7c6fa3e6e | 110,531 | py | Python | elastalert/alerts.py | archfz/elastalert2 | ec6160b77a3d891675e3cea4b3b58ead1a13772e | [
"Apache-2.0"
] | null | null | null | elastalert/alerts.py | archfz/elastalert2 | ec6160b77a3d891675e3cea4b3b58ead1a13772e | [
"Apache-2.0"
] | null | null | null | elastalert/alerts.py | archfz/elastalert2 | ec6160b77a3d891675e3cea4b3b58ead1a13772e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import copy
import datetime
import json
import os
import subprocess
import sys
import time
import uuid
import warnings
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage
from email.utils import formatdate
from smtplib import SMTP
from smtplib import SMTP_SSL
from smtplib import SMTPAuthenticationError
from smtplib import SMTPException
from socket import error
import boto3
import requests
import stomp
from exotel import Exotel
from jira.client import JIRA
from jira.exceptions import JIRAError
from requests.auth import HTTPProxyAuth
from requests.exceptions import RequestException
from staticconf.loader import yaml_loader
from texttable import Texttable
from twilio.base.exceptions import TwilioRestException
from twilio.rest import Client as TwilioClient
from .util import EAException
from .util import elastalert_logger
from .util import lookup_es_key
from .util import pretty_ts
from .util import resolve_string
from .util import ts_now
from .util import ts_to_dt
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetime-like objects via ``isoformat()``."""
    def default(self, obj):
        # Anything exposing isoformat() (datetime, date, time) renders as
        # its ISO-8601 string; everything else falls back to the base
        # encoder, which raises TypeError for unserializable objects.
        if not hasattr(obj, 'isoformat'):
            return json.JSONEncoder.default(self, obj)
        return obj.isoformat()
class BasicMatchString(object):
    """ Creates a string containing fields in match for the given rule.

    Instances are rendered with ``str()``: the result combines the rule
    name (or custom alert text), the rule type's match description, any
    top-count summaries and the raw match fields.
    """

    def __init__(self, rule, match):
        """
        :param rule: The rule configuration dictionary.
        :param match: The match dictionary (fields of the matched event).
        """
        self.rule = rule
        self.match = match

    def _ensure_new_line(self):
        # Guarantee the accumulated text ends with exactly one blank line
        # so the sections are visually separated.
        while self.text[-2:] != '\n\n':
            self.text += '\n'

    def _add_custom_alert_text(self):
        # Render the rule's alert_text, formatted via jinja, positional
        # args (alert_text_args) or keyword args (alert_text_kw).
        missing = self.rule.get('alert_missing_value', '<MISSING VALUE>')
        alert_text = str(self.rule.get('alert_text', ''))
        if 'alert_text_jinja' == self.rule.get('alert_text_type'):
            # Top fields are accessible via `{{field_name}}` or `{{jinja_root_name['field_name']}}`
            # `jinja_root_name` dict is useful when accessing *fields with dots in their keys*,
            # as Jinja treat dot as a nested field.
            alert_text = self.rule.get("jinja_template").render(**self.match,
                                                                **{self.rule['jinja_root_name']: self.match})
        elif 'alert_text_args' in self.rule:
            alert_text_args = self.rule.get('alert_text_args')
            alert_text_values = [lookup_es_key(self.match, arg) for arg in alert_text_args]
            # Support referencing other top-level rule properties
            # This technically may not work if there is a top-level rule property with the same name
            # as an es result key, since it would have been matched in the lookup_es_key call above
            for i, text_value in enumerate(alert_text_values):
                if text_value is None:
                    alert_value = self.rule.get(alert_text_args[i])
                    if alert_value:
                        alert_text_values[i] = alert_value
            alert_text_values = [missing if val is None else val for val in alert_text_values]
            alert_text = alert_text.format(*alert_text_values)
        elif 'alert_text_kw' in self.rule:
            kw = {}
            for name, kw_name in list(self.rule.get('alert_text_kw').items()):
                val = lookup_es_key(self.match, name)
                # Support referencing other top-level rule properties
                # This technically may not work if there is a top-level rule property with the same name
                # as an es result key, since it would have been matched in the lookup_es_key call above
                if val is None:
                    val = self.rule.get(name)
                kw[kw_name] = missing if val is None else val
            alert_text = alert_text.format(**kw)
        self.text += alert_text

    def _add_rule_text(self):
        # Delegate to the rule type's own human-readable match description.
        self.text += self.rule['type'].get_match_str(self.match)

    def _add_top_counts(self):
        # Render each 'top_events_<key>' aggregation as a "<term>: <count>"
        # list, most frequent first.
        for key, counts in list(self.match.items()):
            if key.startswith('top_events_'):
                self.text += '%s:\n' % (key[11:])
                top_events = list(counts.items())
                if not top_events:
                    self.text += 'No events found.\n'
                else:
                    top_events.sort(key=lambda x: x[1], reverse=True)
                    for term, count in top_events:
                        self.text += '%s: %s\n' % (term, count)
                self.text += '\n'

    def _add_match_items(self):
        # Append each match field as "key: value", sorted by key; fields
        # already summarized by _add_top_counts are skipped.
        match_items = list(self.match.items())
        match_items.sort(key=lambda x: x[0])
        for key, value in match_items:
            if key.startswith('top_events_'):
                continue
            value_str = str(value)
            # Turn literal backslash-n sequences into real newlines.
            # Fix: the original discarded the result of str.replace (a
            # no-op since strings are immutable); assign it back.
            value_str = value_str.replace('\\n', '\n')
            if type(value) in [list, dict]:
                try:
                    value_str = self._pretty_print_as_json(value)
                except TypeError:
                    # Non serializable object, fallback to str
                    pass
            self.text += '%s: %s\n' % (key, value_str)

    def _pretty_print_as_json(self, blob):
        # Render nested structures as indented, key-sorted JSON.
        try:
            return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, ensure_ascii=False)
        except UnicodeDecodeError:
            # The blob contains non-unicode text; retry with ASCII escaping.
            # Fix: Python 3's json.dumps has no 'encoding' argument, so the
            # old encoding='Latin-1' fallback raised TypeError instead of
            # producing output.
            return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, ensure_ascii=True)

    def __str__(self):
        self.text = ''
        if 'alert_text' not in self.rule:
            self.text += self.rule['name'] + '\n\n'
        self._add_custom_alert_text()
        self._ensure_new_line()
        if self.rule.get('alert_text_type') != 'alert_text_only' and self.rule.get('alert_text_type') != 'alert_text_jinja':
            self._add_rule_text()
            self._ensure_new_line()
            if self.rule.get('top_count_keys'):
                self._add_top_counts()
            if self.rule.get('alert_text_type') != 'exclude_fields':
                self._add_match_items()
        return self.text
class JiraFormattedMatchString(BasicMatchString):
    """Match string variant that renders the match fields as a single
    {code}-wrapped JSON blob, suitable for JIRA issue bodies."""
    def _add_match_items(self):
        # Collect every non-'top_events_' field and append them as one
        # preformatted JSON block.
        fields = {k: v for k, v in self.match.items()
                  if not k.startswith('top_events_')}
        json_blob = self._pretty_print_as_json(fields)
        self.text += '{{code}}{0}{{code}}'.format(json_blob)
class Alerter(object):
""" Base class for types of alerts.
:param rule: The rule configuration.
"""
required_options = frozenset([])
    def __init__(self, rule):
        """:param rule: The rule configuration dictionary for this alerter."""
        self.rule = rule
        # pipeline object is created by ElastAlerter.send_alert()
        # and attached to each alerters used by a rule before calling alert()
        self.pipeline = None
        # Expand $property$ references inside the rule configuration in place.
        self.resolve_rule_references(self.rule)
    def resolve_rule_references(self, root):
        """Recursively walk ``root`` (a nested dict/list) and replace scalar
        ``$name$`` references with the corresponding top-level rule values."""
        # Support referencing other top-level rule properties to avoid redundant copy/paste
        if type(root) == list:
            # Make a copy since we may be modifying the contents of the structure we're walking
            for i, item in enumerate(copy.copy(root)):
                if type(item) == dict or type(item) == list:
                    self.resolve_rule_references(root[i])
                else:
                    root[i] = self.resolve_rule_reference(item)
        elif type(root) == dict:
            # Make a copy since we may be modifying the contents of the structure we're walking
            for key, value in root.copy().items():
                if type(value) == dict or type(value) == list:
                    self.resolve_rule_references(root[key])
                else:
                    root[key] = self.resolve_rule_reference(value)
def resolve_rule_reference(self, value):
strValue = str(value)
if strValue.startswith('$') and strValue.endswith('$') and strValue[1:-1] in self.rule:
if type(value) == int:
return int(self.rule[strValue[1:-1]])
else:
return self.rule[strValue[1:-1]]
else:
return value
    def alert(self, match):
        """ Send an alert. Match is a dictionary of information about the alert.

        Subclasses must override this.

        :param match: A dictionary of relevant information to the alert.
        """
        raise NotImplementedError()
    def get_info(self):
        """ Returns a dictionary of data related to this alert. At minimum, this should contain
        a field type corresponding to the type of Alerter. Subclasses override this to
        describe themselves. """
        return {'type': 'Unknown'}
    def create_title(self, matches):
        """ Creates custom alert title to be used, e.g. as an e-mail subject or JIRA issue summary.

        Uses the rule's 'alert_subject' template when present, otherwise a
        default title.

        :param matches: A list of dictionaries of relevant information to the alert.
        """
        if 'alert_subject' in self.rule:
            return self.create_custom_title(matches)
        return self.create_default_title(matches)
    def create_custom_title(self, matches):
        """Format the rule's 'alert_subject' template, substituting values
        looked up from the first match (or top-level rule properties), and
        truncate to 'alert_subject_max_len' (default 2048)."""
        alert_subject = str(self.rule['alert_subject'])
        alert_subject_max_len = int(self.rule.get('alert_subject_max_len', 2048))
        if 'alert_subject_args' in self.rule:
            alert_subject_args = self.rule['alert_subject_args']
            alert_subject_values = [lookup_es_key(matches[0], arg) for arg in alert_subject_args]
            # Support referencing other top-level rule properties
            # This technically may not work if there is a top-level rule property with the same name
            # as an es result key, since it would have been matched in the lookup_es_key call above
            for i, subject_value in enumerate(alert_subject_values):
                if subject_value is None:
                    alert_value = self.rule.get(alert_subject_args[i])
                    if alert_value:
                        alert_subject_values[i] = alert_value
            # Replace any still-unresolved values with the missing marker.
            missing = self.rule.get('alert_missing_value', '<MISSING VALUE>')
            alert_subject_values = [missing if val is None else val for val in alert_subject_values]
            alert_subject = alert_subject.format(*alert_subject_values)
        if len(alert_subject) > alert_subject_max_len:
            alert_subject = alert_subject[:alert_subject_max_len]
        return alert_subject
def create_alert_body(self, matches):
body = self.get_aggregation_summary_text(matches)
if self.rule.get('alert_text_type') != 'aggregation_summary_only':
for match in matches:
body += str(BasicMatchString(self.rule, match))
# Separate text of aggregated alerts with dashes
if len(matches) > 1:
body += '\n----------------------------------------\n'
return body
def get_aggregation_summary_text__maximum_width(self):
"""Get maximum width allowed for summary text."""
return 80
def get_aggregation_summary_text(self, matches):
text = ''
if 'aggregation' in self.rule and 'summary_table_fields' in self.rule:
text = self.rule.get('summary_prefix', '')
summary_table_fields = self.rule['summary_table_fields']
if not isinstance(summary_table_fields, list):
summary_table_fields = [summary_table_fields]
# Include a count aggregation so that we can see at a glance how many of each aggregation_key were encountered
summary_table_fields_with_count = summary_table_fields + ['count']
text += "Aggregation resulted in the following data for summary_table_fields ==> {0}:\n\n".format(
summary_table_fields_with_count
)
text_table = Texttable(max_width=self.get_aggregation_summary_text__maximum_width())
text_table.header(summary_table_fields_with_count)
# Format all fields as 'text' to avoid long numbers being shown as scientific notation
text_table.set_cols_dtype(['t' for i in summary_table_fields_with_count])
match_aggregation = {}
# Maintain an aggregate count for each unique key encountered in the aggregation period
for match in matches:
key_tuple = tuple([str(lookup_es_key(match, key)) for key in summary_table_fields])
if key_tuple not in match_aggregation:
match_aggregation[key_tuple] = 1
else:
match_aggregation[key_tuple] = match_aggregation[key_tuple] + 1
for keys, count in match_aggregation.items():
text_table.add_row([key for key in keys] + [count])
text += text_table.draw() + '\n\n'
text += self.rule.get('summary_prefix', '')
return str(text)
def create_default_title(self, matches):
return self.rule['name']
    def get_account(self, account_file):
        """ Gets the username and password from an account file.

        Sets ``self.user`` and ``self.password`` as a side effect.

        :param account_file: Path to the file which contains user and password information.
        It can be either an absolute file path or one that is relative to the given rule.
        :raises EAException: if the YAML file lacks a 'user' or 'password' field.
        """
        if os.path.isabs(account_file):
            account_file_path = account_file
        else:
            # Relative paths are resolved against the directory of the rule file
            account_file_path = os.path.join(os.path.dirname(self.rule['rule_file']), account_file)
        account_conf = yaml_loader(account_file_path)
        if 'user' not in account_conf or 'password' not in account_conf:
            raise EAException('Account file must have user and password fields')
        self.user = account_conf['user']
        self.password = account_conf['password']
class StompAlerter(Alerter):
    """ The stomp alerter publishes alerts via stomp to a broker. """
    required_options = frozenset(
        ['stomp_hostname', 'stomp_hostport', 'stomp_login', 'stomp_password'])
    def alert(self, matches):
        """Publish a JSON summary of the matches to the configured STOMP destination.

        :param matches: A list of match dictionaries.
        """
        alerts = []
        qk = self.rule.get('query_key', None)
        fullmessage = {}
        for match in matches:
            if qk is not None:
                resmatch = lookup_es_key(match, qk)
            else:
                resmatch = None
            if resmatch is not None:
                elastalert_logger.info(
                    'Alert for %s, %s at %s:' % (self.rule['name'], resmatch, lookup_es_key(match, self.rule['timestamp_field'])))
                alerts.append(
                    'Alert for %s, %s at %s:' % (self.rule['name'], resmatch, lookup_es_key(
                        match, self.rule['timestamp_field']))
                )
                # NOTE(review): overwritten on every iteration, so only the last
                # match's value survives in the published message -- confirm intended.
                fullmessage['match'] = resmatch
            else:
                elastalert_logger.info('Rule %s generated an alert at %s:' % (
                    self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])))
                alerts.append(
                    'Rule %s generated an alert at %s:' % (self.rule['name'], lookup_es_key(
                        match, self.rule['timestamp_field']))
                )
                fullmessage['match'] = lookup_es_key(
                    match, self.rule['timestamp_field'])
            elastalert_logger.info(str(BasicMatchString(self.rule, match)))
        fullmessage['alerts'] = alerts
        fullmessage['rule'] = self.rule['name']
        fullmessage['rule_file'] = self.rule['rule_file']
        # NOTE(review): 'match' here is the loop variable, i.e. the last match only
        fullmessage['matching'] = str(BasicMatchString(self.rule, match))
        fullmessage['alertDate'] = datetime.datetime.now(
        ).strftime("%Y-%m-%d %H:%M:%S")
        fullmessage['body'] = self.create_alert_body(matches)
        fullmessage['matches'] = matches
        # Connection settings; defaults apply even though the hostname/port/login
        # options are declared in required_options
        self.stomp_hostname = self.rule.get('stomp_hostname', 'localhost')
        self.stomp_hostport = self.rule.get('stomp_hostport', '61613')
        self.stomp_login = self.rule.get('stomp_login', 'admin')
        self.stomp_password = self.rule.get('stomp_password', 'admin')
        self.stomp_destination = self.rule.get(
            'stomp_destination', '/queue/ALERT')
        self.stomp_ssl = self.rule.get('stomp_ssl', False)
        conn = stomp.Connection([(self.stomp_hostname, self.stomp_hostport)], use_ssl=self.stomp_ssl)
        conn.connect(self.stomp_login, self.stomp_password)
        # Ensures that the CONNECTED frame is received otherwise, the disconnect call will fail.
        time.sleep(1)
        conn.send(self.stomp_destination, json.dumps(fullmessage))
        conn.disconnect()
    def get_info(self):
        return {'type': 'stomp'}
class DebugAlerter(Alerter):
    """ The debug alerter uses a Python logger (by default, alerting to terminal). """
    def alert(self, matches):
        """Log every match at INFO level instead of notifying an external service."""
        query_key = self.rule.get('query_key', None)
        for match in matches:
            match_time = lookup_es_key(match, self.rule['timestamp_field'])
            if query_key in match:
                elastalert_logger.info(
                    'Alert for %s, %s at %s:' % (self.rule['name'], match[query_key], match_time))
            else:
                elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], match_time))
            elastalert_logger.info(str(BasicMatchString(self.rule, match)))
    def get_info(self):
        """Identify this alerter type."""
        return {'type': 'debug'}
class EmailAlerter(Alerter):
    """ Sends an email alert over SMTP.

    Supports plain-text or HTML bodies (with optional inline images referenced
    by Content-ID), SSL/STARTTLS connections, SMTP authentication and
    cc/bcc recipients.
    """
    required_options = frozenset(['email'])
    def __init__(self, *args):
        super(EmailAlerter, self).__init__(*args)
        self.assets_dir = self.rule.get('assets_dir', '/tmp')
        # Maps Content-ID keys to image file names (resolved under assets_dir)
        self.images_dictionary = dict(zip(self.rule.get('email_image_keys', []), self.rule.get('email_image_values', [])))
        self.smtp_host = self.rule.get('smtp_host', 'localhost')
        self.smtp_ssl = self.rule.get('smtp_ssl', False)
        self.from_addr = self.rule.get('from_addr', 'ElastAlert')
        self.smtp_port = self.rule.get('smtp_port', 25)
        if self.rule.get('smtp_auth_file'):
            # Populates self.user / self.password
            self.get_account(self.rule['smtp_auth_file'])
        self.smtp_key_file = self.rule.get('smtp_key_file')
        self.smtp_cert_file = self.rule.get('smtp_cert_file')
        # Convert email to a list if it isn't already
        if isinstance(self.rule['email'], str):
            self.rule['email'] = [self.rule['email']]
        # If there is a cc then also convert it a list if it isn't
        cc = self.rule.get('cc')
        if cc and isinstance(cc, str):
            self.rule['cc'] = [self.rule['cc']]
        # If there is a bcc then also convert it to a list if it isn't
        bcc = self.rule.get('bcc')
        if bcc and isinstance(bcc, str):
            self.rule['bcc'] = [self.rule['bcc']]
        add_suffix = self.rule.get('email_add_domain')
        if add_suffix and not add_suffix.startswith('@'):
            self.rule['email_add_domain'] = '@' + add_suffix
    def alert(self, matches):
        """Render the alert body and send it to all configured recipients.

        :param matches: A list of match dictionaries.
        :raises EAException: on SMTP connection or authentication failure.
        """
        body = self.create_alert_body(matches)
        # Add JIRA ticket if it exists
        if self.pipeline is not None and 'jira_ticket' in self.pipeline:
            url = '%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket'])
            body += '\nJIRA ticket: %s' % (url)
        to_addr = self.rule['email']
        if 'email_from_field' in self.rule:
            # Recipient may come from the match itself; 'email_add_domain' turns
            # bare usernames into full addresses
            recipient = lookup_es_key(matches[0], self.rule['email_from_field'])
            if isinstance(recipient, str):
                if '@' in recipient:
                    to_addr = [recipient]
                elif 'email_add_domain' in self.rule:
                    to_addr = [recipient + self.rule['email_add_domain']]
            elif isinstance(recipient, list):
                to_addr = recipient
                if 'email_add_domain' in self.rule:
                    to_addr = [name + self.rule['email_add_domain'] for name in to_addr]
        if self.rule.get('email_format') == 'html':
            email_msg = MIMEMultipart()
            msgText = MIMEText(body, 'html', _charset='UTF-8')
            email_msg.attach(msgText)
            for image_key in self.images_dictionary:
                # BUG FIX: use a context manager so the file handle is closed
                # even if MIMEImage construction raises
                with open(os.path.join(self.assets_dir, self.images_dictionary[image_key]), 'rb') as fp:
                    img = MIMEImage(fp.read())
                img.add_header('Content-ID', '<{}>'.format(image_key))
                email_msg.attach(img)
        else:
            email_msg = MIMEText(body, _charset='UTF-8')
        email_msg['Subject'] = self.create_title(matches)
        email_msg['To'] = ', '.join(to_addr)
        email_msg['From'] = self.from_addr
        email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To'])
        email_msg['Date'] = formatdate()
        if self.rule.get('cc'):
            email_msg['CC'] = ','.join(self.rule['cc'])
            to_addr = to_addr + self.rule['cc']
        if self.rule.get('bcc'):
            # bcc addresses only go on the envelope, never into a header
            to_addr = to_addr + self.rule['bcc']
        try:
            if self.smtp_ssl:
                if self.smtp_port:
                    self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file)
                else:
                    self.smtp = SMTP_SSL(self.smtp_host, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file)
            else:
                if self.smtp_port:
                    self.smtp = SMTP(self.smtp_host, self.smtp_port)
                else:
                    self.smtp = SMTP(self.smtp_host)
                self.smtp.ehlo()
                if self.smtp.has_extn('STARTTLS'):
                    self.smtp.starttls(keyfile=self.smtp_key_file, certfile=self.smtp_cert_file)
            if 'smtp_auth_file' in self.rule:
                self.smtp.login(self.user, self.password)
        # BUG FIX: SMTPAuthenticationError is a subclass of SMTPException, so it
        # must be caught first -- previously this handler was unreachable.
        except SMTPAuthenticationError as e:
            raise EAException("SMTP username/password rejected: %s" % (e))
        except (SMTPException, error) as e:
            raise EAException("Error connecting to SMTP host: %s" % (e))
        self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string())
        self.smtp.quit()
        elastalert_logger.info("Sent email to %s" % (to_addr))
    def create_default_title(self, matches):
        """Subject: 'ElastAlert: <rule name>[ - <query_key value>]'."""
        subject = 'ElastAlert: %s' % (self.rule['name'])
        # If the rule has a query_key, add that value plus timestamp to subject
        if 'query_key' in self.rule:
            qk = matches[0].get(self.rule['query_key'])
            if qk:
                subject += ' - %s' % (qk)
        return subject
    def get_info(self):
        return {'type': 'email',
                'recipients': self.rule['email']}
class JiraAlerter(Alerter):
    """ Creates a Jira ticket for each alert.

    Can also 'bump' (comment on / transition) a recent matching ticket
    instead of opening a new one, depending on the jira_bump_* options.
    """
    required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype'])
    # Maintain a static set of built-in fields that we explicitly know how to set
    # For anything else, we will do best-effort and try to set a string value
    known_field_list = [
        'jira_account_file',
        'jira_assignee',
        'jira_bump_after_inactivity',
        'jira_bump_in_statuses',
        'jira_bump_not_in_statuses',
        'jira_bump_only',
        'jira_bump_tickets',
        'jira_component',
        'jira_components',
        'jira_description',
        'jira_ignore_in_title',
        'jira_issuetype',
        'jira_label',
        'jira_labels',
        'jira_max_age',
        'jira_priority',
        'jira_project',
        'jira_server',
        'jira_transition_to',
        'jira_watchers',
    ]
    # Some built-in jira types that can be used as custom fields require special handling
    # Here is a sample of one of them:
    # {"id":"customfield_12807","name":"My Custom Field","custom":true,"orderable":true,"navigable":true,"searchable":true,
    # "clauseNames":["cf[12807]","My Custom Field"],"schema":{"type":"array","items":"string",
    # "custom":"com.atlassian.jira.plugin.system.customfieldtypes:multiselect","customId":12807}}
    # There are likely others that will need to be updated on a case-by-case basis
    custom_string_types_with_special_handling = [
        'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes',
        'com.atlassian.jira.plugin.system.customfieldtypes:multiselect',
        'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons',
    ]
def __init__(self, rule):
super(JiraAlerter, self).__init__(rule)
self.server = self.rule['jira_server']
self.get_account(self.rule['jira_account_file'])
self.project = self.rule['jira_project']
self.issue_type = self.rule['jira_issuetype']
# Deferred settings refer to values that can only be resolved when a match
# is found and as such loading them will be delayed until we find a match
self.deferred_settings = []
# We used to support only a single component. This allows us to maintain backwards compatibility
# while also giving the user-facing API a more representative name
self.components = self.rule.get('jira_components', self.rule.get('jira_component'))
# We used to support only a single label. This allows us to maintain backwards compatibility
# while also giving the user-facing API a more representative name
self.labels = self.rule.get('jira_labels', self.rule.get('jira_label'))
self.description = self.rule.get('jira_description', '')
self.assignee = self.rule.get('jira_assignee')
self.max_age = self.rule.get('jira_max_age', 30)
self.priority = self.rule.get('jira_priority')
self.bump_tickets = self.rule.get('jira_bump_tickets', False)
self.bump_not_in_statuses = self.rule.get('jira_bump_not_in_statuses')
self.bump_in_statuses = self.rule.get('jira_bump_in_statuses')
self.bump_after_inactivity = self.rule.get('jira_bump_after_inactivity', 0)
self.bump_only = self.rule.get('jira_bump_only', False)
self.transition = self.rule.get('jira_transition_to', False)
self.watchers = self.rule.get('jira_watchers')
self.client = None
if self.bump_in_statuses and self.bump_not_in_statuses:
msg = 'Both jira_bump_in_statuses (%s) and jira_bump_not_in_statuses (%s) are set.' % \
(','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses))
intersection = list(set(self.bump_in_statuses) & set(self.bump_in_statuses))
if intersection:
msg = '%s Both have common statuses of (%s). As such, no tickets will ever be found.' % (
msg, ','.join(intersection))
msg += ' This should be simplified to use only one or the other.'
elastalert_logger.warning(msg)
self.reset_jira_args()
try:
self.client = JIRA(self.server, basic_auth=(self.user, self.password))
self.get_priorities()
self.jira_fields = self.client.fields()
self.get_arbitrary_fields()
except JIRAError as e:
# JIRAError may contain HTML, pass along only first 1024 chars
raise EAException("Error connecting to JIRA: %s" % (str(e)[:1024])).with_traceback(sys.exc_info()[2])
self.set_priority()
    def set_priority(self):
        """Translate the configured priority index into a Jira priority id on jira_args.

        Logs (rather than raises) when the index is unknown, so a bad priority
        does not prevent ticket creation.
        """
        try:
            if self.priority is not None and self.client is not None:
                self.jira_args['priority'] = {'id': self.priority_ids[self.priority]}
        except KeyError:
            elastalert_logger.error("Priority %s not found. Valid priorities are %s" % (self.priority, list(self.priority_ids.keys())))
def reset_jira_args(self):
self.jira_args = {'project': {'key': self.project},
'issuetype': {'name': self.issue_type}}
if self.components:
# Support single component or list
if type(self.components) != list:
self.jira_args['components'] = [{'name': self.components}]
else:
self.jira_args['components'] = [{'name': component} for component in self.components]
if self.labels:
# Support single label or list
if type(self.labels) != list:
self.labels = [self.labels]
self.jira_args['labels'] = self.labels
if self.watchers:
# Support single watcher or list
if type(self.watchers) != list:
self.watchers = [self.watchers]
if self.assignee:
self.jira_args['assignee'] = {'name': self.assignee}
self.set_priority()
def set_jira_arg(self, jira_field, value, fields):
# Remove the jira_ part. Convert underscores to spaces
normalized_jira_field = jira_field[5:].replace('_', ' ').lower()
# All jira fields should be found in the 'id' or the 'name' field. Therefore, try both just in case
for identifier in ['name', 'id']:
field = next((f for f in fields if normalized_jira_field == f[identifier].replace('_', ' ').lower()), None)
if field:
break
if not field:
# Log a warning to ElastAlert saying that we couldn't find that type?
# OR raise and fail to load the alert entirely? Probably the latter...
raise Exception("Could not find a definition for the jira field '{0}'".format(normalized_jira_field))
arg_name = field['id']
# Check the schema information to decide how to set the value correctly
# If the schema information is not available, raise an exception since we don't know how to set it
# Note this is only the case for two built-in types, id: issuekey and id: thumbnail
if not ('schema' in field or 'type' in field['schema']):
raise Exception("Could not determine schema information for the jira field '{0}'".format(normalized_jira_field))
arg_type = field['schema']['type']
# Handle arrays of simple types like strings or numbers
if arg_type == 'array':
# As a convenience, support the scenario wherein the user only provides
# a single value for a multi-value field e.g. jira_labels: Only_One_Label
if type(value) != list:
value = [value]
array_items = field['schema']['items']
# Simple string types
if array_items in ['string', 'date', 'datetime']:
# Special case for multi-select custom types (the JIRA metadata says that these are strings, but
# in reality, they are required to be provided as an object.
if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling:
self.jira_args[arg_name] = [{'value': v} for v in value]
else:
self.jira_args[arg_name] = value
elif array_items == 'number':
self.jira_args[arg_name] = [int(v) for v in value]
# Also attempt to handle arrays of complex types that have to be passed as objects with an identifier 'key'
elif array_items == 'option':
self.jira_args[arg_name] = [{'value': v} for v in value]
else:
# Try setting it as an object, using 'name' as the key
# This may not work, as the key might actually be 'key', 'id', 'value', or something else
# If it works, great! If not, it will manifest itself as an API error that will bubble up
self.jira_args[arg_name] = [{'name': v} for v in value]
# Handle non-array types
else:
# Simple string types
if arg_type in ['string', 'date', 'datetime']:
# Special case for custom types (the JIRA metadata says that these are strings, but
# in reality, they are required to be provided as an object.
if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling:
self.jira_args[arg_name] = {'value': value}
else:
self.jira_args[arg_name] = value
# Number type
elif arg_type == 'number':
self.jira_args[arg_name] = int(value)
elif arg_type == 'option':
self.jira_args[arg_name] = {'value': value}
# Complex type
else:
self.jira_args[arg_name] = {'name': value}
def get_arbitrary_fields(self):
# Clear jira_args
self.reset_jira_args()
for jira_field, value in self.rule.items():
# If we find a field that is not covered by the set that we are aware of, it means it is either:
# 1. A built-in supported field in JIRA that we don't have on our radar
# 2. A custom field that a JIRA admin has configured
if jira_field.startswith('jira_') and jira_field not in self.known_field_list and str(value)[:1] != '#':
self.set_jira_arg(jira_field, value, self.jira_fields)
if jira_field.startswith('jira_') and jira_field not in self.known_field_list and str(value)[:1] == '#':
self.deferred_settings.append(jira_field)
def get_priorities(self):
""" Creates a mapping of priority index to id. """
priorities = self.client.priorities()
self.priority_ids = {}
for x in range(len(priorities)):
self.priority_ids[x] = priorities[x].id
def set_assignee(self, assignee):
self.assignee = assignee
if assignee:
self.jira_args['assignee'] = {'name': assignee}
elif 'assignee' in self.jira_args:
self.jira_args.pop('assignee')
    def find_existing_ticket(self, matches):
        """Search Jira for a recent matching ticket that can be bumped.

        Builds a JQL query from the (stripped) alert title, the max_age window
        and the jira_bump_[not_]in_statuses filters.

        :param matches: A list of match dictionaries.
        :return: The first matching issue, or None when nothing was found or
            the search failed.
        """
        # Default title, get stripped search version
        if 'alert_subject' not in self.rule:
            title = self.create_default_title(matches, True)
        else:
            title = self.create_title(matches)
        if 'jira_ignore_in_title' in self.rule:
            title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '')
        # This is necessary for search to work. Other special characters and dashes
        # directly adjacent to words appear to be ok
        title = title.replace(' - ', ' ')
        title = title.replace('\\', '\\\\')
        date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d')
        jql = 'project=%s AND summary~"%s" and created >= "%s"' % (self.project, title, date)
        # Statuses containing spaces must be quoted inside the JQL IN (...) list
        if self.bump_in_statuses:
            jql = '%s and status in (%s)' % (jql, ','.join(["\"%s\"" % status if ' ' in status else status for status
                                                            in self.bump_in_statuses]))
        if self.bump_not_in_statuses:
            jql = '%s and status not in (%s)' % (jql, ','.join(["\"%s\"" % status if ' ' in status else status
                                                                for status in self.bump_not_in_statuses]))
        try:
            issues = self.client.search_issues(jql)
        except JIRAError as e:
            elastalert_logger.exception("Error while searching for JIRA ticket using jql '%s': %s" % (jql, e))
            return None
        if len(issues):
            return issues[0]
def comment_on_ticket(self, ticket, match):
text = str(JiraFormattedMatchString(self.rule, match))
timestamp = pretty_ts(lookup_es_key(match, self.rule['timestamp_field']))
comment = "This alert was triggered again at %s\n%s" % (timestamp, text)
self.client.add_comment(ticket, comment)
def transition_ticket(self, ticket):
transitions = self.client.transitions(ticket)
for t in transitions:
if t['name'] == self.transition:
self.client.transition_issue(ticket, t['id'])
    def alert(self, matches):
        """Create a Jira ticket for the matches, or bump an existing one.

        When jira_bump_tickets is set and a recent matching ticket exists, the
        matches are added as comments (and the ticket optionally transitioned)
        instead of opening a new ticket. jira_bump_only suppresses creation
        entirely when no ticket was bumped.

        :param matches: A list of match dictionaries.
        :raises EAException: if ticket creation fails.
        """
        # Reset arbitrary fields to pick up changes
        self.get_arbitrary_fields()
        if len(self.deferred_settings) > 0:
            fields = self.client.fields()
            for jira_field in self.deferred_settings:
                # '#'-prefixed rule values reference a key in the match document
                value = lookup_es_key(matches[0], self.rule[jira_field][1:])
                self.set_jira_arg(jira_field, value, fields)
        title = self.create_title(matches)
        if self.bump_tickets:
            ticket = self.find_existing_ticket(matches)
            if ticket:
                # Skip bumping if the ticket was updated too recently
                inactivity_datetime = ts_now() - datetime.timedelta(days=self.bump_after_inactivity)
                if ts_to_dt(ticket.fields.updated) >= inactivity_datetime:
                    if self.pipeline is not None:
                        self.pipeline['jira_ticket'] = None
                        self.pipeline['jira_server'] = self.server
                    return None
                elastalert_logger.info('Commenting on existing ticket %s' % (ticket.key))
                for match in matches:
                    try:
                        self.comment_on_ticket(ticket, match)
                    except JIRAError as e:
                        elastalert_logger.exception("Error while commenting on ticket %s: %s" % (ticket, e))
                    if self.labels:
                        for label in self.labels:
                            try:
                                ticket.fields.labels.append(label)
                            except JIRAError as e:
                                elastalert_logger.exception("Error while appending labels to ticket %s: %s" % (ticket, e))
                if self.transition:
                    elastalert_logger.info('Transitioning existing ticket %s' % (ticket.key))
                    try:
                        self.transition_ticket(ticket)
                    except JIRAError as e:
                        elastalert_logger.exception("Error while transitioning ticket %s: %s" % (ticket, e))
                if self.pipeline is not None:
                    # Expose the bumped ticket to downstream alerters (e.g. email)
                    self.pipeline['jira_ticket'] = ticket
                    self.pipeline['jira_server'] = self.server
                return None
        if self.bump_only:
            return None
        self.jira_args['summary'] = title
        self.jira_args['description'] = self.create_alert_body(matches)
        try:
            self.issue = self.client.create_issue(**self.jira_args)
            # You can not add watchers on initial creation. Only as a follow-up action
            if self.watchers:
                for watcher in self.watchers:
                    try:
                        self.client.add_watcher(self.issue.key, watcher)
                    except Exception as ex:
                        # Re-raise the exception, preserve the stack-trace, and give some
                        # context as to which watcher failed to be added
                        raise Exception(
                            "Exception encountered when trying to add '{0}' as a watcher. Does the user exist?\n{1}" .format(
                                watcher,
                                ex
                            )).with_traceback(sys.exc_info()[2])
        except JIRAError as e:
            raise EAException("Error creating JIRA ticket using jira_args (%s): %s" % (self.jira_args, e))
        elastalert_logger.info("Opened Jira ticket: %s" % (self.issue))
        if self.pipeline is not None:
            self.pipeline['jira_ticket'] = self.issue
            self.pipeline['jira_server'] = self.server
def create_alert_body(self, matches):
body = self.description + '\n'
body += self.get_aggregation_summary_text(matches)
if self.rule.get('alert_text_type') != 'aggregation_summary_only':
for match in matches:
body += str(JiraFormattedMatchString(self.rule, match))
if len(matches) > 1:
body += '\n----------------------------------------\n'
return body
def get_aggregation_summary_text(self, matches):
text = super(JiraAlerter, self).get_aggregation_summary_text(matches)
if text:
text = '{{noformat}}{0}{{noformat}}'.format(text)
return text
    def create_default_title(self, matches, for_search=False):
        """Build an 'ElastAlert: ...' summary for the ticket.

        :param matches: A list of match dictionaries.
        :param for_search: When True, omit the timestamp/count suffixes so the
            title can be used in a JQL summary~ search.
        """
        # If there is a query_key, use that in the title
        if 'query_key' in self.rule and lookup_es_key(matches[0], self.rule['query_key']):
            title = 'ElastAlert: %s matched %s' % (lookup_es_key(matches[0], self.rule['query_key']), self.rule['name'])
        else:
            title = 'ElastAlert: %s' % (self.rule['name'])
        if for_search:
            return title
        timestamp = matches[0].get(self.rule['timestamp_field'])
        if timestamp:
            title += ' - %s' % (pretty_ts(timestamp, self.rule.get('use_local_time')))
        # Add count for spikes
        count = matches[0].get('spike_count')
        if count:
            title += ' - %s+ events' % (count)
        return title
    def get_info(self):
        """Identify this alerter type for status reporting."""
        return {'type': 'jira'}
class CommandAlerter(Alerter):
    """Runs an arbitrary local command for each alert, optionally piping the
    matches (as JSON) or the rendered alert text to its stdin."""
    required_options = set(['command'])
    def __init__(self, *args):
        super(CommandAlerter, self).__init__(*args)
        self.last_command = []
        self.shell = False
        # A plain string command is run through the shell; a list is exec'd directly
        if isinstance(self.rule['command'], str):
            self.shell = True
            if '%' in self.rule['command']:
                # %-style substitution into a shell string allows command injection
                elastalert_logger.warning('Warning! You could be vulnerable to shell injection!')
            self.rule['command'] = [self.rule['command']]
    def alert(self, matches):
        """Format the configured command against the first match and execute it.

        :param matches: A list of match dictionaries.
        :raises EAException: if formatting fails, the command cannot be run, or
            (with fail_on_non_zero_exit) it exits non-zero.
        """
        # Format the command and arguments
        try:
            command = [resolve_string(command_arg, matches[0]) for command_arg in self.rule['command']]
            self.last_command = command
        except KeyError as e:
            raise EAException("Error formatting command: %s" % (e))
        # Run command and pipe data
        try:
            subp = subprocess.Popen(command, stdin=subprocess.PIPE, shell=self.shell)
            if self.rule.get('pipe_match_json'):
                match_json = json.dumps(matches, cls=DateTimeEncoder) + '\n'
                stdout, stderr = subp.communicate(input=match_json.encode())
            elif self.rule.get('pipe_alert_text'):
                alert_text = self.create_alert_body(matches)
                stdout, stderr = subp.communicate(input=alert_text.encode())
            # NOTE(review): when neither pipe_* option is set, the child's stdin
            # pipe stays open and the process is never waited on (unless
            # fail_on_non_zero_exit is set) -- this can leave zombies; confirm intent.
            if self.rule.get("fail_on_non_zero_exit", False) and subp.wait():
                raise EAException("Non-zero exit code while running command %s" % (' '.join(command)))
        except OSError as e:
            raise EAException("Error while running command %s: %s" % (' '.join(command), e))
    def get_info(self):
        return {'type': 'command',
                'command': ' '.join(self.last_command)}
class SnsAlerter(Alerter):
    """ Send alert using AWS SNS service.

    Authenticates either with explicit credentials (sns_aws_access_key_id /
    sns_aws_secret_access_key) or with a named AWS profile.
    """
    required_options = frozenset(['sns_topic_arn'])
    def __init__(self, *args):
        super(SnsAlerter, self).__init__(*args)
        self.sns_topic_arn = self.rule.get('sns_topic_arn', '')
        self.sns_aws_access_key_id = self.rule.get('sns_aws_access_key_id')
        self.sns_aws_secret_access_key = self.rule.get('sns_aws_secret_access_key')
        self.sns_aws_region = self.rule.get('sns_aws_region', 'us-east-1')
        # 'boto_profile' is deprecated in favour of 'sns_aws_profile'.
        # BUG FIX: the deprecated value was previously read and then
        # unconditionally overwritten (i.e. always ignored); fall back to it
        # only when the new option is unset.
        self.profile = self.rule.get('sns_aws_profile', None)
        if self.profile is None:
            self.profile = self.rule.get('boto_profile', None)  # Deprecated
    def create_default_title(self, matches):
        """SNS subject: 'ElastAlert: <rule name>'."""
        subject = 'ElastAlert: %s' % (self.rule['name'])
        return subject
    def alert(self, matches):
        """Publish the alert body to the configured SNS topic.

        :param matches: A list of match dictionaries.
        """
        body = self.create_alert_body(matches)
        if self.profile is None:
            session = boto3.Session(
                aws_access_key_id=self.sns_aws_access_key_id,
                # BUG FIX: the access key id was previously passed here as the
                # secret key, so explicit-credential auth could never succeed.
                aws_secret_access_key=self.sns_aws_secret_access_key,
                region_name=self.sns_aws_region
            )
        else:
            session = boto3.Session(profile_name=self.profile)
        sns_client = session.client('sns')
        sns_client.publish(
            TopicArn=self.sns_topic_arn,
            Message=body,
            Subject=self.create_title(matches)
        )
        elastalert_logger.info("Sent sns notification to %s" % (self.sns_topic_arn))
class MsTeamsAlerter(Alerter):
    """ Creates a Microsoft Teams Conversation Message for each alert.

    Posts an Office 365 connector-card ("MessageCard") payload to one or more
    incoming-webhook URLs.
    """
    required_options = frozenset(['ms_teams_webhook_url', 'ms_teams_alert_summary'])
    def __init__(self, rule):
        super(MsTeamsAlerter, self).__init__(rule)
        self.ms_teams_webhook_url = self.rule['ms_teams_webhook_url']
        # Accept a single URL or a list of URLs
        if isinstance(self.ms_teams_webhook_url, str):
            self.ms_teams_webhook_url = [self.ms_teams_webhook_url]
        self.ms_teams_proxy = self.rule.get('ms_teams_proxy', None)
        self.ms_teams_alert_summary = self.rule.get('ms_teams_alert_summary', 'ElastAlert Message')
        self.ms_teams_alert_fixed_width = self.rule.get('ms_teams_alert_fixed_width', False)
        self.ms_teams_theme_color = self.rule.get('ms_teams_theme_color', '')
    def format_body(self, body):
        # Optionally render the body as fixed-width (code-fenced) markdown;
        # backticks in the body are replaced so they cannot break the fence
        if self.ms_teams_alert_fixed_width:
            body = body.replace('`', "'")
            body = "```{0}```".format('```\n\n```'.join(x for x in body.split('\n'))).replace('\n``````', '')
        return body
    def alert(self, matches):
        """Post the alert as a MessageCard to every configured webhook.

        :param matches: A list of match dictionaries.
        :raises EAException: if any webhook POST fails.
        """
        body = self.create_alert_body(matches)
        body = self.format_body(body)
        # post to Teams
        headers = {'content-type': 'application/json'}
        # set https proxy, if it was provided
        proxies = {'https': self.ms_teams_proxy} if self.ms_teams_proxy else None
        payload = {
            '@type': 'MessageCard',
            '@context': 'http://schema.org/extensions',
            'summary': self.ms_teams_alert_summary,
            'title': self.create_title(matches),
            'text': body
        }
        if self.ms_teams_theme_color != '':
            payload['themeColor'] = self.ms_teams_theme_color
        for url in self.ms_teams_webhook_url:
            try:
                response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
                response.raise_for_status()
            except RequestException as e:
                raise EAException("Error posting to ms teams: %s" % e)
        elastalert_logger.info("Alert sent to MS Teams")
    def get_info(self):
        return {'type': 'ms_teams',
                'ms_teams_webhook_url': self.ms_teams_webhook_url}
class SlackAlerter(Alerter):
    """ Creates a Slack room message for each alert.

    Posts an attachment-formatted message to one or more Slack incoming
    webhooks; most visual aspects are configurable via slack_* rule options.
    """
    required_options = frozenset(['slack_webhook_url'])
    def __init__(self, rule):
        """Cache all slack_* rule options, normalizing single values to lists.

        :param rule: The rule configuration dictionary.
        """
        super(SlackAlerter, self).__init__(rule)
        self.slack_webhook_url = self.rule['slack_webhook_url']
        # Accept a single webhook URL or a list
        if isinstance(self.slack_webhook_url, str):
            self.slack_webhook_url = [self.slack_webhook_url]
        self.slack_proxy = self.rule.get('slack_proxy', None)
        self.slack_username_override = self.rule.get('slack_username_override', 'elastalert')
        self.slack_channel_override = self.rule.get('slack_channel_override', '')
        if isinstance(self.slack_channel_override, str):
            self.slack_channel_override = [self.slack_channel_override]
        self.slack_title_link = self.rule.get('slack_title_link', '')
        self.slack_title = self.rule.get('slack_title', '')
        self.slack_emoji_override = self.rule.get('slack_emoji_override', ':ghost:')
        self.slack_icon_url_override = self.rule.get('slack_icon_url_override', '')
        self.slack_msg_color = self.rule.get('slack_msg_color', 'danger')
        self.slack_parse_override = self.rule.get('slack_parse_override', 'none')
        self.slack_text_string = self.rule.get('slack_text_string', '')
        self.slack_alert_fields = self.rule.get('slack_alert_fields', '')
        self.slack_ignore_ssl_errors = self.rule.get('slack_ignore_ssl_errors', False)
        self.slack_timeout = self.rule.get('slack_timeout', 10)
        self.slack_ca_certs = self.rule.get('slack_ca_certs')
        # Optional Kibana Discover link attachment (URL is read from the match)
        self.slack_attach_kibana_discover_url = self.rule.get('slack_attach_kibana_discover_url', False)
        self.slack_kibana_discover_color = self.rule.get('slack_kibana_discover_color', '#ec4b98')
        self.slack_kibana_discover_title = self.rule.get('slack_kibana_discover_title', 'Discover in Kibana')
        self.slack_footer = self.rule.get('slack_footer', '')
        self.slack_footer_icon = self.rule.get('slack_footer_icon', '')
        self.slack_image_url = self.rule.get('slack_image_url', '')
        self.slack_thumb_url = self.rule.get('slack_thumb_url', '')
        self.slack_author_name = self.rule.get('slack_author_name', '')
        self.slack_author_link = self.rule.get('slack_author_link', '')
        self.slack_author_icon = self.rule.get('slack_author_icon', '')
        self.slack_msg_pretext = self.rule.get('slack_msg_pretext', '')
def format_body(self, body):
# https://api.slack.com/docs/formatting
return body
def get_aggregation_summary_text__maximum_width(self):
width = super(SlackAlerter, self).get_aggregation_summary_text__maximum_width()
# Reduced maximum width for prettier Slack display.
return min(width, 75)
def get_aggregation_summary_text(self, matches):
text = super(SlackAlerter, self).get_aggregation_summary_text(matches)
if text:
text = '```\n{0}```\n'.format(text)
return text
def populate_fields(self, matches):
alert_fields = []
for arg in self.slack_alert_fields:
arg = copy.copy(arg)
arg['value'] = lookup_es_key(matches[0], arg['value'])
alert_fields.append(arg)
return alert_fields
    def alert(self, matches):
        """Post the alert to every (webhook URL, channel override) pair.

        Builds one attachment payload from the rule's slack_* options and
        raises EAException if any POST fails.
        """
        body = self.create_alert_body(matches)
        body = self.format_body(body)
        # post to slack
        headers = {'content-type': 'application/json'}
        # set https proxy, if it was provided
        proxies = {'https': self.slack_proxy} if self.slack_proxy else None
        payload = {
            'username': self.slack_username_override,
            'parse': self.slack_parse_override,
            'text': self.slack_text_string,
            'attachments': [
                {
                    'color': self.slack_msg_color,
                    'title': self.create_title(matches),
                    'text': body,
                    'mrkdwn_in': ['text', 'pretext'],
                    'fields': []
                }
            ]
        }
        # if we have defined fields, populate noteable fields for the alert
        if self.slack_alert_fields != '':
            payload['attachments'][0]['fields'] = self.populate_fields(matches)
        # A custom icon URL takes precedence over the emoji fallback.
        if self.slack_icon_url_override != '':
            payload['icon_url'] = self.slack_icon_url_override
        else:
            payload['icon_emoji'] = self.slack_emoji_override
        # Optional attachment decorations: only set keys that were configured.
        if self.slack_title != '':
            payload['attachments'][0]['title'] = self.slack_title
        if self.slack_title_link != '':
            payload['attachments'][0]['title_link'] = self.slack_title_link
        if self.slack_footer != '':
            payload['attachments'][0]['footer'] = self.slack_footer
        if self.slack_footer_icon != '':
            payload['attachments'][0]['footer_icon'] = self.slack_footer_icon
        if self.slack_image_url != '':
            payload['attachments'][0]['image_url'] = self.slack_image_url
        if self.slack_thumb_url != '':
            payload['attachments'][0]['thumb_url'] = self.slack_thumb_url
        if self.slack_author_name != '':
            payload['attachments'][0]['author_name'] = self.slack_author_name
        if self.slack_author_link != '':
            payload['attachments'][0]['author_link'] = self.slack_author_link
        if self.slack_author_icon != '':
            payload['attachments'][0]['author_icon'] = self.slack_author_icon
        if self.slack_msg_pretext != '':
            payload['attachments'][0]['pretext'] = self.slack_msg_pretext
        # Append a second attachment linking to Kibana Discover when the match
        # carries a 'kibana_discover_url' value.
        if self.slack_attach_kibana_discover_url:
            kibana_discover_url = lookup_es_key(matches[0], 'kibana_discover_url')
            if kibana_discover_url:
                payload['attachments'].append({
                    'color': self.slack_kibana_discover_color,
                    'title': self.slack_kibana_discover_title,
                    'title_link': kibana_discover_url
                })
        for url in self.slack_webhook_url:
            for channel_override in self.slack_channel_override:
                try:
                    if self.slack_ca_certs:
                        # A CA bundle path doubles as the requests 'verify' argument.
                        verify = self.slack_ca_certs
                    else:
                        verify = not self.slack_ignore_ssl_errors
                    if self.slack_ignore_ssl_errors:
                        requests.packages.urllib3.disable_warnings()
                    payload['channel'] = channel_override
                    response = requests.post(
                        url, data=json.dumps(payload, cls=DateTimeEncoder),
                        headers=headers, verify=verify,
                        proxies=proxies,
                        timeout=self.slack_timeout)
                    warnings.resetwarnings()
                    response.raise_for_status()
                except RequestException as e:
                    raise EAException("Error posting to slack: %s" % e)
        elastalert_logger.info("Alert '%s' sent to Slack" % self.rule['name'])
def get_info(self):
return {'type': 'slack',
'slack_username_override': self.slack_username_override}
class MattermostAlerter(Alerter):
    """ Creates a Mattermost post for each alert """
    required_options = frozenset(['mattermost_webhook_url'])

    def __init__(self, rule):
        super(MattermostAlerter, self).__init__(rule)

        # HTTP config
        self.mattermost_webhook_url = self.rule['mattermost_webhook_url']
        if isinstance(self.mattermost_webhook_url, str):
            self.mattermost_webhook_url = [self.mattermost_webhook_url]
        self.mattermost_proxy = self.rule.get('mattermost_proxy', None)
        self.mattermost_ignore_ssl_errors = self.rule.get('mattermost_ignore_ssl_errors', False)

        # Override webhook config
        self.mattermost_username_override = self.rule.get('mattermost_username_override', 'elastalert')
        self.mattermost_channel_override = self.rule.get('mattermost_channel_override', '')
        self.mattermost_icon_url_override = self.rule.get('mattermost_icon_url_override', '')

        # Message properties
        self.mattermost_msg_pretext = self.rule.get('mattermost_msg_pretext', '')
        self.mattermost_msg_color = self.rule.get('mattermost_msg_color', 'danger')
        self.mattermost_msg_fields = self.rule.get('mattermost_msg_fields', '')
        self.mattermost_title_link = self.rule.get('mattermost_title_link', '')
        self.mattermost_footer = self.rule.get('mattermost_footer', '')
        self.mattermost_footer_icon = self.rule.get('mattermost_footer_icon', '')
        # NOTE: the original assigned mattermost_image_url twice; once suffices.
        self.mattermost_image_url = self.rule.get('mattermost_image_url', '')
        self.mattermost_thumb_url = self.rule.get('mattermost_thumb_url', '')
        self.mattermost_author_name = self.rule.get('mattermost_author_name', '')
        self.mattermost_author_link = self.rule.get('mattermost_author_link', '')
        self.mattermost_author_icon = self.rule.get('mattermost_author_icon', '')

    def get_aggregation_summary_text__maximum_width(self):
        """Cap the summary width at 75 columns for prettier Mattermost display."""
        width = super(MattermostAlerter, self).get_aggregation_summary_text__maximum_width()
        return min(width, 75)

    def get_aggregation_summary_text(self, matches):
        """Wrap the inherited aggregation summary in a Mattermost code fence."""
        text = super(MattermostAlerter, self).get_aggregation_summary_text(matches)
        if text:
            text = '```\n{0}```\n'.format(text)
        return text

    def populate_fields(self, matches):
        """Resolve mattermost_msg_fields 'args' templates against the first match.

        Missing lookup values are replaced by the rule's 'alert_missing_value'.
        """
        alert_fields = []
        missing = self.rule.get('alert_missing_value', '<MISSING VALUE>')
        for field in self.mattermost_msg_fields:
            field = copy.copy(field)
            if 'args' in field:
                args_values = [lookup_es_key(matches[0], arg) or missing for arg in field['args']]
                if 'value' in field:
                    field['value'] = field['value'].format(*args_values)
                else:
                    field['value'] = "\n".join(str(arg) for arg in args_values)
                del(field['args'])
            alert_fields.append(field)
        return alert_fields

    def alert(self, matches):
        """Post the alert as a Mattermost attachment to every webhook URL.

        Raises EAException on any HTTP failure.
        """
        body = self.create_alert_body(matches)
        title = self.create_title(matches)

        # post to mattermost
        headers = {'content-type': 'application/json'}
        # set https proxy, if it was provided
        proxies = {'https': self.mattermost_proxy} if self.mattermost_proxy else None
        payload = {
            'attachments': [
                {
                    'fallback': "{0}: {1}".format(title, self.mattermost_msg_pretext),
                    'color': self.mattermost_msg_color,
                    'title': title,
                    'pretext': self.mattermost_msg_pretext,
                    'fields': []
                }
            ]
        }

        # 'alert_text_only' places the body inside the attachment; otherwise
        # it becomes the top-level message text.
        if self.rule.get('alert_text_type') == 'alert_text_only':
            payload['attachments'][0]['text'] = body
        else:
            payload['text'] = body

        # Optional message / attachment decorations: only set configured keys.
        if self.mattermost_msg_fields != '':
            payload['attachments'][0]['fields'] = self.populate_fields(matches)
        if self.mattermost_icon_url_override != '':
            payload['icon_url'] = self.mattermost_icon_url_override
        if self.mattermost_username_override != '':
            payload['username'] = self.mattermost_username_override
        if self.mattermost_channel_override != '':
            payload['channel'] = self.mattermost_channel_override
        if self.mattermost_title_link != '':
            payload['attachments'][0]['title_link'] = self.mattermost_title_link
        if self.mattermost_footer != '':
            payload['attachments'][0]['footer'] = self.mattermost_footer
        if self.mattermost_footer_icon != '':
            payload['attachments'][0]['footer_icon'] = self.mattermost_footer_icon
        if self.mattermost_image_url != '':
            payload['attachments'][0]['image_url'] = self.mattermost_image_url
        if self.mattermost_thumb_url != '':
            payload['attachments'][0]['thumb_url'] = self.mattermost_thumb_url
        if self.mattermost_author_name != '':
            payload['attachments'][0]['author_name'] = self.mattermost_author_name
        if self.mattermost_author_link != '':
            payload['attachments'][0]['author_link'] = self.mattermost_author_link
        if self.mattermost_author_icon != '':
            payload['attachments'][0]['author_icon'] = self.mattermost_author_icon

        for url in self.mattermost_webhook_url:
            try:
                if self.mattermost_ignore_ssl_errors:
                    # Use the documented vendored path (as SlackAlerter does);
                    # 'requests.urllib3' only works because requests happens to
                    # import urllib3 at module scope.
                    requests.packages.urllib3.disable_warnings()
                response = requests.post(
                    url, data=json.dumps(payload, cls=DateTimeEncoder),
                    headers=headers, verify=not self.mattermost_ignore_ssl_errors,
                    proxies=proxies)
                warnings.resetwarnings()
                response.raise_for_status()
            except RequestException as e:
                raise EAException("Error posting to Mattermost: %s" % e)
        elastalert_logger.info("Alert sent to Mattermost")

    def get_info(self):
        return {'type': 'mattermost',
                'mattermost_username_override': self.mattermost_username_override,
                'mattermost_webhook_url': self.mattermost_webhook_url}
class PagerDutyAlerter(Alerter):
    """ Create an incident on PagerDuty for each alert """
    required_options = frozenset(['pagerduty_service_key', 'pagerduty_client_name'])
    def __init__(self, rule):
        """Read all pagerduty_* options and select the v1 or v2 API endpoint."""
        super(PagerDutyAlerter, self).__init__(rule)
        self.pagerduty_service_key = self.rule['pagerduty_service_key']
        self.pagerduty_client_name = self.rule['pagerduty_client_name']
        self.pagerduty_incident_key = self.rule.get('pagerduty_incident_key', '')
        self.pagerduty_incident_key_args = self.rule.get('pagerduty_incident_key_args', None)
        self.pagerduty_event_type = self.rule.get('pagerduty_event_type', 'trigger')
        self.pagerduty_proxy = self.rule.get('pagerduty_proxy', None)
        self.pagerduty_api_version = self.rule.get('pagerduty_api_version', 'v1')
        self.pagerduty_v2_payload_class = self.rule.get('pagerduty_v2_payload_class', '')
        self.pagerduty_v2_payload_class_args = self.rule.get('pagerduty_v2_payload_class_args', None)
        self.pagerduty_v2_payload_component = self.rule.get('pagerduty_v2_payload_component', '')
        self.pagerduty_v2_payload_component_args = self.rule.get('pagerduty_v2_payload_component_args', None)
        self.pagerduty_v2_payload_group = self.rule.get('pagerduty_v2_payload_group', '')
        self.pagerduty_v2_payload_group_args = self.rule.get('pagerduty_v2_payload_group_args', None)
        self.pagerduty_v2_payload_severity = self.rule.get('pagerduty_v2_payload_severity', 'critical')
        self.pagerduty_v2_payload_source = self.rule.get('pagerduty_v2_payload_source', 'ElastAlert')
        self.pagerduty_v2_payload_source_args = self.rule.get('pagerduty_v2_payload_source_args', None)
        self.pagerduty_v2_payload_custom_details = self.rule.get('pagerduty_v2_payload_custom_details', {})
        self.pagerduty_v2_payload_include_all_info = self.rule.get('pagerduty_v2_payload_include_all_info', True)
        # The v2 Events API uses a different payload shape and URL than v1.
        if self.pagerduty_api_version == 'v2':
            self.url = 'https://events.pagerduty.com/v2/enqueue'
        else:
            self.url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json'
    def alert(self, matches):
        """Build the v1 or v2 event payload and POST it to PagerDuty.

        Raises EAException on any HTTP failure.
        """
        body = self.create_alert_body(matches)
        # post to pagerduty
        headers = {'content-type': 'application/json'}
        if self.pagerduty_api_version == 'v2':
            # Optionally embed the full alert body as a custom detail.
            custom_details_payload = {'information': body} if self.pagerduty_v2_payload_include_all_info else {}
            if self.pagerduty_v2_payload_custom_details:
                for match in matches:
                    for custom_details_key, es_key in list(self.pagerduty_v2_payload_custom_details.items()):
                        # NOTE(review): later matches overwrite earlier ones for
                        # the same key, so only the last match's values survive.
                        custom_details_payload[custom_details_key] = lookup_es_key(match, es_key)
            payload = {
                'routing_key': self.pagerduty_service_key,
                'event_action': self.pagerduty_event_type,
                'dedup_key': self.get_incident_key(matches),
                'client': self.pagerduty_client_name,
                'payload': {
                    'class': self.resolve_formatted_key(self.pagerduty_v2_payload_class,
                                                        self.pagerduty_v2_payload_class_args,
                                                        matches),
                    'component': self.resolve_formatted_key(self.pagerduty_v2_payload_component,
                                                            self.pagerduty_v2_payload_component_args,
                                                            matches),
                    'group': self.resolve_formatted_key(self.pagerduty_v2_payload_group,
                                                        self.pagerduty_v2_payload_group_args,
                                                        matches),
                    'severity': self.pagerduty_v2_payload_severity,
                    'source': self.resolve_formatted_key(self.pagerduty_v2_payload_source,
                                                         self.pagerduty_v2_payload_source_args,
                                                         matches),
                    'summary': self.create_title(matches),
                    'custom_details': custom_details_payload,
                },
            }
            # Forward the first match's timestamp when one is present.
            match_timestamp = lookup_es_key(matches[0], self.rule.get('timestamp_field', '@timestamp'))
            if match_timestamp:
                payload['payload']['timestamp'] = match_timestamp
        else:
            payload = {
                'service_key': self.pagerduty_service_key,
                'description': self.create_title(matches),
                'event_type': self.pagerduty_event_type,
                'incident_key': self.get_incident_key(matches),
                'client': self.pagerduty_client_name,
                'details': {
                    "information": body,
                },
            }
        # set https proxy, if it was provided
        proxies = {'https': self.pagerduty_proxy} if self.pagerduty_proxy else None
        try:
            response = requests.post(
                self.url,
                data=json.dumps(payload, cls=DateTimeEncoder, ensure_ascii=False),
                headers=headers,
                proxies=proxies
            )
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to pagerduty: %s" % e)
        # Log according to the event type that was sent.
        if self.pagerduty_event_type == 'trigger':
            elastalert_logger.info("Trigger sent to PagerDuty")
        elif self.pagerduty_event_type == 'resolve':
            elastalert_logger.info("Resolve sent to PagerDuty")
        elif self.pagerduty_event_type == 'acknowledge':
            elastalert_logger.info("acknowledge sent to PagerDuty")
    def resolve_formatted_key(self, key, args, matches):
        """Format *key* with values looked up from the first match or the rule.

        Missing values fall back to the rule's 'alert_missing_value'. When no
        *args* are configured the key is returned unchanged.
        """
        if args:
            key_values = [lookup_es_key(matches[0], arg) for arg in args]
            # Populate values with rule level properties too
            for i in range(len(key_values)):
                if key_values[i] is None:
                    key_value = self.rule.get(args[i])
                    if key_value:
                        key_values[i] = key_value
            missing = self.rule.get('alert_missing_value', '<MISSING VALUE>')
            key_values = [missing if val is None else val for val in key_values]
            return key.format(*key_values)
        else:
            return key
    def get_incident_key(self, matches):
        """Return the incident/dedup key, resolved like resolve_formatted_key."""
        if self.pagerduty_incident_key_args:
            incident_key_values = [lookup_es_key(matches[0], arg) for arg in self.pagerduty_incident_key_args]
            # Populate values with rule level properties too
            for i in range(len(incident_key_values)):
                if incident_key_values[i] is None:
                    key_value = self.rule.get(self.pagerduty_incident_key_args[i])
                    if key_value:
                        incident_key_values[i] = key_value
            missing = self.rule.get('alert_missing_value', '<MISSING VALUE>')
            incident_key_values = [missing if val is None else val for val in incident_key_values]
            return self.pagerduty_incident_key.format(*incident_key_values)
        else:
            return self.pagerduty_incident_key
    def get_info(self):
        # Short description used for logging / alert metadata.
        return {'type': 'pagerduty',
                'pagerduty_client_name': self.pagerduty_client_name}
class PagerTreeAlerter(Alerter):
    """ Creates a PagerTree Incident for each alert """
    required_options = frozenset(['pagertree_integration_url'])

    def __init__(self, rule):
        super(PagerTreeAlerter, self).__init__(rule)
        self.url = self.rule['pagertree_integration_url']
        self.pagertree_proxy = self.rule.get('pagertree_proxy', None)

    def alert(self, matches):
        """POST a 'create' event with a fresh UUID to the PagerTree endpoint."""
        headers = {'content-type': 'application/json'}
        # Route through an HTTPS proxy when one is configured.
        proxies = {'https': self.pagertree_proxy} if self.pagertree_proxy else None
        event = {
            "event_type": "create",
            "Id": str(uuid.uuid4()),
            "Title": self.create_title(matches),
            "Description": self.create_alert_body(matches)
        }
        try:
            resp = requests.post(self.url, data=json.dumps(event, cls=DateTimeEncoder), headers=headers, proxies=proxies)
            resp.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to PagerTree: %s" % e)
        elastalert_logger.info("Trigger sent to PagerTree")

    def get_info(self):
        """Return a short description of this alerter for logging/metadata."""
        return {'type': 'pagertree',
                'pagertree_integration_url': self.url}
class ExotelAlerter(Alerter):
    """ Sends an SMS through the Exotel API for each alert """
    required_options = frozenset(['exotel_account_sid', 'exotel_auth_token', 'exotel_to_number', 'exotel_from_number'])

    def __init__(self, rule):
        super(ExotelAlerter, self).__init__(rule)
        self.exotel_account_sid = self.rule['exotel_account_sid']
        self.exotel_auth_token = self.rule['exotel_auth_token']
        self.exotel_to_number = self.rule['exotel_to_number']
        self.exotel_from_number = self.rule['exotel_from_number']
        # Optional extra text appended after the rule name in the SMS body.
        self.sms_body = self.rule.get('exotel_message_body', '')

    def alert(self, matches):
        """Send one SMS; raises EAException when the API call fails."""
        client = Exotel(self.exotel_account_sid, self.exotel_auth_token)
        try:
            message_body = self.rule['name'] + self.sms_body
            # Consistency fix: use the numbers stored in __init__ instead of
            # re-reading the rule dict (they come from the same required keys).
            response = client.sms(self.exotel_from_number, self.exotel_to_number, message_body)
            if response != 200:
                raise EAException("Error posting to Exotel, response code is %s" % response)
        except RequestException:
            raise EAException("Error posting to Exotel").with_traceback(sys.exc_info()[2])
        elastalert_logger.info("Trigger sent to Exotel")

    def get_info(self):
        """Return a short description of this alerter for logging/metadata."""
        return {'type': 'exotel', 'exotel_account': self.exotel_account_sid}
class TwilioAlerter(Alerter):
    """ Sends an SMS via the Twilio REST API for each alert """
    required_options = frozenset(['twilio_account_sid', 'twilio_auth_token', 'twilio_to_number'])

    def __init__(self, rule):
        super(TwilioAlerter, self).__init__(rule)
        self.twilio_account_sid = self.rule['twilio_account_sid']
        self.twilio_auth_token = self.rule['twilio_auth_token']
        self.twilio_to_number = self.rule['twilio_to_number']
        self.twilio_from_number = self.rule.get('twilio_from_number')
        self.twilio_message_service_sid = self.rule.get('twilio_message_service_sid')
        self.twilio_use_copilot = self.rule.get('twilio_use_copilot', False)

    def alert(self, matches):
        """Send the rule name as an SMS, via Copilot or a plain from-number."""
        twilio_client = TwilioClient(self.twilio_account_sid, self.twilio_auth_token)
        message_kwargs = {'body': self.rule['name'], 'to': self.twilio_to_number}
        if self.twilio_use_copilot:
            # Copilot routes through a messaging service instead of a number.
            if self.twilio_message_service_sid is None:
                raise EAException("Twilio Copilot requires the 'twilio_message_service_sid' option")
            message_kwargs['messaging_service_sid'] = self.twilio_message_service_sid
        else:
            if self.twilio_from_number is None:
                raise EAException("Twilio SMS requires the 'twilio_from_number' option")
            message_kwargs['from_'] = self.twilio_from_number
        try:
            twilio_client.messages.create(**message_kwargs)
        except TwilioRestException as e:
            raise EAException("Error posting to twilio: %s" % e)
        elastalert_logger.info("Trigger sent to Twilio")

    def get_info(self):
        """Return a short description of this alerter for logging/metadata."""
        return {'type': 'twilio',
                'twilio_client_name': self.twilio_from_number}
class VictorOpsAlerter(Alerter):
    """ Creates a VictorOps Incident for each alert """
    required_options = frozenset(['victorops_api_key', 'victorops_routing_key', 'victorops_message_type'])

    def __init__(self, rule):
        super(VictorOpsAlerter, self).__init__(rule)
        self.victorops_api_key = self.rule['victorops_api_key']
        self.victorops_routing_key = self.rule['victorops_routing_key']
        self.victorops_message_type = self.rule['victorops_message_type']
        self.victorops_entity_id = self.rule.get('victorops_entity_id', None)
        self.victorops_entity_display_name = self.rule.get('victorops_entity_display_name', 'no entity display name')
        self.url = 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' % (
            self.victorops_api_key, self.victorops_routing_key)
        self.victorops_proxy = self.rule.get('victorops_proxy', None)

    def alert(self, matches):
        """POST a generic-integration event to VictorOps."""
        state_message = self.create_alert_body(matches)
        headers = {'content-type': 'application/json'}
        # Route through an HTTPS proxy when one is configured.
        proxies = {'https': self.victorops_proxy} if self.victorops_proxy else None
        event = {
            "message_type": self.victorops_message_type,
            "entity_display_name": self.victorops_entity_display_name,
            "monitoring_tool": "ElastAlert",
            "state_message": state_message
        }
        if self.victorops_entity_id:
            event["entity_id"] = self.victorops_entity_id
        try:
            resp = requests.post(self.url, data=json.dumps(event, cls=DateTimeEncoder), headers=headers, proxies=proxies)
            resp.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to VictorOps: %s" % e)
        elastalert_logger.info("Trigger sent to VictorOps")

    def get_info(self):
        """Return a short description of this alerter for logging/metadata."""
        return {'type': 'victorops',
                'victorops_routing_key': self.victorops_routing_key}
class TelegramAlerter(Alerter):
    """ Send a Telegram message via bot api for each alert """
    required_options = frozenset(['telegram_bot_token', 'telegram_room_id'])

    def __init__(self, rule):
        super(TelegramAlerter, self).__init__(rule)
        self.telegram_bot_token = self.rule['telegram_bot_token']
        self.telegram_room_id = self.rule['telegram_room_id']
        self.telegram_api_url = self.rule.get('telegram_api_url', 'api.telegram.org')
        self.url = 'https://%s/bot%s/%s' % (self.telegram_api_url, self.telegram_bot_token, "sendMessage")
        self.telegram_proxy = self.rule.get('telegram_proxy', None)
        self.telegram_proxy_login = self.rule.get('telegram_proxy_login', None)
        self.telegram_proxy_password = self.rule.get('telegram_proxy_pass', None)

    def alert(self, matches):
        """Send the matches as a single markdown-formatted Telegram message."""
        # Bold title followed by a code block containing every match.
        pieces = ['⚠ *%s* ⚠ ```\n' % (self.create_title(matches))]
        for match in matches:
            pieces.append(str(BasicMatchString(self.rule, match)))
            # Separate text of aggregated alerts with dashes
            if len(matches) > 1:
                pieces.append('\n----------------------------------------\n')
        body = ''.join(pieces)
        # Telegram rejects messages longer than 4096 chars: crop with a notice.
        if len(body) > 4095:
            body = body[0:4000] + "\n⚠ *message was cropped according to telegram limits!* ⚠"
        body += ' ```'
        headers = {'content-type': 'application/json'}
        # set https proxy, if it was provided
        proxies = {'https': self.telegram_proxy} if self.telegram_proxy else None
        auth = HTTPProxyAuth(self.telegram_proxy_login, self.telegram_proxy_password) if self.telegram_proxy_login else None
        payload = {
            'chat_id': self.telegram_room_id,
            'text': body,
            'parse_mode': 'markdown',
            'disable_web_page_preview': True
        }
        try:
            resp = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies, auth=auth)
            warnings.resetwarnings()
            resp.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to Telegram: %s. Details: %s" % (e, "" if e.response is None else e.response.text))
        elastalert_logger.info(
            "Alert sent to Telegram room %s" % self.telegram_room_id)

    def get_info(self):
        """Return a short description of this alerter for logging/metadata."""
        return {'type': 'telegram',
                'telegram_room_id': self.telegram_room_id}
class GoogleChatAlerter(Alerter):
    """ Send a notification via Google Chat webhooks """
    required_options = frozenset(['googlechat_webhook_url'])

    def __init__(self, rule):
        super(GoogleChatAlerter, self).__init__(rule)
        self.googlechat_webhook_url = self.rule['googlechat_webhook_url']
        if isinstance(self.googlechat_webhook_url, str):
            self.googlechat_webhook_url = [self.googlechat_webhook_url]
        self.googlechat_format = self.rule.get('googlechat_format', 'basic')
        self.googlechat_header_title = self.rule.get('googlechat_header_title', None)
        self.googlechat_header_subtitle = self.rule.get('googlechat_header_subtitle', None)
        self.googlechat_header_image = self.rule.get('googlechat_header_image', None)
        self.googlechat_footer_kibanalink = self.rule.get('googlechat_footer_kibanalink', None)

    def create_header(self):
        """Build the optional card header; None when no title is configured."""
        if not self.googlechat_header_title:
            return None
        return {
            "title": self.googlechat_header_title,
            "subtitle": self.googlechat_header_subtitle,
            "imageUrl": self.googlechat_header_image
        }

    def create_footer(self):
        """Build the optional 'VISIT KIBANA' footer section; None when unset."""
        if not self.googlechat_footer_kibanalink:
            return None
        kibana_button = {
            "textButton": {
                "text": "VISIT KIBANA",
                "onClick": {
                    "openLink": {
                        "url": self.googlechat_footer_kibanalink
                    }
                }
            }
        }
        return {"widgets": [{"buttons": [kibana_button]}]}

    def create_card(self, matches):
        """Assemble a Google Chat 'card' message from the alert body."""
        body_widget = {"textParagraph": {"text": self.create_alert_body(matches)}}
        card = {"cards": [{"sections": [{"widgets": [body_widget]}]}]}
        # Attach the optional header and footer only when configured.
        header = self.create_header()
        if header:
            card['cards'][0]['header'] = header
        footer = self.create_footer()
        if footer:
            card['cards'][0]['sections'].append(footer)
        return card

    def create_basic(self, matches):
        """Plain-text message variant."""
        return {'text': self.create_alert_body(matches)}

    def alert(self, matches):
        """POST the formatted message to every configured webhook URL."""
        if self.googlechat_format == 'card':
            message = self.create_card(matches)
        else:
            message = self.create_basic(matches)
        headers = {'content-type': 'application/json'}
        for webhook in self.googlechat_webhook_url:
            try:
                resp = requests.post(webhook, data=json.dumps(message), headers=headers)
                resp.raise_for_status()
            except RequestException as e:
                raise EAException("Error posting to google chat: {}".format(e))
        elastalert_logger.info("Alert sent to Google Chat!")

    def get_info(self):
        """Return a short description of this alerter for logging/metadata."""
        return {'type': 'googlechat',
                'googlechat_webhook_url': self.googlechat_webhook_url}
class GitterAlerter(Alerter):
    """ Creates a Gitter activity message for each alert """
    required_options = frozenset(['gitter_webhook_url'])

    def __init__(self, rule):
        super(GitterAlerter, self).__init__(rule)
        self.gitter_webhook_url = self.rule['gitter_webhook_url']
        self.gitter_proxy = self.rule.get('gitter_proxy', None)
        self.gitter_msg_level = self.rule.get('gitter_msg_level', 'error')

    def alert(self, matches):
        """POST the alert body to the Gitter activity webhook."""
        message = self.create_alert_body(matches)
        headers = {'content-type': 'application/json'}
        # set https proxy, if it was provided
        proxies = {'https': self.gitter_proxy} if self.gitter_proxy else None
        payload = {
            'message': message,
            'level': self.gitter_msg_level
        }
        try:
            resp = requests.post(self.gitter_webhook_url, json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
            resp.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to Gitter: %s" % e)
        elastalert_logger.info("Alert sent to Gitter")

    def get_info(self):
        """Return a short description of this alerter for logging/metadata."""
        return {'type': 'gitter',
                'gitter_webhook_url': self.gitter_webhook_url}
class ServiceNowAlerter(Alerter):
    """ Creates a ServiceNow incident for each alert """
    required_options = set([
        'username',
        'password',
        'servicenow_rest_url',
        'short_description',
        'comments',
        'assignment_group',
        'category',
        'subcategory',
        'cmdb_ci',
        'caller_id'
    ])

    def __init__(self, rule):
        super(ServiceNowAlerter, self).__init__(rule)
        self.servicenow_rest_url = self.rule['servicenow_rest_url']
        self.servicenow_proxy = self.rule.get('servicenow_proxy', None)

    def alert(self, matches):
        """POST one incident per match to the ServiceNow REST endpoint.

        Raises EAException when any request fails.
        """
        for match in matches:
            # Parse everything into description.
            description = str(BasicMatchString(self.rule, match))
            # Set proper headers
            headers = {
                "Content-Type": "application/json",
                "Accept": "application/json;charset=utf-8"
            }
            proxies = {'https': self.servicenow_proxy} if self.servicenow_proxy else None
            payload = {
                "description": description,
                "short_description": self.rule['short_description'],
                "comments": self.rule['comments'],
                "assignment_group": self.rule['assignment_group'],
                "category": self.rule['category'],
                "subcategory": self.rule['subcategory'],
                "cmdb_ci": self.rule['cmdb_ci'],
                "caller_id": self.rule["caller_id"]
            }
            try:
                response = requests.post(
                    self.servicenow_rest_url,
                    auth=(self.rule['username'], self.rule['password']),
                    headers=headers,
                    data=json.dumps(payload, cls=DateTimeEncoder),
                    proxies=proxies
                )
                response.raise_for_status()
            except RequestException as e:
                raise EAException("Error posting to ServiceNow: %s" % e)
        elastalert_logger.info("Alert sent to ServiceNow")

    def get_info(self):
        # Fixed key name: it was previously the literal string
        # 'self.servicenow_rest_url', an apparent typo.
        return {'type': 'ServiceNow',
                'servicenow_rest_url': self.servicenow_rest_url}
class AlertaAlerter(Alerter):
    """ Creates an Alerta event for each alert """
    required_options = frozenset(['alerta_api_url'])
    def __init__(self, rule):
        """Read all alerta_* options, falling back to documented defaults."""
        super(AlertaAlerter, self).__init__(rule)
        # Set up default parameters
        self.url = self.rule.get('alerta_api_url', None)
        self.api_key = self.rule.get('alerta_api_key', None)
        self.timeout = self.rule.get('alerta_timeout', 86400)
        self.use_match_timestamp = self.rule.get('alerta_use_match_timestamp', False)
        self.use_qk_as_resource = self.rule.get('alerta_use_qk_as_resource', False)
        self.verify_ssl = not self.rule.get('alerta_api_skip_ssl', False)
        self.missing_text = self.rule.get('alert_missing_value', '<MISSING VALUE>')
        # Fill up default values of the API JSON payload
        self.severity = self.rule.get('alerta_severity', 'warning')
        self.resource = self.rule.get('alerta_resource', 'elastalert')
        self.environment = self.rule.get('alerta_environment', 'Production')
        self.origin = self.rule.get('alerta_origin', 'elastalert')
        self.service = self.rule.get('alerta_service', ['elastalert'])
        self.text = self.rule.get('alerta_text', 'elastalert')
        self.type = self.rule.get('alerta_type', 'elastalert')
        self.event = self.rule.get('alerta_event', 'elastalert')
        self.correlate = self.rule.get('alerta_correlate', [])
        self.tags = self.rule.get('alerta_tags', [])
        self.group = self.rule.get('alerta_group', '')
        self.attributes_keys = self.rule.get('alerta_attributes_keys', [])
        self.attributes_values = self.rule.get('alerta_attributes_values', [])
        self.value = self.rule.get('alerta_value', '')
    def alert(self, matches):
        """POST one Alerta event built from the first match."""
        # Override the resource if requested
        if self.use_qk_as_resource and 'query_key' in self.rule and lookup_es_key(matches[0], self.rule['query_key']):
            # NOTE(review): this permanently overwrites self.resource, so the
            # override persists into subsequent alert() calls — confirm intended.
            self.resource = lookup_es_key(matches[0], self.rule['query_key'])
        headers = {'content-type': 'application/json'}
        if self.api_key is not None:
            headers['Authorization'] = 'Key %s' % (self.rule['alerta_api_key'])
        alerta_payload = self.get_json_payload(matches[0])
        try:
            response = requests.post(self.url, data=alerta_payload, headers=headers, verify=self.verify_ssl)
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to Alerta: %s" % e)
        elastalert_logger.info("Alert sent to Alerta")
    def create_default_title(self, matches):
        """Return '<rule name>' or '<rule name>.<query key value>' when present."""
        title = '%s' % (self.rule['name'])
        # If the rule has a query_key, add that value
        if 'query_key' in self.rule:
            qk = matches[0].get(self.rule['query_key'])
            if qk:
                title += '.%s' % (qk)
        return title
    def get_info(self):
        # Short description used for logging / alert metadata.
        return {'type': 'alerta',
                'alerta_url': self.url}
    def get_json_payload(self, match):
        """
        Builds the API Create Alert body, as in
        http://alerta.readthedocs.io/en/latest/api/reference.html#create-an-alert
        For the values that could have references to fields on the match, resolve those references.
        """
        # Using default text and event title if not defined in rule
        alerta_text = self.rule['type'].get_match_str([match]) if self.text == '' else resolve_string(self.text, match, self.missing_text)
        alerta_event = self.create_default_title([match]) if self.event == '' else resolve_string(self.event, match, self.missing_text)
        # Prefer the match's own timestamp when configured and available.
        match_timestamp = lookup_es_key(match, self.rule.get('timestamp_field', '@timestamp'))
        if match_timestamp is None:
            match_timestamp = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
        if self.use_match_timestamp:
            createTime = ts_to_dt(match_timestamp).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
        else:
            createTime = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
        alerta_payload_dict = {
            'resource': resolve_string(self.resource, match, self.missing_text),
            'severity': resolve_string(self.severity, match),
            'timeout': self.timeout,
            'createTime': createTime,
            'type': self.type,
            'environment': resolve_string(self.environment, match, self.missing_text),
            'origin': resolve_string(self.origin, match, self.missing_text),
            'group': resolve_string(self.group, match, self.missing_text),
            'event': alerta_event,
            'text': alerta_text,
            'value': resolve_string(self.value, match, self.missing_text),
            'service': [resolve_string(a_service, match, self.missing_text) for a_service in self.service],
            'tags': [resolve_string(a_tag, match, self.missing_text) for a_tag in self.tags],
            'correlate': [resolve_string(an_event, match, self.missing_text) for an_event in self.correlate],
            'attributes': dict(list(zip(self.attributes_keys,
                                        [resolve_string(a_value, match, self.missing_text) for a_value in self.attributes_values]))),
            'rawData': self.create_alert_body([match]),
        }
        try:
            payload = json.dumps(alerta_payload_dict, cls=DateTimeEncoder)
        except Exception as e:
            raise Exception("Error building Alerta request: %s" % e)
        return payload
class HTTPPostAlerter(Alerter):
    """ Requested elasticsearch indices are sent by HTTP POST. Encoded with JSON. """
    required_options = frozenset(['http_post_url'])

    def __init__(self, rule):
        super(HTTPPostAlerter, self).__init__(rule)
        post_url = self.rule.get('http_post_url')
        if isinstance(post_url, str):
            post_url = [post_url]
        self.post_url = post_url
        self.post_proxy = self.rule.get('http_post_proxy')
        self.post_payload = self.rule.get('http_post_payload', {})
        self.post_static_payload = self.rule.get('http_post_static_payload', {})
        self.post_all_values = self.rule.get('http_post_all_values', not self.post_payload)
        self.post_http_headers = self.rule.get('http_post_headers', {})
        self.post_ca_certs = self.rule.get('http_post_ca_certs')
        self.post_ignore_ssl_errors = self.rule.get('http_post_ignore_ssl_errors', False)
        self.timeout = self.rule.get('http_post_timeout', 10)

    def alert(self, matches):
        """ Each match will trigger a POST to the specified endpoint(s). """
        for match in matches:
            # BUG FIX: copy the match instead of aliasing it, so that the
            # static-payload / mapped-key mutations below do not leak back
            # into the shared match dict.
            payload = dict(match) if self.post_all_values else {}
            payload.update(self.post_static_payload)
            for post_key, es_key in list(self.post_payload.items()):
                payload[post_key] = lookup_es_key(match, es_key)
            headers = {
                "Content-Type": "application/json",
                "Accept": "application/json;charset=utf-8"
            }
            # A CA bundle path doubles as the requests 'verify' argument.
            if self.post_ca_certs:
                verify = self.post_ca_certs
            else:
                verify = not self.post_ignore_ssl_errors
            headers.update(self.post_http_headers)
            proxies = {'https': self.post_proxy} if self.post_proxy else None
            for url in self.post_url:
                try:
                    response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder),
                                             headers=headers, proxies=proxies, timeout=self.timeout,
                                             verify=verify)
                    response.raise_for_status()
                except RequestException as e:
                    raise EAException("Error posting HTTP Post alert: %s" % e)
            elastalert_logger.info("HTTP Post alert sent.")

    def get_info(self):
        """Return a short description of this alerter for logging/metadata."""
        return {'type': 'http_post',
                'http_post_webhook_url': self.post_url}
class LineNotifyAlerter(Alerter):
    """ Creates a Line Notify message for each alert.

    Posts the rendered alert body to the Line Notify API, authenticated with
    the rule's personal access token.
    """
    # Fix: this attribute was previously misspelled `required_option`, so the
    # base class never actually enforced that the token option is present.
    required_options = frozenset(["linenotify_access_token"])

    def __init__(self, rule):
        super(LineNotifyAlerter, self).__init__(rule)
        self.linenotify_access_token = self.rule["linenotify_access_token"]

    def alert(self, matches):
        """ Send the alert body for `matches` to Line Notify.

        :param matches: list of match dicts for this alert.
        :raises EAException: if the HTTP POST fails or returns an error status.
        """
        body = self.create_alert_body(matches)
        # Line Notify expects a form-encoded POST with a bearer token.
        headers = {
            "Content-Type": "application/x-www-form-urlencoded",
            "Authorization": "Bearer {}".format(self.linenotify_access_token)
        }
        payload = {
            "message": body
        }
        try:
            response = requests.post("https://notify-api.line.me/api/notify", data=payload, headers=headers)
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to Line Notify: %s" % e)
        elastalert_logger.info("Alert sent to Line Notify")

    def get_info(self):
        return {"type": "linenotify", "linenotify_access_token": self.linenotify_access_token}
class HiveAlerter(Alerter):
    """
    Use matched data to create alerts containing observables in an instance of TheHive
    """
    # Fix: use frozenset for consistency with every other alerter (the base
    # class also declares required_options as a frozenset).
    required_options = frozenset(['hive_connection', 'hive_alert_config'])

    def lookup_field(self, match: dict, field_name: str, default):
        """Populates a field with values depending on the contents of the Elastalert match
        provided to it.

        Uses a similar algorithm to that implemented to populate the `alert_text_args`.
        First checks any fields found in the match provided, then any fields defined in
        the rule, finally returning the default value provided if no value can be found.
        """
        field_value = lookup_es_key(match, field_name)
        if field_value is None:
            field_value = self.rule.get(field_name, default)
        return field_value

    def load_observable_artifacts(self, match: dict):
        """Build TheHive observable artifacts for one match from the
        `hive_observable_data_mapping` rule option (a list of
        {observable_type: match_field} entries)."""
        artifacts = []
        for mapping in self.rule.get('hive_observable_data_mapping', []):
            for observable_type, mapping_key in mapping.items():
                data = self.lookup_field(match, mapping_key, '')
                artifact = {'tlp': 2,
                            'tags': [],
                            'message': None,
                            'dataType': observable_type,
                            'data': data}
                artifacts.append(artifact)
        return artifacts

    def load_custom_fields(self, custom_fields_raw: list, match: dict):
        """Convert the raw customFields config into TheHive's
        {name: {'order': n, type: value}} shape, resolving string values
        against the match/rule."""
        custom_fields = {}
        position = 0
        for field in custom_fields_raw:
            if (isinstance(field['value'], str)):
                value = self.lookup_field(match, field['value'], field['value'])
            else:
                value = field['value']
            custom_fields[field['name']] = {'order': position, field['type']: value}
            position += 1
        return custom_fields

    def load_tags(self, tag_names: list, match: dict):
        """Resolve each configured tag against the match; a tag that resolves
        to a list contributes each of its elements."""
        tag_values = set()
        for tag in tag_names:
            tag_value = self.lookup_field(match, tag, tag)
            if isinstance(tag_value, list):
                for sub_tag in tag_value:
                    tag_values.add(sub_tag)
            else:
                tag_values.add(tag_value)
        return tag_values

    def alert(self, matches):
        """POST one aggregated alert (covering all matches) to TheHive.

        :raises EAException: if the HTTP POST fails or returns an error status.
        """
        # Build TheHive alert object, starting with some defaults, updating with any
        # user-specified config
        alert_config = {
            'artifacts': [],
            'customFields': {},
            'date': int(time.time()) * 1000,
            'description': self.create_alert_body(matches),
            'sourceRef': str(uuid.uuid4())[0:6],
            'tags': [],
            'title': self.create_title(matches),
        }
        alert_config.update(self.rule.get('hive_alert_config', {}))
        # Iterate through each match found, populating the alert tags and observables as required
        tags = set()
        artifacts = []
        for match in matches:
            artifacts = artifacts + self.load_observable_artifacts(match)
            tags.update(self.load_tags(alert_config['tags'], match))
        alert_config['artifacts'] = artifacts
        alert_config['tags'] = list(tags)
        # Populate the customFields from the first match only.
        alert_config['customFields'] = self.load_custom_fields(alert_config['customFields'],
                                                               matches[0])
        # POST the alert to TheHive
        connection_details = self.rule['hive_connection']
        api_key = connection_details.get('hive_apikey', '')
        hive_host = connection_details.get('hive_host', 'http://localhost')
        hive_port = connection_details.get('hive_port', 9000)
        proxies = connection_details.get('hive_proxies', {'http': '', 'https': ''})
        verify = connection_details.get('hive_verify', False)
        alert_body = json.dumps(alert_config, indent=4, sort_keys=True)
        req = f'{hive_host}:{hive_port}/api/alert'
        headers = {'Content-Type': 'application/json',
                   'Authorization': f'Bearer {api_key}'}
        try:
            response = requests.post(req,
                                     headers=headers,
                                     data=alert_body,
                                     proxies=proxies,
                                     verify=verify)
            response.raise_for_status()
        except RequestException as e:
            raise EAException(f"Error posting to TheHive: {e}")

    def get_info(self):
        return {
            'type': 'hivealerter',
            'hive_host': self.rule.get('hive_connection', {}).get('hive_host', '')
        }
class DiscordAlerter(Alerter):
    """ Created a Discord for each alert """
    required_options = frozenset(['discord_webhook_url'])

    def __init__(self, rule):
        super(DiscordAlerter, self).__init__(rule)
        self.discord_webhook_url = self.rule['discord_webhook_url']
        self.discord_emoji_title = self.rule.get('discord_emoji_title', ':warning:')
        self.discord_proxy = self.rule.get('discord_proxy', None)
        self.discord_proxy_login = self.rule.get('discord_proxy_login', None)
        self.discord_proxy_password = self.rule.get('discord_proxy_password', None)
        self.discord_embed_color = self.rule.get('discord_embed_color', 0xffffff)
        self.discord_embed_footer = self.rule.get('discord_embed_footer', None)
        self.discord_embed_icon_url = self.rule.get('discord_embed_icon_url', None)

    def alert(self, matches):
        """ Send one Discord webhook message embedding the alert body.

        :raises EAException: if the webhook POST fails.
        """
        body = ''
        title = u'%s' % (self.create_title(matches))
        for match in matches:
            body += str(BasicMatchString(self.rule, match))
            if len(matches) > 1:
                body += '\n----------------------------------------\n'
        # Discord embed descriptions are limited to 2048 characters; crop and
        # re-close the code fence the crop may have cut off.
        if len(body) > 2047:
            body = body[0:1950] + '\n *message was cropped according to discord embed description limits!* '
            body += '```'

        proxies = {'https': self.discord_proxy} if self.discord_proxy else None
        auth = HTTPProxyAuth(self.discord_proxy_login, self.discord_proxy_password) if self.discord_proxy_login else None
        headers = {"Content-Type": "application/json"}

        data = {}
        data["content"] = "%s %s %s" % (self.discord_emoji_title, title, self.discord_emoji_title)
        data["embeds"] = []
        embed = {}
        embed["description"] = "%s" % (body)
        embed["color"] = (self.discord_embed_color)

        if self.discord_embed_footer:
            # Fix: the former `text` ternary was redundant (footer is known
            # truthy here) and the empty `else: None` branch was a no-op.
            embed["footer"] = {
                "text": self.discord_embed_footer,
                "icon_url": self.discord_embed_icon_url if self.discord_embed_icon_url else None,
            }

        data["embeds"].append(embed)

        try:
            response = requests.post(self.discord_webhook_url, data=json.dumps(data), headers=headers, proxies=proxies, auth=auth)
            warnings.resetwarnings()
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to Discord: %s. Details: %s" % (e, "" if e.response is None else e.response.text))

        elastalert_logger.info(
            "Alert sent to the webhook %s" % self.discord_webhook_url)

    def get_info(self):
        return {'type': 'discord',
                'discord_webhook_url': self.discord_webhook_url}
class DingTalkAlerter(Alerter):
    """ Creates a DingTalk room message for each alert """
    required_options = frozenset(['dingtalk_access_token', 'dingtalk_msgtype'])

    def __init__(self, rule):
        super(DingTalkAlerter, self).__init__(rule)
        self.dingtalk_access_token = self.rule.get('dingtalk_access_token')
        self.dingtalk_webhook_url = 'https://oapi.dingtalk.com/robot/send?access_token=%s' % (self.dingtalk_access_token)
        self.dingtalk_msgtype = self.rule.get('dingtalk_msgtype')
        self.dingtalk_single_title = self.rule.get('dingtalk_single_title', 'elastalert')
        self.dingtalk_single_url = self.rule.get('dingtalk_single_url', '')
        self.dingtalk_btn_orientation = self.rule.get('dingtalk_btn_orientation', '')
        self.dingtalk_btns = self.rule.get('dingtalk_btns', [])
        self.dingtalk_proxy = self.rule.get('dingtalk_proxy', None)
        self.dingtalk_proxy_login = self.rule.get('dingtalk_proxy_login', None)
        self.dingtalk_proxy_password = self.rule.get('dingtalk_proxy_pass', None)

    def format_body(self, body):
        # DingTalk expects UTF-8 encoded content.
        return body.encode('utf8')

    def alert(self, matches):
        """ Post one message covering all matches to the DingTalk webhook.

        The payload shape depends on dingtalk_msgtype: text, markdown,
        single_action_card or action_card.

        :raises EAException: on an unsupported msgtype or a failed POST.
        """
        title = self.create_title(matches)
        body = self.create_alert_body(matches)
        proxies = {'https': self.dingtalk_proxy} if self.dingtalk_proxy else None
        auth = HTTPProxyAuth(self.dingtalk_proxy_login, self.dingtalk_proxy_password) if self.dingtalk_proxy_login else None
        headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json;charset=utf-8'
        }
        if self.dingtalk_msgtype == 'text':
            # text
            payload = {
                'msgtype': self.dingtalk_msgtype,
                'text': {
                    'content': body
                }
            }
        elif self.dingtalk_msgtype == 'markdown':
            # markdown
            payload = {
                'msgtype': self.dingtalk_msgtype,
                'markdown': {
                    'title': title,
                    'text': body
                }
            }
        elif self.dingtalk_msgtype == 'single_action_card':
            # singleActionCard
            payload = {
                'msgtype': 'actionCard',
                'actionCard': {
                    'title': title,
                    'text': body,
                    'singleTitle': self.dingtalk_single_title,
                    'singleURL': self.dingtalk_single_url
                }
            }
        elif self.dingtalk_msgtype == 'action_card':
            # actionCard
            payload = {
                'msgtype': 'actionCard',
                'actionCard': {
                    'title': title,
                    'text': body
                }
            }
            if self.dingtalk_btn_orientation != '':
                payload['actionCard']['btnOrientation'] = self.dingtalk_btn_orientation
            if self.dingtalk_btns:
                payload['actionCard']['btns'] = self.dingtalk_btns
        else:
            # Fix: an unsupported msgtype previously fell through with
            # `payload` unbound, raising a confusing NameError below.
            raise EAException("Unsupported dingtalk_msgtype: %s" % self.dingtalk_msgtype)
        try:
            response = requests.post(self.dingtalk_webhook_url, data=json.dumps(payload,
                                     cls=DateTimeEncoder), headers=headers, proxies=proxies, auth=auth)
            warnings.resetwarnings()
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to dingtalk: %s" % e)
        elastalert_logger.info("Trigger sent to dingtalk")

    def get_info(self):
        return {
            "type": "dingtalk",
            "dingtalk_webhook_url": self.dingtalk_webhook_url
        }
class ChatworkAlerter(Alerter):
    """ Creates a Chatwork room message for each alert """
    required_options = frozenset(['chatwork_apikey', 'chatwork_room_id'])

    def __init__(self, rule):
        super(ChatworkAlerter, self).__init__(rule)
        self.chatwork_apikey = self.rule.get('chatwork_apikey')
        self.chatwork_room_id = self.rule.get('chatwork_room_id')
        self.url = 'https://api.chatwork.com/v2/rooms/%s/messages' % (self.chatwork_room_id)
        self.chatwork_proxy = self.rule.get('chatwork_proxy', None)
        self.chatwork_proxy_login = self.rule.get('chatwork_proxy_login', None)
        self.chatwork_proxy_pass = self.rule.get('chatwork_proxy_pass', None)

    def alert(self, matches):
        """ Post the alert body to the configured Chatwork room.

        :raises EAException: if the HTTP POST fails.
        """
        body = self.create_alert_body(matches)
        headers = {'X-ChatWorkToken': self.chatwork_apikey}
        # set https proxy, if it was provided
        proxies = {'https': self.chatwork_proxy} if self.chatwork_proxy else None
        auth = HTTPProxyAuth(self.chatwork_proxy_login, self.chatwork_proxy_pass) if self.chatwork_proxy_login else None
        params = {'body': body}
        try:
            response = requests.post(self.url, params=params, headers=headers, proxies=proxies, auth=auth)
            warnings.resetwarnings()
            response.raise_for_status()
        except RequestException as e:
            # Fix: error message previously misspelled the service as "Chattwork".
            raise EAException("Error posting to Chatwork: %s. Details: %s" % (e, "" if e.response is None else e.response.text))
        elastalert_logger.info(
            "Alert sent to Chatwork room %s" % self.chatwork_room_id)

    def get_info(self):
        return {
            "type": "chatwork",
            "chatwork_room_id": self.chatwork_room_id
        }
class DatadogAlerter(Alerter):
    ''' Creates a Datadog Event for each alert '''
    required_options = frozenset(['datadog_api_key', 'datadog_app_key'])

    def __init__(self, rule):
        super(DatadogAlerter, self).__init__(rule)
        self.dd_api_key = self.rule.get('datadog_api_key', None)
        self.dd_app_key = self.rule.get('datadog_app_key', None)

    def alert(self, matches):
        # Datadog Events API endpoint; authentication is done purely via headers.
        url = 'https://api.datadoghq.com/api/v1/events'
        event = {
            'title': self.create_title(matches),
            'text': self.create_alert_body(matches),
        }
        auth_headers = {
            'Content-Type': 'application/json',
            'DD-API-KEY': self.dd_api_key,
            'DD-APPLICATION-KEY': self.dd_app_key,
        }
        try:
            resp = requests.post(url, data=json.dumps(event, cls=DateTimeEncoder), headers=auth_headers)
            resp.raise_for_status()
        except RequestException as e:
            raise EAException('Error posting event to Datadog: %s' % e)
        elastalert_logger.info('Alert sent to Datadog')

    def get_info(self):
        return {'type': 'datadog'}
class SesAlerter(Alerter):
    """ Sends an email alert using AWS SES """
    required_options = frozenset(['ses_email', 'ses_from_addr'])

    def __init__(self, *args):
        super(SesAlerter, self).__init__(*args)
        # Explicit credentials are optional; when None, boto3 falls back to its
        # normal credential resolution chain (env vars, shared config, role).
        self.aws_access_key_id = self.rule.get('ses_aws_access_key_id')
        self.aws_secret_access_key = self.rule.get('ses_aws_secret_access_key')
        self.aws_region = self.rule.get('ses_aws_region', 'us-east-1')
        self.aws_profile = self.rule.get('ses_aws_profile', '')
        self.from_addr = self.rule.get('ses_from_addr')
        # Convert email to a list if it isn't already
        if isinstance(self.rule['ses_email'], str):
            self.rule['ses_email'] = [self.rule['ses_email']]
        # If there is a cc then also convert it a list if it isn't
        cc = self.rule.get('ses_cc')
        if cc and isinstance(cc, str):
            self.rule['ses_cc'] = [self.rule['ses_cc']]
        # If there is a bcc then also convert it to a list if it isn't
        bcc = self.rule.get('ses_bcc')
        if bcc and isinstance(bcc, str):
            self.rule['ses_bcc'] = [self.rule['ses_bcc']]
        # If there is a email_reply_to then also convert it to a list if it isn't
        reply_to = self.rule.get('ses_email_reply_to')
        if reply_to and isinstance(reply_to, str):
            self.rule['ses_email_reply_to'] = [self.rule['ses_email_reply_to']]
        # Normalize the optional recipient domain suffix so it always starts with '@'.
        add_suffix = self.rule.get('ses_email_add_domain')
        if add_suffix and not add_suffix.startswith('@'):
            self.rule['ses_email_add_domain'] = '@' + add_suffix

    def alert(self, matches):
        """Send a single SES email covering all matches.

        Recipients default to the static ses_email list, but can be taken from
        a field of the first match when ses_email_from_field is configured
        (optionally suffixed with ses_email_add_domain when the field value
        lacks an '@').

        :param matches: list of match dicts for this alert.
        :raises EAException: if the SES send_email call fails for any reason.
        """
        body = self.create_alert_body(matches)
        to_addr = self.rule['ses_email']
        if 'ses_email_from_field' in self.rule:
            recipient = lookup_es_key(matches[0], self.rule['ses_email_from_field'])
            if isinstance(recipient, str):
                if '@' in recipient:
                    to_addr = [recipient]
                elif 'ses_email_add_domain' in self.rule:
                    to_addr = [recipient + self.rule['ses_email_add_domain']]
            elif isinstance(recipient, list):
                to_addr = recipient
                if 'ses_email_add_domain' in self.rule:
                    to_addr = [name + self.rule['ses_email_add_domain'] for name in to_addr]
        # A named profile takes precedence over explicit keys.
        if self.aws_profile != '':
            session = boto3.Session(profile_name=self.aws_profile)
        else:
            session = boto3.Session(
                aws_access_key_id=self.aws_access_key_id,
                aws_secret_access_key=self.aws_secret_access_key,
                region_name=self.aws_region
            )
        client = session.client('ses')
        try:
            client.send_email(
                Source=self.from_addr,
                Destination={
                    'ToAddresses': to_addr,
                    'CcAddresses': self.rule.get('ses_cc', []),
                    'BccAddresses': self.rule.get('ses_bcc', [])
                },
                Message={
                    'Subject': {
                        'Charset': 'UTF-8',
                        'Data': self.create_title(matches),
                    },
                    'Body': {
                        'Text': {
                            'Charset': 'UTF-8',
                            'Data': body,
                        }
                    }
                },
                ReplyToAddresses=self.rule.get('ses_email_reply_to', []))
        except Exception as e:
            raise EAException("Error sending ses: %s" % (e,))
        elastalert_logger.info("Sent ses to %s" % (to_addr,))

    def create_default_title(self, matches):
        """Build the default subject: rule name, plus the query_key value of the
        first match when available."""
        subject = 'ElastAlert 2: %s' % (self.rule['name'])
        # If the rule has a query_key, add that value plus timestamp to subject
        if 'query_key' in self.rule:
            qk = matches[0].get(self.rule['query_key'])
            if qk:
                subject += ' - %s' % (qk)
        return subject

    def get_info(self):
        return {'type': 'ses',
                'recipients': self.rule['ses_email']}
| 44.425643 | 138 | 0.607567 |
import copy
import datetime
import json
import os
import subprocess
import sys
import time
import uuid
import warnings
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage
from email.utils import formatdate
from smtplib import SMTP
from smtplib import SMTP_SSL
from smtplib import SMTPAuthenticationError
from smtplib import SMTPException
from socket import error
import boto3
import requests
import stomp
from exotel import Exotel
from jira.client import JIRA
from jira.exceptions import JIRAError
from requests.auth import HTTPProxyAuth
from requests.exceptions import RequestException
from staticconf.loader import yaml_loader
from texttable import Texttable
from twilio.base.exceptions import TwilioRestException
from twilio.rest import Client as TwilioClient
from .util import EAException
from .util import elastalert_logger
from .util import lookup_es_key
from .util import pretty_ts
from .util import resolve_string
from .util import ts_now
from .util import ts_to_dt
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetime-like objects via their isoformat() method."""

    def default(self, obj):
        # Anything exposing isoformat() (datetime, date, time) becomes an ISO string;
        # everything else falls through to the stock encoder (which raises TypeError).
        iso = getattr(obj, 'isoformat', None)
        if iso is not None:
            return iso()
        return json.JSONEncoder.default(self, obj)
class BasicMatchString(object):
    """Renders a rule's match as human-readable alert text via str()."""

    def __init__(self, rule, match):
        self.rule = rule
        self.match = match

    def _ensure_new_line(self):
        # Guarantee the accumulated text ends with exactly a blank line.
        while self.text[-2:] != '\n\n':
            self.text += '\n'

    def _add_custom_alert_text(self):
        """Append the rule's alert_text, rendered via jinja, positional args,
        or keyword args depending on configuration."""
        missing = self.rule.get('alert_missing_value', '<MISSING VALUE>')
        alert_text = str(self.rule.get('alert_text', ''))
        if 'alert_text_jinja' == self.rule.get('alert_text_type'):
            # Match fields are available both at top level and under jinja_root_name.
            alert_text = self.rule.get("jinja_template").render(**self.match,
                                                                **{self.rule['jinja_root_name']: self.match})
        elif 'alert_text_args' in self.rule:
            alert_text_args = self.rule.get('alert_text_args')
            alert_text_values = [lookup_es_key(self.match, arg) for arg in alert_text_args]
            # Fall back to top-level rule properties for args missing from the match.
            for i, text_value in enumerate(alert_text_values):
                if text_value is None:
                    alert_value = self.rule.get(alert_text_args[i])
                    if alert_value:
                        alert_text_values[i] = alert_value
            alert_text_values = [missing if val is None else val for val in alert_text_values]
            alert_text = alert_text.format(*alert_text_values)
        elif 'alert_text_kw' in self.rule:
            kw = {}
            for name, kw_name in list(self.rule.get('alert_text_kw').items()):
                val = lookup_es_key(self.match, name)
                if val is None:
                    val = self.rule.get(name)
                kw[kw_name] = missing if val is None else val
            alert_text = alert_text.format(**kw)
        self.text += alert_text

    def _add_rule_text(self):
        # Delegates the rule-type-specific summary to the rule's type object.
        self.text += self.rule['type'].get_match_str(self.match)

    def _add_top_counts(self):
        """Append the top_events_* term counts gathered by top_count_keys."""
        for key, counts in list(self.match.items()):
            if key.startswith('top_events_'):
                self.text += '%s:\n' % (key[11:])
                top_events = list(counts.items())
                if not top_events:
                    self.text += 'No events found.\n'
                else:
                    top_events.sort(key=lambda x: x[1], reverse=True)
                    for term, count in top_events:
                        self.text += '%s: %s\n' % (term, count)
                self.text += '\n'

    def _add_match_items(self):
        """Append each match field as 'key: value', sorted by key."""
        match_items = list(self.match.items())
        match_items.sort(key=lambda x: x[0])
        for key, value in match_items:
            if key.startswith('top_events_'):
                continue
            value_str = str(value)
            # Fix: str.replace returns a new string; previously the result was
            # discarded, leaving literal '\n' sequences in the alert text.
            value_str = value_str.replace('\\n', '\n')
            if type(value) in [list, dict]:
                try:
                    value_str = self._pretty_print_as_json(value)
                except TypeError:
                    # Non-serializable values fall back to their str() form.
                    pass
            self.text += '%s: %s\n' % (key, value_str)

    def _pretty_print_as_json(self, blob):
        # Fix: the old Python 2 fallback re-called dumps with encoding='Latin-1',
        # a keyword Python 3's json.dumps() rejects with TypeError, so that dead
        # branch is removed.
        return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, ensure_ascii=False)

    def __str__(self):
        self.text = ''
        if 'alert_text' not in self.rule:
            self.text += self.rule['name'] + '\n\n'
        self._add_custom_alert_text()
        self._ensure_new_line()
        if self.rule.get('alert_text_type') != 'alert_text_only' and self.rule.get('alert_text_type') != 'alert_text_jinja':
            self._add_rule_text()
            self._ensure_new_line()
            if self.rule.get('top_count_keys'):
                self._add_top_counts()
            if self.rule.get('alert_text_type') != 'exclude_fields':
                self._add_match_items()
        return self.text
class JiraFormattedMatchString(BasicMatchString):
    """BasicMatchString variant that renders the match fields inside a JIRA {code} block."""

    def _add_match_items(self):
        # Drop the synthetic top_events_* aggregation keys before serializing.
        fields = {key: val for key, val in self.match.items()
                  if not key.startswith('top_events_')}
        rendered = self._pretty_print_as_json(fields)
        self.text += '{{code}}{0}{{code}}'.format(rendered)
class Alerter(object):
    """Base class for all alerters.

    Subclasses implement alert(matches) to deliver notifications and override
    required_options so missing configuration can be detected up front.
    """
    required_options = frozenset([])

    def __init__(self, rule):
        self.rule = rule
        # pipeline object is created by ElastAlerter.send_alert()
        # and attached to each alerters used by a rule before calling alert()
        self.pipeline = None
        self.resolve_rule_references(self.rule)

    def resolve_rule_references(self, root):
        """Recursively replace '$name$' references in lists/dicts of the rule
        with the corresponding top-level rule property values (in place)."""
        # Support referencing other top-level rule properties to avoid redundant copy/paste
        if type(root) == list:
            # Make a copy since we may be modifying the contents of the structure we're walking
            for i, item in enumerate(copy.copy(root)):
                if type(item) == dict or type(item) == list:
                    self.resolve_rule_references(root[i])
                else:
                    root[i] = self.resolve_rule_reference(item)
        elif type(root) == dict:
            for key, value in root.copy().items():
                if type(value) == dict or type(value) == list:
                    self.resolve_rule_references(root[key])
                else:
                    root[key] = self.resolve_rule_reference(value)

    def resolve_rule_reference(self, value):
        """Return the referenced rule property for a '$name$' scalar, coercing
        to int when the original value was an int; otherwise return value unchanged."""
        strValue = str(value)
        if strValue.startswith('$') and strValue.endswith('$') and strValue[1:-1] in self.rule:
            if type(value) == int:
                return int(self.rule[strValue[1:-1]])
            else:
                return self.rule[strValue[1:-1]]
        else:
            return value

    def alert(self, match):
        # Subclasses must implement the actual delivery.
        raise NotImplementedError()

    def get_info(self):
        """Return a dict describing this alerter for alert metadata/logging."""
        return {'type': 'Unknown'}

    def create_title(self, matches):
        """Return the alert title: the custom alert_subject when configured,
        otherwise the subclass's default title."""
        if 'alert_subject' in self.rule:
            return self.create_custom_title(matches)
        return self.create_default_title(matches)

    def create_custom_title(self, matches):
        """Format alert_subject with alert_subject_args values, truncated to
        alert_subject_max_len characters."""
        alert_subject = str(self.rule['alert_subject'])
        alert_subject_max_len = int(self.rule.get('alert_subject_max_len', 2048))
        if 'alert_subject_args' in self.rule:
            alert_subject_args = self.rule['alert_subject_args']
            alert_subject_values = [lookup_es_key(matches[0], arg) for arg in alert_subject_args]
            # Support referencing other top-level rule properties
            # This technically may not work if there is a top-level rule property with the same name
            # as an es result key, since it would have been matched in the lookup_es_key call above
            for i, subject_value in enumerate(alert_subject_values):
                if subject_value is None:
                    alert_value = self.rule.get(alert_subject_args[i])
                    if alert_value:
                        alert_subject_values[i] = alert_value
            missing = self.rule.get('alert_missing_value', '<MISSING VALUE>')
            alert_subject_values = [missing if val is None else val for val in alert_subject_values]
            alert_subject = alert_subject.format(*alert_subject_values)
        if len(alert_subject) > alert_subject_max_len:
            alert_subject = alert_subject[:alert_subject_max_len]
        return alert_subject

    def create_alert_body(self, matches):
        """Return the alert body: aggregation summary (if any) followed by the
        rendered text of each match, unless aggregation_summary_only is set."""
        body = self.get_aggregation_summary_text(matches)
        if self.rule.get('alert_text_type') != 'aggregation_summary_only':
            for match in matches:
                body += str(BasicMatchString(self.rule, match))
                # Separate text of aggregated alerts with dashes
                if len(matches) > 1:
                    body += '\n----------------------------------------\n'
        return body

    def get_aggregation_summary_text__maximum_width(self):
        # Maximum rendered width of the aggregation summary table.
        return 80

    def get_aggregation_summary_text(self, matches):
        """Render an ASCII table counting matches per unique combination of
        summary_table_fields; empty string unless both 'aggregation' and
        'summary_table_fields' are configured."""
        text = ''
        if 'aggregation' in self.rule and 'summary_table_fields' in self.rule:
            text = self.rule.get('summary_prefix', '')
            summary_table_fields = self.rule['summary_table_fields']
            if not isinstance(summary_table_fields, list):
                summary_table_fields = [summary_table_fields]
            # Include a count aggregation so that we can see at a glance how many of each aggregation_key were encountered
            summary_table_fields_with_count = summary_table_fields + ['count']
            text += "Aggregation resulted in the following data for summary_table_fields ==> {0}:\n\n".format(
                summary_table_fields_with_count
            )
            text_table = Texttable(max_width=self.get_aggregation_summary_text__maximum_width())
            text_table.header(summary_table_fields_with_count)
            # Format all fields as 'text' to avoid long numbers being shown as scientific notation
            text_table.set_cols_dtype(['t' for i in summary_table_fields_with_count])
            match_aggregation = {}
            # Maintain an aggregate count for each unique key encountered in the aggregation period
            for match in matches:
                key_tuple = tuple([str(lookup_es_key(match, key)) for key in summary_table_fields])
                if key_tuple not in match_aggregation:
                    match_aggregation[key_tuple] = 1
                else:
                    match_aggregation[key_tuple] = match_aggregation[key_tuple] + 1
            for keys, count in match_aggregation.items():
                text_table.add_row([key for key in keys] + [count])
            text += text_table.draw() + '\n\n'
            # NOTE(review): this appends summary_prefix a second time; a
            # 'summary_suffix' option looks intended here — confirm.
            text += self.rule.get('summary_prefix', '')
        return str(text)

    def create_default_title(self, matches):
        # Default title is simply the rule name; subclasses may override.
        return self.rule['name']

    def get_account(self, account_file):
        """Load user/password credentials from a YAML account file (absolute
        path, or relative to the rule file's directory) into self.user and
        self.password.

        :raises EAException: if the file lacks user or password fields.
        """
        if os.path.isabs(account_file):
            account_file_path = account_file
        else:
            account_file_path = os.path.join(os.path.dirname(self.rule['rule_file']), account_file)
        account_conf = yaml_loader(account_file_path)
        if 'user' not in account_conf or 'password' not in account_conf:
            raise EAException('Account file must have user and password fields')
        self.user = account_conf['user']
        self.password = account_conf['password']
class StompAlerter(Alerter):
    """Sends each alert as a JSON message to a STOMP message broker."""
    required_options = frozenset(
        ['stomp_hostname', 'stomp_hostport', 'stomp_login', 'stomp_password'])

    def alert(self, matches):
        """Publish one aggregated JSON message describing all matches.

        Builds a human-readable line per match (keyed by query_key when set),
        then sends a single message to the configured STOMP destination.
        """
        alerts = []
        qk = self.rule.get('query_key', None)
        fullmessage = {}
        for match in matches:
            if qk is not None:
                resmatch = lookup_es_key(match, qk)
            else:
                resmatch = None
            if resmatch is not None:
                elastalert_logger.info(
                    'Alert for %s, %s at %s:' % (self.rule['name'], resmatch, lookup_es_key(match, self.rule['timestamp_field'])))
                alerts.append(
                    'Alert for %s, %s at %s:' % (self.rule['name'], resmatch, lookup_es_key(
                        match, self.rule['timestamp_field']))
                )
                fullmessage['match'] = resmatch
            else:
                elastalert_logger.info('Rule %s generated an alert at %s:' % (
                    self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])))
                alerts.append(
                    'Rule %s generated an alert at %s:' % (self.rule['name'], lookup_es_key(
                        match, self.rule['timestamp_field']))
                )
                fullmessage['match'] = lookup_es_key(
                    match, self.rule['timestamp_field'])
            elastalert_logger.info(str(BasicMatchString(self.rule, match)))
        fullmessage['alerts'] = alerts
        fullmessage['rule'] = self.rule['name']
        fullmessage['rule_file'] = self.rule['rule_file']
        # NOTE(review): 'match' above and 'matching' here only reflect the LAST
        # match of the loop — presumably intentional since all matches are in
        # 'matches', but confirm.
        fullmessage['matching'] = str(BasicMatchString(self.rule, match))
        fullmessage['alertDate'] = datetime.datetime.now(
        ).strftime("%Y-%m-%d %H:%M:%S")
        fullmessage['body'] = self.create_alert_body(matches)
        fullmessage['matches'] = matches
        # Connection settings all have local defaults suitable for a dev broker.
        self.stomp_hostname = self.rule.get('stomp_hostname', 'localhost')
        self.stomp_hostport = self.rule.get('stomp_hostport', '61613')
        self.stomp_login = self.rule.get('stomp_login', 'admin')
        self.stomp_password = self.rule.get('stomp_password', 'admin')
        self.stomp_destination = self.rule.get(
            'stomp_destination', '/queue/ALERT')
        self.stomp_ssl = self.rule.get('stomp_ssl', False)
        conn = stomp.Connection([(self.stomp_hostname, self.stomp_hostport)], use_ssl=self.stomp_ssl)
        conn.connect(self.stomp_login, self.stomp_password)
        # Ensures that the CONNECTED frame is received otherwise, the disconnect call will fail.
        time.sleep(1)
        conn.send(self.stomp_destination, json.dumps(fullmessage))
        conn.disconnect()

    def get_info(self):
        return {'type': 'stomp'}
class DebugAlerter(Alerter):
    """Writes alert details to the elastalert log instead of notifying anyone."""

    def alert(self, matches):
        qk = self.rule.get('query_key', None)
        for match in matches:
            ts = lookup_es_key(match, self.rule['timestamp_field'])
            if qk in match:
                elastalert_logger.info(
                    'Alert for %s, %s at %s:' % (self.rule['name'], match[qk], ts))
            else:
                elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], ts))
            elastalert_logger.info(str(BasicMatchString(self.rule, match)))

    def get_info(self):
        return {'type': 'debug'}
class EmailAlerter(Alerter):
    """Sends an email (plain text or HTML with inline images) for each alert."""
    required_options = frozenset(['email'])

    def __init__(self, *args):
        super(EmailAlerter, self).__init__(*args)
        self.assets_dir = self.rule.get('assets_dir', '/tmp')
        # Maps HTML Content-ID keys to image filenames under assets_dir.
        self.images_dictionary = dict(zip(self.rule.get('email_image_keys', []), self.rule.get('email_image_values', [])))
        self.smtp_host = self.rule.get('smtp_host', 'localhost')
        self.smtp_ssl = self.rule.get('smtp_ssl', False)
        self.from_addr = self.rule.get('from_addr', 'ElastAlert')
        self.smtp_port = self.rule.get('smtp_port', 25)
        if self.rule.get('smtp_auth_file'):
            self.get_account(self.rule['smtp_auth_file'])
        self.smtp_key_file = self.rule.get('smtp_key_file')
        self.smtp_cert_file = self.rule.get('smtp_cert_file')
        # Convert email to a list if it isn't already
        if isinstance(self.rule['email'], str):
            self.rule['email'] = [self.rule['email']]
        # If there is a cc then also convert it to a list if it isn't
        cc = self.rule.get('cc')
        if cc and isinstance(cc, str):
            self.rule['cc'] = [self.rule['cc']]
        # If there is a bcc then also convert it to a list if it isn't
        bcc = self.rule.get('bcc')
        if bcc and isinstance(bcc, str):
            self.rule['bcc'] = [self.rule['bcc']]
        # Normalize the optional recipient domain suffix so it always starts with '@'.
        add_suffix = self.rule.get('email_add_domain')
        if add_suffix and not add_suffix.startswith('@'):
            self.rule['email_add_domain'] = '@' + add_suffix

    def alert(self, matches):
        """Build and send one email covering all matches.

        Recipients default to the static email list, but can come from a field
        of the first match when email_from_field is configured.

        :raises EAException: on SMTP connection or authentication failure.
        """
        body = self.create_alert_body(matches)
        # Append a JIRA ticket link when a JiraAlerter ran earlier in the pipeline.
        if self.pipeline is not None and 'jira_ticket' in self.pipeline:
            url = '%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket'])
            body += '\nJIRA ticket: %s' % (url)
        to_addr = self.rule['email']
        if 'email_from_field' in self.rule:
            recipient = lookup_es_key(matches[0], self.rule['email_from_field'])
            if isinstance(recipient, str):
                if '@' in recipient:
                    to_addr = [recipient]
                elif 'email_add_domain' in self.rule:
                    to_addr = [recipient + self.rule['email_add_domain']]
            elif isinstance(recipient, list):
                to_addr = recipient
                if 'email_add_domain' in self.rule:
                    to_addr = [name + self.rule['email_add_domain'] for name in to_addr]
        if self.rule.get('email_format') == 'html':
            email_msg = MIMEMultipart()
            msgText = MIMEText(body, 'html', _charset='UTF-8')
            email_msg.attach(msgText)
            for image_key in self.images_dictionary:
                # Fix: use a context manager so the file handle is closed even
                # if reading the image fails.
                with open(os.path.join(self.assets_dir, self.images_dictionary[image_key]), 'rb') as fp:
                    img = MIMEImage(fp.read())
                img.add_header('Content-ID', '<{}>'.format(image_key))
                email_msg.attach(img)
        else:
            email_msg = MIMEText(body, _charset='UTF-8')
        email_msg['Subject'] = self.create_title(matches)
        email_msg['To'] = ', '.join(to_addr)
        email_msg['From'] = self.from_addr
        email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To'])
        email_msg['Date'] = formatdate()
        if self.rule.get('cc'):
            email_msg['CC'] = ','.join(self.rule['cc'])
            to_addr = to_addr + self.rule['cc']
        if self.rule.get('bcc'):
            to_addr = to_addr + self.rule['bcc']
        try:
            if self.smtp_ssl:
                if self.smtp_port:
                    self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file)
                else:
                    self.smtp = SMTP_SSL(self.smtp_host, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file)
            else:
                if self.smtp_port:
                    self.smtp = SMTP(self.smtp_host, self.smtp_port)
                else:
                    self.smtp = SMTP(self.smtp_host)
                self.smtp.ehlo()
                # Opportunistically upgrade the plain connection when the server supports it.
                if self.smtp.has_extn('STARTTLS'):
                    self.smtp.starttls(keyfile=self.smtp_key_file, certfile=self.smtp_cert_file)
            if 'smtp_auth_file' in self.rule:
                self.smtp.login(self.user, self.password)
        except SMTPAuthenticationError as e:
            # Fix: this clause must come before the SMTPException handler —
            # SMTPAuthenticationError subclasses SMTPException, so it was
            # previously unreachable.
            raise EAException("SMTP username/password rejected: %s" % (e))
        except (SMTPException, error) as e:
            raise EAException("Error connecting to SMTP host: %s" % (e))
        self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string())
        self.smtp.quit()
        elastalert_logger.info("Sent email to %s" % (to_addr))

    def create_default_title(self, matches):
        """Build the default subject: rule name, plus the query_key value of
        the first match when available."""
        subject = 'ElastAlert: %s' % (self.rule['name'])
        # If the rule has a query_key, add that value plus timestamp to subject
        if 'query_key' in self.rule:
            qk = matches[0].get(self.rule['query_key'])
            if qk:
                subject += ' - %s' % (qk)
        return subject

    def get_info(self):
        return {'type': 'email',
                'recipients': self.rule['email']}
class JiraAlerter(Alerter):
    """ Creates a JIRA ticket for each alert.

    Supports commenting on ("bumping") a matching existing ticket instead of
    opening a duplicate, transitioning bumped tickets, watchers, priorities,
    and arbitrary JIRA fields supplied as extra ``jira_*`` rule options.
    """
    required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype'])

    # Static list of built-in rule options so that arbitrary jira_* options
    # can be told apart from the ones this alerter handles itself.
    known_field_list = [
        'jira_account_file',
        'jira_assignee',
        'jira_bump_after_inactivity',
        'jira_bump_in_statuses',
        'jira_bump_not_in_statuses',
        'jira_bump_only',
        'jira_bump_tickets',
        'jira_component',
        'jira_components',
        'jira_description',
        'jira_ignore_in_title',
        'jira_issuetype',
        'jira_label',
        'jira_labels',
        'jira_max_age',
        'jira_priority',
        'jira_project',
        'jira_server',
        'jira_transition_to',
        'jira_watchers',
    ]

    # These custom string field types must be submitted as {'value': ...}
    # objects rather than plain strings (see set_jira_arg).
    custom_string_types_with_special_handling = [
        'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes',
        'com.atlassian.jira.plugin.system.customfieldtypes:multiselect',
        'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons',
    ]

    def __init__(self, rule):
        super(JiraAlerter, self).__init__(rule)
        self.server = self.rule['jira_server']
        self.get_account(self.rule['jira_account_file'])
        self.project = self.rule['jira_project']
        self.issue_type = self.rule['jira_issuetype']

        # jira_* option values prefixed with '#' refer to match fields and can
        # only be resolved at alert time; they are collected here (see
        # get_arbitrary_fields) and resolved in alert().
        self.deferred_settings = []

        # 'jira_component' / 'jira_label' are the legacy single-value options;
        # the plural forms take precedence when both are present.
        self.components = self.rule.get('jira_components', self.rule.get('jira_component'))
        self.labels = self.rule.get('jira_labels', self.rule.get('jira_label'))

        self.description = self.rule.get('jira_description', '')
        self.assignee = self.rule.get('jira_assignee')
        self.max_age = self.rule.get('jira_max_age', 30)
        self.priority = self.rule.get('jira_priority')
        self.bump_tickets = self.rule.get('jira_bump_tickets', False)
        self.bump_not_in_statuses = self.rule.get('jira_bump_not_in_statuses')
        self.bump_in_statuses = self.rule.get('jira_bump_in_statuses')
        self.bump_after_inactivity = self.rule.get('jira_bump_after_inactivity', 0)
        self.bump_only = self.rule.get('jira_bump_only', False)
        self.transition = self.rule.get('jira_transition_to', False)
        self.watchers = self.rule.get('jira_watchers')
        self.client = None

        if self.bump_in_statuses and self.bump_not_in_statuses:
            msg = 'Both jira_bump_in_statuses (%s) and jira_bump_not_in_statuses (%s) are set.' % \
                  (','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses))
            # BUG FIX: previously this intersected bump_in_statuses with
            # itself, so every configured status was reported as "common".
            intersection = list(set(self.bump_in_statuses) & set(self.bump_not_in_statuses))
            if intersection:
                msg = '%s Both have common statuses of (%s). As such, no tickets will ever be found.' % (
                    msg, ','.join(intersection))
            msg += ' This should be simplified to use only one or the other.'
            elastalert_logger.warning(msg)

        self.reset_jira_args()

        try:
            self.client = JIRA(self.server, basic_auth=(self.user, self.password))
            self.get_priorities()
            self.jira_fields = self.client.fields()
            self.get_arbitrary_fields()
        except JIRAError as e:
            # JIRAError may contain HTML; pass along only the first 1024 chars
            raise EAException("Error connecting to JIRA: %s" % (str(e)[:1024])).with_traceback(sys.exc_info()[2])

        self.set_priority()

    def set_priority(self):
        """Translate the configured priority index into a JIRA priority id."""
        try:
            if self.priority is not None and self.client is not None:
                self.jira_args['priority'] = {'id': self.priority_ids[self.priority]}
        except KeyError:
            elastalert_logger.error("Priority %s not found. Valid priorities are %s" % (self.priority, list(self.priority_ids.keys())))

    def reset_jira_args(self):
        """Rebuild self.jira_args from the static rule configuration."""
        self.jira_args = {'project': {'key': self.project},
                          'issuetype': {'name': self.issue_type}}

        if self.components:
            # Support single-component or multi-component named configuration
            if type(self.components) != list:
                self.jira_args['components'] = [{'name': self.components}]
            else:
                self.jira_args['components'] = [{'name': component} for component in self.components]
        if self.labels:
            # Support single-label or multi-label named configuration
            if type(self.labels) != list:
                self.labels = [self.labels]
            self.jira_args['labels'] = self.labels
        if self.watchers:
            # Watchers are added after ticket creation, but normalize to a list here
            if type(self.watchers) != list:
                self.watchers = [self.watchers]
        if self.assignee:
            self.jira_args['assignee'] = {'name': self.assignee}

        self.set_priority()

    def set_jira_arg(self, jira_field, value, fields):
        """Resolve an arbitrary ``jira_*`` rule option against the server's
        field definitions and store a correctly-typed value in jira_args.

        Raises Exception if the field cannot be found or has no usable schema.
        """
        # Normalize "jira_my_field" -> "my field" for matching against the
        # server-side field name or id.
        normalized_jira_field = jira_field[5:].replace('_', ' ').lower()
        # Try to match the normalized field name first by name, then by id.
        for identifier in ['name', 'id']:
            field = next((f for f in fields if normalized_jira_field == f[identifier].replace('_', ' ').lower()), None)
            if field:
                break
        if not field:
            # OR raise and fail to load the alert entirely? Probably the latter...
            raise Exception("Could not find a definition for the jira field '{0}'".format(normalized_jira_field))
        arg_name = field['id']
        # Check the schema information to decide how to set the value correctly
        # If the schema information is not available, raise an exception since we don't know how to set it
        # BUG FIX: this condition used 'or', so a field without a 'schema' key
        # raised a bare KeyError on the next access instead of this message.
        if not ('schema' in field and 'type' in field['schema']):
            raise Exception("Could not determine schema information for the jira field '{0}'".format(normalized_jira_field))
        arg_type = field['schema']['type']

        # Array types wrap each element per the item schema.
        if arg_type == 'array':
            if type(value) != list:
                value = [value]
            array_items = field['schema']['items']
            if array_items in ['string', 'date', 'datetime']:
                # Custom string types (checkboxes/selects) expect value objects
                if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling:
                    self.jira_args[arg_name] = [{'value': v} for v in value]
                else:
                    self.jira_args[arg_name] = value
            elif array_items == 'number':
                self.jira_args[arg_name] = [int(v) for v in value]
            elif array_items == 'option':
                self.jira_args[arg_name] = [{'value': v} for v in value]
            else:
                # Fall back to a 'name' object for other (entity-like) items
                self.jira_args[arg_name] = [{'name': v} for v in value]
        else:
            if arg_type in ['string', 'date', 'datetime']:
                if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling:
                    self.jira_args[arg_name] = {'value': value}
                else:
                    self.jira_args[arg_name] = value
            elif arg_type == 'number':
                self.jira_args[arg_name] = int(value)
            elif arg_type == 'option':
                self.jira_args[arg_name] = {'value': value}
            else:
                self.jira_args[arg_name] = {'name': value}

    def get_arbitrary_fields(self):
        """Process any jira_* rule options outside known_field_list."""
        # Clear any previously resolved arbitrary fields
        self.reset_jira_args()

        for jira_field, value in self.rule.items():
            # An unknown jira_ field is either a built-in JIRA field we don't
            # track, or a custom field a JIRA admin has configured.
            # NOTE: the original source was corrupted here (the '#' literals
            # were truncated); values starting with '#' name a match field and
            # are deferred to alert time — see alert(), which strips the
            # leading character via self.rule[jira_field][1:].
            if jira_field.startswith('jira_') and jira_field not in self.known_field_list and str(value)[:1] != '#':
                self.set_jira_arg(jira_field, value, self.jira_fields)
            if jira_field.startswith('jira_') and jira_field not in self.known_field_list and str(value)[:1] == '#':
                self.deferred_settings.append(jira_field)

    def get_priorities(self):
        """Create a mapping of priority index to JIRA priority id."""
        priorities = self.client.priorities()
        self.priority_ids = {}
        for x in range(len(priorities)):
            self.priority_ids[x] = priorities[x].id

    def set_assignee(self, assignee):
        """Set or clear the ticket assignee in jira_args."""
        self.assignee = assignee
        if assignee:
            self.jira_args['assignee'] = {'name': assignee}
        elif 'assignee' in self.jira_args:
            self.jira_args.pop('assignee')

    def find_existing_ticket(self, matches):
        """Search for a recent ticket with a matching summary, or None."""
        # Default title, get stripped search version
        if 'alert_subject' not in self.rule:
            title = self.create_default_title(matches, True)
        else:
            title = self.create_title(matches)

        if 'jira_ignore_in_title' in self.rule:
            title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '')

        # This is necessary for search to work. Other special characters and
        # dashes directly adjacent to words appear to be ok
        title = title.replace(' - ', ' ')
        title = title.replace('\\', '\\\\')

        date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d')
        jql = 'project=%s AND summary~"%s" and created >= "%s"' % (self.project, title, date)
        if self.bump_in_statuses:
            jql = '%s and status in (%s)' % (jql, ','.join(["\"%s\"" % status if ' ' in status else status for status
                                                            in self.bump_in_statuses]))
        if self.bump_not_in_statuses:
            jql = '%s and status not in (%s)' % (jql, ','.join(["\"%s\"" % status if ' ' in status else status
                                                                for status in self.bump_not_in_statuses]))
        try:
            issues = self.client.search_issues(jql)
        except JIRAError as e:
            elastalert_logger.exception("Error while searching for JIRA ticket using jql '%s': %s" % (jql, e))
            return None

        if len(issues):
            return issues[0]

    def comment_on_ticket(self, ticket, match):
        """Add a comment containing the formatted match to an existing ticket."""
        text = str(JiraFormattedMatchString(self.rule, match))
        timestamp = pretty_ts(lookup_es_key(match, self.rule['timestamp_field']))
        comment = "This alert was triggered again at %s\n%s" % (timestamp, text)
        self.client.add_comment(ticket, comment)

    def transition_ticket(self, ticket):
        """Move the ticket through the workflow transition named in the rule."""
        transitions = self.client.transitions(ticket)
        for t in transitions:
            if t['name'] == self.transition:
                self.client.transition_issue(ticket, t['id'])

    def alert(self, matches):
        # Reset arbitrary fields to pick up changes
        self.get_arbitrary_fields()
        # Resolve '#'-deferred fields against the first match
        if len(self.deferred_settings) > 0:
            fields = self.client.fields()
            for jira_field in self.deferred_settings:
                value = lookup_es_key(matches[0], self.rule[jira_field][1:])
                self.set_jira_arg(jira_field, value, fields)

        title = self.create_title(matches)

        if self.bump_tickets:
            ticket = self.find_existing_ticket(matches)
            if ticket:
                # Only bump tickets that have been inactive long enough
                inactivity_datetime = ts_now() - datetime.timedelta(days=self.bump_after_inactivity)
                if ts_to_dt(ticket.fields.updated) >= inactivity_datetime:
                    if self.pipeline is not None:
                        self.pipeline['jira_ticket'] = None
                        self.pipeline['jira_server'] = self.server
                    return None
                elastalert_logger.info('Commenting on existing ticket %s' % (ticket.key))
                for match in matches:
                    try:
                        self.comment_on_ticket(ticket, match)
                    except JIRAError as e:
                        elastalert_logger.exception("Error while commenting on ticket %s: %s" % (ticket, e))
                    if self.labels:
                        for label in self.labels:
                            try:
                                ticket.fields.labels.append(label)
                            except JIRAError as e:
                                elastalert_logger.exception("Error while appending labels to ticket %s: %s" % (ticket, e))
                if self.transition:
                    elastalert_logger.info('Transitioning existing ticket %s' % (ticket.key))
                    try:
                        self.transition_ticket(ticket)
                    except JIRAError as e:
                        elastalert_logger.exception("Error while transitioning ticket %s: %s" % (ticket, e))

                if self.pipeline is not None:
                    self.pipeline['jira_ticket'] = ticket
                    self.pipeline['jira_server'] = self.server
                return None
        if self.bump_only:
            return None

        self.jira_args['summary'] = title
        self.jira_args['description'] = self.create_alert_body(matches)

        try:
            self.issue = self.client.create_issue(**self.jira_args)

            # You can not add watchers on initial creation. Only as a follow-up action
            if self.watchers:
                for watcher in self.watchers:
                    try:
                        self.client.add_watcher(self.issue.key, watcher)
                    except Exception as ex:
                        # Re-raise the exception, preserve the stack-trace, and give some
                        # context as to which watcher failed to be added
                        raise Exception(
                            "Exception encountered when trying to add '{0}' as a watcher. Does the user exist?\n{1}" .format(
                                watcher,
                                ex
                            )).with_traceback(sys.exc_info()[2])

        except JIRAError as e:
            raise EAException("Error creating JIRA ticket using jira_args (%s): %s" % (self.jira_args, e))
        elastalert_logger.info("Opened Jira ticket: %s" % (self.issue))

        if self.pipeline is not None:
            self.pipeline['jira_ticket'] = self.issue
            self.pipeline['jira_server'] = self.server

    def create_alert_body(self, matches):
        """Build the ticket description from the rule description and matches."""
        body = self.description + '\n'
        body += self.get_aggregation_summary_text(matches)
        if self.rule.get('alert_text_type') != 'aggregation_summary_only':
            for match in matches:
                body += str(JiraFormattedMatchString(self.rule, match))
                if len(matches) > 1:
                    body += '\n----------------------------------------\n'
        return body

    def get_aggregation_summary_text(self, matches):
        """Wrap the aggregation summary in JIRA {noformat} markup."""
        text = super(JiraAlerter, self).get_aggregation_summary_text(matches)
        if text:
            text = '{{noformat}}{0}{{noformat}}'.format(text)
        return text

    def create_default_title(self, matches, for_search=False):
        """Default ticket summary; timestamp/count are omitted when searching."""
        # If there is a query_key, use that in the title
        if 'query_key' in self.rule and lookup_es_key(matches[0], self.rule['query_key']):
            title = 'ElastAlert: %s matched %s' % (lookup_es_key(matches[0], self.rule['query_key']), self.rule['name'])
        else:
            title = 'ElastAlert: %s' % (self.rule['name'])

        if for_search:
            return title

        timestamp = matches[0].get(self.rule['timestamp_field'])
        if timestamp:
            title += ' - %s' % (pretty_ts(timestamp, self.rule.get('use_local_time')))

        # Add count for spikes
        count = matches[0].get('spike_count')
        if count:
            title += ' - %s+ events' % (count)

        return title

    def get_info(self):
        return {'type': 'jira'}
class CommandAlerter(Alerter):
    """Runs a local command for every alert, optionally piping the matches
    (as JSON) or the rendered alert text to its stdin."""
    required_options = set(['command'])

    def __init__(self, *args):
        super(CommandAlerter, self).__init__(*args)
        self.last_command = []
        self.shell = False
        # A plain string command runs through the shell; warn when it contains
        # '%' since interpolated match values may carry shell metacharacters.
        if isinstance(self.rule['command'], str):
            self.shell = True
            if '%' in self.rule['command']:
                elastalert_logger.warning('Warning! You could be vulnerable to shell injection!')
            self.rule['command'] = [self.rule['command']]

    def alert(self, matches):
        """Substitute match fields into the command and execute it."""
        try:
            formatted = []
            for raw_arg in self.rule['command']:
                formatted.append(resolve_string(raw_arg, matches[0]))
            self.last_command = formatted
        except KeyError as e:
            raise EAException("Error formatting command: %s" % (e))

        try:
            proc = subprocess.Popen(formatted, stdin=subprocess.PIPE, shell=self.shell)
            # Feed the process stdin according to the rule's pipe options.
            if self.rule.get('pipe_match_json'):
                match_json = json.dumps(matches, cls=DateTimeEncoder) + '\n'
                stdout, stderr = proc.communicate(input=match_json.encode())
            elif self.rule.get('pipe_alert_text'):
                alert_text = self.create_alert_body(matches)
                stdout, stderr = proc.communicate(input=alert_text.encode())
            if self.rule.get("fail_on_non_zero_exit", False) and proc.wait():
                raise EAException("Non-zero exit code while running command %s" % (' '.join(formatted)))
        except OSError as e:
            raise EAException("Error while running command %s: %s" % (' '.join(formatted), e))

    def get_info(self):
        """Describe this alerter, including the last command that was run."""
        return {'type': 'command',
                'command': ' '.join(self.last_command)}
class SnsAlerter(Alerter):
    """Publishes the alert body to an AWS SNS topic."""
    required_options = frozenset(['sns_topic_arn'])

    def __init__(self, *args):
        super(SnsAlerter, self).__init__(*args)
        self.sns_topic_arn = self.rule.get('sns_topic_arn', '')
        self.sns_aws_access_key_id = self.rule.get('sns_aws_access_key_id')
        self.sns_aws_secret_access_key = self.rule.get('sns_aws_secret_access_key')
        self.sns_aws_region = self.rule.get('sns_aws_region', 'us-east-1')
        # 'boto_profile' is deprecated in favour of 'sns_aws_profile'.
        # BUG FIX: previously the deprecated value was read and then
        # unconditionally overwritten, so it never took effect; now it is used
        # only as a fallback when 'sns_aws_profile' is not set.
        self.profile = self.rule.get('sns_aws_profile') or self.rule.get('boto_profile', None)

    def create_default_title(self, matches):
        """Default SNS subject: just the rule name."""
        subject = 'ElastAlert: %s' % (self.rule['name'])
        return subject

    def alert(self, matches):
        body = self.create_alert_body(matches)

        if self.profile is None:
            # Explicit credentials (or the default provider chain when unset)
            session = boto3.Session(
                aws_access_key_id=self.sns_aws_access_key_id,
                # BUG FIX: the secret key used to be populated from the access
                # key id, which made explicit-credential auth always fail.
                aws_secret_access_key=self.sns_aws_secret_access_key,
                region_name=self.sns_aws_region
            )
        else:
            session = boto3.Session(profile_name=self.profile)

        sns_client = session.client('sns')
        sns_client.publish(
            TopicArn=self.sns_topic_arn,
            Message=body,
            Subject=self.create_title(matches)
        )
        elastalert_logger.info("Sent sns notification to %s" % (self.sns_topic_arn))
class MsTeamsAlerter(Alerter):
    """Posts alerts to Microsoft Teams incoming webhooks as MessageCards."""
    required_options = frozenset(['ms_teams_webhook_url', 'ms_teams_alert_summary'])

    def __init__(self, rule):
        super(MsTeamsAlerter, self).__init__(rule)
        # Accept a single webhook URL or a list of them.
        urls = self.rule['ms_teams_webhook_url']
        self.ms_teams_webhook_url = [urls] if isinstance(urls, str) else urls
        self.ms_teams_proxy = self.rule.get('ms_teams_proxy', None)
        self.ms_teams_alert_summary = self.rule.get('ms_teams_alert_summary', 'ElastAlert Message')
        self.ms_teams_alert_fixed_width = self.rule.get('ms_teams_alert_fixed_width', False)
        self.ms_teams_theme_color = self.rule.get('ms_teams_theme_color', '')

    def format_body(self, body):
        """Optionally render every body line in fixed-width (code) markup."""
        if self.ms_teams_alert_fixed_width:
            body = body.replace('`', "'")
            wrapped = '```\n\n```'.join(line for line in body.split('\n'))
            body = "```{0}```".format(wrapped).replace('\n``````', '')
        return body

    def alert(self, matches):
        """Build the MessageCard and POST it to every configured webhook."""
        body = self.format_body(self.create_alert_body(matches))
        post_headers = {'content-type': 'application/json'}
        post_proxies = {'https': self.ms_teams_proxy} if self.ms_teams_proxy else None
        card = {
            '@type': 'MessageCard',
            '@context': 'http://schema.org/extensions',
            'summary': self.ms_teams_alert_summary,
            'title': self.create_title(matches),
            'text': body
        }
        # themeColor is optional; only include it when configured.
        if self.ms_teams_theme_color != '':
            card['themeColor'] = self.ms_teams_theme_color

        for url in self.ms_teams_webhook_url:
            try:
                resp = requests.post(url, data=json.dumps(card, cls=DateTimeEncoder), headers=post_headers, proxies=post_proxies)
                resp.raise_for_status()
            except RequestException as e:
                raise EAException("Error posting to ms teams: %s" % e)
        elastalert_logger.info("Alert sent to MS Teams")

    def get_info(self):
        """Describe this alerter."""
        return {'type': 'ms_teams',
                'ms_teams_webhook_url': self.ms_teams_webhook_url}
class SlackAlerter(Alerter):
    """ Creates a Slack room message for each alert.

    Posts a JSON attachment payload to one or more incoming-webhook URLs,
    optionally overriding channel/username/icon and attaching a Kibana
    Discover link.
    """
    required_options = frozenset(['slack_webhook_url'])

    def __init__(self, rule):
        super(SlackAlerter, self).__init__(rule)
        # Normalize the webhook option to a list so alert() can iterate it.
        self.slack_webhook_url = self.rule['slack_webhook_url']
        if isinstance(self.slack_webhook_url, str):
            self.slack_webhook_url = [self.slack_webhook_url]
        self.slack_proxy = self.rule.get('slack_proxy', None)
        self.slack_username_override = self.rule.get('slack_username_override', 'elastalert')
        # Channel override is also normalized to a list; '' means "webhook default".
        self.slack_channel_override = self.rule.get('slack_channel_override', '')
        if isinstance(self.slack_channel_override, str):
            self.slack_channel_override = [self.slack_channel_override]
        self.slack_title_link = self.rule.get('slack_title_link', '')
        self.slack_title = self.rule.get('slack_title', '')
        self.slack_emoji_override = self.rule.get('slack_emoji_override', ':ghost:')
        self.slack_icon_url_override = self.rule.get('slack_icon_url_override', '')
        self.slack_msg_color = self.rule.get('slack_msg_color', 'danger')
        self.slack_parse_override = self.rule.get('slack_parse_override', 'none')
        self.slack_text_string = self.rule.get('slack_text_string', '')
        self.slack_alert_fields = self.rule.get('slack_alert_fields', '')
        self.slack_ignore_ssl_errors = self.rule.get('slack_ignore_ssl_errors', False)
        self.slack_timeout = self.rule.get('slack_timeout', 10)
        self.slack_ca_certs = self.rule.get('slack_ca_certs')
        self.slack_attach_kibana_discover_url = self.rule.get('slack_attach_kibana_discover_url', False)
        self.slack_kibana_discover_color = self.rule.get('slack_kibana_discover_color', '#ec4b98')
        self.slack_kibana_discover_title = self.rule.get('slack_kibana_discover_title', 'Discover in Kibana')
        self.slack_footer = self.rule.get('slack_footer', '')
        self.slack_footer_icon = self.rule.get('slack_footer_icon', '')
        self.slack_image_url = self.rule.get('slack_image_url', '')
        self.slack_thumb_url = self.rule.get('slack_thumb_url', '')
        self.slack_author_name = self.rule.get('slack_author_name', '')
        self.slack_author_link = self.rule.get('slack_author_link', '')
        self.slack_author_icon = self.rule.get('slack_author_icon', '')
        self.slack_msg_pretext = self.rule.get('slack_msg_pretext', '')

    def format_body(self, body):
        # Slack attachment text is used as-is; no extra formatting needed here.
        return body

    def get_aggregation_summary_text__maximum_width(self):
        # Slack attachments clip wide preformatted text, so cap the table width.
        width = super(SlackAlerter, self).get_aggregation_summary_text__maximum_width()
        return min(width, 75)

    def get_aggregation_summary_text(self, matches):
        """Wrap the aggregation summary table in Slack code-block markup."""
        text = super(SlackAlerter, self).get_aggregation_summary_text(matches)
        if text:
            text = '```\n{0}```\n'.format(text)
        return text

    def populate_fields(self, matches):
        """Resolve each configured alert field's 'value' against the first match."""
        alert_fields = []
        for arg in self.slack_alert_fields:
            # Copy so the rule's configured field dicts are not mutated.
            arg = copy.copy(arg)
            arg['value'] = lookup_es_key(matches[0], arg['value'])
            alert_fields.append(arg)
        return alert_fields

    def alert(self, matches):
        body = self.create_alert_body(matches)

        body = self.format_body(body)
        # post to slack
        headers = {'content-type': 'application/json'}
        proxies = {'https': self.slack_proxy} if self.slack_proxy else None
        payload = {
            'username': self.slack_username_override,
            'parse': self.slack_parse_override,
            'text': self.slack_text_string,
            'attachments': [
                {
                    'color': self.slack_msg_color,
                    'title': self.create_title(matches),
                    'text': body,
                    'mrkdwn_in': ['text', 'pretext'],
                    'fields': []
                }
            ]
        }

        # Each of the following options is optional; only non-empty values are
        # added to the payload so Slack falls back to its own defaults.
        if self.slack_alert_fields != '':
            payload['attachments'][0]['fields'] = self.populate_fields(matches)

        # icon_url takes precedence over the emoji when both are configured.
        if self.slack_icon_url_override != '':
            payload['icon_url'] = self.slack_icon_url_override
        else:
            payload['icon_emoji'] = self.slack_emoji_override

        if self.slack_title != '':
            payload['attachments'][0]['title'] = self.slack_title

        if self.slack_title_link != '':
            payload['attachments'][0]['title_link'] = self.slack_title_link

        if self.slack_footer != '':
            payload['attachments'][0]['footer'] = self.slack_footer

        if self.slack_footer_icon != '':
            payload['attachments'][0]['footer_icon'] = self.slack_footer_icon

        if self.slack_image_url != '':
            payload['attachments'][0]['image_url'] = self.slack_image_url

        if self.slack_thumb_url != '':
            payload['attachments'][0]['thumb_url'] = self.slack_thumb_url

        if self.slack_author_name != '':
            payload['attachments'][0]['author_name'] = self.slack_author_name

        if self.slack_author_link != '':
            payload['attachments'][0]['author_link'] = self.slack_author_link

        if self.slack_author_icon != '':
            payload['attachments'][0]['author_icon'] = self.slack_author_icon

        if self.slack_msg_pretext != '':
            payload['attachments'][0]['pretext'] = self.slack_msg_pretext

        # Optionally attach the Kibana Discover link (resolved earlier into the
        # match as 'kibana_discover_url') as a second attachment.
        if self.slack_attach_kibana_discover_url:
            kibana_discover_url = lookup_es_key(matches[0], 'kibana_discover_url')
            if kibana_discover_url:
                payload['attachments'].append({
                    'color': self.slack_kibana_discover_color,
                    'title': self.slack_kibana_discover_title,
                    'title_link': kibana_discover_url
                })

        # Post the same payload to every webhook/channel combination.
        for url in self.slack_webhook_url:
            for channel_override in self.slack_channel_override:
                try:
                    # CA bundle takes precedence; otherwise verify unless the
                    # rule explicitly ignores SSL errors.
                    if self.slack_ca_certs:
                        verify = self.slack_ca_certs
                    else:
                        verify = not self.slack_ignore_ssl_errors
                    if self.slack_ignore_ssl_errors:
                        requests.packages.urllib3.disable_warnings()
                    payload['channel'] = channel_override
                    response = requests.post(
                        url, data=json.dumps(payload, cls=DateTimeEncoder),
                        headers=headers, verify=verify,
                        proxies=proxies,
                        timeout=self.slack_timeout)
                    warnings.resetwarnings()
                    response.raise_for_status()
                except RequestException as e:
                    raise EAException("Error posting to slack: %s" % e)
        elastalert_logger.info("Alert '%s' sent to Slack" % self.rule['name'])

    def get_info(self):
        return {'type': 'slack',
                'slack_username_override': self.slack_username_override}
class MattermostAlerter(Alerter):
    """ Creates a Mattermost post for each alert.

    Posts a JSON attachment payload to one or more incoming-webhook URLs,
    optionally overriding channel/username/icon.
    """
    required_options = frozenset(['mattermost_webhook_url'])

    def __init__(self, rule):
        super(MattermostAlerter, self).__init__(rule)
        # Normalize the webhook option to a list so alert() can iterate it.
        self.mattermost_webhook_url = self.rule['mattermost_webhook_url']
        if isinstance(self.mattermost_webhook_url, str):
            self.mattermost_webhook_url = [self.mattermost_webhook_url]
        self.mattermost_proxy = self.rule.get('mattermost_proxy', None)
        self.mattermost_ignore_ssl_errors = self.rule.get('mattermost_ignore_ssl_errors', False)
        self.mattermost_username_override = self.rule.get('mattermost_username_override', 'elastalert')
        self.mattermost_channel_override = self.rule.get('mattermost_channel_override', '')
        self.mattermost_icon_url_override = self.rule.get('mattermost_icon_url_override', '')
        self.mattermost_msg_pretext = self.rule.get('mattermost_msg_pretext', '')
        self.mattermost_msg_color = self.rule.get('mattermost_msg_color', 'danger')
        self.mattermost_msg_fields = self.rule.get('mattermost_msg_fields', '')
        # BUG FIX: 'mattermost_image_url' was read twice; the duplicate
        # assignment has been removed.
        self.mattermost_image_url = self.rule.get('mattermost_image_url', '')
        self.mattermost_title_link = self.rule.get('mattermost_title_link', '')
        self.mattermost_footer = self.rule.get('mattermost_footer', '')
        self.mattermost_footer_icon = self.rule.get('mattermost_footer_icon', '')
        self.mattermost_thumb_url = self.rule.get('mattermost_thumb_url', '')
        self.mattermost_author_name = self.rule.get('mattermost_author_name', '')
        self.mattermost_author_link = self.rule.get('mattermost_author_link', '')
        self.mattermost_author_icon = self.rule.get('mattermost_author_icon', '')

    def get_aggregation_summary_text__maximum_width(self):
        # Mattermost attachments clip wide preformatted text, so cap the width.
        width = super(MattermostAlerter, self).get_aggregation_summary_text__maximum_width()
        return min(width, 75)

    def get_aggregation_summary_text(self, matches):
        """Wrap the aggregation summary table in Markdown code-block markup."""
        text = super(MattermostAlerter, self).get_aggregation_summary_text(matches)
        if text:
            text = '```\n{0}```\n'.format(text)
        return text

    def populate_fields(self, matches):
        """Build the attachment 'fields' list, formatting 'args' lookups from
        the first match and substituting a placeholder for missing values."""
        alert_fields = []
        missing = self.rule.get('alert_missing_value', '<MISSING VALUE>')
        for field in self.mattermost_msg_fields:
            # Copy so the rule's configured field dicts are not mutated.
            field = copy.copy(field)
            if 'args' in field:
                args_values = [lookup_es_key(matches[0], arg) or missing for arg in field['args']]
                if 'value' in field:
                    field['value'] = field['value'].format(*args_values)
                else:
                    field['value'] = "\n".join(str(arg) for arg in args_values)
                del(field['args'])
            alert_fields.append(field)
        return alert_fields

    def alert(self, matches):
        body = self.create_alert_body(matches)
        title = self.create_title(matches)

        # post to mattermost
        headers = {'content-type': 'application/json'}
        proxies = {'https': self.mattermost_proxy} if self.mattermost_proxy else None
        payload = {
            'attachments': [
                {
                    'fallback': "{0}: {1}".format(title, self.mattermost_msg_pretext),
                    'color': self.mattermost_msg_color,
                    'title': title,
                    'pretext': self.mattermost_msg_pretext,
                    'fields': []
                }
            ]
        }

        # With 'alert_text_only' the body goes inside the attachment; otherwise
        # it becomes the top-level message text.
        if self.rule.get('alert_text_type') == 'alert_text_only':
            payload['attachments'][0]['text'] = body
        else:
            payload['text'] = body

        # Each option below is optional; only non-empty values are sent so
        # Mattermost falls back to its own defaults.
        if self.mattermost_msg_fields != '':
            payload['attachments'][0]['fields'] = self.populate_fields(matches)

        if self.mattermost_icon_url_override != '':
            payload['icon_url'] = self.mattermost_icon_url_override

        if self.mattermost_username_override != '':
            payload['username'] = self.mattermost_username_override

        if self.mattermost_channel_override != '':
            payload['channel'] = self.mattermost_channel_override

        if self.mattermost_title_link != '':
            payload['attachments'][0]['title_link'] = self.mattermost_title_link

        if self.mattermost_footer != '':
            payload['attachments'][0]['footer'] = self.mattermost_footer

        if self.mattermost_footer_icon != '':
            payload['attachments'][0]['footer_icon'] = self.mattermost_footer_icon

        if self.mattermost_image_url != '':
            payload['attachments'][0]['image_url'] = self.mattermost_image_url

        if self.mattermost_thumb_url != '':
            payload['attachments'][0]['thumb_url'] = self.mattermost_thumb_url

        if self.mattermost_author_name != '':
            payload['attachments'][0]['author_name'] = self.mattermost_author_name

        if self.mattermost_author_link != '':
            payload['attachments'][0]['author_link'] = self.mattermost_author_link

        if self.mattermost_author_icon != '':
            payload['attachments'][0]['author_icon'] = self.mattermost_author_icon

        for url in self.mattermost_webhook_url:
            try:
                if self.mattermost_ignore_ssl_errors:
                    requests.urllib3.disable_warnings()

                response = requests.post(
                    url, data=json.dumps(payload, cls=DateTimeEncoder),
                    headers=headers, verify=not self.mattermost_ignore_ssl_errors,
                    proxies=proxies)

                warnings.resetwarnings()
                response.raise_for_status()
            except RequestException as e:
                raise EAException("Error posting to Mattermost: %s" % e)
        elastalert_logger.info("Alert sent to Mattermost")

    def get_info(self):
        return {'type': 'mattermost',
                'mattermost_username_override': self.mattermost_username_override,
                'mattermost_webhook_url': self.mattermost_webhook_url}
class PagerDutyAlerter(Alerter):
    """ Creates a PagerDuty event for each alert.

    Supports both the legacy (v1) generic events API and the v2 events API,
    selected via 'pagerduty_api_version'.
    """
    required_options = frozenset(['pagerduty_service_key', 'pagerduty_client_name'])

    def __init__(self, rule):
        super(PagerDutyAlerter, self).__init__(rule)
        self.pagerduty_service_key = self.rule['pagerduty_service_key']
        self.pagerduty_client_name = self.rule['pagerduty_client_name']
        # The incident/dedup key may be a format string filled from match
        # fields named in 'pagerduty_incident_key_args'.
        self.pagerduty_incident_key = self.rule.get('pagerduty_incident_key', '')
        self.pagerduty_incident_key_args = self.rule.get('pagerduty_incident_key_args', None)
        self.pagerduty_event_type = self.rule.get('pagerduty_event_type', 'trigger')
        self.pagerduty_proxy = self.rule.get('pagerduty_proxy', None)
        self.pagerduty_api_version = self.rule.get('pagerduty_api_version', 'v1')
        # v2 payload fields; the *_args variants are format-string arguments
        # resolved from the first match (see resolve_formatted_key).
        self.pagerduty_v2_payload_class = self.rule.get('pagerduty_v2_payload_class', '')
        self.pagerduty_v2_payload_class_args = self.rule.get('pagerduty_v2_payload_class_args', None)
        self.pagerduty_v2_payload_component = self.rule.get('pagerduty_v2_payload_component', '')
        self.pagerduty_v2_payload_component_args = self.rule.get('pagerduty_v2_payload_component_args', None)
        self.pagerduty_v2_payload_group = self.rule.get('pagerduty_v2_payload_group', '')
        self.pagerduty_v2_payload_group_args = self.rule.get('pagerduty_v2_payload_group_args', None)
        self.pagerduty_v2_payload_severity = self.rule.get('pagerduty_v2_payload_severity', 'critical')
        self.pagerduty_v2_payload_source = self.rule.get('pagerduty_v2_payload_source', 'ElastAlert')
        self.pagerduty_v2_payload_source_args = self.rule.get('pagerduty_v2_payload_source_args', None)
        self.pagerduty_v2_payload_custom_details = self.rule.get('pagerduty_v2_payload_custom_details', {})
        self.pagerduty_v2_payload_include_all_info = self.rule.get('pagerduty_v2_payload_include_all_info', True)

        if self.pagerduty_api_version == 'v2':
            self.url = 'https://events.pagerduty.com/v2/enqueue'
        else:
            self.url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json'

    def alert(self, matches):
        body = self.create_alert_body(matches)

        # post to pagerduty
        headers = {'content-type': 'application/json'}
        if self.pagerduty_api_version == 'v2':
            custom_details_payload = {'information': body} if self.pagerduty_v2_payload_include_all_info else {}
            if self.pagerduty_v2_payload_custom_details:
                for match in matches:
                    for custom_details_key, es_key in list(self.pagerduty_v2_payload_custom_details.items()):
                        custom_details_payload[custom_details_key] = lookup_es_key(match, es_key)

            payload = {
                'routing_key': self.pagerduty_service_key,
                'event_action': self.pagerduty_event_type,
                'dedup_key': self.get_incident_key(matches),
                'client': self.pagerduty_client_name,
                'payload': {
                    'class': self.resolve_formatted_key(self.pagerduty_v2_payload_class,
                                                        self.pagerduty_v2_payload_class_args,
                                                        matches),
                    'component': self.resolve_formatted_key(self.pagerduty_v2_payload_component,
                                                            self.pagerduty_v2_payload_component_args,
                                                            matches),
                    'group': self.resolve_formatted_key(self.pagerduty_v2_payload_group,
                                                        self.pagerduty_v2_payload_group_args,
                                                        matches),
                    'severity': self.pagerduty_v2_payload_severity,
                    'source': self.resolve_formatted_key(self.pagerduty_v2_payload_source,
                                                         self.pagerduty_v2_payload_source_args,
                                                         matches),
                    'summary': self.create_title(matches),
                    'custom_details': custom_details_payload,
                },
            }
            match_timestamp = lookup_es_key(matches[0], self.rule.get('timestamp_field', '@timestamp'))
            if match_timestamp:
                payload['payload']['timestamp'] = match_timestamp
        else:
            payload = {
                'service_key': self.pagerduty_service_key,
                'description': self.create_title(matches),
                'event_type': self.pagerduty_event_type,
                'incident_key': self.get_incident_key(matches),
                'client': self.pagerduty_client_name,
                'details': {
                    "information": body,
                },
            }

        # set https proxy, if it was provided
        proxies = {'https': self.pagerduty_proxy} if self.pagerduty_proxy else None
        try:
            response = requests.post(
                self.url,
                data=json.dumps(payload, cls=DateTimeEncoder, ensure_ascii=False),
                headers=headers,
                proxies=proxies
            )
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to pagerduty: %s" % e)

        if self.pagerduty_event_type == 'trigger':
            elastalert_logger.info("Trigger sent to PagerDuty")
        elif self.pagerduty_event_type == 'resolve':
            elastalert_logger.info("Resolve sent to PagerDuty")
        elif self.pagerduty_event_type == 'acknowledge':
            elastalert_logger.info("acknowledge sent to PagerDuty")

    def resolve_formatted_key(self, key, args, matches):
        """Format *key* with values looked up from the first match.

        Each name in *args* is resolved with lookup_es_key; when the lookup
        fails, the rule's own option of that name is tried, and finally the
        'alert_missing_value' placeholder. With no *args*, *key* is returned
        unchanged.
        """
        if args:
            key_values = [lookup_es_key(matches[0], arg) for arg in args]

            # Populate values with rule level properties too
            for i in range(len(key_values)):
                if key_values[i] is None:
                    key_value = self.rule.get(args[i])
                    if key_value:
                        key_values[i] = key_value

            missing = self.rule.get('alert_missing_value', '<MISSING VALUE>')
            key_values = [missing if val is None else val for val in key_values]
            return key.format(*key_values)
        else:
            return key

    def get_incident_key(self, matches):
        """Return the formatted incident/dedup key for this alert.

        REFACTOR: this previously duplicated resolve_formatted_key line for
        line; it now delegates to it with the incident-key options.
        """
        return self.resolve_formatted_key(self.pagerduty_incident_key,
                                          self.pagerduty_incident_key_args,
                                          matches)

    def get_info(self):
        return {'type': 'pagerduty',
                'pagerduty_client_name': self.pagerduty_client_name}
class PagerTreeAlerter(Alerter):
    """Sends alerts to PagerTree via its integration webhook."""
    required_options = frozenset(['pagertree_integration_url'])

    def __init__(self, rule):
        super(PagerTreeAlerter, self).__init__(rule)
        # Webhook endpoint and optional HTTPS proxy come straight from the rule.
        self.url = self.rule['pagertree_integration_url']
        self.pagertree_proxy = self.rule.get('pagertree_proxy', None)

    def alert(self, matches):
        """POST a 'create' event with a fresh id, title and description."""
        post_headers = {'content-type': 'application/json'}
        post_proxies = None
        if self.pagertree_proxy:
            post_proxies = {'https': self.pagertree_proxy}
        event = {
            "event_type": "create",
            "Id": str(uuid.uuid4()),
            "Title": self.create_title(matches),
            "Description": self.create_alert_body(matches)
        }
        try:
            resp = requests.post(self.url, data=json.dumps(event, cls=DateTimeEncoder), headers=post_headers, proxies=post_proxies)
            resp.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to PagerTree: %s" % e)
        elastalert_logger.info("Trigger sent to PagerTree")

    def get_info(self):
        """Describe this alerter."""
        return {'type': 'pagertree',
                'pagertree_integration_url': self.url}
class ExotelAlerter(Alerter):
    """Send an SMS through the Exotel API when a rule matches."""
    required_options = frozenset(['exotel_account_sid', 'exotel_auth_token', 'exotel_to_number', 'exotel_from_number'])

    def __init__(self, rule):
        super(ExotelAlerter, self).__init__(rule)
        # Exotel credentials and phone numbers taken from the rule config.
        self.exotel_account_sid = self.rule['exotel_account_sid']
        self.exotel_auth_token = self.rule['exotel_auth_token']
        self.exotel_to_number = self.rule['exotel_to_number']
        self.exotel_from_number = self.rule['exotel_from_number']
        self.sms_body = self.rule.get('exotel_message_body', '')

    def alert(self, matches):
        """Deliver the rule name plus the configured body as one SMS."""
        client = Exotel(self.exotel_account_sid, self.exotel_auth_token)
        try:
            sms_text = self.rule['name'] + self.sms_body
            status = client.sms(self.rule['exotel_from_number'], self.rule['exotel_to_number'], sms_text)
            # The Exotel client returns the HTTP status code; anything but 200 is a failure.
            if status != 200:
                raise EAException("Error posting to Exotel, response code is %s" % status)
        except RequestException:
            raise EAException("Error posting to Exotel").with_traceback(sys.exc_info()[2])
        elastalert_logger.info("Trigger sent to Exotel")

    def get_info(self):
        """Describe this alerter for status reporting."""
        return {'type': 'exotel', 'exotel_account': self.exotel_account_sid}
class TwilioAlerter(Alerter):
    """Send an SMS via Twilio, either directly or through a Copilot messaging service."""
    required_options = frozenset(['twilio_account_sid', 'twilio_auth_token', 'twilio_to_number'])

    def __init__(self, rule):
        super(TwilioAlerter, self).__init__(rule)
        self.twilio_account_sid = self.rule['twilio_account_sid']
        self.twilio_auth_token = self.rule['twilio_auth_token']
        self.twilio_to_number = self.rule['twilio_to_number']
        self.twilio_from_number = self.rule.get('twilio_from_number')
        self.twilio_message_service_sid = self.rule.get('twilio_message_service_sid')
        self.twilio_use_copilot = self.rule.get('twilio_use_copilot', False)

    def alert(self, matches):
        """Text the rule name to the configured number."""
        client = TwilioClient(self.twilio_account_sid, self.twilio_auth_token)
        try:
            if not self.twilio_use_copilot:
                # Direct SMS needs an explicit sender number.
                if self.twilio_from_number is None:
                    raise EAException("Twilio SMS requires the 'twilio_from_number' option")
                client.messages.create(body=self.rule['name'],
                                       to=self.twilio_to_number,
                                       from_=self.twilio_from_number)
            else:
                # Copilot routes through a messaging service instead of a from-number.
                if self.twilio_message_service_sid is None:
                    raise EAException("Twilio Copilot requires the 'twilio_message_service_sid' option")
                client.messages.create(body=self.rule['name'],
                                       to=self.twilio_to_number,
                                       messaging_service_sid=self.twilio_message_service_sid)
        except TwilioRestException as e:
            raise EAException("Error posting to twilio: %s" % e)
        elastalert_logger.info("Trigger sent to Twilio")

    def get_info(self):
        """Describe this alerter for status reporting."""
        return {'type': 'twilio', 'twilio_client_name': self.twilio_from_number}
class VictorOpsAlerter(Alerter):
    """Post alerts to the VictorOps generic REST integration endpoint."""
    required_options = frozenset(['victorops_api_key', 'victorops_routing_key', 'victorops_message_type'])

    def __init__(self, rule):
        super(VictorOpsAlerter, self).__init__(rule)
        self.victorops_api_key = self.rule['victorops_api_key']
        self.victorops_routing_key = self.rule['victorops_routing_key']
        self.victorops_message_type = self.rule['victorops_message_type']
        self.victorops_entity_id = self.rule.get('victorops_entity_id', None)
        self.victorops_entity_display_name = self.rule.get('victorops_entity_display_name', 'no entity display name')
        # The API key and routing key are baked into the endpoint URL.
        self.url = 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' % (
            self.victorops_api_key, self.victorops_routing_key)
        self.victorops_proxy = self.rule.get('victorops_proxy', None)

    def alert(self, matches):
        """POST the rendered alert body as a VictorOps state message."""
        payload = {
            "message_type": self.victorops_message_type,
            "entity_display_name": self.victorops_entity_display_name,
            "monitoring_tool": "ElastAlert",
            "state_message": self.create_alert_body(matches)
        }
        # Only include the entity id when one was configured.
        if self.victorops_entity_id:
            payload["entity_id"] = self.victorops_entity_id
        proxies = {'https': self.victorops_proxy} if self.victorops_proxy else None
        headers = {'content-type': 'application/json'}
        try:
            response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to VictorOps: %s" % e)
        elastalert_logger.info("Trigger sent to VictorOps")

    def get_info(self):
        """Describe this alerter for status reporting."""
        return {'type': 'victorops', 'victorops_routing_key': self.victorops_routing_key}
class TelegramAlerter(Alerter):
    """Send alerts to a Telegram chat through the Bot API sendMessage endpoint."""
    required_options = frozenset(['telegram_bot_token', 'telegram_room_id'])

    def __init__(self, rule):
        super(TelegramAlerter, self).__init__(rule)
        self.telegram_bot_token = self.rule['telegram_bot_token']
        self.telegram_room_id = self.rule['telegram_room_id']
        self.telegram_api_url = self.rule.get('telegram_api_url', 'api.telegram.org')
        self.url = 'https://%s/bot%s/%s' % (self.telegram_api_url, self.telegram_bot_token, "sendMessage")
        self.telegram_proxy = self.rule.get('telegram_proxy', None)
        self.telegram_proxy_login = self.rule.get('telegram_proxy_login', None)
        self.telegram_proxy_password = self.rule.get('telegram_proxy_pass', None)

    def alert(self, matches):
        """Render the matches as a markdown message and POST it to the chat."""
        parts = ['⚠ *%s* ⚠ ```\n' % (self.create_title(matches))]
        for match in matches:
            parts.append(str(BasicMatchString(self.rule, match)))
            # Separate individual matches when more than one is reported.
            if len(matches) > 1:
                parts.append('\n----------------------------------------\n')
        body = ''.join(parts)
        # Telegram rejects messages longer than 4096 characters; crop and say so.
        if len(body) > 4095:
            body = body[0:4000] + "\n⚠ *message was cropped according to telegram limits!* ⚠"
        body += ' ```'

        payload = {
            'chat_id': self.telegram_room_id,
            'text': body,
            'parse_mode': 'markdown',
            'disable_web_page_preview': True
        }
        proxies = {'https': self.telegram_proxy} if self.telegram_proxy else None
        auth = HTTPProxyAuth(self.telegram_proxy_login, self.telegram_proxy_password) if self.telegram_proxy_login else None
        headers = {'content-type': 'application/json'}
        try:
            response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies, auth=auth)
            warnings.resetwarnings()
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to Telegram: %s. Details: %s" % (e, "" if e.response is None else e.response.text))
        elastalert_logger.info(
            "Alert sent to Telegram room %s" % self.telegram_room_id)

    def get_info(self):
        """Describe this alerter for status reporting."""
        return {'type': 'telegram', 'telegram_room_id': self.telegram_room_id}
class GoogleChatAlerter(Alerter):
    """Post alerts to one or more Google Chat incoming webhooks, as plain text or a card."""
    required_options = frozenset(['googlechat_webhook_url'])

    def __init__(self, rule):
        super(GoogleChatAlerter, self).__init__(rule)
        self.googlechat_webhook_url = self.rule['googlechat_webhook_url']
        # Accept a single URL or a list of URLs.
        if isinstance(self.googlechat_webhook_url, str):
            self.googlechat_webhook_url = [self.googlechat_webhook_url]
        self.googlechat_format = self.rule.get('googlechat_format', 'basic')
        self.googlechat_header_title = self.rule.get('googlechat_header_title', None)
        self.googlechat_header_subtitle = self.rule.get('googlechat_header_subtitle', None)
        self.googlechat_header_image = self.rule.get('googlechat_header_image', None)
        self.googlechat_footer_kibanalink = self.rule.get('googlechat_footer_kibanalink', None)

    def create_header(self):
        """Return the card header dict, or None when no header title is configured."""
        if not self.googlechat_header_title:
            return None
        return {
            "title": self.googlechat_header_title,
            "subtitle": self.googlechat_header_subtitle,
            "imageUrl": self.googlechat_header_image
        }

    def create_footer(self):
        """Return a footer section with a Kibana link button, or None when not configured."""
        if not self.googlechat_footer_kibanalink:
            return None
        return {"widgets": [{
            "buttons": [{
                "textButton": {
                    "text": "VISIT KIBANA",
                    "onClick": {
                        "openLink": {
                            "url": self.googlechat_footer_kibanalink
                        }
                    }
                }
            }]
        }]}

    def create_card(self, matches):
        """Build the Google Chat card payload for the given matches."""
        card = {"cards": [{
            "sections": [{
                "widgets": [
                    {"textParagraph": {"text": self.create_alert_body(matches)}}
                ]}
            ]}
        ]}
        header = self.create_header()
        if header:
            card['cards'][0]['header'] = header
        footer = self.create_footer()
        if footer:
            card['cards'][0]['sections'].append(footer)
        return card

    def create_basic(self, matches):
        """Build the plain-text payload for the given matches."""
        return {'text': self.create_alert_body(matches)}

    def alert(self, matches):
        """Send the formatted payload to every configured webhook URL."""
        if self.googlechat_format == 'card':
            message = self.create_card(matches)
        else:
            message = self.create_basic(matches)
        headers = {'content-type': 'application/json'}
        for url in self.googlechat_webhook_url:
            try:
                response = requests.post(url, data=json.dumps(message), headers=headers)
                response.raise_for_status()
            except RequestException as e:
                raise EAException("Error posting to google chat: {}".format(e))
        elastalert_logger.info("Alert sent to Google Chat!")

    def get_info(self):
        """Describe this alerter for status reporting."""
        return {'type': 'googlechat', 'googlechat_webhook_url': self.googlechat_webhook_url}
class GitterAlerter(Alerter):
    """Post alerts to a Gitter room through its activity webhook."""
    required_options = frozenset(['gitter_webhook_url'])

    def __init__(self, rule):
        super(GitterAlerter, self).__init__(rule)
        self.gitter_webhook_url = self.rule['gitter_webhook_url']
        self.gitter_proxy = self.rule.get('gitter_proxy', None)
        self.gitter_msg_level = self.rule.get('gitter_msg_level', 'error')

    def alert(self, matches):
        """POST the rendered alert body with the configured message level."""
        payload = {
            'message': self.create_alert_body(matches),
            'level': self.gitter_msg_level
        }
        proxies = {'https': self.gitter_proxy} if self.gitter_proxy else None
        headers = {'content-type': 'application/json'}
        try:
            response = requests.post(self.gitter_webhook_url, json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to Gitter: %s" % e)
        elastalert_logger.info("Alert sent to Gitter")

    def get_info(self):
        """Describe this alerter for status reporting."""
        return {'type': 'gitter', 'gitter_webhook_url': self.gitter_webhook_url}
class ServiceNowAlerter(Alerter):
    """Create a ServiceNow incident for an alert via the ServiceNow REST API."""
    required_options = set([
        'username',
        'password',
        'servicenow_rest_url',
        'short_description',
        'comments',
        'assignment_group',
        'category',
        'subcategory',
        'cmdb_ci',
        'caller_id'
    ])

    def __init__(self, rule):
        super(ServiceNowAlerter, self).__init__(rule)
        self.servicenow_rest_url = self.rule['servicenow_rest_url']
        self.servicenow_proxy = self.rule.get('servicenow_proxy', None)

    def alert(self, matches):
        """POST a single incident to ServiceNow.

        NOTE(review): the loop below overwrites ``description`` each
        iteration, so only the final match's details end up in the
        incident body — confirm this is intended.
        """
        for match in matches:
            description = str(BasicMatchString(self.rule, match))
        headers = {
            "Content-Type": "application/json",
            "Accept": "application/json;charset=utf-8"
        }
        proxies = {'https': self.servicenow_proxy} if self.servicenow_proxy else None
        # Incident fields are taken verbatim from the rule configuration.
        payload = {
            "description": description,
            "short_description": self.rule['short_description'],
            "comments": self.rule['comments'],
            "assignment_group": self.rule['assignment_group'],
            "category": self.rule['category'],
            "subcategory": self.rule['subcategory'],
            "cmdb_ci": self.rule['cmdb_ci'],
            "caller_id": self.rule["caller_id"]
        }
        try:
            response = requests.post(
                self.servicenow_rest_url,
                auth=(self.rule['username'], self.rule['password']),
                headers=headers,
                data=json.dumps(payload, cls=DateTimeEncoder),
                proxies=proxies
            )
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to ServiceNow: %s" % e)
        elastalert_logger.info("Alert sent to ServiceNow")

    def get_info(self):
        """Describe this alerter for status reporting.

        Bug fix: the info key used to be the literal string
        'self.servicenow_rest_url'; it is now 'servicenow_rest_url',
        consistent with every other alerter's get_info().
        """
        return {'type': 'ServiceNow',
                'servicenow_rest_url': self.servicenow_rest_url}
class AlertaAlerter(Alerter):
    """Forward alerts to an Alerta server through its REST API.

    Most payload fields (resource, severity, environment, tags, ...) come
    from ``alerta_*`` rule options and are rendered against the first match
    via ``resolve_string``.
    """
    required_options = frozenset(['alerta_api_url'])
    def __init__(self, rule):
        super(AlertaAlerter, self).__init__(rule)
        # Connection settings.
        self.url = self.rule.get('alerta_api_url', None)
        self.api_key = self.rule.get('alerta_api_key', None)
        # Seconds before Alerta auto-expires the alert (default: one day).
        self.timeout = self.rule.get('alerta_timeout', 86400)
        self.use_match_timestamp = self.rule.get('alerta_use_match_timestamp', False)
        self.use_qk_as_resource = self.rule.get('alerta_use_qk_as_resource', False)
        self.verify_ssl = not self.rule.get('alerta_api_skip_ssl', False)
        # Placeholder used when a templated field cannot be resolved.
        self.missing_text = self.rule.get('alert_missing_value', '<MISSING VALUE>')
        # Payload fields, all overridable per rule.
        self.severity = self.rule.get('alerta_severity', 'warning')
        self.resource = self.rule.get('alerta_resource', 'elastalert')
        self.environment = self.rule.get('alerta_environment', 'Production')
        self.origin = self.rule.get('alerta_origin', 'elastalert')
        self.service = self.rule.get('alerta_service', ['elastalert'])
        self.text = self.rule.get('alerta_text', 'elastalert')
        self.type = self.rule.get('alerta_type', 'elastalert')
        self.event = self.rule.get('alerta_event', 'elastalert')
        self.correlate = self.rule.get('alerta_correlate', [])
        self.tags = self.rule.get('alerta_tags', [])
        self.group = self.rule.get('alerta_group', '')
        # Parallel lists zipped into the payload's 'attributes' dict.
        self.attributes_keys = self.rule.get('alerta_attributes_keys', [])
        self.attributes_values = self.rule.get('alerta_attributes_values', [])
        self.value = self.rule.get('alerta_value', '')
    def alert(self, matches):
        """POST the Alerta payload built from the first match.

        NOTE(review): when the query key is used as the resource this
        mutates ``self.resource``, so the value persists into later alerts
        from this alerter instance — confirm this statefulness is intended.
        """
        # Use the query-key value as the resource when configured and present.
        if self.use_qk_as_resource and 'query_key' in self.rule and lookup_es_key(matches[0], self.rule['query_key']):
            self.resource = lookup_es_key(matches[0], self.rule['query_key'])
        headers = {'content-type': 'application/json'}
        if self.api_key is not None:
            headers['Authorization'] = 'Key %s' % (self.rule['alerta_api_key'])
        # Only the first match is used to render the payload.
        alerta_payload = self.get_json_payload(matches[0])
        try:
            response = requests.post(self.url, data=alerta_payload, headers=headers, verify=self.verify_ssl)
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to Alerta: %s" % e)
        elastalert_logger.info("Alert sent to Alerta")
    def create_default_title(self, matches):
        """Return the rule name, plus '.<query key value>' when available."""
        title = '%s' % (self.rule['name'])
        if 'query_key' in self.rule:
            qk = matches[0].get(self.rule['query_key'])
            if qk:
                title += '.%s' % (qk)
        return title
    def get_info(self):
        """Describe this alerter for status reporting."""
        return {'type': 'alerta',
                'alerta_url': self.url}
    def get_json_payload(self, match):
        """Build and JSON-serialize the Alerta alert document for one match."""
        # Empty-string text/event options fall back to generated defaults.
        alerta_text = self.rule['type'].get_match_str([match]) if self.text == '' else resolve_string(self.text, match, self.missing_text)
        alerta_event = self.create_default_title([match]) if self.event == '' else resolve_string(self.event, match, self.missing_text)
        # Fall back to the current time when the match carries no timestamp.
        match_timestamp = lookup_es_key(match, self.rule.get('timestamp_field', '@timestamp'))
        if match_timestamp is None:
            match_timestamp = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
        if self.use_match_timestamp:
            createTime = ts_to_dt(match_timestamp).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
        else:
            createTime = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
        alerta_payload_dict = {
            'resource': resolve_string(self.resource, match, self.missing_text),
            # NOTE(review): severity is resolved without the missing-value
            # placeholder, unlike every other field — confirm if intentional.
            'severity': resolve_string(self.severity, match),
            'timeout': self.timeout,
            'createTime': createTime,
            'type': self.type,
            'environment': resolve_string(self.environment, match, self.missing_text),
            'origin': resolve_string(self.origin, match, self.missing_text),
            'group': resolve_string(self.group, match, self.missing_text),
            'event': alerta_event,
            'text': alerta_text,
            'value': resolve_string(self.value, match, self.missing_text),
            'service': [resolve_string(a_service, match, self.missing_text) for a_service in self.service],
            'tags': [resolve_string(a_tag, match, self.missing_text) for a_tag in self.tags],
            'correlate': [resolve_string(an_event, match, self.missing_text) for an_event in self.correlate],
            'attributes': dict(list(zip(self.attributes_keys,
                                        [resolve_string(a_value, match, self.missing_text) for a_value in self.attributes_values]))),
            'rawData': self.create_alert_body([match]),
        }
        try:
            payload = json.dumps(alerta_payload_dict, cls=DateTimeEncoder)
        except Exception as e:
            raise Exception("Error building Alerta request: %s" % e)
        return payload
class HTTPPostAlerter(Alerter):
    """POST a JSON payload built from each match to one or more HTTP endpoints."""
    required_options = frozenset(['http_post_url'])

    def __init__(self, rule):
        super(HTTPPostAlerter, self).__init__(rule)
        post_url = self.rule.get('http_post_url')
        # Accept a single URL or a list of URLs.
        if isinstance(post_url, str):
            post_url = [post_url]
        self.post_url = post_url
        self.post_proxy = self.rule.get('http_post_proxy')
        # Mapping of payload key -> ES field to copy into the payload.
        self.post_payload = self.rule.get('http_post_payload', {})
        # Static key/value pairs always added to the payload.
        self.post_static_payload = self.rule.get('http_post_static_payload', {})
        # Default to sending the whole match when no explicit mapping is given.
        self.post_all_values = self.rule.get('http_post_all_values', not self.post_payload)
        self.post_http_headers = self.rule.get('http_post_headers', {})
        self.post_ca_certs = self.rule.get('http_post_ca_certs')
        self.post_ignore_ssl_errors = self.rule.get('http_post_ignore_ssl_errors', False)
        self.timeout = self.rule.get('http_post_timeout', 10)

    def alert(self, matches):
        """Send one POST per match to every configured URL."""
        for match in matches:
            # Bug fix: copy the match instead of aliasing it; previously the
            # static payload keys were merged into the shared match dict in
            # place, leaking into anything else that used the same match.
            payload = dict(match) if self.post_all_values else {}
            payload.update(self.post_static_payload)
            for post_key, es_key in list(self.post_payload.items()):
                payload[post_key] = lookup_es_key(match, es_key)
            headers = {
                "Content-Type": "application/json",
                "Accept": "application/json;charset=utf-8"
            }
            # A CA bundle path implies verification; otherwise honour the flag.
            if self.post_ca_certs:
                verify = self.post_ca_certs
            else:
                verify = not self.post_ignore_ssl_errors
            headers.update(self.post_http_headers)
            proxies = {'https': self.post_proxy} if self.post_proxy else None
            for url in self.post_url:
                try:
                    response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder),
                                             headers=headers, proxies=proxies, timeout=self.timeout,
                                             verify=verify)
                    response.raise_for_status()
                except RequestException as e:
                    raise EAException("Error posting HTTP Post alert: %s" % e)
        elastalert_logger.info("HTTP Post alert sent.")

    def get_info(self):
        """Describe this alerter for status reporting."""
        return {'type': 'http_post',
                'http_post_webhook_url': self.post_url}
class LineNotifyAlerter(Alerter):
    """Send alerts through the LINE Notify API."""
    # Bug fix: the attribute was misspelled 'required_option', so the
    # required-options check on the access token never took effect.
    required_options = frozenset(["linenotify_access_token"])

    def __init__(self, rule):
        super(LineNotifyAlerter, self).__init__(rule)
        self.linenotify_access_token = self.rule["linenotify_access_token"]

    def alert(self, matches):
        """POST the rendered alert body to the LINE Notify endpoint."""
        body = self.create_alert_body(matches)
        headers = {
            "Content-Type": "application/x-www-form-urlencoded",
            "Authorization": "Bearer {}".format(self.linenotify_access_token)
        }
        payload = {
            "message": body
        }
        try:
            response = requests.post("https://notify-api.line.me/api/notify", data=payload, headers=headers)
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to Line Notify: %s" % e)
        elastalert_logger.info("Alert sent to Line Notify")

    def get_info(self):
        """Describe this alerter for status reporting."""
        return {"type": "linenotify", "linenotify_access_token": self.linenotify_access_token}
class HiveAlerter(Alerter):
    """Raise an alert in TheHive incident-response platform.

    Observables, custom fields and tags are resolved from the matches using
    the mappings configured on the rule.
    """
    required_options = set(['hive_connection', 'hive_alert_config'])
    def lookup_field(self, match: dict, field_name: str, default):
        """Return the match field's value, then the rule option, then *default*."""
        field_value = lookup_es_key(match, field_name)
        if field_value is None:
            field_value = self.rule.get(field_name, default)
        return field_value
    def load_observable_artifacts(self, match: dict):
        """Build TheHive artifact dicts from 'hive_observable_data_mapping'."""
        artifacts = []
        for mapping in self.rule.get('hive_observable_data_mapping', []):
            for observable_type, mapping_key in mapping.items():
                data = self.lookup_field(match, mapping_key, '')
                artifact = {'tlp': 2,
                            'tags': [],
                            'message': None,
                            'dataType': observable_type,
                            'data': data}
                artifacts.append(artifact)
        return artifacts
    def load_custom_fields(self, custom_fields_raw: list, match: dict):
        """Convert the raw custom-field list into TheHive's keyed format.

        String values are treated as field references and resolved against
        the match; non-string values are used literally.
        """
        custom_fields = {}
        position = 0
        for field in custom_fields_raw:
            if (isinstance(field['value'], str)):
                value = self.lookup_field(match, field['value'], field['value'])
            else:
                value = field['value']
            custom_fields[field['name']] = {'order': position, field['type']: value}
            position += 1
        return custom_fields
    def load_tags(self, tag_names: list, match: dict):
        """Resolve tag names against the match, flattening list-valued fields."""
        tag_values = set()
        for tag in tag_names:
            tag_value = self.lookup_field(match, tag, tag)
            if isinstance(tag_value, list):
                for sub_tag in tag_value:
                    tag_values.add(sub_tag)
            else:
                tag_values.add(tag_value)
        return tag_values
    def alert(self, matches):
        """Assemble the alert document and POST it to TheHive API."""
        # Base alert; the rule's hive_alert_config then overrides these keys.
        alert_config = {
            'artifacts': [],
            'customFields': {},
            'date': int(time.time()) * 1000,
            'description': self.create_alert_body(matches),
            # Short random reference so repeated alerts stay distinct.
            'sourceRef': str(uuid.uuid4())[0:6],
            'tags': [],
            'title': self.create_title(matches),
        }
        alert_config.update(self.rule.get('hive_alert_config', {}))
        # Aggregate observables and tags across all matches.
        tags = set()
        artifacts = []
        for match in matches:
            artifacts = artifacts + self.load_observable_artifacts(match)
            tags.update(self.load_tags(alert_config['tags'], match))
        alert_config['artifacts'] = artifacts
        alert_config['tags'] = list(tags)
        # Custom fields are resolved against the first match only.
        alert_config['customFields'] = self.load_custom_fields(alert_config['customFields'],
                                                               matches[0])
        connection_details = self.rule['hive_connection']
        api_key = connection_details.get('hive_apikey', '')
        hive_host = connection_details.get('hive_host', 'http://localhost')
        hive_port = connection_details.get('hive_port', 9000)
        proxies = connection_details.get('hive_proxies', {'http': '', 'https': ''})
        verify = connection_details.get('hive_verify', False)
        alert_body = json.dumps(alert_config, indent=4, sort_keys=True)
        req = f'{hive_host}:{hive_port}/api/alert'
        headers = {'Content-Type': 'application/json',
                   'Authorization': f'Bearer {api_key}'}
        try:
            response = requests.post(req,
                                     headers=headers,
                                     data=alert_body,
                                     proxies=proxies,
                                     verify=verify)
            response.raise_for_status()
        except RequestException as e:
            raise EAException(f"Error posting to TheHive: {e}")
    def get_info(self):
        """Describe this alerter for status reporting."""
        return {
            'type': 'hivealerter',
            'hive_host': self.rule.get('hive_connection', {}).get('hive_host', '')
        }
class DiscordAlerter(Alerter):
    """Post alerts to a Discord channel through a webhook, as a single embed."""
    required_options = frozenset(['discord_webhook_url'])

    def __init__(self, rule):
        super(DiscordAlerter, self).__init__(rule)
        self.discord_webhook_url = self.rule['discord_webhook_url']
        self.discord_emoji_title = self.rule.get('discord_emoji_title', ':warning:')
        self.discord_proxy = self.rule.get('discord_proxy', None)
        self.discord_proxy_login = self.rule.get('discord_proxy_login', None)
        self.discord_proxy_password = self.rule.get('discord_proxy_password', None)
        self.discord_embed_color = self.rule.get('discord_embed_color', 0xffffff)
        self.discord_embed_footer = self.rule.get('discord_embed_footer', None)
        self.discord_embed_icon_url = self.rule.get('discord_embed_icon_url', None)

    def alert(self, matches):
        """Render the matches into an embed and POST it to the webhook."""
        title = '%s' % (self.create_title(matches))
        body = ''
        for match in matches:
            body += str(BasicMatchString(self.rule, match))
            if len(matches) > 1:
                body += '\n----------------------------------------\n'
        # Discord caps embed descriptions; crop and say so.
        if len(body) > 2047:
            body = body[0:1950] + '\n *message was cropped according to discord embed description limits!* '
        body += '```'

        embed = {
            "description": "%s" % (body),
            "color": (self.discord_embed_color)
        }
        if self.discord_embed_footer:
            embed["footer"] = {
                "text": (self.discord_embed_footer) if self.discord_embed_footer else None,
                "icon_url": (self.discord_embed_icon_url) if self.discord_embed_icon_url else None
            }
        data = {
            "content": "%s %s %s" % (self.discord_emoji_title, title, self.discord_emoji_title),
            "embeds": [embed]
        }

        proxies = {'https': self.discord_proxy} if self.discord_proxy else None
        auth = HTTPProxyAuth(self.discord_proxy_login, self.discord_proxy_password) if self.discord_proxy_login else None
        headers = {"Content-Type": "application/json"}
        try:
            response = requests.post(self.discord_webhook_url, data=json.dumps(data), headers=headers, proxies=proxies, auth=auth)
            warnings.resetwarnings()
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to Discord: %s. Details: %s" % (e, "" if e.response is None else e.response.text))
        elastalert_logger.info(
            "Alert sent to the webhook %s" % self.discord_webhook_url)

    def get_info(self):
        """Describe this alerter for status reporting."""
        return {'type': 'discord', 'discord_webhook_url': self.discord_webhook_url}
class DingTalkAlerter(Alerter):
    """Send alerts to a DingTalk robot webhook.

    Supported 'dingtalk_msgtype' values: 'text', 'markdown',
    'single_action_card' and 'action_card'.
    """
    required_options = frozenset(['dingtalk_access_token', 'dingtalk_msgtype'])

    def __init__(self, rule):
        super(DingTalkAlerter, self).__init__(rule)
        self.dingtalk_access_token = self.rule.get('dingtalk_access_token')
        self.dingtalk_webhook_url = 'https://oapi.dingtalk.com/robot/send?access_token=%s' % (self.dingtalk_access_token)
        self.dingtalk_msgtype = self.rule.get('dingtalk_msgtype')
        self.dingtalk_single_title = self.rule.get('dingtalk_single_title', 'elastalert')
        self.dingtalk_single_url = self.rule.get('dingtalk_single_url', '')
        self.dingtalk_btn_orientation = self.rule.get('dingtalk_btn_orientation', '')
        self.dingtalk_btns = self.rule.get('dingtalk_btns', [])
        self.dingtalk_proxy = self.rule.get('dingtalk_proxy', None)
        self.dingtalk_proxy_login = self.rule.get('dingtalk_proxy_login', None)
        self.dingtalk_proxy_password = self.rule.get('dingtalk_proxy_pass', None)

    def format_body(self, body):
        # Kept for interface compatibility; returns the body UTF-8 encoded.
        return body.encode('utf8')

    def alert(self, matches):
        """Build the payload for the configured message type and POST it."""
        title = self.create_title(matches)
        body = self.create_alert_body(matches)
        proxies = {'https': self.dingtalk_proxy} if self.dingtalk_proxy else None
        auth = HTTPProxyAuth(self.dingtalk_proxy_login, self.dingtalk_proxy_password) if self.dingtalk_proxy_login else None
        headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json;charset=utf-8'
        }
        if self.dingtalk_msgtype == 'text':
            payload = {
                'msgtype': self.dingtalk_msgtype,
                'text': {
                    'content': body
                }
            }
        elif self.dingtalk_msgtype == 'markdown':
            payload = {
                'msgtype': self.dingtalk_msgtype,
                'markdown': {
                    'title': title,
                    'text': body
                }
            }
        elif self.dingtalk_msgtype == 'single_action_card':
            payload = {
                'msgtype': 'actionCard',
                'actionCard': {
                    'title': title,
                    'text': body,
                    'singleTitle': self.dingtalk_single_title,
                    'singleURL': self.dingtalk_single_url
                }
            }
        elif self.dingtalk_msgtype == 'action_card':
            payload = {
                'msgtype': 'actionCard',
                'actionCard': {
                    'title': title,
                    'text': body
                }
            }
            if self.dingtalk_btn_orientation != '':
                payload['actionCard']['btnOrientation'] = self.dingtalk_btn_orientation
            if self.dingtalk_btns:
                payload['actionCard']['btns'] = self.dingtalk_btns
        else:
            # Bug fix: an unrecognised msgtype previously crashed with an
            # UnboundLocalError on 'payload'; fail with a clear error instead.
            raise EAException("Unsupported dingtalk_msgtype: %s" % self.dingtalk_msgtype)
        try:
            response = requests.post(self.dingtalk_webhook_url, data=json.dumps(payload,
                                     cls=DateTimeEncoder), headers=headers, proxies=proxies, auth=auth)
            warnings.resetwarnings()
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to dingtalk: %s" % e)
        elastalert_logger.info("Trigger sent to dingtalk")

    def get_info(self):
        """Describe this alerter for status reporting."""
        return {
            "type": "dingtalk",
            "dingtalk_webhook_url": self.dingtalk_webhook_url
        }
class ChatworkAlerter(Alerter):
    """Post alerts to a Chatwork room via the Chatwork v2 REST API."""
    required_options = frozenset(['chatwork_apikey', 'chatwork_room_id'])

    def __init__(self, rule):
        super(ChatworkAlerter, self).__init__(rule)
        self.chatwork_apikey = self.rule.get('chatwork_apikey')
        self.chatwork_room_id = self.rule.get('chatwork_room_id')
        self.url = 'https://api.chatwork.com/v2/rooms/%s/messages' % (self.chatwork_room_id)
        self.chatwork_proxy = self.rule.get('chatwork_proxy', None)
        self.chatwork_proxy_login = self.rule.get('chatwork_proxy_login', None)
        self.chatwork_proxy_pass = self.rule.get('chatwork_proxy_pass', None)

    def alert(self, matches):
        """POST the rendered alert body as a message to the configured room."""
        body = self.create_alert_body(matches)
        headers = {'X-ChatWorkToken': self.chatwork_apikey}
        proxies = {'https': self.chatwork_proxy} if self.chatwork_proxy else None
        auth = HTTPProxyAuth(self.chatwork_proxy_login, self.chatwork_proxy_pass) if self.chatwork_proxy_login else None
        params = {'body': body}
        try:
            response = requests.post(self.url, params=params, headers=headers, proxies=proxies, auth=auth)
            warnings.resetwarnings()
            response.raise_for_status()
        except RequestException as e:
            # Bug fix: the service name was misspelled 'Chattwork' here.
            raise EAException("Error posting to Chatwork: %s. Details: %s" % (e, "" if e.response is None else e.response.text))
        elastalert_logger.info(
            "Alert sent to Chatwork room %s" % self.chatwork_room_id)

    def get_info(self):
        """Describe this alerter for status reporting."""
        return {
            "type": "chatwork",
            "chatwork_room_id": self.chatwork_room_id
        }
class DatadogAlerter(Alerter):
    """Create a Datadog event for each alert via the Datadog v1 events API."""
    required_options = frozenset(['datadog_api_key', 'datadog_app_key'])

    def __init__(self, rule):
        super(DatadogAlerter, self).__init__(rule)
        self.dd_api_key = self.rule.get('datadog_api_key', None)
        self.dd_app_key = self.rule.get('datadog_app_key', None)

    def alert(self, matches):
        """POST an event with the alert title and body to Datadog."""
        payload = {
            'title': self.create_title(matches),
            'text': self.create_alert_body(matches)
        }
        headers = {
            'Content-Type': 'application/json',
            'DD-API-KEY': self.dd_api_key,
            'DD-APPLICATION-KEY': self.dd_app_key
        }
        try:
            response = requests.post('https://api.datadoghq.com/api/v1/events',
                                     data=json.dumps(payload, cls=DateTimeEncoder),
                                     headers=headers)
            response.raise_for_status()
        except RequestException as e:
            raise EAException('Error posting event to Datadog: %s' % e)
        elastalert_logger.info('Alert sent to Datadog')

    def get_info(self):
        """Describe this alerter for status reporting."""
        return {'type': 'datadog'}
class SesAlerter(Alerter):
    """Send alert emails through AWS SES (boto3).

    Credentials come either from an AWS profile ('ses_aws_profile') or from
    explicit key options; recipients can be static or derived per match.
    """
    required_options = frozenset(['ses_email', 'ses_from_addr'])
    def __init__(self, *args):
        super(SesAlerter, self).__init__(*args)
        self.aws_access_key_id = self.rule.get('ses_aws_access_key_id')
        self.aws_secret_access_key = self.rule.get('ses_aws_secret_access_key')
        self.aws_region = self.rule.get('ses_aws_region', 'us-east-1')
        self.aws_profile = self.rule.get('ses_aws_profile', '')
        self.from_addr = self.rule.get('ses_from_addr')
        # Normalize every address option to a list of addresses.
        if isinstance(self.rule['ses_email'], str):
            self.rule['ses_email'] = [self.rule['ses_email']]
        # If there is a cc then also convert it a list if it isn't
        cc = self.rule.get('ses_cc')
        if cc and isinstance(cc, str):
            self.rule['ses_cc'] = [self.rule['ses_cc']]
        # Likewise for bcc.
        bcc = self.rule.get('ses_bcc')
        if bcc and isinstance(bcc, str):
            self.rule['ses_bcc'] = [self.rule['ses_bcc']]
        # If there is a email_reply_to then also convert it to a list if it isn't
        reply_to = self.rule.get('ses_email_reply_to')
        if reply_to and isinstance(reply_to, str):
            self.rule['ses_email_reply_to'] = [self.rule['ses_email_reply_to']]
        # Ensure the optional domain suffix starts with '@'.
        add_suffix = self.rule.get('ses_email_add_domain')
        if add_suffix and not add_suffix.startswith('@'):
            self.rule['ses_email_add_domain'] = '@' + add_suffix
    def alert(self, matches):
        """Send one email summarizing all matches via SES."""
        body = self.create_alert_body(matches)
        # Default recipients; may be overridden by a field of the first match.
        to_addr = self.rule['ses_email']
        if 'ses_email_from_field' in self.rule:
            recipient = lookup_es_key(matches[0], self.rule['ses_email_from_field'])
            if isinstance(recipient, str):
                if '@' in recipient:
                    # Already a full address.
                    to_addr = [recipient]
                elif 'ses_email_add_domain' in self.rule:
                    # Bare user name; append the configured domain suffix.
                    to_addr = [recipient + self.rule['ses_email_add_domain']]
            elif isinstance(recipient, list):
                to_addr = recipient
                if 'ses_email_add_domain' in self.rule:
                    to_addr = [name + self.rule['ses_email_add_domain'] for name in to_addr]
        # Prefer a named AWS profile; otherwise use explicit credentials.
        if self.aws_profile != '':
            session = boto3.Session(profile_name=self.aws_profile)
        else:
            session = boto3.Session(
                aws_access_key_id=self.aws_access_key_id,
                aws_secret_access_key=self.aws_secret_access_key,
                region_name=self.aws_region
            )
        client = session.client('ses')
        try:
            client.send_email(
                Source=self.from_addr,
                Destination={
                    'ToAddresses': to_addr,
                    'CcAddresses': self.rule.get('ses_cc', []),
                    'BccAddresses': self.rule.get('ses_bcc', [])
                },
                Message={
                    'Subject': {
                        'Charset': 'UTF-8',
                        'Data': self.create_title(matches),
                    },
                    'Body': {
                        'Text': {
                            'Charset': 'UTF-8',
                            'Data': body,
                        }
                    }
                },
                ReplyToAddresses=self.rule.get('ses_email_reply_to', []))
        except Exception as e:
            raise EAException("Error sending ses: %s" % (e,))
        elastalert_logger.info("Sent ses to %s" % (to_addr,))
    def create_default_title(self, matches):
        """Return 'ElastAlert 2: <rule name>' plus the query key value if present."""
        subject = 'ElastAlert 2: %s' % (self.rule['name'])
        # If the rule has a query_key, add that value
        if 'query_key' in self.rule:
            qk = matches[0].get(self.rule['query_key'])
            if qk:
                subject += ' - %s' % (qk)
        return subject
    def get_info(self):
        """Describe this alerter for status reporting."""
        return {'type': 'ses',
                'recipients': self.rule['ses_email']}
| true | true |
f7fd1f0d60b5e0ffae9f966c2cd4da5f6d436edf | 266 | py | Python | HW7/AndriiBabii/CW_7.py | kolyasalubov/Lv-677.PythonCore | c9f9107c734a61e398154a90b8a3e249276c2704 | [
"MIT"
] | null | null | null | HW7/AndriiBabii/CW_7.py | kolyasalubov/Lv-677.PythonCore | c9f9107c734a61e398154a90b8a3e249276c2704 | [
"MIT"
] | null | null | null | HW7/AndriiBabii/CW_7.py | kolyasalubov/Lv-677.PythonCore | c9f9107c734a61e398154a90b8a3e249276c2704 | [
"MIT"
] | 6 | 2022-02-22T22:30:49.000Z | 2022-03-28T12:51:19.000Z | #https://www.codewars.com/kata/counting-sheep-dot-dot-dot/train/python
def count_sheeps(sheep):
    """Return how many entries in *sheep* are present (compare equal to True)."""
    return sheep.count(True)
#def count_sheeps(sheep):
# return sheep.count(True)
| 24.181818 | 70 | 0.672932 |
def count_sheeps(sheep):
    """Count the sheep that are present (entries comparing equal to True)."""
    return sum(1 for entry in sheep if entry == True)
| true | true |
f7fd205662587df3005c39c0ec6d73c6eba725f9 | 15,445 | py | Python | tensorflow2/ABC_COVID-19/ABC_IPU.py | waterfallhyb/portfolio-examples | 46ff40f593c88979495f0987f857d056f8addaf7 | [
"MIT"
] | 28 | 2020-09-26T21:36:32.000Z | 2021-07-04T03:40:45.000Z | tensorflow2/ABC_COVID-19/ABC_IPU.py | waterfallhyb/portfolio-examples | 46ff40f593c88979495f0987f857d056f8addaf7 | [
"MIT"
] | 8 | 2021-12-11T11:08:29.000Z | 2022-03-30T10:50:55.000Z | tensorflow2/ABC_COVID-19/ABC_IPU.py | waterfallhyb/portfolio-examples | 46ff40f593c88979495f0987f857d056f8addaf7 | [
"MIT"
] | 5 | 2021-11-29T07:55:08.000Z | 2022-03-21T06:38:51.000Z | # Copyright 2020 Graphcore Ltd.
"""
ABC algorithm for COVID-19 modelling, replicated across multiple IPUs.
See README for model background.
"""
import numpy as np
import os
import time as time
from tensorflow.python import ipu
import tensorflow as tf
import tensorflow_probability as tfp
import covid_data
from argparser import get_argparser
tfd = tfp.distributions
# Parse the CLI args
ap = get_argparser()
args = ap.parse_args()
# Chunked enqueueing only works if the batch splits into whole chunks.
assert (not args.enqueue_chunk_size or
        args.n_samples_per_batch % args.enqueue_chunk_size == 0), \
    "--enqueue-chunk-size must divide into --n-samples-per-batch exactly"
if args.samples_filepath:
    assert os.path.exists(os.path.dirname(os.path.abspath(args.samples_filepath))), \
        "Path to save samples (--samples-fn) does not exist."
# Mapping to tf constants to avoid graph recompilation.
args.tolerance = tf.constant(args.tolerance, dtype=tf.float32)
args.n_samples_target = tf.constant(args.n_samples_target, dtype=tf.int32)
args.max_n_runs = tf.constant(args.max_n_runs, dtype=tf.int32)
# The parameters args.enqueue_chunk_size and n_samples_per_batch are not mapped
# to constants since they change the data structure and respective
# layout of convolutions on the IPU.
# Modelling constants
COUNTRY_DATA_TRAIN, POPULATION = covid_data.get_data(args.country)
# Casting population to tf.constant avoids recompilation but increases
# processing time by around 15%
# POPULATION = tf.constant(POPULATION, dtype=tf.float32)
# Rows = the five stochastic transition events; columns = compartment
# deltas applied to (S, I, A, R, D, Ru) in build_graph's matmul.
MIXING_MATRIX = tf.constant([[-1, 1, 0, 0, 0, 0],
                             [0, -1, 1, 0, 0, 0],
                             [0, 0, -1, 1, 0, 0],
                             [0, 0, -1, 0, 1, 0],
                             [0, -1, 0, 0, 0, 1]],
                            dtype=tf.float32)
# Upper bounds of the uniform prior over
# [alpha_0, alpha, n, beta, gamma, delta, eta, kappa].
UNIFORM_PRIOR_UPPER_LIMIT = tf.constant(
    [1.0, 100.0, 2.0, 1.0, 1.0, 1.0, 1.0, 2.0])
# Run args
MAX_REPORT_SIZE = int(5e9)  # cap on the Poplar profiling report size (bytes)
# Optionally truncate the training series to the first --n-days days.
if args.n_days is None:
    country_data_train = COUNTRY_DATA_TRAIN
else:
    country_data_train = COUNTRY_DATA_TRAIN[:, :args.n_days]
def configure_ipu():
    """Reserve the requested number of IPUs and set up profiling."""
    profiling_enabled = bool(args.profile)
    if profiling_enabled:
        print(f'Writing profile to {args.profile_dir}.')
    # Only request an execution profile / report directory when profiling.
    exec_profile = (ipu.utils.ExecutionProfileType.TILE_PROFILE
                    if profiling_enabled else False)
    cfg = ipu.utils.create_ipu_config(
        profiling=args.profile,
        use_poplar_cbor_report=args.profile,
        profile_execution=exec_profile,
        report_directory=args.profile_dir if profiling_enabled else '',
        max_report_size=MAX_REPORT_SIZE,
    )
    # One IPU per replica.
    cfg = ipu.utils.auto_select_ipus(cfg, args.replication_factor)
    ipu.utils.configure_ipu_system(cfg)
configure_ipu()
# Create an IPU distribution strategy.
strategy = ipu.ipu_strategy.IPUStrategy()
# Create outfeed for streaming data to host; one stream per replica.
outfeed_data = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
    'outfeed_data', replication_factor=args.replication_factor)
def conditional_enqueue_op(params, n_accs, dists, gain):
    """Return an op that enqueues results to the host outfeed only when
    ``gain`` (accepted samples across replicas) is positive."""
    def _enq_fn(to_enq):
        # --no-outfeed-ops disables all host transfers (benchmarking mode).
        if args.no_outfeed_ops:
            return tf.no_op()
        return outfeed_data.enqueue(to_enq)

    # Optionally include the per-batch acceptance count in the payload.
    if args.outfeed_num_samples:
        payload = [params, dists, n_accs]
    else:
        payload = [params, dists]
    return tf.cond(
        tf.math.greater(gain, 0),
        lambda: _enq_fn(payload),
        lambda: tf.no_op()
    )
def chunked_outfeed_enqueue(chunk_id, total_gain, p_vec, d_vec, acc_mask):
    """tf.while_loop body: enqueue one chunk of samples if any were accepted.

    Iterates over chunks of the parameter-vector samples and only enqueues
    a chunk to the host outfeed when it contains at least one accepted
    sample (summed across replicas).

    Args:
        chunk_id: int32 loop counter indexing the current chunk.
        total_gain: running count of accepted samples over processed chunks.
        p_vec: parameter vectors, chunked along axis 1.
        d_vec: per-chunk reduced distances.
        acc_mask: per-chunk accepted-sample counts.

    Returns:
        The loop state advanced by one chunk (p_vec/d_vec/acc_mask pass
        through unchanged).
    """
    # sync between replicas: accepted count for this chunk across all IPUs
    g = ipu.cross_replica_ops.cross_replica_sum(
        acc_mask[chunk_id], name="accumulated_sum")
    maybe_enqueue = \
        conditional_enqueue_op(params=tf.gather(p_vec, chunk_id, axis=1),
                               dists=tf.gather(d_vec, chunk_id),
                               n_accs=acc_mask[chunk_id],
                               gain=g)
    # Force the (possible) enqueue to execute before g is consumed below.
    with tf.control_dependencies([maybe_enqueue]):
        g = tf.identity(g)
    return chunk_id + 1, total_gain + g, p_vec, d_vec, acc_mask
@tf.function(experimental_compile=True)
def build_graph(accumulated_number_of_samples, run_number, local_tolerance):
    """Run one ABC batch: simulate all days, score, and outfeed results.

    Samples ``args.n_samples_per_batch`` parameter vectors from the uniform
    prior, simulates the compartment model over every day of
    ``country_data_train``, computes each sample's Euclidean distance to the
    observed (A, R, D) series, and enqueues (parameters, distances[, counts])
    to the host when at least one sample in the batch/chunk is accepted.

    Args:
        accumulated_number_of_samples: int32 scalar; accepted samples so far.
        run_number: int32 scalar; number of batches already executed.
        local_tolerance: float32 scalar; ABC acceptance threshold.

    Returns:
        (accepted-sample total including this batch summed across replicas,
        run_number + 1, local_tolerance) -- the loop state expected by
        ``loop_collect_samples``.
    """
    # init of the simulation
    n_days = tf.cast(country_data_train.shape[1], tf.int32)
    # Per-sample constants/initial compartments broadcast over the batch.
    P = tf.ones(args.n_samples_per_batch) * POPULATION
    A_0 = tf.ones(args.n_samples_per_batch) * country_data_train[0, 0]
    R_0 = tf.ones(args.n_samples_per_batch) * country_data_train[1, 0]
    D_0 = tf.ones(args.n_samples_per_batch) * country_data_train[2, 0]
    # param_vector elements are
    # [alpha_0, alpha, n, beta, gamma, delta, eta, kappa]
    # shape (8, n_samples_per_batch) after the transpose.
    param_vector = tf.transpose(tfd.Uniform(
        tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
        UNIFORM_PRIOR_UPPER_LIMIT,
    ).sample(args.n_samples_per_batch))
    # summary[day, 0/1/2, sample] holds the simulated A/R/D trajectories.
    summary = tf.zeros([n_days, 3, args.n_samples_per_batch])
    # kappa (index 7) sets the initial infectious pool as a multiple of A_0.
    S_store = P - param_vector[7] * A_0 - (A_0 + R_0 + D_0)
    I_store = param_vector[7] * A_0
    A_store = A_0
    R_store = R_0
    D_store = D_0
    Ru_store = tf.zeros(args.n_samples_per_batch)
    # Write the day-0 observations into the summary tensor.
    summary = tf.tensor_scatter_nd_add(
        tensor=summary,
        indices=[[0, 0], [0, 1], [0, 2]],
        updates=tf.stack([A_store, R_store, D_store]))
    init_idx = tf.zeros([], dtype=tf.int32) + 1
    init_vars = \
        [init_idx, summary, S_store, I_store,
         A_store, R_store, D_store, Ru_store]
    def body(i, s, S, I, A, R, D, Ru):
        """Single update for one day."""
        U = A + R + D
        # Time-varying infection rate: alpha_0 + alpha / (1 + U^n).
        alpha_t = param_vector[0] + (
            param_vector[1] / (tf.constant(1.0) + tf.pow(U, param_vector[2])))
        # Expected counts for the five transition events.
        h_1 = (S * I / P) * alpha_t
        h_2 = I * param_vector[4]
        h_3 = A * param_vector[3]
        h_4 = A * param_vector[5]
        h_5 = I * param_vector[6] * param_vector[3]
        h = tf.stack([h_1, h_2, h_3, h_4, h_5])
        # Stochastic event counts ~ Normal(h, sqrt(h)), floored and clipped
        # to [0, P].
        normal_sample = tfd.Normal(loc=h, scale=tf.sqrt(h)).sample()
        Y_store = tf.clip_by_value(tf.math.floor(normal_sample), 0.0, P)
        # Map the 5 event counts onto deltas for (S, I, A, R, D, Ru).
        m = tf.matmul(tf.transpose(MIXING_MATRIX), Y_store)
        # Note: Simple vectorisation suppresses parameter update in loop.
        S = tf.clip_by_value(S + m[0, :], 0.0, P)
        I = tf.clip_by_value(I + m[1, :], 0.0, P)
        A = tf.clip_by_value(A + m[2, :], 0.0, P)
        R = tf.clip_by_value(R + m[3, :], 0.0, P)
        D = tf.clip_by_value(D + m[4, :], 0.0, P)
        Ru = tf.clip_by_value(Ru + m[5, :], 0.0, P)
        s = tf.tensor_scatter_nd_add(tensor=s,
                                     indices=[[i, 0], [i, 1], [i, 2]],
                                     updates=tf.stack([A, R, D]))
        return i+1, s, S, I, A, R, D, Ru
    # populate summary with data from different days
    k, summary, *_ = tf.while_loop(
        cond=lambda k, *_: k < n_days,
        body=body,
        loop_vars=init_vars
    )
    # calculate Euclid distances between real and simulated data
    t_summary = tf.transpose(summary, perm=[2, 1, 0])
    distances = tf.norm(tf.broadcast_to(country_data_train, tf.constant(
        [args.n_samples_per_batch,
         country_data_train.shape[0], country_data_train.shape[1]],
        dtype=tf.int32)) - t_summary, axis=2)
    reduced_distances = tf.reduce_sum(distances, axis=1)
    # calculate which simulations were successful
    acceptance_vector = tf.cast(
        reduced_distances <= local_tolerance, dtype=tf.int32)
    if args.enqueue_chunk_size:
        # split simulations into chunks, iterate over each chunk, counting
        # num. accepted and enqueueing chunk to outfeed if any accepted
        n_chunk = tf.constant(args.n_samples_per_batch // int(args.enqueue_chunk_size))
        acc_chunk_shp = [n_chunk, int(args.enqueue_chunk_size)]
        acc_chunk = \
            tf.reduce_sum(tf.reshape(acceptance_vector, acc_chunk_shp), axis=1)
        param_chunk_shp = [param_vector.shape[0]] + acc_chunk_shp
        init_vars = [tf.constant(0),
                     tf.constant(0),
                     tf.reshape(param_vector, param_chunk_shp),
                     tf.reshape(reduced_distances, acc_chunk_shp),
                     acc_chunk]
        _, gain, _, _, _ = tf.while_loop(cond=lambda n, *_: tf.less(n, n_chunk),
                                         body=chunked_outfeed_enqueue,
                                         loop_vars=init_vars)
    else:
        num_accepted_samples = tf.reduce_sum(
            acceptance_vector, name="num_accepted_samples")
        # sync between replicas
        gain = ipu.cross_replica_ops.cross_replica_sum(
            num_accepted_samples, name="accumulated_sum")
        # transfer stats for simulations with at least once success
        maybe_enq = conditional_enqueue_op(params=param_vector,
                                           dists=reduced_distances,
                                           n_accs=num_accepted_samples,
                                           gain=gain)
    total_number_of_samples = accumulated_number_of_samples + gain
    return total_number_of_samples, run_number + 1, local_tolerance
@tf.function(experimental_compile=True)
def loop_collect_samples(local_samples_target, local_max_num_runs, local_tolerance):
    """Repeat batch simulations until enough samples are accepted or the
    run budget is exhausted."""
    accepted = tf.zeros([], dtype=tf.int32)  # accepted samples so far
    runs = tf.zeros([], dtype=tf.int32)      # batches executed so far

    def _keep_going(acc, n_runs, *_):
        # Stop once the target is reached or the run limit is hit.
        return tf.logical_and(tf.less(acc, local_samples_target),
                              tf.less(n_runs, local_max_num_runs))

    accepted, runs, *_ = tf.while_loop(
        _keep_going, build_graph, [accepted, runs, local_tolerance])
    return accepted, runs
def dequeue_and_postproc(time_it=False):
    """Dequeue the outfeed data stream and filter out the relevant data.

    Args:
        time_it: when True (and not --sparse-output), print separate timings
            for the dequeue and the post-processing steps.

    Returns:
        (param_vector, reduced_distances, eval_param_vector), where
        eval_param_vector keeps only parameter vectors whose distance is
        within ``args.tolerance``; or (None, None, None) if nothing was
        dequeued.
    """
    if time_it and not args.sparse_output:
        start_time = time.time()
    deq_out = outfeed_data.dequeue()
    deq_end_time = time.time()
    if deq_out[0].shape[0] > 0:  # Only process if something dequeued
        # Payload layout depends on --outfeed-num-samples (see
        # conditional_enqueue_op).
        if args.outfeed_num_samples:
            (param_vector, reduced_distances, num_accepted_samples) = \
                deq_out
            print(f"Samples per IPU = {np.sum(num_accepted_samples, axis=0)}")
        else:
            (param_vector, reduced_distances) = deq_out
        if time_it and not args.sparse_output:
            print(f'Dequeue-only time: {deq_end_time - start_time}')
        # Filtering relevant samples
        if args.replication_factor > 1:
            # Fold the replica axis into the leading (enqueue) axis so the
            # filtering below is replica-agnostic.
            s = tf.shape(param_vector)
            pv = param_vector
            param_vector = tf.reshape(
                pv, tf.concat([[s[0] * s[1]], s[2:]], axis=0))
            t = reduced_distances.shape
            rd = reduced_distances
            reduced_distances = tf.reshape(
                rd, tf.concat([[t[0] * t[1]], [t[2]]], axis=0))
        acceptance_vector = tf.cast(
            reduced_distances <= args.tolerance, dtype=tf.bool)
        # assumes param_vector is (enqueues, n_params, batch); the transpose
        # puts the parameter axis first so boolean_mask selects accepted
        # samples on axis 1 -- TODO confirm against build_graph's layout.
        t_param_vector = tf.transpose(param_vector, perm=[1, 0, 2])
        eval_param_vector = tf.boolean_mask(
            t_param_vector, acceptance_vector, axis=1)
        if time_it and not args.sparse_output:
            proc_end_time = time.time()
            print(f'Process dequeued samples time: {proc_end_time - deq_end_time}')
        return param_vector, reduced_distances, eval_param_vector
    else:
        return None, None, None
def main():
    """Warm up, run the timed ABC inference, then dequeue and report stats.

    Orchestration only: compiles/warms the IPU program, times the sampling
    loop, dequeues accepted samples from the outfeed, prints timing and
    statistics, and optionally saves accepted parameter vectors to CSV.
    """
    with strategy.scope():
        # Warm-up
        if not args.sparse_output:
            print("Warming up...")
        strategy.experimental_run_v2(
            loop_collect_samples,
            [args.n_samples_target,
             tf.constant(1, dtype=tf.int32),
             args.tolerance])
        if not args.no_outfeed_ops:
            outfeed_data.dequeue()  # discard warm-up results
        # Time the compute
        if not args.sparse_output:
            print("Running...")
        start_time = time.time()
        num_accepted_samples, num_runs = strategy.experimental_run_v2(
            loop_collect_samples,
            [args.n_samples_target,
             10 if args.profile else args.max_n_runs,  # short run when profiling
             args.tolerance])
        end_time = time.time()
        # Fix: np.int was only a deprecated alias for the builtin int and is
        # removed in NumPy >= 1.24; use int() directly.
        samples_collected = int(num_accepted_samples)
        num_runs = int(num_runs)
        run_duration = end_time - start_time
        # Dequeue the data
        if args.no_outfeed_ops:
            start_time = end_time = time.time()
        else:
            start_time = time.time()
            param_vector, reduced_distances, eval_param_vector = \
                dequeue_and_postproc(time_it=True)
            end_time = time.time()
        deq_proc_duration = end_time - start_time
        duration = run_duration + deq_proc_duration
        if args.sparse_output:
            print(f"{duration:.3f} \t {1e3*duration/num_runs:.3f} \t "
                  f"{run_duration:.3f} \t {1e3*run_duration/num_runs:.3f}")
        else:
            print(f"Running ABC inference for {args.country}\n"
                  f"\tBatch size: {args.n_samples_per_batch}\n"
                  f"\tTolerance: {args.tolerance}"
                  f"\tTarget number of samples: {args.n_samples_target}"
                  f"\tEnqueue chunk size: {args.enqueue_chunk_size}")
            print("=========================================")
            print("IPU runs completed in {0:.3f} seconds\n".format(
                run_duration))
            print(f"Samples collected: {samples_collected:.0f}")
            print(f"Number of runs: {num_runs:.0f} "
                  f"with {args.replication_factor} replica(s)")
            print("Time per run: {0:.3f} milliseconds\n".format(
                1e3*run_duration/num_runs))
            print("Debug: Time for dequeue and processing: "
                  "{0:.3f} second\n".format(deq_proc_duration))
            print("Debug: Total Time (inc dequeue): {0:.3f} second\n".format(
                duration))
            print("Debug: Time per run (inc dequeue): "
                  "{0:.3f} milliseconds\n".format(1e3*duration/num_runs))
            if not args.no_outfeed_ops:
                print(f"param_vector.shape = {param_vector.shape}")
                print(f"reduced_distances.shape = {reduced_distances.shape}")
                print(f"eval_param_vector.shape = {eval_param_vector.shape}")
        if samples_collected < args.n_samples_target and not args.profile:
            raise NotImplementedError(
                "Too few iterations. Increase max_num_runs parameter.")
        if args.samples_filepath:
            # Save the accepted samples if filepath given.
            # NOTE(review): eval_param_vector can be None when nothing was
            # dequeued (or with --no-outfeed-ops) -- .numpy() would raise;
            # confirm intended usage.
            np.savetxt(args.samples_filepath,
                       eval_param_vector.numpy(),
                       delimiter=",")
if __name__ == '__main__':
main()
| 39.70437 | 88 | 0.600388 |
import numpy as np
import os
import time as time
from tensorflow.python import ipu
import tensorflow as tf
import tensorflow_probability as tfp
import covid_data
from argparser import get_argparser
tfd = tfp.distributions
# Parse the CLI args and validate chunking / output-path options.
ap = get_argparser()
args = ap.parse_args()
assert (not args.enqueue_chunk_size or
        args.n_samples_per_batch % args.enqueue_chunk_size == 0), \
    "--enqueue-chunk-size must divide into --n-samples-per-batch exactly"
if args.samples_filepath:
    assert os.path.exists(os.path.dirname(os.path.abspath(args.samples_filepath))), \
        "Path to save samples (--samples-fn) does not exist."
# Map scalar args to tf constants so changing them does not trigger a
# graph recompilation.
args.tolerance = tf.constant(args.tolerance, dtype=tf.float32)
args.n_samples_target = tf.constant(args.n_samples_target, dtype=tf.int32)
args.max_n_runs = tf.constant(args.max_n_runs, dtype=tf.int32)
# Modelling constants: observed series and population for the country.
COUNTRY_DATA_TRAIN, POPULATION = covid_data.get_data(args.country)
# Rows = the five stochastic transition events; columns = compartment
# deltas applied to (S, I, A, R, D, Ru) in build_graph's matmul.
MIXING_MATRIX = tf.constant([[-1, 1, 0, 0, 0, 0],
                             [0, -1, 1, 0, 0, 0],
                             [0, 0, -1, 1, 0, 0],
                             [0, 0, -1, 0, 1, 0],
                             [0, -1, 0, 0, 0, 1]],
                            dtype=tf.float32)
# Upper bounds of the uniform prior over
# [alpha_0, alpha, n, beta, gamma, delta, eta, kappa].
UNIFORM_PRIOR_UPPER_LIMIT = tf.constant(
    [1.0, 100.0, 2.0, 1.0, 1.0, 1.0, 1.0, 2.0])
MAX_REPORT_SIZE = int(5e9)  # cap on the Poplar profiling report size (bytes)
# Optionally truncate the training series to the first --n-days days.
if args.n_days is None:
    country_data_train = COUNTRY_DATA_TRAIN
else:
    country_data_train = COUNTRY_DATA_TRAIN[:, :args.n_days]
def configure_ipu():
    """Attach to the requested IPUs and optionally enable profiling."""
    if args.profile:
        print(f'Writing profile to {args.profile_dir}.')
    config = ipu.utils.create_ipu_config(
        profiling=args.profile,
        use_poplar_cbor_report=args.profile,
        profile_execution=(
            ipu.utils.ExecutionProfileType.TILE_PROFILE if args.profile
            else False),
        report_directory=args.profile_dir if args.profile else '',
        max_report_size=MAX_REPORT_SIZE,
    )
    # One IPU per replica.
    config = ipu.utils.auto_select_ipus(config, args.replication_factor)
    ipu.utils.configure_ipu_system(config)
configure_ipu()
# IPU distribution strategy and host outfeed (one stream per replica).
strategy = ipu.ipu_strategy.IPUStrategy()
outfeed_data = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
    'outfeed_data', replication_factor=args.replication_factor)
def conditional_enqueue_op(params, n_accs, dists, gain):
    """Build an op that enqueues results only when ``gain`` > 0."""
    # Optionally include the acceptance count in the outfeed payload.
    if args.outfeed_num_samples:
        payload = [params, dists, n_accs]
    else:
        payload = [params, dists]

    def _true_branch():
        # --no-outfeed-ops disables all host transfers (benchmarking mode).
        if args.no_outfeed_ops:
            return tf.no_op()
        return outfeed_data.enqueue(payload)

    return tf.cond(tf.math.greater(gain, 0), _true_branch,
                   lambda: tf.no_op())
def chunked_outfeed_enqueue(chunk_id, total_gain, p_vec, d_vec, acc_mask):
    """tf.while_loop body: enqueue one chunk of samples if any were accepted.

    p_vec/d_vec/acc_mask pass through unchanged; total_gain accumulates the
    cross-replica accepted count.
    """
    # Sum this chunk's accepted count across all replicas.
    g = ipu.cross_replica_ops.cross_replica_sum(
        acc_mask[chunk_id], name="accumulated_sum")
    maybe_enqueue = \
        conditional_enqueue_op(params=tf.gather(p_vec, chunk_id, axis=1),
                               dists=tf.gather(d_vec, chunk_id),
                               n_accs=acc_mask[chunk_id],
                               gain=g)
    # Force the (possible) enqueue to execute before g is consumed below.
    with tf.control_dependencies([maybe_enqueue]):
        g = tf.identity(g)
    return chunk_id + 1, total_gain + g, p_vec, d_vec, acc_mask
@tf.function(experimental_compile=True)
def build_graph(accumulated_number_of_samples, run_number, local_tolerance):
    """Run one ABC batch: sample priors, simulate all days, score, outfeed.

    Returns (accepted total across replicas, run_number + 1,
    local_tolerance) -- the loop state of ``loop_collect_samples``.
    """
    n_days = tf.cast(country_data_train.shape[1], tf.int32)
    # Initial compartments broadcast over the batch.
    P = tf.ones(args.n_samples_per_batch) * POPULATION
    A_0 = tf.ones(args.n_samples_per_batch) * country_data_train[0, 0]
    R_0 = tf.ones(args.n_samples_per_batch) * country_data_train[1, 0]
    D_0 = tf.ones(args.n_samples_per_batch) * country_data_train[2, 0]
    # Prior draws; rows are [alpha_0, alpha, n, beta, gamma, delta, eta,
    # kappa], shape (8, n_samples_per_batch) after the transpose.
    param_vector = tf.transpose(tfd.Uniform(
        tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
        UNIFORM_PRIOR_UPPER_LIMIT,
    ).sample(args.n_samples_per_batch))
    # summary[day, 0/1/2, sample] holds the simulated A/R/D trajectories.
    summary = tf.zeros([n_days, 3, args.n_samples_per_batch])
    # kappa (index 7) sets the initial infectious pool as a multiple of A_0.
    S_store = P - param_vector[7] * A_0 - (A_0 + R_0 + D_0)
    I_store = param_vector[7] * A_0
    A_store = A_0
    R_store = R_0
    D_store = D_0
    Ru_store = tf.zeros(args.n_samples_per_batch)
    # Write the day-0 observations into the summary tensor.
    summary = tf.tensor_scatter_nd_add(
        tensor=summary,
        indices=[[0, 0], [0, 1], [0, 2]],
        updates=tf.stack([A_store, R_store, D_store]))
    init_idx = tf.zeros([], dtype=tf.int32) + 1
    init_vars = \
        [init_idx, summary, S_store, I_store,
         A_store, R_store, D_store, Ru_store]
    def body(i, s, S, I, A, R, D, Ru):
        """Advance the compartment model by one day."""
        U = A + R + D
        # Time-varying infection rate: alpha_0 + alpha / (1 + U^n).
        alpha_t = param_vector[0] + (
            param_vector[1] / (tf.constant(1.0) + tf.pow(U, param_vector[2])))
        # Expected counts for the five transition events.
        h_1 = (S * I / P) * alpha_t
        h_2 = I * param_vector[4]
        h_3 = A * param_vector[3]
        h_4 = A * param_vector[5]
        h_5 = I * param_vector[6] * param_vector[3]
        h = tf.stack([h_1, h_2, h_3, h_4, h_5])
        # Stochastic event counts ~ Normal(h, sqrt(h)), floored/clipped.
        normal_sample = tfd.Normal(loc=h, scale=tf.sqrt(h)).sample()
        Y_store = tf.clip_by_value(tf.math.floor(normal_sample), 0.0, P)
        # Map the 5 event counts onto deltas for (S, I, A, R, D, Ru).
        m = tf.matmul(tf.transpose(MIXING_MATRIX), Y_store)
        S = tf.clip_by_value(S + m[0, :], 0.0, P)
        I = tf.clip_by_value(I + m[1, :], 0.0, P)
        A = tf.clip_by_value(A + m[2, :], 0.0, P)
        R = tf.clip_by_value(R + m[3, :], 0.0, P)
        D = tf.clip_by_value(D + m[4, :], 0.0, P)
        Ru = tf.clip_by_value(Ru + m[5, :], 0.0, P)
        s = tf.tensor_scatter_nd_add(tensor=s,
                                     indices=[[i, 0], [i, 1], [i, 2]],
                                     updates=tf.stack([A, R, D]))
        return i+1, s, S, I, A, R, D, Ru
    # Populate summary for every simulated day.
    k, summary, *_ = tf.while_loop(
        cond=lambda k, *_: k < n_days,
        body=body,
        loop_vars=init_vars
    )
    # Euclidean distance between observed and simulated (A, R, D) series.
    t_summary = tf.transpose(summary, perm=[2, 1, 0])
    distances = tf.norm(tf.broadcast_to(country_data_train, tf.constant(
        [args.n_samples_per_batch,
         country_data_train.shape[0], country_data_train.shape[1]],
        dtype=tf.int32)) - t_summary, axis=2)
    reduced_distances = tf.reduce_sum(distances, axis=1)
    # 1 where the sample is within tolerance, else 0.
    acceptance_vector = tf.cast(
        reduced_distances <= local_tolerance, dtype=tf.int32)
    if args.enqueue_chunk_size:
        # Chunked path: enqueue per-chunk, only when a chunk has accepts.
        n_chunk = tf.constant(args.n_samples_per_batch // int(args.enqueue_chunk_size))
        acc_chunk_shp = [n_chunk, int(args.enqueue_chunk_size)]
        acc_chunk = \
            tf.reduce_sum(tf.reshape(acceptance_vector, acc_chunk_shp), axis=1)
        param_chunk_shp = [param_vector.shape[0]] + acc_chunk_shp
        init_vars = [tf.constant(0),
                     tf.constant(0),
                     tf.reshape(param_vector, param_chunk_shp),
                     tf.reshape(reduced_distances, acc_chunk_shp),
                     acc_chunk]
        _, gain, _, _, _ = tf.while_loop(cond=lambda n, *_: tf.less(n, n_chunk),
                                         body=chunked_outfeed_enqueue,
                                         loop_vars=init_vars)
    else:
        # Whole-batch path: one cross-replica sum + conditional enqueue.
        num_accepted_samples = tf.reduce_sum(
            acceptance_vector, name="num_accepted_samples")
        gain = ipu.cross_replica_ops.cross_replica_sum(
            num_accepted_samples, name="accumulated_sum")
        maybe_enq = conditional_enqueue_op(params=param_vector,
                                           dists=reduced_distances,
                                           n_accs=num_accepted_samples,
                                           gain=gain)
    total_number_of_samples = accumulated_number_of_samples + gain
    return total_number_of_samples, run_number + 1, local_tolerance
@tf.function(experimental_compile=True)
def loop_collect_samples(local_samples_target, local_max_num_runs, local_tolerance):
    """Repeat batch simulations until enough samples are accepted or the
    run budget is exhausted; returns (accepted_count, runs_executed)."""
    a = tf.zeros([], dtype=tf.int32)  # number of accepted samples
    n = tf.zeros([], dtype=tf.int32)  # number of runs
    a, n, *_ = tf.while_loop(
        lambda a, n, *_:
        tf.logical_and(
            tf.less(a, local_samples_target),
            tf.less(n, local_max_num_runs)),
        build_graph, [a, n, local_tolerance])
    return a, n
def dequeue_and_postproc(time_it=False):
    """Dequeue the outfeed stream and keep only accepted samples.

    Returns (param_vector, reduced_distances, eval_param_vector), with
    eval_param_vector filtered by ``args.tolerance``; or (None, None, None)
    if nothing was dequeued.
    """
    if time_it and not args.sparse_output:
        start_time = time.time()
    deq_out = outfeed_data.dequeue()
    deq_end_time = time.time()
    if deq_out[0].shape[0] > 0:  # only process if something was dequeued
        # Payload layout depends on --outfeed-num-samples.
        if args.outfeed_num_samples:
            (param_vector, reduced_distances, num_accepted_samples) = \
                deq_out
            print(f"Samples per IPU = {np.sum(num_accepted_samples, axis=0)}")
        else:
            (param_vector, reduced_distances) = deq_out
        if time_it and not args.sparse_output:
            print(f'Dequeue-only time: {deq_end_time - start_time}')
        if args.replication_factor > 1:
            # Fold the replica axis into the leading (enqueue) axis so the
            # filtering below is replica-agnostic.
            s = tf.shape(param_vector)
            pv = param_vector
            param_vector = tf.reshape(
                pv, tf.concat([[s[0] * s[1]], s[2:]], axis=0))
            t = reduced_distances.shape
            rd = reduced_distances
            reduced_distances = tf.reshape(
                rd, tf.concat([[t[0] * t[1]], [t[2]]], axis=0))
        acceptance_vector = tf.cast(
            reduced_distances <= args.tolerance, dtype=tf.bool)
        # assumes param_vector is (enqueues, n_params, batch); transpose puts
        # params first so boolean_mask selects accepted samples on axis 1 --
        # TODO confirm against build_graph's enqueue layout.
        t_param_vector = tf.transpose(param_vector, perm=[1, 0, 2])
        eval_param_vector = tf.boolean_mask(
            t_param_vector, acceptance_vector, axis=1)
        if time_it and not args.sparse_output:
            proc_end_time = time.time()
            print(f'Process dequeued samples time: {proc_end_time - deq_end_time}')
        return param_vector, reduced_distances, eval_param_vector
    else:
        return None, None, None
def main():
    """Warm up, run the timed ABC inference, then dequeue and report stats.

    Orchestration only: compiles/warms the IPU program, times the sampling
    loop, dequeues accepted samples from the outfeed, prints timing and
    statistics, and optionally saves accepted parameter vectors to CSV.
    """
    with strategy.scope():
        # Warm-up run to trigger compilation before timing.
        if not args.sparse_output:
            print("Warming up...")
        strategy.experimental_run_v2(
            loop_collect_samples,
            [args.n_samples_target,
             tf.constant(1, dtype=tf.int32),
             args.tolerance])
        if not args.no_outfeed_ops:
            outfeed_data.dequeue()  # discard warm-up results
        # Timed compute.
        if not args.sparse_output:
            print("Running...")
        start_time = time.time()
        num_accepted_samples, num_runs = strategy.experimental_run_v2(
            loop_collect_samples,
            [args.n_samples_target,
             10 if args.profile else args.max_n_runs,  # short run when profiling
             args.tolerance])
        end_time = time.time()
        # Fix: np.int was only a deprecated alias for the builtin int and is
        # removed in NumPy >= 1.24; use int() directly.
        samples_collected = int(num_accepted_samples)
        num_runs = int(num_runs)
        run_duration = end_time - start_time
        # Dequeue the data (skipped entirely with --no-outfeed-ops).
        if args.no_outfeed_ops:
            start_time = end_time = time.time()
        else:
            start_time = time.time()
            param_vector, reduced_distances, eval_param_vector = \
                dequeue_and_postproc(time_it=True)
            end_time = time.time()
        deq_proc_duration = end_time - start_time
        duration = run_duration + deq_proc_duration
        if args.sparse_output:
            print(f"{duration:.3f} \t {1e3*duration/num_runs:.3f} \t "
                  f"{run_duration:.3f} \t {1e3*run_duration/num_runs:.3f}")
        else:
            print(f"Running ABC inference for {args.country}\n"
                  f"\tBatch size: {args.n_samples_per_batch}\n"
                  f"\tTolerance: {args.tolerance}"
                  f"\tTarget number of samples: {args.n_samples_target}"
                  f"\tEnqueue chunk size: {args.enqueue_chunk_size}")
            print("=========================================")
            print("IPU runs completed in {0:.3f} seconds\n".format(
                run_duration))
            print(f"Samples collected: {samples_collected:.0f}")
            print(f"Number of runs: {num_runs:.0f} "
                  f"with {args.replication_factor} replica(s)")
            print("Time per run: {0:.3f} milliseconds\n".format(
                1e3*run_duration/num_runs))
            print("Debug: Time for dequeue and processing: "
                  "{0:.3f} second\n".format(deq_proc_duration))
            print("Debug: Total Time (inc dequeue): {0:.3f} second\n".format(
                duration))
            print("Debug: Time per run (inc dequeue): "
                  "{0:.3f} milliseconds\n".format(1e3*duration/num_runs))
            if not args.no_outfeed_ops:
                print(f"param_vector.shape = {param_vector.shape}")
                print(f"reduced_distances.shape = {reduced_distances.shape}")
                print(f"eval_param_vector.shape = {eval_param_vector.shape}")
        if samples_collected < args.n_samples_target and not args.profile:
            raise NotImplementedError(
                "Too few iterations. Increase max_num_runs parameter.")
        if args.samples_filepath:
            # NOTE(review): eval_param_vector can be None when nothing was
            # dequeued (or with --no-outfeed-ops) -- .numpy() would raise;
            # confirm intended usage.
            np.savetxt(args.samples_filepath,
                       eval_param_vector.numpy(),
                       delimiter=",")
if __name__ == '__main__':
main()
| true | true |
f7fd20877ed0eb552a777f5f4cbc0df13b888167 | 27,052 | py | Python | azure_rm_deployment.py | lmazuel/azurerm | 5fa38ae5b21b0eef8536ec81bafd7f646ba7b560 | [
"MIT"
] | 3 | 2017-10-20T14:12:53.000Z | 2022-03-30T22:39:04.000Z | azure_rm_deployment.py | lmazuel/azurerm | 5fa38ae5b21b0eef8536ec81bafd7f646ba7b560 | [
"MIT"
] | null | null | null | azure_rm_deployment.py | lmazuel/azurerm | 5fa38ae5b21b0eef8536ec81bafd7f646ba7b560 | [
"MIT"
] | 4 | 2017-12-07T12:05:43.000Z | 2022-01-28T16:05:39.000Z | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: azure_rm_deployment
short_description: Create or destroy Azure Resource Manager template deployments
version_added: "2.1"
description:
- "Create or destroy Azure Resource Manager template deployments via the Azure SDK for Python.
You can find some quick start templates in GitHub here https://github.com/azure/azure-quickstart-templates.
      For more information on Azure resource manager templates see https://azure.microsoft.com/en-us/documentation/articles/resource-group-template-deploy/."
options:
resource_group_name:
description:
- The resource group name to use or create to host the deployed template
required: true
location:
description:
- The geo-locations in which the resource group will be located.
required: false
default: westus
state:
description:
- If state is "present", template will be created. If state is "present" and if deployment exists, it will be
updated. If state is "absent", stack will be removed.
default: present
choices:
- present
- absent
template:
description:
- A hash containing the templates inline. This parameter is mutually exclusive with 'template_link'.
Either one of them is required if "state" parameter is "present".
required: false
default: None
template_link:
description:
- Uri of file containing the template body. This parameter is mutually exclusive with 'template'. Either one
of them is required if "state" parameter is "present".
required: false
default: None
parameters:
description:
- A hash of all the required template variables for the deployment template. This parameter is mutually exclusive
with 'parameters_link'. Either one of them is required if "state" parameter is "present".
required: false
default: None
parameters_link:
description:
- Uri of file containing the parameters body. This parameter is mutually exclusive with 'parameters'. Either
one of them is required if "state" parameter is "present".
required: false
default: None
extends_documentation_fragment:
- azure
author:
- David Justice (@devigned)
- Laurent Mazuel (@lmazuel)
- Andre Price (@obsoleted)
'''
EXAMPLES = '''
# Destroy a template deployment
- name: Destroy Azure Deploy
azure_rm_deployment:
state: absent
subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name: dev-ops-cle
# Create or update a template deployment based on uris using parameter and template links
- name: Create Azure Deploy
azure_rm_deployment:
state: present
resource_group_name: dev-ops-cle
template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.json'
parameters_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.parameters.json'
# Create or update a template deployment based on a uri to the template and parameters specified inline.
# This deploys a VM with SSH support for a given public key, then stores the result in 'azure_vms'. The result is then
# used to create a new host group. This host group is then used to wait for each instance to respond to the public IP SSH.
---
- hosts: localhost
connection: local
gather_facts: no
tasks:
- name: Destroy Azure Deploy
azure_rm_deployment:
state: absent
subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name: dev-ops-cle
- name: Create Azure Deploy
azure_rm_deployment:
state: present
subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name: dev-ops-cle
parameters:
newStorageAccountName:
value: devopsclestorage1
adminUsername:
value: devopscle
dnsNameForPublicIP:
value: devopscleazure
location:
value: West US
vmSize:
value: Standard_A2
vmName:
value: ansibleSshVm
sshKeyData:
value: YOUR_SSH_PUBLIC_KEY
template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-sshkey/azuredeploy.json'
register: azure
- name: Add new instance to host group
add_host: hostname={{ item['ips'][0].public_ip }} groupname=azure_vms
with_items: azure.deployment.instances
- hosts: azure_vms
user: devopscle
tasks:
- name: Wait for SSH to come up
wait_for: port=22 timeout=2000 state=started
- name: echo the hostname of the vm
shell: hostname
# Deploy an Azure WebApp running a hello world'ish node app
- name: Create Azure WebApp Deployment at http://devopscleweb.azurewebsites.net/hello.js
azure_rm_deployment:
state: present
subscription_id: cbbdaed0-fea9-4693-bf0c-d446ac93c030
resource_group_name: dev-ops-cle-webapp
parameters:
repoURL:
value: 'https://github.com/devigned/az-roadshow-oss.git'
siteName:
value: devopscleweb
hostingPlanName:
value: someplan
siteLocation:
value: westus
sku:
value: Standard
template_link: 'https://raw.githubusercontent.com/azure/azure-quickstart-templates/master/201-web-app-github-deploy/azuredeploy.json'
# Create or update a template deployment based on an inline template and parameters
- name: Create Azure Deploy
    azure_rm_deployment:
state: present
subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name: dev-ops-cle
template:
$schema: "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#"
contentVersion: "1.0.0.0"
parameters:
newStorageAccountName:
type: "string"
metadata:
description: "Unique DNS Name for the Storage Account where the Virtual Machine's disks will be placed."
adminUsername:
type: "string"
metadata:
description: "User name for the Virtual Machine."
adminPassword:
type: "securestring"
metadata:
description: "Password for the Virtual Machine."
dnsNameForPublicIP:
type: "string"
metadata:
description: "Unique DNS Name for the Public IP used to access the Virtual Machine."
ubuntuOSVersion:
type: "string"
defaultValue: "14.04.2-LTS"
allowedValues:
- "12.04.5-LTS"
- "14.04.2-LTS"
- "15.04"
metadata:
description: "The Ubuntu version for the VM. This will pick a fully patched image of this given Ubuntu version. Allowed values: 12.04.5-LTS, 14.04.2-LTS, 15.04."
variables:
location: "West US"
imagePublisher: "Canonical"
imageOffer: "UbuntuServer"
OSDiskName: "osdiskforlinuxsimple"
nicName: "myVMNic"
addressPrefix: "10.0.0.0/16"
subnetName: "Subnet"
subnetPrefix: "10.0.0.0/24"
storageAccountType: "Standard_LRS"
publicIPAddressName: "myPublicIP"
publicIPAddressType: "Dynamic"
vmStorageAccountContainerName: "vhds"
vmName: "MyUbuntuVM"
vmSize: "Standard_D1"
virtualNetworkName: "MyVNET"
vnetID: "[resourceId('Microsoft.Network/virtualNetworks',variables('virtualNetworkName'))]"
subnetRef: "[concat(variables('vnetID'),'/subnets/',variables('subnetName'))]"
resources:
-
type: "Microsoft.Storage/storageAccounts"
name: "[parameters('newStorageAccountName')]"
apiVersion: "2015-05-01-preview"
location: "[variables('location')]"
properties:
accountType: "[variables('storageAccountType')]"
-
apiVersion: "2015-05-01-preview"
type: "Microsoft.Network/publicIPAddresses"
name: "[variables('publicIPAddressName')]"
location: "[variables('location')]"
properties:
publicIPAllocationMethod: "[variables('publicIPAddressType')]"
dnsSettings:
domainNameLabel: "[parameters('dnsNameForPublicIP')]"
-
type: "Microsoft.Network/virtualNetworks"
apiVersion: "2015-05-01-preview"
name: "[variables('virtualNetworkName')]"
location: "[variables('location')]"
properties:
addressSpace:
addressPrefixes:
- "[variables('addressPrefix')]"
subnets:
-
name: "[variables('subnetName')]"
properties:
addressPrefix: "[variables('subnetPrefix')]"
-
type: "Microsoft.Network/networkInterfaces"
apiVersion: "2015-05-01-preview"
name: "[variables('nicName')]"
location: "[variables('location')]"
dependsOn:
- "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]"
- "[concat('Microsoft.Network/virtualNetworks/', variables('virtualNetworkName'))]"
properties:
ipConfigurations:
-
name: "ipconfig1"
properties:
privateIPAllocationMethod: "Dynamic"
publicIPAddress:
id: "[resourceId('Microsoft.Network/publicIPAddresses',variables('publicIPAddressName'))]"
subnet:
id: "[variables('subnetRef')]"
-
type: "Microsoft.Compute/virtualMachines"
apiVersion: "2015-06-15"
name: "[variables('vmName')]"
location: "[variables('location')]"
dependsOn:
- "[concat('Microsoft.Storage/storageAccounts/', parameters('newStorageAccountName'))]"
- "[concat('Microsoft.Network/networkInterfaces/', variables('nicName'))]"
properties:
hardwareProfile:
vmSize: "[variables('vmSize')]"
osProfile:
computername: "[variables('vmName')]"
adminUsername: "[parameters('adminUsername')]"
adminPassword: "[parameters('adminPassword')]"
storageProfile:
imageReference:
publisher: "[variables('imagePublisher')]"
offer: "[variables('imageOffer')]"
sku: "[parameters('ubuntuOSVersion')]"
version: "latest"
osDisk:
name: "osdisk"
vhd:
uri: "[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net/',variables('vmStorageAccountContainerName'),'/',variables('OSDiskName'),'.vhd')]"
caching: "ReadWrite"
createOption: "FromImage"
networkProfile:
networkInterfaces:
-
id: "[resourceId('Microsoft.Network/networkInterfaces',variables('nicName'))]"
diagnosticsProfile:
bootDiagnostics:
enabled: "true"
storageUri: "[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net')]"
parameters:
newStorageAccountName:
value: devopsclestorage
adminUsername:
value: devopscle
adminPassword:
value: Password1!
dnsNameForPublicIP:
value: devopscleazure
'''
RETURN = '''
msg:
description: String indicating if the deployment was created or deleted
returned: always
type: string
sample: "deployment created"
deployment:
description: Deployment details
type: dict
returned: always
    sample: {
"group_name": "Test_Deployment",
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Test_Deployment/providers/Microsoft.Resources/deployments/ansible-arm",
"instances": [
{
"ips": [
{
"dns_settings": {
"domain_name_label": "testvm9910001",
"fqdn": "testvm9910001.westus.cloudapp.azure.com"
},
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Test_Deployment/providers/Microsoft.Network/publicIPAddresses/myPublicIP",
"name": "myPublicIP",
"public_ip": "13.91.99.232",
"public_ip_allocation_method": "IPAllocationMethod.dynamic"
}
],
"vm_name": "MyUbuntuVM"
}
],
"name": "ansible-arm",
"outputs": {
"hostname": {
"type": "String",
"value": "testvm9910001.westus.cloudapp.azure.com"
},
"sshCommand": {
"type": "String",
"value": "ssh chouseknecht@testvm9910001.westus.cloudapp.azure.com"
}
}
}
'''
import time
import yaml
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
from itertools import chain
from azure.common.credentials import ServicePrincipalCredentials
from azure.common.exceptions import CloudError
from azure.mgmt.resource.resources.models import (DeploymentProperties,
ParametersLink,
TemplateLink,
Deployment,
ResourceGroup,
Dependency)
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.network import NetworkManagementClient
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMDeploymentManager(AzureRMModuleBase):
    """
    Implements the azure_rm_deployment module: creates (state=present) or
    deletes (state=absent) an Azure Resource Manager template deployment,
    reporting deployment outputs and the VM instances it produced.
    """

    def __init__(self):
        self.module_arg_spec = dict(
            resource_group_name=dict(type='str', required=True, aliases=['resource_group']),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            template=dict(type='dict', default=None),
            parameters=dict(type='dict', default=None),
            template_link=dict(type='str', default=None),
            parameters_link=dict(type='str', default=None),
            location=dict(type='str', default="westus"),
            deployment_mode=dict(type='str', default='complete', choices=['complete', 'incremental']),
            deployment_name=dict(type='str', default="ansible-arm"),
            wait_for_deployment_completion=dict(type='bool', default=True),
            wait_for_deployment_polling_period=dict(type='int', default=30)
        )

        # template/template_link and parameters/parameters_link each supply the
        # same information in two ways, so only one of each pair may be given.
        mutually_exclusive = [('template', 'template_link'),
                              ('parameters', 'parameters_link')]

        # Populated from the validated module arguments in exec_module().
        self.resource_group_name = None
        self.state = None
        self.template = None
        self.parameters = None
        self.template_link = None
        self.parameters_link = None
        self.location = None
        self.deployment_mode = None
        self.deployment_name = None
        self.wait_for_deployment_completion = None
        self.wait_for_deployment_polling_period = None
        self.tags = None

        self.results = dict(
            deployment=dict(),
            changed=False,
            msg=""
        )

        super(AzureRMDeploymentManager, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                       mutually_exclusive=mutually_exclusive,
                                                       supports_check_mode=False)

    def exec_module(self, **kwargs):
        """Entry point invoked by the base class with the validated module parameters."""
        # list() is required so the concatenation also works on Python 3,
        # where dict.keys() returns a view object that does not support '+'.
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        if self.state == 'present':
            deployment = self.deploy_template()
            self.results['deployment'] = dict(
                name=deployment.name,
                group_name=self.resource_group_name,
                id=deployment.id,
                outputs=deployment.properties.outputs,
                instances=self._get_instances(deployment)
            )
            self.results['changed'] = True
            self.results['msg'] = 'deployment created'
        else:
            # Absence of the resource group means there is nothing to delete;
            # in that case changed stays False and msg stays empty.
            if self.resource_group_exists(self.resource_group_name):
                self.destroy_resource_group()
                self.results['changed'] = True
                self.results['msg'] = "deployment deleted"

        return self.results

    def deploy_template(self):
        """
        Create (or update) the resource group and deploy the template into it.

        Builds DeploymentProperties from either the inline template/parameters
        or their *_link variants, then (when wait_for_deployment_completion is
        set) polls until the deployment reaches a terminal provisioning state.

        :return: the resulting deployment object from the Azure SDK
        """
        deploy_parameter = DeploymentProperties()
        deploy_parameter.mode = self.deployment_mode

        if not self.parameters_link:
            deploy_parameter.parameters = self.parameters
        else:
            deploy_parameter.parameters_link = ParametersLink(
                uri=self.parameters_link
            )

        if not self.template_link:
            deploy_parameter.template = self.template
        else:
            deploy_parameter.template_link = TemplateLink(
                uri=self.template_link
            )

        params = ResourceGroup(location=self.location, tags=self.tags)

        try:
            self.rm_client.resource_groups.create_or_update(self.resource_group_name, params)
        except CloudError as exc:
            self.fail("Resource group create_or_update failed with status code: %s and message: %s" %
                      (exc.status_code, exc.message))
        try:
            result = self.rm_client.deployments.create_or_update(self.resource_group_name,
                                                                 self.deployment_name,
                                                                 deploy_parameter)
            deployment_result = self.get_poller_result(result)
            if self.wait_for_deployment_completion:
                # Poll until the deployment reaches a terminal state.
                while deployment_result.properties.provisioning_state not in ['Canceled', 'Failed', 'Deleted',
                                                                              'Succeeded']:
                    time.sleep(self.wait_for_deployment_polling_period)
                    deployment_result = self.rm_client.deployments.get(self.resource_group_name, self.deployment_name)
        except CloudError as exc:
            failed_deployment_operations = self._get_failed_deployment_operations(self.deployment_name)
            self.log("Deployment failed %s: %s" % (exc.status_code, exc.message))
            self.fail("Deployment failed with status code: %s and message: %s" % (exc.status_code, exc.message),
                      failed_deployment_operations=failed_deployment_operations)

        if self.wait_for_deployment_completion and deployment_result.properties.provisioning_state != 'Succeeded':
            self.log("provisioning state: %s" % deployment_result.properties.provisioning_state)
            failed_deployment_operations = self._get_failed_deployment_operations(self.deployment_name)
            self.fail('Deployment failed. Deployment id: %s' % deployment_result.id,
                      failed_deployment_operations=failed_deployment_operations)

        return deployment_result

    def destroy_resource_group(self):
        """
        Delete the resource group hosting the deployment (blocking).

        A 404/204 from Azure means the group is already gone, which is
        treated as success.
        """
        try:
            result = self.rm_client.resource_groups.delete(self.resource_group_name)
            result.wait()  # Blocking wait till the delete is finished
        except CloudError as e:
            if e.status_code == 404 or e.status_code == 204:
                return
            else:
                self.fail("Delete resource group and deploy failed with status code: %s and message: %s" %
                          (e.status_code, e.message))

    def resource_group_exists(self, resource_group):
        '''
        Return True/False based on existence of requested resource group.

        :param resource_group: string. Name of a resource group.
        :return: boolean
        '''
        try:
            self.rm_client.resource_groups.get(resource_group)
        except CloudError:
            return False
        return True

    def _get_failed_nested_operations(self, current_operations):
        """Recursively collect all failed operations, descending into nested deployments."""
        new_operations = []
        for operation in current_operations:
            if operation.properties.provisioning_state == 'Failed':
                new_operations.append(operation)
            if operation.properties.target_resource and \
               'Microsoft.Resources/deployments' in operation.properties.target_resource.id:
                nested_deployment = operation.properties.target_resource.resource_name
                try:
                    nested_operations = self.rm_client.deployment_operations.list(self.resource_group_name,
                                                                                  nested_deployment)
                except CloudError as exc:
                    # BUGFIX: this message previously referenced an undefined
                    # name 'e', raising NameError instead of reporting the error.
                    self.fail("List nested deployment operations failed with status code: %s and message: %s" %
                              (exc.status_code, exc.message))
                new_nested_operations = self._get_failed_nested_operations(nested_operations)
                new_operations += new_nested_operations
        return new_operations

    def _get_failed_deployment_operations(self, deployment_name):
        """Return serializable details of every failed operation of the deployment."""
        results = []
        # NOTE: there is a race condition between asking for deployment status
        # and when that status becomes available, so this listing may lag.
        try:
            operations = self.rm_client.deployment_operations.list(self.resource_group_name, deployment_name)
        except CloudError as exc:
            self.fail("Get deployment failed with status code: %s and message: %s" %
                      (exc.status_code, exc.message))
        try:
            results = [
                dict(
                    id=op.id,
                    operation_id=op.operation_id,
                    status_code=op.properties.status_code,
                    status_message=op.properties.status_message,
                    target_resource=dict(
                        id=op.properties.target_resource.id,
                        resource_name=op.properties.target_resource.resource_name,
                        resource_type=op.properties.target_resource.resource_type
                    ) if op.properties.target_resource else None,
                    provisioning_state=op.properties.provisioning_state,
                )
                for op in self._get_failed_nested_operations(operations)
            ]
        except Exception:
            # Deliberate swallow: if we fail here, the original error would be
            # lost and the user would receive the wrong message/stacktrace.
            # (Narrowed from a bare except so KeyboardInterrupt still escapes.)
            pass
        self.log(dict(failed_deployment_operations=results), pretty_print=True)
        return results

    def _get_instances(self, deployment):
        """Map the deployment's dependency tree to [{vm_name, ips: [...]}, ...]."""
        dep_tree = self._build_hierarchy(deployment.properties.dependencies)
        vms = self._get_dependencies(dep_tree, resource_type="Microsoft.Compute/virtualMachines")
        vms_and_nics = [(vm, self._get_dependencies(vm['children'], "Microsoft.Network/networkInterfaces"))
                        for vm in vms]
        vms_and_ips = [(vm['dep'], self._nic_to_public_ips_instance(nics))
                       for vm, nics in vms_and_nics]
        # VMs without any public IP are omitted from the result.
        return [dict(vm_name=vm.resource_name, ips=[self._get_ip_dict(ip)
                                                    for ip in ips]) for vm, ips in vms_and_ips if len(ips) > 0]

    def _get_dependencies(self, dep_tree, resource_type):
        """Recursively collect every node of the given resource_type from the tree."""
        matches = [value for value in dep_tree.values() if value['dep'].resource_type == resource_type]
        for child_tree in [value['children'] for value in dep_tree.values()]:
            matches += self._get_dependencies(child_tree, resource_type)
        return matches

    def _build_hierarchy(self, dependencies, tree=None):
        """
        Build a nested {resource_name: {dep, children}} tree from a flat
        dependency list, re-parenting nodes under their dependants.
        """
        tree = dict(top=True) if tree is None else tree
        for dep in dependencies:
            if dep.resource_name not in tree:
                tree[dep.resource_name] = dict(dep=dep, children=dict())
            if isinstance(dep, Dependency) and dep.depends_on is not None and len(dep.depends_on) > 0:
                self._build_hierarchy(dep.depends_on, tree[dep.resource_name]['children'])

        if 'top' in tree:
            tree.pop('top', None)
            keys = list(tree.keys())
            # Move any node that also appears as another node's child below
            # that node, so only true roots remain at the top level.
            for key1 in keys:
                for key2 in keys:
                    if key2 in tree and key1 in tree[key2]['children'] and key1 in tree:
                        tree[key2]['children'][key1] = tree[key1]
                        tree.pop(key1)
        return tree

    def _get_ip_dict(self, ip):
        """Convert an Azure public IP object into a plain serializable dict."""
        ip_dict = dict(name=ip.name,
                       id=ip.id,
                       public_ip=ip.ip_address,
                       public_ip_allocation_method=str(ip.public_ip_allocation_method)
                       )
        if ip.dns_settings:
            ip_dict['dns_settings'] = {
                'domain_name_label': ip.dns_settings.domain_name_label,
                'fqdn': ip.dns_settings.fqdn
            }
        return ip_dict

    def _nic_to_public_ips_instance(self, nics):
        """Resolve each NIC dependency to the public IP address objects it references."""
        return [self.network_client.public_ip_addresses.get(self.resource_group_name, public_ip_id.split('/')[-1])
                for nic_obj in [self.network_client.network_interfaces.get(self.resource_group_name,
                                                                           nic['dep'].resource_name) for nic in nics]
                for public_ip_id in [ip_conf_instance.public_ip_address.id
                                     for ip_conf_instance in nic_obj.ip_configurations
                                     if ip_conf_instance.public_ip_address]]
def main():
    """Module entry point: instantiating the manager runs the module."""
    AzureRMDeploymentManager()
# Standard Ansible module entry-point guard: run only when executed directly.
if __name__ == '__main__':
    main()
DOCUMENTATION = '''
---
module: azure_rm_deployment
short_description: Create or destroy Azure Resource Manager template deployments
version_added: "2.1"
description:
- "Create or destroy Azure Resource Manager template deployments via the Azure SDK for Python.
You can find some quick start templates in GitHub here https://github.com/azure/azure-quickstart-templates.
For more information on Azue resource manager templates see https://azure.microsoft.com/en-us/documentation/articles/resource-group-template-deploy/."
options:
resource_group_name:
description:
- The resource group name to use or create to host the deployed template
required: true
location:
description:
- The geo-locations in which the resource group will be located.
required: false
default: westus
state:
description:
- If state is "present", template will be created. If state is "present" and if deployment exists, it will be
updated. If state is "absent", stack will be removed.
default: present
choices:
- present
- absent
template:
description:
- A hash containing the templates inline. This parameter is mutually exclusive with 'template_link'.
Either one of them is required if "state" parameter is "present".
required: false
default: None
template_link:
description:
- Uri of file containing the template body. This parameter is mutually exclusive with 'template'. Either one
of them is required if "state" parameter is "present".
required: false
default: None
parameters:
description:
- A hash of all the required template variables for the deployment template. This parameter is mutually exclusive
with 'parameters_link'. Either one of them is required if "state" parameter is "present".
required: false
default: None
parameters_link:
description:
- Uri of file containing the parameters body. This parameter is mutually exclusive with 'parameters'. Either
one of them is required if "state" parameter is "present".
required: false
default: None
extends_documentation_fragment:
- azure
author:
- David Justice (@devigned)
- Laurent Mazuel (@lmazuel)
- Andre Price (@obsoleted)
'''
EXAMPLES = '''
# Destroy a template deployment
- name: Destroy Azure Deploy
azure_rm_deployment:
state: absent
subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name: dev-ops-cle
# Create or update a template deployment based on uris using parameter and template links
- name: Create Azure Deploy
azure_rm_deployment:
state: present
resource_group_name: dev-ops-cle
template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.json'
parameters_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.parameters.json'
# Create or update a template deployment based on a uri to the template and parameters specified inline.
# This deploys a VM with SSH support for a given public key, then stores the result in 'azure_vms'. The result is then
# used to create a new host group. This host group is then used to wait for each instance to respond to the public IP SSH.
---
- hosts: localhost
connection: local
gather_facts: no
tasks:
- name: Destroy Azure Deploy
azure_rm_deployment:
state: absent
subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name: dev-ops-cle
- name: Create Azure Deploy
azure_rm_deployment:
state: present
subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name: dev-ops-cle
parameters:
newStorageAccountName:
value: devopsclestorage1
adminUsername:
value: devopscle
dnsNameForPublicIP:
value: devopscleazure
location:
value: West US
vmSize:
value: Standard_A2
vmName:
value: ansibleSshVm
sshKeyData:
value: YOUR_SSH_PUBLIC_KEY
template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-sshkey/azuredeploy.json'
register: azure
- name: Add new instance to host group
add_host: hostname={{ item['ips'][0].public_ip }} groupname=azure_vms
with_items: azure.deployment.instances
- hosts: azure_vms
user: devopscle
tasks:
- name: Wait for SSH to come up
wait_for: port=22 timeout=2000 state=started
- name: echo the hostname of the vm
shell: hostname
# Deploy an Azure WebApp running a hello world'ish node app
- name: Create Azure WebApp Deployment at http://devopscleweb.azurewebsites.net/hello.js
azure_rm_deployment:
state: present
subscription_id: cbbdaed0-fea9-4693-bf0c-d446ac93c030
resource_group_name: dev-ops-cle-webapp
parameters:
repoURL:
value: 'https://github.com/devigned/az-roadshow-oss.git'
siteName:
value: devopscleweb
hostingPlanName:
value: someplan
siteLocation:
value: westus
sku:
value: Standard
template_link: 'https://raw.githubusercontent.com/azure/azure-quickstart-templates/master/201-web-app-github-deploy/azuredeploy.json'
# Create or update a template deployment based on an inline template and parameters
- name: Create Azure Deploy
azure_rm_deploy:
state: present
subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name: dev-ops-cle
template:
$schema: "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#"
contentVersion: "1.0.0.0"
parameters:
newStorageAccountName:
type: "string"
metadata:
description: "Unique DNS Name for the Storage Account where the Virtual Machine's disks will be placed."
adminUsername:
type: "string"
metadata:
description: "User name for the Virtual Machine."
adminPassword:
type: "securestring"
metadata:
description: "Password for the Virtual Machine."
dnsNameForPublicIP:
type: "string"
metadata:
description: "Unique DNS Name for the Public IP used to access the Virtual Machine."
ubuntuOSVersion:
type: "string"
defaultValue: "14.04.2-LTS"
allowedValues:
- "12.04.5-LTS"
- "14.04.2-LTS"
- "15.04"
metadata:
description: "The Ubuntu version for the VM. This will pick a fully patched image of this given Ubuntu version. Allowed values: 12.04.5-LTS, 14.04.2-LTS, 15.04."
variables:
location: "West US"
imagePublisher: "Canonical"
imageOffer: "UbuntuServer"
OSDiskName: "osdiskforlinuxsimple"
nicName: "myVMNic"
addressPrefix: "10.0.0.0/16"
subnetName: "Subnet"
subnetPrefix: "10.0.0.0/24"
storageAccountType: "Standard_LRS"
publicIPAddressName: "myPublicIP"
publicIPAddressType: "Dynamic"
vmStorageAccountContainerName: "vhds"
vmName: "MyUbuntuVM"
vmSize: "Standard_D1"
virtualNetworkName: "MyVNET"
vnetID: "[resourceId('Microsoft.Network/virtualNetworks',variables('virtualNetworkName'))]"
subnetRef: "[concat(variables('vnetID'),'/subnets/',variables('subnetName'))]"
resources:
-
type: "Microsoft.Storage/storageAccounts"
name: "[parameters('newStorageAccountName')]"
apiVersion: "2015-05-01-preview"
location: "[variables('location')]"
properties:
accountType: "[variables('storageAccountType')]"
-
apiVersion: "2015-05-01-preview"
type: "Microsoft.Network/publicIPAddresses"
name: "[variables('publicIPAddressName')]"
location: "[variables('location')]"
properties:
publicIPAllocationMethod: "[variables('publicIPAddressType')]"
dnsSettings:
domainNameLabel: "[parameters('dnsNameForPublicIP')]"
-
type: "Microsoft.Network/virtualNetworks"
apiVersion: "2015-05-01-preview"
name: "[variables('virtualNetworkName')]"
location: "[variables('location')]"
properties:
addressSpace:
addressPrefixes:
- "[variables('addressPrefix')]"
subnets:
-
name: "[variables('subnetName')]"
properties:
addressPrefix: "[variables('subnetPrefix')]"
-
type: "Microsoft.Network/networkInterfaces"
apiVersion: "2015-05-01-preview"
name: "[variables('nicName')]"
location: "[variables('location')]"
dependsOn:
- "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]"
- "[concat('Microsoft.Network/virtualNetworks/', variables('virtualNetworkName'))]"
properties:
ipConfigurations:
-
name: "ipconfig1"
properties:
privateIPAllocationMethod: "Dynamic"
publicIPAddress:
id: "[resourceId('Microsoft.Network/publicIPAddresses',variables('publicIPAddressName'))]"
subnet:
id: "[variables('subnetRef')]"
-
type: "Microsoft.Compute/virtualMachines"
apiVersion: "2015-06-15"
name: "[variables('vmName')]"
location: "[variables('location')]"
dependsOn:
- "[concat('Microsoft.Storage/storageAccounts/', parameters('newStorageAccountName'))]"
- "[concat('Microsoft.Network/networkInterfaces/', variables('nicName'))]"
properties:
hardwareProfile:
vmSize: "[variables('vmSize')]"
osProfile:
computername: "[variables('vmName')]"
adminUsername: "[parameters('adminUsername')]"
adminPassword: "[parameters('adminPassword')]"
storageProfile:
imageReference:
publisher: "[variables('imagePublisher')]"
offer: "[variables('imageOffer')]"
sku: "[parameters('ubuntuOSVersion')]"
version: "latest"
osDisk:
name: "osdisk"
vhd:
uri: "[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net/',variables('vmStorageAccountContainerName'),'/',variables('OSDiskName'),'.vhd')]"
caching: "ReadWrite"
createOption: "FromImage"
networkProfile:
networkInterfaces:
-
id: "[resourceId('Microsoft.Network/networkInterfaces',variables('nicName'))]"
diagnosticsProfile:
bootDiagnostics:
enabled: "true"
storageUri: "[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net')]"
parameters:
newStorageAccountName:
value: devopsclestorage
adminUsername:
value: devopscle
adminPassword:
value: Password1!
dnsNameForPublicIP:
value: devopscleazure
'''
RETURN = '''
msg:
description: String indicating if the deployment was created or deleted
returned: always
type: string
sample: "deployment created"
deployment:
description: Deployment details
type: dict
returned: always
sample:{
"group_name": "Test_Deployment",
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Test_Deployment/providers/Microsoft.Resources/deployments/ansible-arm",
"instances": [
{
"ips": [
{
"dns_settings": {
"domain_name_label": "testvm9910001",
"fqdn": "testvm9910001.westus.cloudapp.azure.com"
},
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Test_Deployment/providers/Microsoft.Network/publicIPAddresses/myPublicIP",
"name": "myPublicIP",
"public_ip": "13.91.99.232",
"public_ip_allocation_method": "IPAllocationMethod.dynamic"
}
],
"vm_name": "MyUbuntuVM"
}
],
"name": "ansible-arm",
"outputs": {
"hostname": {
"type": "String",
"value": "testvm9910001.westus.cloudapp.azure.com"
},
"sshCommand": {
"type": "String",
"value": "ssh chouseknecht@testvm9910001.westus.cloudapp.azure.com"
}
}
}
'''
import time
import yaml
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
from itertools import chain
from azure.common.credentials import ServicePrincipalCredentials
from azure.common.exceptions import CloudError
from azure.mgmt.resource.resources.models import (DeploymentProperties,
ParametersLink,
TemplateLink,
Deployment,
ResourceGroup,
Dependency)
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.network import NetworkManagementClient
except ImportError:
pass
class AzureRMDeploymentManager(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group_name=dict(type='str', required=True, aliases=['resource_group']),
state=dict(type='str', default='present', choices=['present', 'absent']),
template=dict(type='dict', default=None),
parameters=dict(type='dict', default=None),
template_link=dict(type='str', default=None),
parameters_link=dict(type='str', default=None),
location=dict(type='str', default="westus"),
deployment_mode=dict(type='str', default='complete', choices=['complete', 'incremental']),
deployment_name=dict(type='str', default="ansible-arm"),
wait_for_deployment_completion=dict(type='bool', default=True),
wait_for_deployment_polling_period=dict(type='int', default=30)
)
mutually_exclusive = [('template', 'template_link'),
('parameters', 'parameters_link')]
self.resource_group_name = None
self.state = None
self.template = None
self.parameters = None
self.template_link = None
self.parameters_link = None
self.location = None
self.deployment_mode = None
self.deployment_name = None
self.wait_for_deployment_completion = None
self.wait_for_deployment_polling_period = None
self.tags = None
self.results = dict(
deployment=dict(),
changed=False,
msg=""
)
super(AzureRMDeploymentManager, self).__init__(derived_arg_spec=self.module_arg_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=False)
def exec_module(self, **kwargs):
for key in self.module_arg_spec.keys() + ['tags']:
setattr(self, key, kwargs[key])
if self.state == 'present':
deployment = self.deploy_template()
self.results['deployment'] = dict(
name=deployment.name,
group_name=self.resource_group_name,
id=deployment.id,
outputs=deployment.properties.outputs,
instances=self._get_instances(deployment)
)
self.results['changed'] = True
self.results['msg'] = 'deployment created'
else:
if self.resource_group_exists(self.resource_group_name):
self.destroy_resource_group()
self.results['changed'] = True
self.results['msg'] = "deployment deleted"
return self.results
def deploy_template(self):
deploy_parameter = DeploymentProperties()
deploy_parameter.mode = self.deployment_mode
if not self.parameters_link:
deploy_parameter.parameters = self.parameters
else:
deploy_parameter.parameters_link = ParametersLink(
uri=self.parameters_link
)
if not self.template_link:
deploy_parameter.template = self.template
else:
deploy_parameter.template_link = TemplateLink(
uri=self.template_link
)
params = ResourceGroup(location=self.location, tags=self.tags)
try:
self.rm_client.resource_groups.create_or_update(self.resource_group_name, params)
except CloudError as exc:
self.fail("Resource group create_or_update failed with status code: %s and message: %s" %
(exc.status_code, exc.message))
try:
result = self.rm_client.deployments.create_or_update(self.resource_group_name,
self.deployment_name,
deploy_parameter)
deployment_result = self.get_poller_result(result)
if self.wait_for_deployment_completion:
while deployment_result.properties.provisioning_state not in ['Canceled', 'Failed', 'Deleted',
'Succeeded']:
time.sleep(self.wait_for_deployment_polling_period)
deployment_result = self.rm_client.deployments.get(self.resource_group_name, self.deployment_name)
except CloudError as exc:
failed_deployment_operations = self._get_failed_deployment_operations(self.deployment_name)
self.log("Deployment failed %s: %s" % (exc.status_code, exc.message))
self.fail("Deployment failed with status code: %s and message: %s" % (exc.status_code, exc.message),
failed_deployment_operations=failed_deployment_operations)
if self.wait_for_deployment_completion and deployment_result.properties.provisioning_state != 'Succeeded':
self.log("provisioning state: %s" % deployment_result.properties.provisioning_state)
failed_deployment_operations = self._get_failed_deployment_operations(self.deployment_name)
self.fail('Deployment failed. Deployment id: %s' % deployment_result.id,
failed_deployment_operations=failed_deployment_operations)
return deployment_result
def destroy_resource_group(self):
try:
result = self.rm_client.resource_groups.delete(self.resource_group_name)
result.wait()
except CloudError as e:
if e.status_code == 404 or e.status_code == 204:
return
else:
self.fail("Delete resource group and deploy failed with status code: %s and message: %s" %
(e.status_code, e.message))
def resource_group_exists(self, resource_group):
try:
self.rm_client.resource_groups.get(resource_group)
except CloudError:
return False
return True
def _get_failed_nested_operations(self, current_operations):
new_operations = []
for operation in current_operations:
if operation.properties.provisioning_state == 'Failed':
new_operations.append(operation)
if operation.properties.target_resource and \
'Microsoft.Resources/deployments' in operation.properties.target_resource.id:
nested_deployment = operation.properties.target_resource.resource_name
try:
nested_operations = self.rm_client.deployment_operations.list(self.resource_group_name,
nested_deployment)
except CloudError as exc:
self.fail("List nested deployment operations failed with status code: %s and message: %s" %
(e.status_code, e.message))
new_nested_operations = self._get_failed_nested_operations(nested_operations)
new_operations += new_nested_operations
return new_operations
def _get_failed_deployment_operations(self, deployment_name):
results = []
_group_name, deployment_name)
except CloudError as exc:
self.fail("Get deployment failed with status code: %s and message: %s" %
(exc.status_code, exc.message))
try:
results = [
dict(
id=op.id,
operation_id=op.operation_id,
status_code=op.properties.status_code,
status_message=op.properties.status_message,
target_resource=dict(
id=op.properties.target_resource.id,
resource_name=op.properties.target_resource.resource_name,
resource_type=op.properties.target_resource.resource_type
) if op.properties.target_resource else None,
provisioning_state=op.properties.provisioning_state,
)
for op in self._get_failed_nested_operations(operations)
]
except:
pass
self.log(dict(failed_deployment_operations=results), pretty_print=True)
return results
def _get_instances(self, deployment):
dep_tree = self._build_hierarchy(deployment.properties.dependencies)
vms = self._get_dependencies(dep_tree, resource_type="Microsoft.Compute/virtualMachines")
vms_and_nics = [(vm, self._get_dependencies(vm['children'], "Microsoft.Network/networkInterfaces"))
for vm in vms]
vms_and_ips = [(vm['dep'], self._nic_to_public_ips_instance(nics))
for vm, nics in vms_and_nics]
return [dict(vm_name=vm.resource_name, ips=[self._get_ip_dict(ip)
for ip in ips]) for vm, ips in vms_and_ips if len(ips) > 0]
def _get_dependencies(self, dep_tree, resource_type):
matches = [value for value in dep_tree.values() if value['dep'].resource_type == resource_type]
for child_tree in [value['children'] for value in dep_tree.values()]:
matches += self._get_dependencies(child_tree, resource_type)
return matches
def _build_hierarchy(self, dependencies, tree=None):
tree = dict(top=True) if tree is None else tree
for dep in dependencies:
if dep.resource_name not in tree:
tree[dep.resource_name] = dict(dep=dep, children=dict())
if isinstance(dep, Dependency) and dep.depends_on is not None and len(dep.depends_on) > 0:
self._build_hierarchy(dep.depends_on, tree[dep.resource_name]['children'])
if 'top' in tree:
tree.pop('top', None)
keys = list(tree.keys())
for key1 in keys:
for key2 in keys:
if key2 in tree and key1 in tree[key2]['children'] and key1 in tree:
tree[key2]['children'][key1] = tree[key1]
tree.pop(key1)
return tree
def _get_ip_dict(self, ip):
ip_dict = dict(name=ip.name,
id=ip.id,
public_ip=ip.ip_address,
public_ip_allocation_method=str(ip.public_ip_allocation_method)
)
if ip.dns_settings:
ip_dict['dns_settings'] = {
'domain_name_label':ip.dns_settings.domain_name_label,
'fqdn':ip.dns_settings.fqdn
}
return ip_dict
def _nic_to_public_ips_instance(self, nics):
return [self.network_client.public_ip_addresses.get(self.resource_group_name, public_ip_id.split('/')[-1])
for nic_obj in [self.network_client.network_interfaces.get(self.resource_group_name,
nic['dep'].resource_name) for nic in nics]
for public_ip_id in [ip_conf_instance.public_ip_address.id
for ip_conf_instance in nic_obj.ip_configurations
if ip_conf_instance.public_ip_address]]
def main():
AzureRMDeploymentManager()
if __name__ == '__main__':
main() | true | true |
f7fd20ad4b143ab38c36c3a93233672e2e38b92a | 10,545 | py | Python | datafunctions/datafunctions.py | alexmojaki/datafunctions | 54274cce6fbe96f31dacb6fba44ea072b52c75fe | [
"MIT"
] | 3 | 2020-09-23T01:21:13.000Z | 2021-05-15T06:06:01.000Z | datafunctions/datafunctions.py | alexmojaki/datafunctions | 54274cce6fbe96f31dacb6fba44ea072b52c75fe | [
"MIT"
] | null | null | null | datafunctions/datafunctions.py | alexmojaki/datafunctions | 54274cce6fbe96f31dacb6fba44ea072b52c75fe | [
"MIT"
] | null | null | null | import functools
import inspect
from dataclasses import make_dataclass
from functools import lru_cache, partial
from typing import get_type_hints, NamedTuple, Type, Callable, Dict, Any, Tuple
import marshmallow
import marshmallow_dataclass
from marshmallow import ValidationError
class ArgumentError(Exception):
"""
Raised by datafunction.load/dump_arguments when the arguments are invalid,
e.g. if they cannot be bound to the function parameters
or they fail marshmallow validation.
There will always be an underlying exception in the __cause__ attribute.
"""
class ReturnError(Exception):
"""
Raised by datafunction.load/dump_result when the return value is invalid,
e.g. if fail marshmallows validation.
There will always be an underlying exception in the __cause__ attribute.
"""
class Schemas(NamedTuple):
dataclass: type
schema_class: Type[marshmallow.Schema]
schema_instance: marshmallow.Schema
class _datafunction_meta(type):
"""
Metaclass which allows datafunction to be used as a decorator
with or without arguments.
"""
def __call__(self, func=None, *, is_method=False):
if func is not None:
return super().__call__(func, is_method=is_method)
return partial(datafunction, is_method=is_method)
# NOTE(review): wrapping the class in lru_cache() means the public name
# `datafunction` is a caching wrapper around the class, so decorating the same
# function twice returns the same cached instance — confirm this is intended.
@lru_cache()
class datafunction(metaclass=_datafunction_meta):
    """
    @datafunction is a decorator
    which automatically deserializes incoming arguments of the decorated function and
    serializes the return value. For example::

        from datetime import datetime

        @datafunction
        def next_year(dt: datetime) -> datetime:
            return dt.replace(year=dt.year + 1)

        assert next_year("2019-01-02T00:00:00") == "2020-01-02T00:00:00"

    @datafunction automatically converts the string argument to a datetime object, and then
    converts the returned datetime back to a string.

    More generally, the arguments and return value as seen from the outside the function
    are basic JSON serializable objects - strings, dicts, etc.
    They are converted to and from the correct types (as indicated by type annotations)
    by marshmallow. Common Python types as well as dataclasses (which may be nested)
    are supported. For example::

        @dataclass
        class Point:
            x: int
            y: int

        @datafunction
        def translate(p: Point, dx: int, dy: int) -> Point:
            return Point(p.x + dx, p.y + dy)

        assert translate({"x": 1, "y": 2}, 3, 4) == {"x": 4, "y": 6}

    To decorate a method, pass is_method=True, e.g::

        class MyClass:
            @datafunction(is_method=True)
            def method(self, x: int) -> int:
                ...

    All parameters and the return value must have a type annotation,
    except for the first argument when is_method=True.
    Variadic parameters (*args, **kwargs) and positional-only parameters (before /)
    are not allowed.

    If there is an exception deserializing or binding the arguments an ArgumentError
    will be raised with the underlying exception attached to __cause__.
    Similarly a ReturnError may be raised when trying to serialize the return value.

    For more manual control, use the methods:

        load_arguments
        dump_arguments
        load_result
        dump_result

    Under the hood, the type annotations are gathered into a dataclass which is then
    converted into a marshmallow schema
    using https://github.com/lovasoa/marshmallow_dataclass
    which handles the (de)serialization.

    Instances of this class have attributes params_schemas and return_schemas,
    each of which have the following attributes:

        dataclass
        schema_class: the marshmallow schema class
        schema_instance: a no-args instance of schema_class
    """

    def __init__(self, func: Callable = None, *, is_method: bool = False):
        self.func = func
        self.is_method = is_method
        # Make this instance look like the wrapped function (__name__, __doc__, ...).
        functools.update_wrapper(self, func)
        self.hints = get_type_hints(self.func)
        self.signature = inspect.signature(self.func)
        self.hinted_names = list(self.signature.parameters)
        if self.is_method:
            # The first argument of a method (e.g. self)
            # does not need to be hinted as it will not be deserialized
            del self.hinted_names[0]
        # Every deserialized parameter and the return value must carry an annotation.
        for name in [*self.hinted_names, "return"]:
            if name not in self.hints:
                raise TypeError(f"Missing annotation for {name} in function {func.__name__}")
        # Reject *args/**kwargs and positional-only parameters: arguments are
        # rebuilt from the schema output and passed back to the function by keyword.
        for name in self.hinted_names:
            param = self.signature.parameters[name]
            if param.kind not in (
                inspect.Parameter.POSITIONAL_OR_KEYWORD,
                inspect.Parameter.KEYWORD_ONLY,
            ):
                raise TypeError(f"Parameter {name} in function {func.__name__} is of invalid kind: {param.kind.name}")

        def make_schema(label, fields):
            # Build a dataclass from the annotated fields, then derive a
            # marshmallow schema from it via marshmallow_dataclass.
            datacls = make_dataclass(f"{self.func.__name__}_{label}_schema", fields.items())
            schema = marshmallow_dataclass.class_schema(datacls)
            schema_instance = schema()
            return Schemas(datacls, schema, schema_instance)

        self.params_schemas = make_schema("params", {k: self.hints[k] for k in self.hinted_names})
        # A `-> None` annotation means there is no return value to (de)serialize.
        self.return_schemas = make_schema("return", {"_return": self.hints["return"]}) \
            if self.hints["return"] != type(None) else None

    def __call__(self, *args, **kwargs):
        # Deserialize the arguments, call the wrapped function, serialize the result.
        data = self.load_arguments(*args, **kwargs)
        result = self.func(**data)
        return self.dump_result(result)

    def __get__(self, instance, owner):
        # Ensure method binding works correctly
        @functools.wraps(self)
        def method(instance_self, *args, **kwargs):
            return self(instance_self, *args, **kwargs)

        return method.__get__(instance, owner)

    def dump_arguments(self, *args, **kwargs) -> Dict[str, Any]:
        """
        Returns a dictionary containing JSON serializable values converted
        from the arguments by the .dump() method of the marshmallow schema
        derived from the parameter annotations.

        For example::

            @dataclass
            class Point:
                x: int
                y: int

            @datafunction
            def translate(p: Point, dx: int, dy: int) -> Point:
                return Point(p.x + dx, p.y + dy)

            assert (
                translate.dump_arguments(Point(1, 2), 3, 4) ==
                translate.dump_arguments(p=Point(1, 2), dx=3, dy=4) ==
                {"p": {"x": 1, "y": 2}, "dx": 3, "dy": 4}
            )
        """
        # Any failure while binding or dumping is reported as ArgumentError,
        # keeping the original exception reachable via __cause__.
        try:
            hinted_arguments, all_arguments = self._arguments_dicts(args, kwargs)
            # Only the hinted_arguments (i.e. not 'self') can be serialized
            return self.params_schemas.schema_instance.dump(hinted_arguments)
        except Exception as e:
            raise ArgumentError from e

    def load_arguments(self, *args, **kwargs) -> Dict[str, Any]:
        """
        Returns a dictionary of named deserialized arguments converted
        from the given serialized arguments.
        The conversion is done by the .load() method of the marshmallow schema
        derived from the parameter annotations.

        For example::

            @dataclass
            class Point:
                x: int
                y: int

            @datafunction
            def translate(p: Point, dx: int, dy: int) -> Point:
                return Point(p.x + dx, p.y + dy)

            assert (
                translate.load_arguments({"x": 1, "y": 2}, 3, 4) ==
                translate.load_arguments(p={"x": 1, "y": 2}, dx=3, dy=4) ==
                {"p": Point(1, 2), "dx": 3, "dy": 4}
            )
        """
        # TypeError covers signature binding failures, ValidationError covers
        # marshmallow deserialization failures.
        try:
            hinted_arguments, all_arguments = self._arguments_dicts(args, kwargs)
            datacls_instance = self.params_schemas.schema_instance.load(hinted_arguments)
            # Merge back the unhinted arguments (e.g. `self`) with the
            # deserialized hinted ones.
            return {
                **all_arguments,
                **{
                    field: getattr(datacls_instance, field)
                    for field in self.hinted_names
                }
            }
        except (TypeError, ValidationError) as e:
            raise ArgumentError from e

    def _arguments_dicts(self, args, kwargs) -> Tuple[Dict[str, Any], Dict[str, Any]]:
        # Bind positional/keyword arguments to the original signature so that
        # every argument is addressable by parameter name.
        bound_arguments = self.signature.bind(*args, **kwargs)
        bound_arguments.apply_defaults()
        all_arguments = bound_arguments.arguments
        hinted_arguments = {
            k: all_arguments[k]
            for k in self.hinted_names
        }
        return hinted_arguments, all_arguments

    def dump_result(self, result):
        """
        Returns a JSON serializable version of the given value
        returned from the decorated function.
        The conversion is done by the .dump() method of the marshmallow schema
        derived from the return annotation.

        For example::

            @dataclass
            class Point:
                x: int
                y: int

            @datafunction
            def translate(p: Point, dx: int, dy: int) -> Point:
                return Point(p.x + dx, p.y + dy)

            assert translate.dump_result(Point(1, 2)) == {"x": 1, "y": 2}
        """
        # No return schema means the function is annotated `-> None`.
        if self.return_schemas is None:
            return None
        try:
            # The value is wrapped in a single-field dataclass named `_return`.
            result_data = self.return_schemas.schema_instance.dump({"_return": result})
        except Exception as e:
            raise ReturnError from e
        return result_data["_return"]

    def load_result(self, result):
        """
        Deserializes the given serialized value representing a return from the function.
        The conversion is done by the .load() method of the marshmallow schema
        derived from the return annotation.

        For example::

            @dataclass
            class Point:
                x: int
                y: int

            @datafunction
            def translate(p: Point, dx: int, dy: int) -> Point:
                return Point(p.x + dx, p.y + dy)

            assert translate.load_result({"x": 1, "y": 2}) == Point(1, 2)
        """
        # No return schema means the function is annotated `-> None`.
        if self.return_schemas is None:
            return None
        try:
            datacls_instance = self.return_schemas.schema_instance.load({"_return": result})
        except ValidationError as e:
            raise ReturnError from e
        return datacls_instance._return
| 34.6875 | 118 | 0.615837 | import functools
import inspect
from dataclasses import make_dataclass
from functools import lru_cache, partial
from typing import get_type_hints, NamedTuple, Type, Callable, Dict, Any, Tuple
import marshmallow
import marshmallow_dataclass
from marshmallow import ValidationError
class ArgumentError(Exception):
class ReturnError(Exception):
class Schemas(NamedTuple):
dataclass: type
schema_class: Type[marshmallow.Schema]
schema_instance: marshmallow.Schema
class _datafunction_meta(type):
def __call__(self, func=None, *, is_method=False):
if func is not None:
return super().__call__(func, is_method=is_method)
return partial(datafunction, is_method=is_method)
@lru_cache()
class datafunction(metaclass=_datafunction_meta):
def __init__(self, func: Callable = None, *, is_method: bool = False):
self.func = func
self.is_method = is_method
functools.update_wrapper(self, func)
self.hints = get_type_hints(self.func)
self.signature = inspect.signature(self.func)
self.hinted_names = list(self.signature.parameters)
if self.is_method:
del self.hinted_names[0]
for name in [*self.hinted_names, "return"]:
if name not in self.hints:
raise TypeError(f"Missing annotation for {name} in function {func.__name__}")
for name in self.hinted_names:
param = self.signature.parameters[name]
if param.kind not in (
inspect.Parameter.POSITIONAL_OR_KEYWORD,
inspect.Parameter.KEYWORD_ONLY,
):
raise TypeError(f"Parameter {name} in function {func.__name__} is of invalid kind: {param.kind.name}")
def make_schema(label, fields):
datacls = make_dataclass(f"{self.func.__name__}_{label}_schema", fields.items())
schema = marshmallow_dataclass.class_schema(datacls)
schema_instance = schema()
return Schemas(datacls, schema, schema_instance)
self.params_schemas = make_schema("params", {k: self.hints[k] for k in self.hinted_names})
self.return_schemas = make_schema("return", {"_return": self.hints["return"]}) \
if self.hints["return"] != type(None) else None
def __call__(self, *args, **kwargs):
data = self.load_arguments(*args, **kwargs)
result = self.func(**data)
return self.dump_result(result)
def __get__(self, instance, owner):
@functools.wraps(self)
def method(instance_self, *args, **kwargs):
return self(instance_self, *args, **kwargs)
return method.__get__(instance, owner)
def dump_arguments(self, *args, **kwargs) -> Dict[str, Any]:
try:
hinted_arguments, all_arguments = self._arguments_dicts(args, kwargs)
return self.params_schemas.schema_instance.dump(hinted_arguments)
except Exception as e:
raise ArgumentError from e
def load_arguments(self, *args, **kwargs) -> Dict[str, Any]:
try:
hinted_arguments, all_arguments = self._arguments_dicts(args, kwargs)
datacls_instance = self.params_schemas.schema_instance.load(hinted_arguments)
return {
**all_arguments,
**{
field: getattr(datacls_instance, field)
for field in self.hinted_names
}
}
except (TypeError, ValidationError) as e:
raise ArgumentError from e
def _arguments_dicts(self, args, kwargs) -> Tuple[Dict[str, Any], Dict[str, Any]]:
bound_arguments = self.signature.bind(*args, **kwargs)
bound_arguments.apply_defaults()
all_arguments = bound_arguments.arguments
hinted_arguments = {
k: all_arguments[k]
for k in self.hinted_names
}
return hinted_arguments, all_arguments
def dump_result(self, result):
if self.return_schemas is None:
return None
try:
result_data = self.return_schemas.schema_instance.dump({"_return": result})
except Exception as e:
raise ReturnError from e
return result_data["_return"]
def load_result(self, result):
if self.return_schemas is None:
return None
try:
datacls_instance = self.return_schemas.schema_instance.load({"_return": result})
except ValidationError as e:
raise ReturnError from e
return datacls_instance._return
| true | true |
f7fd2117907141c22a4b65c009c99379172b734b | 1,201 | py | Python | varparam.py | lamhacker/VPL-Compiler | c54850f95342504e962b990c5cac17679e7069a9 | [
"Apache-2.0"
] | 2 | 2020-01-28T12:41:09.000Z | 2020-04-25T13:31:43.000Z | varparam.py | lamhacker/VPL-Compiler | c54850f95342504e962b990c5cac17679e7069a9 | [
"Apache-2.0"
] | null | null | null | varparam.py | lamhacker/VPL-Compiler | c54850f95342504e962b990c5cac17679e7069a9 | [
"Apache-2.0"
] | null | null | null | VAR_ASSEM = \
"""
# place address of {N}th local variable into {destreg}
movq %rdi, {destreg}
imulq $4, {destreg}, {destreg}
addq $16, {destreg}
imulq ${N}, {destreg}, {destreg}
subq %rbp, {destreg}
negq {destreg}
andq $-16, {destreg}
"""
REGISTERS = ["%rdi", "%rsi", "%rdx", "%rcx", "%r8", "%r9"]
PARA_ASSEM = \
"""
# place address of parameter into {destreg}
movq {argreg}, {destreg}
"""
# Base Class for Variable and Parameter Class
class BaseVar(object):
    """Common state for anything addressable by an identifier and an ordinal
    position (`nth`): local variables and function parameters."""

    def __init__(self, ident, nth):
        self.ident, self.nth = ident, nth

    def __str__(self):
        # Human-readable "name: position" form, mainly for debugging output.
        return "{ident}: {nth}".format(ident=self.ident, nth=self.nth)
# extend from BaseVar Class
class Variable(BaseVar):
    """A local variable, identified by its name and its ordinal position
    (`nth`) among the function's locals."""

    def __init__(self, ident, nth):
        # super() replaces the legacy explicit BaseVar.__init__ call;
        # behavior is identical.
        super().__init__(ident, nth)

    def load(self, destreg):
        """Return the assembly snippet that computes the address of this
        (the `nth`) local variable and places it in `destreg`."""
        return VAR_ASSEM.format(N=self.nth, destreg=destreg)
# extend from BaseVar Class
class Parameter(BaseVar):
    """A function parameter passed in one of the x86-64 System V integer
    argument registers (%rdi, %rsi, %rdx, %rcx, %r8, %r9)."""

    def __init__(self, ident, nth):
        # super() replaces the legacy explicit BaseVar.__init__ call;
        # behavior is identical.
        super().__init__(ident, nth)
        # assign regester to it
        # NOTE: raises IndexError for nth >= len(REGISTERS) (only 6 integer
        # argument registers exist) — callers must keep parameter counts <= 6.
        self.reg = REGISTERS[nth]

    def load(self, destreg):
        """Return the assembly snippet that copies this parameter's argument
        register into `destreg`."""
        return PARA_ASSEM.format(argreg=self.reg, destreg=destreg)
| 25.020833 | 70 | 0.625312 | VAR_ASSEM = \
"""
# place address of {N}th local variable into {destreg}
movq %rdi, {destreg}
imulq $4, {destreg}, {destreg}
addq $16, {destreg}
imulq ${N}, {destreg}, {destreg}
subq %rbp, {destreg}
negq {destreg}
andq $-16, {destreg}
"""
REGISTERS = ["%rdi", "%rsi", "%rdx", "%rcx", "%r8", "%r9"]
PARA_ASSEM = \
"""
# place address of parameter into {destreg}
movq {argreg}, {destreg}
"""
class BaseVar(object):
def __init__(self, ident, nth):
self.ident = ident
self.nth = nth
def __str__(self):
return "{ident}: {nth}".format(ident=self.ident, nth=self.nth)
class Variable(BaseVar):
def __init__(self, ident, nth):
BaseVar.__init__(self, ident, nth)
def load(self, destreg):
return VAR_ASSEM.format(N=self.nth, destreg=destreg)
class Parameter(BaseVar):
def __init__(self, ident, nth):
BaseVar.__init__(self, ident, nth)
self.reg = REGISTERS[nth]
def load(self, destreg):
return PARA_ASSEM.format(argreg=self.reg, destreg=destreg)
| true | true |
f7fd21502e5e69392a637c339298efdbea3621d0 | 21,887 | py | Python | src/rubrix/client/models.py | davidkartchner/rubrix | 33faa006d7498a806a9fd594036d4a42c7d70da2 | [
"Apache-2.0"
] | 1 | 2022-01-06T09:05:06.000Z | 2022-01-06T09:05:06.000Z | src/rubrix/client/models.py | davidkartchner/rubrix | 33faa006d7498a806a9fd594036d4a42c7d70da2 | [
"Apache-2.0"
] | null | null | null | src/rubrix/client/models.py | davidkartchner/rubrix | 33faa006d7498a806a9fd594036d4a42c7d70da2 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the data models for the interface
"""
import datetime
import logging
import warnings
from collections import defaultdict
from typing import Any, Dict, List, Optional, Tuple, Union
import pandas as pd
from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator
from rubrix._constants import MAX_KEYWORD_LENGTH
from rubrix.server.commons.helpers import limit_value_length
_LOGGER = logging.getLogger(__name__)
class _Validators(BaseModel):
    """Base class for our record models that takes care of general validations"""

    # check_fields=False on every validator: the validated fields ("metadata",
    # "prediction_agent", ...) are declared on the subclasses, not on this base.

    @validator("metadata", check_fields=False)
    def _check_value_length(cls, v):
        """Checks metadata values length and apply value truncation for large values"""
        new_metadata = limit_value_length(v, max_length=MAX_KEYWORD_LENGTH)
        if new_metadata != v:
            warnings.warn(
                "Some metadata values exceed the max length. "
                f"Those values will be truncated by keeping only the last {MAX_KEYWORD_LENGTH} characters."
            )
        return new_metadata

    @validator("metadata", check_fields=False)
    def _none_to_empty_dict(cls, v):
        # Normalize a missing metadata dict to {} so downstream code can
        # index it without None checks.
        if v is None:
            return {}
        return v

    @validator("prediction_agent", check_fields=False)
    def _check_prediction_agent(cls, v, values):
        """Triggers a warning when ONLY prediction agent is provided"""
        if v and values["prediction"] is None:
            warnings.warn(
                "You provided an `prediction_agent`, but no `prediction`. "
                "The `prediction_agent` will not be logged to the server."
            )
        return v

    @validator("annotation_agent", check_fields=False)
    def _check_annotation_agent(cls, v, values):
        """Triggers a warning when ONLY annotation agent is provided"""
        if v and values["annotation"] is None:
            warnings.warn(
                "You provided an `annotation_agent`, but no `annotation`. "
                "The `annotation_agent` will not be logged to the server."
            )
        return v

    @validator("event_timestamp", check_fields=False)
    def _nat_to_none(cls, v):
        """Converts pandas `NaT`s to `None`s"""
        if v is pd.NaT:
            return None
        return v

    @root_validator
    def _check_and_update_status(cls, values):
        """Updates the status if an annotation is provided and no status is specified."""
        # Runs after field validators: an annotated record defaults to
        # "Validated", otherwise to "Default".
        values["status"] = values.get("status") or (
            "Default" if values.get("annotation") is None else "Validated"
        )
        return values

    class Config:
        # Reject unknown fields instead of silently ignoring them.
        extra = "forbid"
class BulkResponse(BaseModel):
    """Summary response when logging records to the Rubrix server.

    Args:
        dataset: The dataset name.
        processed: Number of records in bulk.
        failed: Number of failed records.
    """

    dataset: str
    processed: int
    # Defaults to 0: a fully successful bulk reports no failures.
    failed: Optional[int] = 0
class TokenAttributions(BaseModel):
    """Attribution of the token to the predicted label.

    In the Rubrix app this is only supported for ``TextClassificationRecord`` and the ``multi_label=False`` case.

    Args:
        token: The input token.
        attributions: A dictionary containing label-attribution pairs.
    """

    token: str
    # Maps each label to the token's attribution score for that label.
    attributions: Dict[str, float] = Field(default_factory=dict)
class TextClassificationRecord(_Validators):
    """Record for text classification

    Args:
        text:
            The input of the record. Provide either 'text' or 'inputs'.
        inputs:
            Various inputs of the record (see examples below).
            Provide either 'text' or 'inputs'.
        prediction:
            A list of tuples containing the predictions for the record.
            The first entry of the tuple is the predicted label, the second entry is its corresponding score.
        prediction_agent:
            Name of the prediction agent. By default, this is set to the hostname of your machine.
        annotation:
            A string or a list of strings (multilabel) corresponding to the annotation (gold label) for the record.
        annotation_agent:
            Name of the prediction agent. By default, this is set to the hostname of your machine.
        multi_label:
            Is the prediction/annotation for a multi label classification task? Defaults to `False`.
        explanation:
            A dictionary containing the attributions of each token to the prediction.
            The keys map the input of the record (see `inputs`) to the `TokenAttributions`.
        id:
            The id of the record. By default (`None`), we will generate a unique ID for you.
        metadata:
            Meta data for the record. Defaults to `{}`.
        status:
            The status of the record. Options: 'Default', 'Edited', 'Discarded', 'Validated'.
            If an annotation is provided, this defaults to 'Validated', otherwise 'Default'.
        event_timestamp:
            The timestamp of the record.
        metrics:
            READ ONLY! Metrics at record level provided by the server when using `rb.load`.
            This attribute will be ignored when using `rb.log`.
        search_keywords:
            READ ONLY! Relevant record keywords/terms for provided query when using `rb.load`.
            This attribute will be ignored when using `rb.log`.

    Examples:
        >>> # Single text input
        >>> import rubrix as rb
        >>> record = rb.TextClassificationRecord(
        ...     text="My first rubrix example",
        ...     prediction=[('eng', 0.9), ('esp', 0.1)]
        ... )
        >>>
        >>> # Various inputs
        >>> record = rb.TextClassificationRecord(
        ...     inputs={
        ...         "subject": "Has ganado 1 million!",
        ...         "body": "Por usar Rubrix te ha tocado este premio: <link>"
        ...     },
        ...     prediction=[('spam', 0.99), ('ham', 0.01)],
        ...     annotation="spam"
        ... )
    """

    text: Optional[str] = None
    inputs: Optional[Union[str, List[str], Dict[str, Union[str, List[str]]]]] = None

    prediction: Optional[List[Tuple[str, float]]] = None
    prediction_agent: Optional[str] = None
    annotation: Optional[Union[str, List[str]]] = None
    annotation_agent: Optional[str] = None
    multi_label: bool = False

    explanation: Optional[Dict[str, List[TokenAttributions]]] = None

    id: Optional[Union[int, str]] = None
    metadata: Optional[Dict[str, Any]] = Field(default_factory=dict)
    status: Optional[str] = None
    event_timestamp: Optional[datetime.datetime] = None

    metrics: Optional[Dict[str, Any]] = None
    search_keywords: Optional[List[str]] = None

    @root_validator
    def _check_text_and_inputs(cls, values):
        """Check if either text or inputs were provided. Copy text to inputs."""
        # Passing a plain string as `inputs` is deprecated in favor of `text`.
        if isinstance(values.get("inputs"), str):
            warnings.warn(
                "In the future, the `inputs` argument of the `TextClassificationRecord` will not accept strings."
                "Please use the `text` argument in that case. Make sure to adapt your code accordingly.",
                category=FutureWarning,
            )

        # Normalize non-dict inputs (str / list of str) into {"text": ...}.
        if values.get("inputs") is not None and not isinstance(values["inputs"], dict):
            values["inputs"] = dict(text=values["inputs"])

        # Exactly one consistent source of text must be provided: either
        # `text`, or `inputs`, or both agreeing on the "text" key.
        if (values.get("text") is None and values.get("inputs") is None) or (
            values.get("text") is not None
            and values.get("inputs") is not None
            and values["text"] != values["inputs"].get("text")
        ):
            raise ValueError(
                "For a TextClassificationRecord you must provide either 'text' or 'inputs'"
            )

        # Mirror the two fields so both views stay in sync.
        if values.get("text") is not None:
            values["inputs"] = dict(text=values["text"])
        elif len(values["inputs"]) == 1 and "text" in values["inputs"]:
            values["text"] = values["inputs"]["text"]

        return values

    def __setattr__(self, name: str, value: Any):
        """Make text and inputs immutable"""
        # Mutating either would break the mirroring enforced at validation time.
        if name in ["text", "inputs"]:
            raise AttributeError(f"You cannot assign a new value to `{name}`")
        super().__setattr__(name, value)
class TokenClassificationRecord(_Validators):
    """Record for a token classification task

    Args:
        text:
            The input of the record
        tokens:
            The tokenized input of the record. We use this to guide the annotation process
            and to cross-check the spans of your `prediction`/`annotation`.
        prediction:
            A list of tuples containing the predictions for the record. The first entry of the tuple is the name of
            predicted entity, the second and third entry correspond to the start and stop character index of the entity.
            The fourth entry is optional and corresponds to the score of the entity (a float number between 0 and 1).
        prediction_agent:
            Name of the prediction agent. By default, this is set to the hostname of your machine.
        annotation:
            A list of tuples containing annotations (gold labels) for the record. The first entry of the tuple is the
            name of the entity, the second and third entry correspond to the start and stop char index of the entity.
        annotation_agent:
            Name of the prediction agent. By default, this is set to the hostname of your machine.
        id:
            The id of the record. By default (None), we will generate a unique ID for you.
        metadata:
            Meta data for the record. Defaults to `{}`.
        status:
            The status of the record. Options: 'Default', 'Edited', 'Discarded', 'Validated'.
            If an annotation is provided, this defaults to 'Validated', otherwise 'Default'.
        event_timestamp:
            The timestamp of the record.
        metrics:
            READ ONLY! Metrics at record level provided by the server when using `rb.load`.
            This attribute will be ignored when using `rb.log`.
        search_keywords:
            READ ONLY! Relevant record keywords/terms for provided query when using `rb.load`.
            This attribute will be ignored when using `rb.log`.

    Examples:
        >>> import rubrix as rb
        >>> record = rb.TokenClassificationRecord(
        ...     text = "Michael is a professor at Harvard",
        ...     tokens = ["Michael", "is", "a", "professor", "at", "Harvard"],
        ...     prediction = [('NAME', 0, 7), ('LOC', 26, 33)]
        ... )
    """

    text: Optional[str] = Field(None, min_length=1)
    tokens: Optional[Union[List[str], Tuple[str, ...]]] = None

    prediction: Optional[
        List[Union[Tuple[str, int, int], Tuple[str, int, int, float]]]
    ] = None
    prediction_agent: Optional[str] = None
    annotation: Optional[List[Tuple[str, int, int]]] = None
    annotation_agent: Optional[str] = None

    id: Optional[Union[int, str]] = None
    metadata: Optional[Dict[str, Any]] = Field(default_factory=dict)
    status: Optional[str] = None
    event_timestamp: Optional[datetime.datetime] = None

    metrics: Optional[Dict[str, Any]] = None
    search_keywords: Optional[List[str]] = None

    # Lazily built char<->token index maps (see __build_indices_map__).
    __chars2tokens__: Dict[int, int] = PrivateAttr(default=None)
    __tokens2chars__: Dict[int, Tuple[int, int]] = PrivateAttr(default=None)

    def __init__(
        self,
        text: str = None,
        tokens: List[str] = None,
        tags: Optional[List[str]] = None,
        **data,
    ):
        if text is None and tokens is None:
            raise AssertionError(
                "Missing fields: At least one of `text` or `tokens` argument must be provided!"
            )

        # Char-level spans only make sense relative to a raw text string.
        if (data.get("annotation") or data.get("prediction")) and text is None:
            raise AssertionError(
                "Missing field `text`: "
                "char level spans must be provided with a raw text sentence"
            )

        if text is None:
            # Fall back to a whitespace-joined token sequence as the raw text.
            text = " ".join(tokens)

        super().__init__(text=text, tokens=tokens, **data)
        if self.annotation and tags:
            _LOGGER.warning("Annotation already provided, `tags` won't be used")
            return
        if tags:
            # Decode an IOB/BILOU tag sequence into char-level annotation spans.
            self.annotation = self.__tags2entities__(tags)

    def __tags2entities__(self, tags: List[str]) -> List[Tuple[str, int, int]]:
        # Decode BILOU/IOB tags into (label, char_start, char_end) spans.
        # B/U open a new span; I/L extend the current one (with a tolerant
        # fallback when the opening tag is missing); O closes any open span.
        idx = 0
        entities = []
        entity_starts = False
        while idx < len(tags):
            tag = tags[idx]
            if tag == "O":
                entity_starts = False
            if tag != "O":
                prefix, entity = tag.split("-")
                if prefix in ["B", "U"]:
                    if prefix == "B":
                        entity_starts = True
                    char_start, char_end = self.token_span(token_idx=idx)
                    entities.append(
                        {"entity": entity, "start": char_start, "end": char_end + 1}
                    )
                elif prefix in ["I", "L"]:
                    if not entity_starts:
                        # Tolerate a missing B-/U- opener: treat this token
                        # as the start of the entity and warn.
                        _LOGGER.warning(
                            "Detected non-starting tag and first entity token was not found."
                            f"Assuming {tag} as first entity token"
                        )
                        entity_starts = True
                        char_start, char_end = self.token_span(token_idx=idx)
                        entities.append(
                            {"entity": entity, "start": char_start, "end": char_end + 1}
                        )

                    # Extend the open span's end to cover this token.
                    _, char_end = self.token_span(token_idx=idx)
                    entities[-1]["end"] = char_end + 1
            idx += 1
        return [(value["entity"], value["start"], value["end"]) for value in entities]

    def __setattr__(self, name: str, value: Any):
        """Make text and tokens immutable"""
        # Mutating either would invalidate the cached char/token index maps.
        if name in ["text", "tokens"]:
            raise AttributeError(f"You cannot assign a new value to `{name}`")
        super().__setattr__(name, value)

    @validator("tokens", pre=True)
    def _normalize_tokens(cls, value):
        # Store tokens as an immutable (and hashable) tuple.
        if isinstance(value, list):
            value = tuple(value)

        assert len(value) > 0, "At least one token should be provided"
        return value

    @validator("prediction")
    def add_default_score(
        cls,
        prediction: Optional[
            List[Union[Tuple[str, int, int], Tuple[str, int, int, float]]]
        ],
    ):
        """Adds the default score to the predictions if it is missing"""
        if prediction is None:
            return prediction
        # 3-tuples (label, start, end) get a default score of 1.0 appended.
        return [
            (pred[0], pred[1], pred[2], 1.0) if len(pred) == 3 else pred
            for pred in prediction
        ]

    @staticmethod
    def __build_indices_map__(
        text: str, tokens: Tuple[str, ...]
    ) -> Tuple[Dict[int, int], Dict[int, Tuple[int, int]]]:
        """
        Build the indices mapping between text characters and tokens where belongs to,
        and vice versa.

        chars2tokens index contains is the token idx where i char is contained (if any).

        Out-of-token characters won't be included in this map,
        so access should be using ``chars2tokens_map.get(i)``
        instead of ``chars2tokens_map[i]``.
        """

        def chars2tokens_index(text_, tokens_):
            # Walk text and tokens in lockstep, matching characters.
            # Characters between tokens (e.g. whitespace) get no entry.
            chars_map = {}
            current_token = 0
            current_token_char_start = 0
            for idx, char in enumerate(text_):
                relative_idx = idx - current_token_char_start
                if (
                    relative_idx < len(tokens_[current_token])
                    and char == tokens_[current_token][relative_idx]
                ):
                    chars_map[idx] = current_token
                elif (
                    current_token + 1 < len(tokens_)
                    and relative_idx >= len(tokens_[current_token])
                    and char == tokens_[current_token + 1][0]
                ):
                    # First character of the next token: advance the cursor.
                    current_token += 1
                    current_token_char_start += relative_idx
                    chars_map[idx] = current_token

            return chars_map

        def tokens2chars_index(
            chars2tokens: Dict[int, int]
        ) -> Dict[int, Tuple[int, int]]:
            # Invert the char->token map into token -> (first_char, last_char).
            tokens2chars_map = defaultdict(list)
            for c, t in chars2tokens.items():
                tokens2chars_map[t].append(c)

            return {
                token_idx: (min(chars), max(chars))
                for token_idx, chars in tokens2chars_map.items()
            }

        chars2tokens_idx = chars2tokens_index(text_=text, tokens_=tokens)
        return chars2tokens_idx, tokens2chars_index(chars2tokens_idx)

    def char_id2token_id(self, char_idx: int) -> Optional[int]:
        """
        Given a character id, returns the token id it belongs to.
        ``None`` otherwise
        """
        # Build both index maps lazily on first access.
        if self.__chars2tokens__ is None:
            self.__chars2tokens__, self.__tokens2chars__ = self.__build_indices_map__(
                self.text, tuple(self.tokens)
            )
        return self.__chars2tokens__.get(char_idx)

    def token_span(self, token_idx: int) -> Tuple[int, int]:
        """
        Given a token id, returns the start and end characters.
        Raises an ``IndexError`` if token id is out of tokens list indices
        """
        # Build both index maps lazily on first access.
        if self.__tokens2chars__ is None:
            self.__chars2tokens__, self.__tokens2chars__ = self.__build_indices_map__(
                self.text, tuple(self.tokens)
            )
        if token_idx not in self.__tokens2chars__:
            raise IndexError(f"Token id {token_idx} out of bounds")
        return self.__tokens2chars__[token_idx]

    def spans2iob(
        self, spans: Optional[List[Tuple[str, int, int]]] = None
    ) -> Optional[List[str]]:
        """Build the iob tags sequence for a list of spans annoations"""
        if spans is None:
            return None

        tags = ["O"] * len(self.tokens)
        for label, start, end in spans:
            # Spans are end-exclusive at char level, hence `end - 1` to locate
            # the last token of the span.
            token_start = self.char_id2token_id(start)
            token_end = self.char_id2token_id(end - 1)
            assert (
                token_start is not None and token_end is not None
            ), "Provided spans are missaligned at token level"
            tags[token_start] = f"B-{label}"
            for idx in range(token_start + 1, token_end + 1):
                tags[idx] = f"I-{label}"

        return tags
class Text2TextRecord(_Validators):
    """Record for a text to text task

    Args:
        text:
            The input of the record
        prediction:
            A list of strings or tuples containing predictions for the input text.
            If tuples, the first entry is the predicted text, the second entry is its corresponding score.
        prediction_agent:
            Name of the prediction agent. By default, this is set to the hostname of your machine.
        annotation:
            A string representing the expected output text for the given input text.
        annotation_agent:
            Name of the prediction agent. By default, this is set to the hostname of your machine.
        id:
            The id of the record. By default (None), we will generate a unique ID for you.
        metadata:
            Meta data for the record. Defaults to `{}`.
        status:
            The status of the record. Options: 'Default', 'Edited', 'Discarded', 'Validated'.
            If an annotation is provided, this defaults to 'Validated', otherwise 'Default'.
        event_timestamp:
            The timestamp of the record.
        metrics:
            READ ONLY! Metrics at record level provided by the server when using `rb.load`.
            This attribute will be ignored when using `rb.log`.
        search_keywords:
            READ ONLY! Relevant record keywords/terms for provided query when using `rb.load`.
            This attribute will be ignored when using `rb.log`.

    Examples:
        >>> import rubrix as rb
        >>> record = rb.Text2TextRecord(
        ...     text="My name is Sarah and I love my dog.",
        ...     prediction=["Je m'appelle Sarah et j'aime mon chien."]
        ... )
    """

    text: str

    prediction: Optional[List[Union[str, Tuple[str, float]]]] = None
    prediction_agent: Optional[str] = None
    annotation: Optional[str] = None
    annotation_agent: Optional[str] = None

    id: Optional[Union[int, str]] = None
    metadata: Optional[Dict[str, Any]] = Field(default_factory=dict)
    status: Optional[str] = None
    event_timestamp: Optional[datetime.datetime] = None

    metrics: Optional[Dict[str, Any]] = None
    search_keywords: Optional[List[str]] = None

    @validator("prediction")
    def prediction_as_tuples(
        cls, prediction: Optional[List[Union[str, Tuple[str, float]]]]
    ):
        """Preprocess the predictions and wraps them in a tuple if needed"""
        if prediction is None:
            return prediction
        # Plain strings are wrapped as (text, 1.0) so every prediction
        # carries a score.
        return [(pred, 1.0) if isinstance(pred, str) else pred for pred in prediction]
# Convenience alias covering every supported record type.
Record = Union[TextClassificationRecord, TokenClassificationRecord, Text2TextRecord]
| 39.365108 | 120 | 0.602367 |
import datetime
import logging
import warnings
from collections import defaultdict
from typing import Any, Dict, List, Optional, Tuple, Union
import pandas as pd
from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator
from rubrix._constants import MAX_KEYWORD_LENGTH
from rubrix.server.commons.helpers import limit_value_length
_LOGGER = logging.getLogger(__name__)
class _Validators(BaseModel):
    """Base model with validators shared by every record type.

    ``check_fields=False`` lets each validator apply only to subclasses
    that actually declare the corresponding field.
    """
    @validator("metadata", check_fields=False)
    def _check_value_length(cls, v):
        """Truncate overly long metadata values, warning when truncation occurs."""
        new_metadata = limit_value_length(v, max_length=MAX_KEYWORD_LENGTH)
        if new_metadata != v:
            warnings.warn(
                "Some metadata values exceed the max length. "
                f"Those values will be truncated by keeping only the last {MAX_KEYWORD_LENGTH} characters."
            )
        return new_metadata
    @validator("metadata", check_fields=False)
    def _none_to_empty_dict(cls, v):
        """Normalize a missing metadata mapping to an empty dict."""
        if v is None:
            return {}
        return v
    @validator("prediction_agent", check_fields=False)
    def _check_prediction_agent(cls, v, values):
        """Warn when a prediction agent is given without a prediction."""
        if v and values["prediction"] is None:
            warnings.warn(
                "You provided an `prediction_agent`, but no `prediction`. "
                "The `prediction_agent` will not be logged to the server."
            )
        return v
    @validator("annotation_agent", check_fields=False)
    def _check_annotation_agent(cls, v, values):
        """Warn when an annotation agent is given without an annotation."""
        if v and values["annotation"] is None:
            warnings.warn(
                "You provided an `annotation_agent`, but no `annotation`. "
                "The `annotation_agent` will not be logged to the server."
            )
        return v
    @validator("event_timestamp", check_fields=False)
    def _nat_to_none(cls, v):
        """Map pandas ``NaT`` timestamps to ``None``."""
        if v is pd.NaT:
            return None
        return v
    @root_validator
    def _check_and_update_status(cls, values):
        """Default the status: 'Validated' when annotated, 'Default' otherwise."""
        values["status"] = values.get("status") or (
            "Default" if values.get("annotation") is None else "Validated"
        )
        return values
    class Config:
        # Reject unexpected fields instead of silently ignoring them.
        extra = "forbid"
class BulkResponse(BaseModel):
    """Summary returned by a bulk record-logging call."""
    # Name of the dataset the records were logged to.
    dataset: str
    # Number of records successfully processed.
    processed: int
    # Number of records that failed (0 when everything succeeded).
    failed: Optional[int] = 0
class TokenAttributions(BaseModel):
    """Attribution scores of a single token, keyed by label."""
    # The token the attributions refer to.
    token: str
    # Mapping label -> attribution score for this token.
    attributions: Dict[str, float] = Field(default_factory=dict)
class TextClassificationRecord(_Validators):
    """Record for a text classification dataset.

    Either ``text`` or ``inputs`` must be provided; the root validator
    keeps the two fields mirrored and consistent.
    """
    text: Optional[str] = None
    inputs: Optional[Union[str, List[str], Dict[str, Union[str, List[str]]]]] = None
    # Predicted labels with their scores.
    prediction: Optional[List[Tuple[str, float]]] = None
    prediction_agent: Optional[str] = None
    annotation: Optional[Union[str, List[str]]] = None
    annotation_agent: Optional[str] = None
    multi_label: bool = False
    explanation: Optional[Dict[str, List[TokenAttributions]]] = None
    id: Optional[Union[int, str]] = None
    metadata: Optional[Dict[str, Any]] = Field(default_factory=dict)
    status: Optional[str] = None
    event_timestamp: Optional[datetime.datetime] = None
    metrics: Optional[Dict[str, Any]] = None
    search_keywords: Optional[List[str]] = None
    @root_validator
    def _check_text_and_inputs(cls, values):
        """Validate and reconcile ``text`` and ``inputs``.

        - warns about the deprecated string form of ``inputs``;
        - normalizes non-dict ``inputs`` into ``{"text": ...}``;
        - rejects records where neither field is given or the two disagree;
        - mirrors whichever field was provided into the other.
        """
        if isinstance(values.get("inputs"), str):
            warnings.warn(
                "In the future, the `inputs` argument of the `TextClassificationRecord` will not accept strings."
                "Please use the `text` argument in that case. Make sure to adapt your code accordingly.",
                category=FutureWarning,
            )
        if values.get("inputs") is not None and not isinstance(values["inputs"], dict):
            values["inputs"] = dict(text=values["inputs"])
        if (values.get("text") is None and values.get("inputs") is None) or (
            values.get("text") is not None
            and values.get("inputs") is not None
            and values["text"] != values["inputs"].get("text")
        ):
            raise ValueError(
                "For a TextClassificationRecord you must provide either 'text' or 'inputs'"
            )
        if values.get("text") is not None:
            values["inputs"] = dict(text=values["text"])
        elif len(values["inputs"]) == 1 and "text" in values["inputs"]:
            values["text"] = values["inputs"]["text"]
        return values
    def __setattr__(self, name: str, value: Any):
        """Make ``text`` and ``inputs`` effectively immutable after creation."""
        if name in ["text", "inputs"]:
            raise AttributeError(f"You cannot assign a new value to `{name}`")
        super().__setattr__(name, value)
class TokenClassificationRecord(_Validators):
    """Record for a token classification (NER-style) dataset.

    Entity spans are expressed at the character level against ``text``;
    helper methods translate between character offsets and token indices.
    """
    # Raw sentence; required when char-level spans are provided.
    text: Optional[str] = Field(None, min_length=1)
    tokens: Optional[Union[List[str], Tuple[str, ...]]] = None
    # Predicted entities as (label, char_start, char_end[, score]) tuples.
    prediction: Optional[
        List[Union[Tuple[str, int, int], Tuple[str, int, int, float]]]
    ] = None
    prediction_agent: Optional[str] = None
    annotation: Optional[List[Tuple[str, int, int]]] = None
    annotation_agent: Optional[str] = None
    id: Optional[Union[int, str]] = None
    metadata: Optional[Dict[str, Any]] = Field(default_factory=dict)
    status: Optional[str] = None
    event_timestamp: Optional[datetime.datetime] = None
    metrics: Optional[Dict[str, Any]] = None
    search_keywords: Optional[List[str]] = None
    # Lazily-built lookup tables between character offsets and token indices.
    __chars2tokens__: Dict[int, int] = PrivateAttr(default=None)
    __tokens2chars__: Dict[int, Tuple[int, int]] = PrivateAttr(default=None)
    def __init__(
        self,
        text: str = None,
        tokens: List[str] = None,
        tags: Optional[List[str]] = None,
        **data,
    ):
        """Build a record from ``text`` and/or ``tokens``.

        When ``tags`` (per-token IOB/BILOU labels) are given and no explicit
        annotation was provided, they are converted into the annotation.
        """
        if text is None and tokens is None:
            raise AssertionError(
                "Missing fields: At least one of `text` or `tokens` argument must be provided!"
            )
        if (data.get("annotation") or data.get("prediction")) and text is None:
            raise AssertionError(
                "Missing field `text`: "
                "char level spans must be provided with a raw text sentence"
            )
        if text is None:
            # Fall back to a whitespace join of the tokens as the raw text.
            text = " ".join(tokens)
        super().__init__(text=text, tokens=tokens, **data)
        if self.annotation and tags:
            _LOGGER.warning("Annotation already provided, `tags` won't be used")
            return
        if tags:
            self.annotation = self.__tags2entities__(tags)
    def __tags2entities__(self, tags: List[str]) -> List[Tuple[str, int, int]]:
        """Convert per-token IOB/BILOU tags into (label, char_start, char_end) spans."""
        idx = 0
        entities = []
        entity_starts = False
        while idx < len(tags):
            tag = tags[idx]
            if tag == "O":
                entity_starts = False
            if tag != "O":
                prefix, entity = tag.split("-")
                if prefix in ["B", "U"]:
                    if prefix == "B":
                        entity_starts = True
                    char_start, char_end = self.token_span(token_idx=idx)
                    entities.append(
                        {"entity": entity, "start": char_start, "end": char_end + 1}
                    )
                elif prefix in ["I", "L"]:
                    if not entity_starts:
                        # Tolerate spans that start with I/L: treat this token
                        # as the entity start and warn.
                        _LOGGER.warning(
                            "Detected non-starting tag and first entity token was not found."
                            f"Assuming {tag} as first entity token"
                        )
                        entity_starts = True
                        char_start, char_end = self.token_span(token_idx=idx)
                        entities.append(
                            {"entity": entity, "start": char_start, "end": char_end + 1}
                        )
                    # Extend the current entity through this token.
                    _, char_end = self.token_span(token_idx=idx)
                    entities[-1]["end"] = char_end + 1
            idx += 1
        return [(value["entity"], value["start"], value["end"]) for value in entities]
    def __setattr__(self, name: str, value: Any):
        """Make ``text`` and ``tokens`` effectively immutable after creation."""
        if name in ["text", "tokens"]:
            raise AttributeError(f"You cannot assign a new value to `{name}`")
        super().__setattr__(name, value)
    @validator("tokens", pre=True)
    def _normalize_tokens(cls, value):
        """Normalize the token list to a non-empty tuple."""
        if isinstance(value, list):
            value = tuple(value)
        assert len(value) > 0, "At least one token should be provided"
        return value
    @validator("prediction")
    def add_default_score(
        cls,
        prediction: Optional[
            List[Union[Tuple[str, int, int], Tuple[str, int, int, float]]]
        ],
    ):
        """Append a default score of 1.0 to score-less prediction tuples."""
        if prediction is None:
            return prediction
        return [
            (pred[0], pred[1], pred[2], 1.0) if len(pred) == 3 else pred
            for pred in prediction
        ]
    @staticmethod
    def __build_indices_map__(
        text: str, tokens: Tuple[str, ...]
    ) -> Tuple[Dict[int, int], Dict[int, Tuple[int, int]]]:
        """Build char->token and token->(char_start, char_end) lookup tables."""
        def chars2tokens_index(text_, tokens_):
            # Walk the text once, tracking which token the current character
            # belongs to; characters that match neither the current nor the
            # next token (e.g. separators) get no entry.
            chars_map = {}
            current_token = 0
            current_token_char_start = 0
            for idx, char in enumerate(text_):
                relative_idx = idx - current_token_char_start
                if (
                    relative_idx < len(tokens_[current_token])
                    and char == tokens_[current_token][relative_idx]
                ):
                    chars_map[idx] = current_token
                elif (
                    current_token + 1 < len(tokens_)
                    and relative_idx >= len(tokens_[current_token])
                    and char == tokens_[current_token + 1][0]
                ):
                    current_token += 1
                    current_token_char_start += relative_idx
                    chars_map[idx] = current_token
            return chars_map
        def tokens2chars_index(
            chars2tokens: Dict[int, int]
        ) -> Dict[int, Tuple[int, int]]:
            # Invert the char->token map into token->(min_char, max_char).
            tokens2chars_map = defaultdict(list)
            for c, t in chars2tokens.items():
                tokens2chars_map[t].append(c)
            return {
                token_idx: (min(chars), max(chars))
                for token_idx, chars in tokens2chars_map.items()
            }
        chars2tokens_idx = chars2tokens_index(text_=text, tokens_=tokens)
        return chars2tokens_idx, tokens2chars_index(chars2tokens_idx)
    def char_id2token_id(self, char_idx: int) -> Optional[int]:
        """Return the token index containing ``char_idx`` (None when unmapped)."""
        if self.__chars2tokens__ is None:
            # Build both lookup tables lazily on first use.
            self.__chars2tokens__, self.__tokens2chars__ = self.__build_indices_map__(
                self.text, tuple(self.tokens)
            )
        return self.__chars2tokens__.get(char_idx)
    def token_span(self, token_idx: int) -> Tuple[int, int]:
        """Return the (char_start, char_end) span of the given token.

        Raises ``IndexError`` when the token index has no character span.
        """
        if self.__tokens2chars__ is None:
            # Build both lookup tables lazily on first use.
            self.__chars2tokens__, self.__tokens2chars__ = self.__build_indices_map__(
                self.text, tuple(self.tokens)
            )
        if token_idx not in self.__tokens2chars__:
            raise IndexError(f"Token id {token_idx} out of bounds")
        return self.__tokens2chars__[token_idx]
    def spans2iob(
        self, spans: Optional[List[Tuple[str, int, int]]] = None
    ) -> Optional[List[str]]:
        """Convert character-level (label, start, end) spans into per-token IOB tags."""
        if spans is None:
            return None
        tags = ["O"] * len(self.tokens)
        for label, start, end in spans:
            # end is exclusive at the char level, hence `end - 1`.
            token_start = self.char_id2token_id(start)
            token_end = self.char_id2token_id(end - 1)
            assert (
                token_start is not None and token_end is not None
            ), "Provided spans are missaligned at token level"
            tags[token_start] = f"B-{label}"
            for idx in range(token_start + 1, token_end + 1):
                tags[idx] = f"I-{label}"
        return tags
class Text2TextRecord(_Validators):
    """Record for a text-to-text (e.g. translation, summarization) dataset."""
    # Input text of the record.
    text: str
    # Predicted output texts, optionally with a score each.
    prediction: Optional[List[Union[str, Tuple[str, float]]]] = None
    prediction_agent: Optional[str] = None
    annotation: Optional[str] = None
    annotation_agent: Optional[str] = None
    id: Optional[Union[int, str]] = None
    metadata: Optional[Dict[str, Any]] = Field(default_factory=dict)
    status: Optional[str] = None
    event_timestamp: Optional[datetime.datetime] = None
    metrics: Optional[Dict[str, Any]] = None
    search_keywords: Optional[List[str]] = None
    @validator("prediction")
    def prediction_as_tuples(
        cls, prediction: Optional[List[Union[str, Tuple[str, float]]]]
    ):
        """Normalize predictions: wrap bare strings as ``(text, 1.0)`` tuples."""
        if prediction is None:
            return prediction
        return [(pred, 1.0) if isinstance(pred, str) else pred for pred in prediction]
Record = Union[TextClassificationRecord, TokenClassificationRecord, Text2TextRecord]
| true | true |
f7fd21e62231f65c9c5562bb47fe7bdd8fa19e4a | 2,803 | py | Python | dipy/reconst/tests/test_shore_odf.py | oesteban/dipy | eb6ea58028959f4a507d70160dad770193449d66 | [
"BSD-3-Clause"
] | 1 | 2016-09-08T19:23:51.000Z | 2016-09-08T19:23:51.000Z | dipy/reconst/tests/test_shore_odf.py | jyeatman/dipy | 57f7ec926f914d72f7f2f8feb8ccb51ab827895d | [
"BSD-3-Clause"
] | null | null | null | dipy/reconst/tests/test_shore_odf.py | jyeatman/dipy | 57f7ec926f914d72f7f2f8feb8ccb51ab827895d | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from dipy.data import get_sphere, get_3shell_gtab, get_isbi2013_2shell_gtab
from dipy.reconst.shore import ShoreModel
from dipy.reconst.shm import QballModel, sh_to_sf
from dipy.reconst.peaks import gfa, peak_directions
from numpy.testing import (assert_equal,
assert_almost_equal,
run_module_suite,
assert_array_equal,
assert_raises)
from dipy.sims.voxel import SticksAndBall
from dipy.core.subdivide_octahedron import create_unit_sphere
from dipy.core.sphere_stats import angular_similarity
from dipy.reconst.tests.test_dsi import sticks_and_ball_dummies
def test_shore_odf():
    """SHORE ODFs recover two crossing fibers and agree with their SH expansion."""
    gtab = get_isbi2013_2shell_gtab()
    # Two reconstruction spheres: the symmetric 724-vertex sphere and a
    # level-5 subdivided icosahedron.
    sphere = get_sphere('symmetric724')
    sphere2 = create_unit_sphere(5)
    data, golden_directions = SticksAndBall(
        gtab,
        d=0.0015,
        S0=100,
        angles=[(0, 0), (90, 0)],
        fractions=[50, 50],
        snr=None,
    )
    model = ShoreModel(gtab, radial_order=6, zeta=700, lambdaN=1e-8, lambdaL=1e-8)
    # Fit on the symmetric724 sphere: the sampled ODF must match its own
    # spherical-harmonics expansion and show exactly two peaks.
    fit = model.fit(data)
    odf = fit.odf(sphere)
    odf_from_sh = sh_to_sf(fit.odf_sh(), sphere, 6, basis_type=None)
    assert_almost_equal(odf, odf_from_sh, 10)
    directions, _, _ = peak_directions(odf, sphere, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions), 2, 1)
    # Same peak checks on the subdivided icosahedron.
    odf = fit.odf(sphere2)
    directions, _, _ = peak_directions(odf, sphere2, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions), 2, 1)
    # Degenerate sticks-and-ball configurations: either the number of peaks
    # matches the golden directions, or the ODF is nearly isotropic (low GFA).
    dummies = sticks_and_ball_dummies(gtab)
    for key in dummies:
        data, golden_directions = dummies[key]
        fit = model.fit(data)
        odf = fit.odf(sphere2)
        directions, _, _ = peak_directions(odf, sphere2, .35, 25)
        if len(directions) <= 3:
            assert_equal(len(directions), len(golden_directions))
        if len(directions) > 3:
            assert_equal(gfa(odf) < 0.1, True)
def test_multivox_shore():
    """Fitting SHORE on a volume keeps the spatial shape and yields real coefficients."""
    gradient_table = get_3shell_gtab()
    n_gradients = gradient_table.gradients.shape[0]
    volume = np.random.random([20, 30, 1, n_gradients])
    model = ShoreModel(
        gradient_table, radial_order=4, zeta=700, lambdaN=1e-8, lambdaL=1e-8
    )
    fit = model.fit(volume)
    coefficients = fit.shore_coeff
    # The fit must preserve the spatial dimensions of the input volume.
    assert_equal(coefficients.shape[0:3], volume.shape[0:3])
    # SHORE coefficients are expected to be purely real.
    assert_equal(np.alltrue(np.isreal(coefficients)), True)
if __name__ == '__main__':
run_module_suite()
| 36.881579 | 93 | 0.646093 | import numpy as np
from dipy.data import get_sphere, get_3shell_gtab, get_isbi2013_2shell_gtab
from dipy.reconst.shore import ShoreModel
from dipy.reconst.shm import QballModel, sh_to_sf
from dipy.reconst.peaks import gfa, peak_directions
from numpy.testing import (assert_equal,
assert_almost_equal,
run_module_suite,
assert_array_equal,
assert_raises)
from dipy.sims.voxel import SticksAndBall
from dipy.core.subdivide_octahedron import create_unit_sphere
from dipy.core.sphere_stats import angular_similarity
from dipy.reconst.tests.test_dsi import sticks_and_ball_dummies
def test_shore_odf():
gtab = get_isbi2013_2shell_gtab()
sphere = get_sphere('symmetric724')
sphere2 = create_unit_sphere(5)
data, golden_directions = SticksAndBall(gtab, d=0.0015,
S0=100, angles=[(0, 0), (90, 0)],
fractions=[50, 50], snr=None)
asm = ShoreModel(gtab,radial_order=6, zeta=700, lambdaN=1e-8, lambdaL=1e-8)
asmfit = asm.fit(data)
odf = asmfit.odf(sphere)
odf_sh = asmfit.odf_sh()
odf_from_sh = sh_to_sf(odf_sh, sphere, 6, basis_type=None)
assert_almost_equal(odf, odf_from_sh, 10)
directions, _ , _ = peak_directions(odf, sphere, .35, 25)
assert_equal(len(directions), 2)
assert_almost_equal(angular_similarity(directions, golden_directions), 2, 1)
odf = asmfit.odf(sphere2)
directions, _ , _ = peak_directions(odf, sphere2, .35, 25)
assert_equal(len(directions), 2)
assert_almost_equal(angular_similarity(directions, golden_directions), 2, 1)
sb_dummies = sticks_and_ball_dummies(gtab)
for sbd in sb_dummies:
data, golden_directions = sb_dummies[sbd]
asmfit = asm.fit(data)
odf = asmfit.odf(sphere2)
directions, _ , _ = peak_directions(odf, sphere2, .35, 25)
if len(directions) <= 3:
assert_equal(len(directions), len(golden_directions))
if len(directions) > 3:
assert_equal(gfa(odf) < 0.1, True)
def test_multivox_shore():
gtab = get_3shell_gtab()
data = np.random.random([20, 30, 1, gtab.gradients.shape[0]])
radial_order = 4
zeta = 700
asm = ShoreModel(gtab, radial_order=radial_order, zeta=zeta, lambdaN=1e-8, lambdaL=1e-8)
asmfit = asm.fit(data)
c_shore=asmfit.shore_coeff
assert_equal(c_shore.shape[0:3], data.shape[0:3])
assert_equal(np.alltrue(np.isreal(c_shore)), True)
if __name__ == '__main__':
run_module_suite()
| true | true |
f7fd22a15c59e52d74f9b7f5868980ef7e90f162 | 5,428 | py | Python | datazen/environment/manifest_cache.py | vkottler/datazen | e493a0427b0686811f8aeb719bae035f999c8a57 | [
"MIT"
] | 2 | 2021-02-17T00:16:07.000Z | 2022-02-27T00:14:12.000Z | datazen/environment/manifest_cache.py | vkottler/datazen | e493a0427b0686811f8aeb719bae035f999c8a57 | [
"MIT"
] | 53 | 2020-11-02T07:10:21.000Z | 2022-03-29T09:04:01.000Z | datazen/environment/manifest_cache.py | vkottler/datazen | e493a0427b0686811f8aeb719bae035f999c8a57 | [
"MIT"
] | null | null | null | """
datazen - A class for adding caching to the manifest-loading environment.
"""
# built-in
import logging
import os
from typing import List, Dict
# third-party
import jinja2
# internal
from datazen.environment.manifest import ManifestEnvironment
from datazen.paths import get_file_name
from datazen import DEFAULT_MANIFEST, CACHE_SUFFIX, ROOT_NAMESPACE
from datazen.classes.file_info_cache import FileInfoCache, cmp_total_loaded
from datazen.classes.file_info_cache import copy as copy_cache
from datazen.classes.file_info_cache import meld as meld_cache
LOG = logging.getLogger(__name__)
def manifest_cache_dir(path: str, manifest: dict) -> str:
    """Resolve the absolute cache directory for a manifest, defaulting it if unset."""
    default = os.path.join(
        manifest["dir"], f".{get_file_name(path)}{CACHE_SUFFIX}"
    )
    # setdefault stores the default only when 'cache_dir' is absent and
    # returns whichever value ends up in the manifest data.
    return os.path.abspath(manifest["data"].setdefault("cache_dir", default))
class ManifestCacheEnvironment(ManifestEnvironment):
    """A wrapper for the cache functionality for an environment.

    Maintains three cache views: the live ``cache``, an ``aggregate_cache``
    that accumulates everything seen, and an ``initial_cache`` snapshot used
    to detect changes since load time.
    """
    def __init__(self):
        """Extend the environment with a notion of the cache being loaded."""
        super().__init__()
        self.cache = None
        self.aggregate_cache = None
        self.initial_cache = None
        self.manifest_changed = True
    def load_manifest_with_cache(
        self, path: str = DEFAULT_MANIFEST, logger: logging.Logger = LOG
    ) -> bool:
        """
        Load a manifest and its cache, or set up a new cache if one doesn't
        exist. Returns True when both the manifest and its cache were loaded.
        """
        result = self.load_manifest(path)
        # if we successfully loaded this manifest, try to load its cache
        if result:
            self.cache = FileInfoCache(manifest_cache_dir(path, self.manifest))
            self.aggregate_cache = copy_cache(self.cache)
            # correctly set the state of whether or not this manifest
            # has changed
            self.manifest_changed = False
            for mpath in self.manifest["files"]:
                if not self.cache.check_hit(ROOT_NAMESPACE, mpath):
                    self.manifest_changed = True
            # save a copy of the initial cache, so that we can use it to
            # determine if state has changed when evaluating targets
            self.initial_cache = copy_cache(self.cache)
            logger.debug("cache-environment loaded from '%s'", path)
        return result and self.cache is not None
    def clean_cache(self, purge_data: bool = True) -> None:
        """Remove cached data from the file-system (and optionally loaded data)."""
        if purge_data:
            for name in self.namespaces:
                self.unload_all(name)
        if self.cache is not None:
            self.cache.clean()
        self.manifest_changed = True
    def write_cache(self) -> None:
        """Commit cached data to the file-system."""
        if self.cache is not None:
            # Fold the live cache into the aggregate before persisting.
            meld_cache(self.aggregate_cache, self.cache)
            self.aggregate_cache.write()
    def describe_cache(self) -> None:
        """Describe the [initial] cache for debugging purposes."""
        self.initial_cache.describe()
    def restore_cache(self) -> None:
        """Return the cache to its initially-loaded state."""
        if self.cache is not None:
            # Keep the aggregate up to date before discarding the live cache.
            meld_cache(self.aggregate_cache, self.cache)
            self.cache = copy_cache(self.initial_cache)
    def get_new_loaded(
        self, types: List[str], load_checks: Dict[str, List[str]] = None
    ) -> int:
        """
        Compute the number of new files loaded (since the initial load)
        for a set of types.
        """
        return cmp_total_loaded(
            self.cache, self.initial_cache, types, load_checks
        )
    def cached_load_variables(self, name: str = ROOT_NAMESPACE) -> dict:
        """Load variables, proxied through the cache."""
        return self.load_variables(self.cache.get_data("variables"), name)
    def cached_load_schemas(
        self, require_all: bool = True, name: str = ROOT_NAMESPACE
    ) -> dict:
        """Load schemas, proxied through the cache."""
        return self.load_schemas(
            require_all,
            self.cache.get_data("schemas"),
            self.cache.get_data("schema_types"),
            name,
        )
    def cached_enforce_schemas(
        self, data: dict, require_all: bool = True, name: str = ROOT_NAMESPACE
    ) -> bool:
        """Enforce schemas, proxied through the cache."""
        return self.enforce_schemas(
            data,
            require_all,
            self.cache.get_data("schemas"),
            self.cache.get_data("schema_types"),
            name,
        )
    def cached_load_configs(self, name: str = ROOT_NAMESPACE) -> dict:
        """Load configs, proxied through the cache."""
        return self.load_configs(
            self.cache.get_data("configs"),
            self.cache.get_data("variables"),
            self.cache.get_data("schemas"),
            self.cache.get_data("schema_types"),
            name,
        )
    def cached_load_templates(
        self, name: str = ROOT_NAMESPACE
    ) -> Dict[str, jinja2.Template]:
        """Load templates, proxied through the cache."""
        return self.load_templates(self.cache.get_data("templates"), name)
| 32.698795 | 79 | 0.635409 |
import logging
import os
from typing import List, Dict
import jinja2
from datazen.environment.manifest import ManifestEnvironment
from datazen.paths import get_file_name
from datazen import DEFAULT_MANIFEST, CACHE_SUFFIX, ROOT_NAMESPACE
from datazen.classes.file_info_cache import FileInfoCache, cmp_total_loaded
from datazen.classes.file_info_cache import copy as copy_cache
from datazen.classes.file_info_cache import meld as meld_cache
LOG = logging.getLogger(__name__)
def manifest_cache_dir(path: str, manifest: dict) -> str:
cache_name = f".{get_file_name(path)}{CACHE_SUFFIX}"
default_cache_dir = os.path.join(manifest["dir"], cache_name)
if "cache_dir" not in manifest["data"]:
manifest["data"]["cache_dir"] = default_cache_dir
return os.path.abspath(manifest["data"]["cache_dir"])
class ManifestCacheEnvironment(ManifestEnvironment):
def __init__(self):
super().__init__()
self.cache = None
self.aggregate_cache = None
self.initial_cache = None
self.manifest_changed = True
def load_manifest_with_cache(
self, path: str = DEFAULT_MANIFEST, logger: logging.Logger = LOG
) -> bool:
result = self.load_manifest(path)
# if we successfully loaded this manifest, try to load its cache
if result:
self.cache = FileInfoCache(manifest_cache_dir(path, self.manifest))
self.aggregate_cache = copy_cache(self.cache)
# correctly set the state of whether or not this manifest
# has changed
self.manifest_changed = False
for mpath in self.manifest["files"]:
if not self.cache.check_hit(ROOT_NAMESPACE, mpath):
self.manifest_changed = True
# save a copy of the initial cache, so that we can use it to
# determine if state has changed when evaluating targets
self.initial_cache = copy_cache(self.cache)
logger.debug("cache-environment loaded from '%s'", path)
return result and self.cache is not None
def clean_cache(self, purge_data: bool = True) -> None:
if purge_data:
for name in self.namespaces:
self.unload_all(name)
if self.cache is not None:
self.cache.clean()
self.manifest_changed = True
def write_cache(self) -> None:
if self.cache is not None:
meld_cache(self.aggregate_cache, self.cache)
self.aggregate_cache.write()
def describe_cache(self) -> None:
self.initial_cache.describe()
def restore_cache(self) -> None:
if self.cache is not None:
meld_cache(self.aggregate_cache, self.cache)
self.cache = copy_cache(self.initial_cache)
def get_new_loaded(
self, types: List[str], load_checks: Dict[str, List[str]] = None
) -> int:
return cmp_total_loaded(
self.cache, self.initial_cache, types, load_checks
)
def cached_load_variables(self, name: str = ROOT_NAMESPACE) -> dict:
return self.load_variables(self.cache.get_data("variables"), name)
def cached_load_schemas(
self, require_all: bool = True, name: str = ROOT_NAMESPACE
) -> dict:
return self.load_schemas(
require_all,
self.cache.get_data("schemas"),
self.cache.get_data("schema_types"),
name,
)
def cached_enforce_schemas(
self, data: dict, require_all: bool = True, name: str = ROOT_NAMESPACE
) -> bool:
return self.enforce_schemas(
data,
require_all,
self.cache.get_data("schemas"),
self.cache.get_data("schema_types"),
name,
)
def cached_load_configs(self, name: str = ROOT_NAMESPACE) -> dict:
return self.load_configs(
self.cache.get_data("configs"),
self.cache.get_data("variables"),
self.cache.get_data("schemas"),
self.cache.get_data("schema_types"),
name,
)
def cached_load_templates(
self, name: str = ROOT_NAMESPACE
) -> Dict[str, jinja2.Template]:
return self.load_templates(self.cache.get_data("templates"), name)
| true | true |
f7fd23d56f76c972c0b8db8433831ed654ed5b77 | 2,414 | py | Python | filters_forAllData.py | BonizzoniLab/SVD | 95ed967ae385ed0a339030763a07ea7acfa0c1d3 | [
"MIT"
] | null | null | null | filters_forAllData.py | BonizzoniLab/SVD | 95ed967ae385ed0a339030763a07ea7acfa0c1d3 | [
"MIT"
] | null | null | null | filters_forAllData.py | BonizzoniLab/SVD | 95ed967ae385ed0a339030763a07ea7acfa0c1d3 | [
"MIT"
] | null | null | null | import argparse
def txt_reader(txt):
    ''' read the data set and variants that satisfy the following condition are maintained:
    [ abs(length(REF) - length(ALT)) >= 6 & [AF_median > 0.3 OR Num_caller > 1 ] & STRBIAS_median < MaxStrandBias ] for INDELs and
    [AF_median > 0.3 OR Num_caller > 1 ] & STRBIAS_median < MaxStrandBias] for SNPs'''
    # Column indices resolved from the header row (defaults are placeholders
    # until the 'SAMPLE_ID' header line is seen).
    af=0
    strb=0
    ref=''
    alt=''
    fb=0
    vd=0
    pl=0
    gk=0
    for line in txt:
        line=line.rstrip()
        row=line.split('\t')
        if line.startswith('SAMPLE_ID'):
            # Header row: record the position of each column of interest
            # and echo the header to stdout.
            af=row.index("AF_median")
            strb=row.index("STRBIAS_median")
            ref=row.index("REF")
            alt=row.index("ALT")
            fb=row.index("CallFreebayes")
            vd=row.index("CallVardict")
            pl=row.index("CallPlatypus")
            gk=row.index("CallGatk")
            print line
        else:
            # Treat a missing strand-bias value ('.') as zero.
            if row[strb]=='.':
                row[strb]=0
            # INDEL filter: strand bias within bounds, REF/ALT length difference
            # at least opts.Nt, and either sufficient allele frequency or
            # enough callers agreeing.
            if (opts.variantType=='INDEL' and float(row[strb]) <= float(opts.MaxStrBias) and abs(len(row[ref])-len(row[alt])) >= int(opts.Nt) and ((float(row[af]) > float(opts.MinAf)) or (int(row[fb])+int(row[vd])+int(row[pl])+int(row[gk]) >= int(opts.minCaller)))) :
                print line
            # SNP filter: same as INDEL but without the length-difference test.
            if (opts.variantType=='SNP' and float(row[strb]) <= float(opts.MaxStrBias) and ((float(row[af]) > float(opts.MinAf)) or (int(row[fb])+int(row[vd])+int(row[pl])+int(row[gk]) >= int(opts.minCaller)))) :
                print line
def main():
    """Parse command-line options and filter the dataset, writing kept rows to stdout."""
    parser = argparse.ArgumentParser('Parse VCF output to output valid VCF. Output is to stdout.')
    parser.add_argument('-i', '--file', help="dataset tab delimited")
    parser.add_argument('-f', '--MinAf', help="min allele frequency required to call a snp or an indel in AllData")
    parser.add_argument('-sb', '--MaxStrBias', help="max strand bias accepted to call a snp or an indel in AllData")
    parser.add_argument('-nt', '--Nt', help="min number of nucleotides required to call a snp or an indel in AllData", default=6)
    parser.add_argument('-calls', '--minCaller', help="min number of callers required to define a snp or an indel in AllData")
    parser.add_argument('-vt', '--variantType', help="Type of the variant analyzed")
    # txt_reader reads the options through this module-level global.
    global opts
    opts = parser.parse_args()
    # Use a context manager so the input file is always closed
    # (the original version leaked the file handle).
    with open(opts.file) as in_file:
        txt_reader(in_file)

main()
| 34.985507 | 267 | 0.59652 | import argparse
def txt_reader(txt):
''' read the data set and variants that satisfy the following condition are maintained:
[ abs(length(REF) - length(ALT)) >= 6 & [AF_median > 0.3 OR Num_caller > 1 ] & STRBIAS_median < MaxStrandBias ] for INDELs and
[AF_median > 0.3 OR Num_caller > 1 ] & STRBIAS_median < MaxStrandBias] for SNPs'''
af=0
strb=0
ref=''
alt=''
fb=0
vd=0
pl=0
gk=0
for line in txt:
line=line.rstrip()
row=line.split('\t')
if line.startswith('SAMPLE_ID'):
af=row.index("AF_median")
strb=row.index("STRBIAS_median")
ref=row.index("REF")
alt=row.index("ALT")
fb=row.index("CallFreebayes")
vd=row.index("CallVardict")
pl=row.index("CallPlatypus")
gk=row.index("CallGatk")
print line
else:
if row[strb]=='.':
row[strb]=0
if (opts.variantType=='INDEL' and float(row[strb]) <= float(opts.MaxStrBias) and abs(len(row[ref])-len(row[alt])) >= int(opts.Nt) and ((float(row[af]) > float(opts.MinAf)) or (int(row[fb])+int(row[vd])+int(row[pl])+int(row[gk]) >= int(opts.minCaller)))) :
print line
if (opts.variantType=='SNP' and float(row[strb]) <= float(opts.MaxStrBias) and ((float(row[af]) > float(opts.MinAf)) or (int(row[fb])+int(row[vd])+int(row[pl])+int(row[gk]) >= int(opts.minCaller)))) :
print line
def main():
parser = argparse.ArgumentParser('Parse VCF output to output valid VCF. Output is to stdout.')
parser.add_argument('-i', '--file', help="dataset tab delimited")
parser.add_argument('-f', '--MinAf', help="min allele frequency required to call a snp or an indel in AllData")
parser.add_argument('-sb', '--MaxStrBias', help="max strand bias accepted to call a snp or an indel in AllData")
parser.add_argument('-nt', '--Nt', help="min number of nucleotides required to call a snp or an indel in AllData", default=6)
parser.add_argument('-calls', '--minCaller', help="min number of callers required to define a snp or an indel in AllData")
parser.add_argument('-vt', '--variantType', help="Type of the variant analyzed")
global opts
opts = parser.parse_args()
in_file = open(opts.file)
txt_reader(in_file)
main()
| false | true |
f7fd242a9b3de8506a17174cb91b570791fd17c8 | 576 | py | Python | tests/test_tokenizer.py | Filter-Bubble/stanza_wrapper | 04388869cbbe419132628422663e4c7c987cf1d0 | [
"Apache-2.0"
] | null | null | null | tests/test_tokenizer.py | Filter-Bubble/stanza_wrapper | 04388869cbbe419132628422663e4c7c987cf1d0 | [
"Apache-2.0"
] | null | null | null | tests/test_tokenizer.py | Filter-Bubble/stanza_wrapper | 04388869cbbe419132628422663e4c7c987cf1d0 | [
"Apache-2.0"
] | null | null | null | import os
import io
from stanza_wrapper import parse
__here__ = os.path.dirname(os.path.realpath(__file__))
txt = b'''Dit is een tekst. Er zijn twee zinnen.'''
def assert_equal(val1, val2):
    """Assert that *val1* equals *val2*, with a diagnostic failure message.

    Raises AssertionError showing both values when they differ, so test
    failures are easier to debug than a bare assert.
    """
    assert val1 == val2, f"{val1!r} != {val2!r}"
def test_tokenize():
    """Tokenizing the two-sentence sample yields 10 tokens and 3 processor layers."""
    my_obj = parse(io.BytesIO(txt))
    token_list = list(my_obj.get_tokens())
    # The sample text contains 10 tokens (8 words + 2 sentence-final periods).
    assert_equal(len(token_list), 10)
    # The last token's offset should point at the final character of the text.
    last_token = token_list[-1]
    assert_equal(last_token.get_offset(), str(len(txt)-1))
    # Check linguistic processor layers
    layers = list(my_obj.get_linguisticProcessors())
    assert_equal(len(layers), 3)
| 24 | 58 | 0.710069 | import os
import io
from stanza_wrapper import parse
__here__ = os.path.dirname(os.path.realpath(__file__))
txt = b'''Dit is een tekst. Er zijn twee zinnen.'''
def assert_equal(val1, val2):
assert val1 == val2
def test_tokenize():
my_obj = parse(io.BytesIO(txt))
token_list = list(my_obj.get_tokens())
assert_equal(len(token_list), 10)
last_token = token_list[-1]
assert_equal(last_token.get_offset(), str(len(txt)-1))
layers = list(my_obj.get_linguisticProcessors())
assert_equal(len(layers), 3)
| true | true |
f7fd24395d39388e023a60068824ac65c82bc271 | 121 | py | Python | Chapter 01/ch1_10.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | f6a4194684515495d00aa38347a725dd08f39a0c | [
"MIT"
] | null | null | null | Chapter 01/ch1_10.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | f6a4194684515495d00aa38347a725dd08f39a0c | [
"MIT"
] | null | null | null | Chapter 01/ch1_10.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE | f6a4194684515495d00aa38347a725dd08f39a0c | [
"MIT"
] | null | null | null | int1=float(input('Enter a floating value: '))
# Output: Enter a floating value:
# Input from user: [23]
print(int1)
| 24.2 | 46 | 0.677686 | int1=float(input('Enter a floating value: '))
print(int1)
| true | true |
f7fd2452519a85cf12ff249f11683c3e7ae5e41e | 14,000 | py | Python | test/functional/p2p_unrequested_blocks.py | bitcoin-global/bitcoin-global | 8f8783245ec209ba1ae4b2c0717f9d8f2d5658ea | [
"MIT"
] | 3 | 2020-09-23T23:55:28.000Z | 2021-07-10T03:21:46.000Z | test/functional/p2p_unrequested_blocks.py | Penny-Admixture/bitcoin-global | 8f8783245ec209ba1ae4b2c0717f9d8f2d5658ea | [
"MIT"
] | 2 | 2020-07-28T08:55:30.000Z | 2021-04-22T10:57:10.000Z | test/functional/p2p_unrequested_blocks.py | Penny-Admixture/bitcoin-global | 8f8783245ec209ba1ae4b2c0717f9d8f2d5658ea | [
"MIT"
] | 1 | 2021-06-12T07:04:55.000Z | 2021-06-12T07:04:55.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Copyright (c) 2020 The Bitcoin Global developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Setup: two nodes, node0+node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one P2PInterface connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c.Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(ie to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
import time
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
from test_framework.messages import CBlockHeader, CInv, msg_block, msg_headers, msg_inv
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
)
class AcceptBlockTest(BitcoinTestFramework):
    """Functional test for how nodes process unrequested blocks.

    See the module docstring for the numbered scenario (steps 1-9) that
    run_test() walks through.
    """
    def set_test_params(self):
        # Start from a clean chain so block heights are deterministic.
        self.setup_clean_chain = True
        self.num_nodes = 2
        # node0: default settings; node1: raised nMinimumChainWork so that
        # low-work blocks are skipped while it is in IBD (step 2).
        self.extra_args = [[], ["-minimumchainwork=0x10"]]
    def setup_network(self):
        # Deliberately start the nodes unconnected: node0 exercises
        # unrequested-block handling for an ordinary peer, while node1
        # (started with -minimumchainwork, see set_test_params) checks that
        # blocks below the minimum chain work are not processed during IBD.
        # The two nodes are only connected to each other at the very end of
        # the test (step 9 in run_test).
        self.setup_nodes()
    def run_test(self):
        """Drive steps 1-9 from the module docstring against node0/node1."""
        # Setup the p2p connections
        # test_node connects to node0 (ordinary, non-whitelisted peer)
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        # min_work_node connects to node1 (the -minimumchainwork node)
        min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
        # 1. Have nodes mine a block (leave IBD)
        [n.generatetoaddress(1, n.get_deterministic_priv_key().address) for n in self.nodes]
        tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]
        # 2. Send one block that builds on each tip.
        # This should be accepted by node0
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
        for i in range(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        min_work_node.send_message(msg_block(blocks_h2[1]))
        for x in [test_node, min_work_node]:
            x.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 1)
        self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
        # 3. Send another block that builds on genesis.
        block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
        block_time += 1
        block_h1f.solve()
        test_node.send_message(msg_block(block_h1f))
        test_node.sync_with_ping()
        # Only the header should have been accepted (status "headers-only").
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h1f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
        # 4. Send another two blocks that build on the fork.
        block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
        block_time += 1
        block_h2f.solve()
        test_node.send_message(msg_block(block_h2f))
        test_node.sync_with_ping()
        # Since the earlier block was not processed by node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h2f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        # But this block should be accepted by node since it has equal work.
        self.nodes[0].getblock(block_h2f.hash)
        self.log.info("Second height 2 block accepted, but not reorg'ed to")
        # 4b. Now send another block that builds on the forking chain.
        block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
        block_h3.solve()
        test_node.send_message(msg_block(block_h3))
        test_node.sync_with_ping()
        # Since the earlier block was not processed by node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h3.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        # NOTE(review): this getblock call is duplicated on the next
        # statement; harmless but redundant.
        self.nodes[0].getblock(block_h3.hash)
        # But this block should be accepted by node since it has more work.
        self.nodes[0].getblock(block_h3.hash)
        self.log.info("Unrequested more-work block accepted")
        # 4c. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node (as long as it is not missing any headers)
        tip = block_h3
        all_blocks = []
        for i in range(288):
            next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
            next_block.solve()
            all_blocks.append(next_block)
            tip = next_block
        # Now send the block at height 5 and check that it wasn't accepted (missing header)
        test_node.send_message(msg_block(all_blocks[1]))
        test_node.sync_with_ping()
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
        # The block at height 5 should be accepted if we provide the missing header, though
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(all_blocks[0]))
        test_node.send_message(headers_message)
        test_node.send_message(msg_block(all_blocks[1]))
        test_node.sync_with_ping()
        self.nodes[0].getblock(all_blocks[1].hash)
        # Now send the blocks in all_blocks
        for i in range(288):
            test_node.send_message(msg_block(all_blocks[i]))
        test_node.sync_with_ping()
        # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
        for x in all_blocks[:-1]:
            self.nodes[0].getblock(x.hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).
        # The node should have requested the blocks at some point, so
        # disconnect/reconnect first
        self.nodes[0].disconnect_p2ps()
        self.nodes[1].disconnect_p2ps()
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        test_node.send_message(msg_block(block_h1f))
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        self.log.info("Unrequested block that would complete more-work chain was ignored")
        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_message.pop("getdata", None)
        test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_message["getdata"]
        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, block_h1f.sha256)
        self.log.info("Inv at tip triggered getdata for unprocessed block")
        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(block_h1f))
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 290)
        self.nodes[0].getblock(all_blocks[286].hash)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
        self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
        # 8. Create a chain which is invalid at a height longer than the
        # current chain, but which has more blocks on top of that
        block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
        block_289f.solve()
        block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
        block_290f.solve()
        block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
        # block_291 spends a coinbase below maturity!
        block_291.vtx.append(create_tx_with_script(block_290f.vtx[0], 0, script_sig=b"42", amount=1))
        block_291.hashMerkleRoot = block_291.calc_merkle_root()
        block_291.solve()
        block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
        block_292.solve()
        # Now send all the headers on the chain and enough blocks to trigger reorg
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_289f))
        headers_message.headers.append(CBlockHeader(block_290f))
        headers_message.headers.append(CBlockHeader(block_291))
        headers_message.headers.append(CBlockHeader(block_292))
        test_node.send_message(headers_message)
        test_node.sync_with_ping()
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_292.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
        test_node.send_message(msg_block(block_289f))
        test_node.send_message(msg_block(block_290f))
        test_node.sync_with_ping()
        self.nodes[0].getblock(block_289f.hash)
        self.nodes[0].getblock(block_290f.hash)
        test_node.send_message(msg_block(block_291))
        # At this point we've sent an obviously-bogus block, wait for full processing
        # without assuming whether we will be disconnected or not
        try:
            # Only wait a short while so the test doesn't take forever if we do get
            # disconnected
            test_node.sync_with_ping(timeout=1)
        except AssertionError:
            test_node.wait_for_disconnect()
            self.nodes[0].disconnect_p2ps()
            test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        # We should have failed reorg and switched back to 290 (but have block 291)
        assert_equal(self.nodes[0].getblockcount(), 290)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
        # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
        block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
        block_293.solve()
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_293))
        test_node.send_message(headers_message)
        test_node.wait_for_disconnect()
        # 9. Connect node1 to node0 and ensure it is able to sync
        connect_nodes(self.nodes[0], 1)
        self.sync_blocks([self.nodes[0], self.nodes[1]])
        self.log.info("Successfully synced nodes 1 and 0")
# Entry point: run the functional test when executed directly.
if __name__ == '__main__':
    AcceptBlockTest().main()
| 44.444444 | 113 | 0.6825 |
import time
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
from test_framework.messages import CBlockHeader, CInv, msg_block, msg_headers, msg_inv
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
)
class AcceptBlockTest(BitcoinTestFramework):
    """Functional test for how nodes process unrequested blocks."""
    def set_test_params(self):
        # Clean chain so block heights are deterministic; node1 is started
        # with a raised nMinimumChainWork so it skips low-work blocks in IBD.
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [[], ["-minimumchainwork=0x10"]]
    def setup_network(self):
        # Start the nodes unconnected; they are only connected to each other
        # at the end of run_test (the sync check).
        self.setup_nodes()
    def run_test(self):
        """Exercise unrequested-block handling against node0/node1."""
        # P2P peers: test_node -> node0, min_work_node -> node1.
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
        # 1. Mine one block on each node to leave IBD.
        [n.generatetoaddress(1, n.get_deterministic_priv_key().address) for n in self.nodes]
        tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]
        # 2. Deliver a height-2 block to each node; node0 should accept it,
        # node1 should skip processing (below its minimum chain work).
        blocks_h2 = []
        block_time = int(time.time()) + 1
        for i in range(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        min_work_node.send_message(msg_block(blocks_h2[1]))
        for x in [test_node, min_work_node]:
            x.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 1)
        self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
        # 3. Send another block that builds on genesis.
        block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
        block_time += 1
        block_h1f.solve()
        test_node.send_message(msg_block(block_h1f))
        test_node.sync_with_ping()
        # Only the header should have been accepted (status "headers-only").
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h1f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
        # 4. Send another two blocks that build on the fork.
        block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
        block_time += 1
        block_h2f.solve()
        test_node.send_message(msg_block(block_h2f))
        test_node.sync_with_ping()
        # Since the earlier block was not processed by node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h2f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        # The block itself should be stored (equal work), but not reorged to.
        self.nodes[0].getblock(block_h2f.hash)
        self.log.info("Second height 2 block accepted, but not reorg'ed to")
        # 4b. Now send another block that builds on the forking chain.
        block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
        block_h3.solve()
        test_node.send_message(msg_block(block_h3))
        test_node.sync_with_ping()
        # Since the earlier block was not processed by node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h3.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        # NOTE(review): duplicated getblock call below; harmless but redundant.
        self.nodes[0].getblock(block_h3.hash)
        self.nodes[0].getblock(block_h3.hash)
        self.log.info("Unrequested more-work block accepted")
        # 4c. Mine 288 more blocks on the fork; all but the last (too far
        # ahead in height) should be processable once headers are known.
        tip = block_h3
        all_blocks = []
        for i in range(288):
            next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
            next_block.solve()
            all_blocks.append(next_block)
            tip = next_block
        # The height-5 block is rejected while its header is unknown...
        test_node.send_message(msg_block(all_blocks[1]))
        test_node.sync_with_ping()
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
        # ...and accepted once the missing header has been provided.
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(all_blocks[0]))
        test_node.send_message(headers_message)
        test_node.send_message(msg_block(all_blocks[1]))
        test_node.sync_with_ping()
        self.nodes[0].getblock(all_blocks[1].hash)
        # Now send every block in all_blocks.
        for i in range(288):
            test_node.send_message(msg_block(all_blocks[i]))
        test_node.sync_with_ping()
        # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
        for x in all_blocks[:-1]:
            self.nodes[0].getblock(x.hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
        # 5. Resend the original fork block unrequested.
        # Should still not be processed (even though it has a child that has more
        # work).
        # The node should have requested the blocks at some point, so
        # disconnect/reconnect first
        self.nodes[0].disconnect_p2ps()
        self.nodes[1].disconnect_p2ps()
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        test_node.send_message(msg_block(block_h1f))
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        self.log.info("Unrequested block that would complete more-work chain was ignored")
        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_message.pop("getdata", None)
        test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_message["getdata"]
        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, block_h1f.sha256)
        self.log.info("Inv at tip triggered getdata for unprocessed block")
        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(block_h1f))
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 290)
        self.nodes[0].getblock(all_blocks[286].hash)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
        self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
        # 8. Create a chain which is invalid at a height longer than the
        # current chain, but which has more blocks on top of that
        block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
        block_289f.solve()
        block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
        block_290f.solve()
        block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
        # block_291 spends a coinbase below maturity!
        block_291.vtx.append(create_tx_with_script(block_290f.vtx[0], 0, script_sig=b"42", amount=1))
        block_291.hashMerkleRoot = block_291.calc_merkle_root()
        block_291.solve()
        block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
        block_292.solve()
        # Now send all the headers on the chain and enough blocks to trigger reorg
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_289f))
        headers_message.headers.append(CBlockHeader(block_290f))
        headers_message.headers.append(CBlockHeader(block_291))
        headers_message.headers.append(CBlockHeader(block_292))
        test_node.send_message(headers_message)
        test_node.sync_with_ping()
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_292.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
        test_node.send_message(msg_block(block_289f))
        test_node.send_message(msg_block(block_290f))
        test_node.sync_with_ping()
        self.nodes[0].getblock(block_289f.hash)
        self.nodes[0].getblock(block_290f.hash)
        test_node.send_message(msg_block(block_291))
        # At this point we've sent an obviously-bogus block, wait for full processing
        # without assuming whether we will be disconnected or not.
        try:
            # Only wait briefly so the test doesn't stall if we do get
            # disconnected
            test_node.sync_with_ping(timeout=1)
        except AssertionError:
            test_node.wait_for_disconnect()
            self.nodes[0].disconnect_p2ps()
            test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        # We should have failed reorg and switched back to 290 (but have block 291)
        assert_equal(self.nodes[0].getblockcount(), 290)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
        # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
        block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
        block_293.solve()
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_293))
        test_node.send_message(headers_message)
        test_node.wait_for_disconnect()
        # 9. Connect node1 to node0 and ensure it is able to sync.
        connect_nodes(self.nodes[0], 1)
        self.sync_blocks([self.nodes[0], self.nodes[1]])
        self.log.info("Successfully synced nodes 1 and 0")
# Entry point: run the functional test when executed directly.
if __name__ == '__main__':
    AcceptBlockTest().main()
| true | true |
f7fd24a1951d2de758d9ba64113792fa048b8145 | 5,679 | py | Python | tests/ext/django/test_middleware.py | musicinmybrain/aws-xray-sdk-python | b8e59423f1891351ceb1a0bd585603e0cd46c74c | [
"Apache-2.0"
] | 294 | 2017-10-10T19:01:04.000Z | 2022-03-18T15:52:19.000Z | tests/ext/django/test_middleware.py | musicinmybrain/aws-xray-sdk-python | b8e59423f1891351ceb1a0bd585603e0cd46c74c | [
"Apache-2.0"
] | 285 | 2017-10-20T09:27:21.000Z | 2022-03-29T15:33:45.000Z | tests/ext/django/test_middleware.py | musicinmybrain/aws-xray-sdk-python | b8e59423f1891351ceb1a0bd585603e0cd46c74c | [
"Apache-2.0"
] | 134 | 2017-10-11T13:55:17.000Z | 2022-03-23T07:21:17.000Z | import django
from aws_xray_sdk import global_sdk_config
from django.urls import reverse
from django.test import TestCase
from aws_xray_sdk.core import xray_recorder, lambda_launcher
from aws_xray_sdk.core.context import Context
from aws_xray_sdk.core.models import http, facade_segment, segment
from aws_xray_sdk.core import patch
from tests.util import get_new_stubbed_recorder
import os
class XRayTestCase(TestCase):
    """Tests for the X-Ray Django middleware.

    Each test drives a request through the Django test client and then
    inspects the segment popped from the recorder's emitter.
    """
    def setUp(self):
        """Configure a fresh recorder with a local context before each test."""
        django.setup()
        xray_recorder.configure(context=Context(),
                                context_missing='LOG_ERROR')
        xray_recorder.clear_trace_entities()
        global_sdk_config.set_sdk_enabled(True)
    def tearDown(self):
        """Drop any trace entities left over by the test."""
        xray_recorder.clear_trace_entities()
    def test_ok(self):
        """A 200 response yields a segment with request/response metadata."""
        url = reverse('200ok')
        self.client.get(url)
        segment = xray_recorder.emitter.pop()
        request = segment.http['request']
        response = segment.http['response']
        assert request['method'] == 'GET'
        assert request['client_ip'] == '127.0.0.1'
        assert response['status'] == 200
    def test_error(self):
        """A 404 marks the segment as an error."""
        self.client.get('/notfound/')
        segment = xray_recorder.emitter.pop()
        assert segment.error
        request = segment.http['request']
        response = segment.http['response']
        assert request['method'] == 'GET'
        assert request['client_ip'] == '127.0.0.1'
        assert response['status'] == 404
    def test_fault(self):
        """A 500 marks the segment as a fault and records the exception."""
        url = reverse('500fault')
        try:
            self.client.get(url)
        except Exception:
            pass
        segment = xray_recorder.emitter.pop()
        assert segment.fault
        request = segment.http['request']
        response = segment.http['response']
        assert request['method'] == 'GET'
        assert request['client_ip'] == '127.0.0.1'
        assert response['status'] == 500
        exception = segment.cause['exceptions'][0]
        assert exception.type == 'KeyError'
    def test_db(self):
        """A patched sqlite3 query produces a SQL subsegment."""
        patch(('sqlite3',))
        url = reverse('call_db')
        self.client.get(url)
        segment = xray_recorder.emitter.pop()
        assert len(segment.subsegments) == 1
        subsegment = segment.subsegments[0]
        assert subsegment.name == ':memory:'
        assert not subsegment.in_progress
        sql = subsegment.sql
        assert sql['database_type'] == 'sqlite3'
        assert sql['database_version']
    def test_template(self):
        """Rendering a template produces a closed local subsegment."""
        url = reverse('template')
        self.client.get(url)
        segment = xray_recorder.emitter.pop()
        assert len(segment.subsegments) == 1
        subsegment = segment.subsegments[0]
        assert subsegment.name == 'index.html'
        assert not subsegment.in_progress
        assert subsegment.namespace == 'local'
    def test_template_block(self):
        """Rendering a template block is traced like a full template."""
        url = reverse('template_block')
        self.client.get(url)
        segment = xray_recorder.emitter.pop()
        assert len(segment.subsegments) == 1
        subsegment = segment.subsegments[0]
        assert subsegment.name == 'block_user.html'
        assert not subsegment.in_progress
        assert subsegment.namespace == 'local'
    def test_trace_header_data_perservation(self):
        # NOTE(review): "perservation" typo kept in the method name to avoid
        # breaking external test selection by name.
        """Extra key/value pairs in the incoming trace header are preserved."""
        url = reverse('200ok')
        self.client.get(url, HTTP_X_AMZN_TRACE_ID='k1=v1')
        segment = xray_recorder.emitter.pop()
        header = segment.get_origin_trace_header()
        assert header.data['k1'] == 'v1'
    def test_response_header(self):
        """'Sampled=?' requests get the sampling decision echoed back."""
        url = reverse('200ok')
        resp = self.client.get(url, HTTP_X_AMZN_TRACE_ID='Sampled=?')
        segment = xray_recorder.emitter.pop()
        trace_header = resp[http.XRAY_HEADER]
        assert 'Sampled=1' in trace_header
        assert segment.trace_id in trace_header
    def test_disabled_sdk(self):
        """No segment is emitted when the SDK is globally disabled."""
        global_sdk_config.set_sdk_enabled(False)
        url = reverse('200ok')
        self.client.get(url)
        segment = xray_recorder.emitter.pop()
        assert not segment
    def test_lambda_serverless(self):
        """With a Lambda context, subsegments attach to a facade segment."""
        TRACE_ID = '1-5759e988-bd862e3fe1be46a994272793'
        PARENT_ID = '53995c3f42cd8ad8'
        HEADER_VAR = "Root=%s;Parent=%s;Sampled=1" % (TRACE_ID, PARENT_ID)
        os.environ[lambda_launcher.LAMBDA_TRACE_HEADER_KEY] = HEADER_VAR
        lambda_context = lambda_launcher.LambdaContext()
        new_recorder = get_new_stubbed_recorder()
        new_recorder.configure(service='test', sampling=False, context=lambda_context)
        subsegment = new_recorder.begin_subsegment("subsegment")
        assert type(subsegment.parent_segment) == facade_segment.FacadeSegment
        new_recorder.end_subsegment()
        url = reverse('200ok')
        self.client.get(url)
        segment = new_recorder.emitter.pop()
        assert not segment
        # Test Fault in Lambda
        url = reverse('500fault')
        try:
            self.client.get(url)
        except Exception:
            pass
        segment = xray_recorder.emitter.pop()
        assert segment.fault
        request = segment.http['request']
        response = segment.http['response']
        assert request['method'] == 'GET'
        assert request['client_ip'] == '127.0.0.1'
        assert response['status'] == 500
        exception = segment.cause['exceptions'][0]
        assert exception.type == 'KeyError'
    def test_lambda_default_ctx(self):
        # Track to make sure that Django will default to generating segments if context is not the lambda context
        url = reverse('200ok')
        self.client.get(url)
        cur_segment = xray_recorder.emitter.pop()
        assert type(cur_segment) == segment.Segment
| 32.637931 | 113 | 0.640606 | import django
from aws_xray_sdk import global_sdk_config
from django.urls import reverse
from django.test import TestCase
from aws_xray_sdk.core import xray_recorder, lambda_launcher
from aws_xray_sdk.core.context import Context
from aws_xray_sdk.core.models import http, facade_segment, segment
from aws_xray_sdk.core import patch
from tests.util import get_new_stubbed_recorder
import os
class XRayTestCase(TestCase):
    """Tests for the X-Ray Django middleware.

    Each test drives a request through the Django test client and then
    inspects the segment popped from the recorder's emitter.
    """
    def setUp(self):
        """Configure a fresh recorder with a local context before each test."""
        django.setup()
        xray_recorder.configure(context=Context(),
                                context_missing='LOG_ERROR')
        xray_recorder.clear_trace_entities()
        global_sdk_config.set_sdk_enabled(True)
    def tearDown(self):
        """Drop any trace entities left over by the test."""
        xray_recorder.clear_trace_entities()
    def test_ok(self):
        """A 200 response yields a segment with request/response metadata."""
        url = reverse('200ok')
        self.client.get(url)
        segment = xray_recorder.emitter.pop()
        request = segment.http['request']
        response = segment.http['response']
        assert request['method'] == 'GET'
        assert request['client_ip'] == '127.0.0.1'
        assert response['status'] == 200
    def test_error(self):
        """A 404 marks the segment as an error."""
        self.client.get('/notfound/')
        segment = xray_recorder.emitter.pop()
        assert segment.error
        request = segment.http['request']
        response = segment.http['response']
        assert request['method'] == 'GET'
        assert request['client_ip'] == '127.0.0.1'
        assert response['status'] == 404
    def test_fault(self):
        """A 500 marks the segment as a fault and records the exception."""
        url = reverse('500fault')
        try:
            self.client.get(url)
        except Exception:
            pass
        segment = xray_recorder.emitter.pop()
        assert segment.fault
        request = segment.http['request']
        response = segment.http['response']
        assert request['method'] == 'GET'
        assert request['client_ip'] == '127.0.0.1'
        assert response['status'] == 500
        exception = segment.cause['exceptions'][0]
        assert exception.type == 'KeyError'
    def test_db(self):
        """A patched sqlite3 query produces a SQL subsegment."""
        patch(('sqlite3',))
        url = reverse('call_db')
        self.client.get(url)
        segment = xray_recorder.emitter.pop()
        assert len(segment.subsegments) == 1
        subsegment = segment.subsegments[0]
        assert subsegment.name == ':memory:'
        assert not subsegment.in_progress
        sql = subsegment.sql
        assert sql['database_type'] == 'sqlite3'
        assert sql['database_version']
    def test_template(self):
        """Rendering a template produces a closed local subsegment."""
        url = reverse('template')
        self.client.get(url)
        segment = xray_recorder.emitter.pop()
        assert len(segment.subsegments) == 1
        subsegment = segment.subsegments[0]
        assert subsegment.name == 'index.html'
        assert not subsegment.in_progress
        assert subsegment.namespace == 'local'
    def test_template_block(self):
        """Rendering a template block is traced like a full template."""
        url = reverse('template_block')
        self.client.get(url)
        segment = xray_recorder.emitter.pop()
        assert len(segment.subsegments) == 1
        subsegment = segment.subsegments[0]
        assert subsegment.name == 'block_user.html'
        assert not subsegment.in_progress
        assert subsegment.namespace == 'local'
    def test_trace_header_data_perservation(self):
        """Extra key/value pairs in the incoming trace header are preserved."""
        url = reverse('200ok')
        self.client.get(url, HTTP_X_AMZN_TRACE_ID='k1=v1')
        segment = xray_recorder.emitter.pop()
        header = segment.get_origin_trace_header()
        assert header.data['k1'] == 'v1'
    def test_response_header(self):
        """'Sampled=?' requests get the sampling decision echoed back."""
        url = reverse('200ok')
        resp = self.client.get(url, HTTP_X_AMZN_TRACE_ID='Sampled=?')
        segment = xray_recorder.emitter.pop()
        trace_header = resp[http.XRAY_HEADER]
        assert 'Sampled=1' in trace_header
        assert segment.trace_id in trace_header
    def test_disabled_sdk(self):
        """No segment is emitted when the SDK is globally disabled."""
        global_sdk_config.set_sdk_enabled(False)
        url = reverse('200ok')
        self.client.get(url)
        segment = xray_recorder.emitter.pop()
        assert not segment
    def test_lambda_serverless(self):
        """With a Lambda context, subsegments attach to a facade segment."""
        TRACE_ID = '1-5759e988-bd862e3fe1be46a994272793'
        PARENT_ID = '53995c3f42cd8ad8'
        HEADER_VAR = "Root=%s;Parent=%s;Sampled=1" % (TRACE_ID, PARENT_ID)
        os.environ[lambda_launcher.LAMBDA_TRACE_HEADER_KEY] = HEADER_VAR
        lambda_context = lambda_launcher.LambdaContext()
        new_recorder = get_new_stubbed_recorder()
        new_recorder.configure(service='test', sampling=False, context=lambda_context)
        subsegment = new_recorder.begin_subsegment("subsegment")
        assert type(subsegment.parent_segment) == facade_segment.FacadeSegment
        new_recorder.end_subsegment()
        url = reverse('200ok')
        self.client.get(url)
        segment = new_recorder.emitter.pop()
        assert not segment
        # A fault raised inside Lambda should still be recorded on the segment.
        url = reverse('500fault')
        try:
            self.client.get(url)
        except Exception:
            pass
        segment = xray_recorder.emitter.pop()
        assert segment.fault
        request = segment.http['request']
        response = segment.http['response']
        assert request['method'] == 'GET'
        assert request['client_ip'] == '127.0.0.1'
        assert response['status'] == 500
        exception = segment.cause['exceptions'][0]
        assert exception.type == 'KeyError'
    def test_lambda_default_ctx(self):
        # Without a Lambda context, Django defaults to generating normal segments.
        url = reverse('200ok')
        self.client.get(url)
        cur_segment = xray_recorder.emitter.pop()
        assert type(cur_segment) == segment.Segment
| true | true |
f7fd252ea0293eea40bd41152d63537bbfb1b687 | 581 | py | Python | api/migrations/0001_initial.py | alaasalman/aussieshopper | ac3584e7d0e7cb62d138f6dc9122f44a3a4d264e | [
"MIT"
] | null | null | null | api/migrations/0001_initial.py | alaasalman/aussieshopper | ac3584e7d0e7cb62d138f6dc9122f44a3a4d264e | [
"MIT"
] | 15 | 2021-06-10T22:37:58.000Z | 2022-02-17T04:45:09.000Z | api/migrations/0001_initial.py | alaasalman/aussieshopper | ac3584e7d0e7cb62d138f6dc9122f44a3a4d264e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-22 08:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the LogChatMessages table."""
    # First migration of the app, so it has no dependencies.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='LogChatMessages',
            fields=[
                # Implicit auto-increment primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.CharField(max_length=100, verbose_name='Text')),
            ],
        ),
    ]
| 24.208333 | 114 | 0.604131 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the LogChatMessages table."""
    # First migration of the app, so it has no dependencies.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='LogChatMessages',
            fields=[
                # Implicit auto-increment primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.CharField(max_length=100, verbose_name='Text')),
            ],
        ),
    ]
| true | true |
f7fd254d829e3a09d5ec12731748d0207bc0ffa9 | 2,920 | py | Python | cloudkitty/api/app.py | jeffrey4l/cloudkitty | c3eecc835db90d5eaf541cbb342149afae526159 | [
"Apache-2.0"
] | 1 | 2015-01-28T22:34:56.000Z | 2015-01-28T22:34:56.000Z | cloudkitty/api/app.py | jeffrey4l/cloudkitty | c3eecc835db90d5eaf541cbb342149afae526159 | [
"Apache-2.0"
] | null | null | null | cloudkitty/api/app.py | jeffrey4l/cloudkitty | c3eecc835db90d5eaf541cbb342149afae526159 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2014 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Stéphane Albert
#
import os
from wsgiref import simple_server
from oslo.config import cfg
from oslo import messaging
from paste import deploy
import pecan
from cloudkitty.api import config as api_config
from cloudkitty.api import hooks
from cloudkitty.common import rpc
from cloudkitty import config # noqa
from cloudkitty.openstack.common import log as logging
# Module logger.
LOG = logging.getLogger(__name__)
# Location of the paste deploy configuration used by setup_wsgi().
auth_opts = [
    cfg.StrOpt('api_paste_config',
               default="api_paste.ini",
               help="Configuration file for WSGI definition of API."
               ),
    ]
# Bind address/port for the standalone server built by build_server().
api_opts = [
    cfg.StrOpt('host_ip',
               default="0.0.0.0",
               help="Host serving the API."
               ),
    cfg.IntOpt('port',
               default=8888,
               help="Host port serving the API."
               ),
    ]
CONF = cfg.CONF
CONF.register_opts(auth_opts)
# The listen options live under the [api] group.
CONF.register_opts(api_opts, group='api')
def get_pecan_config():
    """Load and return the pecan configuration for the API.

    The configuration lives next to the ``cloudkitty.api.config`` module;
    the ``.pyc`` -> ``.py`` substitution ensures the source file is read
    even when the module was loaded from compiled bytecode.
    """
    config_path = api_config.__file__.replace('.pyc', '.py')
    return pecan.configuration.conf_from_file(config_path)
def setup_app(pecan_config=None, extra_hooks=None):
    """Create and return the pecan WSGI application for the API.

    The ``pecan_config`` and ``extra_hooks`` arguments are currently
    unused; the configuration is always loaded via get_pecan_config().
    An RPC client targeting the 'cloudkitty' topic is attached to every
    request through an RPCHook.
    """
    app_conf = get_pecan_config()
    # RPC target of the cloudkitty processing service (API version 1.0).
    target = messaging.Target(topic='cloudkitty',
                              version='1.0')
    client = rpc.get_client(target)
    app_hooks = [
        hooks.RPCHook(client)
    ]
    return pecan.make_app(
        app_conf.app.root,
        static_root=app_conf.app.static_root,
        template_path=app_conf.app.template_path,
        debug=CONF.debug,
        force_canonical=getattr(app_conf.app, 'force_canonical', True),
        hooks=app_hooks,
        guess_content_type_from_ext=False
    )
def setup_wsgi():
    """Load the API WSGI application from the paste deploy config file.

    Raises:
        Exception: if the configured ``api_paste_config`` file does not exist.
    """
    cfg_file = cfg.CONF.api_paste_config
    if not os.path.exists(cfg_file):
        # Name the missing path so the operator can fix the configuration.
        raise Exception('api_paste_config file %s not found' % cfg_file)
    # paste.deploy "config:" URIs must be absolute (or given relative_to);
    # normalise so a relative api_paste_config setting still works.
    return deploy.loadapp("config:" + os.path.abspath(cfg_file))
def build_server():
    """Construct (without starting) a wsgiref WSGI server hosting the API.

    The listen address and port come from the ``[api]`` configuration group.
    """
    application = setup_app()
    return simple_server.make_server(
        CONF.api.host_ip,
        CONF.api.port,
        application,
        simple_server.WSGIServer,
        simple_server.WSGIRequestHandler)
| 25.840708 | 78 | 0.666438 |
import os
from wsgiref import simple_server
from oslo.config import cfg
from oslo import messaging
from paste import deploy
import pecan
from cloudkitty.api import config as api_config
from cloudkitty.api import hooks
from cloudkitty.common import rpc
from cloudkitty import config
from cloudkitty.openstack.common import log as logging
LOG = logging.getLogger(__name__)
auth_opts = [
cfg.StrOpt('api_paste_config',
default="api_paste.ini",
help="Configuration file for WSGI definition of API."
),
]
api_opts = [
cfg.StrOpt('host_ip',
default="0.0.0.0",
help="Host serving the API."
),
cfg.IntOpt('port',
default=8888,
help="Host port serving the API."
),
]
CONF = cfg.CONF
CONF.register_opts(auth_opts)
CONF.register_opts(api_opts, group='api')
def get_pecan_config():
filename = api_config.__file__.replace('.pyc', '.py')
return pecan.configuration.conf_from_file(filename)
def setup_app(pecan_config=None, extra_hooks=None):
app_conf = get_pecan_config()
target = messaging.Target(topic='cloudkitty',
version='1.0')
client = rpc.get_client(target)
app_hooks = [
hooks.RPCHook(client)
]
return pecan.make_app(
app_conf.app.root,
static_root=app_conf.app.static_root,
template_path=app_conf.app.template_path,
debug=CONF.debug,
force_canonical=getattr(app_conf.app, 'force_canonical', True),
hooks=app_hooks,
guess_content_type_from_ext=False
)
def setup_wsgi():
cfg_file = cfg.CONF.api_paste_config
if not os.path.exists(cfg_file):
raise Exception('api_paste_config file not found')
return deploy.loadapp("config:" + cfg_file)
def build_server():
host = CONF.api.host_ip
port = CONF.api.port
server_cls = simple_server.WSGIServer
handler_cls = simple_server.WSGIRequestHandler
app = setup_app()
srv = simple_server.make_server(
host,
port,
app,
server_cls,
handler_cls)
return srv
| true | true |
f7fd25cf19ce15f4f313076f86cb90424e186ad0 | 1,164 | py | Python | Python_simple/code.py | surajg163/greyatom-python-for-data-science | a77b3bc8b35a4609f2ce457fda1ef4282ff40698 | [
"MIT"
] | null | null | null | Python_simple/code.py | surajg163/greyatom-python-for-data-science | a77b3bc8b35a4609f2ce457fda1ef4282ff40698 | [
"MIT"
] | null | null | null | Python_simple/code.py | surajg163/greyatom-python-for-data-science | a77b3bc8b35a4609f2ce457fda1ef4282ff40698 | [
"MIT"
] | null | null | null | # --------------
# --- Task 1: class roster manipulation --------------------------------
# Combine the two classes, then adjust membership.
class_1 = ['Geoffrey Hinton', 'Andrew Ng', 'Sebastian Raschka', 'Yoshua Bengio']
class_2 = ['Hilary Mason', 'Carla Gentry', 'Corinna Cortes']
new_class = class_1 + class_2
new_class.append('Peter Warden')
print(new_class)
new_class.remove('Carla Gentry')
print(new_class)

# --- Task 2: overall percentage across five courses -------------------
courses = {'Math': 65, 'English': 70, 'History': 80, 'French': 70, 'Science': 60}
print(courses)
total = sum(courses.values())
print(total)
# Each course is marked out of 100, so the maximum total is 500.
percentage = (total / 500) * 100
print(percentage)

# --- Task 3: student with the best mathematics score ------------------
mathematics = {'Geoffrey Hinton': 78, 'Andrew Ng': 95, 'Sebastian Raschka': 65,
               'Yoshua Benjio': 50, 'Hilary Mason': 70, 'Corinna Cortes': 66,
               'Peter Warden': 75}
topper = max(mathematics, key=mathematics.get)
print(topper)

# --- Task 4: certificate name ("LAST FIRST", upper-cased) -------------
topper = 'andrew ng'
# Split once instead of calling topper.split() twice.
first_name, last_name = topper.split()
full_name = last_name + ' ' + first_name
certificate_name = full_name.upper()
print(certificate_name)
| 18.774194 | 147 | 0.698454 |
class_1=['Geoffrey Hinton','Andrew Ng','Sebastian Raschka','Yoshua Bengio']
class_2=['Hilary Mason','Carla Gentry','Corinna Cortes']
new_class=class_1+class_2
new_class.append('Peter Warden')
print(new_class)
new_class.remove('Carla Gentry')
print(new_class)
courses={'Math':65,'English':70,'History':80,'French':70,'Science':60}
print(courses)
total=sum(courses.values())
print(total)
percentage=(total/500)*100
print(percentage)
mathematics={'Geoffrey Hinton':78,'Andrew Ng':95,'Sebastian Raschka':65,'Yoshua Benjio':50,'Hilary Mason':70,'Corinna Cortes':66,'Peter Warden':75}
topper = max(mathematics,key = mathematics.get)
print (topper)
topper = 'andrew ng'
first_name=(topper.split()[0])
last_name=(topper.split()[1])
full_name = last_name +' '+first_name
certificate_name=full_name.upper()
print(certificate_name)
| true | true |
f7fd26542b16b695e8eb0600a7e4a557eb3759af | 517 | py | Python | doc/jvsip_book/pyJvsip_examples/example1b.py | rrjudd/jvsip | 56a965fff595b027139ff151d27d434f2480b9e8 | [
"MIT"
] | 10 | 2016-01-16T04:10:13.000Z | 2022-03-22T02:17:44.000Z | doc/jvsip_book/pyJvsip_examples/example1b.py | rrjudd/jvsip | 56a965fff595b027139ff151d27d434f2480b9e8 | [
"MIT"
] | 1 | 2015-09-11T04:48:03.000Z | 2015-09-11T13:44:29.000Z | doc/jvsip_book/pyJvsip_examples/example1b.py | rrjudd/jvsip | 56a965fff595b027139ff151d27d434f2480b9e8 | [
"MIT"
] | 4 | 2017-06-13T21:48:23.000Z | 2020-08-26T15:07:44.000Z | import pyJvsip as pv
# Demonstrates element-wise addition of complex double-precision vectors
# using pyJvsip (a VSIPL binding).
N=6
# 'cvview_d' presumably creates a complex double vector view of length N;
# randn fills it with pseudo-random values (7 looks like an RNG seed --
# TODO confirm against the pyJvsip documentation).
A = pv.create('cvview_d',N).randn(7)
# A.empty allocates a new vector of the same type and length as A;
# fill() initialises every element with the given scalar.
B = A.empty.fill(5.0)
C = A.empty.fill(0.0)
print('A = '+A.mstring('%+.2f'))
print('B = '+B.mstring('%+.2f'))
# Compute C = A + B in place into C.
pv.add(A,B,C)
print('C = A+B')
print('C = '+C.mstring('%+.2f'))
# Sample output from one run (A's values depend on the randn fill):
""" OUTPUT
A = [+0.16+0.50i -0.21-0.75i -0.56-0.09i \
+1.15+0.45i +0.10+0.43i +0.63-1.05i]
B = [+5.00+0.00i +5.00+0.00i +5.00+0.00i \
+5.00+0.00i +5.00+0.00i +5.00+0.00i]
C = A+B
C = [+5.16+0.50i +4.79-0.75i +4.44-0.09i \
+6.15+0.45i +5.10+0.43i +5.63-1.05i]
"""
| 23.5 | 42 | 0.524178 | import pyJvsip as pv
N=6
A = pv.create('cvview_d',N).randn(7)
B = A.empty.fill(5.0)
C = A.empty.fill(0.0)
print('A = '+A.mstring('%+.2f'))
print('B = '+B.mstring('%+.2f'))
pv.add(A,B,C)
print('C = A+B')
print('C = '+C.mstring('%+.2f'))
| true | true |
f7fd269f7a11b52775e079e6e27637386e7844b8 | 13,438 | py | Python | kedro/extras/datasets/pandas/sql_dataset.py | andmikey/kedro | 9b4e4135720609d44ffdf5248246fe805f0b5469 | [
"Apache-2.0"
] | null | null | null | kedro/extras/datasets/pandas/sql_dataset.py | andmikey/kedro | 9b4e4135720609d44ffdf5248246fe805f0b5469 | [
"Apache-2.0"
] | null | null | null | kedro/extras/datasets/pandas/sql_dataset.py | andmikey/kedro | 9b4e4135720609d44ffdf5248246fe805f0b5469 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2019 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""``SQLDataSet`` to load and save data to a SQL backend."""
import copy
import re
from typing import Any, Dict, Optional
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.exc import NoSuchModuleError
from kedro.io.core import AbstractDataSet, DataSetError
__all__ = ["SQLTableDataSet", "SQLQueryDataSet"]
KNOWN_PIP_INSTALL = {
"psycopg2": "psycopg2",
"mysqldb": "mysqlclient",
"cx_Oracle": "cx_Oracle",
}
DRIVER_ERROR_MESSAGE = """
A module/driver is missing when connecting to your SQL server. SQLDataSet
supports SQLAlchemy drivers. Please refer to
https://docs.sqlalchemy.org/en/13/core/engines.html#supported-databases
for more information.
\n\n
"""
def _find_known_drivers(module_import_error: ImportError) -> Optional[str]:
"""Looks up known keywords in a ``ModuleNotFoundError`` so that it can
provide better guideline for the user.
Args:
module_import_error: Error raised while connecting to a SQL server.
Returns:
Instructions for installing missing driver. An empty string is
returned in case error is related to an unknown driver.
"""
# module errors contain string "No module name 'module_name'"
# we are trying to extract module_name surrounded by quotes here
res = re.findall(r"'(.*?)'", str(module_import_error.args[0]).lower())
# in case module import error does not match our expected pattern
# we have no recommendation
if not res:
return None
missing_module = res[0]
if KNOWN_PIP_INSTALL.get(missing_module):
return (
"You can also try installing missing driver with\n"
"\npip install {}".format(KNOWN_PIP_INSTALL.get(missing_module))
)
return None
def _get_missing_module_error(import_error: ImportError) -> DataSetError:
    """Wrap a driver ``ImportError`` into a ``DataSetError`` with guidance."""
    instruction = _find_known_drivers(import_error)
    if instruction is not None:
        # Known driver: tell the user exactly which package to pip-install.
        return DataSetError("{}{}".format(DRIVER_ERROR_MESSAGE, instruction))
    # Unknown driver: fall back to reporting the raw import error text.
    message = "{}Loading failed with error:\n\n{}".format(
        DRIVER_ERROR_MESSAGE, str(import_error)
    )
    return DataSetError(message)
def _get_sql_alchemy_missing_error() -> DataSetError:
    """Build the error reported when SQLAlchemy lacks the requested dialect."""
    message = (
        "The SQL dialect in your connection is not supported by "
        "SQLAlchemy. Please refer to "
        "https://docs.sqlalchemy.org/en/13/core/engines.html#supported-databases "
        "for more information."
    )
    return DataSetError(message)
class SQLTableDataSet(AbstractDataSet):
    """``SQLTableDataSet`` loads data from a SQL table and saves a pandas
    dataframe to a table. It uses ``pandas.DataFrame`` internally,
    so it supports all allowed pandas options on ``read_sql_table`` and
    ``to_sql`` methods. Since Pandas uses SQLAlchemy behind the scenes, when
    instantiating ``SQLTableDataSet`` one needs to pass a compatible connection
    string either in ``credentials`` (see the example code snippet below) or in
    ``load_args`` and ``save_args``. Connection string formats supported by
    SQLAlchemy can be found here:
    https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls

    ``SQLTableDataSet`` modifies the save parameters and stores
    the data with no index. This is designed to make load and save methods
    symmetric.


    Example:
    ::

        >>> from kedro.extras.datasets.pandas import SQLTableDataSet
        >>> import pandas as pd
        >>>
        >>> data = pd.DataFrame({"col1": [1, 2], "col2": [4, 5],
        >>>                      "col3": [5, 6]})
        >>> table_name = "table_a"
        >>> credentials = {
        >>>     "con": "postgresql://scott:tiger@localhost/test"
        >>> }
        >>> data_set = SQLTableDataSet(table_name=table_name,
        >>>                            credentials=credentials)
        >>>
        >>> data_set.save(data)
        >>> reloaded = data_set.load()
        >>>
        >>> assert data.equals(reloaded)

    """

    # Class-level defaults, deep-copied per instance so they are never shared.
    DEFAULT_LOAD_ARGS = {}  # type: Dict[str, Any]
    DEFAULT_SAVE_ARGS = {"index": False}  # type: Dict[str, Any]

    def __init__(
        self,
        table_name: str,
        credentials: Dict[str, Any],
        load_args: Dict[str, Any] = None,
        save_args: Dict[str, Any] = None,
    ) -> None:
        """Creates a new ``SQLTableDataSet``.

        Args:
            table_name: The table name to load or save data to. It
                overwrites ``name`` in ``save_args`` and ``table_name``
                parameters in ``load_args``.
            credentials: A dictionary with a ``SQLAlchemy`` connection string
                under the ``con`` key. It overwrites the ``con`` parameter in
                ``load_args`` and ``save_args`` in case it is provided.
            load_args: Provided to underlying pandas ``read_sql_table``
                function along with the connection string.
            save_args: Provided to underlying pandas ``to_sql`` function along
                with the connection string. It has ``index=False`` in the
                default parameters.

        Raises:
            DataSetError: When either ``table_name`` or ``con`` is empty.

        """
        if not table_name:
            raise DataSetError("`table_name` argument cannot be empty.")

        if not (credentials and "con" in credentials and credentials["con"]):
            raise DataSetError(
                "`con` argument cannot be empty. Please "
                "provide a SQLAlchemy connection string."
            )

        # Handle default load and save arguments: deep-copy the class-level
        # defaults, then layer any user-supplied arguments on top.
        self._load_args = copy.deepcopy(self.DEFAULT_LOAD_ARGS)
        if load_args is not None:
            self._load_args.update(load_args)
        self._save_args = copy.deepcopy(self.DEFAULT_SAVE_ARGS)
        if save_args is not None:
            self._save_args.update(save_args)

        # The table name and connection string always win over *_args values.
        self._load_args["table_name"] = table_name
        self._save_args["name"] = table_name
        self._load_args["con"] = self._save_args["con"] = credentials["con"]

    def _describe(self) -> Dict[str, Any]:
        # Strip the connection string (and the duplicated table name) from
        # the reported arguments.
        load_args = self._load_args.copy()
        save_args = self._save_args.copy()
        del load_args["table_name"]
        del load_args["con"]
        del save_args["name"]
        del save_args["con"]
        return dict(
            table_name=self._load_args["table_name"],
            load_args=load_args,
            save_args=save_args,
        )

    def _load(self) -> pd.DataFrame:
        try:
            return pd.read_sql_table(**self._load_args)
        except ImportError as import_error:
            # A database driver module is missing; report install guidance.
            raise _get_missing_module_error(import_error)
        except NoSuchModuleError:
            raise _get_sql_alchemy_missing_error()

    def _save(self, data: pd.DataFrame) -> None:
        try:
            data.to_sql(**self._save_args)
        except ImportError as import_error:
            raise _get_missing_module_error(import_error)
        except NoSuchModuleError:
            raise _get_sql_alchemy_missing_error()

    def _exists(self) -> bool:
        eng = create_engine(self._load_args["con"])
        try:
            schema = self._load_args.get("schema", None)
            return self._load_args["table_name"] in eng.table_names(schema)
        finally:
            # Dispose even if table_names() raises, so the connection pool
            # is never leaked.
            eng.dispose()
class SQLQueryDataSet(AbstractDataSet):
    """``SQLQueryDataSet`` loads data from a provided SQL query. It
    uses ``pandas.DataFrame`` internally, so it supports all allowed
    pandas options on ``read_sql_query``. Since Pandas uses SQLAlchemy behind
    the scenes, when instantiating ``SQLQueryDataSet`` one needs to pass
    a compatible connection string either in ``credentials`` (see the example
    code snippet below) or in ``load_args``. Connection string formats supported
    by SQLAlchemy can be found here:
    https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls

    It does not support save method so it is a read only data set.
    To save data to a SQL server use ``SQLTableDataSet``.


    Example:
    ::

        >>> from kedro.extras.datasets.pandas import SQLQueryDataSet
        >>> import pandas as pd
        >>>
        >>> sql = "SELECT * FROM table_a"
        >>> credentials = {
        >>>     "con": "postgresql://scott:tiger@localhost/test"
        >>> }
        >>> data_set = SQLQueryDataSet(sql=sql,
        >>>                            credentials=credentials)
        >>>
        >>> sql_data = data_set.load()

    """

    # Same default-handling pattern as ``SQLTableDataSet``: class-level
    # defaults deep-copied per instance.
    DEFAULT_LOAD_ARGS = {}  # type: Dict[str, Any]

    def __init__(
        self, sql: str, credentials: Dict[str, Any], load_args: Dict[str, Any] = None
    ) -> None:
        """Creates a new ``SQLQueryDataSet``.

        Args:
            sql: The sql query statement.
            credentials: A dictionary with a ``SQLAlchemy`` connection string
                under the ``con`` key. It overwrites the ``con`` parameter in
                ``load_args`` in case it is provided.
            load_args: Provided to underlying pandas ``read_sql_query``
                function along with the connection string.

        Raises:
            DataSetError: When either ``sql`` or ``con`` parameters is empty.

        """
        if not sql:
            raise DataSetError(
                "`sql` argument cannot be empty. Please provide a sql query"
            )

        if not (credentials and "con" in credentials and credentials["con"]):
            raise DataSetError(
                "`con` argument cannot be empty. Please "
                "provide a SQLAlchemy connection string."
            )

        # Deep-copy defaults, then layer user-supplied load arguments on top
        # (consistent with ``SQLTableDataSet``).
        self._load_args = copy.deepcopy(self.DEFAULT_LOAD_ARGS)
        if load_args is not None:
            self._load_args.update(load_args)

        # The query and the connection string always win over ``load_args``.
        self._load_args["sql"] = sql
        self._load_args["con"] = credentials["con"]

    def _describe(self) -> Dict[str, Any]:
        # Hide the connection string from the description.
        load_args = self._load_args.copy()
        del load_args["sql"]
        del load_args["con"]
        return dict(sql=self._load_args["sql"], load_args=load_args)

    def _load(self) -> pd.DataFrame:
        try:
            return pd.read_sql_query(**self._load_args)
        except ImportError as import_error:
            # A database driver module is missing; report install guidance.
            raise _get_missing_module_error(import_error)
        except NoSuchModuleError:
            raise _get_sql_alchemy_missing_error()

    def _save(self, data: pd.DataFrame) -> None:
        # This data set is read-only by design.
        raise DataSetError("`save` is not supported on SQLQueryDataSet")
| 38.614943 | 99 | 0.641985 |
import copy
import re
from typing import Any, Dict, Optional
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.exc import NoSuchModuleError
from kedro.io.core import AbstractDataSet, DataSetError
__all__ = ["SQLTableDataSet", "SQLQueryDataSet"]
KNOWN_PIP_INSTALL = {
"psycopg2": "psycopg2",
"mysqldb": "mysqlclient",
"cx_Oracle": "cx_Oracle",
}
DRIVER_ERROR_MESSAGE = """
A module/driver is missing when connecting to your SQL server. SQLDataSet
supports SQLAlchemy drivers. Please refer to
https://docs.sqlalchemy.org/en/13/core/engines.html#supported-databases
for more information.
\n\n
"""
def _find_known_drivers(module_import_error: ImportError) -> Optional[str]:
res = re.findall(r"'(.*?)'", str(module_import_error.args[0]).lower())
if not res:
return None
missing_module = res[0]
if KNOWN_PIP_INSTALL.get(missing_module):
return (
"You can also try installing missing driver with\n"
"\npip install {}".format(KNOWN_PIP_INSTALL.get(missing_module))
)
return None
def _get_missing_module_error(import_error: ImportError) -> DataSetError:
missing_module_instruction = _find_known_drivers(import_error)
if missing_module_instruction is None:
return DataSetError(
"{}Loading failed with error:\n\n{}".format(
DRIVER_ERROR_MESSAGE, str(import_error)
)
)
return DataSetError("{}{}".format(DRIVER_ERROR_MESSAGE, missing_module_instruction))
def _get_sql_alchemy_missing_error() -> DataSetError:
return DataSetError(
"The SQL dialect in your connection is not supported by "
"SQLAlchemy. Please refer to "
"https://docs.sqlalchemy.org/en/13/core/engines.html#supported-databases "
"for more information."
)
class SQLTableDataSet(AbstractDataSet):
DEFAULT_LOAD_ARGS = {}
DEFAULT_SAVE_ARGS = {"index": False}
def _describe(self) -> Dict[str, Any]:
load_args = self._load_args.copy()
save_args = self._save_args.copy()
del load_args["table_name"]
del load_args["con"]
del save_args["name"]
del save_args["con"]
return dict(
table_name=self._load_args["table_name"],
load_args=load_args,
save_args=save_args,
)
def __init__(
self,
table_name: str,
credentials: Dict[str, Any],
load_args: Dict[str, Any] = None,
save_args: Dict[str, Any] = None,
) -> None:
if not table_name:
raise DataSetError("`table_name` argument cannot be empty.")
if not (credentials and "con" in credentials and credentials["con"]):
raise DataSetError(
"`con` argument cannot be empty. Please "
"provide a SQLAlchemy connection string."
)
self._load_args = copy.deepcopy(self.DEFAULT_LOAD_ARGS)
if load_args is not None:
self._load_args.update(load_args)
self._save_args = copy.deepcopy(self.DEFAULT_SAVE_ARGS)
if save_args is not None:
self._save_args.update(save_args)
self._load_args["table_name"] = table_name
self._save_args["name"] = table_name
self._load_args["con"] = self._save_args["con"] = credentials["con"]
def _load(self) -> pd.DataFrame:
try:
return pd.read_sql_table(**self._load_args)
except ImportError as import_error:
raise _get_missing_module_error(import_error)
except NoSuchModuleError:
raise _get_sql_alchemy_missing_error()
def _save(self, data: pd.DataFrame) -> None:
try:
data.to_sql(**self._save_args)
except ImportError as import_error:
raise _get_missing_module_error(import_error)
except NoSuchModuleError:
raise _get_sql_alchemy_missing_error()
def _exists(self) -> bool:
eng = create_engine(self._load_args["con"])
schema = self._load_args.get("schema", None)
exists = self._load_args["table_name"] in eng.table_names(schema)
eng.dispose()
return exists
class SQLQueryDataSet(AbstractDataSet):
def _describe(self) -> Dict[str, Any]:
load_args = self._load_args.copy()
del load_args["sql"]
del load_args["con"]
return dict(sql=self._load_args["sql"], load_args=load_args)
def __init__(
self, sql: str, credentials: Dict[str, Any], load_args: Dict[str, Any] = None
) -> None:
if not sql:
raise DataSetError(
"`sql` argument cannot be empty. Please provide a sql query"
)
if not (credentials and "con" in credentials and credentials["con"]):
raise DataSetError(
"`con` argument cannot be empty. Please "
"provide a SQLAlchemy connection string."
)
default_load_args = {}
self._load_args = (
{**default_load_args, **load_args}
if load_args is not None
else default_load_args
)
self._load_args["sql"] = sql
self._load_args["con"] = credentials["con"]
def _load(self) -> pd.DataFrame:
try:
return pd.read_sql_query(**self._load_args)
except ImportError as import_error:
raise _get_missing_module_error(import_error)
except NoSuchModuleError:
raise _get_sql_alchemy_missing_error()
def _save(self, data: pd.DataFrame) -> None:
raise DataSetError("`save` is not supported on SQLQueryDataSet")
| true | true |
f7fd26c7750a60cb2c4c1b6ee857bd61d19fca4b | 4,044 | py | Python | util/test/tests/Vulkan/VK_Spec_Constants.py | songtm/renderdoc | 7533c6b7ac7cac7cfab2d1a1ddc011c693202a47 | [
"MIT"
] | 1 | 2019-11-14T08:52:26.000Z | 2019-11-14T08:52:26.000Z | util/test/tests/Vulkan/VK_Spec_Constants.py | songtm/renderdoc | 7533c6b7ac7cac7cfab2d1a1ddc011c693202a47 | [
"MIT"
] | null | null | null | util/test/tests/Vulkan/VK_Spec_Constants.py | songtm/renderdoc | 7533c6b7ac7cac7cfab2d1a1ddc011c693202a47 | [
"MIT"
] | null | null | null | import renderdoc as rd
import rdtest
class VK_Spec_Constants(rdtest.TestCase):
    # Name of the demo capture this test replays.
    demos_test_name = 'VK_Spec_Constants'
    def check_capture(self):
        """Verify four draws whose pixel shader is specialised with 0..3 colors.

        For each draw: check shader reflection (one buffer-backed uniform
        block plus one spec-constant block), the uniform buffer contents,
        the specialisation constant value, and the rendered pixel.
        """
        # find the first draw
        draw = self.find_draw("Draw")
        # We should have 4 draws, with spec constant values 0, 1, 2, 3
        for num_colors in range(4):
            self.check(draw is not None)
            self.controller.SetFrameEvent(draw.eventId, False)
            pipe: rd.PipeState = self.controller.GetPipelineState()
            shader: rd.ShaderReflection = pipe.GetShaderReflection(rd.ShaderStage.Pixel)
            # Exactly two constant blocks: the buffer-backed uniform buffer
            # and the (non-buffer-backed) specialisation constants.
            self.check(len(shader.constantBlocks) == 2)
            self.check(shader.constantBlocks[0].bufferBacked)
            self.check(not shader.constantBlocks[1].bufferBacked)
            self.check(len(shader.constantBlocks[1].variables) == 1)
            # The uniform buffer variable should be an array of num_colors+1 elements
            array_len = shader.constantBlocks[0].variables[0].type.descriptor.elements
            if not rdtest.value_compare(array_len, num_colors+1):
                raise rdtest.TestFailureException("CBuffer variable is array of {}, not {}".format(array_len, num_colors+1))
            if num_colors > 0:
                # Fetch uniform buffer (set 0) contents for the pixel stage.
                cbuf: rd.BoundCBuffer = pipe.GetConstantBuffer(rd.ShaderStage.Pixel, 0, 0)
                cb_vars = self.controller.GetCBufferVariableContents(pipe.GetGraphicsPipelineObject(),
                                                                     pipe.GetShader(rd.ShaderStage.Pixel),
                                                                     pipe.GetShaderEntryPoint(rd.ShaderStage.Pixel), 0,
                                                                     cbuf.resourceId, cbuf.byteOffset)
                self.check(len(cb_vars) == 1)
                if not rdtest.value_compare(len(cb_vars[0].members), num_colors+1):
                    raise rdtest.TestFailureException("CBuffer variable is array of {}, not {}".format(len(cb_vars[0].members), num_colors+1))
                # Element col should be the col-th basis color (1.0 in one
                # channel, 0.0 elsewhere).
                for col in range(num_colors):
                    expected = [0.0, 0.0, 0.0, 0.0]
                    expected[col] = 1.0
                    val = [i for i in cb_vars[0].members[col].value.fv[0:4]]
                    if not rdtest.value_compare(val, expected):
                        raise rdtest.TestFailureException("Cbuffer[{}] value {} doesn't match expectation {}".format(col, val, expected))
                rdtest.log.success("Draw with {} colors uniform buffer is as expected".format(num_colors))
            # Fetch the spec-constant block (slot 1) and check its value.
            cbuf: rd.BoundCBuffer = pipe.GetConstantBuffer(rd.ShaderStage.Pixel, 1, 0)
            cb_vars = self.controller.GetCBufferVariableContents(pipe.GetGraphicsPipelineObject(),
                                                                 pipe.GetShader(rd.ShaderStage.Pixel),
                                                                 pipe.GetShaderEntryPoint(rd.ShaderStage.Pixel), 1,
                                                                 cbuf.resourceId, cbuf.byteOffset)
            self.check(len(cb_vars) == 1)
            if not rdtest.value_compare(cb_vars[0].value.i.x, num_colors):
                raise rdtest.TestFailureException("Spec constant is {}, not {}".format(cb_vars[0].value.i.x, num_colors))
            rdtest.log.success("Draw with {} colors specialisation constant is as expected".format(num_colors))
            view = pipe.GetViewport(0)
            # the first num_colors channels should be 1.0, the rest 0.0
            # (alpha is always 1.0)
            expected = [0.0, 0.0, 0.0, 1.0]
            for col in range(num_colors):
                expected[col] += 1.0
            # Sample the centre of the viewport
            self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, int(view.x) + int(view.width / 2), int(view.height / 2), expected)
            rdtest.log.success("Draw with {} colors picked value is as expected".format(num_colors))
            draw = draw.next
| 48.142857 | 142 | 0.573442 | import renderdoc as rd
import rdtest
class VK_Spec_Constants(rdtest.TestCase):
demos_test_name = 'VK_Spec_Constants'
def check_capture(self):
draw = self.find_draw("Draw")
for num_colors in range(4):
self.check(draw is not None)
self.controller.SetFrameEvent(draw.eventId, False)
pipe: rd.PipeState = self.controller.GetPipelineState()
shader: rd.ShaderReflection = pipe.GetShaderReflection(rd.ShaderStage.Pixel)
self.check(len(shader.constantBlocks) == 2)
self.check(shader.constantBlocks[0].bufferBacked)
self.check(not shader.constantBlocks[1].bufferBacked)
self.check(len(shader.constantBlocks[1].variables) == 1)
array_len = shader.constantBlocks[0].variables[0].type.descriptor.elements
if not rdtest.value_compare(array_len, num_colors+1):
raise rdtest.TestFailureException("CBuffer variable is array of {}, not {}".format(array_len, num_colors+1))
if num_colors > 0:
cbuf: rd.BoundCBuffer = pipe.GetConstantBuffer(rd.ShaderStage.Pixel, 0, 0)
cb_vars = self.controller.GetCBufferVariableContents(pipe.GetGraphicsPipelineObject(),
pipe.GetShader(rd.ShaderStage.Pixel),
pipe.GetShaderEntryPoint(rd.ShaderStage.Pixel), 0,
cbuf.resourceId, cbuf.byteOffset)
self.check(len(cb_vars) == 1)
if not rdtest.value_compare(len(cb_vars[0].members), num_colors+1):
raise rdtest.TestFailureException("CBuffer variable is array of {}, not {}".format(len(cb_vars[0].members), num_colors+1))
for col in range(num_colors):
expected = [0.0, 0.0, 0.0, 0.0]
expected[col] = 1.0
val = [i for i in cb_vars[0].members[col].value.fv[0:4]]
if not rdtest.value_compare(val, expected):
raise rdtest.TestFailureException("Cbuffer[{}] value {} doesn't match expectation {}".format(col, val, expected))
rdtest.log.success("Draw with {} colors uniform buffer is as expected".format(num_colors))
cbuf: rd.BoundCBuffer = pipe.GetConstantBuffer(rd.ShaderStage.Pixel, 1, 0)
cb_vars = self.controller.GetCBufferVariableContents(pipe.GetGraphicsPipelineObject(),
pipe.GetShader(rd.ShaderStage.Pixel),
pipe.GetShaderEntryPoint(rd.ShaderStage.Pixel), 1,
cbuf.resourceId, cbuf.byteOffset)
self.check(len(cb_vars) == 1)
if not rdtest.value_compare(cb_vars[0].value.i.x, num_colors):
raise rdtest.TestFailureException("Spec constant is {}, not {}".format(cb_vars[0].value.i.x, num_colors))
rdtest.log.success("Draw with {} colors specialisation constant is as expected".format(num_colors))
view = pipe.GetViewport(0)
# the first num_colors components should be 0.6, the rest should be 0.1 (alpha is always 1.0)
expected = [0.0, 0.0, 0.0, 1.0]
for col in range(num_colors):
expected[col] += 1.0
# Sample the centre of the viewport
self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, int(view.x) + int(view.width / 2), int(view.height / 2), expected)
rdtest.log.success("Draw with {} colors picked value is as expected".format(num_colors))
draw = draw.next
| true | true |
f7fd277f580a32bad850ed6a6ba1d434b9a0005b | 3,095 | py | Python | src/sentry/api/endpoints/sentry_app_interaction.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/api/endpoints/sentry_app_interaction.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/api/endpoints/sentry_app_interaction.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from rest_framework.response import Response
import logging
from sentry import tsdb
from sentry.api.base import StatsMixin
from sentry.api.bases import SentryAppBaseEndpoint, SentryAppStatsPermission
logger = logging.getLogger(__name__)
TSDB_MODELS = [tsdb.models.sentry_app_viewed, tsdb.models.sentry_app_component_interacted]
COMPONENT_TYPES = ["stacktrace-link", "issue-link"]
def get_component_interaction_key(sentry_app, component_type):
return "%s:%s" % (sentry_app.slug, component_type)
class SentryAppInteractionEndpoint(SentryAppBaseEndpoint, StatsMixin):
permission_classes = (SentryAppStatsPermission,)
def get(self, request, sentry_app):
"""
:qparam float since
:qparam float until
:qparam resolution - optional
"""
views = tsdb.get_range(
model=tsdb.models.sentry_app_viewed, keys=[sentry_app.id], **self._parse_args(request)
)[sentry_app.id]
component_interactions = tsdb.get_range(
model=tsdb.models.sentry_app_component_interacted,
keys=[
get_component_interaction_key(sentry_app, component.type)
for component in sentry_app.components.all()
],
**self._parse_args(request)
)
return Response(
{
"views": views,
"componentInteractions": {
k.split(":")[1]: v for k, v in component_interactions.items()
},
}
)
def post(self, request, sentry_app):
"""
Increment a TSDB metric relating to Sentry App interactions
:param string tsdbField the name of the TSDB model to increment
:param string componentType required for 'sentry_app_component_interacted' metric
"""
# Request should have identifier field stored in TSDBModel
tsdb_field = request.data.get("tsdbField", "")
model = getattr(tsdb.models, tsdb_field, None)
if model is None or model not in TSDB_MODELS:
return Response(
{
"detail": "The tsdbField must be one of: sentry_app_viewed, sentry_app_component_interacted"
},
status=400,
)
if model == tsdb.models.sentry_app_component_interacted:
component_type = request.data.get("componentType", None)
if component_type is None or component_type not in COMPONENT_TYPES:
return Response(
{
"detail": "The field componentType is required and must be one of %s"
% (COMPONENT_TYPES)
},
status=400,
)
key = get_component_interaction_key(sentry_app, request.data["componentType"])
elif model == tsdb.models.sentry_app_viewed:
key = sentry_app.id
# Timestamp is automatically created
tsdb.incr(model, key)
return Response({}, status=201)
| 34.010989 | 112 | 0.615832 | from __future__ import absolute_import
from rest_framework.response import Response
import logging
from sentry import tsdb
from sentry.api.base import StatsMixin
from sentry.api.bases import SentryAppBaseEndpoint, SentryAppStatsPermission
logger = logging.getLogger(__name__)
TSDB_MODELS = [tsdb.models.sentry_app_viewed, tsdb.models.sentry_app_component_interacted]
COMPONENT_TYPES = ["stacktrace-link", "issue-link"]
def get_component_interaction_key(sentry_app, component_type):
return "%s:%s" % (sentry_app.slug, component_type)
class SentryAppInteractionEndpoint(SentryAppBaseEndpoint, StatsMixin):
permission_classes = (SentryAppStatsPermission,)
def get(self, request, sentry_app):
views = tsdb.get_range(
model=tsdb.models.sentry_app_viewed, keys=[sentry_app.id], **self._parse_args(request)
)[sentry_app.id]
component_interactions = tsdb.get_range(
model=tsdb.models.sentry_app_component_interacted,
keys=[
get_component_interaction_key(sentry_app, component.type)
for component in sentry_app.components.all()
],
**self._parse_args(request)
)
return Response(
{
"views": views,
"componentInteractions": {
k.split(":")[1]: v for k, v in component_interactions.items()
},
}
)
def post(self, request, sentry_app):
tsdb_field = request.data.get("tsdbField", "")
model = getattr(tsdb.models, tsdb_field, None)
if model is None or model not in TSDB_MODELS:
return Response(
{
"detail": "The tsdbField must be one of: sentry_app_viewed, sentry_app_component_interacted"
},
status=400,
)
if model == tsdb.models.sentry_app_component_interacted:
component_type = request.data.get("componentType", None)
if component_type is None or component_type not in COMPONENT_TYPES:
return Response(
{
"detail": "The field componentType is required and must be one of %s"
% (COMPONENT_TYPES)
},
status=400,
)
key = get_component_interaction_key(sentry_app, request.data["componentType"])
elif model == tsdb.models.sentry_app_viewed:
key = sentry_app.id
tsdb.incr(model, key)
return Response({}, status=201)
| true | true |
f7fd279aeef51f05dc4d9169a5bc770a85137db0 | 2,128 | py | Python | daemon/pidfile.py | khorark/hostingMonitor | d7d401f164185a0499cfcc1312af809e4a52fb6a | [
"PSF-2.0"
] | 15 | 2019-02-25T09:21:28.000Z | 2022-02-13T02:43:36.000Z | daemon/pidfile.py | khorark/hostingMonitor | d7d401f164185a0499cfcc1312af809e4a52fb6a | [
"PSF-2.0"
] | 1 | 2018-07-19T11:17:28.000Z | 2018-08-04T05:40:03.000Z | daemon/pidfile.py | khorark/hostingMonitor | d7d401f164185a0499cfcc1312af809e4a52fb6a | [
"PSF-2.0"
] | 11 | 2019-12-26T15:24:35.000Z | 2022-03-04T03:26:16.000Z | # -*- coding: utf-8 -*-
# daemon/pidfile.py
# Part of ‘python-daemon’, an implementation of PEP 3143.
#
# Copyright © 2008–2016 Ben Finney <ben+python@benfinney.id.au>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Apache License, version 2.0 as published by the
# Apache Software Foundation.
# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
""" Lockfile behaviour implemented via Unix PID files.
"""
from __future__ import (absolute_import, unicode_literals)
from lockfile.pidlockfile import PIDLockFile
class TimeoutPIDLockFile(PIDLockFile, object):
    """ Lockfile with default timeout, implemented as a Unix PID file.

        Behaves exactly like ``PIDLockFile`` except that the timeout
        used by `acquire` can be fixed once, at construction time,
        through the `acquire_timeout` argument instead of being
        supplied on every call.

        """

    def __init__(self, path, acquire_timeout=None, *args, **kwargs):
        """ Set up the parameters of a TimeoutPIDLockFile.

            :param path: Filesystem path to the PID file.
            :param acquire_timeout: Timeout value that `acquire` will
                use whenever its caller does not supply one.
            :return: ``None``.

            """
        self.acquire_timeout = acquire_timeout
        super(TimeoutPIDLockFile, self).__init__(path, *args, **kwargs)

    def acquire(self, timeout=None, *args, **kwargs):
        """ Acquire the lock.

            :param timeout: Timeout for this acquisition attempt; when
                ``None``, the instance's `acquire_timeout` is used
                instead.
            :return: ``None``.

            Delegates to `PIDLockFile.acquire` after resolving the
            effective timeout; see that method for the meaning of the
            timeout values.

            """
        effective_timeout = self.acquire_timeout if timeout is None else timeout
        super(TimeoutPIDLockFile, self).acquire(effective_timeout, *args, **kwargs)
# Local variables:
# coding: utf-8
# mode: python
# End:
# vim: fileencoding=utf-8 filetype=python :
| 31.294118 | 77 | 0.646617 |
from __future__ import (absolute_import, unicode_literals)
from lockfile.pidlockfile import PIDLockFile
class TimeoutPIDLockFile(PIDLockFile, object):
def __init__(self, path, acquire_timeout=None, *args, **kwargs):
self.acquire_timeout = acquire_timeout
super(TimeoutPIDLockFile, self).__init__(path, *args, **kwargs)
def acquire(self, timeout=None, *args, **kwargs):
if timeout is None:
timeout = self.acquire_timeout
super(TimeoutPIDLockFile, self).acquire(timeout, *args, **kwargs)
| true | true |
f7fd27a12c8b0532d73202e650be75a041b17fe9 | 26,342 | py | Python | sdk/python/pulumi_azure_native/containerregistry/v20191201preview/registry.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/containerregistry/v20191201preview/registry.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/containerregistry/v20191201preview/registry.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['RegistryArgs', 'Registry']
@pulumi.input_type
class RegistryArgs:
    """Input property bag used to construct a `Registry` resource.

    NOTE(review): this class is emitted by the Pulumi SDK generator —
    regenerate it rather than editing by hand.
    """
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 sku: pulumi.Input['SkuArgs'],
                 admin_user_enabled: Optional[pulumi.Input[bool]] = None,
                 data_endpoint_enabled: Optional[pulumi.Input[bool]] = None,
                 encryption: Optional[pulumi.Input['EncryptionPropertyArgs']] = None,
                 identity: Optional[pulumi.Input['IdentityPropertiesArgs']] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 network_rule_bypass_options: Optional[pulumi.Input[Union[str, 'NetworkRuleBypassOptions']]] = None,
                 network_rule_set: Optional[pulumi.Input['NetworkRuleSetArgs']] = None,
                 policies: Optional[pulumi.Input['PoliciesArgs']] = None,
                 public_network_access: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]] = None,
                 registry_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a Registry resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group to which the container registry belongs.
        :param pulumi.Input['SkuArgs'] sku: The SKU of the container registry.
        :param pulumi.Input[bool] admin_user_enabled: The value that indicates whether the admin user is enabled.
        :param pulumi.Input[bool] data_endpoint_enabled: Enable a single data endpoint per region for serving data.
        :param pulumi.Input['EncryptionPropertyArgs'] encryption: The encryption settings of container registry.
        :param pulumi.Input['IdentityPropertiesArgs'] identity: The identity of the container registry.
        :param pulumi.Input[str] location: The location of the resource. This cannot be changed after the resource is created.
        :param pulumi.Input[Union[str, 'NetworkRuleBypassOptions']] network_rule_bypass_options: Whether to allow trusted Azure services to access a network restricted registry.
        :param pulumi.Input['NetworkRuleSetArgs'] network_rule_set: The network rule set for a container registry.
        :param pulumi.Input['PoliciesArgs'] policies: The policies for a container registry.
        :param pulumi.Input[Union[str, 'PublicNetworkAccess']] public_network_access: Whether or not public network access is allowed for the container registry.
        :param pulumi.Input[str] registry_name: The name of the container registry.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags of the resource.
        """
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "sku", sku)
        # Generator pattern below: fill the service-side default when the
        # caller passed None, then store only non-None values.  After a
        # default fill, the following not-None guard always passes.
        if admin_user_enabled is None:
            admin_user_enabled = False
        if admin_user_enabled is not None:
            pulumi.set(__self__, "admin_user_enabled", admin_user_enabled)
        if data_endpoint_enabled is not None:
            pulumi.set(__self__, "data_endpoint_enabled", data_endpoint_enabled)
        if encryption is not None:
            pulumi.set(__self__, "encryption", encryption)
        if identity is not None:
            pulumi.set(__self__, "identity", identity)
        if location is not None:
            pulumi.set(__self__, "location", location)
        # Default: trusted Azure services may bypass the network rules.
        if network_rule_bypass_options is None:
            network_rule_bypass_options = 'AzureServices'
        if network_rule_bypass_options is not None:
            pulumi.set(__self__, "network_rule_bypass_options", network_rule_bypass_options)
        if network_rule_set is not None:
            pulumi.set(__self__, "network_rule_set", network_rule_set)
        if policies is not None:
            pulumi.set(__self__, "policies", policies)
        # Default: the registry is reachable over the public network.
        if public_network_access is None:
            public_network_access = 'Enabled'
        if public_network_access is not None:
            pulumi.set(__self__, "public_network_access", public_network_access)
        if registry_name is not None:
            pulumi.set(__self__, "registry_name", registry_name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group to which the container registry belongs.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter
    def sku(self) -> pulumi.Input['SkuArgs']:
        """
        The SKU of the container registry.
        """
        return pulumi.get(self, "sku")
    @sku.setter
    def sku(self, value: pulumi.Input['SkuArgs']):
        pulumi.set(self, "sku", value)
    @property
    @pulumi.getter(name="adminUserEnabled")
    def admin_user_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        The value that indicates whether the admin user is enabled.
        """
        return pulumi.get(self, "admin_user_enabled")
    @admin_user_enabled.setter
    def admin_user_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "admin_user_enabled", value)
    @property
    @pulumi.getter(name="dataEndpointEnabled")
    def data_endpoint_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable a single data endpoint per region for serving data.
        """
        return pulumi.get(self, "data_endpoint_enabled")
    @data_endpoint_enabled.setter
    def data_endpoint_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "data_endpoint_enabled", value)
    @property
    @pulumi.getter
    def encryption(self) -> Optional[pulumi.Input['EncryptionPropertyArgs']]:
        """
        The encryption settings of container registry.
        """
        return pulumi.get(self, "encryption")
    @encryption.setter
    def encryption(self, value: Optional[pulumi.Input['EncryptionPropertyArgs']]):
        pulumi.set(self, "encryption", value)
    @property
    @pulumi.getter
    def identity(self) -> Optional[pulumi.Input['IdentityPropertiesArgs']]:
        """
        The identity of the container registry.
        """
        return pulumi.get(self, "identity")
    @identity.setter
    def identity(self, value: Optional[pulumi.Input['IdentityPropertiesArgs']]):
        pulumi.set(self, "identity", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The location of the resource. This cannot be changed after the resource is created.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter(name="networkRuleBypassOptions")
    def network_rule_bypass_options(self) -> Optional[pulumi.Input[Union[str, 'NetworkRuleBypassOptions']]]:
        """
        Whether to allow trusted Azure services to access a network restricted registry.
        """
        return pulumi.get(self, "network_rule_bypass_options")
    @network_rule_bypass_options.setter
    def network_rule_bypass_options(self, value: Optional[pulumi.Input[Union[str, 'NetworkRuleBypassOptions']]]):
        pulumi.set(self, "network_rule_bypass_options", value)
    @property
    @pulumi.getter(name="networkRuleSet")
    def network_rule_set(self) -> Optional[pulumi.Input['NetworkRuleSetArgs']]:
        """
        The network rule set for a container registry.
        """
        return pulumi.get(self, "network_rule_set")
    @network_rule_set.setter
    def network_rule_set(self, value: Optional[pulumi.Input['NetworkRuleSetArgs']]):
        pulumi.set(self, "network_rule_set", value)
    @property
    @pulumi.getter
    def policies(self) -> Optional[pulumi.Input['PoliciesArgs']]:
        """
        The policies for a container registry.
        """
        return pulumi.get(self, "policies")
    @policies.setter
    def policies(self, value: Optional[pulumi.Input['PoliciesArgs']]):
        pulumi.set(self, "policies", value)
    @property
    @pulumi.getter(name="publicNetworkAccess")
    def public_network_access(self) -> Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]]:
        """
        Whether or not public network access is allowed for the container registry.
        """
        return pulumi.get(self, "public_network_access")
    @public_network_access.setter
    def public_network_access(self, value: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]]):
        pulumi.set(self, "public_network_access", value)
    @property
    @pulumi.getter(name="registryName")
    def registry_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the container registry.
        """
        return pulumi.get(self, "registry_name")
    @registry_name.setter
    def registry_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "registry_name", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The tags of the resource.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class Registry(pulumi.CustomResource):
    """An Azure Container Registry resource (API version 2019-12-01-preview).

    NOTE(review): this class is emitted by the Pulumi SDK generator —
    regenerate it rather than editing by hand.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 admin_user_enabled: Optional[pulumi.Input[bool]] = None,
                 data_endpoint_enabled: Optional[pulumi.Input[bool]] = None,
                 encryption: Optional[pulumi.Input[pulumi.InputType['EncryptionPropertyArgs']]] = None,
                 identity: Optional[pulumi.Input[pulumi.InputType['IdentityPropertiesArgs']]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 network_rule_bypass_options: Optional[pulumi.Input[Union[str, 'NetworkRuleBypassOptions']]] = None,
                 network_rule_set: Optional[pulumi.Input[pulumi.InputType['NetworkRuleSetArgs']]] = None,
                 policies: Optional[pulumi.Input[pulumi.InputType['PoliciesArgs']]] = None,
                 public_network_access: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]] = None,
                 registry_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        An object that represents a container registry.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] admin_user_enabled: The value that indicates whether the admin user is enabled.
        :param pulumi.Input[bool] data_endpoint_enabled: Enable a single data endpoint per region for serving data.
        :param pulumi.Input[pulumi.InputType['EncryptionPropertyArgs']] encryption: The encryption settings of container registry.
        :param pulumi.Input[pulumi.InputType['IdentityPropertiesArgs']] identity: The identity of the container registry.
        :param pulumi.Input[str] location: The location of the resource. This cannot be changed after the resource is created.
        :param pulumi.Input[Union[str, 'NetworkRuleBypassOptions']] network_rule_bypass_options: Whether to allow trusted Azure services to access a network restricted registry.
        :param pulumi.Input[pulumi.InputType['NetworkRuleSetArgs']] network_rule_set: The network rule set for a container registry.
        :param pulumi.Input[pulumi.InputType['PoliciesArgs']] policies: The policies for a container registry.
        :param pulumi.Input[Union[str, 'PublicNetworkAccess']] public_network_access: Whether or not public network access is allowed for the container registry.
        :param pulumi.Input[str] registry_name: The name of the container registry.
        :param pulumi.Input[str] resource_group_name: The name of the resource group to which the container registry belongs.
        :param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The SKU of the container registry.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags of the resource.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: RegistryArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        An object that represents a container registry.
        :param str resource_name: The name of the resource.
        :param RegistryArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a single
        # RegistryArgs bag or individual keyword properties.
        resource_args, opts = _utilities.get_resource_args_opts(RegistryArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 admin_user_enabled: Optional[pulumi.Input[bool]] = None,
                 data_endpoint_enabled: Optional[pulumi.Input[bool]] = None,
                 encryption: Optional[pulumi.Input[pulumi.InputType['EncryptionPropertyArgs']]] = None,
                 identity: Optional[pulumi.Input[pulumi.InputType['IdentityPropertiesArgs']]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 network_rule_bypass_options: Optional[pulumi.Input[Union[str, 'NetworkRuleBypassOptions']]] = None,
                 network_rule_set: Optional[pulumi.Input[pulumi.InputType['NetworkRuleSetArgs']]] = None,
                 policies: Optional[pulumi.Input[pulumi.InputType['PoliciesArgs']]] = None,
                 public_network_access: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]] = None,
                 registry_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means "adopt an existing resource" (see get());
        # building a fresh property bag is only valid when creating.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = RegistryArgs.__new__(RegistryArgs)
            # Fill service-side defaults the same way RegistryArgs.__init__ does.
            if admin_user_enabled is None:
                admin_user_enabled = False
            __props__.__dict__["admin_user_enabled"] = admin_user_enabled
            __props__.__dict__["data_endpoint_enabled"] = data_endpoint_enabled
            __props__.__dict__["encryption"] = encryption
            __props__.__dict__["identity"] = identity
            __props__.__dict__["location"] = location
            if network_rule_bypass_options is None:
                network_rule_bypass_options = 'AzureServices'
            __props__.__dict__["network_rule_bypass_options"] = network_rule_bypass_options
            __props__.__dict__["network_rule_set"] = network_rule_set
            __props__.__dict__["policies"] = policies
            if public_network_access is None:
                public_network_access = 'Enabled'
            __props__.__dict__["public_network_access"] = public_network_access
            __props__.__dict__["registry_name"] = registry_name
            # Required properties are only enforced on creation (not when
            # adopting by URN).
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            if sku is None and not opts.urn:
                raise TypeError("Missing required property 'sku'")
            __props__.__dict__["sku"] = sku
            __props__.__dict__["tags"] = tags
            # Output-only properties start as None and are resolved by the engine.
            __props__.__dict__["creation_date"] = None
            __props__.__dict__["data_endpoint_host_names"] = None
            __props__.__dict__["login_server"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["private_endpoint_connections"] = None
            __props__.__dict__["provisioning_state"] = None
            __props__.__dict__["status"] = None
            __props__.__dict__["system_data"] = None
            __props__.__dict__["type"] = None
        # Aliases let state recorded under other API versions / legacy
        # provider names resolve to this resource without replacement.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:containerregistry/v20191201preview:Registry"), pulumi.Alias(type_="azure-native:containerregistry:Registry"), pulumi.Alias(type_="azure-nextgen:containerregistry:Registry"), pulumi.Alias(type_="azure-native:containerregistry/v20160627preview:Registry"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20160627preview:Registry"), pulumi.Alias(type_="azure-native:containerregistry/v20170301:Registry"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20170301:Registry"), pulumi.Alias(type_="azure-native:containerregistry/v20170601preview:Registry"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20170601preview:Registry"), pulumi.Alias(type_="azure-native:containerregistry/v20171001:Registry"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20171001:Registry"), pulumi.Alias(type_="azure-native:containerregistry/v20190501:Registry"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20190501:Registry"), pulumi.Alias(type_="azure-native:containerregistry/v20201101preview:Registry"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20201101preview:Registry"), pulumi.Alias(type_="azure-native:containerregistry/v20210601preview:Registry"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20210601preview:Registry")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Registry, __self__).__init__(
            'azure-native:containerregistry/v20191201preview:Registry',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Registry':
        """
        Get an existing Registry resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start as None; the engine populates them from the
        # provider when the resource is read back by id.
        __props__ = RegistryArgs.__new__(RegistryArgs)
        __props__.__dict__["admin_user_enabled"] = None
        __props__.__dict__["creation_date"] = None
        __props__.__dict__["data_endpoint_enabled"] = None
        __props__.__dict__["data_endpoint_host_names"] = None
        __props__.__dict__["encryption"] = None
        __props__.__dict__["identity"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["login_server"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["network_rule_bypass_options"] = None
        __props__.__dict__["network_rule_set"] = None
        __props__.__dict__["policies"] = None
        __props__.__dict__["private_endpoint_connections"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["public_network_access"] = None
        __props__.__dict__["sku"] = None
        __props__.__dict__["status"] = None
        __props__.__dict__["system_data"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return Registry(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="adminUserEnabled")
    def admin_user_enabled(self) -> pulumi.Output[Optional[bool]]:
        """
        The value that indicates whether the admin user is enabled.
        """
        return pulumi.get(self, "admin_user_enabled")
    @property
    @pulumi.getter(name="creationDate")
    def creation_date(self) -> pulumi.Output[str]:
        """
        The creation date of the container registry in ISO8601 format.
        """
        return pulumi.get(self, "creation_date")
    @property
    @pulumi.getter(name="dataEndpointEnabled")
    def data_endpoint_enabled(self) -> pulumi.Output[Optional[bool]]:
        """
        Enable a single data endpoint per region for serving data.
        """
        return pulumi.get(self, "data_endpoint_enabled")
    @property
    @pulumi.getter(name="dataEndpointHostNames")
    def data_endpoint_host_names(self) -> pulumi.Output[Sequence[str]]:
        """
        List of host names that will serve data when dataEndpointEnabled is true.
        """
        return pulumi.get(self, "data_endpoint_host_names")
    @property
    @pulumi.getter
    def encryption(self) -> pulumi.Output[Optional['outputs.EncryptionPropertyResponse']]:
        """
        The encryption settings of container registry.
        """
        return pulumi.get(self, "encryption")
    @property
    @pulumi.getter
    def identity(self) -> pulumi.Output[Optional['outputs.IdentityPropertiesResponse']]:
        """
        The identity of the container registry.
        """
        return pulumi.get(self, "identity")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        The location of the resource. This cannot be changed after the resource is created.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter(name="loginServer")
    def login_server(self) -> pulumi.Output[str]:
        """
        The URL that can be used to log into the container registry.
        """
        return pulumi.get(self, "login_server")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="networkRuleBypassOptions")
    def network_rule_bypass_options(self) -> pulumi.Output[Optional[str]]:
        """
        Whether to allow trusted Azure services to access a network restricted registry.
        """
        return pulumi.get(self, "network_rule_bypass_options")
    @property
    @pulumi.getter(name="networkRuleSet")
    def network_rule_set(self) -> pulumi.Output[Optional['outputs.NetworkRuleSetResponse']]:
        """
        The network rule set for a container registry.
        """
        return pulumi.get(self, "network_rule_set")
    @property
    @pulumi.getter
    def policies(self) -> pulumi.Output[Optional['outputs.PoliciesResponse']]:
        """
        The policies for a container registry.
        """
        return pulumi.get(self, "policies")
    @property
    @pulumi.getter(name="privateEndpointConnections")
    def private_endpoint_connections(self) -> pulumi.Output[Sequence['outputs.PrivateEndpointConnectionResponse']]:
        """
        List of private endpoint connections for a container registry.
        """
        return pulumi.get(self, "private_endpoint_connections")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the container registry at the time the operation was called.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="publicNetworkAccess")
    def public_network_access(self) -> pulumi.Output[Optional[str]]:
        """
        Whether or not public network access is allowed for the container registry.
        """
        return pulumi.get(self, "public_network_access")
    @property
    @pulumi.getter
    def sku(self) -> pulumi.Output['outputs.SkuResponse']:
        """
        The SKU of the container registry.
        """
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter
    def status(self) -> pulumi.Output['outputs.StatusResponse']:
        """
        The status of the container registry at the time the operation was called.
        """
        return pulumi.get(self, "status")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
        """
        Metadata pertaining to creation and last modification of the resource.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        The tags of the resource.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the resource.
        """
        return pulumi.get(self, "type")
| 46.955437 | 1,346 | 0.667451 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['RegistryArgs', 'Registry']
@pulumi.input_type
class RegistryArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
sku: pulumi.Input['SkuArgs'],
admin_user_enabled: Optional[pulumi.Input[bool]] = None,
data_endpoint_enabled: Optional[pulumi.Input[bool]] = None,
encryption: Optional[pulumi.Input['EncryptionPropertyArgs']] = None,
identity: Optional[pulumi.Input['IdentityPropertiesArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
network_rule_bypass_options: Optional[pulumi.Input[Union[str, 'NetworkRuleBypassOptions']]] = None,
network_rule_set: Optional[pulumi.Input['NetworkRuleSetArgs']] = None,
policies: Optional[pulumi.Input['PoliciesArgs']] = None,
public_network_access: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]] = None,
registry_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "sku", sku)
if admin_user_enabled is None:
admin_user_enabled = False
if admin_user_enabled is not None:
pulumi.set(__self__, "admin_user_enabled", admin_user_enabled)
if data_endpoint_enabled is not None:
pulumi.set(__self__, "data_endpoint_enabled", data_endpoint_enabled)
if encryption is not None:
pulumi.set(__self__, "encryption", encryption)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if location is not None:
pulumi.set(__self__, "location", location)
if network_rule_bypass_options is None:
network_rule_bypass_options = 'AzureServices'
if network_rule_bypass_options is not None:
pulumi.set(__self__, "network_rule_bypass_options", network_rule_bypass_options)
if network_rule_set is not None:
pulumi.set(__self__, "network_rule_set", network_rule_set)
if policies is not None:
pulumi.set(__self__, "policies", policies)
if public_network_access is None:
public_network_access = 'Enabled'
if public_network_access is not None:
pulumi.set(__self__, "public_network_access", public_network_access)
if registry_name is not None:
pulumi.set(__self__, "registry_name", registry_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def sku(self) -> pulumi.Input['SkuArgs']:
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: pulumi.Input['SkuArgs']):
pulumi.set(self, "sku", value)
@property
@pulumi.getter(name="adminUserEnabled")
def admin_user_enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "admin_user_enabled")
@admin_user_enabled.setter
def admin_user_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "admin_user_enabled", value)
@property
@pulumi.getter(name="dataEndpointEnabled")
def data_endpoint_enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "data_endpoint_enabled")
@data_endpoint_enabled.setter
def data_endpoint_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "data_endpoint_enabled", value)
@property
@pulumi.getter
def encryption(self) -> Optional[pulumi.Input['EncryptionPropertyArgs']]:
return pulumi.get(self, "encryption")
@encryption.setter
def encryption(self, value: Optional[pulumi.Input['EncryptionPropertyArgs']]):
pulumi.set(self, "encryption", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['IdentityPropertiesArgs']]:
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['IdentityPropertiesArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
    @property
    @pulumi.getter(name="networkRuleBypassOptions")
    def network_rule_bypass_options(self) -> Optional[pulumi.Input[Union[str, 'NetworkRuleBypassOptions']]]:
        """Optional network-rule bypass option (the resource defaults it to 'AzureServices' when unset)."""
        return pulumi.get(self, "network_rule_bypass_options")
    @network_rule_bypass_options.setter
    def network_rule_bypass_options(self, value: Optional[pulumi.Input[Union[str, 'NetworkRuleBypassOptions']]]):
        pulumi.set(self, "network_rule_bypass_options", value)
    @property
    @pulumi.getter(name="networkRuleSet")
    def network_rule_set(self) -> Optional[pulumi.Input['NetworkRuleSetArgs']]:
        """Optional network rule set (``NetworkRuleSetArgs``)."""
        return pulumi.get(self, "network_rule_set")
    @network_rule_set.setter
    def network_rule_set(self, value: Optional[pulumi.Input['NetworkRuleSetArgs']]):
        pulumi.set(self, "network_rule_set", value)
    @property
    @pulumi.getter
    def policies(self) -> Optional[pulumi.Input['PoliciesArgs']]:
        """Optional registry policies (``PoliciesArgs``)."""
        return pulumi.get(self, "policies")
    @policies.setter
    def policies(self, value: Optional[pulumi.Input['PoliciesArgs']]):
        pulumi.set(self, "policies", value)
    @property
    @pulumi.getter(name="publicNetworkAccess")
    def public_network_access(self) -> Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]]:
        """Optional public network access setting (the resource defaults it to 'Enabled' when unset)."""
        return pulumi.get(self, "public_network_access")
    @public_network_access.setter
    def public_network_access(self, value: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]]):
        pulumi.set(self, "public_network_access", value)
    @property
    @pulumi.getter(name="registryName")
    def registry_name(self) -> Optional[pulumi.Input[str]]:
        """Optional name for the registry."""
        return pulumi.get(self, "registry_name")
    @registry_name.setter
    def registry_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "registry_name", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Optional resource tags (str -> str mapping)."""
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class Registry(pulumi.CustomResource):
    """Pulumi resource for an Azure Container Registry (provider type
    ``azure-native:containerregistry/v20191201preview:Registry``).

    Generated provider wrapper: construction goes through ``_internal_init``,
    and read-only outputs are exposed as ``pulumi.Output`` properties.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 admin_user_enabled: Optional[pulumi.Input[bool]] = None,
                 data_endpoint_enabled: Optional[pulumi.Input[bool]] = None,
                 encryption: Optional[pulumi.Input[pulumi.InputType['EncryptionPropertyArgs']]] = None,
                 identity: Optional[pulumi.Input[pulumi.InputType['IdentityPropertiesArgs']]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 network_rule_bypass_options: Optional[pulumi.Input[Union[str, 'NetworkRuleBypassOptions']]] = None,
                 network_rule_set: Optional[pulumi.Input[pulumi.InputType['NetworkRuleSetArgs']]] = None,
                 policies: Optional[pulumi.Input[pulumi.InputType['PoliciesArgs']]] = None,
                 public_network_access: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]] = None,
                 registry_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """Overload: construct from individual keyword arguments."""
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: RegistryArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """Overload: construct from a pre-built :class:`RegistryArgs`."""
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch: unpack a RegistryArgs instance into keyword arguments when
        # one was given, otherwise forward the raw arguments unchanged.
        resource_args, opts = _utilities.get_resource_args_opts(RegistryArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 admin_user_enabled: Optional[pulumi.Input[bool]] = None,
                 data_endpoint_enabled: Optional[pulumi.Input[bool]] = None,
                 encryption: Optional[pulumi.Input[pulumi.InputType['EncryptionPropertyArgs']]] = None,
                 identity: Optional[pulumi.Input[pulumi.InputType['IdentityPropertiesArgs']]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 network_rule_bypass_options: Optional[pulumi.Input[Union[str, 'NetworkRuleBypassOptions']]] = None,
                 network_rule_set: Optional[pulumi.Input[pulumi.InputType['NetworkRuleSetArgs']]] = None,
                 policies: Optional[pulumi.Input[pulumi.InputType['PoliciesArgs']]] = None,
                 public_network_access: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]] = None,
                 registry_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """Shared constructor body: validate options, build the property bag
        (applying client-side defaults), register provider aliases and call
        into the Pulumi engine via ``CustomResource.__init__``.
        """
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means "adopt an existing resource" -- then __props__ is
        # supplied by get() and must not be built here.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = RegistryArgs.__new__(RegistryArgs)
            # Client-side defaults for unset inputs.
            if admin_user_enabled is None:
                admin_user_enabled = False
            __props__.__dict__["admin_user_enabled"] = admin_user_enabled
            __props__.__dict__["data_endpoint_enabled"] = data_endpoint_enabled
            __props__.__dict__["encryption"] = encryption
            __props__.__dict__["identity"] = identity
            __props__.__dict__["location"] = location
            if network_rule_bypass_options is None:
                network_rule_bypass_options = 'AzureServices'
            __props__.__dict__["network_rule_bypass_options"] = network_rule_bypass_options
            __props__.__dict__["network_rule_set"] = network_rule_set
            __props__.__dict__["policies"] = policies
            if public_network_access is None:
                public_network_access = 'Enabled'
            __props__.__dict__["public_network_access"] = public_network_access
            __props__.__dict__["registry_name"] = registry_name
            # Required inputs (unless adopting by URN).
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            if sku is None and not opts.urn:
                raise TypeError("Missing required property 'sku'")
            __props__.__dict__["sku"] = sku
            __props__.__dict__["tags"] = tags
            # Output-only properties; populated by the provider.
            __props__.__dict__["creation_date"] = None
            __props__.__dict__["data_endpoint_host_names"] = None
            __props__.__dict__["login_server"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["private_endpoint_connections"] = None
            __props__.__dict__["provisioning_state"] = None
            __props__.__dict__["status"] = None
            __props__.__dict__["system_data"] = None
            __props__.__dict__["type"] = None
        # Aliases keep existing state reachable under older/other type tokens.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:containerregistry/v20191201preview:Registry"), pulumi.Alias(type_="azure-native:containerregistry:Registry"), pulumi.Alias(type_="azure-nextgen:containerregistry:Registry"), pulumi.Alias(type_="azure-native:containerregistry/v20160627preview:Registry"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20160627preview:Registry"), pulumi.Alias(type_="azure-native:containerregistry/v20170301:Registry"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20170301:Registry"), pulumi.Alias(type_="azure-native:containerregistry/v20170601preview:Registry"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20170601preview:Registry"), pulumi.Alias(type_="azure-native:containerregistry/v20171001:Registry"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20171001:Registry"), pulumi.Alias(type_="azure-native:containerregistry/v20190501:Registry"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20190501:Registry"), pulumi.Alias(type_="azure-native:containerregistry/v20201101preview:Registry"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20201101preview:Registry"), pulumi.Alias(type_="azure-native:containerregistry/v20210601preview:Registry"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20210601preview:Registry")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Registry, __self__).__init__(
            'azure-native:containerregistry/v20191201preview:Registry',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Registry':
        """Adopt an existing Registry by its provider ``id``.

        All properties start as None; the engine fills them in from the
        provider once the resource is read back.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = RegistryArgs.__new__(RegistryArgs)
        __props__.__dict__["admin_user_enabled"] = None
        __props__.__dict__["creation_date"] = None
        __props__.__dict__["data_endpoint_enabled"] = None
        __props__.__dict__["data_endpoint_host_names"] = None
        __props__.__dict__["encryption"] = None
        __props__.__dict__["identity"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["login_server"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["network_rule_bypass_options"] = None
        __props__.__dict__["network_rule_set"] = None
        __props__.__dict__["policies"] = None
        __props__.__dict__["private_endpoint_connections"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["public_network_access"] = None
        __props__.__dict__["sku"] = None
        __props__.__dict__["status"] = None
        __props__.__dict__["system_data"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return Registry(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="adminUserEnabled")
    def admin_user_enabled(self) -> pulumi.Output[Optional[bool]]:
        """Admin-user flag as reported by the provider."""
        return pulumi.get(self, "admin_user_enabled")
    @property
    @pulumi.getter(name="creationDate")
    def creation_date(self) -> pulumi.Output[str]:
        """Creation timestamp of the registry (read-only)."""
        return pulumi.get(self, "creation_date")
    @property
    @pulumi.getter(name="dataEndpointEnabled")
    def data_endpoint_enabled(self) -> pulumi.Output[Optional[bool]]:
        """Data-endpoint flag as reported by the provider."""
        return pulumi.get(self, "data_endpoint_enabled")
    @property
    @pulumi.getter(name="dataEndpointHostNames")
    def data_endpoint_host_names(self) -> pulumi.Output[Sequence[str]]:
        """Data-endpoint host names (read-only)."""
        return pulumi.get(self, "data_endpoint_host_names")
    @property
    @pulumi.getter
    def encryption(self) -> pulumi.Output[Optional['outputs.EncryptionPropertyResponse']]:
        """Encryption settings as reported by the provider."""
        return pulumi.get(self, "encryption")
    @property
    @pulumi.getter
    def identity(self) -> pulumi.Output[Optional['outputs.IdentityPropertiesResponse']]:
        """Identity settings as reported by the provider."""
        return pulumi.get(self, "identity")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """Resource location (read-only output)."""
        return pulumi.get(self, "location")
    @property
    @pulumi.getter(name="loginServer")
    def login_server(self) -> pulumi.Output[str]:
        """Login server URL of the registry (read-only)."""
        return pulumi.get(self, "login_server")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """Resource name (read-only)."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="networkRuleBypassOptions")
    def network_rule_bypass_options(self) -> pulumi.Output[Optional[str]]:
        """Network-rule bypass option as reported by the provider."""
        return pulumi.get(self, "network_rule_bypass_options")
    @property
    @pulumi.getter(name="networkRuleSet")
    def network_rule_set(self) -> pulumi.Output[Optional['outputs.NetworkRuleSetResponse']]:
        """Network rule set as reported by the provider."""
        return pulumi.get(self, "network_rule_set")
    @property
    @pulumi.getter
    def policies(self) -> pulumi.Output[Optional['outputs.PoliciesResponse']]:
        """Registry policies as reported by the provider."""
        return pulumi.get(self, "policies")
    @property
    @pulumi.getter(name="privateEndpointConnections")
    def private_endpoint_connections(self) -> pulumi.Output[Sequence['outputs.PrivateEndpointConnectionResponse']]:
        """Private endpoint connections (read-only)."""
        return pulumi.get(self, "private_endpoint_connections")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """Provisioning state of the registry (read-only)."""
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="publicNetworkAccess")
    def public_network_access(self) -> pulumi.Output[Optional[str]]:
        """Public network access setting as reported by the provider."""
        return pulumi.get(self, "public_network_access")
    @property
    @pulumi.getter
    def sku(self) -> pulumi.Output['outputs.SkuResponse']:
        """SKU of the registry (read-only response shape)."""
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter
    def status(self) -> pulumi.Output['outputs.StatusResponse']:
        """Status of the registry (read-only)."""
        return pulumi.get(self, "status")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
        """System metadata (read-only)."""
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """Resource tags as reported by the provider."""
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """Resource type token (read-only)."""
        return pulumi.get(self, "type")
| true | true |
f7fd2823411e2855af0b3f2564fa84386d9e3031 | 6,105 | py | Python | tensornetwork/contractors/opt_einsum_paths/path_contractors_node_test.py | jensenjhwang/TensorNetwork | 35d1247cc3fb80768965f7429ac9b8b914a144a8 | [
"Apache-2.0"
] | 1 | 2020-02-17T00:12:30.000Z | 2020-02-17T00:12:30.000Z | tensornetwork/contractors/opt_einsum_paths/path_contractors_node_test.py | jensenjhwang/TensorNetwork | 35d1247cc3fb80768965f7429ac9b8b914a144a8 | [
"Apache-2.0"
] | null | null | null | tensornetwork/contractors/opt_einsum_paths/path_contractors_node_test.py | jensenjhwang/TensorNetwork | 35d1247cc3fb80768965f7429ac9b8b914a144a8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from tensornetwork import Node
from tensornetwork.contractors import auto
from tensornetwork.contractors.opt_einsum_paths import path_contractors
@pytest.fixture(
    name="path_algorithm", params=["optimal", "branch", "greedy", "auto"])
def path_algorithm_fixture(request):
    """Parametrized fixture yielding each opt_einsum path-contractor function."""
    return getattr(path_contractors, request.param)
def test_sanity_check(backend, path_algorithm):
    """Contract a small four-node network and verify the result shape."""
    node_a = Node(np.eye(2), backend=backend)
    node_b = Node(np.ones((2, 7, 11)), backend=backend)
    node_c = Node(np.ones((7, 11, 13, 2)), backend=backend)
    node_d = Node(np.eye(13), backend=backend)
    # Wire up the network.  pylint: disable=pointless-statement
    node_a[0] ^ node_b[0]
    node_b[1] ^ node_c[0]
    node_b[2] ^ node_c[1]
    node_c[2] ^ node_d[1]
    node_c[3] ^ node_a[1]
    result = path_algorithm([node_a, node_b, node_c, node_d])
    assert result.shape == (13,)
def test_trace_edge(backend, path_algorithm):
    """A network containing a trace edge contracts to the expected tensor."""
    big = Node(np.ones((2, 2, 2, 2, 2)), backend=backend)
    left = Node(np.ones((2, 2, 2)), backend=backend)
    right = Node(np.ones((2, 2, 2)), backend=backend)
    # pylint: disable=pointless-statement
    big[0] ^ big[1]  # trace edge on the five-legged node
    big[2] ^ left[0]
    big[3] ^ right[0]
    left[1] ^ right[1]
    left[2] ^ right[2]
    result = path_algorithm([big, left, right])
    np.testing.assert_allclose(result.tensor, np.ones(2) * 32.0)
def test_single_node(backend, path_algorithm):
    """A lone node with a trace edge is contracted correctly."""
    node = Node(np.ones((2, 2, 2)), backend=backend)
    node[0] ^ node[1]  # pylint: disable=pointless-statement
    result = path_algorithm([node])
    np.testing.assert_allclose(result.tensor, np.ones(2) * 2.0)
def test_custom_sanity_check(backend):
    """The ``custom`` contractor accepts a user-supplied path optimizer."""
    vec = Node(np.ones(2), backend=backend)
    mat = Node(np.ones((2, 5)), backend=backend)
    vec[0] ^ mat[0]  # pylint: disable=pointless-statement

    class FixedPathOptimizer:
        """Always contracts the first two tensors."""

        def __call__(self, inputs, output, size_dict, memory_limit=None):
            return [(0, 1)]

    result = path_contractors.custom([vec, mat], FixedPathOptimizer())
    np.testing.assert_allclose(result.tensor, np.ones(5) * 2.0)
def test_subgraph_contraction(backend, path_algorithm):
    """Contracting a two-node subgraph leaves dangling edges usable later."""
    t_a = np.arange(4).reshape((2, 2))
    t_b = np.arange(4).reshape((2, 2)) + 10
    t_c = np.arange(4).reshape((2, 2)) + 20
    node_a = Node(t_a, backend=backend)
    node_b = Node(t_b, backend=backend)
    node_c = Node(t_c, backend=backend)
    node_a[0] ^ node_b[1]
    node_c[1] ^ node_b[0]
    leftover = [node_c[0], node_a[1]]
    partial = path_algorithm({node_a, node_b}, [node_b[0], node_a[1]])
    np.testing.assert_allclose(partial.tensor, t_b @ t_a)
    full = (node_c @ partial).reorder_edges(leftover)
    np.testing.assert_allclose(full.tensor, t_c @ t_b @ t_a)
def test_multiple_partial_contractions(backend, path_algorithm):
    """Two disjoint pairwise contractions can be combined into the full trace."""
    a_tensor = np.arange(4).reshape((2, 2))
    b_tensor = np.arange(4).reshape((2, 2)) + 10
    c_tensor = np.arange(4).reshape((2, 2)) + 20
    d_tensor = np.arange(4).reshape((2, 2)) + 30
    a = Node(a_tensor, backend=backend)
    b = Node(b_tensor, backend=backend)
    c = Node(c_tensor, backend=backend)
    d = Node(d_tensor, backend=backend)
    # Ring topology: a-b-c-d-a.
    a[1] ^ b[0]
    b[1] ^ c[0]
    c[1] ^ d[0]
    d[1] ^ a[0]
    ab = path_algorithm({a, b}, [a[0], b[1]])
    np.testing.assert_allclose(ab.tensor, a_tensor @ b_tensor)
    cd = path_algorithm({c, d}, [c[0], d[1]])
    np.testing.assert_allclose(cd.tensor, c_tensor @ d_tensor)
    result = path_algorithm({ab, cd})
    np.testing.assert_allclose(
        result.tensor, np.trace(a_tensor @ b_tensor @ c_tensor @ d_tensor))
def test_single_node_reorder(backend, path_algorithm):
    """Requesting a custom edge order on a single node transposes its tensor."""
    node = Node(np.arange(4).reshape((2, 2)), backend=backend)
    wanted_order = [node[1], node[0]]
    result = path_algorithm({node}, wanted_order)
    assert result.edges == wanted_order
    np.testing.assert_allclose(result.tensor, np.arange(4).reshape((2, 2)).T)
def test_ignore_edge_order(backend, path_algorithm):
    """With ignore_edge_order=True, no output edge order needs to be given."""
    a = Node(np.ones((1, 1, 1)), backend=backend)
    b = Node(np.ones((1, 1, 1, 2, 3)), backend=backend)
    a[0] ^ b[0]
    a[1] ^ b[1]
    a[2] ^ b[2]
    e0 = b[3]
    e1 = b[4]
    final_node = path_algorithm({a, b},
                                ignore_edge_order=True)
    # Dangling edges survive in some (unspecified) order.
    assert set(final_node.edges) == {e0, e1}
def test_ignore_edge_order_with_order(backend, path_algorithm):
    """ignore_edge_order=True makes any supplied output order non-binding."""
    a = Node(np.ones((1, 1, 1)), backend=backend)
    b = Node(np.ones((1, 1, 1, 2, 3)), backend=backend)
    a[0] ^ b[0]
    a[1] ^ b[1]
    a[2] ^ b[2]
    e0 = b[3]
    e1 = b[4]
    final_node = path_algorithm({a, b},
                                [e1, e0],
                                ignore_edge_order=True)
    # Only the set of dangling edges is guaranteed, not their order.
    assert set(final_node.edges) == {e0, e1}
def test_disconnected_network(backend, path_algorithm):
    """Several disconnected components contract to the requested edge order."""
    a = Node(np.eye(2), backend=backend)
    b = Node(np.eye(2), backend=backend)
    c = Node(np.eye(2), backend=backend)
    d = Node(np.eye(2), backend=backend)
    e = Node(np.eye(2), backend=backend)
    f = Node(np.eye(2), backend=backend)
    g = Node(np.eye(2), backend=backend)
    # Components: {a,b}, {c,d}, {e,f,g}.
    a[1] ^ b[0]
    c[0] ^ d[1]
    e[0] ^ f[0]
    g[0] ^ f[1]
    final_edges = [a[0], b[1], c[1], d[0], e[1], g[1]]
    result = path_algorithm(
        {a, b, c, d, e, f, g},
        final_edges)
    assert result.edges == final_edges
def test_passes_ignore_edge_order_from_auto(backend):
    """``auto`` must forward ignore_edge_order to the contractor it delegates
    to for mid-sized networks (5 or 6 nodes)."""
    a = Node(np.eye(2), backend=backend)
    b = Node(np.eye(2), backend=backend)
    c = Node(np.eye(2), backend=backend)
    d = Node(np.eye(2), backend=backend)
    e = Node(np.eye(2), backend=backend)
    # pylint: disable=pointless-statement
    a[1] ^ b[0]
    c[0] ^ d[1]
    c[1] ^ e[0]
    nodes = [a, b, c, d, e]
    try:
        auto(nodes, ignore_edge_order=True)
    except ValueError:
        pytest.fail("auto should pass ignore_edge_order when n >= 5 && n < 7")
| 30.373134 | 75 | 0.656839 |
import numpy as np
import pytest
from tensornetwork import Node
from tensornetwork.contractors import auto
from tensornetwork.contractors.opt_einsum_paths import path_contractors
@pytest.fixture(
name="path_algorithm", params=["optimal", "branch", "greedy", "auto"])
def path_algorithm_fixture(request):
return getattr(path_contractors, request.param)
def test_sanity_check(backend, path_algorithm):
a = Node(np.eye(2), backend=backend)
b = Node(np.ones((2, 7, 11)), backend=backend)
c = Node(np.ones((7, 11, 13, 2)), backend=backend)
d = Node(np.eye(13), backend=backend)
a[0] ^ b[0]
b[1] ^ c[0]
b[2] ^ c[1]
c[2] ^ d[1]
c[3] ^ a[1]
nodes = [a, b, c, d]
final_node = path_algorithm(nodes)
assert final_node.shape == (13,)
def test_trace_edge(backend, path_algorithm):
a = Node(np.ones((2, 2, 2, 2, 2)), backend=backend)
b = Node(np.ones((2, 2, 2)), backend=backend)
c = Node(np.ones((2, 2, 2)), backend=backend)
a[0] ^ a[1]
a[2] ^ b[0]
a[3] ^ c[0]
b[1] ^ c[1]
b[2] ^ c[2]
nodes = [a, b, c]
node = path_algorithm(nodes)
np.testing.assert_allclose(node.tensor, np.ones(2) * 32.0)
def test_single_node(backend, path_algorithm):
a = Node(np.ones((2, 2, 2)), backend=backend)
a[0] ^ a[1]
nodes = [a]
node = path_algorithm(nodes)
np.testing.assert_allclose(node.tensor, np.ones(2) * 2.0)
def test_custom_sanity_check(backend):
a = Node(np.ones(2), backend=backend)
b = Node(np.ones((2, 5)), backend=backend)
a[0] ^ b[0]
nodes = [a, b]
class PathOptimizer:
def __call__(self, inputs, output, size_dict, memory_limit=None):
return [(0, 1)]
optimizer = PathOptimizer()
final_node = path_contractors.custom(nodes, optimizer)
np.testing.assert_allclose(final_node.tensor, np.ones(5) * 2.0)
def test_subgraph_contraction(backend, path_algorithm):
a_tensor = np.arange(4).reshape((2, 2))
b_tensor = np.arange(4).reshape((2, 2)) + 10
c_tensor = np.arange(4).reshape((2, 2)) + 20
a = Node(a_tensor, backend=backend)
b = Node(b_tensor, backend=backend)
c = Node(c_tensor, backend=backend)
a[0] ^ b[1]
c[1] ^ b[0]
remaining_edges = [c[0], a[1]]
result = path_algorithm({a, b}, [b[0], a[1]])
np.testing.assert_allclose(result.tensor, b_tensor @ a_tensor)
final = (c @ result).reorder_edges(remaining_edges)
np.testing.assert_allclose(final.tensor, c_tensor @ b_tensor @ a_tensor)
def test_multiple_partial_contractions(backend, path_algorithm):
a_tensor = np.arange(4).reshape((2, 2))
b_tensor = np.arange(4).reshape((2, 2)) + 10
c_tensor = np.arange(4).reshape((2, 2)) + 20
d_tensor = np.arange(4).reshape((2, 2)) + 30
a = Node(a_tensor, backend=backend)
b = Node(b_tensor, backend=backend)
c = Node(c_tensor, backend=backend)
d = Node(d_tensor, backend=backend)
a[1] ^ b[0]
b[1] ^ c[0]
c[1] ^ d[0]
d[1] ^ a[0]
ab = path_algorithm({a, b}, [a[0], b[1]])
np.testing.assert_allclose(ab.tensor, a_tensor @ b_tensor)
cd = path_algorithm({c, d}, [c[0], d[1]])
np.testing.assert_allclose(cd.tensor, c_tensor @ d_tensor)
result = path_algorithm({ab, cd})
np.testing.assert_allclose(
result.tensor, np.trace(a_tensor @ b_tensor @ c_tensor @ d_tensor))
def test_single_node_reorder(backend, path_algorithm):
a = Node(np.arange(4).reshape((2, 2)), backend=backend)
expected_edge_order = [a[1], a[0]]
result = path_algorithm({a}, expected_edge_order)
assert result.edges == expected_edge_order
np.testing.assert_allclose(result.tensor, np.arange(4).reshape((2, 2)).T)
def test_ignore_edge_order(backend, path_algorithm):
a = Node(np.ones((1, 1, 1)), backend=backend)
b = Node(np.ones((1, 1, 1, 2, 3)), backend=backend)
a[0] ^ b[0]
a[1] ^ b[1]
a[2] ^ b[2]
e0 = b[3]
e1 = b[4]
final_node = path_algorithm({a, b},
ignore_edge_order=True)
assert set(final_node.edges) == {e0, e1}
def test_ignore_edge_order_with_order(backend, path_algorithm):
a = Node(np.ones((1, 1, 1)), backend=backend)
b = Node(np.ones((1, 1, 1, 2, 3)), backend=backend)
a[0] ^ b[0]
a[1] ^ b[1]
a[2] ^ b[2]
e0 = b[3]
e1 = b[4]
final_node = path_algorithm({a, b},
[e1, e0],
ignore_edge_order=True)
assert set(final_node.edges) == {e0, e1}
def test_disconnected_network(backend, path_algorithm):
a = Node(np.eye(2), backend=backend)
b = Node(np.eye(2), backend=backend)
c = Node(np.eye(2), backend=backend)
d = Node(np.eye(2), backend=backend)
e = Node(np.eye(2), backend=backend)
f = Node(np.eye(2), backend=backend)
g = Node(np.eye(2), backend=backend)
a[1] ^ b[0]
c[0] ^ d[1]
e[0] ^ f[0]
g[0] ^ f[1]
final_edges = [a[0], b[1], c[1], d[0], e[1], g[1]]
result = path_algorithm(
{a, b, c, d, e, f, g},
final_edges)
assert result.edges == final_edges
def test_passes_ignore_edge_order_from_auto(backend):
a = Node(np.eye(2), backend=backend)
b = Node(np.eye(2), backend=backend)
c = Node(np.eye(2), backend=backend)
d = Node(np.eye(2), backend=backend)
e = Node(np.eye(2), backend=backend)
a[1] ^ b[0]
c[0] ^ d[1]
c[1] ^ e[0]
nodes = [a, b, c, d, e]
try:
auto(nodes, ignore_edge_order=True)
except ValueError:
pytest.fail("auto should pass ignore_edge_order when n >= 5 && n < 7")
| true | true |
f7fd28549b3dc76929f0dd90840802ceaab7d1d3 | 823 | py | Python | ckan/migration/versions/019_pkg_relationships_state.py | florianm/ckan | 1cfd98d591ac70b4eb81048bcd227b6c1354b1bf | [
"Apache-2.0"
] | 12 | 2015-08-28T16:59:07.000Z | 2020-03-08T01:39:30.000Z | ckan/migration/versions/019_pkg_relationships_state.py | florianm/ckan | 1cfd98d591ac70b4eb81048bcd227b6c1354b1bf | [
"Apache-2.0"
] | 13 | 2019-05-02T21:01:28.000Z | 2020-10-20T23:34:48.000Z | ckan/migration/versions/019_pkg_relationships_state.py | florianm/ckan | 1cfd98d591ac70b4eb81048bcd227b6c1354b1bf | [
"Apache-2.0"
] | 10 | 2015-05-08T04:33:20.000Z | 2020-03-03T15:17:58.000Z | from sqlalchemy import *
from migrate import *
import migrate.changeset
def upgrade(migrate_engine):
    """Add a ``state`` column to package_relationship and its revision table."""
    meta = MetaData()
    meta.bind = migrate_engine
    rel_table = Table('package_relationship', meta, autoload=True)
    rel_rev_table = Table('package_relationship_revision', meta, autoload=True)
    # Column.create() comes from migrate.changeset.
    Column('state', UnicodeText).create(rel_table)
    Column('state', UnicodeText).create(rel_rev_table)
    # No package relationship objects exist yet, so there is nothing to
    # migrate into the new state column.
def downgrade(migrate_engine):
    """Downgrading this migration is intentionally unsupported."""
    raise NotImplementedError()
| 32.92 | 80 | 0.693803 | from sqlalchemy import *
from migrate import *
import migrate.changeset
def upgrade(migrate_engine):
metadata = MetaData()
metadata.bind = migrate_engine
package_relationship_table = Table('package_relationship',
metadata, autoload=True)
package_relationship_revision_table = Table('package_relationship_revision',
metadata, autoload=True)
state_column = Column('state', UnicodeText)
state_column.create(package_relationship_table)
state_column = Column('state', UnicodeText)
state_column.create(package_relationship_revision_table)
def downgrade(migrate_engine):
raise NotImplementedError()
| true | true |
f7fd2a3c1a5d0bb36005657256cc089cd952fa08 | 735 | gyp | Python | Dependencies/gyp-master/test/prune_targets/test2.gyp | knight666/exlibris | b21b46e0c84e5c4f81f8048022cda88e7bb3dca2 | [
"MIT"
] | null | null | null | Dependencies/gyp-master/test/prune_targets/test2.gyp | knight666/exlibris | b21b46e0c84e5c4f81f8048022cda88e7bb3dca2 | [
"MIT"
] | null | null | null | Dependencies/gyp-master/test/prune_targets/test2.gyp | knight666/exlibris | b21b46e0c84e5c4f81f8048022cda88e7bb3dca2 | [
"MIT"
] | null | null | null | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'targets': [
    # lib1 and lib2 both depend on lib_indirect; lib3 stands alone.
    # (Used to exercise target pruning of shared indirect dependencies.)
    {
      'target_name': 'lib1',
      'type': 'static_library',
      'sources': [ 'lib1.cc' ],
      'dependencies': [ 'lib_indirect' ],
    },
    {
      'target_name': 'lib2',
      'type': 'static_library',
      'sources': [ 'lib2.cc' ],
      'dependencies': [ 'lib_indirect' ],
    },
    {
      'target_name': 'lib3',
      'type': 'static_library',
      'sources': [ 'lib3.cc' ],
    },
    {
      'target_name': 'lib_indirect',
      'type': 'static_library',
      'sources': [ 'lib_indirect.cc' ],
    },
  ],
}
| 23.709677 | 73 | 0.506122 |
{
'targets': [
{
'target_name': 'lib1',
'type': 'static_library',
'sources': [ 'lib1.cc' ],
'dependencies': [ 'lib_indirect' ],
},
{
'target_name': 'lib2',
'type': 'static_library',
'sources': [ 'lib2.cc' ],
'dependencies': [ 'lib_indirect' ],
},
{
'target_name': 'lib3',
'type': 'static_library',
'sources': [ 'lib3.cc' ],
},
{
'target_name': 'lib_indirect',
'type': 'static_library',
'sources': [ 'lib_indirect.cc' ],
},
],
}
| true | true |
f7fd2c0275ccc8276d5b8a95673b4d0be8343ca6 | 5,738 | py | Python | web/application.py | nancy301513/xmind2testlink | 8345b6e157fa025ed019e4053f6d6c9a41636a2d | [
"MIT"
] | null | null | null | web/application.py | nancy301513/xmind2testlink | 8345b6e157fa025ed019e4053f6d6c9a41636a2d | [
"MIT"
] | null | null | null | web/application.py | nancy301513/xmind2testlink | 8345b6e157fa025ed019e4053f6d6c9a41636a2d | [
"MIT"
] | null | null | null | import os
import sqlite3
from contextlib import closing
from os.path import join, exists
import arrow
from flask import Flask, request, send_from_directory, g, render_template, abort, redirect, url_for
from werkzeug.utils import secure_filename
from xmind2testlink.main import xmind_to_suite, xmind_to_testlink
from xmind2testlink.sharedparser import flat_suite
# Application configuration -- picked up by app.config.from_object(__name__),
# which imports every UPPER_CASE name from this module.
UPLOAD_FOLDER = './uploads'  # where uploaded .xmind files are stored
ALLOWED_EXTENSIONS = ['xmind']  # only XMind files are accepted
DEBUG = True
DATABASE = './data.db3'  # SQLite file holding the upload records
HOST = '0.0.0.0'
V2 = True  # serve the v2 templates (preview flow)
app = Flask(__name__)
app.config.from_object(__name__)
app.secret_key = os.urandom(32)  # random per-process session key
def connect_db():
    """Open a new SQLite connection to the configured database file."""
    database_path = app.config['DATABASE']
    return sqlite3.connect(database_path)
def init_db():
    """Create the database schema by executing schema.sql on a fresh connection."""
    with closing(connect_db()) as db:
        # schema.sql is bundled with the application package.
        with app.open_resource('schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()
def init():
    """One-time startup: ensure the upload folder and SQLite database exist."""
    # makedirs(exist_ok=True) is race-free, unlike the original
    # exists()/mkdir() pair, and also creates missing parent directories.
    os.makedirs(UPLOAD_FOLDER, exist_ok=True)
    if not exists(DATABASE):
        init_db()
@app.before_request
def before_request():
    """Open a per-request DB connection and stash it on flask.g."""
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the per-request DB connection, if one was opened."""
    connection = getattr(g, 'db', None)
    if connection is not None:
        connection.close()
def insert_record(xmind_name, note=''):
    """Insert an upload record (name, timestamp, note) into the records table."""
    cursor = g.db.cursor()
    created_at = str(arrow.now())
    cursor.execute(
        "INSERT INTO records (name,create_on,note) VALUES (?,?,?)",
        (xmind_name, created_at, str(note)))
    g.db.commit()
def delete_records(keep=20):
    """Purge all but the newest ``keep`` uploads.

    Deletes the stored .xmind file and its generated .xml twin from disk and
    soft-deletes the corresponding rows (is_deleted=1).
    """
    # int() hardens the interpolated OFFSET against non-integer input.
    sql = ("SELECT * from records where is_deleted<>1 "
           "ORDER BY id desc LIMIT -1 offset {}".format(int(keep)))
    assert isinstance(g.db, sqlite3.Connection)
    c = g.db.cursor()
    c.execute(sql)
    rows = c.fetchall()
    for row in rows:
        name = row[1]
        xmind = join(app.config['UPLOAD_FOLDER'], name)
        xml = join(app.config['UPLOAD_FOLDER'], name[:-5] + 'xml')
        for f in [xmind, xml]:
            # Remove unconditionally and tolerate a missing file, instead of
            # the racy exists()/remove() pair (file could vanish in between).
            try:
                os.remove(f)
            except FileNotFoundError:
                pass
        sql = 'UPDATE records SET is_deleted=1 WHERE id = ?'
        c.execute(sql, (row[0],))
    g.db.commit()
def get_latest_record():
    """Return the most recent record tuple, or None when there are none."""
    records = list(get_records(1))
    return records[0] if records else None
def get_records(limit=8):
    """Yield up to ``limit`` newest records as (short_name, name, age, note)."""
    display_width = 120 if V2 else 30
    cursor = g.db.cursor()
    cursor.execute(
        "select * from records where is_deleted<>1 "
        "order by id desc limit {}".format(int(limit)))
    for row in cursor.fetchall():
        full_name, note = row[1], row[3]
        # Truncate long names for display.
        display_name = full_name
        if len(full_name) > display_width:
            display_name = full_name[:display_width] + '...'
        # Human-friendly age ("2 hours ago") instead of the raw timestamp.
        created_ago = arrow.get(row[2]).humanize()
        yield display_name, full_name, created_ago, note
def allowed_file(filename):
    """Return True when *filename* has an allowed (XMind) extension.

    The comparison is case-insensitive, so e.g. 'CASE.XMIND' is accepted;
    the original compared the raw extension against the lowercase whitelist.
    """
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def save_file(file):
    """Persist an uploaded file and record it; sets flags on flask.g.

    Returns the stored filename on success, otherwise None with
    g.is_success/g.error/g.invalid_files left for the caller to render.
    """
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        upload_to = join(app.config['UPLOAD_FOLDER'], filename)
        if exists(upload_to):
            # Avoid clobbering an earlier upload: append a timestamp suffix.
            filename = '{}_{}.xmind'.format(filename[:-6], arrow.now().strftime('%Y%m%d_%H%M%S'))
            upload_to = join(app.config['UPLOAD_FOLDER'], filename)
        file.save(upload_to)
        insert_record(filename)
        g.is_success = True
        return filename
    elif file.filename == '':
        g.is_success = False
        g.error = "Please select a file!"
    else:
        g.is_success = False
        g.invalid_files.append(file.filename)
def verify_uploaded_files(files):
    """Post-process an upload batch: set g.download_xml / g.error flags."""
    single_success = len(files) == 1 and getattr(g, 'is_success', False)
    if single_success:
        # A single successful upload can be converted and downloaded directly.
        g.download_xml = get_latest_record()[1]
    if g.invalid_files:
        g.error = "Invalid file: {}".format(','.join(g.invalid_files))
@app.route('/', methods=['GET', 'POST'])
def index(download_xml=None):
    """Main page: show the upload form (GET) or handle an upload (POST)."""
    g.invalid_files = []
    g.error = None
    g.download_xml = download_xml
    g.filename = None
    if request.method == 'POST':
        if 'file' not in request.files:
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            return redirect(request.url)
        g.filename = save_file(file)
        verify_uploaded_files([file])
        # Trim old uploads so the server does not accumulate files forever.
        delete_records()
    else:
        g.upload_form = True
    if V2:
        # v2 flow: successful upload redirects straight to the preview page.
        if g.filename:
            return redirect(url_for('preview_file', filename=g.filename))
        else:
            return render_template('v2/index.html', records=list(get_records()))
    else:
        return render_template('index.html', download_xml=g.download_xml, records=list(get_records()))
@app.route('/<filename>/to/testlink')
def download_file(filename):
    """Convert a previously uploaded XMind file to TestLink XML and send it."""
    full_path = join(app.config['UPLOAD_FOLDER'], filename)
    if not exists(full_path):
        abort(404)
    xmind_to_testlink(full_path)
    # NOTE(review): assumes *filename* ends in '.xmind' -- the slice swaps the
    # 'xmind' suffix for 'xml'.  Confirm callers never pass other names.
    filename = filename[:-5] + 'xml'
    return send_from_directory(app.config['UPLOAD_FOLDER'], filename, as_attachment=True)
@app.route('/uploads/<filename>')
def uploaded_file(filename):
    """Serve an uploaded file back from the upload folder."""
    return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
@app.route('/preview/<filename>')
def preview_file(filename):
    """Render an HTML preview of the test suites parsed from an XMind file."""
    full_path = join(app.config['UPLOAD_FOLDER'], filename)
    if not exists(full_path):
        abort(404)
    suite = xmind_to_suite(full_path)
    suite_count = len(suite.sub_suites)
    # Flatten the nested suite tree for template rendering.
    suite = flat_suite(suite)
    return render_template('v2/preview.html', name=filename, suite=suite, suite_count=suite_count)
@app.errorhandler(Exception)
def app_error(e):
    """Catch-all error handler: return the exception text as the response body.

    NOTE(review): this exposes internal exception details to the client and
    responds with HTTP 200 — acceptable for a local dev tool, not production.
    """
    return str(e)
# Bootstrap the upload folder / sqlite DB at import time so the app also
# works when served through WSGI (not only via the __main__ entry point).
init()
if __name__ == '__main__':
    # Dev entry point; debug=True must not be used in production.
    app.run(HOST, debug=DEBUG, port=5001)
| 26.081818 | 102 | 0.645173 | import os
import sqlite3
from contextlib import closing
from os.path import join, exists
import arrow
from flask import Flask, request, send_from_directory, g, render_template, abort, redirect, url_for
from werkzeug.utils import secure_filename
from xmind2testlink.main import xmind_to_suite, xmind_to_testlink
from xmind2testlink.sharedparser import flat_suite
UPLOAD_FOLDER = './uploads'
ALLOWED_EXTENSIONS = ['xmind']
DEBUG = True
DATABASE = './data.db3'
HOST = '0.0.0.0'
V2 = True
app = Flask(__name__)
app.config.from_object(__name__)
app.secret_key = os.urandom(32)
def connect_db():
    """Open a new sqlite3 connection to the configured database file."""
    return sqlite3.connect(app.config['DATABASE'])
def init_db():
    """Create the database schema by executing the bundled schema.sql."""
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()
def init():
    """One-time setup: ensure the upload folder and the sqlite DB exist."""
    if not exists(UPLOAD_FOLDER):
        os.mkdir(UPLOAD_FOLDER)
    if not exists(DATABASE):
        init_db()
@app.before_request
def before_request():
    """Open a per-request DB connection on g (closed in teardown_request)."""
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the per-request DB connection opened in before_request, if any."""
    connection = getattr(g, 'db', None)
    if connection is not None:
        connection.close()
def insert_record(xmind_name, note=''):
    """Insert one upload record (name, creation timestamp, note) into the DB."""
    c = g.db.cursor()
    now = str(arrow.now())
    # Parameterized SQL guards against injection from user-supplied names.
    sql = "INSERT INTO records (name,create_on,note) VALUES (?,?,?)"
    c.execute(sql, (xmind_name, now, str(note)))
    g.db.commit()
def delete_records(keep=20):
    """Soft-delete all but the newest ``keep`` records and remove their files.

    ``LIMIT -1 OFFSET keep`` selects every live record beyond the newest
    ``keep`` ones (newest first). ``keep`` is only ever called with the
    default int here; it is formatted into the SQL, so keep it an int.
    """
    sql = "SELECT * from records where is_deleted<>1 ORDER BY id desc LIMIT -1 offset {}".format(keep)
    assert isinstance(g.db, sqlite3.Connection)
    c = g.db.cursor()
    c.execute(sql)
    rows = c.fetchall()
    for row in rows:
        # row layout (from schema): (id, name, create_on, note, is_deleted) — confirm against schema.sql
        name = row[1]
        xmind = join(app.config['UPLOAD_FOLDER'], name)
        # matching converted XML: "<base>.xmind" -> "<base>.xml"
        xml = join(app.config['UPLOAD_FOLDER'], name[:-5] + 'xml')
        for f in [xmind, xml]:
            if exists(f):
                os.remove(f)
        sql = 'UPDATE records SET is_deleted=1 WHERE id = ?'
        c.execute(sql, (row[0],))
    g.db.commit()
def get_latest_record():
    """Return the newest record tuple, or None when there are no records."""
    newest = list(get_records(1))
    return newest[0] if newest else None
def get_records(limit=8):
    """Yield (short_name, name, humanized_create_on, note) for newest records.

    Long names are truncated for display; ``limit`` is cast to int before
    being formatted into the SQL, which prevents injection via that path.
    """
    short_name_length = 120 if V2 else 30
    c = g.db.cursor()
    sql = "select * from records where is_deleted<>1 order by id desc limit {}".format(int(limit))
    c.execute(sql)
    rows = c.fetchall()
    for row in rows:
        name, short_name, create_on, note = row[1], row[1], row[2], row[3]
        if len(name) > short_name_length:
            short_name = name[:short_name_length] + '...'
        # e.g. "2 hours ago" — arrow renders a human-friendly relative time
        create_on = arrow.get(create_on).humanize()
        yield short_name, name, create_on, note
def allowed_file(filename):
    """Return True when *filename* has an allowed extension.

    The comparison is lowercased so uploads like "FILE.XMIND" are accepted;
    the previous exact-case match silently rejected them. Entries in
    ALLOWED_EXTENSIONS are lowercase, so existing behavior is preserved.
    """
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def save_file(file):
    """Persist an uploaded xmind file, record it, and return the stored name.

    Side effects on flask.g: sets g.is_success; on failure sets g.error
    (empty filename) or appends to g.invalid_files (bad extension).
    Returns None when the file is rejected.
    """
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        upload_to = join(app.config['UPLOAD_FOLDER'], filename)
        if exists(upload_to):
            # Name clash: insert a timestamp before the ".xmind" suffix ([:-6] strips it).
            filename = '{}_{}.xmind'.format(filename[:-6], arrow.now().strftime('%Y%m%d_%H%M%S'))
            upload_to = join(app.config['UPLOAD_FOLDER'], filename)
        file.save(upload_to)
        insert_record(filename)
        g.is_success = True
        return filename
    elif file.filename == '':
        g.is_success = False
        g.error = "Please select a file!"
    else:
        g.is_success = False
        g.invalid_files.append(file.filename)
def verify_uploaded_files(files):
if len(files) == 1 and getattr(g, 'is_success', False):
g.download_xml = get_latest_record()[1]
if g.invalid_files:
g.error = "Invalid file: {}".format(','.join(g.invalid_files))
@app.route('/', methods=['GET', 'POST'])
def index(download_xml=None):
g.invalid_files = []
g.error = None
g.download_xml = download_xml
g.filename = None
if request.method == 'POST':
if 'file' not in request.files:
return redirect(request.url)
file = request.files['file']
if file.filename == '':
return redirect(request.url)
g.filename = save_file(file)
verify_uploaded_files([file])
delete_records()
else:
g.upload_form = True
if V2:
if g.filename:
return redirect(url_for('preview_file', filename=g.filename))
else:
return render_template('v2/index.html', records=list(get_records()))
else:
return render_template('index.html', download_xml=g.download_xml, records=list(get_records()))
@app.route('/<filename>/to/testlink')
def download_file(filename):
full_path = join(app.config['UPLOAD_FOLDER'], filename)
if not exists(full_path):
abort(404)
xmind_to_testlink(full_path)
filename = filename[:-5] + 'xml'
return send_from_directory(app.config['UPLOAD_FOLDER'], filename, as_attachment=True)
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
@app.route('/preview/<filename>')
def preview_file(filename):
full_path = join(app.config['UPLOAD_FOLDER'], filename)
if not exists(full_path):
abort(404)
suite = xmind_to_suite(full_path)
suite_count = len(suite.sub_suites)
suite = flat_suite(suite)
return render_template('v2/preview.html', name=filename, suite=suite, suite_count=suite_count)
@app.errorhandler(Exception)
def app_error(e):
return str(e)
init()
if __name__ == '__main__':
app.run(HOST, debug=DEBUG, port=5001)
| true | true |
f7fd2c7b3875406c0082d068d4604f9815d68c42 | 1,142 | py | Python | audio_pouring/pickle/spilt_train_test_data.py | lianghongzhuo/AudioPouring | 25daabfb200eaab9f8fc269b8e882260f3bd6c6a | [
"MIT"
] | 6 | 2019-03-25T13:07:41.000Z | 2021-01-10T09:53:04.000Z | audio_pouring/pickle/spilt_train_test_data.py | lianghongzhuo/AudioPouring | 25daabfb200eaab9f8fc269b8e882260f3bd6c6a | [
"MIT"
] | 1 | 2020-11-19T17:43:57.000Z | 2020-11-19T17:43:57.000Z | audio_pouring/pickle/spilt_train_test_data.py | lianghongzhuo/AudioPouring | 25daabfb200eaab9f8fc269b8e882260f3bd6c6a | [
"MIT"
] | 5 | 2019-04-24T14:17:12.000Z | 2020-01-16T18:29:17.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : Hongzhuo Liang
# E-mail : liang@informatik.uni-hamburg.de
# Description: move 20% of file from train to test
# Date : 20/03/2019: 1:30 PM
# File Name : spilt_train_test_data
import glob
import os
import shutil
def split_bottle(bottle, test_ratio=0.2):
    """Move a ``test_ratio`` share of the train pickles for one bottle into the test folder.

    The file list is sorted so the split is reproducible (``glob`` order is
    filesystem dependent). The move only happens when the test folder is
    still empty, so re-running the script never shuffles files twice.

    Args:
        bottle: bottle id used in the folder names pickle_train_<id> / pickle_test_<id>.
        test_ratio: fraction of files moved to the test folder (default 20%).
    """
    train_dir = "./pickle_train_" + str(bottle)
    test_dir = "./pickle_test_" + str(bottle)
    train_files = sorted(glob.glob(os.path.join(train_dir, "*.pickle")))
    test_files = glob.glob(os.path.join(test_dir, "*.pickle"))
    if len(test_files) != 0:
        print("Pickle test folder for bottle {} is not empty".format(bottle))
        return
    if len(train_files) == 0:
        print("Pickle train folder for bottle {} is empty".format(bottle))
        return
    # Same arithmetic as before: train keeps int(n * (1 - ratio)), the rest moves.
    test_num = len(train_files) - int(len(train_files) * (1.0 - test_ratio))
    for test_file in train_files[:test_num]:
        shutil.move(test_file, test_dir + "/.")
    print("Done, bottle {}".format(bottle))


if __name__ == "__main__":
    for bottle in [1, 3, 4]:
        split_bottle(bottle)
import glob
import os
import shutil
if __name__ == "__main__":
for bottle in [1, 3, 4]:
pickle = glob.glob(os.path.join("./pickle_train_" + str(bottle), "*.pickle"))
pickle_test = glob.glob(os.path.join("./pickle_test_" + str(bottle), "*.pickle"))
pickle_num = len(pickle)
pickle_test_num = len(pickle_test)
if pickle_test_num == 0:
if pickle_num != 0:
train_num = int(pickle_num * 0.8)
test_num = pickle_num - train_num
for test_file in pickle[:test_num]:
shutil.move(test_file, "pickle_test_" + str(bottle) + "/.")
print("Done, bottle {}".format(bottle))
else:
print("Pickle train folder for bottle {} is empty".format(bottle))
else:
print("Pickle test folder for bottle {} is not empty".format(bottle))
| true | true |
f7fd2e2956a79e5dd0d74283a55acb2fe021635a | 19,799 | py | Python | monai/networks/utils.py | yiheng-wang-nv/MONAI | 08f9bac8b1bc304ee4d61d32e060eb1c3646da3a | [
"Apache-2.0"
] | 3 | 2020-06-22T20:59:14.000Z | 2021-04-09T21:24:45.000Z | monai/networks/utils.py | ericspod/MONAI | 885d5b947aeafc1a9bee2899cfd48fff9036e68a | [
"Apache-2.0"
] | null | null | null | monai/networks/utils.py | ericspod/MONAI | 885d5b947aeafc1a9bee2899cfd48fff9036e68a | [
"Apache-2.0"
] | 1 | 2020-06-22T19:22:59.000Z | 2020-06-22T19:22:59.000Z | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities and types for defining networks, these depend on PyTorch.
"""
import re
import warnings
from collections import OrderedDict
from contextlib import contextmanager
from typing import Any, Callable, Dict, Mapping, Optional, Sequence, Union
import torch
import torch.nn as nn
from monai.utils.deprecate_utils import deprecated_arg
from monai.utils.misc import ensure_tuple, set_determinism
from monai.utils.module import pytorch_after
__all__ = [
"one_hot",
"slice_channels",
"predict_segmentation",
"normalize_transform",
"to_norm_affine",
"normal_init",
"icnr_init",
"pixelshuffle",
"eval_mode",
"train_mode",
"copy_model_state",
"convert_to_torchscript",
]
def one_hot(labels: torch.Tensor, num_classes: int, dtype: torch.dtype = torch.float, dim: int = 1) -> torch.Tensor:
    """Convert a single-channel label tensor to one-hot format along `dim`.

    `labels` must have length 1 along `dim`; the result has length
    `num_classes` there, with exactly one 1 per position (background class
    included). Label values are cast via ``labels.long()`` for indexing.

    Args:
        labels: integer-valued tensor to encode.
        num_classes: number of output channels along `dim`.
        dtype: dtype of the returned one-hot tensor.
        dim: non-negative axis that becomes the class axis.
    """
    # Append singleton axes so that `dim` exists on the input.
    if labels.ndim < dim + 1:
        padded_shape = list(labels.shape) + [1] * (dim + 1 - labels.ndim)
        labels = torch.reshape(labels, padded_shape)
    out_shape = list(labels.shape)
    if out_shape[dim] != 1:
        raise AssertionError("labels should have a channel with length equal to one.")
    out_shape[dim] = num_classes
    encoded = torch.zeros(size=out_shape, dtype=dtype, device=labels.device)
    # scatter_ writes a 1 at the class index taken from `labels` at each position.
    return encoded.scatter_(dim=dim, index=labels.long(), value=1)
def slice_channels(tensor: torch.Tensor, *slicevals: Optional[int]) -> torch.Tensor:
    """Return `tensor` sliced along the channel dimension (dim 1).

    `slicevals` are forwarded to `slice()`, e.g. ``slice_channels(t, 2, 5)``
    keeps channels 2..4.
    """
    index = [slice(None)] * tensor.ndim
    index[1] = slice(*slicevals)
    # Index with a tuple: non-tuple sequences for multi-dimensional indexing
    # are deprecated in PyTorch and may raise in newer releases.
    return tensor[tuple(index)]
def predict_segmentation(logits: torch.Tensor, mutually_exclusive: bool = False, threshold: float = 0.0) -> Any:
    """Convert raw `BCHW[D]` network output into a discrete segmentation.

    Multi-label mode (default) thresholds every channel independently; with
    `mutually_exclusive=True` a channel-wise argmax is taken instead,
    falling back to thresholding (with a warning) for single-channel input.
    """
    if mutually_exclusive:
        if logits.shape[1] != 1:
            return logits.argmax(1, keepdim=True)
        warnings.warn("single channel prediction, `mutually_exclusive=True` ignored, use threshold instead.")
    return (logits >= threshold).int()
def normalize_transform(
    shape: Sequence[int],
    device: Optional[torch.device] = None,
    dtype: Optional[torch.dtype] = None,
    align_corners: bool = False,
) -> torch.Tensor:
    """Build a (1, d+1, d+1) affine mapping pixel coordinates of a grid of
    spatial size `shape` into the normalized [-1, 1] range.

    `align_corners` follows the `torch.nn.functional.grid_sample`
    convention: when True, -1/1 refer to the centers of the corner pixels
    rather than the image edges.
    """
    size = torch.tensor(shape, dtype=torch.float64, device=device)  # fresh tensor; caller data untouched
    if align_corners:
        size[size <= 1.0] = 2.0  # guard degenerate axes against division by zero
        diag_entries = 2.0 / (size - 1.0)
        translation = -1.0
    else:
        size[size <= 0.0] = 2.0
        diag_entries = 2.0 / size
        translation = 1.0 / torch.tensor(shape, dtype=torch.float64, device=device) - 1.0
    homogeneous_one = torch.ones((1,), dtype=torch.float64, device=device)
    affine = torch.diag(torch.cat((diag_entries, homogeneous_one)))
    affine[:-1, -1] = translation
    affine = affine.unsqueeze(0).to(dtype=dtype)
    affine.requires_grad = False
    return affine
def to_norm_affine(
    affine: torch.Tensor, src_size: Sequence[int], dst_size: Sequence[int], align_corners: bool = False
) -> torch.Tensor:
    """Convert a pixel-space affine into the equivalent normalized-space affine.

    Args:
        affine: batched Nxdxd square matrices in pixel coordinates.
        src_size: source image spatial shape.
        dst_size: target image spatial shape.
        align_corners: grid_sample corner convention, forwarded to
            :func:`normalize_transform`.

    Raises:
        TypeError: when ``affine`` is not a ``torch.Tensor``.
        ValueError: when ``affine`` is not Nxdxd, or the sizes do not match
            its spatial rank.
    """
    if not isinstance(affine, torch.Tensor):
        raise TypeError(f"affine must be a torch.Tensor but is {type(affine).__name__}.")
    if affine.ndimension() != 3 or affine.shape[1] != affine.shape[2]:
        raise ValueError(f"affine must be Nxdxd, got {tuple(affine.shape)}.")
    sr = affine.shape[1] - 1
    if len(src_size) != sr or len(dst_size) != sr:
        raise ValueError(f"affine suggests {sr}D, got src={len(src_size)}D, dst={len(dst_size)}D.")
    src_norm = normalize_transform(src_size, affine.device, affine.dtype, align_corners)
    dst_norm = normalize_transform(dst_size, affine.device, affine.dtype, align_corners)
    return src_norm @ affine @ torch.inverse(dst_norm)
def normal_init(
    m, std: float = 0.02, normal_func: Callable[[torch.Tensor, float, float], Any] = torch.nn.init.normal_
) -> None:
    """Initialize weights of `m` from a normal distribution with stddev `std`.

    Conv/Linear weights get mean 0 (bias set to 0); BatchNorm weights get
    mean 1 (bias set to 0). `normal_func` takes (tensor, mean, std) like the
    default ``torch.nn.init.normal_``. Use with ``nn.Module.apply``.
    """
    class_name = m.__class__.__name__
    is_conv_or_linear = class_name.find("Conv") != -1 or class_name.find("Linear") != -1
    if getattr(m, "weight", None) is not None and is_conv_or_linear:
        normal_func(m.weight.data, 0.0, std)
        if getattr(m, "bias", None) is not None:
            nn.init.constant_(m.bias.data, 0.0)
    elif class_name.find("BatchNorm") != -1:
        normal_func(m.weight.data, 1.0, std)
        nn.init.constant_(m.bias.data, 0)
def icnr_init(conv, upsample_factor, init=nn.init.kaiming_normal_):
    """Fill `conv.weight` with ICNR initialization (Aitken et al., 2017).

    Each group of ``upsample_factor ** ndim`` output channels shares one
    base kernel, which removes checkerboard artifacts after pixel shuffle.
    """
    out_channels, in_channels, *spatial = conv.weight.shape
    groups = upsample_factor ** len(spatial)
    base_out = int(out_channels / groups)
    base = init(torch.zeros([base_out, in_channels] + spatial))
    # Replicate each base kernel `groups` times along the output axis.
    base = base.transpose(0, 1)
    base = base.reshape(base_out, in_channels, -1)
    base = base.repeat(1, 1, groups)
    base = base.reshape([in_channels, out_channels] + spatial)
    base = base.transpose(0, 1)
    conv.weight.data.copy_(base)
@deprecated_arg(
    name="dimensions", new_name="spatial_dims", since="0.6", msg_suffix="Please use `spatial_dims` instead."
)
def pixelshuffle(
    x: torch.Tensor, spatial_dims: int, scale_factor: int, dimensions: Optional[int] = None
) -> torch.Tensor:
    """Rearrange channel blocks of `x` into upscaled spatial dimensions
    (sub-pixel convolution; Shi et al. 2016, Aitken et al. 2017).

    Args:
        x: input of shape (B, C, *spatial) where C is divisible by
            ``scale_factor ** spatial_dims``.
        spatial_dims: number of spatial dimensions, typically 2 or 3.
        scale_factor: factor to rescale the spatial dimensions by, >= 1.

    Returns:
        Reshuffled version of `x`.

    Raises:
        ValueError: when C is not divisible by ``scale_factor ** spatial_dims``.

    .. deprecated:: 0.6.0
        ``dimensions`` is deprecated, use ``spatial_dims`` instead.
    """
    if dimensions is not None:
        spatial_dims = dimensions
    in_shape = list(x.size())
    batch, channels = in_shape[:2]
    divisor = scale_factor ** spatial_dims
    if channels % divisor != 0:
        raise ValueError(
            f"Number of input channels ({channels}) must be evenly "
            f"divisible by scale_factor ** dimensions ({scale_factor}**{spatial_dims}={divisor})."
        )
    out_channels = channels // divisor
    out_shape = [batch, out_channels] + [side * scale_factor for side in in_shape[2:]]
    # Interleave the factor axes with the spatial axes, then collapse them.
    axes = tuple(range(2, 2 + 2 * spatial_dims))
    factor_axes, spatial_axes = axes[:spatial_dims], axes[spatial_dims:]
    permutation = (0, 1) + sum(zip(spatial_axes, factor_axes), ())
    x = x.reshape(batch, out_channels, *([scale_factor] * spatial_dims + in_shape[2:]))
    return x.permute(permutation).reshape(out_shape)
@contextmanager
def eval_mode(*nets: nn.Module):
    """Temporarily switch `nets` to eval() with gradients disabled.

    On exit, only the networks that were in training mode beforehand are
    put back into train(); networks that were already in eval stay there.
    Yields the networks after ``eval()``.
    """
    previously_training = [net for net in nets if net.training]
    try:
        with torch.no_grad():
            yield [net.eval() for net in nets]
    finally:
        for net in previously_training:
            net.train()
@contextmanager
def train_mode(*nets: nn.Module):
    """Temporarily switch `nets` to train() with gradients enabled.

    On exit, only the networks that were in eval mode beforehand are put
    back into eval(); networks already training stay that way. Yields the
    networks after ``train()``.
    """
    previously_eval = [net for net in nets if not net.training]
    try:
        with torch.set_grad_enabled(True):
            yield [net.train() for net in nets]
    finally:
        for net in previously_eval:
            net.eval()
def copy_model_state(
    dst: Union[torch.nn.Module, Mapping],
    src: Union[torch.nn.Module, Mapping],
    dst_prefix="",
    mapping=None,
    exclude_vars=None,
    inplace=True,
):
    """Build a `dst`-shaped state dict with matching values taken from `src`.

    Values of `dst` are overwritten by `src` wherever
    ``dst[dst_prefix + key]`` exists with the same shape as ``src[key]``.
    `mapping` ({"src_key": "dst_key"}) forces extra assignments regardless
    of shape (with a warning on mismatch); keys matching the `exclude_vars`
    regex (matched against `src` keys) are never copied. DataParallel /
    DistributedDataParallel wrappers are unwrapped first.

    Args:
        dst: module or state dict to be updated.
        src: module or state dict providing the new values.
        dst_prefix: prefix prepended to src keys when matching dst keys.
        mapping: explicit src->dst key mapping applied after the name match.
        exclude_vars: regex of keys to skip.
        inplace: when `dst` is a Module, also load the result into it.

    Returns:
        (updated OrderedDict, sorted changed keys, sorted unchanged keys).
    """
    if isinstance(src, (nn.DataParallel, nn.parallel.DistributedDataParallel)):
        src = src.module
    if isinstance(dst, (nn.DataParallel, nn.parallel.DistributedDataParallel)):
        dst = dst.module
    src_state = src.state_dict() if isinstance(src, torch.nn.Module) else src
    dst_state = OrderedDict(dst.state_dict() if isinstance(dst, torch.nn.Module) else dst)

    excluded = set()
    if exclude_vars:
        pattern = re.compile(exclude_vars)
        excluded = {key for key in src_state if pattern.search(key)}

    all_keys, updated_keys = list(dst_state), []
    # Pass 1: copy values whose (prefixed) names and shapes line up.
    for src_key, src_val in src_state.items():
        candidate = f"{dst_prefix}{src_key}"
        if candidate in dst_state and candidate not in excluded and dst_state[candidate].shape == src_val.shape:
            dst_state[candidate] = src_val
            updated_keys.append(candidate)
    # Pass 2: explicit mapping overrides, shape checked only for the warning.
    for src_key in (mapping if mapping else {}):
        candidate = f"{dst_prefix}{mapping[src_key]}"
        if candidate in dst_state and candidate not in excluded:
            if dst_state[candidate].shape != src_state[src_key].shape:
                warnings.warn(f"Param. shape changed from {dst_state[candidate].shape} to {src_state[src_key].shape}.")
            dst_state[candidate] = src_state[src_key]
            updated_keys.append(candidate)

    updated_keys = sorted(set(updated_keys))
    unchanged_keys = sorted(set(all_keys).difference(updated_keys))
    print(f"'dst' model updated: {len(updated_keys)} of {len(dst_state)} variables.")

    if inplace and isinstance(dst, torch.nn.Module):
        dst.load_state_dict(dst_state)
    return dst_state, updated_keys, unchanged_keys
def convert_to_torchscript(
    model: nn.Module,
    filename_or_obj: Optional[Any] = None,
    extra_files: Optional[Dict] = None,
    verify: bool = False,
    inputs: Optional[Sequence[Any]] = None,
    device: Optional[torch.device] = None,
    rtol: float = 1e-4,
    atol: float = 0.0,
    **kwargs,
):
    """Script `model` with TorchScript, optionally saving and verifying it.

    Args:
        model: source PyTorch model.
        filename_or_obj: when given, a path or file-like object the scripted
            model is saved to (and, if verifying, re-loaded from).
        extra_files: extra files stored with the saved model (PyTorch 1.7+).
        verify: compare eager vs scripted outputs on `inputs`.
        inputs: positional test arguments for `model()`; required if `verify`.
        device: device for verification (default: CUDA if available).
        rtol: relative tolerance for the output comparison.
        atol: absolute tolerance for the output comparison.
        kwargs: forwarded to ``torch.jit.script``.

    Raises:
        ValueError: when `verify` is True but `inputs` is None.
    """
    model.eval()
    with torch.no_grad():
        script_module = torch.jit.script(model, **kwargs)
        if filename_or_obj is not None:
            if pytorch_after(1, 7):
                # `_extra_files` is only supported from PyTorch 1.7 on.
                torch.jit.save(m=script_module, f=filename_or_obj, _extra_files=extra_files)
            else:
                torch.jit.save(m=script_module, f=filename_or_obj)
    if not verify:
        return script_module

    target_device = device if device is not None else torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if inputs is None:
        raise ValueError("missing input data for verification.")
    test_inputs = [i.to(target_device) if isinstance(i, torch.Tensor) else i for i in inputs]
    # Round-trip through the file (when given) so the saved artifact is what gets verified.
    ts_model = script_module if filename_or_obj is None else torch.jit.load(filename_or_obj)
    ts_model.eval().to(target_device)
    model = model.to(target_device)

    with torch.no_grad():
        set_determinism(seed=0)
        eager_out = ensure_tuple(model(*test_inputs))
        set_determinism(seed=0)
        scripted_out = ensure_tuple(ts_model(*test_inputs))
        set_determinism(seed=None)
    # Compare eager vs scripted outputs element-wise within the tolerances.
    for eager_res, scripted_res in zip(eager_out, scripted_out):
        if isinstance(eager_res, torch.Tensor) or isinstance(scripted_res, torch.Tensor):
            torch.testing.assert_allclose(eager_res, scripted_res, rtol=rtol, atol=atol)
    return script_module
| 39.837022 | 117 | 0.662256 |
import re
import warnings
from collections import OrderedDict
from contextlib import contextmanager
from typing import Any, Callable, Dict, Mapping, Optional, Sequence, Union
import torch
import torch.nn as nn
from monai.utils.deprecate_utils import deprecated_arg
from monai.utils.misc import ensure_tuple, set_determinism
from monai.utils.module import pytorch_after
__all__ = [
"one_hot",
"slice_channels",
"predict_segmentation",
"normalize_transform",
"to_norm_affine",
"normal_init",
"icnr_init",
"pixelshuffle",
"eval_mode",
"train_mode",
"copy_model_state",
"convert_to_torchscript",
]
def one_hot(labels: torch.Tensor, num_classes: int, dtype: torch.dtype = torch.float, dim: int = 1) -> torch.Tensor:
if labels.ndim < dim + 1:
shape = list(labels.shape) + [1] * (dim + 1 - len(labels.shape))
labels = torch.reshape(labels, shape)
sh = list(labels.shape)
if sh[dim] != 1:
raise AssertionError("labels should have a channel with length equal to one.")
sh[dim] = num_classes
o = torch.zeros(size=sh, dtype=dtype, device=labels.device)
labels = o.scatter_(dim=dim, index=labels.long(), value=1)
return labels
def slice_channels(tensor: torch.Tensor, *slicevals: Optional[int]) -> torch.Tensor:
slices = [slice(None)] * len(tensor.shape)
slices[1] = slice(*slicevals)
return tensor[slices]
def predict_segmentation(logits: torch.Tensor, mutually_exclusive: bool = False, threshold: float = 0.0) -> Any:
if not mutually_exclusive:
return (logits >= threshold).int()
if logits.shape[1] == 1:
warnings.warn("single channel prediction, `mutually_exclusive=True` ignored, use threshold instead.")
return (logits >= threshold).int()
return logits.argmax(1, keepdim=True)
def normalize_transform(
shape: Sequence[int],
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
align_corners: bool = False,
) -> torch.Tensor:
norm = torch.tensor(shape, dtype=torch.float64, device=device)
if align_corners:
norm[norm <= 1.0] = 2.0
norm = 2.0 / (norm - 1.0)
norm = torch.diag(torch.cat((norm, torch.ones((1,), dtype=torch.float64, device=device))))
norm[:-1, -1] = -1.0
else:
norm[norm <= 0.0] = 2.0
norm = 2.0 / norm
norm = torch.diag(torch.cat((norm, torch.ones((1,), dtype=torch.float64, device=device))))
norm[:-1, -1] = 1.0 / torch.tensor(shape, dtype=torch.float64, device=device) - 1.0
norm = norm.unsqueeze(0).to(dtype=dtype)
norm.requires_grad = False
return norm
def to_norm_affine(
affine: torch.Tensor, src_size: Sequence[int], dst_size: Sequence[int], align_corners: bool = False
) -> torch.Tensor:
if not isinstance(affine, torch.Tensor):
raise TypeError(f"affine must be a torch.Tensor but is {type(affine).__name__}.")
if affine.ndimension() != 3 or affine.shape[1] != affine.shape[2]:
raise ValueError(f"affine must be Nxdxd, got {tuple(affine.shape)}.")
sr = affine.shape[1] - 1
if sr != len(src_size) or sr != len(dst_size):
raise ValueError(f"affine suggests {sr}D, got src={len(src_size)}D, dst={len(dst_size)}D.")
src_xform = normalize_transform(src_size, affine.device, affine.dtype, align_corners)
dst_xform = normalize_transform(dst_size, affine.device, affine.dtype, align_corners)
return src_xform @ affine @ torch.inverse(dst_xform)
def normal_init(
m, std: float = 0.02, normal_func: Callable[[torch.Tensor, float, float], Any] = torch.nn.init.normal_
) -> None:
cname = m.__class__.__name__
if getattr(m, "weight", None) is not None and (cname.find("Conv") != -1 or cname.find("Linear") != -1):
normal_func(m.weight.data, 0.0, std)
if getattr(m, "bias", None) is not None:
nn.init.constant_(m.bias.data, 0.0)
elif cname.find("BatchNorm") != -1:
normal_func(m.weight.data, 1.0, std)
nn.init.constant_(m.bias.data, 0)
def icnr_init(conv, upsample_factor, init=nn.init.kaiming_normal_):
out_channels, in_channels, *dims = conv.weight.shape
scale_factor = upsample_factor ** len(dims)
oc2 = int(out_channels / scale_factor)
kernel = torch.zeros([oc2, in_channels] + dims)
kernel = init(kernel)
kernel = kernel.transpose(0, 1)
kernel = kernel.reshape(oc2, in_channels, -1)
kernel = kernel.repeat(1, 1, scale_factor)
kernel = kernel.reshape([in_channels, out_channels] + dims)
kernel = kernel.transpose(0, 1)
conv.weight.data.copy_(kernel)
@deprecated_arg(
name="dimensions", new_name="spatial_dims", since="0.6", msg_suffix="Please use `spatial_dims` instead."
)
def pixelshuffle(
x: torch.Tensor, spatial_dims: int, scale_factor: int, dimensions: Optional[int] = None
) -> torch.Tensor:
if dimensions is not None:
spatial_dims = dimensions
dim, factor = spatial_dims, scale_factor
input_size = list(x.size())
batch_size, channels = input_size[:2]
scale_divisor = factor ** dim
if channels % scale_divisor != 0:
raise ValueError(
f"Number of input channels ({channels}) must be evenly "
f"divisible by scale_factor ** dimensions ({factor}**{dim}={scale_divisor})."
)
org_channels = channels // scale_divisor
output_size = [batch_size, org_channels] + [d * factor for d in input_size[2:]]
indices = tuple(range(2, 2 + 2 * dim))
indices_factor, indices_dim = indices[:dim], indices[dim:]
permute_indices = (0, 1) + sum(zip(indices_dim, indices_factor), ())
x = x.reshape(batch_size, org_channels, *([factor] * dim + input_size[2:]))
x = x.permute(permute_indices).reshape(output_size)
return x
@contextmanager
def eval_mode(*nets: nn.Module):
training = [n for n in nets if n.training]
try:
with torch.no_grad():
yield [n.eval() for n in nets]
finally:
for n in training:
n.train()
@contextmanager
def train_mode(*nets: nn.Module):
eval_list = [n for n in nets if not n.training]
try:
with torch.set_grad_enabled(True):
yield [n.train() for n in nets]
finally:
for n in eval_list:
n.eval()
def copy_model_state(
dst: Union[torch.nn.Module, Mapping],
src: Union[torch.nn.Module, Mapping],
dst_prefix="",
mapping=None,
exclude_vars=None,
inplace=True,
):
if isinstance(src, (nn.DataParallel, nn.parallel.DistributedDataParallel)):
src = src.module
if isinstance(dst, (nn.DataParallel, nn.parallel.DistributedDataParallel)):
dst = dst.module
src_dict = src.state_dict() if isinstance(src, torch.nn.Module) else src
dst_dict = dst.state_dict() if isinstance(dst, torch.nn.Module) else dst
dst_dict = OrderedDict(dst_dict)
to_skip = {s_key for s_key in src_dict if exclude_vars and re.compile(exclude_vars).search(s_key)}
all_keys, updated_keys = list(dst_dict), list()
for s, val in src_dict.items():
dst_key = f"{dst_prefix}{s}"
if dst_key in dst_dict and dst_key not in to_skip and dst_dict[dst_key].shape == val.shape:
dst_dict[dst_key] = val
updated_keys.append(dst_key)
for s in mapping if mapping else {}:
dst_key = f"{dst_prefix}{mapping[s]}"
if dst_key in dst_dict and dst_key not in to_skip:
if dst_dict[dst_key].shape != src_dict[s].shape:
warnings.warn(f"Param. shape changed from {dst_dict[dst_key].shape} to {src_dict[s].shape}.")
dst_dict[dst_key] = src_dict[s]
updated_keys.append(dst_key)
updated_keys = sorted(set(updated_keys))
unchanged_keys = sorted(set(all_keys).difference(updated_keys))
print(f"'dst' model updated: {len(updated_keys)} of {len(dst_dict)} variables.")
if inplace and isinstance(dst, torch.nn.Module):
dst.load_state_dict(dst_dict)
return dst_dict, updated_keys, unchanged_keys
def convert_to_torchscript(
    model: nn.Module,
    filename_or_obj: Optional[Any] = None,
    extra_files: Optional[Dict] = None,
    verify: bool = False,
    inputs: Optional[Sequence[Any]] = None,
    device: Optional[torch.device] = None,
    rtol: float = 1e-4,
    atol: float = 0.0,
    **kwargs,
):
    """Convert ``model`` to TorchScript via ``torch.jit.script``, optionally saving and verifying it.

    Args:
        model: module to script; switched to eval mode before conversion.
        filename_or_obj: if not None, the scripted module is saved to this path or file-like object.
        extra_files: extra archive files forwarded to ``torch.jit.save`` (only on newer PyTorch).
        verify: if True, run both the eager and scripted models on ``inputs`` and compare outputs.
        inputs: sample inputs for verification; required when ``verify`` is True.
        device: device used for verification; defaults to CUDA when available, else CPU.
        rtol: relative tolerance for the output comparison.
        atol: absolute tolerance for the output comparison.
        kwargs: forwarded to ``torch.jit.script``.

    Returns:
        The scripted module.

    Raises:
        ValueError: when ``verify`` is True but ``inputs`` is None.
    """
    model.eval()
    with torch.no_grad():
        script_module = torch.jit.script(model, **kwargs)
        if filename_or_obj is not None:
            # pytorch_after(1, 7) presumably checks "PyTorch version >= 1.7" (helper defined
            # elsewhere in this module — TODO confirm); ``_extra_files`` is only passed there.
            if not pytorch_after(1, 7):
                torch.jit.save(m=script_module, f=filename_or_obj)
            else:
                torch.jit.save(m=script_module, f=filename_or_obj, _extra_files=extra_files)
    if verify:
        if device is None:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if inputs is None:
            raise ValueError("missing input data for verification.")

        inputs = [i.to(device) if isinstance(i, torch.Tensor) else i for i in inputs]
        # When a file was written, reload it so the saved artifact is what gets verified.
        ts_model = torch.jit.load(filename_or_obj) if filename_or_obj is not None else script_module
        ts_model.eval().to(device)
        model = model.to(device)

        with torch.no_grad():
            # Pin the RNG before each forward pass so stochastic models are comparable.
            # (set_determinism / ensure_tuple are defined elsewhere in this module.)
            set_determinism(seed=0)
            torch_out = ensure_tuple(model(*inputs))
            set_determinism(seed=0)
            torchscript_out = ensure_tuple(ts_model(*inputs))
            set_determinism(seed=None)
        # Element-wise comparison of eager vs. scripted outputs.
        for r1, r2 in zip(torch_out, torchscript_out):
            if isinstance(r1, torch.Tensor) or isinstance(r2, torch.Tensor):
                torch.testing.assert_allclose(r1, r2, rtol=rtol, atol=atol)

    return script_module
| true | true |
f7fd2f5a3f4427884533dedeb7ae9d2ef7ea9094 | 701 | py | Python | multiprocessing/5_concurrent_futures.py | BenedictusAryo/python_threading | 8133315ed59b2ad927dfe748f63a1f41dda3d0bc | [
"MIT"
] | 1 | 2021-02-01T10:14:22.000Z | 2021-02-01T10:14:22.000Z | multiprocessing/5_concurrent_futures.py | BenedictusAryo/python_threading | 8133315ed59b2ad927dfe748f63a1f41dda3d0bc | [
"MIT"
] | null | null | null | multiprocessing/5_concurrent_futures.py | BenedictusAryo/python_threading | 8133315ed59b2ad927dfe748f63a1f41dda3d0bc | [
"MIT"
] | null | null | null | # 5 Using concurrent future multiprocessing in python
import concurrent.futures
import time
# Create simple function that sleep in 1 second
def do_something(seconds):
    """Sleep for ``seconds`` seconds, announcing the sleep and returning a completion message."""
    message = f'Sleeping {seconds} second(s) ..'
    print(message)
    time.sleep(seconds)
    result = 'Done Sleeping...'
    return result
if __name__ == '__main__':
    # Time two 1-second sleeps executed in parallel worker processes.
    start = time.perf_counter()

    with concurrent.futures.ProcessPoolExecutor() as executor:
        futures = [executor.submit(do_something, 1) for _ in range(2)]
        for future in futures:
            print(future.result())

    # Report total wall-clock runtime.
    finish = time.perf_counter()
    print(f"Finished in {round(finish-start, 2)} second(s)")
| 29.208333 | 62 | 0.687589 |
import concurrent.futures
import time
def do_something(seconds):
print(f'Sleeping {seconds} second(s) ..')
time.sleep(seconds)
return 'Done Sleeping...'
if __name__ == '__main__':
start = time.perf_counter()
with concurrent.futures.ProcessPoolExecutor() as executor:
f1 = executor.submit(do_something, 1)
f2 = executor.submit(do_something, 1)
print(f1.result())
print(f2.result())
finish = time.perf_counter()
print(f"Finished in {round(finish-start, 2)} second(s)")
| true | true |
f7fd2fad3cde153bc58452438ae989c197b3f3ef | 7,748 | py | Python | model/backbone.py | buriedms/MASTER-paddle | e843b5e5d5bee27e2043c31c2d1f67a08ca63694 | [
"MIT"
] | null | null | null | model/backbone.py | buriedms/MASTER-paddle | e843b5e5d5bee27e2043c31c2d1f67a08ca63694 | [
"MIT"
] | null | null | null | model/backbone.py | buriedms/MASTER-paddle | e843b5e5d5bee27e2043c31c2d1f67a08ca63694 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author: Wenwen Yu
# @Created Time: 10/4/2020 14:19
# from torch import nn
from model.context_block import MultiAspectGCAttention
from paddle import nn
from .init import constant_,kaiming_normal_
# CNN for Feature Extraction + Multi-Aspect GCAttention
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    # bias-free because every use site is followed by BatchNorm2D.
    return nn.Conv2D(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias_attr=False)
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    layer = nn.Conv2D(in_planes, out_planes, kernel_size=1, stride=stride, bias_attr=False)
    return layer
class BasicBlock(nn.Layer):
    """Residual basic block (two 3x3 convs), optionally followed by
    multi-aspect global-context attention before the residual addition."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, use_gcb=False, gcb_config=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2D(planes, momentum=0.9)
        self.relu = nn.ReLU()
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2D(planes, momentum=0.9)
        self.downsample = downsample
        self.stride = stride
        self.use_gcb = use_gcb
        if self.use_gcb:
            # Attention hyper-parameters come from the shared gcb config dict.
            self.context_block = MultiAspectGCAttention(
                inplanes=planes,
                ratio=gcb_config['ratio'],
                headers=gcb_config['headers'],
                att_scale=gcb_config['att_scale'],
                fusion_type=gcb_config['fusion_type'],
            )

    def forward(self, x):
        shortcut = x
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        if self.use_gcb:
            y = self.context_block(y)
        if self.downsample is not None:
            shortcut = self.downsample(x)
        y += shortcut
        return self.relu(y)
class Bottleneck(nn.Layer):
    """Standard ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4),
    with an identity (or projected) shortcut."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = nn.BatchNorm2D(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.BatchNorm2D(planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = nn.BatchNorm2D(planes * self.expansion)
        self.relu = nn.ReLU()
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the shortcut when channel/stride change, otherwise pass-through.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
class ResNet(nn.Layer):
    """CNN trunk for text-line images, built from residual stages.

    Overall downsampling is 8x in height and 4x in width (two 2x2 pools plus
    one (2, 1) pool). The ``gcb`` dict selects per-stage global-context
    attention ('layers') and carries the attention hyper-parameters consumed
    by BasicBlock ('ratio', 'headers', 'att_scale', 'fusion_type').
    """

    def __init__(self, block, layers, zero_init_residual=False, gcb=None, in_channels=1):
        super(ResNet, self).__init__()
        gcb_config = gcb

        self.inplanes = 128
        # Stem: two 3x3 convs (in_channels -> 64 -> 128) at full resolution.
        self.conv1 = nn.Conv2D(in_channels, 64, kernel_size=3, stride=1, padding=1, bias_attr=False)
        self.bn1 = nn.BatchNorm2D(64)
        self.relu1 = nn.ReLU()

        self.conv2 = nn.Conv2D(64, 128, kernel_size=3, stride=1, padding=1, bias_attr=False)
        self.bn2 = nn.BatchNorm2D(128)
        self.relu2 = nn.ReLU()

        self.maxpool1 = nn.MaxPool2D(kernel_size=2, stride=2)
        self.layer1 = self._make_layer(block, 256, layers[0], stride=1, gcb_config=gcb_config,
                                       use_gcb=gcb_config['layers'][0])

        self.conv3 = nn.Conv2D(256, 256, kernel_size=3, stride=1, padding=1, bias_attr=False)
        self.bn3 = nn.BatchNorm2D(256)
        self.relu3 = nn.ReLU()

        self.maxpool2 = nn.MaxPool2D(kernel_size=2, stride=2)
        self.layer2 = self._make_layer(block, 256, layers[1], stride=1, gcb_config=gcb_config,
                                       use_gcb=gcb_config['layers'][1])

        self.conv4 = nn.Conv2D(256, 256, kernel_size=3, stride=1, padding=1, bias_attr=False)
        self.bn4 = nn.BatchNorm2D(256)
        self.relu4 = nn.ReLU()

        # (2, 1) pool: halve height only, preserving horizontal resolution for text.
        self.maxpool3 = nn.MaxPool2D(kernel_size=(2, 1), stride=(2, 1))
        self.layer3 = self._make_layer(block, 512, layers[2], stride=1, gcb_config=gcb_config,
                                       use_gcb=gcb_config['layers'][2])

        self.conv5 = nn.Conv2D(512, 512, kernel_size=3, stride=1, padding=1, bias_attr=False)
        self.bn5 = nn.BatchNorm2D(512)
        self.relu5 = nn.ReLU()

        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, gcb_config=gcb_config,
                                       use_gcb=gcb_config['layers'][3])

        self.conv6 = nn.Conv2D(512, 512, kernel_size=3, stride=1, padding=1, bias_attr=False)
        self.bn6 = nn.BatchNorm2D(512)
        self.relu6 = nn.ReLU()

        # Kaiming init for convs; unit weight / zero bias for batch norms.
        for m in self.sublayers():
            if isinstance(m, nn.Conv2D):
                kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2D):
                constant_(m.weight, 1)
                constant_(m.bias, 0)

        # Zero-init the last BN of each residual block so blocks start near identity.
        if zero_init_residual:
            for m in self.sublayers():
                if isinstance(m, Bottleneck):
                    constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, use_gcb=False, gcb_config=None):
        """Build one residual stage of ``blocks`` blocks; a 1x1 projection
        shortcut is added when stride or channel count changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2D(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, use_gcb=use_gcb, gcb_config=gcb_config))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        # Stem.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu1(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu2(x)

        # Stage 1 (after 2x2 pool).
        x = self.maxpool1(x)
        x = self.layer1(x)

        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu3(x)

        # Stage 2 (after 2x2 pool).
        x = self.maxpool2(x)
        x = self.layer2(x)

        x = self.conv4(x)
        x = self.bn4(x)
        x = self.relu4(x)

        # Stage 3 (after height-only pool).
        x = self.maxpool3(x)
        x = self.layer3(x)

        x = self.conv5(x)
        x = self.bn5(x)
        x = self.relu5(x)

        # Stage 4.
        x = self.layer4(x)

        x = self.conv6(x)
        x = self.bn6(x)
        x = self.relu6(x)

        return x
def resnet50(gcb_kwargs, in_channels=1):
    """Build the backbone used by ConvEmbeddingGC.

    NOTE(review): despite the name, this uses BasicBlock with stage sizes
    [1, 2, 5, 3], not the canonical ResNet-50 Bottleneck/[3, 4, 6, 3].
    """
    return ResNet(BasicBlock, [1, 2, 5, 3], gcb=gcb_kwargs, in_channels=in_channels)
class ConvEmbeddingGC(nn.Layer):
    """CNN feature extractor that flattens its output maps into a token
    sequence of shape (B, H*W, C) for a downstream sequence model."""

    def __init__(self, gcb_kwargs, in_channels=1):
        super().__init__()
        self.backbone = resnet50(gcb_kwargs, in_channels=in_channels)

    def forward(self, x):
        feat = self.backbone(x)
        batch, channels, height, width = feat.shape
        # Collapse the spatial grid into one axis, then move channels last.
        flat = feat.reshape((batch, channels, height * width))
        return flat.transpose((0, 2, 1))
| 32.41841 | 111 | 0.5746 |
from model.context_block import MultiAspectGCAttention
from paddle import nn
from .init import constant_,kaiming_normal_
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2D(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias_attr=False)
def conv1x1(in_planes, out_planes, stride=1):
return nn.Conv2D(in_planes, out_planes, kernel_size=1, stride=stride, bias_attr=False)
class BasicBlock(nn.Layer):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, use_gcb=False, gcb_config=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2D(planes, momentum=0.9)
self.relu = nn.ReLU()
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2D(planes, momentum=0.9)
self.downsample = downsample
self.stride = stride
self.use_gcb = use_gcb
if self.use_gcb:
gcb_ratio = gcb_config['ratio']
gcb_headers = gcb_config['headers']
att_scale = gcb_config['att_scale']
fusion_type = gcb_config['fusion_type']
self.context_block = MultiAspectGCAttention(inplanes=planes,
ratio=gcb_ratio,
headers=gcb_headers,
att_scale=att_scale,
fusion_type=fusion_type)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.use_gcb:
out = self.context_block(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Layer):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2D(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn2 = nn.BatchNorm2D(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2D(planes * self.expansion)
self.relu = nn.ReLU()
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Layer):
def __init__(self, block, layers, zero_init_residual=False, gcb=None, in_channels=1):
super(ResNet, self).__init__()
gcb_config = gcb
self.inplanes = 128
self.conv1 = nn.Conv2D(in_channels, 64, kernel_size=3, stride=1, padding=1, bias_attr=False)
self.bn1 = nn.BatchNorm2D(64)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2D(64, 128, kernel_size=3, stride=1, padding=1, bias_attr=False)
self.bn2 = nn.BatchNorm2D(128)
self.relu2 = nn.ReLU()
self.maxpool1 = nn.MaxPool2D(kernel_size=2, stride=2)
self.layer1 = self._make_layer(block, 256, layers[0], stride=1, gcb_config=gcb_config,
use_gcb=gcb_config['layers'][0])
self.conv3 = nn.Conv2D(256, 256, kernel_size=3, stride=1, padding=1, bias_attr=False)
self.bn3 = nn.BatchNorm2D(256)
self.relu3 = nn.ReLU()
self.maxpool2 = nn.MaxPool2D(kernel_size=2, stride=2)
self.layer2 = self._make_layer(block, 256, layers[1], stride=1, gcb_config=gcb_config,
use_gcb=gcb_config['layers'][1])
self.conv4 = nn.Conv2D(256, 256, kernel_size=3, stride=1, padding=1, bias_attr=False)
self.bn4 = nn.BatchNorm2D(256)
self.relu4 = nn.ReLU()
self.maxpool3 = nn.MaxPool2D(kernel_size=(2, 1), stride=(2, 1))
self.layer3 = self._make_layer(block, 512, layers[2], stride=1, gcb_config=gcb_config,
use_gcb=gcb_config['layers'][2])
self.conv5 = nn.Conv2D(512, 512, kernel_size=3, stride=1, padding=1, bias_attr=False)
self.bn5 = nn.BatchNorm2D(512)
self.relu5 = nn.ReLU()
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, gcb_config=gcb_config,
use_gcb=gcb_config['layers'][3])
self.conv6 = nn.Conv2D(512, 512, kernel_size=3, stride=1, padding=1, bias_attr=False)
self.bn6 = nn.BatchNorm2D(512)
self.relu6 = nn.ReLU()
for m in self.sublayers():
if isinstance(m, nn.Conv2D):
kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2D):
constant_(m.weight, 1)
constant_(m.bias, 0)
if zero_init_residual:
for m in self.sublayers():
if isinstance(m, Bottleneck):
constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, use_gcb=False, gcb_config=None):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2D(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, use_gcb=use_gcb, gcb_config=gcb_config))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu2(x)
x = self.maxpool1(x)
x = self.layer1(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu3(x)
x = self.maxpool2(x)
x = self.layer2(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.relu4(x)
x = self.maxpool3(x)
x = self.layer3(x)
x = self.conv5(x)
x = self.bn5(x)
x = self.relu5(x)
x = self.layer4(x)
x = self.conv6(x)
x = self.bn6(x)
x = self.relu6(x)
return x
def resnet50(gcb_kwargs, in_channels=1):
model = ResNet(BasicBlock, [1, 2, 5, 3], gcb=gcb_kwargs, in_channels=in_channels)
return model
class ConvEmbeddingGC(nn.Layer):
def __init__(self, gcb_kwargs, in_channels=1):
super().__init__()
self.backbone = resnet50(gcb_kwargs, in_channels=in_channels)
def forward(self, x):
feature = self.backbone(x)
b, c, h, w = feature.shape
feature = feature.reshape((b, c, h * w))
feature = feature.transpose((0, 2, 1))
return feature
| true | true |
f7fd30b57df11b122a0c4608a10476267940c007 | 9,102 | py | Python | models/inverse_warp.py | jytime/Deep-SfM-Revisited | 7645c7d524df8c8798ccc1902c1368b4ed59708a | [
"MIT"
] | 126 | 2021-06-17T09:18:30.000Z | 2022-03-18T02:53:34.000Z | models/inverse_warp.py | jytime/Deep-SfM-Revisited | 7645c7d524df8c8798ccc1902c1368b4ed59708a | [
"MIT"
] | 12 | 2021-06-23T05:34:32.000Z | 2022-03-28T12:31:21.000Z | models/inverse_warp.py | jytime/Deep-SfM-Revisited | 7645c7d524df8c8798ccc1902c1368b4ed59708a | [
"MIT"
] | 9 | 2021-07-05T01:54:17.000Z | 2022-02-10T16:39:24.000Z | from __future__ import division
import torch
from torch.autograd import Variable
import torch.nn.functional as F
pixel_coords = None
def set_id_grid(depth):
    """Cache a (1, 3, H, W) grid of homogeneous pixel coordinates (x, y, 1)
    matching the spatial size and dtype/device of ``depth`` in the module-level
    ``pixel_coords`` global."""
    global pixel_coords
    _, height, width = depth.size()
    ys = torch.arange(0, height).view(1, height, 1).expand(1, height, width)
    xs = torch.arange(0, width).view(1, 1, width).expand(1, height, width)
    ones = torch.ones(1, height, width)
    # Channel order is (x, y, 1) so K^-1 @ coords yields camera rays directly.
    pixel_coords = torch.stack(
        (Variable(xs).type_as(depth), Variable(ys).type_as(depth), Variable(ones).type_as(depth)),
        dim=1,
    )
def check_sizes(input, input_name, expected):
    """Assert that ``input`` has ndim == len(expected) and that each axis whose
    spec character is a digit (e.g. '3' in 'B34') has exactly that size;
    letters act as wildcards."""
    # All checks are evaluated (no short-circuit) to match the original
    # error behaviour when an axis index is out of range.
    checks = [input.ndimension() == len(expected)]
    for axis, spec in enumerate(expected):
        if spec.isdigit():
            checks.append(input.size(axis) == int(spec))
    assert(all(checks)), "wrong size for {}, expected {}, got {}".format(input_name, 'x'.join(expected), list(input.size()))
def pixel2cam(depth, intrinsics_inv):
    """Transform coordinates in the pixel frame to the camera frame.

    Args:
        depth: depth maps -- [B, H, W]
        intrinsics_inv: inverse intrinsics for each element of batch -- [B, 3, 3]
    Returns:
        array of (u,v,1) cam coordinates scaled by depth -- [B, 3, H, W]
    """
    global pixel_coords
    b, h, w = depth.size()
    # Rebuild the cached identity grid whenever the spatial size changes.
    if (pixel_coords is None) or (pixel_coords.shape[-2:] != depth.shape[-2:]):
        set_id_grid(depth)
    flat_coords = pixel_coords[:, :, :h, :w].expand(b, 3, h, w).contiguous().view(b, 3, -1).cuda()
    # Back-project: K^-1 @ homogeneous pixels gives unit-depth rays.
    cam_coords = intrinsics_inv.bmm(flat_coords).view(b, 3, h, w)
    return cam_coords * depth.unsqueeze(1)
def cam2pixel(cam_coords, proj_c2p_rot, proj_c2p_tr, padding_mode):
    """Transform coordinates in the camera frame to the pixel frame.

    Args:
        cam_coords: pixel coordinates defined in the first camera coordinates system -- [B, 3, H, W]
        proj_c2p_rot: rotation part of the projection (or None for identity) -- [B, 3, 3]
        proj_c2p_tr: translation part of the projection (or None) -- [B, 3, 1]
        padding_mode: 'zeros' pushes out-of-view points to 2 so grid_sample
            returns zeros instead of a border blend.
    Returns:
        array of [-1, 1] normalized sampling coordinates -- [B, H, W, 2]
    """
    b, _, h, w = cam_coords.size()
    flat = cam_coords.view(b, 3, -1)
    pcoords = flat if proj_c2p_rot is None else proj_c2p_rot.bmm(flat)
    if proj_c2p_tr is not None:
        pcoords = pcoords + proj_c2p_tr
    X, Y = pcoords[:, 0], pcoords[:, 1]
    # Clamp depth away from zero to avoid division blow-ups behind the camera.
    Z = pcoords[:, 2].clamp(min=1e-3)

    # Perspective divide, then map to [-1, 1] normalized device coordinates.
    X_norm = 2 * (X / Z) / (w - 1) - 1
    Y_norm = 2 * (Y / Z) / (h - 1) - 1
    if padding_mode == 'zeros':
        X_norm[((X_norm > 1) + (X_norm < -1)).detach()] = 2
        Y_norm[((Y_norm > 1) + (Y_norm < -1)).detach()] = 2

    grid = torch.stack([X_norm, Y_norm], dim=2)
    return grid.view(b, h, w, 2)
def pose_vec2mat(vec, rotation_mode='euler'):
    """
    Convert 6DoF parameters to a transformation matrix.
    Args:
        vec: 6DoF parameters in the order of tx, ty, tz, rx, ry, rz -- [B, 6]
        rotation_mode: 'euler' or 'quat' rotation parameterization.
    Returns:
        A transformation matrix -- [B, 3, 4]
    """
    # NOTE(review): euler2mat / quat2mat are not defined in this file's visible
    # scope — presumably imported or defined elsewhere in the module; confirm.
    translation = vec[:, :3].unsqueeze(-1)  # [B, 3, 1]
    rot = vec[:,3:]
    if rotation_mode == 'euler':
        rot_mat = euler2mat(rot)  # [B, 3, 3]
    elif rotation_mode == 'quat':
        rot_mat = quat2mat(rot)  # [B, 3, 3]
    transform_mat = torch.cat([rot_mat, translation], dim=2)  # [B, 3, 4]
    return transform_mat
def pose2flow(depth, pose, intrinsics, intrinsics_inv, rotation_mode='euler', padding_mode=None):
    """
    Converts a pose matrix plus depth to the induced rigid optical flow.

    Args:
        depth: target depth map -- [B, H, W]
        pose: target-to-source pose matrix -- [B, 3, 4]
        intrinsics / intrinsics_inv: camera intrinsics and inverse -- [B, 3, 3]
        rotation_mode: unused here (pose is already a matrix).
    Returns:
        flow field (dx, dy) in pixels -- [B, 2, H, W]
    """
    check_sizes(depth, 'depth', 'BHW')
    check_sizes(pose, 'pose', 'B34')
    check_sizes(intrinsics, 'intrinsics', 'B33')
    check_sizes(intrinsics_inv, 'intrinsics', 'B33')

    assert(intrinsics_inv.size() == intrinsics.size())

    bs, h, w = depth.size()

    # Identity pixel grid used to convert absolute positions into flow.
    grid_x = Variable(torch.arange(0, w).view(1, 1, w).expand(1,h,w), requires_grad=False).type_as(depth).expand_as(depth)  # [bs, H, W]
    grid_y = Variable(torch.arange(0, h).view(1, h, 1).expand(1,h,w), requires_grad=False).type_as(depth).expand_as(depth)  # [bs, H, W]

    cam_coords = pixel2cam(depth, intrinsics_inv)  # [B,3,H,W]
    pose_mat = pose.cuda()

    # Get projection matrix for tgt camera frame to source pixel frame
    proj_cam_to_src_pixel = intrinsics.bmm(pose_mat)  # [B, 3, 4]
    src_pixel_coords = cam2pixel(cam_coords, proj_cam_to_src_pixel[:,:,:3], proj_cam_to_src_pixel[:,:,-1:], padding_mode)  # [B,H,W,2]

    # Flow = warped absolute pixel position minus the identity grid.
    X = (w-1)*(src_pixel_coords[:,:,:,0]/2.0 + 0.5) - grid_x
    Y = (h-1)*(src_pixel_coords[:,:,:,1]/2.0 + 0.5) - grid_y

    return torch.stack((X,Y), dim=1)
def inverse_warp(feat, depth, pose, intrinsics, intrinsics_inv, padding_mode='zeros'):
    """
    Inverse warp a source feature map to the target image plane.
    Args:
        feat: the source feature (where to sample pixels) -- [B, CH, H, W]
        depth: depth map of the target image -- [B, H, W]
        pose: target-to-source pose matrix -- [B, 3, 4]
        intrinsics: camera intrinsic matrix -- [B, 3, 3]
        intrinsics_inv: inverse of the intrinsic matrix -- [B, 3, 3]
    Returns:
        Source feature warped to the target image plane
    """
    check_sizes(depth, 'depth', 'BHW')
    check_sizes(pose, 'pose', 'B34')
    check_sizes(intrinsics, 'intrinsics', 'B33')
    check_sizes(intrinsics_inv, 'intrinsics', 'B33')
    assert(intrinsics_inv.size() == intrinsics.size())

    cam_coords = pixel2cam(depth, intrinsics_inv)
    # Projection from target camera frame into source pixel frame: K @ [R|t].
    proj_cam_to_src_pixel = intrinsics.bmm(pose.cuda())  # [B, 3, 4]
    src_pixel_coords = cam2pixel(
        cam_coords, proj_cam_to_src_pixel[:, :, :3], proj_cam_to_src_pixel[:, :, -1:], padding_mode
    )  # [B,H,W,2]
    return torch.nn.functional.grid_sample(feat, src_pixel_coords, padding_mode=padding_mode, align_corners=True)
def inverse_warp_map(depth, pose, intrinsics, intrinsics_inv, padding_mode='zeros'):
    """
    Compute the absolute source-pixel sampling map for an inverse warp
    (same projection as ``inverse_warp`` but returns coordinates instead of
    sampling a feature map).
    Args:
        depth: depth map of the target image -- [B, H, W]
        pose: target-to-source pose matrix -- [B, 3, 4]
        intrinsics: camera intrinsic matrix -- [B, 3, 3]
        intrinsics_inv: inverse of the intrinsic matrix -- [B, 3, 3]
    Returns:
        Absolute (x, y) source-pixel coordinates -- [B, 2, H, W]
    """
    check_sizes(depth, 'depth', 'BHW')
    check_sizes(pose, 'pose', 'B34')
    check_sizes(intrinsics, 'intrinsics', 'B33')
    check_sizes(intrinsics_inv, 'intrinsics', 'B33')
    assert(intrinsics_inv.size() == intrinsics.size())

    batch_size, feat_height, feat_width = depth.size()

    cam_coords = pixel2cam(depth, intrinsics_inv)

    pose_mat = pose.cuda()
    # pose_mat = pose_mat

    # Get projection matrix for tgt camera frame to source pixel frame
    proj_cam_to_src_pixel = intrinsics.bmm(pose_mat)  # [B, 3, 4]

    src_pixel_coords = cam2pixel(cam_coords, proj_cam_to_src_pixel[:,:,:3], proj_cam_to_src_pixel[:,:,-1:], padding_mode)  # [B,H,W,2]
    # projected_feat = torch.nn.functional.grid_sample(feat, src_pixel_coords, padding_mode=padding_mode,align_corners=True)

    bs, h, w = depth.size()

    # Convert the normalized [-1, 1] grid back to absolute pixel coordinates
    # (the identity-grid subtraction of pose2flow is intentionally left out).
    X = (w-1)*(src_pixel_coords[:,:,:,0]/2.0 + 0.5) #- grid_x
    Y = (h-1)*(src_pixel_coords[:,:,:,1]/2.0 + 0.5) #- grid_y

    return torch.stack((X,Y), dim=1)
def inverse_warp_im(img, depth, pose, intrinsics, intrinsics_inv, rotation_mode='euler', padding_mode='zeros'):
    """
    Inverse warp a source image to the target image plane.
    Args:
        img: the source image (where to sample pixels) -- [B, 3, H, W]
        depth: depth map of the target image -- [B, H, W]
        pose: target-to-source pose matrix -- [B, 3, 4]
        intrinsics: camera intrinsic matrix -- [B, 3, 3]
        rotation_mode: unused here (pose is already a matrix).
    Returns:
        projected_img: Source image warped to the target image plane
    """
    check_sizes(img, 'img', 'B3HW')
    check_sizes(depth, 'depth', 'BHW')
    check_sizes(pose, 'pose', 'B34')
    check_sizes(intrinsics, 'intrinsics', 'B33')

    batch_size, _, img_height, img_width = img.size()

    cam_coords = pixel2cam(depth, intrinsics_inv)  # [B,3,H,W]

    pose_mat = pose
    pose_mat = pose_mat.cuda()  # [B,3,4]

    # Get projection matrix for tgt camera frame to source pixel frame
    proj_cam_to_src_pixel = intrinsics @ pose_mat  # [B, 3, 4]

    rot, tr = proj_cam_to_src_pixel[:,:,:3], proj_cam_to_src_pixel[:,:,-1:]

    src_pixel_coords = cam2pixel(cam_coords, rot, tr, padding_mode)  # [B,H,W,2]
    projected_img = F.grid_sample(img, src_pixel_coords, padding_mode=padding_mode,align_corners=True)
return projected_img | 40.096916 | 136 | 0.643595 | from __future__ import division
import torch
from torch.autograd import Variable
import torch.nn.functional as F
pixel_coords = None
def set_id_grid(depth):
global pixel_coords
b, h, w = depth.size()
i_range = Variable(torch.arange(0, h).view(1, h, 1).expand(1,h,w)).type_as(depth)
j_range = Variable(torch.arange(0, w).view(1, 1, w).expand(1,h,w)).type_as(depth)
ones = Variable(torch.ones(1,h,w)).type_as(depth)
pixel_coords = torch.stack((j_range, i_range, ones), dim=1)
def check_sizes(input, input_name, expected):
condition = [input.ndimension() == len(expected)]
for i,size in enumerate(expected):
if size.isdigit():
condition.append(input.size(i) == int(size))
assert(all(condition)), "wrong size for {}, expected {}, got {}".format(input_name, 'x'.join(expected), list(input.size()))
def pixel2cam(depth, intrinsics_inv):
global pixel_coords
b, h, w = depth.size()
if (pixel_coords is None) or (pixel_coords.shape[-2:]!=depth.shape[-2:]):
set_id_grid(depth)
current_pixel_coords = pixel_coords[:,:,:h,:w].expand(b,3,h,w).contiguous().view(b, 3, -1).cuda()
cam_coords = intrinsics_inv.bmm(current_pixel_coords).view(b, 3, h, w)
return cam_coords * depth.unsqueeze(1)
def cam2pixel(cam_coords, proj_c2p_rot, proj_c2p_tr, padding_mode):
b, _, h, w = cam_coords.size()
cam_coords_flat = cam_coords.view(b, 3, -1)
if proj_c2p_rot is not None:
pcoords = proj_c2p_rot.bmm(cam_coords_flat)
else:
pcoords = cam_coords_flat
if proj_c2p_tr is not None:
pcoords = pcoords + proj_c2p_tr
X = pcoords[:, 0]
Y = pcoords[:, 1]
Z = pcoords[:, 2].clamp(min=1e-3)
X_norm = 2*(X / Z)/(w-1) - 1
Y_norm = 2*(Y / Z)/(h-1) - 1
if padding_mode == 'zeros':
X_mask = ((X_norm > 1)+(X_norm < -1)).detach()
X_norm[X_mask] = 2
Y_mask = ((Y_norm > 1)+(Y_norm < -1)).detach()
Y_norm[Y_mask] = 2
pixel_coords = torch.stack([X_norm, Y_norm], dim=2)
return pixel_coords.view(b,h,w,2)
def pose_vec2mat(vec, rotation_mode='euler'):
translation = vec[:, :3].unsqueeze(-1)
rot = vec[:,3:]
if rotation_mode == 'euler':
rot_mat = euler2mat(rot)
elif rotation_mode == 'quat':
rot_mat = quat2mat(rot)
transform_mat = torch.cat([rot_mat, translation], dim=2)
return transform_mat
def pose2flow(depth, pose, intrinsics, intrinsics_inv, rotation_mode='euler', padding_mode=None):
check_sizes(depth, 'depth', 'BHW')
check_sizes(pose, 'pose', 'B34')
check_sizes(intrinsics, 'intrinsics', 'B33')
check_sizes(intrinsics_inv, 'intrinsics', 'B33')
assert(intrinsics_inv.size() == intrinsics.size())
bs, h, w = depth.size()
grid_x = Variable(torch.arange(0, w).view(1, 1, w).expand(1,h,w), requires_grad=False).type_as(depth).expand_as(depth)
grid_y = Variable(torch.arange(0, h).view(1, h, 1).expand(1,h,w), requires_grad=False).type_as(depth).expand_as(depth)
cam_coords = pixel2cam(depth, intrinsics_inv)
pose_mat = pose.cuda()
proj_cam_to_src_pixel = intrinsics.bmm(pose_mat)
src_pixel_coords = cam2pixel(cam_coords, proj_cam_to_src_pixel[:,:,:3], proj_cam_to_src_pixel[:,:,-1:], padding_mode)
X = (w-1)*(src_pixel_coords[:,:,:,0]/2.0 + 0.5) - grid_x
Y = (h-1)*(src_pixel_coords[:,:,:,1]/2.0 + 0.5) - grid_y
return torch.stack((X,Y), dim=1)
def inverse_warp(feat, depth, pose, intrinsics, intrinsics_inv, padding_mode='zeros'):
check_sizes(depth, 'depth', 'BHW')
check_sizes(pose, 'pose', 'B34')
check_sizes(intrinsics, 'intrinsics', 'B33')
check_sizes(intrinsics_inv, 'intrinsics', 'B33')
assert(intrinsics_inv.size() == intrinsics.size())
batch_size, _, feat_height, feat_width = feat.size()
cam_coords = pixel2cam(depth, intrinsics_inv)
pose_mat = pose.cuda()
proj_cam_to_src_pixel = intrinsics.bmm(pose_mat)
src_pixel_coords = cam2pixel(cam_coords, proj_cam_to_src_pixel[:,:,:3], proj_cam_to_src_pixel[:,:,-1:], padding_mode)
projected_feat = torch.nn.functional.grid_sample(feat, src_pixel_coords, padding_mode=padding_mode,align_corners=True)
return projected_feat
def inverse_warp_map(depth, pose, intrinsics, intrinsics_inv, padding_mode='zeros'):
check_sizes(depth, 'depth', 'BHW')
check_sizes(pose, 'pose', 'B34')
check_sizes(intrinsics, 'intrinsics', 'B33')
check_sizes(intrinsics_inv, 'intrinsics', 'B33')
assert(intrinsics_inv.size() == intrinsics.size())
batch_size, feat_height, feat_width = depth.size()
cam_coords = pixel2cam(depth, intrinsics_inv)
pose_mat = pose.cuda()
proj_cam_to_src_pixel = intrinsics.bmm(pose_mat)
src_pixel_coords = cam2pixel(cam_coords, proj_cam_to_src_pixel[:,:,:3], proj_cam_to_src_pixel[:,:,-1:], padding_mode)
bs, h, w = depth.size()
X = (w-1)*(src_pixel_coords[:,:,:,0]/2.0 + 0.5)
Y = (h-1)*(src_pixel_coords[:,:,:,1]/2.0 + 0.5)
return torch.stack((X,Y), dim=1)
def inverse_warp_im(img, depth, pose, intrinsics, intrinsics_inv, rotation_mode='euler', padding_mode='zeros'):
check_sizes(img, 'img', 'B3HW')
check_sizes(depth, 'depth', 'BHW')
check_sizes(pose, 'pose', 'B34')
check_sizes(intrinsics, 'intrinsics', 'B33')
batch_size, _, img_height, img_width = img.size()
cam_coords = pixel2cam(depth, intrinsics_inv)
pose_mat = pose
pose_mat = pose_mat.cuda()
proj_cam_to_src_pixel = intrinsics @ pose_mat
rot, tr = proj_cam_to_src_pixel[:,:,:3], proj_cam_to_src_pixel[:,:,-1:]
src_pixel_coords = cam2pixel(cam_coords, rot, tr, padding_mode)
projected_img = F.grid_sample(img, src_pixel_coords, padding_mode=padding_mode,align_corners=True)
return projected_img | true | true |
f7fd31a1e830e497835f0015e1f5d1bbe8212c1b | 668 | py | Python | hog_descriptor.py | Var-ji/crux-fr-sprint | 6a1e3b213d566580f5fecb1f2f83455f6a323310 | [
"MIT"
] | 2 | 2019-05-11T17:27:37.000Z | 2019-07-25T11:26:59.000Z | hog_descriptor.py | Var-ji/crux-fr-sprint | 6a1e3b213d566580f5fecb1f2f83455f6a323310 | [
"MIT"
] | null | null | null | hog_descriptor.py | Var-ji/crux-fr-sprint | 6a1e3b213d566580f5fecb1f2f83455f6a323310 | [
"MIT"
] | null | null | null | import cv2
from skimage import data, exposure
from skimage.feature import hog
capture = cv2.VideoCapture(0)
while True:
ret, frame = capture.read()
if not ret:
continue
image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
(H, h_image) = hog(image, orientations=9, pixels_per_cell=(8, 8),
cells_per_block=(2, 2), visualize=True, multichannel=True)
h_image = exposure.rescale_intensity(h_image, out_range=(0, 255))
h_image = h_image.astype("uint8")
cv2.imshow('Webcam Feed', frame)
cv2.imshow('HOG', h_image)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
cv2.destroyAllWindows() | 30.363636 | 89 | 0.648204 | import cv2
from skimage import data, exposure
from skimage.feature import hog
capture = cv2.VideoCapture(0)
while True:
ret, frame = capture.read()
if not ret:
continue
image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
(H, h_image) = hog(image, orientations=9, pixels_per_cell=(8, 8),
cells_per_block=(2, 2), visualize=True, multichannel=True)
h_image = exposure.rescale_intensity(h_image, out_range=(0, 255))
h_image = h_image.astype("uint8")
cv2.imshow('Webcam Feed', frame)
cv2.imshow('HOG', h_image)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
cv2.destroyAllWindows() | true | true |
f7fd323602444fb4d8bd004aee3525f0e13be23a | 634 | py | Python | config.py | e2jk/content-type-converter | daaed62191c3dd73cdd6d61e8c6882b0d64e3723 | [
"MIT"
] | 1 | 2021-03-17T15:30:17.000Z | 2021-03-17T15:30:17.000Z | config.py | e2jk/content-type-converter | daaed62191c3dd73cdd6d61e8c6882b0d64e3723 | [
"MIT"
] | 9 | 2020-11-22T17:52:53.000Z | 2021-10-18T21:29:54.000Z | config.py | e2jk/content-type-converter | daaed62191c3dd73cdd6d61e8c6882b0d64e3723 | [
"MIT"
] | null | null | null | import os
class Config(object):
    """Application configuration: secret key and per-profile request settings."""

    # NOTE(review): hard-coded fallback secret — set SECRET_KEY in the
    # environment for any non-development deployment.
    SECRET_KEY = os.environ.get("SECRET_KEY") or "you-will-never-guess"

    # Named request profiles; each carries a base_url plus arbitrary
    # per-profile settings (keys like "header_*" presumably become HTTP
    # headers — confirm against the consumer).
    PROFILES = {
        "test_profile": {"base_url": "http://myhttpheader.com/", "aa": 1, "bb": 2},
        "second_profile": {
            "base_url": "https://www.whatismybrowser.com/detect/what-http-headers-is-my-browser-sending",
            "aa": 1,
            "bb": 2,
        },
        "third_profile": {
            "base_url": "http://localhost:5555",
            "header_authorization": "key TestingABC",
            "bb": 2,
        },
        # Deliberately unreachable host, used to exercise error handling.
        "invalid_profile": {"base_url": "http://nonexistent.invalid/"},
    }
| 31.7 | 105 | 0.537855 | import os
class Config(object):
SECRET_KEY = os.environ.get("SECRET_KEY") or "you-will-never-guess"
PROFILES = {
"test_profile": {"base_url": "http://myhttpheader.com/", "aa": 1, "bb": 2},
"second_profile": {
"base_url": "https://www.whatismybrowser.com/detect/what-http-headers-is-my-browser-sending",
"aa": 1,
"bb": 2,
},
"third_profile": {
"base_url": "http://localhost:5555",
"header_authorization": "key TestingABC",
"bb": 2,
},
"invalid_profile": {"base_url": "http://nonexistent.invalid/"},
}
| true | true |
f7fd32a7be3cb621a04f8be68403bf149be7dc06 | 7,611 | py | Python | steamm.py | jesselangdon/steamm | 815f0375d9590a5b14e329841c91f95ac51281eb | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2017-10-06T18:10:41.000Z | 2017-10-06T18:10:41.000Z | steamm.py | jesselangdon/steamm | 815f0375d9590a5b14e329841c91f95ac51281eb | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2016-03-23T16:28:36.000Z | 2016-03-23T16:43:00.000Z | steamm.py | jesselangdon/steamm | 815f0375d9590a5b14e329841c91f95ac51281eb | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # -*- coding: utf-8 -*-
"""
/***************************************************************************
STeAMM
A QGIS plugin
Stream Temperature Automated Modeler using MODIS
-------------------
begin : 2016-09-08
git sha : $Format:%H$
copyright : (C) 2016 by South Fork Research, Inc.
email : jesse@southforkresearch.org
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
# Initialize Qt resources from file resources.py
import resources
# Import the code for the dialog
from dialog_process import ProcessDialog
from dialog_predict import PredictDialog
import os
import os.path
# TODO refactor to include Get MODIS Data and Preprocess MODIS Data tools.
# Import the preprocessing module
import get_swaths.preprocess as process
# Import the prediction module
import predict_temp.predict_temp as predict
class STeAMM:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'STeAMM_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Create the dialog (after translation) and keep reference
#TODO add entry for 'Get MODIS Data' tool
self.dlg1 = ProcessDialog()
self.dlg2 = PredictDialog()
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&STeAMM')
# TODO: We are going to let the user set this up in a future iteration
self.toolbar = self.iface.addToolBar(u'STeAMM')
self.toolbar.setObjectName(u'STeAMM')
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('STeAMM', message)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.toolbar.addAction(action)
if add_to_menu:
self.iface.addPluginToMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
# TODO Change to "Get MODIS Data" and "Preprocess MODIS Data" tools
icon_path1 = ':/plugins/STeAMM/icon_process.png'
icon_path2 = ':/plugins/STeAMM/icon_predict.png'
self.add_action(
icon_path1,
text=self.tr(u'Get MODIS Swaths'),
callback=self.run1,
parent=self.iface.mainWindow())
self.add_action(
icon_path2,
text=self.tr(u'Predict Stream Temperatures'),
callback=self.run2,
parent=self.iface.mainWindow()
)
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
for action in self.actions:
self.iface.removePluginMenu(
self.tr(u'&STeAMM'),
action)
self.iface.removeToolBarIcon(action)
# remove the toolbar
del self.toolbar
#TODO add get Get MODIS Data tool run method here
def run1(self):
"""Run method that performs all the real work"""
self.dlg1.show() # show the dialog
result = self.dlg1.exec_() # Run the dialog event loop
if result: # See if OK was pressed
process.main()
#process.main(data_dir, data_product, process_yr, swath_id, geo_rca, doy_start_str, doy_end_str, username, password)
pass
def run2(self):
"""Run method for the Predict Temperature tool."""
# show the dialog
self.dlg2.show()
# Run the dialog event loop
result = self.dlg2.exec_()
# See if OK was pressed
if result:
predict.main()
pass | 33.676991 | 128 | 0.572461 |
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import resources
from dialog_process import ProcessDialog
from dialog_predict import PredictDialog
import os
import os.path
import get_swaths.preprocess as process
import predict_temp.predict_temp as predict
class STeAMM:
    """QGIS plugin: Stream Temperature Automated Modeler using MODIS."""

    def __init__(self, iface):
        """Store the QGIS interface, load translations and create dialogs.

        :param iface: QgsInterface instance used to manipulate QGIS at run time
        """
        self.iface = iface
        # Plugin directory and locale-specific Qt translation file.
        self.plugin_dir = os.path.dirname(__file__)
        locale = QSettings().value('locale/userLocale')[0:2]
        locale_path = os.path.join(
            self.plugin_dir,
            'i18n',
            'STeAMM_{}.qm'.format(locale))
        if os.path.exists(locale_path):
            self.translator = QTranslator()
            self.translator.load(locale_path)
            if qVersion() > '4.3.3':
                QCoreApplication.installTranslator(self.translator)
        # Dialogs for the two tools provided by the plugin.
        self.dlg1 = ProcessDialog()
        self.dlg2 = PredictDialog()
        # Menu actions and plugin toolbar.
        self.actions = []
        self.menu = self.tr(u'&STeAMM')
        self.toolbar = self.iface.addToolBar(u'STeAMM')
        self.toolbar.setObjectName(u'STeAMM')

    def tr(self, message):
        """Translate a string using the Qt translation API."""
        return QCoreApplication.translate('STeAMM', message)

    def add_action(
        self,
        icon_path,
        text,
        callback,
        enabled_flag=True,
        add_to_menu=True,
        add_to_toolbar=True,
        status_tip=None,
        whats_this=None,
        parent=None):
        """Create a QAction, optionally attach it to the toolbar/menu, track it.

        :param icon_path: resource or filesystem path of the action icon
        :param text: text shown in menu items for this action
        :param callback: slot invoked when the action is triggered
        :param enabled_flag: whether the action starts enabled
        :param add_to_menu: also add the action to the plugin menu
        :param add_to_toolbar: also add the action to the plugin toolbar
        :param status_tip: optional popup text on hover
        :param whats_this: optional status-bar text on hover
        :param parent: parent widget for the new action
        :returns: the created QAction (also appended to self.actions)
        """
        icon = QIcon(icon_path)
        action = QAction(icon, text, parent)
        action.triggered.connect(callback)
        action.setEnabled(enabled_flag)
        if status_tip is not None:
            action.setStatusTip(status_tip)
        if whats_this is not None:
            action.setWhatsThis(whats_this)
        if add_to_toolbar:
            self.toolbar.addAction(action)
        if add_to_menu:
            self.iface.addPluginToMenu(
                self.menu,
                action)
        self.actions.append(action)
        return action

    def initGui(self):
        """Create the menu entries and toolbar icons inside the QGIS GUI."""
        icon_path1 = ':/plugins/STeAMM/icon_process.png'
        icon_path2 = ':/plugins/STeAMM/icon_predict.png'
        self.add_action(
            icon_path1,
            text=self.tr(u'Get MODIS Swaths'),
            callback=self.run1,
            parent=self.iface.mainWindow())
        self.add_action(
            icon_path2,
            text=self.tr(u'Predict Stream Temperatures'),
            callback=self.run2,
            parent=self.iface.mainWindow()
        )

    def unload(self):
        """Remove the plugin menu items and icons from the QGIS GUI."""
        for action in self.actions:
            self.iface.removePluginMenu(
                self.tr(u'&STeAMM'),
                action)
            self.iface.removeToolBarIcon(action)
        # Drop the toolbar created in __init__.
        del self.toolbar

    def run1(self):
        """Show the 'Get MODIS Swaths' dialog and run processing on OK."""
        self.dlg1.show()
        result = self.dlg1.exec_()
        if result:
            process.main()
            pass

    def run2(self):
        """Show the 'Predict Stream Temperatures' dialog and run on OK."""
        self.dlg2.show()
        result = self.dlg2.exec_()
        if result:
            predict.main()
            pass
f7fd330705a8b0de369616649805c50bac97b1e4 | 237 | py | Python | BOJ/17000~17999/17600~17699/17626.py | shinkeonkim/today-ps | f3e5e38c5215f19579bb0422f303a9c18c626afa | [
"Apache-2.0"
] | 2 | 2020-01-29T06:54:41.000Z | 2021-11-07T13:23:27.000Z | BOJ/17000~17999/17600~17699/17626.py | shinkeonkim/Today_PS | bb0cda0ee1b9c57e1cfa38355e29d0f1c6167a44 | [
"Apache-2.0"
] | null | null | null | BOJ/17000~17999/17600~17699/17626.py | shinkeonkim/Today_PS | bb0cda0ee1b9c57e1cfa38355e29d0f1c6167a44 | [
"Apache-2.0"
] | null | null | null | from math import sqrt
from math import sqrt


def min_square_terms(n):
    """Return the minimum count of perfect squares summing to n (BOJ 17626).

    By Lagrange's four-square theorem the answer is always between 1 and 4,
    so 5 serves as an unreachable initial upper bound. Classic
    O(n * sqrt(n)) DP. Unlike the original, the table is sized exactly n+1
    and is never indexed past n (the original wrote into a fixed 100000-slot
    array at indexes up to n + sqrt(n)^2, relying on the input staying small).

    :param n: positive integer to decompose
    :returns: minimum number of perfect squares whose sum is n
    """
    squares = [i * i for i in range(1, int(sqrt(n)) + 1)]
    best = [0] + [5] * n
    for total in range(1, n + 1):
        for sq in squares:
            if sq > total:
                break  # squares are ascending; nothing larger can apply
            if best[total - sq] + 1 < best[total]:
                best[total] = best[total - sq] + 1
    return best[n]


if __name__ == "__main__":
    # Read n and print the answer; guarded so the module is importable
    # for testing (the original called input() unconditionally).
    print(min_square_terms(int(input())))
print(D[n]) | 18.230769 | 42 | 0.489451 | from math import sqrt
n = int(input())
L = [i*i for i in range(1,int(sqrt(n))+1)]
D = [500000]*100000
for i in L:
D[i] = 1
for i in range(1, n+1):
for j in L:
if D[i+j] > D[i] +1:
D[i+j] = D[i]+1
print(D[n]) | true | true |
f7fd3322128ae9745272ec7e23bb6d94e2dbe6cd | 858 | py | Python | test/win/gyptest-cl-pdbname.py | MIPS/external-chromium_org-tools-gyp | 1cf7b53d022d00310b738b24a51dc98d6ea3eb00 | [
"BSD-3-Clause"
] | 11 | 2015-01-19T22:09:14.000Z | 2019-10-03T21:45:31.000Z | test/win/gyptest-cl-pdbname.py | MIPS/external-chromium_org-tools-gyp | 1cf7b53d022d00310b738b24a51dc98d6ea3eb00 | [
"BSD-3-Clause"
] | null | null | null | test/win/gyptest-cl-pdbname.py | MIPS/external-chromium_org-tools-gyp | 1cf7b53d022d00310b738b24a51dc98d6ea3eb00 | [
"BSD-3-Clause"
] | 16 | 2015-01-08T01:47:24.000Z | 2022-02-25T06:06:06.000Z | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure pdb is named as expected (shared between .cc files).
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['ninja'])
CHDIR = 'compiler-flags'
test.run_gyp('pdbname.gyp', chdir=CHDIR)
test.build('pdbname.gyp', test.ALL, chdir=CHDIR)
# Confirm that the default behaviour is to name the .pdb per-target (rather
# than per .cc file).
test.built_file_must_exist('obj/test_pdbname.pdb', chdir=CHDIR)
# Confirm that there should be a .pdb alongside the executable.
test.built_file_must_exist('test_pdbname.exe', chdir=CHDIR)
test.built_file_must_exist('test_pdbname.exe.pdb', chdir=CHDIR)
test.pass_test()
| 27.677419 | 77 | 0.729604 |
"""Make sure the .pdb is named as expected (shared between .cc files)."""
import TestGyp
import sys
if sys.platform == 'win32':
  test = TestGyp.TestGyp(formats=['ninja'])
  CHDIR = 'compiler-flags'
  test.run_gyp('pdbname.gyp', chdir=CHDIR)
  test.build('pdbname.gyp', test.ALL, chdir=CHDIR)
  # Default behaviour: the .pdb is named per-target, not per .cc file.
  test.built_file_must_exist('obj/test_pdbname.pdb', chdir=CHDIR)
  # A .pdb should also sit alongside the executable.
  test.built_file_must_exist('test_pdbname.exe', chdir=CHDIR)
  test.built_file_must_exist('test_pdbname.exe.pdb', chdir=CHDIR)
  test.pass_test()
| true | true |
f7fd33518c45f346fde500b2bf4682bf46f632ff | 2,795 | py | Python | dash_coreui_components/appsidebarfooter.py | sourcewerk/dash-coreui-components | d402eb1dccb2ed9bb52f98edfa3f880ec1eb8b95 | [
"MIT"
] | 11 | 2019-03-30T14:02:05.000Z | 2021-11-12T11:25:02.000Z | dash_coreui_components/appsidebarfooter.py | sourcewerk/dash-coreui-components | d402eb1dccb2ed9bb52f98edfa3f880ec1eb8b95 | [
"MIT"
] | 6 | 2019-01-30T22:42:10.000Z | 2019-10-29T13:01:02.000Z | dash_coreui_components/appsidebarfooter.py | sourcewerk/dash-coreui-components | d402eb1dccb2ed9bb52f98edfa3f880ec1eb8b95 | [
"MIT"
] | null | null | null | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class appsidebarfooter(Component):
"""A appsidebarfooter component.
CoreUI sidebar footer component.
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional): The children.
- id (string; optional): The ID used to identify this component in Dash callbacks, defaults to `appsidebarfooter`.
- className (string; optional): The CSS class name, defaults to `sidebar-footer`.
- tag (string; optional): The HTML tag, defaults to `div`.
Available events: """
@_explicitize_args
def __init__(self, children=None, id=Component.UNDEFINED, className=Component.UNDEFINED, tag=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'className', 'tag']
self._type = 'appsidebarfooter'
self._namespace = 'dash_coreui_components'
self._valid_wildcard_attributes = []
self.available_events = []
self.available_properties = ['children', 'id', 'className', 'tag']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(appsidebarfooter, self).__init__(children=children, **args)
def __repr__(self):
if(any(getattr(self, c, None) is not None
for c in self._prop_names
if c is not self._prop_names[0])
or any(getattr(self, c, None) is not None
for c in self.__dict__.keys()
if any(c.startswith(wc_attr)
for wc_attr in self._valid_wildcard_attributes))):
props_string = ', '.join([c+'='+repr(getattr(self, c, None))
for c in self._prop_names
if getattr(self, c, None) is not None])
wilds_string = ', '.join([c+'='+repr(getattr(self, c, None))
for c in self.__dict__.keys()
if any([c.startswith(wc_attr)
for wc_attr in
self._valid_wildcard_attributes])])
return ('appsidebarfooter(' + props_string +
(', ' + wilds_string if wilds_string != '' else '') + ')')
else:
return (
'appsidebarfooter(' +
repr(getattr(self, self._prop_names[0], None)) + ')')
| 46.583333 | 128 | 0.572451 |
from dash.development.base_component import Component, _explicitize_args
class appsidebarfooter(Component):
@_explicitize_args
def __init__(self, children=None, id=Component.UNDEFINED, className=Component.UNDEFINED, tag=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'className', 'tag']
self._type = 'appsidebarfooter'
self._namespace = 'dash_coreui_components'
self._valid_wildcard_attributes = []
self.available_events = []
self.available_properties = ['children', 'id', 'className', 'tag']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs)
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(appsidebarfooter, self).__init__(children=children, **args)
def __repr__(self):
if(any(getattr(self, c, None) is not None
for c in self._prop_names
if c is not self._prop_names[0])
or any(getattr(self, c, None) is not None
for c in self.__dict__.keys()
if any(c.startswith(wc_attr)
for wc_attr in self._valid_wildcard_attributes))):
props_string = ', '.join([c+'='+repr(getattr(self, c, None))
for c in self._prop_names
if getattr(self, c, None) is not None])
wilds_string = ', '.join([c+'='+repr(getattr(self, c, None))
for c in self.__dict__.keys()
if any([c.startswith(wc_attr)
for wc_attr in
self._valid_wildcard_attributes])])
return ('appsidebarfooter(' + props_string +
(', ' + wilds_string if wilds_string != '' else '') + ')')
else:
return (
'appsidebarfooter(' +
repr(getattr(self, self._prop_names[0], None)) + ')')
| true | true |
f7fd33a3787cdc760135ca676cc8bda1a0aa071d | 2,265 | py | Python | wacz/util.py | edsu/wacz | 302003a5d1757594d54da14adb2cc5595a6c7919 | [
"MIT"
] | null | null | null | wacz/util.py | edsu/wacz | 302003a5d1757594d54da14adb2cc5595a6c7919 | [
"MIT"
] | null | null | null | wacz/util.py | edsu/wacz | 302003a5d1757594d54da14adb2cc5595a6c7919 | [
"MIT"
] | null | null | null | import hashlib, datetime, json
from warcio.timeutils import iso_date_to_timestamp
import pkg_resources
WACZ_VERSION = "1.1.1"
def check_http_and_https(url, ts, pages_dict):
    """Look up both the http and https variants of ``url`` in ``pages_dict``.

    Both plain ``scheme://...`` keys and timestamp-prefixed
    ``ts/scheme://...`` keys are tried.

    :param url: url to check (must contain a scheme, e.g. ``http://...``)
    :param ts: timestamp used for the ``ts/url`` key variants
    :param pages_dict: dict of pages the user passed
    :returns: the matching key, or "" when no variant is present
    :rtype: str
    """
    # Split only on the FIRST ":" so colons later in the URL survive:
    # the original split(":")[1] truncated "http://host:8080/x" at the port.
    url_body = url.split(":", 1)[1]
    checks = [
        f"http:{url_body}",
        f"https:{url_body}",
        f"{ts}/http:{url_body}",
        f"{ts}/https:{url_body}",
    ]
    for check in checks:
        if check in pages_dict:
            return check
    return ""
def get_py_wacz_version():
    """Return the installed version string of the py-wacz package."""
    distribution = pkg_resources.get_distribution("wacz")
    return distribution.version
def support_hash_file(hash_type, data):
    """Hash ``data`` with sha256 or md5, returning "<type>:<hexdigest>".

    Any other ``hash_type`` yields None, matching the original behaviour.
    """
    constructors = {
        "sha256": hashlib.sha256,
        "md5": hashlib.md5,
    }
    make_hasher = constructors.get(hash_type)
    if make_hasher is not None:
        return f"{hash_type}:{make_hasher(data).hexdigest()}"
def construct_passed_pages_dict(passed_content):
    """Build a dict of the passed pages keyed by url (or "ts/url" when a
    timestamp is present), with the remaining fields (title, text, ...) as
    the value.

    :param passed_content: iterable of JSON-encoded page lines
    :returns: mapping of "url" or "<timestamp>/url" -> remaining page fields
    """
    passed_pages_dict = {}
    # Iterate the lines directly instead of the original range(len(...)).
    for line in passed_content:
        record = json.loads(line)
        # Lines carrying a "format" key are the file header, not a page.
        if "format" in record:
            continue
        url = record.pop("url", "")
        # Default key is the url; prefix with the 14-digit timestamp if set.
        key = url
        if "ts" in record:
            key = iso_date_to_timestamp(record.pop("ts")) + "/" + url
        passed_pages_dict[key] = record
    return passed_pages_dict
def now():
    """Return the current UTC time as a (Y, M, D, h, m, s) tuple."""
    current = datetime.datetime.utcnow()
    return tuple(current.timetuple()[:6])
def validateJSON(jsonData):
    """Return True when ``jsonData`` parses as JSON, False otherwise.

    :param jsonData: JSON text to validate
    :rtype: bool
    """
    try:
        json.loads(jsonData)
    except ValueError:
        # json.JSONDecodeError subclasses ValueError; the bound-but-unused
        # exception variable from the original is dropped.
        return False
    return True
| 29.038462 | 163 | 0.633996 | import hashlib, datetime, json
from warcio.timeutils import iso_date_to_timestamp
import pkg_resources
WACZ_VERSION = "1.1.1"
def check_http_and_https(url, ts, pages_dict):
url_body = url.split(":")[1]
checks = [
f"http:{url_body}",
f"https:{url_body}",
f"{ts}/http:{url_body}",
f"{ts}/https:{url_body}",
]
for check in checks:
if check in pages_dict:
return check
return ""
def get_py_wacz_version():
return pkg_resources.get_distribution("wacz").version
def support_hash_file(hash_type, data):
if hash_type == "sha256":
return "sha256:%s" % hashlib.sha256(data).hexdigest()
if hash_type == "md5":
return "md5:%s" % hashlib.md5(data).hexdigest()
def construct_passed_pages_dict(passed_content):
passed_pages_dict = {}
for i in range(0, len(passed_content)):
header = json.loads(passed_content[i])
if "format" not in header:
pages_dict = dict(header)
url = pages_dict.pop("url", "")
key = url
if "ts" in pages_dict:
key = iso_date_to_timestamp(pages_dict.pop("ts")) + "/" + url
passed_pages_dict[key] = pages_dict
return passed_pages_dict
def now():
return tuple(datetime.datetime.utcnow().timetuple()[:6])
def validateJSON(jsonData):
try:
json.loads(jsonData)
except ValueError as err:
return False
return True
| true | true |
f7fd33a87c0c3ee6438ec38e38c7e4e9fe992d50 | 1,061 | py | Python | tests/unit/fake_data_root/kubernetes/var/lib/juju/agents/unit-kubernetes-master-0/charm/reactive/cdk_service_kicker.py | KellenRenshaw/hotsos | e3fc51ab7f8af606a5846a3486a7fda23d761583 | [
"Apache-2.0"
] | 6 | 2021-10-01T19:46:14.000Z | 2022-03-31T17:05:08.000Z | tests/unit/fake_data_root/kubernetes/var/lib/juju/agents/unit-kubernetes-master-0/charm/reactive/cdk_service_kicker.py | KellenRenshaw/hotsos | e3fc51ab7f8af606a5846a3486a7fda23d761583 | [
"Apache-2.0"
] | 111 | 2021-10-01T18:18:17.000Z | 2022-03-29T12:23:20.000Z | tests/unit/fake_data_root/kubernetes/var/lib/juju/agents/unit-kubernetes-master-0/charm/reactive/cdk_service_kicker.py | KellenRenshaw/hotsos | e3fc51ab7f8af606a5846a3486a7fda23d761583 | [
"Apache-2.0"
] | 10 | 2021-09-29T14:47:54.000Z | 2022-03-18T14:52:16.000Z | import os
import subprocess
from charms import layer
from charms.reactive import hook, when_not, remove_state, set_state
from charmhelpers.core.templating import render
@hook('upgrade-charm')
def upgrade_charm():
remove_state('cdk-service-kicker.installed')
@when_not('cdk-service-kicker.installed')
def install_cdk_service_kicker():
''' Installs the cdk-service-kicker service. Workaround for
https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/357
'''
source = 'cdk-service-kicker'
dest = '/usr/bin/cdk-service-kicker'
services = layer.options('cdk-service-kicker').get('services')
context = {'services': ' '.join(services)}
render(source, dest, context)
os.chmod('/usr/bin/cdk-service-kicker', 0o775)
source = 'cdk-service-kicker.service'
dest = '/etc/systemd/system/cdk-service-kicker.service'
context = {}
render(source, dest, context)
command = ['systemctl', 'enable', 'cdk-service-kicker']
subprocess.check_call(command)
set_state('cdk-service-kicker.installed')
| 32.151515 | 76 | 0.719133 | import os
import subprocess
from charms import layer
from charms.reactive import hook, when_not, remove_state, set_state
from charmhelpers.core.templating import render
@hook('upgrade-charm')
def upgrade_charm():
    """On charm upgrade, clear the installed flag so the service is re-deployed."""
    remove_state('cdk-service-kicker.installed')
@when_not('cdk-service-kicker.installed')
def install_cdk_service_kicker():
    """Render, install and enable the cdk-service-kicker systemd service."""
    # Render the kicker script with the space-separated list of services
    # taken from the layer options.
    source = 'cdk-service-kicker'
    dest = '/usr/bin/cdk-service-kicker'
    services = layer.options('cdk-service-kicker').get('services')
    context = {'services': ' '.join(services)}
    render(source, dest, context)
    os.chmod('/usr/bin/cdk-service-kicker', 0o775)
    # Render the systemd unit and enable it at boot.
    source = 'cdk-service-kicker.service'
    dest = '/etc/systemd/system/cdk-service-kicker.service'
    context = {}
    render(source, dest, context)
    command = ['systemctl', 'enable', 'cdk-service-kicker']
    subprocess.check_call(command)
    # Mark installation complete so this handler does not run again.
    set_state('cdk-service-kicker.installed')
subprocess.check_call(command)
set_state('cdk-service-kicker.installed')
| true | true |
f7fd33bfee01c8d286fed3f10eb65a1ef1f8c79f | 1,304 | py | Python | consultas/consultas/dump consultas/doctype/consulta_privada/consulta_privada.py | Lewinta/Consultas | e01ad870a2bad0eb5938d8800e3e2934402fce62 | [
"MIT"
] | null | null | null | consultas/consultas/dump consultas/doctype/consulta_privada/consulta_privada.py | Lewinta/Consultas | e01ad870a2bad0eb5938d8800e3e2934402fce62 | [
"MIT"
] | null | null | null | consultas/consultas/dump consultas/doctype/consulta_privada/consulta_privada.py | Lewinta/Consultas | e01ad870a2bad0eb5938d8800e3e2934402fce62 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Lewin Villar and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.model.naming import make_autoname
class ConsultaPrivada(Document):
    """Private consultation document; keeps the doctor's price list in sync."""

    def before_insert(self):
        """Assign a sequential "CLP-" numbered id before the document is saved."""
        self.id = make_autoname("CLP-.##########")

    def guardar_lista_de_precio(self):
        """Save each test's price difference into the doctor's price list.

        Existing "Lista Precio" rows are updated in place; missing ones are
        created. Returns True once all rows have been processed.
        """
        for row in self.pruebas:
            nombre_lista_precio = self.obtener_lista_de_precio(self.medico, row.prueba)
            if nombre_lista_precio:
                doc = frappe.get_doc("Lista Precio", nombre_lista_precio)
                doc.monto = row.diferencia
                doc.save()
            else:
                doc = frappe.get_doc({
                    "doctype": "Lista Precio",
                    "prueba": row.prueba,
                    "tipo_lista": "Medico",
                    "ars_medico": self.medico,
                    "monto": row.diferencia
                })
                doc.insert()
        self.new_inserted = True
        return self.new_inserted

    def obtener_lista_de_precio(self, medico, prueba):
        """Return the name of the doctor's price-list row for a test, or None.

        :param medico: doctor whose price list is searched
        :param prueba: test (prueba) to look up
        """
        # Parameterized query: the original interpolated document fields with
        # str.format, which allowed SQL injection and broke on quotes.
        result = frappe.db.sql(
            """SELECT name
                 FROM `tabLista Precio`
                WHERE ars_medico = %s
                  AND prueba = %s""",
            (medico, prueba),
            as_dict=True,
        )
        if result:
            return result[0].name
        return None
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.model.naming import make_autoname
class ConsultaPrivada(Document):
def before_insert(self):
self.id=make_autoname("CLP-.##########")
def guardar_lista_de_precio(self):
for row in self.pruebas:
nombre_lista_precio = self.obtener_lista_de_precio(self.medico, row.prueba)
if nombre_lista_precio:
doc = frappe.get_doc("Lista Precio", nombre_lista_precio)
doc.monto = row.diferencia
doc.save()
else:
doc = frappe.get_doc({
"doctype": "Lista Precio",
"prueba": row.prueba,
"tipo_lista": "Medico",
"ars_medico": self.medico,
"monto": row.diferencia
})
doc.insert()
self.new_inserted = True
return self.new_inserted
def obtener_lista_de_precio(self, medico, prueba):
result = frappe.db.sql("""SELECT name
FROM `tabLista Precio`
WHERE ars_medico = '{0}'
AND prueba = '{1}'"""
.format(medico, prueba),
as_dict=True)
if result:
return result[0].name
return None | true | true |
f7fd34a10c072c220bfe867aafd4ba9e7ed550a3 | 4,991 | py | Python | tests/test_basics.py | OMAS-IIIF/cserve | 8932ed36fa6f1935b3db97ed556f876e2e459c4b | [
"MIT"
] | 1 | 2021-06-24T06:10:07.000Z | 2021-06-24T06:10:07.000Z | tests/test_basics.py | OMAS-IIIF/cserve | 8932ed36fa6f1935b3db97ed556f876e2e459c4b | [
"MIT"
] | null | null | null | tests/test_basics.py | OMAS-IIIF/cserve | 8932ed36fa6f1935b3db97ed556f876e2e459c4b | [
"MIT"
] | null | null | null | import pytest
import os
def test_ping(manager):
    """
    Test the ping route (which is programmed in C++)
    :param manager: defined in conftest.py
    :return: None
    """
    resp = manager.get('ping')
    assert resp.text == 'PONG'
    assert resp.headers['Content-Length'] == '4'
def test_get_html(manager):
    """
    Test getting an ordinary HTML file
    :param manager: defined in conftest.py
    :return: None
    """
    # "expected" instead of the original local that shadowed the builtin str.
    with open('./testserver/docroot/test.html') as inf:
        expected = inf.read()
    response = manager.get('/test.html')
    assert response.text == expected
    assert response.headers['Content-Type'] == 'text/html; charset=utf-8'
def test_get_not_found(manager):
    """
    Testing for requesting non-existing file
    :param manager: defined in conftest.py
    :return: None
    """
    # The unused "resource" and "e_info" bindings from the original are dropped.
    with pytest.raises(Exception):
        manager.get('/gaga.html')
def test_sget_html(manager):
    """
    testing https access
    :param manager: defined in conftest.py
    :return: None
    """
    # "expected" instead of the original local that shadowed the builtin str.
    with open('./testserver/docroot/test.html') as inf:
        expected = inf.read()
    response = manager.sget('/test.html')
    assert response.text == expected
    assert response.headers['Content-Type'] == 'text/html; charset=utf-8'
def test_get_csv(manager):
    """
    Testing access to CSV file
    :param manager: defined in conftest.py
    :return: None
    """
    # "expected" instead of the original local that shadowed the builtin str.
    with open('./testserver/docroot/test.csv') as inf:
        expected = inf.read()
    response = manager.get('/test.csv')
    assert response.text == expected
    assert response.headers['Content-Type'] == 'text/csv'
def test_sget_csv(manager):
    """
    Getting CSV file with https
    :param manager: defined in conftest.py
    :return: None
    """
    # "expected" instead of the original local that shadowed the builtin str.
    with open('./testserver/docroot/test.csv') as inf:
        expected = inf.read()
    response = manager.sget('/test.csv')
    assert response.text == expected
    assert response.headers['Content-Type'] == 'text/csv'
def test_get_range(manager):
    """
    Testing the Range header to fetch part of a file
    :param manager: defined in conftest.py
    :return: None
    """
    # bytes=5-14 is inclusive on both ends: exactly 10 bytes expected.
    response = manager.get('/range.dat', headers={"Range": "bytes=5-14"})
    assert len(response.text) == 10
    assert response.text == '456789B012'
    assert response.headers['Content-Type'] == 'text/plain'
def test_elua(manager):
    """
    Testing proper elua processing (embedded Lua)
    :param manager: defined in conftest.py
    :return: None
    """
    # "expected" instead of the original local that shadowed the builtin str;
    # the literal is byte-identical to the original.
    expected = r"""<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>LuaTest</title>
</head>
<body>
Hello from Lua!
</body>
</html>"""
    response = manager.get('/luatest.elua')
    assert response.text == expected
    assert response.headers['Content-Type'] == 'text/html; charset=utf-8'
def test_servervariables(manager):
    """
    Testing all config and server variables provided from the server to Lua
    :param manager: defined in conftest.py
    :return: None
    """
    variables = manager.get_json('servervariables', params={'param': 'all'})
    assert variables.get('status') == 'OK'
    # --- "config" section: values from the test server's configuration ---
    assert variables.get('config') is not None
    assert variables['config'].get('port') == 8080
    assert variables['config'].get('scriptdir') == './testserver/scripts'
    assert variables['config'].get('docroot') == './testserver/docroot'
    assert variables['config'].get('keep_alive') == 5
    assert variables['config'].get('nthreads') == 4
    assert variables['config'].get('max_post_size') == '1MB'
    # --- "server" section: per-request variables ---
    assert variables.get('server') is not None
    assert variables['server'].get('method') == 'GET'
    assert variables['server'].get('host') == 'localhost:8080'
    assert variables['server'].get('client_ip') == '127.0.0.1'
    assert variables['server'].get('client_port')
    assert variables['server'].get('uri') == '/servervariables'
    assert variables['server'].get('secure') == False
    # Query-string parameters are echoed under server.get.
    assert variables['server'].get('get') is not None
    assert variables['server']['get'].get('param') == 'all'
    # ssl_port is only configured when the build has OpenSSL support.
    assert variables['server'].get('has_openssl') is not None
    if variables['server']['has_openssl']:
        assert variables['config'].get('ssl_port') == 8443
    # Request headers are echoed under server.header.
    assert variables['server'].get('header') is not None
    assert variables['server']['header'].get('accept') == '*/*'
    assert variables['server']['header'].get('connection') == 'keep-alive'
    assert variables['server']['header'].get('host') == 'localhost:8080'
def test_servervariables_secure(manager):
    """
    Check that the 'secure' server variable is True over https
    :param manager: defined in conftest.py
    :return: None
    """
    variables = manager.sget_json('servervariables')
    assert variables.get('status') == 'OK'
    assert variables['server'].get('secure') is True
def test_cookies(manager):
    """
    Check that request cookies are reported back under server.cookies
    :param manager: defined in conftest.py
    :return: None
    """
    variables = manager.get_json('servervariables', cookies={"keks": "abcdefg"})
    assert variables.get('status') == 'OK'
    assert variables.get('server')
    assert variables['server'].get('cookies')
    assert variables['server']['cookies'].get('keks') == 'abcdefg'
| 32.409091 | 80 | 0.648567 | import pytest
import os
def test_ping(manager):
response = manager.get('ping')
assert response.text == 'PONG'
assert response.headers['Content-Length'] == '4'
def test_get_html(manager):
with open('./testserver/docroot/test.html') as inf:
str = inf.read()
response = manager.get('/test.html')
assert response.text == str
assert response.headers['Content-Type'] == 'text/html; charset=utf-8'
def test_get_not_found(manager):
with pytest.raises(Exception) as e_info:
resource = manager.get('/gaga.html')
def test_sget_html(manager):
with open('./testserver/docroot/test.html') as inf:
str = inf.read()
response = manager.sget('/test.html')
assert response.text == str
assert response.headers['Content-Type'] == 'text/html; charset=utf-8'
def test_get_csv(manager):
with open('./testserver/docroot/test.csv') as inf:
str = inf.read()
response = manager.get('/test.csv')
assert response.text == str
assert response.headers['Content-Type'] == 'text/csv'
def test_sget_csv(manager):
with open('./testserver/docroot/test.csv') as inf:
str = inf.read()
response = manager.sget('/test.csv')
assert response.text == str
assert response.headers['Content-Type'] == 'text/csv'
def test_get_range(manager):
response = manager.get('/range.dat', headers={"Range": "bytes=5-14"})
assert len(response.text) == 10
assert response.text == '456789B012'
assert response.headers['Content-Type'] == 'text/plain'
def test_elua(manager):
    # A server-side Lua template (.elua) must render to exactly this document.
    str = r"""<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>LuaTest</title>
</head>
<body>
Hello from Lua!
</body>
</html>"""
    response = manager.get('/luatest.elua')
    assert response.text == str
    assert response.headers['Content-Type'] == 'text/html; charset=utf-8'
def test_servervariables(manager):
    # With param=all the endpoint dumps both its static configuration and the
    # per-request server variables; spot-check every documented field.
    variables = manager.get_json('servervariables', params={'param': 'all'})
    assert variables.get('status') == 'OK'
    # Static server configuration.
    assert variables.get('config') is not None
    assert variables['config'].get('port') == 8080
    assert variables['config'].get('scriptdir') == './testserver/scripts'
    assert variables['config'].get('docroot') == './testserver/docroot'
    assert variables['config'].get('keep_alive') == 5
    assert variables['config'].get('nthreads') == 4
    assert variables['config'].get('max_post_size') == '1MB'
    # Per-request variables for this plain-HTTP GET.
    assert variables.get('server') is not None
    assert variables['server'].get('method') == 'GET'
    assert variables['server'].get('host') == 'localhost:8080'
    assert variables['server'].get('client_ip') == '127.0.0.1'
    assert variables['server'].get('client_port')
    assert variables['server'].get('uri') == '/servervariables'
    assert variables['server'].get('secure') == False
    assert variables['server'].get('get') is not None
    assert variables['server']['get'].get('param') == 'all'
    # The TLS port is only configured when the build has OpenSSL.
    assert variables['server'].get('has_openssl') is not None
    if variables['server']['has_openssl']:
        assert variables['config'].get('ssl_port') == 8443
    # Request headers as seen by the server.
    assert variables['server'].get('header') is not None
    assert variables['server']['header'].get('accept') == '*/*'
    assert variables['server']['header'].get('connection') == 'keep-alive'
    assert variables['server']['header'].get('host') == 'localhost:8080'
def test_servervariables_secure(manager):
    # Over HTTPS the server must flag the connection as secure.
    resp = manager.sget_json('servervariables')
    assert resp.get('status') == 'OK'
    assert resp['server'].get('secure') is True
def test_cookies(manager):
    # The cookie set on the request must round-trip into the server variables.
    result = manager.get_json('servervariables', cookies={"keks": "abcdefg"})
    assert result.get('status') == 'OK'
    assert result.get('server')
    assert result['server'].get('cookies')
    assert result['server']['cookies'].get('keks') == 'abcdefg'
| true | true |
f7fd34d97ad8317f1c22fd94f7ae058db7818685 | 1,856 | py | Python | final/plot/plot_2D_distribution.py | YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3 | a79a8ae4bc0f8b2662f71df4caaa7fa382735f9f | [
"Apache-2.0"
] | 22 | 2021-06-04T13:01:08.000Z | 2022-02-18T13:19:46.000Z | final/plot/plot_2D_distribution.py | YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3 | a79a8ae4bc0f8b2662f71df4caaa7fa382735f9f | [
"Apache-2.0"
] | null | null | null | final/plot/plot_2D_distribution.py | YihaoChan/2021-Tianchi-GAIIC-Track1-Rank-3 | a79a8ae4bc0f8b2662f71df4caaa7fa382735f9f | [
"Apache-2.0"
] | 2 | 2021-06-06T09:41:08.000Z | 2021-06-09T01:05:10.000Z | import numpy as np
from sklearn.decomposition import PCA
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import os
def plot(set_type, color):
    """Project report descriptions to 2-D with PCA and save a scatter plot.

    Args:
        set_type: Which dataset to visualise: 'train' or 'test_a'.
        color: Matplotlib colour for the scatter points.

    Raises:
        ValueError: If ``set_type`` is not one of the two known datasets.
    """
    if set_type == 'train':
        df = pd.read_csv("./datasets/track1_round1_train_20210222.csv", header=None)
        df.columns = ['report_ID', 'description', 'region']
        title_name = 'Train Set'
    elif set_type == 'test_a':
        df = pd.read_csv("./datasets/track1_round1_testA_20210222.csv", header=None)
        df.columns = ['report_ID', 'description']
        title_name = 'Test Set A'
    else:
        # Previously an unknown set_type fell through with df=None and only
        # failed later with an opaque AttributeError; fail fast instead.
        raise ValueError("set_type must be 'train' or 'test_a', got %r" % set_type)

    def get_2D_results():
        # Parse the '|'-delimited token ids, pad/truncate every row to length
        # 100, then reduce to two principal components.
        X = [list(map(int, i.strip('|').strip().split())) for i in df['description'].values]
        X = np.array(
            [list(r) for r in tf.keras.preprocessing.sequence.pad_sequences(X, maxlen=100, padding='post', value=0)]
        )
        pca = PCA(n_components=2)
        return pca.fit_transform(X)

    def show(data):
        xs = [k for k, v in data]
        ys = [v for k, v in data]
        plt.style.use('seaborn')
        # Palette notes: blue #3E6BF2, dark blue #3A2885, purple #8273B0,
        # emerald #009298, medium blue #426EB4
        plt.scatter(xs, ys, c=color)
        plt.xticks([])
        plt.yticks([])
        plt.title(
            'Distribution of Description for %s' % title_name,
            fontdict=dict(family='Times New Roman', weight='bold')
        )
        plt.savefig('%s/distribution_%s.jpg' % (save_fig_path, set_type), dpi=1000, transparent=True)
        plt.close()

    show(get_2D_results())
if __name__ == '__main__':
    save_fig_path = './fig'
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() / os.makedirs() pair.
    os.makedirs(save_fig_path, exist_ok=True)
    plot('train', '#009298')
    plot('test_a', '#009298')
| 30.42623 | 117 | 0.589978 | import numpy as np
from sklearn.decomposition import PCA
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import os
def plot(set_type, color):
df = None
title_name = None
if set_type == 'train':
df = pd.read_csv("./datasets/track1_round1_train_20210222.csv", header=None)
df.columns = ['report_ID', 'description', 'region']
title_name = 'Train Set'
elif set_type == 'test_a':
df = pd.read_csv("./datasets/track1_round1_testA_20210222.csv", header=None)
df.columns = ['report_ID', 'description']
title_name = 'Test Set A'
def get_2D_results():
X = [list(map(int, i.strip('|').strip().split())) for i in df['description'].values]
X = np.array(
[list(r) for r in tf.keras.preprocessing.sequence.pad_sequences(X, maxlen=100, padding='post', value=0)]
)
pca = PCA(n_components=2)
res = pca.fit_transform(X)
return res
def show(data):
x = [k for k, v in data]
y = [v for k, v in data]
plt.style.use('seaborn')
% title_name,
fontdict=dict(family='Times New Roman', weight='bold')
)
plt.savefig('%s/distribution_%s.jpg' % (save_fig_path, set_type), dpi=1000, transparent=True)
plt.close()
two_dimension_results = get_2D_results()
show(two_dimension_results)
if __name__ == '__main__':
save_fig_path = './fig'
if not os.path.exists(save_fig_path):
os.makedirs(save_fig_path)
plot('train', '#009298')
plot('test_a', '#009298')
| true | true |
f7fd354d7fdd5c1519e198b3978bfca58df65d8d | 1,611 | py | Python | py/test/selenium/webdriver/remote/test_remote_interactions.py | chromium-googlesource-mirror/selenium | fcf26da81afa5d3e8edfc776f558eebf2e7d28b3 | [
"Apache-2.0"
] | null | null | null | py/test/selenium/webdriver/remote/test_remote_interactions.py | chromium-googlesource-mirror/selenium | fcf26da81afa5d3e8edfc776f558eebf2e7d28b3 | [
"Apache-2.0"
] | null | null | null | py/test/selenium/webdriver/remote/test_remote_interactions.py | chromium-googlesource-mirror/selenium | fcf26da81afa5d3e8edfc776f558eebf2e7d28b3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2011 WebDriver committers
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer
from selenium.test.selenium.webdriver.common import interactions_tests
from selenium import webdriver
import pytest
from selenium.test.selenium.common import utils
def setup_module(module):
    # Start the shared selenium server, then a local static web server whose
    # pages the interaction tests exercise.
    utils.start_server(module)
    webserver = SimpleWebServer()
    webserver.start()
    # Stash the web server and a remote Firefox session on the test class so
    # the inherited tests and teardown_module can reach them.
    RemoteAdvancedUserInteractionTest.webserver = webserver
    RemoteAdvancedUserInteractionTest.driver = \
        webdriver.Remote(desired_capabilities=webdriver.DesiredCapabilities.FIREFOX)
@pytest.mark.skipif('sys.platform == "darwin"')
class RemoteAdvancedUserInteractionTest(interactions_tests.AdvancedUserInteractionTest):
    """Runs the shared advanced-user-interaction suite against the Remote
    Firefox session created in setup_module; skipped on macOS."""
    pass
def teardown_module(module):
    # setup_module may have failed before `driver` / `webserver` were
    # assigned, so tolerate AttributeError and clean up whatever did start.
    try:
        RemoteAdvancedUserInteractionTest.driver.quit()
    except AttributeError:
        pass
    try:
        RemoteAdvancedUserInteractionTest.webserver.stop()
    except AttributeError:
        pass
    utils.stop_server(module)
| 35.021739 | 88 | 0.774674 |
from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer
from selenium.test.selenium.webdriver.common import interactions_tests
from selenium import webdriver
import pytest
from selenium.test.selenium.common import utils
def setup_module(module):
utils.start_server(module)
webserver = SimpleWebServer()
webserver.start()
RemoteAdvancedUserInteractionTest.webserver = webserver
RemoteAdvancedUserInteractionTest.driver = \
webdriver.Remote(desired_capabilities=webdriver.DesiredCapabilities.FIREFOX)
@pytest.mark.skipif('sys.platform == "darwin"')
class RemoteAdvancedUserInteractionTest(interactions_tests.AdvancedUserInteractionTest):
pass
def teardown_module(module):
try:
RemoteAdvancedUserInteractionTest.driver.quit()
except AttributeError:
pass
try:
RemoteAdvancedUserInteractionTest.webserver.stop()
except AttributeError:
pass
utils.stop_server(module)
| true | true |
f7fd358c0dd3696a4d9617da9d7be40ad188b5b9 | 15,856 | py | Python | saleor/graphql/order/mutations/draft_orders.py | saurabhsingla15/himichain | f7e446c89951a60383632907fed14bde5c3ad6b5 | [
"CC-BY-4.0"
] | null | null | null | saleor/graphql/order/mutations/draft_orders.py | saurabhsingla15/himichain | f7e446c89951a60383632907fed14bde5c3ad6b5 | [
"CC-BY-4.0"
] | 16 | 2020-02-12T03:06:29.000Z | 2022-02-10T20:29:25.000Z | saleor/graphql/order/mutations/draft_orders.py | saurabhsingla15/himichain | f7e446c89951a60383632907fed14bde5c3ad6b5 | [
"CC-BY-4.0"
] | null | null | null | import graphene
from django.core.exceptions import ValidationError
from graphene.types import InputObjectType
from ....account.models import User
from ....core.exceptions import InsufficientStock
from ....core.taxes import zero_taxed_money
from ....order import OrderStatus, events, models
from ....order.actions import order_created
from ....order.error_codes import OrderErrorCode
from ....order.utils import (
add_variant_to_order,
allocate_stock,
change_order_line_quantity,
delete_order_line,
recalculate_order,
update_order_prices,
)
from ...account.i18n import I18nMixin
from ...account.types import AddressInput
from ...core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation
from ...core.scalars import Decimal
from ...core.types.common import OrderError
from ...product.types import ProductVariant
from ..types import Order, OrderLine
from ..utils import validate_draft_order
class OrderLineInput(graphene.InputObjectType):
    # Shared input for line mutations; a quantity alone suffices for updates.
    quantity = Decimal(
        description="Number of variant items ordered.", required=True
    )
class OrderLineCreateInput(OrderLineInput):
    # Creation additionally needs to know which product variant the line is for.
    variant_id = graphene.ID(
        description="Product variant ID.", name="variantId", required=True
    )
class DraftOrderInput(InputObjectType):
    """Fields shared by draft-order create and update mutations."""

    billing_address = AddressInput(description="Billing address of the customer.")
    user = graphene.ID(
        # Fixed typo: the keyword was `descripton`, so the text never reached
        # the GraphQL schema as the field's description.
        description="Customer associated with the draft order.", name="user"
    )
    user_email = graphene.String(description="Email address of the customer.")
    discount = Decimal(description="Discount amount for the order.")
    shipping_address = AddressInput(description="Shipping address of the customer.")
    shipping_method = graphene.ID(
        description="ID of a selected shipping method.", name="shippingMethod"
    )
    voucher = graphene.ID(
        description="ID of the voucher associated with the order.", name="voucher"
    )
class DraftOrderCreateInput(DraftOrderInput):
    # Creation may also ship with an initial set of order lines.
    lines = graphene.List(
        OrderLineCreateInput,
        description=(
            "Variant line input consisting of variant ID and quantity of products."
        ),
    )
class DraftOrderCreate(ModelMutation, I18nMixin):
    """Creates a draft order, optionally with initial lines and addresses."""

    class Arguments:
        input = DraftOrderCreateInput(
            required=True, description="Fields required to create an order."
        )

    class Meta:
        description = "Creates a new draft order."
        model = models.Order
        permissions = ("order.manage_orders",)
        error_type_class = OrderError
        error_type_field = "order_errors"

    @classmethod
    def clean_input(cls, info, instance, data):
        """Resolve line variants, force DRAFT status and clean both addresses."""
        shipping_address = data.pop("shipping_address", None)
        billing_address = data.pop("billing_address", None)
        cleaned_input = super().clean_input(info, instance, data)

        lines = data.pop("lines", None)
        if lines:
            variant_ids = [line.get("variant_id") for line in lines]
            variants = cls.get_nodes_or_error(variant_ids, "variants", ProductVariant)
            quantities = [line.get("quantity") for line in lines]
            cleaned_input["variants"] = variants
            cleaned_input["quantities"] = quantities
        # Draft orders always start in the DRAFT status.
        cleaned_input["status"] = OrderStatus.DRAFT
        display_gross_prices = info.context.site.settings.display_gross_prices
        cleaned_input["display_gross_prices"] = display_gross_prices

        # Set up default addresses if possible
        user = cleaned_input.get("user")
        if user and not shipping_address:
            cleaned_input["shipping_address"] = user.default_shipping_address
        if user and not billing_address:
            cleaned_input["billing_address"] = user.default_billing_address

        if shipping_address:
            shipping_address = cls.validate_address(
                shipping_address, instance=instance.shipping_address
            )
            shipping_address = info.context.extensions.change_user_address(
                shipping_address, "shipping", user=instance
            )
            cleaned_input["shipping_address"] = shipping_address
        if billing_address:
            billing_address = cls.validate_address(
                billing_address, instance=instance.billing_address
            )
            billing_address = info.context.extensions.change_user_address(
                billing_address, "billing", user=instance
            )
            cleaned_input["billing_address"] = billing_address
        return cleaned_input

    @staticmethod
    def _save_addresses(info, instance: models.Order, cleaned_input):
        # Persist copies of the addresses on the order so later edits to the
        # customer's address book cannot mutate this order.
        shipping_address = cleaned_input.get("shipping_address")
        if shipping_address:
            shipping_address.save()
            instance.shipping_address = shipping_address.get_copy()
        billing_address = cleaned_input.get("billing_address")
        if billing_address:
            billing_address.save()
            instance.billing_address = billing_address.get_copy()

    @staticmethod
    def _save_lines(info, instance, quantities, variants):
        # Add (quantity, variant) lines to the order, then record one event
        # covering all of them.
        if variants and quantities:
            lines = []
            for variant, quantity in zip(variants, quantities):
                lines.append((quantity, variant))
                add_variant_to_order(
                    instance,
                    variant,
                    quantity,
                    allow_overselling=True,
                    track_inventory=False,
                )

            # New event
            events.draft_order_added_products_event(
                order=instance, user=info.context.user, order_lines=lines
            )

    @classmethod
    def _commit_changes(cls, info, instance, cleaned_input):
        # `created` is the pre-save pk: falsy means the instance is new.
        created = instance.pk
        super().save(info, instance, cleaned_input)

        # Create draft created event if the instance is from scratch
        if not created:
            events.draft_order_created_event(order=instance, user=info.context.user)

        instance.save(update_fields=["billing_address", "shipping_address"])

    @classmethod
    def _refresh_lines_unit_price(cls, info, instance, cleaned_input, new_instance):
        if new_instance:
            # It is a new instance, all new lines have already updated prices.
            return
        # On updates, a changed address can affect taxes, so reprice.
        shipping_address = cleaned_input.get("shipping_address")
        if shipping_address and instance.is_shipping_required():
            update_order_prices(instance, info.context.discounts)
        billing_address = cleaned_input.get("billing_address")
        if billing_address and not instance.is_shipping_required():
            update_order_prices(instance, info.context.discounts)

    @classmethod
    def save(cls, info, instance, cleaned_input):
        """Persist addresses, the order itself and its lines, then reprice."""
        new_instance = not bool(instance.pk)

        # Process addresses
        cls._save_addresses(info, instance, cleaned_input)

        # Save any changes create/update the draft
        cls._commit_changes(info, instance, cleaned_input)

        # Process any lines to add
        cls._save_lines(
            info,
            instance,
            cleaned_input.get("quantities"),
            cleaned_input.get("variants"),
        )

        cls._refresh_lines_unit_price(info, instance, cleaned_input, new_instance)

        # Post-process the results
        recalculate_order(instance)
class DraftOrderUpdate(DraftOrderCreate):
    """Same pipeline as DraftOrderCreate, but resolves an existing order by ID
    and accepts the line-less DraftOrderInput."""

    class Arguments:
        id = graphene.ID(required=True, description="ID of an order to update.")
        input = DraftOrderInput(
            required=True, description="Fields required to update an order."
        )

    class Meta:
        description = "Updates a draft order."
        model = models.Order
        permissions = ("order.manage_orders",)
        error_type_class = OrderError
        error_type_field = "order_errors"
class DraftOrderDelete(ModelDeleteMutation):
    """Deletes a draft order; the deletion itself is handled by
    ModelDeleteMutation."""

    class Arguments:
        id = graphene.ID(required=True, description="ID of a draft order to delete.")

    class Meta:
        description = "Deletes a draft order."
        model = models.Order
        permissions = ("order.manage_orders",)
        error_type_class = OrderError
        error_type_field = "order_errors"
class DraftOrderComplete(BaseMutation):
    """Turns a valid draft order into a regular, unfulfilled order."""

    order = graphene.Field(Order, description="Completed order.")

    class Arguments:
        id = graphene.ID(
            required=True, description="ID of the order that will be completed."
        )

    class Meta:
        description = "Completes creating an order."
        permissions = ("order.manage_orders",)
        error_type_class = OrderError
        error_type_field = "order_errors"

    @classmethod
    def update_user_fields(cls, order):
        # Keep user and user_email mutually consistent: prefer the attached
        # user's email, otherwise try to resolve a user from the stored email.
        if order.user:
            order.user_email = order.user.email
        elif order.user_email:
            try:
                order.user = User.objects.get(email=order.user_email)
            except User.DoesNotExist:
                order.user = None

    @classmethod
    def perform_mutation(cls, _root, info, id):
        order = cls.get_node_or_error(info, id, only_type=Order)
        validate_draft_order(order)
        cls.update_user_fields(order)
        order.status = OrderStatus.UNFULFILLED

        if not order.is_shipping_required():
            # Shipping data is meaningless for a non-shippable order: drop it.
            order.shipping_method_name = None
            order.shipping_price = zero_taxed_money()
            if order.shipping_address:
                order.shipping_address.delete()

        order.save()

        oversold_items = []
        for line in order:
            try:
                line.variant.check_quantity(line.quantity)
                allocate_stock(line.variant, line.quantity)
            except InsufficientStock:
                # Allocate whatever stock is available and remember the
                # shortfall so it can be reported via an order event.
                allocate_stock(line.variant, line.variant.quantity_available)
                oversold_items.append(str(line))
        order_created(order, user=info.context.user, from_draft=True)

        if oversold_items:
            events.draft_order_oversold_items_event(
                order=order, user=info.context.user, oversold_items=oversold_items
            )

        return DraftOrderComplete(order=order)
class DraftOrderLinesCreate(BaseMutation):
    """Adds one or more order lines to an existing draft order."""

    order = graphene.Field(Order, description="A related draft order.")
    order_lines = graphene.List(
        graphene.NonNull(OrderLine), description="List of newly added order lines."
    )

    class Arguments:
        id = graphene.ID(
            required=True, description="ID of the draft order to add the lines to."
        )
        input = graphene.List(
            OrderLineCreateInput,
            required=True,
            description="Fields required to add order lines.",
        )

    class Meta:
        description = "Create order lines for a draft order."
        permissions = ("order.manage_orders",)
        error_type_class = OrderError
        error_type_field = "order_errors"

    @classmethod
    def perform_mutation(cls, _root, info, **data):
        order = cls.get_node_or_error(info, data.get("id"), only_type=Order)
        # Only drafts may be mutated this way.
        if order.status != OrderStatus.DRAFT:
            raise ValidationError(
                {
                    "id": ValidationError(
                        "Only draft orders can be edited.",
                        code=OrderErrorCode.NOT_EDITABLE,
                    )
                }
            )

        # Validate every requested line before touching the order.
        lines_to_add = []
        for input_line in data.get("input"):
            variant_id = input_line["variant_id"]
            variant = cls.get_node_or_error(
                info, variant_id, "variant_id", only_type=ProductVariant
            )
            quantity = input_line["quantity"]
            if quantity > 0:
                if variant:
                    lines_to_add.append((quantity, variant))
            else:
                raise ValidationError(
                    {
                        "quantity": ValidationError(
                            "Ensure this value is greater than 0.",
                            code=OrderErrorCode.ZERO_QUANTITY,
                        )
                    }
                )

        # Add the lines
        lines = [
            add_variant_to_order(order, variant, quantity, allow_overselling=True)
            for quantity, variant in lines_to_add
        ]

        # Create the event
        events.draft_order_added_products_event(
            order=order, user=info.context.user, order_lines=lines_to_add
        )

        recalculate_order(order)
        return DraftOrderLinesCreate(order=order, order_lines=lines)
class DraftOrderLineDelete(BaseMutation):
    """Removes a single line from a draft order."""

    order = graphene.Field(Order, description="A related draft order.")
    order_line = graphene.Field(
        OrderLine, description="An order line that was deleted."
    )

    class Arguments:
        id = graphene.ID(description="ID of the order line to delete.", required=True)

    class Meta:
        description = "Deletes an order line from a draft order."
        permissions = ("order.manage_orders",)
        error_type_class = OrderError
        error_type_field = "order_errors"

    @classmethod
    def perform_mutation(cls, _root, info, id):
        line = cls.get_node_or_error(info, id, only_type=OrderLine)
        order = line.order
        # Only drafts may be mutated this way.
        if order.status != OrderStatus.DRAFT:
            raise ValidationError(
                {
                    "id": ValidationError(
                        "Only draft orders can be edited.",
                        code=OrderErrorCode.NOT_EDITABLE,
                    )
                }
            )
        db_id = line.id
        delete_order_line(line)
        # Restore the pre-delete id so the removed line can still be
        # serialized in the payload and the event below.
        line.id = db_id

        # Create the removal event
        events.draft_order_removed_products_event(
            order=order, user=info.context.user, order_lines=[(line.quantity, line)]
        )

        recalculate_order(order)
        return DraftOrderLineDelete(order=order, order_line=line)
class DraftOrderLineUpdate(ModelMutation):
    """Changes the quantity of an existing draft-order line."""

    order = graphene.Field(Order, description="A related draft order.")

    class Arguments:
        id = graphene.ID(description="ID of the order line to update.", required=True)
        input = OrderLineInput(
            required=True, description="Fields required to update an order line."
        )

    class Meta:
        description = "Updates an order line of a draft order."
        model = models.OrderLine
        permissions = ("order.manage_orders",)
        error_type_class = OrderError
        error_type_field = "order_errors"

    @classmethod
    def clean_input(cls, info, instance, data):
        # Remember the quantity before cleaning so save() can report the delta.
        instance.old_quantity = instance.quantity
        cleaned_input = super().clean_input(info, instance, data)
        # Only drafts may be mutated this way.
        if instance.order.status != OrderStatus.DRAFT:
            raise ValidationError(
                {
                    "id": ValidationError(
                        "Only draft orders can be edited.",
                        code=OrderErrorCode.NOT_EDITABLE,
                    )
                }
            )

        quantity = data["quantity"]
        if quantity <= 0:
            raise ValidationError(
                {
                    "quantity": ValidationError(
                        "Ensure this value is greater than 0.",
                        code=OrderErrorCode.ZERO_QUANTITY,
                    )
                }
            )
        return cleaned_input

    @classmethod
    def save(cls, info, instance, cleaned_input):
        change_order_line_quantity(
            info.context.user, instance, instance.old_quantity, instance.quantity
        )
        recalculate_order(instance.order)

    @classmethod
    def success_response(cls, instance):
        # Expose the parent order alongside the updated line in the payload.
        response = super().success_response(instance)
        response.order = instance.order
        return response
| 35.235556 | 86 | 0.627964 | import graphene
from django.core.exceptions import ValidationError
from graphene.types import InputObjectType
from ....account.models import User
from ....core.exceptions import InsufficientStock
from ....core.taxes import zero_taxed_money
from ....order import OrderStatus, events, models
from ....order.actions import order_created
from ....order.error_codes import OrderErrorCode
from ....order.utils import (
add_variant_to_order,
allocate_stock,
change_order_line_quantity,
delete_order_line,
recalculate_order,
update_order_prices,
)
from ...account.i18n import I18nMixin
from ...account.types import AddressInput
from ...core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation
from ...core.scalars import Decimal
from ...core.types.common import OrderError
from ...product.types import ProductVariant
from ..types import Order, OrderLine
from ..utils import validate_draft_order
class OrderLineInput(graphene.InputObjectType):
quantity = Decimal(
description="Number of variant items ordered.", required=True
)
class OrderLineCreateInput(OrderLineInput):
variant_id = graphene.ID(
description="Product variant ID.", name="variantId", required=True
)
class DraftOrderInput(InputObjectType):
billing_address = AddressInput(description="Billing address of the customer.")
user = graphene.ID(
descripton="Customer associated with the draft order.", name="user"
)
user_email = graphene.String(description="Email address of the customer.")
discount = Decimal(description="Discount amount for the order.")
shipping_address = AddressInput(description="Shipping address of the customer.")
shipping_method = graphene.ID(
description="ID of a selected shipping method.", name="shippingMethod"
)
voucher = graphene.ID(
description="ID of the voucher associated with the order.", name="voucher"
)
class DraftOrderCreateInput(DraftOrderInput):
lines = graphene.List(
OrderLineCreateInput,
description=(
"Variant line input consisting of variant ID and quantity of products."
),
)
class DraftOrderCreate(ModelMutation, I18nMixin):
class Arguments:
input = DraftOrderCreateInput(
required=True, description="Fields required to create an order."
)
class Meta:
description = "Creates a new draft order."
model = models.Order
permissions = ("order.manage_orders",)
error_type_class = OrderError
error_type_field = "order_errors"
@classmethod
def clean_input(cls, info, instance, data):
shipping_address = data.pop("shipping_address", None)
billing_address = data.pop("billing_address", None)
cleaned_input = super().clean_input(info, instance, data)
lines = data.pop("lines", None)
if lines:
variant_ids = [line.get("variant_id") for line in lines]
variants = cls.get_nodes_or_error(variant_ids, "variants", ProductVariant)
quantities = [line.get("quantity") for line in lines]
cleaned_input["variants"] = variants
cleaned_input["quantities"] = quantities
cleaned_input["status"] = OrderStatus.DRAFT
display_gross_prices = info.context.site.settings.display_gross_prices
cleaned_input["display_gross_prices"] = display_gross_prices
user = cleaned_input.get("user")
if user and not shipping_address:
cleaned_input["shipping_address"] = user.default_shipping_address
if user and not billing_address:
cleaned_input["billing_address"] = user.default_billing_address
if shipping_address:
shipping_address = cls.validate_address(
shipping_address, instance=instance.shipping_address
)
shipping_address = info.context.extensions.change_user_address(
shipping_address, "shipping", user=instance
)
cleaned_input["shipping_address"] = shipping_address
if billing_address:
billing_address = cls.validate_address(
billing_address, instance=instance.billing_address
)
billing_address = info.context.extensions.change_user_address(
billing_address, "billing", user=instance
)
cleaned_input["billing_address"] = billing_address
return cleaned_input
@staticmethod
def _save_addresses(info, instance: models.Order, cleaned_input):
shipping_address = cleaned_input.get("shipping_address")
if shipping_address:
shipping_address.save()
instance.shipping_address = shipping_address.get_copy()
billing_address = cleaned_input.get("billing_address")
if billing_address:
billing_address.save()
instance.billing_address = billing_address.get_copy()
@staticmethod
def _save_lines(info, instance, quantities, variants):
if variants and quantities:
lines = []
for variant, quantity in zip(variants, quantities):
lines.append((quantity, variant))
add_variant_to_order(
instance,
variant,
quantity,
allow_overselling=True,
track_inventory=False,
)
events.draft_order_added_products_event(
order=instance, user=info.context.user, order_lines=lines
)
@classmethod
def _commit_changes(cls, info, instance, cleaned_input):
created = instance.pk
super().save(info, instance, cleaned_input)
if not created:
events.draft_order_created_event(order=instance, user=info.context.user)
instance.save(update_fields=["billing_address", "shipping_address"])
@classmethod
def _refresh_lines_unit_price(cls, info, instance, cleaned_input, new_instance):
if new_instance:
return
shipping_address = cleaned_input.get("shipping_address")
if shipping_address and instance.is_shipping_required():
update_order_prices(instance, info.context.discounts)
billing_address = cleaned_input.get("billing_address")
if billing_address and not instance.is_shipping_required():
update_order_prices(instance, info.context.discounts)
@classmethod
def save(cls, info, instance, cleaned_input):
new_instance = not bool(instance.pk)
cls._save_addresses(info, instance, cleaned_input)
cls._commit_changes(info, instance, cleaned_input)
cls._save_lines(
info,
instance,
cleaned_input.get("quantities"),
cleaned_input.get("variants"),
)
cls._refresh_lines_unit_price(info, instance, cleaned_input, new_instance)
recalculate_order(instance)
class DraftOrderUpdate(DraftOrderCreate):
class Arguments:
id = graphene.ID(required=True, description="ID of an order to update.")
input = DraftOrderInput(
required=True, description="Fields required to update an order."
)
class Meta:
description = "Updates a draft order."
model = models.Order
permissions = ("order.manage_orders",)
error_type_class = OrderError
error_type_field = "order_errors"
class DraftOrderDelete(ModelDeleteMutation):
class Arguments:
id = graphene.ID(required=True, description="ID of a draft order to delete.")
class Meta:
description = "Deletes a draft order."
model = models.Order
permissions = ("order.manage_orders",)
error_type_class = OrderError
error_type_field = "order_errors"
class DraftOrderComplete(BaseMutation):
order = graphene.Field(Order, description="Completed order.")
class Arguments:
id = graphene.ID(
required=True, description="ID of the order that will be completed."
)
class Meta:
description = "Completes creating an order."
permissions = ("order.manage_orders",)
error_type_class = OrderError
error_type_field = "order_errors"
@classmethod
def update_user_fields(cls, order):
if order.user:
order.user_email = order.user.email
elif order.user_email:
try:
order.user = User.objects.get(email=order.user_email)
except User.DoesNotExist:
order.user = None
@classmethod
def perform_mutation(cls, _root, info, id):
order = cls.get_node_or_error(info, id, only_type=Order)
validate_draft_order(order)
cls.update_user_fields(order)
order.status = OrderStatus.UNFULFILLED
if not order.is_shipping_required():
order.shipping_method_name = None
order.shipping_price = zero_taxed_money()
if order.shipping_address:
order.shipping_address.delete()
order.save()
oversold_items = []
for line in order:
try:
line.variant.check_quantity(line.quantity)
allocate_stock(line.variant, line.quantity)
except InsufficientStock:
allocate_stock(line.variant, line.variant.quantity_available)
oversold_items.append(str(line))
order_created(order, user=info.context.user, from_draft=True)
if oversold_items:
events.draft_order_oversold_items_event(
order=order, user=info.context.user, oversold_items=oversold_items
)
return DraftOrderComplete(order=order)
class DraftOrderLinesCreate(BaseMutation):
order = graphene.Field(Order, description="A related draft order.")
order_lines = graphene.List(
graphene.NonNull(OrderLine), description="List of newly added order lines."
)
class Arguments:
id = graphene.ID(
required=True, description="ID of the draft order to add the lines to."
)
input = graphene.List(
OrderLineCreateInput,
required=True,
description="Fields required to add order lines.",
)
class Meta:
description = "Create order lines for a draft order."
permissions = ("order.manage_orders",)
error_type_class = OrderError
error_type_field = "order_errors"
@classmethod
def perform_mutation(cls, _root, info, **data):
order = cls.get_node_or_error(info, data.get("id"), only_type=Order)
if order.status != OrderStatus.DRAFT:
raise ValidationError(
{
"id": ValidationError(
"Only draft orders can be edited.",
code=OrderErrorCode.NOT_EDITABLE,
)
}
)
lines_to_add = []
for input_line in data.get("input"):
variant_id = input_line["variant_id"]
variant = cls.get_node_or_error(
info, variant_id, "variant_id", only_type=ProductVariant
)
quantity = input_line["quantity"]
if quantity > 0:
if variant:
lines_to_add.append((quantity, variant))
else:
raise ValidationError(
{
"quantity": ValidationError(
"Ensure this value is greater than 0.",
code=OrderErrorCode.ZERO_QUANTITY,
)
}
)
lines = [
add_variant_to_order(order, variant, quantity, allow_overselling=True)
for quantity, variant in lines_to_add
]
events.draft_order_added_products_event(
order=order, user=info.context.user, order_lines=lines_to_add
)
recalculate_order(order)
return DraftOrderLinesCreate(order=order, order_lines=lines)
class DraftOrderLineDelete(BaseMutation):
    """Delete a single order line from a draft order."""
    # Payload fields returned to the client.
    order = graphene.Field(Order, description="A related draft order.")
    order_line = graphene.Field(
        OrderLine, description="An order line that was deleted."
    )
    class Arguments:
        id = graphene.ID(description="ID of the order line to delete.", required=True)
    class Meta:
        description = "Deletes an order line from a draft order."
        permissions = ("order.manage_orders",)
        error_type_class = OrderError
        error_type_field = "order_errors"
    @classmethod
    def perform_mutation(cls, _root, info, id):
        line = cls.get_node_or_error(info, id, only_type=OrderLine)
        order = line.order
        # Deleting lines is only permitted while the order is still a draft.
        if order.status != OrderStatus.DRAFT:
            raise ValidationError(
                {
                    "id": ValidationError(
                        "Only draft orders can be edited.",
                        code=OrderErrorCode.NOT_EDITABLE,
                    )
                }
            )
        db_id = line.id
        delete_order_line(line)
        # Django clears the primary key on delete; restore it so the removed
        # line can still be serialized in the mutation response.
        line.id = db_id
        events.draft_order_removed_products_event(
            order=order, user=info.context.user, order_lines=[(line.quantity, line)]
        )
        # Order totals changed; recompute before responding.
        recalculate_order(order)
        return DraftOrderLineDelete(order=order, order_line=line)
class DraftOrderLineUpdate(ModelMutation):
    """Update a single order line (its quantity) on a draft order."""
    order = graphene.Field(Order, description="A related draft order.")
    class Arguments:
        id = graphene.ID(description="ID of the order line to update.", required=True)
        input = OrderLineInput(
            required=True, description="Fields required to update an order line."
        )
    class Meta:
        description = "Updates an order line of a draft order."
        model = models.OrderLine
        permissions = ("order.manage_orders",)
        error_type_class = OrderError
        error_type_field = "order_errors"
    @classmethod
    def clean_input(cls, info, instance, data):
        # Stash the pre-update quantity on the instance: ``save`` below reads
        # it to log the quantity-change event with the correct old value.
        instance.old_quantity = instance.quantity
        cleaned_input = super().clean_input(info, instance, data)
        # Only draft orders may be edited.
        if instance.order.status != OrderStatus.DRAFT:
            raise ValidationError(
                {
                    "id": ValidationError(
                        "Only draft orders can be edited.",
                        code=OrderErrorCode.NOT_EDITABLE,
                    )
                }
            )
        quantity = data["quantity"]
        if quantity <= 0:
            raise ValidationError(
                {
                    "quantity": ValidationError(
                        "Ensure this value is greater than 0.",
                        code=OrderErrorCode.ZERO_QUANTITY,
                    )
                }
            )
        return cleaned_input
    @classmethod
    def save(cls, info, instance, cleaned_input):
        # Persist the change and emit the audit event using the quantity
        # captured in ``clean_input``.
        change_order_line_quantity(
            info.context.user, instance, instance.old_quantity, instance.quantity
        )
        recalculate_order(instance.order)
    @classmethod
    def success_response(cls, instance):
        # Attach the parent order so clients get the recalculated totals.
        response = super().success_response(instance)
        response.order = instance.order
        return response
| true | true |
f7fd3724d446da397e0bbd941cb3349fe603c71d | 9,125 | py | Python | aioarangodb/cursor.py | kumsumit/aioarangodb | 2cac1717b4a9a84387f0de9e7563452a566a6e88 | [
"MIT"
] | null | null | null | aioarangodb/cursor.py | kumsumit/aioarangodb | 2cac1717b4a9a84387f0de9e7563452a566a6e88 | [
"MIT"
] | null | null | null | aioarangodb/cursor.py | kumsumit/aioarangodb | 2cac1717b4a9a84387f0de9e7563452a566a6e88 | [
"MIT"
] | null | null | null | __all__ = ["Cursor"]
from collections import deque
from typing import Any, Deque, Optional, Sequence
from .connection import BaseConnection
from .exceptions import (
CursorCloseError,
CursorCountError,
CursorEmptyError,
CursorNextError,
CursorStateError,
)
from .request import Request
from .typings import Json
class Cursor:
    """Cursor API wrapper.

    Cursors fetch query results from ArangoDB server in batches. Cursor objects
    are *stateful* as they store the fetched items in-memory. They must not be
    shared across threads without proper locking mechanism.

    :param connection: HTTP connection.
    :param init_data: Cursor initialization data.
    :type init_data: dict
    :param cursor_type: Cursor type ("cursor" or "export").
    :type cursor_type: str
    """

    # Fixed attribute set: avoids a per-instance __dict__.
    __slots__ = [
        "_conn",
        "_type",
        "_id",
        "_count",
        "_cached",
        "_stats",
        "_profile",
        "_warnings",
        "_has_more",
        "_batch",
    ]

    def __init__(
        self,
        connection: BaseConnection,
        init_data: Json,
        cursor_type: str = "cursor",
    ) -> None:
        self._conn = connection
        self._type = cursor_type
        self._batch: Deque[Any] = deque()
        self._id = None
        self._count: Optional[int] = None
        self._cached = None
        self._stats = None
        self._profile = None
        self._warnings = None
        # Seed state from the server's initial cursor response.
        self._update(init_data)

    def __aiter__(self) -> "Cursor":
        # Required by the ``async for`` protocol: Python looks up __aiter__
        # on the iterable. Without it the cursor raised TypeError under
        # ``async for`` even though __anext__ was defined.
        return self

    def __iter__(self) -> "Cursor":
        # Kept for backward compatibility; the cursor only supports
        # asynchronous iteration (there is no __next__).
        return self

    async def __anext__(self) -> Any:  # pragma: no cover
        return await self.next()

    async def __aenter__(self) -> "Cursor":
        return self

    def __len__(self) -> int:
        # Total size of the result set; only known when the query was
        # executed with counting enabled.
        if self._count is None:
            raise CursorCountError("cursor count not enabled")
        return self._count

    async def __aexit__(self, *_: Any) -> None:
        # Best-effort close of the server-side cursor on context exit.
        await self.close(ignore_missing=True)

    def __repr__(self) -> str:
        return f"<Cursor {self._id}>" if self._id else "<Cursor>"

    def _update(self, data: Json) -> Json:
        """Update the cursor using data from ArangoDB server.

        :param data: Cursor data from ArangoDB server (e.g. results).
        :type data: dict
        :return: Update cursor data.
        :rtype: dict
        """
        result: Json = {}
        if "id" in data:
            self._id = data["id"]
            result["id"] = data["id"]
        if "count" in data:
            self._count = data["count"]
            result["count"] = data["count"]
        if "cached" in data:
            self._cached = data["cached"]
            result["cached"] = data["cached"]
        self._has_more = bool(data["hasMore"])
        result["has_more"] = data["hasMore"]
        # Append the new batch to whatever is still buffered locally.
        self._batch.extend(data["result"])
        result["batch"] = data["result"]
        if "extra" in data:
            extra = data["extra"]
            if "profile" in extra:
                self._profile = extra["profile"]
                result["profile"] = extra["profile"]
            if "warnings" in extra:
                self._warnings = extra["warnings"]
                result["warnings"] = extra["warnings"]
            if "stats" in extra:
                stats = extra["stats"]
                # Normalize the server's camelCase stat keys to snake_case.
                if "writesExecuted" in stats:
                    stats["modified"] = stats.pop("writesExecuted")
                if "writesIgnored" in stats:
                    stats["ignored"] = stats.pop("writesIgnored")
                if "scannedFull" in stats:
                    stats["scanned_full"] = stats.pop("scannedFull")
                if "scannedIndex" in stats:
                    stats["scanned_index"] = stats.pop("scannedIndex")
                if "executionTime" in stats:
                    stats["execution_time"] = stats.pop("executionTime")
                if "httpRequests" in stats:
                    stats["http_requests"] = stats.pop("httpRequests")
                self._stats = stats
                result["statistics"] = stats
        return result

    @property
    def id(self) -> Optional[str]:
        """Return the cursor ID.

        :return: Cursor ID.
        :rtype: str
        """
        return self._id

    @property
    def type(self) -> str:
        """Return the cursor type.

        :return: Cursor type ("cursor" or "export").
        :rtype: str
        """
        return self._type

    def batch(self) -> Optional[Deque[Any]]:
        """Return the current batch of results.

        :return: Current batch.
        :rtype: collections.deque
        """
        return self._batch

    def has_more(self) -> Optional[bool]:
        """Return True if more results are available on the server.

        :return: True if more results are available on the server.
        :rtype: bool
        """
        return self._has_more

    def count(self) -> Optional[int]:
        """Return the total number of documents in the entire result set.

        :return: Total number of documents, or None if the count option
            was not enabled during cursor initialization.
        :rtype: int | None
        """
        return self._count

    def cached(self) -> Optional[bool]:
        """Return True if results are cached.

        :return: True if results are cached.
        :rtype: bool
        """
        return self._cached

    def statistics(self) -> Optional[Json]:
        """Return cursor statistics.

        :return: Cursor statistics.
        :rtype: dict
        """
        return self._stats

    def profile(self) -> Optional[Json]:
        """Return cursor performance profile.

        :return: Cursor performance profile.
        :rtype: dict
        """
        return self._profile

    def warnings(self) -> Optional[Sequence[Json]]:
        """Return any warnings from the query execution.

        :return: Warnings, or None if there are none.
        :rtype: [str]
        """
        return self._warnings

    def empty(self) -> bool:
        """Check if the current batch is empty.

        :return: True if current batch is empty, False otherwise.
        :rtype: bool
        """
        return len(self._batch) == 0

    async def next(self) -> Any:
        """Pop the next item from the current batch.

        If current batch is empty/depleted, an API request is automatically
        sent to ArangoDB server to fetch the next batch and update the cursor.

        :return: Next item in current batch.
        :raise StopAsyncIteration: If the result set is depleted.
        :raise arango.exceptions.CursorNextError: If batch retrieval fails.
        :raise arango.exceptions.CursorStateError: If cursor ID is not set.
        """
        if self.empty():
            if not self.has_more():
                # StopAsyncIteration, not StopIteration: PEP 479 converts a
                # StopIteration escaping a coroutine into RuntimeError, which
                # previously broke ``async for`` over the cursor.
                raise StopAsyncIteration
            await self.fetch()
        return self.pop()

    def pop(self) -> Any:
        """Pop the next item from current batch.

        If current batch is empty/depleted, an exception is raised. You must
        call :func:`arango.cursor.Cursor.fetch` to manually fetch the next
        batch from server.

        :return: Next item in current batch.
        :raise arango.exceptions.CursorEmptyError: If current batch is empty.
        """
        if len(self._batch) == 0:
            raise CursorEmptyError("current batch is empty")
        return self._batch.popleft()

    async def fetch(self) -> Json:
        """Fetch the next batch from server and update the cursor.

        :return: New batch details.
        :rtype: dict
        :raise arango.exceptions.CursorNextError: If batch retrieval fails.
        :raise arango.exceptions.CursorStateError: If cursor ID is not set.
        """
        if self._id is None:
            raise CursorStateError("cursor ID not set")
        request = Request(method="put", endpoint=f"/_api/{self._type}/{self._id}")
        resp = await self._conn.send_request(request)
        if not resp.is_success:
            raise CursorNextError(resp, request)
        return self._update(resp.body)

    async def close(self, ignore_missing: bool = False) -> Optional[bool]:
        """Close the cursor and free any server resources tied to it.

        :param ignore_missing: Do not raise exception on missing cursors.
        :type ignore_missing: bool
        :return: True if cursor was closed successfully, False if cursor was
            missing on the server and **ignore_missing** was set to True, None
            if there are no cursors to close server-side (e.g. result set is
            smaller than the batch size).
        :rtype: bool | None
        :raise arango.exceptions.CursorCloseError: If operation fails.
        :raise arango.exceptions.CursorStateError: If cursor ID is not set.
        """
        if self._id is None:
            # No server-side cursor exists (whole result fit in one batch).
            return None
        request = Request(method="delete", endpoint=f"/_api/{self._type}/{self._id}")
        resp = await self._conn.send_request(request)
        if resp.is_success:
            return True
        if resp.status_code == 404 and ignore_missing:
            return False
        raise CursorCloseError(resp, request)
| 31.143345 | 85 | 0.58389 | __all__ = ["Cursor"]
from collections import deque
from typing import Any, Deque, Optional, Sequence
from .connection import BaseConnection
from .exceptions import (
CursorCloseError,
CursorCountError,
CursorEmptyError,
CursorNextError,
CursorStateError,
)
from .request import Request
from .typings import Json
class Cursor:
    """Stateful wrapper around a server-side ArangoDB query cursor.

    Results arrive in batches; ``_batch`` buffers the current batch in
    memory and ``fetch`` pulls the next one from the server. Instances
    must not be shared across threads without external locking.
    """
    # Fixed attribute set: avoids a per-instance __dict__.
    __slots__ = [
        "_conn",
        "_type",
        "_id",
        "_count",
        "_cached",
        "_stats",
        "_profile",
        "_warnings",
        "_has_more",
        "_batch",
    ]
    def __init__(
        self,
        connection: BaseConnection,
        init_data: Json,
        cursor_type: str = "cursor",
    ) -> None:
        # ``init_data`` is the server's initial cursor response; ``_update``
        # consumes it to seed the first batch and the cursor metadata.
        self._conn = connection
        self._type = cursor_type
        self._batch: Deque[Any] = deque()
        self._id = None
        self._count: Optional[int] = None
        self._cached = None
        self._stats = None
        self._profile = None
        self._warnings = None
        self._update(init_data)
    def __iter__(self) -> "Cursor":
        # NOTE(review): ``async for`` requires ``__aiter__``, which is not
        # defined here -- confirm whether async iteration over the cursor
        # actually works, since only ``__anext__`` is provided below.
        return self
    async def __anext__(self) -> Any:
        return await self.next()
    async def __aenter__(self) -> "Cursor":
        return self
    def __len__(self) -> int:
        # Total result count; only available if the query enabled counting.
        if self._count is None:
            raise CursorCountError("cursor count not enabled")
        return self._count
    async def __aexit__(self, *_: Any) -> None:
        # Best-effort close of the server-side cursor on context exit.
        await self.close(ignore_missing=True)
    def __repr__(self) -> str:
        return f"<Cursor {self._id}>" if self._id else "<Cursor>"
    def _update(self, data: Json) -> Json:
        """Merge a server response into the cursor state.

        Returns a dict of the fields that were updated, with the server's
        camelCase statistic keys renamed to snake_case.
        """
        result: Json = {}
        if "id" in data:
            self._id = data["id"]
            result["id"] = data["id"]
        if "count" in data:
            self._count = data["count"]
            result["count"] = data["count"]
        if "cached" in data:
            self._cached = data["cached"]
            result["cached"] = data["cached"]
        self._has_more = bool(data["hasMore"])
        result["has_more"] = data["hasMore"]
        # Append the new batch to whatever is still buffered locally.
        self._batch.extend(data["result"])
        result["batch"] = data["result"]
        if "extra" in data:
            extra = data["extra"]
            if "profile" in extra:
                self._profile = extra["profile"]
                result["profile"] = extra["profile"]
            if "warnings" in extra:
                self._warnings = extra["warnings"]
                result["warnings"] = extra["warnings"]
            if "stats" in extra:
                stats = extra["stats"]
                # Rename server-side camelCase keys to snake_case in place.
                if "writesExecuted" in stats:
                    stats["modified"] = stats.pop("writesExecuted")
                if "writesIgnored" in stats:
                    stats["ignored"] = stats.pop("writesIgnored")
                if "scannedFull" in stats:
                    stats["scanned_full"] = stats.pop("scannedFull")
                if "scannedIndex" in stats:
                    stats["scanned_index"] = stats.pop("scannedIndex")
                if "executionTime" in stats:
                    stats["execution_time"] = stats.pop("executionTime")
                if "httpRequests" in stats:
                    stats["http_requests"] = stats.pop("httpRequests")
                self._stats = stats
                result["statistics"] = stats
        return result
    @property
    def id(self) -> Optional[str]:
        # Server-side cursor ID, or None when the result fit in one batch.
        return self._id
    @property
    def type(self) -> str:
        # Cursor type: "cursor" or "export".
        return self._type
    def batch(self) -> Optional[Deque[Any]]:
        # The currently buffered batch (a deque consumed from the left).
        return self._batch
    def has_more(self) -> Optional[bool]:
        # True if more batches are available on the server.
        return self._has_more
    def count(self) -> Optional[int]:
        # Total result-set size, or None if counting was not enabled.
        return self._count
    def cached(self) -> Optional[bool]:
        # True if the server answered from its query cache.
        return self._cached
    def statistics(self) -> Optional[Json]:
        # Normalized (snake_case) execution statistics, if provided.
        return self._stats
    def profile(self) -> Optional[Json]:
        # Query profiling information, if requested.
        return self._profile
    def warnings(self) -> Optional[Sequence[Json]]:
        # Warnings emitted during query execution, if any.
        return self._warnings
    def empty(self) -> bool:
        # True when the local buffer is exhausted (server may still have more).
        return len(self._batch) == 0
    async def next(self) -> Any:
        # Refill the buffer from the server when the local batch runs dry.
        if self.empty():
            if not self.has_more():
                # NOTE(review): a StopIteration escaping a coroutine is
                # converted to RuntimeError by PEP 479; StopAsyncIteration is
                # what the async-iteration protocol expects here -- confirm.
                raise StopIteration
            await self.fetch()
        return self.pop()
    def pop(self) -> Any:
        # Pop from the buffer only; never triggers a server round trip.
        if len(self._batch) == 0:
            raise CursorEmptyError("current batch is empty")
        return self._batch.popleft()
    async def fetch(self) -> Json:
        """Fetch the next batch from the server and merge it into the cursor."""
        if self._id is None:
            raise CursorStateError("cursor ID not set")
        request = Request(method="put", endpoint=f"/_api/{self._type}/{self._id}")
        resp = await self._conn.send_request(request)
        if not resp.is_success:
            raise CursorNextError(resp, request)
        return self._update(resp.body)
    async def close(self, ignore_missing: bool = False) -> Optional[bool]:
        """Delete the server-side cursor.

        Returns True on success, False when the cursor was already gone and
        ``ignore_missing`` is set, and None when no server-side cursor exists.
        """
        if self._id is None:
            return None
        request = Request(method="delete", endpoint=f"/_api/{self._type}/{self._id}")
        resp = await self._conn.send_request(request)
        if resp.is_success:
            return True
        if resp.status_code == 404 and ignore_missing:
            return False
        raise CursorCloseError(resp, request)
| true | true |
f7fd3739101e4408dce2a93c96579bb1715063e9 | 577 | py | Python | regexlib/python_re_test_file/regexlib_8131.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | 1 | 2022-01-24T14:43:23.000Z | 2022-01-24T14:43:23.000Z | regexlib/python_re_test_file/regexlib_8131.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | null | null | null | regexlib/python_re_test_file/regexlib_8131.py | yetingli/ReDoS-Benchmarks | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | [
"MIT"
] | null | null | null | # 8131
# ReDoS benchmark for regexlib pattern 8131:
# '`.*?((http|ftp|https)://[\w#$&+,\/:;=?@.-]+)[^\w#$&+,\/:;=?@.-]*?`i'
# POLYNOMIAL
# nums:4
# POLYNOMIAL AttackString:""+"'`"*5000+"! _1SLQ_2"
import re
from time import perf_counter
# The vulnerable pattern, kept verbatim from the regexlib entry. The lazy
# ``.*?`` prefix combined with the non-matching tail forces polynomial
# backtracking in ``re.search``.
regex = """'`.*?((http|ftp|https)://[\w#$&+,\/:;=?@.-]+)[^\w#$&+,\/:;=?@.-]*?`i'"""
REGEX = re.compile(regex)
for i in range(0, 150000):
    # Attack string grows by 20000 characters per iteration: a long run of
    # "'`" pairs followed by a suffix that can never complete a match.
    ATTACK = "" + "\'`" * i * 10000 + "! _1SLQ_2"
    LEN = len(ATTACK)
    BEGIN = perf_counter()
    m = REGEX.search(ATTACK)
    # m = REGEX.match(ATTACK)
    DURATION = perf_counter() - BEGIN
    # Dataset residue fused onto this line made the script unparsable;
    # restored to the plain timing report.
    print(f"{i *10000}: took {DURATION} seconds!")
# ReDoS benchmark: times ``re.search`` of a backtracking-prone regexlib
# pattern against progressively longer attack strings.
import re
from time import perf_counter
# Lazy ``.*?`` prefix plus a never-matching tail triggers polynomial
# backtracking in the regex engine.
regex = """'`.*?((http|ftp|https)://[\w#$&+,\/:;=?@.-]+)[^\w#$&+,\/:;=?@.-]*?`i'"""
REGEX = re.compile(regex)
for i in range(0, 150000):
    # Attack input grows by 20000 characters each iteration.
    ATTACK = "" + "\'`" * i * 10000 + "! _1SLQ_2"
    LEN = len(ATTACK)
    BEGIN = perf_counter()
    m = REGEX.search(ATTACK)
    DURATION = perf_counter() - BEGIN
    print(f"{i *10000}: took {DURATION} seconds!")
f7fd38ef88c6efd439543a27298b821758c6f8eb | 25,836 | py | Python | src/sage/topology/filtered_simplicial_complex.py | yzpopulation/sage | d2dc2f80b5a8e039701e292653e25366e3e5ec1e | [
"BSL-1.0"
] | 1,742 | 2015-01-04T07:06:13.000Z | 2022-03-30T11:32:52.000Z | src/sage/topology/filtered_simplicial_complex.py | yzpopulation/sage | d2dc2f80b5a8e039701e292653e25366e3e5ec1e | [
"BSL-1.0"
] | 66 | 2015-03-19T19:17:24.000Z | 2022-03-16T11:59:30.000Z | src/sage/topology/filtered_simplicial_complex.py | yzpopulation/sage | d2dc2f80b5a8e039701e292653e25366e3e5ec1e | [
"BSL-1.0"
] | 495 | 2015-01-10T10:23:18.000Z | 2022-03-24T22:06:11.000Z | # -*- coding: utf-8 -*-
r"""
Finite filtered complexes
AUTHORS:
- Guillaume Rousseau (2021-05)
This module implements the basic structures of finite filtered complexes.
A filtered complex is a simplicial complex, where each simplex is given
a weight, or "filtration value", such that the weight of a simplex is
greater than the weight of each of its faces.
The algorithm used in this module comes from [ZC2005]_.
EXAMPLES::
sage: FilteredSimplicialComplex([([0], 0), ([1], 0), ([0, 1], 1)])
Filtered complex on vertex set (0, 1) and with simplices ((0,) : 0), ((1,) : 0), ((0, 1) : 1)
Sage can compute persistent homology of simplicial complexes::
sage: X = FilteredSimplicialComplex([([0], 0), ([1], 0), ([0, 1], 1)])
sage: X.persistence_intervals(0)
[(0, 1), (0, +Infinity)]
FilteredSimplicialComplex objects are mutable. Filtration values can be
set with the ``filtration`` method as follows::
sage: X = FilteredSimplicialComplex() # returns an empty complex
sage: X.persistence_intervals(1)
[]
sage: X.filtration(Simplex([0, 2]), 0) # recursively adds faces
sage: X.filtration(Simplex([0, 1]), 0)
sage: X.filtration(Simplex([1, 2]), 0)
sage: X.filtration(Simplex([0, 1, 2]), 1) # closes the circle
sage: X.persistence_intervals(1)
[(0, 1)]
The filtration value of a simplex can be accessed as well with the
``filtration`` method, by not specifying a filtration value in
the arguments. If the simplex is not in the complex, this returns
``None``::
sage: X = FilteredSimplicialComplex([([0], 0), ([1], 0), ([0,1], 1)])
sage: X.filtration(Simplex([0]))
0
sage: X.filtration(Simplex([1,2])) is None
True
Filtration values can be accessed with function call and list
syntax as follows::
sage: X = FilteredSimplicialComplex([([0], 0), ([1], 0), ([0,1], 1)])
sage: s_1 = Simplex([0])
sage: X[s_1]
0
sage: X(Simplex([0,1]))
1
sage: X(Simplex(['baba']))
<BLANKLINE>
It is also possible to set the filtration value of a simplex with
the ``insert`` method, which takes as argument a list of vertices
rather than a ``Simplex``. This can make code more readable / clear::
sage: X = FilteredSimplicialComplex()
sage: X.insert(['a'], 0)
sage: X.insert(['b', 'c'], 1)
sage: X
Filtered complex on vertex set ('a', 'b', 'c') and with simplices
(('a',) : 0), (('c',) : 1), (('b',) : 1), (('b', 'c') : 1)
"""
# ****************************************************************************
# Copyright (C) 2013 GUILLAUME ROUSSEAU <guillaume.rousseau@ens-lyon.fr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.structure.sage_object import SageObject
from sage.topology.simplicial_complex import Simplex, SimplicialComplex
from sage.modules.free_module import FreeModule
from sage.rings.finite_rings.finite_field_constructor import GF
from sage.rings.integer import Integer
from sage.rings.infinity import infinity
from sage.misc.cachefunc import cached_method
class FilteredSimplicialComplex(SageObject):
r"""
Define a filtered complex.
INPUT:
- ``simplices`` -- list of simplices and filtration values
- ``verbose`` -- (default: ``False``) if ``True``, any change to
the filtration value of a simplex will be printed
``simplices`` should be a list of tuples ``(l, v)``, where
``l`` is a list of vertices and ``v`` is the corresponding
filtration value.
EXAMPLES::
sage: FilteredSimplicialComplex([([0], 0), ([1], 0), ([2], 1), ([0,1], 2.27)])
Filtered complex on vertex set (0, 1, 2) and with simplices
((0,) : 0), ((1,) : 0), ((2,) : 1), ((0, 1) : 2.27000000000000)
"""
def __init__(self, simplices=[], verbose=False):
"""
Initialize ``self``.
EXAMPLES::
sage: X = FilteredSimplicialComplex([([0], 0), ([1], 0), ([2], 1), ([0,1], 2.27)])
sage: TestSuite(X).run()
"""
# _vertices is the set of vertices on which the complex
# is constructed
self._vertices = set()
# _filtration_dict has simplices as keys
# and entries are corresponding filtration values
self._filtration_dict = {}
self._dimension = 0
self._max_value = 0
# when _verbose is set to True, insertion
# will warn the user when something non-trivial
# happens.
self._verbose = verbose
# Insert all simplices in the initial list
for l, v in simplices:
self.insert(l, v)
def __eq__(self, other):
"""
Check equality.
EXAMPLES::
sage: X = FilteredSimplicialComplex([([0], 0), ([1], 0), ([2], 1), ([0,1], 2.27)])
sage: Y = FilteredSimplicialComplex()
sage: Y.insert([0], 0)
sage: Y.insert([1], 0)
sage: Y.insert([2], 1)
sage: Y.insert([0,1], 2.27)
sage: X == Y
True
sage: Y.filtration([1,2], 2)
sage: X == Y
False
sage: Y = FilteredSimplicialComplex([([0], 0), ([1], 0), ([2], 1), ([0,1], 2)])
sage: X == Y
False
"""
return (isinstance(other, FilteredSimplicialComplex)
and self._vertices == other._vertices
and self._filtration_dict == other._filtration_dict)
def __ne__(self, other):
"""
Check inequality.
EXAMPLES::
sage: X = FilteredSimplicialComplex([([0], 0), ([1], 0), ([2], 1), ([0,1], 2.27)])
sage: Y = FilteredSimplicialComplex([([0], 0), ([1], 0), ([2], 1), ([0,1], 3)])
sage: X != Y
True
sage: Y.filtration([0,1], 2.27)
sage: X != Y
False
"""
return not (self == other)
def _get_value(self, s):
r"""
Return the filtration value of a simplex ``s`` in the complex.
EXAMPLES::
sage: X = FilteredSimplicialComplex([([0], 1), ([1], 2)])
sage: X._get_value(Simplex([0]))
1
This also works for the call and getitem syntax as a shorthand::
sage: X(Simplex([0]))
1
sage: X[Simplex([0])]
1
"""
if s in self._filtration_dict:
return self._filtration_dict[s]
else:
return None
__call__ = _get_value
__getitem__ = _get_value
    def _insert(self, simplex, filtration_value):
        r"""
        Add a simplex to the complex.

        All faces of the simplex are added recursively if they are
        not already present, with the same value.

        If the simplex is already present, and the new value is lower
        than its current value in the complex, the value gets updated,
        otherwise it does not change. This propagates recursively to faces.

        If verbose has been enabled, this method will describe what it
        is doing during an insertion.

        INPUT:

        - ``simplex`` -- :class:`Simplex`; simplex to be inserted

        - ``filtration_value`` -- value of the simplex

        EXAMPLES::

            sage: X = FilteredSimplicialComplex()
            sage: X._insert(Simplex([0]), 3)
            sage: X
            Filtered complex on vertex set (0,) and with simplices ((0,) : 3)
        """
        # Keep track of whether the simplex is already in the complex
        # and if it should be updated or not
        curr_value = self[simplex]
        if curr_value is not None:
            if self._verbose:
                print("Face {} is already in the complex.".format(simplex))
            # A lower incoming value wins: drop the old entry so the simplex
            # is re-inserted (and its faces re-checked) with the new value.
            if curr_value > filtration_value:
                if self._verbose:
                    verbose_string = "However its value is {}".format(curr_value)
                    verbose_string += ": updating it to {}".format(filtration_value)
                    print(verbose_string)
                self._filtration_dict.pop(simplex)
            else:
                if self._verbose:
                    print("Its value is {}: keeping it that way".format(curr_value))
                return
        # check that all faces are in the complex already.
        # If not, warn the user and add faces (recursively)
        faces = simplex.faces()
        if simplex.dimension() > 0:
            for f in faces:
                if self._verbose:
                    print("Also inserting face {} with value {}".format(f, filtration_value))
                self._insert(f, filtration_value)
        self._filtration_dict[simplex] = filtration_value
        self._dimension = max(self._dimension, simplex.dimension())
        self._max_value = max(self._max_value, filtration_value)
        self._vertices.update(simplex.set())
        # The complex changed, so any cached persistent homology is stale.
        self._persistent_homology.clear_cache()
    def insert(self, vertex_list, filtration_value):
        r"""
        Add a simplex to the complex.

        All faces of the simplex are added recursively if they are
        not already present, with the same value.

        If the simplex is already present, and the new value is lower
        than its current value in the complex, the value gets updated,
        otherwise it does not change. This propagates recursively to faces.

        If verbose has been enabled, this method will describe what it
        is doing during an insertion.

        INPUT:

        - ``vertex_list`` -- list of vertices

        - ``filtration_value`` -- desired value of the simplex to be added

        EXAMPLES::

            sage: X = FilteredSimplicialComplex()
            sage: X.insert(Simplex([0]),3)
            sage: X
            Filtered complex on vertex set (0,) and with simplices ((0,) : 3)

        If the verbose parameter was set to true, this method will print
        some info::

            sage: X = FilteredSimplicialComplex(verbose=True)
            sage: X.insert(Simplex([0, 1]), 2)
            Also inserting face (1,) with value 2
            Also inserting face (0,) with value 2
            sage: X.insert(Simplex([0]),1)
            Face (0,) is already in the complex.
            However its value is 2: updating it to 1
            sage: X.insert(Simplex([0]), 77)
            Face (0,) is already in the complex.
            Its value is 1: keeping it that way
        """
        # Normalize the vertex list to a ``Simplex`` and delegate to the
        # recursive internal insertion routine.
        self._insert(Simplex(vertex_list), filtration_value)
def filtration(self, s, filtration_value=None):
r"""
Set filtration value of a simplex, or return value
of existing simplex.
INPUT:
- ``s`` -- :class:`Simplex` for which to set or obtain the
value of
- ``filtration_value`` -- (optional) filtration value
for the simplex
If no filtration value is specified, this returns the value of
the simplex in the complex. If the simplex is not in the complex,
this returns ``None``.
If ``filtration_value`` is set, this function inserts the
simplex into the complex with the specified value.
See documentation of ``insert`` for more details.
EXAMPLES::
sage: X = FilteredSimplicialComplex([([0], 0), ([1], 1)])
sage: X.filtration(Simplex([0, 1])) is None
True
sage: X.filtration(Simplex([0, 1]), 2)
sage: X.filtration([0, 1])
2
"""
s = Simplex(s)
if filtration_value is None:
return self._get_value(s)
else:
self._insert(s, filtration_value)
def prune(self,threshold):
r"""
Return a copy of the filtered complex, where simplices above
the threshold value have been removed.
INPUT:
- ``threshold`` -- a real value, above which simplices are discarded
Simplices with filtration value exactly equal to ``threshold``
are kept in the result.
EXAMPLES::
sage: a = FilteredSimplicialComplex()
sage: a.insert([0], 0)
sage: a.insert([0, 1], 1)
sage: a.insert([0, 2], 2)
sage: b = a.prune(1)
sage: b
Filtered complex on vertex set (0, 1) and with simplices ((0,) : 0), ((1,) : 1), ((0, 1) : 1)
"""
result_complex = FilteredSimplicialComplex()
for s in self._filtration_dict:
if self[s] <= threshold:
result_complex._insert(s, self[s])
return result_complex
    @cached_method(key=lambda self,f,s,v:(f,s))
    def _persistent_homology(self, field=2, strict=True, verbose=False):
        """
        Compute the homology intervals of the complex.

        INPUT:

        - ``field`` -- (default: 2) prime number modulo which the homology
          is computed
        - ``strict`` -- (default: ``True``) if ``False``, takes into account
          intervals of persistence 0
        - ``verbose`` -- (default: ``False``) if ``True``, prints the
          progress of computation

        This method is called whenever Betti numbers or intervals are
        computed, and the result is cached. It returns the list of
        intervals.

        ALGORITHM:

        The computation behind persistent homology is a matrix reduction.
        The algorithm implemented is described in [ZC2005]_.

        EXAMPLES::

            sage: X = FilteredSimplicialComplex([([0], 0), ([1], 0), ([0,1], 2)])
            sage: X._persistent_homology()[0]
            [(0, 2), (0, +Infinity)]

        Some homology elements may have a lifespan or persistence of 0.
        They are usually discarded, but can be kept if necessary::

            sage: X = FilteredSimplicialComplex()
            sage: X.insert([0,1],1) # opens a hole and closes it instantly
            sage: X._persistent_homology(strict=False)[0]
            [(1, 1), (1, +Infinity)]

        REFERENCES:

        - [ZC2005]_

        TESTS:

        This complex is used as a running example in [ZC2005]_::

            sage: l = [([0], 0), ([1], 0), ([2], 1), ([3], 1), ([0, 1], 1),
            ....: ([1, 2], 1), ([0, 3], 2), ([2, 3], 2), ([0, 2], 3),
            ....: ([0, 1, 2], 4), ([0, 2, 3], 5)]
            sage: X = FilteredSimplicialComplex(l)
            sage: X.persistence_intervals(0)
            [(0, 1), (1, 2), (0, +Infinity)]
            sage: X.persistence_intervals(1)
            [(3, 4), (2, 5)]
            sage: X.persistence_intervals(0, strict=False)
            [(0, 1), (1, 1), (1, 2), (0, +Infinity)]
        """
        # first, order the simplices in lexico order
        # on dimension, value and then arbitrary order
        # defined by the Simplex class.
        def key(s):
            d = self._get_value(s)
            return (s.dimension(), d, s)
        simplices = list(self._filtration_dict)
        simplices.sort(key=key)
        # remember the index of each simplex in a dict
        self._index_of_simplex = {}
        n = len(simplices)
        for i in range(n):
            self._index_of_simplex[simplices[i]] = i
        self._field_int = field
        self._field = GF(field)
        # Chain group: free module over GF(field) with the simplices as basis.
        self._chaingroup = FreeModule(self._field, rank_or_basis_keys=simplices)
        # Initialize data structures for the algo
        self._marked = [False] * n
        self._T = [None] * n
        intervals = [[] for i in range(self._dimension+1)]
        self.pairs = []
        self._strict = strict
        self._verbose = verbose
        if self._verbose:
            print("Beginning first pass")
        # First pass ([ZC2005]): reduce each simplex's boundary; a zero
        # boundary marks a potential cycle, otherwise the pivot pairs the
        # simplex with the one it kills, closing an interval.
        for j in range(n):
            # if being verbose, print progress
            # every 1000 simplices.
            if self._verbose and j % 1000 == 0:
                print('{}/{}'.format(j, n))
            s = simplices[j]
            d = self._remove_pivot_rows(s, simplices)
            if d == 0:
                self._marked[j] = True
            else:
                max_index = self._max_index(d)
                t = simplices[max_index]
                self._T[max_index] = (s, d)
                self._add_interval(t, s, intervals)
        if self._verbose:
            print("First pass over, beginning second pass")
        # Second pass: every marked simplex never paired in T gives rise to
        # an interval that persists forever.
        for j in range(n):
            if self._verbose and j % 1000 == 0:
                print('{}/{}'.format(j, n))
            s = simplices[j]
            if self._marked[j] and not self._T[j]:
                self._add_interval(s, None, intervals)
        if self._verbose:
            print("Second pass over")
        return intervals
    def _add_interval(self, s, t, intervals):
        r"""
        Add a new interval (i.e. homology element).

        This method should not be called by users, it is used in
        the ``_compute_persistence`` method. The simplex of
        death may be ``None``, in which case the interval is infinite.

        INPUT:

        - ``s`` -- birth simplex
        - ``t`` -- death simplex
        - ``intervals`` -- list of current intervals

        If ``t`` is not ``None``, its dimension should be
        one more than the dimension of ``s``.

        TESTS::

            sage: X = FilteredSimplicialComplex([([0], 0), ([1, 2], 10)])
            sage: int_list = X._persistent_homology()
            sage: int_list[0]
            [(0, +Infinity), (10, +Infinity)]
            sage: X._add_interval(Simplex([0]), Simplex([1, 2]),int_list)
            sage: int_list[0]
            [(0, +Infinity), (10, +Infinity), (0, 10)]

        Infinite interval::

            sage: int_list2 = [[],[]]
            sage: X._add_interval(Simplex([1, 2]), None, int_list2)
            sage: int_list2[1]
            [(10, +Infinity)]
        """
        # figure out dimension of homology element
        # and indices of the two simplices. If the
        # closing simplex is None, then the interval
        # is infinite.
        k = s.dimension()
        i = self._filtration_dict[s]
        if not t:
            j = infinity
        else:
            j = self._filtration_dict[t]
        # Only add intervals of length 0 if
        # strict mode is not enabled.
        if i != j or (not self._strict):
            intervals[k].append((i, j))
            # Record the birth/death simplex pair alongside the interval.
            self.pairs.append((s, t))
    def _remove_pivot_rows(self, s, simplices):
        r"""
        Return the boundary chain of a simplex,
        from which pivot elements have been removed.

        This method implements the subroutine of the same name
        in [ZC2005]_. This method should not be called by users,
        it is used in the ``compute_persistence`` method.

        TESTS::

            sage: l = [([0], 0), ([1], 0), ([2], 1), ([3], 1), ([0, 1], 1), ([1, 2], 1),
            ....: ([0, 3], 2), ([2, 3], 2), ([0, 2], 3), ([0, 1, 2], 4)]
            sage: X = FilteredSimplicialComplex(l)
            sage: X._persistent_homology()
            [[(0, 1), (1, 2), (0, +Infinity)], [(3, 4), (2, +Infinity)], []]
            sage: X._remove_pivot_rows(Simplex([0,1,2]), list(X._filtration_dict))
            0
            sage: X.insert([0,2,3],5)
            sage: X._remove_pivot_rows(Simplex([0,2,3]), list(X._filtration_dict))
            B[(2, 3)]
        """
        d = self._chaingroup()
        # Handle the case when the simplex is a vertex
        if s.dimension() == 0:
            return d
        # Initialize the boundary chain
        for (i, f) in enumerate(s.faces()):
            d += (-1)**i * self._chaingroup(f)
        # Remove all unmarked elements
        # NOTE: the loop rebinds ``s``, shadowing the input simplex; this is
        # safe because the original ``s`` is not used again below.
        for (s, x_s) in d:
            j = self._index_of_simplex[s]
            if not self._marked[j]:
                d = d - x_s * self._chaingroup(s)
        # Reduce d until it is empty or until the simplex
        # with maximum index in the complex among all
        # non-zero terms is not in T.
        while d != 0:
            max_index = self._max_index(d)
            t = simplices[max_index]
            if not self._T[max_index]:
                break
            # Eliminate the pivot term by subtracting the stored reduced
            # chain scaled by the inverse of its pivot coefficient.
            c = self._T[max_index][1]
            q = c[t]
            d = d - ((q**(-1))*c)
        return d
def _max_index(self, d):
r"""
Return the maximal index of all simplices with nonzero
coefficient in ``d``.
This method is called in ``_remove_pivot_rows`` and
``compute_persistence``. It should not be called by users
outside of those methods.
TESTS::
sage: X = FilteredSimplicialComplex([([0], 0), ([1], 5), ([0, 1], 18), ([0, 2, 3], 32)])
sage: X._persistent_homology()
[[(5, 18), (0, +Infinity)], [], []]
sage: a = X._chaingroup(Simplex([0, 1]))
sage: b = X._chaingroup(Simplex([0, 3]))
sage: d = a + b
sage: X._max_index(d)
6
"""
currmax = -1
for (s, x_s) in d:
j = self._index_of_simplex[s]
if j > currmax:
currmax = j
return currmax
def persistence_intervals(self, dimension, field=2, strict=True, verbose=None):
r"""
Return the list of `d`-dimensional homology elements.
INPUT:
- ``dimension`` -- integer; dimension `d` for which to
return intervals
- ``field`` -- prime number (default: 2); modulo which persistent
homology is computed
- ``strict`` -- (default: ``True``) if ``False``, takes into account
intervals of persistence 0
- ``verbose`` -- (optional) if ``True``, print the steps of the
persistent homology computation; the default is the verbosity
of ``self``
EXAMPLES::
sage: X = FilteredSimplicialComplex([([0], 0), ([1], 1), ([0,1], 2)])
sage: X.persistence_intervals(0)
[(1, 2), (0, +Infinity)]
"""
if verbose is None:
verbose = self._verbose
intervals = self._persistent_homology(field, strict, verbose=verbose)
if dimension < len(intervals):
return intervals[dimension][:]
else:
return []
def betti_number(self, k, a, b, field=2, strict=True, verbose=None):
r"""
Return the ``k``-dimensional Betti number from ``a`` to ``a + b``.
INPUT:
- ``k`` -- the dimension for the Betti number
- ``a`` -- the lower filtration value
- ``b`` -- the size of the interval
- ``field`` -- prime number (default: 2); modulo which persistent
homology is computed
- ``strict`` -- (default: ``True``) if ``False``, takes into account
intervals of persistence 0
- ``verbose`` -- (optional) if ``True``, print the steps of the
persistent homology computation; the default is the verbosity
of ``self``
The Betti number ``\beta_k^{a,a+b}`` counts the number of
homology elements which are alive throughout the whole
duration ``[a, a+b]``.
EXAMPLES::
sage: X = FilteredSimplicialComplex([([0], 0), ([1], 0), ([0,1], 2)])
sage: X.betti_number(0, 0.5, 1)
2
sage: X.betti_number(0, 1.5, 1)
1
If an element vanishes at time ``a + b`` exactly,
it does not count towards the Betti number::
sage: X = FilteredSimplicialComplex([([0], 0), ([1], 0), ([0,1], 2)])
sage: X.betti_number(0, 1.5, 0.5)
1
"""
if verbose is None:
verbose = self._verbose
intervals = self._persistent_homology(field, strict, verbose=verbose)
return Integer(sum(1 for (i, j) in intervals[k]
if (i <= a and a + b < j) and a >= 0))
def _repr_(self):
"""
Print representation.
If there are more than 10 simplices or vertices, only prints the
count for each.
EXAMPLES::
sage: X = FilteredSimplicialComplex([([0], 0), ([1], 0), ([0, 1], 1)])
sage: X
Filtered complex on vertex set (0, 1) and with simplices ((0,) : 0), ((1,) : 0), ((0, 1) : 1)
sage: X.insert([0, 1, 2, 3, 4], 8)
sage: X
Filtered complex on 5 vertices and with 31 simplices
"""
vert_count = len(self._vertices)
simp_count = len(self._filtration_dict)
if simp_count > 10 or vert_count > 10:
vertex_string = "on {} vertices".format(vert_count)
simplex_string = "with {} simplices".format(simp_count)
else:
vertex_string = "on vertex set {}".format(tuple(sorted(self._vertices)))
simplex_string = "with simplices "
simplex_list = ["({} : {})".format(s, self._filtration_dict[s]) for s in self._filtration_dict]
simplex_string += ", ".join(simplex_list)
return "Filtered complex " + vertex_string + " and " + simplex_string
def _simplicial_(self):
"""
Return the associated simplicial complex
All simplices of the filtered simplicial complex are
included in the resulting simplicial complex.
EXAMPLES::
sage: l = [([0],0), ([1],0), ([2],1), ([3],1), ([0, 1],1), ([1, 2],1), ([0, 3],2),
....: ([2, 3],2), ([0, 2],3), ([0, 1, 2],4), ([0, 2, 3],5)]
sage: a = FilteredSimplicialComplex(l)
sage: b = SimplicialComplex(a)
sage: b
Simplicial complex with vertex set (0, 1, 2, 3) and facets {(0, 1, 2), (0, 2, 3)}
"""
return SimplicialComplex(self._filtration_dict)
| 34.960758 | 107 | 0.549543 |
from sage.structure.sage_object import SageObject
from sage.topology.simplicial_complex import Simplex, SimplicialComplex
from sage.modules.free_module import FreeModule
from sage.rings.finite_rings.finite_field_constructor import GF
from sage.rings.integer import Integer
from sage.rings.infinity import infinity
from sage.misc.cachefunc import cached_method
class FilteredSimplicialComplex(SageObject):
def __init__(self, simplices=[], verbose=False):
self._vertices = set()
self._filtration_dict = {}
self._dimension = 0
self._max_value = 0
self._verbose = verbose
for l, v in simplices:
self.insert(l, v)
def __eq__(self, other):
return (isinstance(other, FilteredSimplicialComplex)
and self._vertices == other._vertices
and self._filtration_dict == other._filtration_dict)
def __ne__(self, other):
return not (self == other)
def _get_value(self, s):
if s in self._filtration_dict:
return self._filtration_dict[s]
else:
return None
__call__ = _get_value
__getitem__ = _get_value
def _insert(self, simplex, filtration_value):
curr_value = self[simplex]
if curr_value is not None:
if self._verbose:
print("Face {} is already in the complex.".format(simplex))
if curr_value > filtration_value:
if self._verbose:
verbose_string = "However its value is {}".format(curr_value)
verbose_string += ": updating it to {}".format(filtration_value)
print(verbose_string)
self._filtration_dict.pop(simplex)
else:
if self._verbose:
print("Its value is {}: keeping it that way".format(curr_value))
return
faces = simplex.faces()
if simplex.dimension() > 0:
for f in faces:
if self._verbose:
print("Also inserting face {} with value {}".format(f, filtration_value))
self._insert(f, filtration_value)
self._filtration_dict[simplex] = filtration_value
self._dimension = max(self._dimension, simplex.dimension())
self._max_value = max(self._max_value, filtration_value)
self._vertices.update(simplex.set())
self._persistent_homology.clear_cache()
def insert(self, vertex_list, filtration_value):
self._insert(Simplex(vertex_list), filtration_value)
def filtration(self, s, filtration_value=None):
s = Simplex(s)
if filtration_value is None:
return self._get_value(s)
else:
self._insert(s, filtration_value)
def prune(self,threshold):
result_complex = FilteredSimplicialComplex()
for s in self._filtration_dict:
if self[s] <= threshold:
result_complex._insert(s, self[s])
return result_complex
@cached_method(key=lambda self,f,s,v:(f,s))
def _persistent_homology(self, field=2, strict=True, verbose=False):
def key(s):
d = self._get_value(s)
return (s.dimension(), d, s)
simplices = list(self._filtration_dict)
simplices.sort(key=key)
self._index_of_simplex = {}
n = len(simplices)
for i in range(n):
self._index_of_simplex[simplices[i]] = i
self._field_int = field
self._field = GF(field)
self._chaingroup = FreeModule(self._field, rank_or_basis_keys=simplices)
self._marked = [False] * n
self._T = [None] * n
intervals = [[] for i in range(self._dimension+1)]
self.pairs = []
self._strict = strict
self._verbose = verbose
if self._verbose:
print("Beginning first pass")
for j in range(n):
if self._verbose and j % 1000 == 0:
print('{}/{}'.format(j, n))
s = simplices[j]
d = self._remove_pivot_rows(s, simplices)
if d == 0:
self._marked[j] = True
else:
max_index = self._max_index(d)
t = simplices[max_index]
self._T[max_index] = (s, d)
self._add_interval(t, s, intervals)
if self._verbose:
print("First pass over, beginning second pass")
for j in range(n):
if self._verbose and j % 1000 == 0:
print('{}/{}'.format(j, n))
s = simplices[j]
if self._marked[j] and not self._T[j]:
self._add_interval(s, None, intervals)
if self._verbose:
print("Second pass over")
return intervals
def _add_interval(self, s, t, intervals):
k = s.dimension()
i = self._filtration_dict[s]
if not t:
j = infinity
else:
j = self._filtration_dict[t]
if i != j or (not self._strict):
intervals[k].append((i, j))
self.pairs.append((s, t))
def _remove_pivot_rows(self, s, simplices):
d = self._chaingroup()
if s.dimension() == 0:
return d
for (i, f) in enumerate(s.faces()):
d += (-1)**i * self._chaingroup(f)
for (s, x_s) in d:
j = self._index_of_simplex[s]
if not self._marked[j]:
d = d - x_s * self._chaingroup(s)
while d != 0:
max_index = self._max_index(d)
t = simplices[max_index]
if not self._T[max_index]:
break
c = self._T[max_index][1]
q = c[t]
d = d - ((q**(-1))*c)
return d
def _max_index(self, d):
currmax = -1
for (s, x_s) in d:
j = self._index_of_simplex[s]
if j > currmax:
currmax = j
return currmax
def persistence_intervals(self, dimension, field=2, strict=True, verbose=None):
if verbose is None:
verbose = self._verbose
intervals = self._persistent_homology(field, strict, verbose=verbose)
if dimension < len(intervals):
return intervals[dimension][:]
else:
return []
def betti_number(self, k, a, b, field=2, strict=True, verbose=None):
if verbose is None:
verbose = self._verbose
intervals = self._persistent_homology(field, strict, verbose=verbose)
return Integer(sum(1 for (i, j) in intervals[k]
if (i <= a and a + b < j) and a >= 0))
def _repr_(self):
vert_count = len(self._vertices)
simp_count = len(self._filtration_dict)
if simp_count > 10 or vert_count > 10:
vertex_string = "on {} vertices".format(vert_count)
simplex_string = "with {} simplices".format(simp_count)
else:
vertex_string = "on vertex set {}".format(tuple(sorted(self._vertices)))
simplex_string = "with simplices "
simplex_list = ["({} : {})".format(s, self._filtration_dict[s]) for s in self._filtration_dict]
simplex_string += ", ".join(simplex_list)
return "Filtered complex " + vertex_string + " and " + simplex_string
def _simplicial_(self):
return SimplicialComplex(self._filtration_dict)
| true | true |
f7fd3914e3813b7167ac47088231c76e579ea698 | 3,828 | py | Python | adafruit_ble_adafruit/adafruit_service.py | tekktrik/Adafruit_CircuitPython_BLE_Adafruit | ac24f9c333b80e23510fc2b78349d342984c2973 | [
"MIT"
] | null | null | null | adafruit_ble_adafruit/adafruit_service.py | tekktrik/Adafruit_CircuitPython_BLE_Adafruit | ac24f9c333b80e23510fc2b78349d342984c2973 | [
"MIT"
] | null | null | null | adafruit_ble_adafruit/adafruit_service.py | tekktrik/Adafruit_CircuitPython_BLE_Adafruit | ac24f9c333b80e23510fc2b78349d342984c2973 | [
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: 2020 Dan Halbert for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_ble_adafruit.adafruit_service`
================================================================================
Access to sensors and hardware on or connected to BLE-capable boards.
* Author(s): Dan Halbert
Implementation Notes
--------------------
**Hardware:**
* `Adafruit CircuitPlayground Bluefruit <https://www.adafruit.com/product/4333>`_
* `Adafruit CLUE nRF52840 Express <https://www.adafruit.com/product/4500>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BLE_Adafruit.git"
import struct
from micropython import const
from adafruit_ble.advertising import Advertisement, LazyObjectField
from adafruit_ble.advertising.standard import ManufacturerData, ManufacturerDataField
from adafruit_ble.advertising.adafruit import (
MANUFACTURING_DATA_ADT,
ADAFRUIT_COMPANY_ID,
)
from adafruit_ble.attributes import Attribute
from adafruit_ble.characteristics import Characteristic
from adafruit_ble.characteristics.int import Int32Characteristic, Uint32Characteristic
from adafruit_ble.uuid import VendorUUID
from adafruit_ble.services import Service
try:
from typing import Optional
from _bleio import ScanEntry
except ImportError:
pass
_PID_DATA_ID = const(0x0001) # This is the same as the Radio data id, unfortunately.
class AdafruitServerAdvertisement(
Advertisement
): # pylint: disable=too-few-public-methods
"""Advertise the Adafruit company ID and the board USB PID."""
match_prefixes = (
struct.pack(
"<BHBH",
MANUFACTURING_DATA_ADT,
ADAFRUIT_COMPANY_ID,
struct.calcsize("<HH"),
_PID_DATA_ID,
),
)
manufacturer_data = LazyObjectField(
ManufacturerData,
"manufacturer_data",
advertising_data_type=MANUFACTURING_DATA_ADT,
company_id=ADAFRUIT_COMPANY_ID,
key_encoding="<H",
)
pid = ManufacturerDataField(_PID_DATA_ID, "<H")
"""The USB PID (product id) for this board."""
def __init__(self, *, entry: Optional[ScanEntry] = None) -> None:
super().__init__(entry=entry)
# Return early if things have been set by an existing ScanEntry.
if entry:
return
# Creating an advertisement to send.
self.connectable = True
self.flags.general_discovery = True
self.flags.le_only = True
class AdafruitService(Service):
"""Common superclass for all Adafruit board services."""
@staticmethod
def adafruit_service_uuid(n: int) -> VendorUUID:
"""Generate a VendorUUID which fills in a 16-bit value in the standard
Adafruit Service UUID: ADAFnnnn-C332-42A8-93BD-25E905756CB8.
"""
return VendorUUID("ADAF{:04x}-C332-42A8-93BD-25E905756CB8".format(n))
@classmethod
def measurement_period_charac(cls, msecs: int = 1000) -> Int32Characteristic:
"""Create a measurement_period Characteristic for use by a subclass."""
return Int32Characteristic(
uuid=cls.adafruit_service_uuid(0x0001),
properties=(Characteristic.READ | Characteristic.WRITE),
initial_value=msecs,
)
@classmethod
def service_version_charac(cls, version: int = 1) -> Uint32Characteristic:
"""Create a service_version Characteristic for use by a subclass."""
return Uint32Characteristic(
uuid=cls.adafruit_service_uuid(0x0002),
properties=Characteristic.READ,
write_perm=Attribute.NO_ACCESS,
initial_value=version,
)
| 32.168067 | 86 | 0.6907 |
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BLE_Adafruit.git"
import struct
from micropython import const
from adafruit_ble.advertising import Advertisement, LazyObjectField
from adafruit_ble.advertising.standard import ManufacturerData, ManufacturerDataField
from adafruit_ble.advertising.adafruit import (
MANUFACTURING_DATA_ADT,
ADAFRUIT_COMPANY_ID,
)
from adafruit_ble.attributes import Attribute
from adafruit_ble.characteristics import Characteristic
from adafruit_ble.characteristics.int import Int32Characteristic, Uint32Characteristic
from adafruit_ble.uuid import VendorUUID
from adafruit_ble.services import Service
try:
from typing import Optional
from _bleio import ScanEntry
except ImportError:
pass
_PID_DATA_ID = const(0x0001)
class AdafruitServerAdvertisement(
Advertisement
):
match_prefixes = (
struct.pack(
"<BHBH",
MANUFACTURING_DATA_ADT,
ADAFRUIT_COMPANY_ID,
struct.calcsize("<HH"),
_PID_DATA_ID,
),
)
manufacturer_data = LazyObjectField(
ManufacturerData,
"manufacturer_data",
advertising_data_type=MANUFACTURING_DATA_ADT,
company_id=ADAFRUIT_COMPANY_ID,
key_encoding="<H",
)
pid = ManufacturerDataField(_PID_DATA_ID, "<H")
def __init__(self, *, entry: Optional[ScanEntry] = None) -> None:
super().__init__(entry=entry)
if entry:
return
self.connectable = True
self.flags.general_discovery = True
self.flags.le_only = True
class AdafruitService(Service):
@staticmethod
def adafruit_service_uuid(n: int) -> VendorUUID:
return VendorUUID("ADAF{:04x}-C332-42A8-93BD-25E905756CB8".format(n))
@classmethod
def measurement_period_charac(cls, msecs: int = 1000) -> Int32Characteristic:
return Int32Characteristic(
uuid=cls.adafruit_service_uuid(0x0001),
properties=(Characteristic.READ | Characteristic.WRITE),
initial_value=msecs,
)
@classmethod
def service_version_charac(cls, version: int = 1) -> Uint32Characteristic:
return Uint32Characteristic(
uuid=cls.adafruit_service_uuid(0x0002),
properties=Characteristic.READ,
write_perm=Attribute.NO_ACCESS,
initial_value=version,
)
| true | true |
f7fd3a7d568678282b974af71a21c89e99333d61 | 6,059 | py | Python | train.py | embracesource-cv-com/ruler_detection | 89318b46b213ffb7774119d502b5aa520d34b50a | [
"Apache-2.0"
] | null | null | null | train.py | embracesource-cv-com/ruler_detection | 89318b46b213ffb7774119d502b5aa520d34b50a | [
"Apache-2.0"
] | null | null | null | train.py | embracesource-cv-com/ruler_detection | 89318b46b213ffb7774119d502b5aa520d34b50a | [
"Apache-2.0"
] | null | null | null | import os
import argparse
import collections
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
import model
from utils import _transfer_pretrained_weights
from dataloader import CSVDataset, collater, Resizer, AspectRatioBasedSampler, Normalizer
from augment import Augmentation
from torch.utils.data import DataLoader
import csv_eval
from tensorboardX import SummaryWriter
from datetime import datetime
torch.cuda.empty_cache()
# torch.cuda.set_device(1)
# device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
def main(args=None):
parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.', default='csv')
parser.add_argument('--coco_path', help='Path to COCO directory')
parser.add_argument('--csv_train', help='Path to file containing training annotations (see readme)',
default='./csv/train_annots_div.csv')
parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)',
default='./csv/class_list_div.csv')
parser.add_argument('--csv_val', help='Path to file containing validation annotations (optional, see readme)',
default='./csv/val_annots_div.csv')
parser.add_argument('--weights', help='ckpt', default='./csv/coco_resnet_50_map_0_335_state_dict.pt')
parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50)
parser.add_argument('--epochs', help='Number of epochs', type=int, default=100)
parser = parser.parse_args(args)
# Create the data loaders
dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes,
transform=transforms.Compose([Normalizer(), Augmentation(), Resizer()]))
dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes,
transform=transforms.Compose([Normalizer(), Resizer()]))
print('Num training images: {}'.format(len(dataset_train)))
print('Num validation images: {}'.format(len(dataset_val)))
sampler = AspectRatioBasedSampler(dataset_train, batch_size=4, drop_last=False)
dataloader_train = DataLoader(dataset_train, num_workers=4, collate_fn=collater, batch_sampler=sampler)
# sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=3, drop_last=False)
# dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val)
# Create the model
if parser.depth == 18:
retinanet = model.resnet18(num_classes=dataset_train.num_classes(), )
elif parser.depth == 34:
retinanet = model.resnet34(num_classes=dataset_train.num_classes(), )
elif parser.depth == 50:
retinanet = model.resnet50(num_classes=dataset_train.num_classes(), )
elif parser.depth == 101:
retinanet = model.resnet101(num_classes=dataset_train.num_classes(), )
elif parser.depth == 152:
retinanet = model.resnet152(num_classes=dataset_train.num_classes(), )
else:
raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')
retinanet = _transfer_pretrained_weights(retinanet, parser.weights)
# PATH = '/home/github/ruler_detection/logs/Dec30_15-57-21/csv_retinanet_alldiv_best.pth'
# retinanet = torch.load(PATH)
# retinanet = retinanet.cuda()
retinanet = torch.nn.DataParallel(retinanet).cuda()
retinanet.training = True
optimizer = optim.Adam(retinanet.parameters(), lr=1e-4)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5, verbose=True)
loss_hist = collections.deque(maxlen=500)
log_dir = os.path.join('./logs', datetime.now().strftime('%b%d_%H-%M-%S'))
mAP_best = 0
for epoch_num in range(parser.epochs):
retinanet.train()
retinanet.module.freeze_bn()
epoch_loss = []
for iter_num, data in enumerate(dataloader_train):
optimizer.zero_grad()
cls_loss, regr_loss = retinanet([data['img'].cuda().float(), data['annot'].cuda()])
cls_loss = cls_loss.mean()
regr_loss = regr_loss.mean()
loss = cls_loss + regr_loss
if bool(loss == 0):
continue
loss.backward()
torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)
optimizer.step()
loss_hist.append(float(loss))
epoch_loss.append(float(loss))
print('Epoch: {} | Iteration: {} | cls loss: {:1.5f} | regr loss: {:1.5f} | Running loss: {:1.5f}'.format(
epoch_num, iter_num, float(cls_loss), float(regr_loss), np.mean(loss_hist)))
print('Evaluating dataset')
retinanet.eval()
APs, mAP = csv_eval.evaluate(dataset_val, retinanet)
with SummaryWriter(log_dir=log_dir, comment='train') as writer: # 可以直接使用python的with语法,自动调用close方法
writer.add_scalar('loss/classification', cls_loss, epoch_num)
writer.add_scalar('loss/regression', regr_loss, epoch_num)
writer.add_scalar('loss/total loss', loss, epoch_num)
writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch_num)
writer.add_scalar('acc/mAP', mAP, epoch_num)
writer.add_scalars('acc/AP', {'AP_0': APs[0][0], 'AP_1': APs[1][0], 'AP_2': APs[2][0], 'AP_3': APs[3][0],
'AP_4': APs[4][0], 'AP_5': APs[5][0], 'AP_6': APs[6][0], 'AP_7': APs[7][0],
'AP_8': APs[8][0], 'AP_9': APs[9][0], 'AP_10': APs[10][0]}, epoch_num)
scheduler.step(np.mean(epoch_loss))
if mAP > mAP_best:
mAP_best = mAP
torch.save(retinanet.module, os.path.join(log_dir, '{}_retinanet_alldiv_best.pth'.format(parser.dataset)))
if __name__ == '__main__':
main()
| 50.915966 | 118 | 0.667437 | import os
import argparse
import collections
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
import model
from utils import _transfer_pretrained_weights
from dataloader import CSVDataset, collater, Resizer, AspectRatioBasedSampler, Normalizer
from augment import Augmentation
from torch.utils.data import DataLoader
import csv_eval
from tensorboardX import SummaryWriter
from datetime import datetime
torch.cuda.empty_cache()
def main(args=None):
parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.', default='csv')
parser.add_argument('--coco_path', help='Path to COCO directory')
parser.add_argument('--csv_train', help='Path to file containing training annotations (see readme)',
default='./csv/train_annots_div.csv')
parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)',
default='./csv/class_list_div.csv')
parser.add_argument('--csv_val', help='Path to file containing validation annotations (optional, see readme)',
default='./csv/val_annots_div.csv')
parser.add_argument('--weights', help='ckpt', default='./csv/coco_resnet_50_map_0_335_state_dict.pt')
parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50)
parser.add_argument('--epochs', help='Number of epochs', type=int, default=100)
parser = parser.parse_args(args)
dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes,
transform=transforms.Compose([Normalizer(), Augmentation(), Resizer()]))
dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes,
transform=transforms.Compose([Normalizer(), Resizer()]))
print('Num training images: {}'.format(len(dataset_train)))
print('Num validation images: {}'.format(len(dataset_val)))
sampler = AspectRatioBasedSampler(dataset_train, batch_size=4, drop_last=False)
dataloader_train = DataLoader(dataset_train, num_workers=4, collate_fn=collater, batch_sampler=sampler)
if parser.depth == 18:
retinanet = model.resnet18(num_classes=dataset_train.num_classes(), )
elif parser.depth == 34:
retinanet = model.resnet34(num_classes=dataset_train.num_classes(), )
elif parser.depth == 50:
retinanet = model.resnet50(num_classes=dataset_train.num_classes(), )
elif parser.depth == 101:
retinanet = model.resnet101(num_classes=dataset_train.num_classes(), )
elif parser.depth == 152:
retinanet = model.resnet152(num_classes=dataset_train.num_classes(), )
else:
raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')
retinanet = _transfer_pretrained_weights(retinanet, parser.weights)
retinanet = torch.nn.DataParallel(retinanet).cuda()
retinanet.training = True
optimizer = optim.Adam(retinanet.parameters(), lr=1e-4)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5, verbose=True)
loss_hist = collections.deque(maxlen=500)
log_dir = os.path.join('./logs', datetime.now().strftime('%b%d_%H-%M-%S'))
mAP_best = 0
for epoch_num in range(parser.epochs):
retinanet.train()
retinanet.module.freeze_bn()
epoch_loss = []
for iter_num, data in enumerate(dataloader_train):
optimizer.zero_grad()
cls_loss, regr_loss = retinanet([data['img'].cuda().float(), data['annot'].cuda()])
cls_loss = cls_loss.mean()
regr_loss = regr_loss.mean()
loss = cls_loss + regr_loss
if bool(loss == 0):
continue
loss.backward()
torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)
optimizer.step()
loss_hist.append(float(loss))
epoch_loss.append(float(loss))
print('Epoch: {} | Iteration: {} | cls loss: {:1.5f} | regr loss: {:1.5f} | Running loss: {:1.5f}'.format(
epoch_num, iter_num, float(cls_loss), float(regr_loss), np.mean(loss_hist)))
print('Evaluating dataset')
retinanet.eval()
APs, mAP = csv_eval.evaluate(dataset_val, retinanet)
with SummaryWriter(log_dir=log_dir, comment='train') as writer:
writer.add_scalar('loss/classification', cls_loss, epoch_num)
writer.add_scalar('loss/regression', regr_loss, epoch_num)
writer.add_scalar('loss/total loss', loss, epoch_num)
writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch_num)
writer.add_scalar('acc/mAP', mAP, epoch_num)
writer.add_scalars('acc/AP', {'AP_0': APs[0][0], 'AP_1': APs[1][0], 'AP_2': APs[2][0], 'AP_3': APs[3][0],
'AP_4': APs[4][0], 'AP_5': APs[5][0], 'AP_6': APs[6][0], 'AP_7': APs[7][0],
'AP_8': APs[8][0], 'AP_9': APs[9][0], 'AP_10': APs[10][0]}, epoch_num)
scheduler.step(np.mean(epoch_loss))
if mAP > mAP_best:
mAP_best = mAP
torch.save(retinanet.module, os.path.join(log_dir, '{}_retinanet_alldiv_best.pth'.format(parser.dataset)))
if __name__ == '__main__':
main()
| true | true |
f7fd3a9f5a9ee863ee157dd34058d82e7651a113 | 101 | py | Python | nupyserver/v2/__init__.py | BxNiom/Python.NuPyServer | be2a9e8f366806117bf767ab96d67635fff6029e | [
"MIT"
] | 1 | 2021-12-18T11:30:58.000Z | 2021-12-18T11:30:58.000Z | nupyserver/v2/__init__.py | BxNiom/Python.NuPyServer | be2a9e8f366806117bf767ab96d67635fff6029e | [
"MIT"
] | null | null | null | nupyserver/v2/__init__.py | BxNiom/Python.NuPyServer | be2a9e8f366806117bf767ab96d67635fff6029e | [
"MIT"
] | null | null | null | # Protocol v2
# https://joelverhagen.github.io/NuGetUndocs/?http#introduction
__version__ = "0.1.0"
| 20.2 | 63 | 0.752475 |
= "0.1.0"
| true | true |
f7fd3b60c8a9d4f0687e5b60308a048085288f91 | 1,018 | py | Python | 台海舆论-数据分析(可视化系统)/demo/Taiwan_pie.py | 13060923171/Crawl-Project2 | effab1bf31979635756fc272a7bcc666bb499be2 | [
"MIT"
] | 14 | 2020-10-27T05:52:20.000Z | 2021-11-07T20:24:55.000Z | 台海舆论-数据分析(可视化系统)/demo/Taiwan_pie.py | 13060923171/Crawl-Project2 | effab1bf31979635756fc272a7bcc666bb499be2 | [
"MIT"
] | 1 | 2021-09-17T07:40:00.000Z | 2021-09-17T07:40:00.000Z | 台海舆论-数据分析(可视化系统)/demo/Taiwan_pie.py | 13060923171/Crawl-Project2 | effab1bf31979635756fc272a7bcc666bb499be2 | [
"MIT"
] | 8 | 2020-11-18T14:23:12.000Z | 2021-11-12T08:55:08.000Z | import pyecharts.options as opts
from pyecharts.charts import Pie
from pyecharts.globals import ThemeType
x_data = ["文化", "经贸", "媒体专栏", "网友专栏", "两岸专家","两岸","台商","部委","台海时事","网友快言","海峡时评","两岸快评"]
y_data = [553,553,35,39,448,465,553,321,553,406,553,556]
def taiwan_pie():
c = (
Pie(init_opts=opts.InitOpts(theme=ThemeType.VINTAGE))
.add(
series_name="中国台湾网",
data_pair=[list(z) for z in zip(x_data, y_data)],
radius=["50%", "70%"],
label_opts=opts.LabelOpts(is_show=False, position="center"),
)
.set_global_opts(
title_opts=opts.TitleOpts(title="台湾网各项比重",pos_left='35%',pos_top='5%'),
legend_opts=opts.LegendOpts(pos_left="legft", orient="vertical"))
.set_series_opts(
tooltip_opts=opts.TooltipOpts(
trigger="item", formatter="{a} <br/>{b}: {c} ({d}%)"
),
)
.render("./templates/台湾网占比图.html")
)
if __name__ == '__main__':
taiwan_pie() | 31.8125 | 88 | 0.577603 | import pyecharts.options as opts
from pyecharts.charts import Pie
from pyecharts.globals import ThemeType
x_data = ["文化", "经贸", "媒体专栏", "网友专栏", "两岸专家","两岸","台商","部委","台海时事","网友快言","海峡时评","两岸快评"]
y_data = [553,553,35,39,448,465,553,321,553,406,553,556]
def taiwan_pie():
c = (
Pie(init_opts=opts.InitOpts(theme=ThemeType.VINTAGE))
.add(
series_name="中国台湾网",
data_pair=[list(z) for z in zip(x_data, y_data)],
radius=["50%", "70%"],
label_opts=opts.LabelOpts(is_show=False, position="center"),
)
.set_global_opts(
title_opts=opts.TitleOpts(title="台湾网各项比重",pos_left='35%',pos_top='5%'),
legend_opts=opts.LegendOpts(pos_left="legft", orient="vertical"))
.set_series_opts(
tooltip_opts=opts.TooltipOpts(
trigger="item", formatter="{a} <br/>{b}: {c} ({d}%)"
),
)
.render("./templates/台湾网占比图.html")
)
if __name__ == '__main__':
taiwan_pie() | true | true |
f7fd3c11b4e40baa2ec6478624dc9f3894b3fc34 | 104 | py | Python | Task/Logical-operations/Python/logical-operations.py | mullikine/RosettaCodeData | 4f0027c6ce83daa36118ee8b67915a13cd23ab67 | [
"Info-ZIP"
] | 1 | 2021-05-05T13:42:20.000Z | 2021-05-05T13:42:20.000Z | Task/Logical-operations/Python/logical-operations.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | null | null | null | Task/Logical-operations/Python/logical-operations.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | null | null | null | def logic(a, b):
print('a and b:', a and b)
print('a or b:', a or b)
print('not a:', not a)
| 20.8 | 30 | 0.490385 | def logic(a, b):
print('a and b:', a and b)
print('a or b:', a or b)
print('not a:', not a)
| true | true |
f7fd3ca233d4f786f7b65a43a25fc84cfa2ba2df | 12,347 | py | Python | tests/test_dataservice.py | romepeng/jaqsplus | 2923c01213d1e2f90ddcd95eb722f8120aa74426 | [
"MIT"
] | null | null | null | tests/test_dataservice.py | romepeng/jaqsplus | 2923c01213d1e2f90ddcd95eb722f8120aa74426 | [
"MIT"
] | 1 | 2020-01-30T04:37:38.000Z | 2020-01-30T04:37:38.000Z | tests/test_dataservice.py | romepeng/jaqsplus | 2923c01213d1e2f90ddcd95eb722f8120aa74426 | [
"MIT"
] | null | null | null | # encoding: UTF-8
import pytest
from jaqs.data import RemoteDataService
import jaqs.util as jutil
from config_path import DATA_CONFIG_PATH
data_config = jutil.read_json(DATA_CONFIG_PATH)
def test_remote_data_service_daily():
# test daily
res, msg = ds.daily('002422.SZ,601607.SH', fields="",
start_date=20180101, end_date=20180512,
adjust_mode=None)
assert msg == '0,'
stk1 = res.loc[res.loc[:, 'symbol'] == '002422.SZ', :]
stk2 = res.loc[res.loc[:, 'symbol'] == '601607.SH', :]
#print(stk1.shape)
assert set(stk1.columns) == {'close', 'code', 'freq', 'high', 'low', 'oi', 'open', 'preclose',
'presettle', 'settle', 'symbol', 'trade_date', 'trade_status',
'turnover', 'volume', 'vwap'}
assert stk1.shape == (85, 16)
# print(stk1.loc[:, 'volume'].values[0])
assert stk1.loc[:, 'volume'].values[0] == 10828204.0
# print(stk2.loc[:, 'volume'].values[0])
assert stk2.loc[:, 'volume'].values[0] == 9171414.0
def test_remote_data_service_daily_quited():
# test daily
res, msg = ds.daily('601607.SH', fields="",
start_date=20140828, end_date=20170831,
adjust_mode=None)
assert msg == '0,'
# print(res.shape)
assert res.shape == (735, 16)
def test_remote_data_service_bar():
    """Minute bars for a futures contract and a stock on one trade date."""
    bars, err = ds.bar('rb1710.SHF,600662.SH', start_time=200000,
                       end_time=160000, trade_date=20170831, fields="")
    assert err == '0,'
    future_bars = bars[bars['symbol'] == 'rb1710.SHF']
    stock_bars = bars[bars['symbol'] == '600662.SH']
    assert set(future_bars.columns) == {
        u'close', u'code', u'date', u'freq', u'high', u'low', u'oi', u'open',
        u'settle', u'symbol', u'time', u'trade_date', u'turnover', u'volume',
        u'vwap',
    }
    assert abs(future_bars['settle'].values[0] - 0.0) < 1e-3
    assert future_bars.shape == (345, 15)
    assert stock_bars.shape == (240, 15)
    assert future_bars['volume'].values[344] == 3366
def test_remote_data_serviece_quote():
    """Snapshot quote for the SH composite index returns status '0,'.

    NOTE(review): "serviece" is a typo, but the name is kept so the pytest
    test id stays stable.
    """
    quote_df, err = ds.quote('000001.SH')
    assert err == '0,'
def test_remote_data_service_lb():
    """Reference-data ("lb") views: secDailyIndicator and income statement.

    Each view is queried twice -- once through the generic ``query`` API and
    once through its convenience wrapper -- and both results must satisfy
    the same assertions.
    """
    # test lb.secDailyIndicator
    fields = "pb,pe,free_share,net_assets,limit_status"
    for res3, msg3 in [ds.query("lb.secDailyIndicator", fields=fields,
                                filter="symbol=600030.SH&start_date=20170907&end_date=20170907",
                                orderby="trade_date"),
                       ds.query_lb_dailyindicator('600030.SH', 20170907, 20170907, fields)]:
        assert msg3 == '0,'
        assert abs(res3.loc[0, 'pb'] - 1.5135) < 1e-4
        assert abs(res3.loc[0, 'free_share'] - 781496.5954) < 1e-4
        assert abs(res3.loc[0, 'net_assets'] - 1.437e11) < 1e8
        assert res3.loc[0, 'limit_status'] == 0
    # test lb.income
    # NOTE(review): this call spells the sort keyword "order_by" while every
    # other query in this module uses "orderby" -- confirm which spelling the
    # data API honors (an unrecognized kwarg may be silently ignored).
    for res4, msg4 in [ds.query("lb.income", fields="",
                                filter="symbol=600000.SH&start_date=20150101&end_date=20170101&report_type=408001000",
                                order_by="report_date"),
                       ds.query_lb_fin_stat('income', '600000.SH', 20150101, 20170101, fields="")]:
        assert msg4 == '0,'
        assert res4.shape == (8, 12)
        assert abs(res4.loc[4, 'oper_rev'] - 120928000000) < 1
def test_remote_data_service_daily_ind_performance():
    """Bulk secDailyIndicator query over all HS300 members completes OK."""
    members = ds.query_index_member('000300.SH', 20151001, 20170101)
    filter_str = ("symbol=" + ','.join(members)
                  + "&start_date=20160907&end_date=20170907")
    df, err = ds.query(
        "lb.secDailyIndicator",
        fields="pb,pe,share_float_free,net_assets,limit_status",
        filter=filter_str,
        orderby="trade_date",
    )
    assert err == '0,'
def test_remote_data_service_components():
    """Index membership: daily membership matrix shape and member count."""
    daily_members = ds.query_index_member_daily(
        index='000300.SH', start_date=20140101, end_date=20170505)
    assert daily_members.shape == (814, 430)
    members = ds.query_index_member(
        index='000300.SH', start_date=20140101, end_date=20170505)
    assert len(members) == 430
def test_remote_data_service_industry():
    """Raw industry-classification queries plus alignment to trade dates.

    Builds announcement-date / industry-code frames for HS300 members and
    runs ``align`` both column-wise (all symbols at once) and per symbol.
    """
    from jaqs.data.align import align
    import pandas as pd
    arr = ds.query_index_member(index='000300.SH', start_date=20130101, end_date=20170505)
    # The SW classification result is immediately overwritten; only the ZZ
    # classification is used below.
    df = ds.query_industry_raw(symbol=','.join(arr), type_='SW')
    df = ds.query_industry_raw(symbol=','.join(arr), type_='ZZ')
    # errors: an invalid level and an unknown classification source must
    # both raise ValueError.
    try:
        ds.query_industry_raw(symbol=','.join(arr), type_='ZZ', level=5)
    except ValueError:
        pass
    try:
        ds.query_industry_raw(symbol=','.join(arr), type_='blabla')
    except ValueError:
        pass
    # df_ann = df.loc[:, ['in_date', 'symbol']]
    # df_ann = df_ann.set_index(['symbol', 'in_date'])
    # df_ann = df_ann.unstack(level='symbol')
    from jaqs.data import DataView
    # One sub-frame per symbol, each re-indexed 0..n-1 so the per-symbol
    # columns concatenated below line up row-wise.
    dic_sec = jutil.group_df_to_dict(df, by='symbol')
    dic_sec = {sec: df.reset_index() for sec, df in dic_sec.items()}
    # Announcement dates and level-1 industry codes as (row, symbol) frames.
    df_ann = pd.concat([df.loc[:, 'in_date'].rename(sec) for sec, df in dic_sec.items()], axis=1)
    df_value = pd.concat([df.loc[:, 'industry1_code'].rename(sec) for sec, df in dic_sec.items()], axis=1)
    dates_arr = ds.query_trade_dates(20140101, 20170505)
    res = align(df_value, df_ann, dates_arr)
    # df_ann = df.pivot(index='in_date', columns='symbol', values='in_date')
    # df_value = df.pivot(index=None, columns='symbol', values='industry1_code')
    def align_single_df(df_one_sec):
        # Align one symbol's industry codes onto the trade-date axis.
        df_value = df_one_sec.loc[:, ['industry1_code']]
        df_ann = df_one_sec.loc[:, ['in_date']]
        res = align(df_value, df_ann, dates_arr)
        return res
    # res_list = [align_single_df(df) for sec, df in dic_sec.items()]
    # Only the first 10 symbols are aligned (presumably to keep the test fast).
    res_list = [align_single_df(df) for df in list(dic_sec.values())[:10]]
    res = pd.concat(res_list, axis=1)
def test_remote_data_service_industry_df():
    """Daily industry matrix must agree with the raw in_date/code records."""
    # from jaqs.data import Calendar
    arr = ds.query_index_member(index='000300.SH', start_date=20130101, end_date=20170505)
    symbol_arr = ','.join(arr)
    sec = '000008.SZ'
    type_ = 'ZZ'
    df_raw = ds.query_industry_raw(symbol=sec, type_=type_)
    df = ds.query_industry_daily(symbol=symbol_arr,
                                 start_date=df_raw['in_date'].min(), end_date=20170505,
                                 type_=type_, level=1)
    # Every raw (in_date, industry1_code) record must be reflected in the
    # daily matrix for the chosen symbol.
    for idx, row in df_raw.iterrows():
        in_date = row['in_date']
        value = row['industry1_code']
        if in_date in df.index:
            assert df.loc[in_date, sec] == value
        else:
            # in_date fell on a non-trading day; the code must take effect
            # on the next trade date instead.
            idx = ds.query_next_trade_date(in_date)
            assert df.loc[idx, sec] == value
def test_remote_data_service_fin_indicator():
    """Raw finIndicator query for a single symbol returns without raising."""
    query_filter = ds._dic2url({'symbol': '000008.SZ'})
    df_raw, msg = ds.query("lb.finIndicator", fields="",
                           filter=query_filter, orderby="symbol")
def test_remote_data_service_adj_factor():
    """Daily adjust factors for HS300 members, with div=False and div=True."""
    members = ds.query_index_member(index='000300.SH',
                                    start_date=20160101, end_date=20170505)
    symbols = ','.join(members)
    factors = ds.query_adj_factor_daily(symbols, start_date=20160101,
                                        end_date=20170101, div=False)
    assert abs(factors.loc[20160408, '300024.SZ'] - 10.735) < 1e-3
    assert abs(factors.loc[20160412, '300024.SZ'] - 23.658) < 1e-3
    # div=True presumably returns divided factors; smoke-check only
    # (the result is not asserted on).
    factors = ds.query_adj_factor_daily(symbols, start_date=20160101,
                                        end_date=20170101, div=True)
def test_remote_data_service_dividend():
    """Dividend records for HS300 members over 2016."""
    arr = ds.query_index_member(index='000300.SH', start_date=20160101, end_date=20170505)
    symbol_arr = ','.join(arr)
    df, msg = ds.query_dividend(symbol_arr, start_date=20160101, end_date=20170101)
    # NOTE(review): df2 is never asserted on -- the pivot appears to be a
    # smoke test that the result can be reshaped by (exdiv_date, symbol).
    df2 = df.pivot(index='exdiv_date', columns='symbol', values='share_ratio')
    # One known record: 002085.SZ paid a ~0.20 share ratio on 2016-05-04.
    assert abs(df.loc[(df['exdiv_date'] == 20160504) & (df['symbol'] == '002085.SZ'), 'share_ratio'] - 0.20).iat[0] < 1e-2
def test_remote_data_service_inst_info():
    """Instrument metadata for one stock; non-empty result for an index."""
    symbol = '000001.SZ'
    info = ds.query_inst_info(
        symbol, fields='status,selllot,buylot,pricetick,multiplier,product')
    assert info.at[symbol, 'multiplier'] == 1
    assert abs(info.at[symbol, 'pricetick'] - 0.01) < 1e-2
    assert info.at[symbol, 'buylot'] == 100
    assert not ds.query_inst_info('000001.SH').empty
def test_remote_data_service_index_weight():
    """Index constituent weights: raw snapshots, range query, daily matrix."""
    df = ds.query_index_weights_raw(index='000300.SH', trade_date=20140101)
    assert df.shape[0] == 300
    # NOTE(review): a tolerance of 1.0 only checks the sum lies in (0, 2);
    # if the weights are fractions that should sum to ~1, a tighter bound
    # (e.g. 1e-2) would make this assertion meaningful.
    assert abs(df['weight'].sum() - 1.0) < 1.0
    df = ds.query_index_weights_range(index='000300.SH', start_date=20140101, end_date=20140305)
    df = ds.query_index_weights_raw(index='000016.SH', trade_date=20140101)
    assert df.shape[0] == 50
    assert abs(df['weight'].sum() - 1.0) < 1.0
    df = ds.query_index_weights_daily(index='000300.SH', start_date=20150101, end_date=20151221)
    assert abs(df.at[20150120, '000001.SZ'] - 1.07e-2) < 1e-2
    assert df.shape == (236, 321)
def test_remote_data_service_initialize():
    """Login error handling of init_from_config, then a successful re-login.

    The statements are order-sensitive: the service is intentionally left in
    a failed-login state before the NotLoginError check, and only then
    re-initialized with valid credentials.
    """
    import jaqs.data.dataservice as jads
    # Work on a copy so the module-level data_config is never mutated.
    data_config2 = {k: v for k, v in data_config.items()}
    # An empty password is expected to raise InitializeError (tolerated
    # silently if it does not, since the except clause just passes).
    data_config2['remote.data.password'] = ''
    try:
        ds.init_from_config(data_config2)
    except jads.InitializeError:
        pass
    # A wrong password yields error code -1000 in the status message.
    data_config2['remote.data.password'] = '123'
    msg = ds.init_from_config(data_config2)
    assert msg.split(',')[0] == '-1000'
    # Queries while not logged in are expected to raise NotLoginError.
    try:
        ds.daily('000001.SH', start_date=20170101, end_date=20170109)
    except jads.NotLoginError:
        pass
    # Valid credentials succeed, and initializing twice is idempotent.
    msg = ds.init_from_config(data_config)
    assert msg.split(',')[0] == '0'
    msg = ds.init_from_config(data_config)
    assert msg.split(',')[0] == '0'
def test_remote_data_service_subscribe():
    # Smoke test: subscribing to one symbol's market data must not raise.
    ds.subscribe('000001.SH')
def test_remote_data_bar_quote():
    """1-minute bar-quote data: status, non-zero ask volumes, one bid price."""
    quotes, err = ds.bar_quote('000001.SZ', trade_date=20171009, freq='1M')
    assert err == '0,'
    assert quotes['askvolume1'].all()
    assert abs(quotes['bidprice1'].iat[1] - 11.52) < 1e-2
def test_remote_data_service_mkt_data_callback():
    """The market-data callback accepts an (empty) Quote without raising."""
    from jaqs.data.basic import Quote
    ds.mkt_data_callback(key='quote', quote=Quote())
def test_calendar():
    """Trade-date queries: ranges, next/last navigation, holiday checks.

    Uses its own service instance instead of the fixture-injected ``ds``.
    """
    svc = RemoteDataService()
    svc.init_from_config(data_config)
    assert len(svc.query_trade_dates(20121224, 20130201)) == 27
    nxt = svc.query_next_trade_date(20170102)
    assert nxt == 20170103
    assert svc.query_last_trade_date(nxt) == 20161230
    nxt = svc.query_next_trade_date(20170104)
    assert nxt == 20170105
    assert svc.query_last_trade_date(nxt) == 20170104
    assert len(svc.query_trade_dates(20161224, 20170201)) == 23
    # Known public holidays must not count as trade dates.
    assert not svc.is_trade_date(20150101)
    assert not svc.is_trade_date(20130501)
'''
def test_remote_data_service_exception():
from jaqs.data.dataservice import NotLoginError, InitializeError
del ds
ds2 = RemoteDataService()
try:
ds2.daily('000001.SH', 20170101, 20170109)
except NotLoginError:
pass
except Exception as exc:
raise exc
try:
ds2.init_from_config({'remote.data.address': 'blabla'})
except InitializeError:
pass
except Exception as exc:
raise exc
'''
@pytest.fixture(autouse=True)
def my_globals(request):
    """Autouse fixture: give every test a fresh, logged-in data service.

    The service is injected as the bare module-level name ``ds`` into the
    test function's globals, which is what the tests above refer to.
    """
    ds = RemoteDataService()
    ds.init_from_config(data_config)
    request.function.__globals__.update({'ds': ds})
if __name__ == "__main__":
import time
t_start = time.time()
ds = RemoteDataService()
ds.init_from_config(data_config)
g = globals()
#print(g)
g = {k: v for k, v in g.items() if k.startswith('test_') and callable(v)}
#print(g)
for test_name, test_func in g.items():
print("\n==========\nTesting {:s}...".format(test_name))
test_func()
print("Test Complete.")
t3 = time.time() - t_start
print("\n\n\nTime lapsed in total: {:.1f}".format(t3)) | 36.208211 | 123 | 0.611566 |
import pytest
from jaqs.data import RemoteDataService
import jaqs.util as jutil
from config_path import DATA_CONFIG_PATH
data_config = jutil.read_json(DATA_CONFIG_PATH)
def test_remote_data_service_daily():
res, msg = ds.daily('002422.SZ,601607.SH', fields="",
start_date=20180101, end_date=20180512,
adjust_mode=None)
assert msg == '0,'
stk1 = res.loc[res.loc[:, 'symbol'] == '002422.SZ', :]
stk2 = res.loc[res.loc[:, 'symbol'] == '601607.SH', :]
assert set(stk1.columns) == {'close', 'code', 'freq', 'high', 'low', 'oi', 'open', 'preclose',
'presettle', 'settle', 'symbol', 'trade_date', 'trade_status',
'turnover', 'volume', 'vwap'}
assert stk1.shape == (85, 16)
assert stk1.loc[:, 'volume'].values[0] == 10828204.0
assert stk2.loc[:, 'volume'].values[0] == 9171414.0
def test_remote_data_service_daily_quited():
res, msg = ds.daily('601607.SH', fields="",
start_date=20140828, end_date=20170831,
adjust_mode=None)
assert msg == '0,'
assert res.shape == (735, 16)
def test_remote_data_service_bar():
res2, msg2 = ds.bar('rb1710.SHF,600662.SH', start_time=200000, end_time=160000, trade_date=20170831, fields="")
assert msg2 == '0,'
rb2 = res2.loc[res2.loc[:, 'symbol'] == 'rb1710.SHF', :]
stk2 = res2.loc[res2.loc[:, 'symbol'] == '600662.SH', :]
assert set(rb2.columns) == {u'close', u'code', u'date', u'freq', u'high', u'low', u'oi', u'open',
u'settle', u'symbol', u'time', u'trade_date', u'turnover', u'volume',
u'vwap'}
assert abs(rb2.loc[:, 'settle'].values[0] - 0.0) < 1e-3
assert rb2.shape == (345, 15)
assert stk2.shape == (240, 15)
assert rb2.loc[:, 'volume'].values[344] == 3366
def test_remote_data_serviece_quote():
res, msg = ds.quote('000001.SH')
assert msg == '0,'
def test_remote_data_service_lb():
fields = "pb,pe,free_share,net_assets,limit_status"
for res3, msg3 in [ds.query("lb.secDailyIndicator", fields=fields,
filter="symbol=600030.SH&start_date=20170907&end_date=20170907",
orderby="trade_date"),
ds.query_lb_dailyindicator('600030.SH', 20170907, 20170907, fields)]:
assert msg3 == '0,'
assert abs(res3.loc[0, 'pb'] - 1.5135) < 1e-4
assert abs(res3.loc[0, 'free_share'] - 781496.5954) < 1e-4
assert abs(res3.loc[0, 'net_assets'] - 1.437e11) < 1e8
assert res3.loc[0, 'limit_status'] == 0
for res4, msg4 in [ds.query("lb.income", fields="",
filter="symbol=600000.SH&start_date=20150101&end_date=20170101&report_type=408001000",
order_by="report_date"),
ds.query_lb_fin_stat('income', '600000.SH', 20150101, 20170101, fields="")]:
assert msg4 == '0,'
assert res4.shape == (8, 12)
assert abs(res4.loc[4, 'oper_rev'] - 120928000000) < 1
def test_remote_data_service_daily_ind_performance():
hs300 = ds.query_index_member('000300.SH', 20151001, 20170101)
hs300_str = ','.join(hs300)
fields = "pb,pe,share_float_free,net_assets,limit_status"
res, msg = ds.query("lb.secDailyIndicator", fields=fields,
filter=("symbol=" + hs300_str
+ "&start_date=20160907&end_date=20170907"),
orderby="trade_date")
assert msg == '0,'
def test_remote_data_service_components():
res = ds.query_index_member_daily(index='000300.SH', start_date=20140101, end_date=20170505)
assert res.shape == (814, 430)
arr = ds.query_index_member(index='000300.SH', start_date=20140101, end_date=20170505)
assert len(arr) == 430
def test_remote_data_service_industry():
from jaqs.data.align import align
import pandas as pd
arr = ds.query_index_member(index='000300.SH', start_date=20130101, end_date=20170505)
df = ds.query_industry_raw(symbol=','.join(arr), type_='SW')
df = ds.query_industry_raw(symbol=','.join(arr), type_='ZZ')
try:
ds.query_industry_raw(symbol=','.join(arr), type_='ZZ', level=5)
except ValueError:
pass
try:
ds.query_industry_raw(symbol=','.join(arr), type_='blabla')
except ValueError:
pass
from jaqs.data import DataView
dic_sec = jutil.group_df_to_dict(df, by='symbol')
dic_sec = {sec: df.reset_index() for sec, df in dic_sec.items()}
df_ann = pd.concat([df.loc[:, 'in_date'].rename(sec) for sec, df in dic_sec.items()], axis=1)
df_value = pd.concat([df.loc[:, 'industry1_code'].rename(sec) for sec, df in dic_sec.items()], axis=1)
dates_arr = ds.query_trade_dates(20140101, 20170505)
res = align(df_value, df_ann, dates_arr)
def align_single_df(df_one_sec):
df_value = df_one_sec.loc[:, ['industry1_code']]
df_ann = df_one_sec.loc[:, ['in_date']]
res = align(df_value, df_ann, dates_arr)
return res
res_list = [align_single_df(df) for df in list(dic_sec.values())[:10]]
res = pd.concat(res_list, axis=1)
def test_remote_data_service_industry_df():
arr = ds.query_index_member(index='000300.SH', start_date=20130101, end_date=20170505)
symbol_arr = ','.join(arr)
sec = '000008.SZ'
type_ = 'ZZ'
df_raw = ds.query_industry_raw(symbol=sec, type_=type_)
df = ds.query_industry_daily(symbol=symbol_arr,
start_date=df_raw['in_date'].min(), end_date=20170505,
type_=type_, level=1)
for idx, row in df_raw.iterrows():
in_date = row['in_date']
value = row['industry1_code']
if in_date in df.index:
assert df.loc[in_date, sec] == value
else:
idx = ds.query_next_trade_date(in_date)
assert df.loc[idx, sec] == value
def test_remote_data_service_fin_indicator():
symbol = '000008.SZ'
filter_argument = ds._dic2url({'symbol': symbol})
df_raw, msg = ds.query("lb.finIndicator", fields="",
filter=filter_argument, orderby="symbol")
def test_remote_data_service_adj_factor():
arr = ds.query_index_member(index='000300.SH', start_date=20160101, end_date=20170505)
symbol_arr = ','.join(arr)
res = ds.query_adj_factor_daily(symbol_arr, start_date=20160101, end_date=20170101, div=False)
assert abs(res.loc[20160408, '300024.SZ'] - 10.735) < 1e-3
assert abs(res.loc[20160412, '300024.SZ'] - 23.658) < 1e-3
res = ds.query_adj_factor_daily(symbol_arr, start_date=20160101, end_date=20170101, div=True)
def test_remote_data_service_dividend():
arr = ds.query_index_member(index='000300.SH', start_date=20160101, end_date=20170505)
symbol_arr = ','.join(arr)
df, msg = ds.query_dividend(symbol_arr, start_date=20160101, end_date=20170101)
df2 = df.pivot(index='exdiv_date', columns='symbol', values='share_ratio')
assert abs(df.loc[(df['exdiv_date'] == 20160504) & (df['symbol'] == '002085.SZ'), 'share_ratio'] - 0.20).iat[0] < 1e-2
def test_remote_data_service_inst_info():
sec = '000001.SZ'
res = ds.query_inst_info(sec, fields='status,selllot,buylot,pricetick,multiplier,product')
assert res.at[sec, 'multiplier'] == 1
assert abs(res.at[sec, 'pricetick'] - 0.01) < 1e-2
assert res.at[sec, 'buylot'] == 100
res = ds.query_inst_info('000001.SH')
assert not res.empty
def test_remote_data_service_index_weight():
df = ds.query_index_weights_raw(index='000300.SH', trade_date=20140101)
assert df.shape[0] == 300
assert abs(df['weight'].sum() - 1.0) < 1.0
df = ds.query_index_weights_range(index='000300.SH', start_date=20140101, end_date=20140305)
df = ds.query_index_weights_raw(index='000016.SH', trade_date=20140101)
assert df.shape[0] == 50
assert abs(df['weight'].sum() - 1.0) < 1.0
df = ds.query_index_weights_daily(index='000300.SH', start_date=20150101, end_date=20151221)
assert abs(df.at[20150120, '000001.SZ'] - 1.07e-2) < 1e-2
assert df.shape == (236, 321)
def test_remote_data_service_initialize():
import jaqs.data.dataservice as jads
data_config2 = {k: v for k, v in data_config.items()}
data_config2['remote.data.password'] = ''
try:
ds.init_from_config(data_config2)
except jads.InitializeError:
pass
data_config2['remote.data.password'] = '123'
msg = ds.init_from_config(data_config2)
assert msg.split(',')[0] == '-1000'
try:
ds.daily('000001.SH', start_date=20170101, end_date=20170109)
except jads.NotLoginError:
pass
msg = ds.init_from_config(data_config)
assert msg.split(',')[0] == '0'
msg = ds.init_from_config(data_config)
assert msg.split(',')[0] == '0'
def test_remote_data_service_subscribe():
ds.subscribe('000001.SH')
def test_remote_data_bar_quote():
df, msg = ds.bar_quote('000001.SZ', trade_date=20171009, freq='1M')
assert msg == '0,'
assert df['askvolume1'].all()
assert abs(df['bidprice1'].iat[1] - 11.52) < 1e-2
def test_remote_data_service_mkt_data_callback():
from jaqs.data.basic import Quote
q = Quote()
ds.mkt_data_callback(key='quote', quote=q)
def test_calendar():
ds = RemoteDataService()
ds.init_from_config(data_config)
res1 = ds.query_trade_dates(20121224, 20130201)
assert len(res1) == 27
day_zero = 20170102
res2 = ds.query_next_trade_date(day_zero)
assert res2 == 20170103
res2_last = ds.query_last_trade_date(res2)
assert res2_last == 20161230
res3 = ds.query_next_trade_date(20170104)
assert res3 == 20170105
res4 = ds.query_last_trade_date(res3)
assert res4 == 20170104
res11 = ds.query_trade_dates(20161224, 20170201)
assert len(res11) == 23
assert not ds.is_trade_date(20150101)
assert not ds.is_trade_date(20130501)
@pytest.fixture(autouse=True)
def my_globals(request):
ds = RemoteDataService()
ds.init_from_config(data_config)
request.function.__globals__.update({'ds': ds})
if __name__ == "__main__":
import time
t_start = time.time()
ds = RemoteDataService()
ds.init_from_config(data_config)
g = globals()
g = {k: v for k, v in g.items() if k.startswith('test_') and callable(v)}
for test_name, test_func in g.items():
print("\n==========\nTesting {:s}...".format(test_name))
test_func()
print("Test Complete.")
t3 = time.time() - t_start
print("\n\n\nTime lapsed in total: {:.1f}".format(t3)) | true | true |
f7fd3d4118993a6fffaf5aabee52c620001dd9f7 | 409 | py | Python | payments/razorpayapp/migrations/0006_auto_20200801_0100.py | Titan-BT-7274/Payment-Gateway | 075417fa1d37446dde789acf50bea89e91dbc80c | [
"MIT"
] | 1 | 2021-07-13T16:57:27.000Z | 2021-07-13T16:57:27.000Z | payments/razorpayapp/migrations/0006_auto_20200801_0100.py | Titan-BT-7274/Payment-Gateway | 075417fa1d37446dde789acf50bea89e91dbc80c | [
"MIT"
] | 3 | 2021-04-08T21:40:44.000Z | 2021-11-28T05:56:32.000Z | payments/razorpayapp/migrations/0006_auto_20200801_0100.py | Titan-BT-7274/Payment-Gateway | 075417fa1d37446dde789acf50bea89e91dbc80c | [
"MIT"
] | null | null | null | # Generated by Django 3.0.8 on 2020-08-01 01:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: change RazorpayHistory.txn_amount to an
    IntegerField. Do not edit by hand -- regenerate with makemigrations."""
    dependencies = [
        ('razorpayapp', '0005_remove_razorpayhistory_card_number'),
    ]
    operations = [
        migrations.AlterField(
            model_name='razorpayhistory',
            name='txn_amount',
            field=models.IntegerField(),
        ),
    ]
| 21.526316 | 67 | 0.623472 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('razorpayapp', '0005_remove_razorpayhistory_card_number'),
]
operations = [
migrations.AlterField(
model_name='razorpayhistory',
name='txn_amount',
field=models.IntegerField(),
),
]
| true | true |
f7fd3eaf0bbb9367b562c3b47ecf60568cb29e8b | 4,796 | py | Python | src/craftr/stdlib/net.craftr.backend/ninja/build_server.py | creator-build/craftr | a2b95618d990baee192bb2d2f6bb4453ce83a005 | [
"MIT"
] | 1 | 2022-02-04T21:51:36.000Z | 2022-02-04T21:51:36.000Z | src/craftr/stdlib/net.craftr.backend/ninja/build_server.py | craftr-build/craftr-build-4.x | a2b95618d990baee192bb2d2f6bb4453ce83a005 | [
"MIT"
] | 9 | 2015-08-31T15:39:20.000Z | 2015-09-05T19:22:20.000Z | src/craftr/stdlib/net.craftr.backend/ninja/build_server.py | craftr-build/craftr-build-4.x | a2b95618d990baee192bb2d2f6bb4453ce83a005 | [
"MIT"
] | null | null | null | # -*- coding: utf8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2018 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
r"""
Craftr uses a "Build Server" that serves the action information from the
master process to the Craftr slave process (invoked via the build backend).
This is to avoid that the slave process has to parse the build graph file
every time.
[ Craftr Master Process ] <- communicates with -\
\-> [ Build Backend (eg. Ninja) ] |
\-> [ Craftr Slave Process (invokes the actual build commands) ]
"""
import concurrent.futures
import json
import shlex
import socket
import socketserver
import struct
import threading
class JsonifyProxy:
  """Delegates ``to_json()`` to a wrapped object while forcing a fixed set
  of keyword arguments, which override anything the caller passes."""

  def __init__(self, obj, **kwargs):
    self._obj = obj
    self._kwargs = kwargs

  def to_json(self, *args, **kwargs):
    merged = dict(kwargs, **self._kwargs)
    return self._obj.to_json(*args, **merged)
class RequestHandler(socketserver.BaseRequestHandler):
  """Serves build-set lookups over a length-prefixed JSON protocol.

  Each message, in both directions, is a 4-byte big-endian unsigned length
  (struct format '!I') followed by that many bytes of UTF-8 encoded JSON.
  #master and #additional_args are injected by BuildServer before use.
  """
  master = None           # the Craftr master object (provides targets, reload())
  additional_args = None  # extra per-target command-line arguments
  def handle(self):
    try:
      while True:
        # Read the 4-byte length prefix; an empty read means the peer
        # closed the connection.
        # NOTE(review): recv(4) may legally return fewer than 4 bytes on a
        # stream socket; the same applies to the payload recv() below.
        data = self.request.recv(4)
        if len(data) == 0: break
        request_size = struct.unpack('!I', data)[0]
        request = json.loads(self.request.recv(request_size).decode('utf8'))
        if 'reload_build_server' in request:
          # Control message: re-read the build graph in the master process.
          self.master.reload()
          response = {'status': 'ok'}
        elif not all(x in request for x in ('target', 'operator', 'build_set')):
          response = {'error': 'BadRequest'}
        else:
          try:
            target = self.master.targets[request['target']]
            operator = target.operators[request['operator']]
            bset = operator.build_sets[request['build_set']]
          except KeyError:
            response = {'error': 'DoesNotExist'}
          else:
            # Serialize only the requested operator/build-set by wrapping
            # to_json() with a restricted view of the target.
            proxy = JsonifyProxy(operator, build_sets=[bset])
            proxy = JsonifyProxy(target, operators=[proxy])
            data = {
              'target': proxy.to_json(),
              'hash': bset.compute_hash(),
              'additional_args': self._get_additional_args(target, operator, bset)
            }
            response = {'data': data}
        response = json.dumps(response).encode('utf8')
        self.request.sendall(struct.pack('!I', len(response)))
        self.request.sendall(response)
      self.request.close()
    except ConnectionResetError:
      # The peer vanished mid-conversation; nothing to clean up.
      pass
  def _get_additional_args(self, target: 'Target', operator: 'Operator', bset: 'BuildSet'):
    # Split the build set's extra argument string shell-style; an empty or
    # missing string yields no extra arguments.
    if bset.additional_args:
      return shlex.split(bset.additional_args)
    return []
class BuildServer:
  """Threaded TCP server that exposes the Craftr master's build information.

  The server binds to an ephemeral port on localhost (see #address()) and
  answers length-prefixed JSON requests via #RequestHandler. It can be used
  as a context manager: serving starts on enter, and on exit the server is
  shut down and the serving thread joined.
  """

  def __init__(self, master, additional_args=None):
    """
    master (object): The Craftr master, providing `targets` and `reload()`.
    additional_args (dict): Optional extra command-line arguments handed to
      every #RequestHandler instance.
    """
    self._master = master
    self._additional_args = additional_args or {}
    # Port 0 makes the OS pick a free ephemeral port.
    self._server = socketserver.ThreadingTCPServer(('localhost', 0), self._request_handler)
    self._server.timeout = 0.5
    self._thread = None
    self._pool = concurrent.futures.ThreadPoolExecutor(max_workers=10)

  def __enter__(self):
    self._pool.__enter__()
    self.serve()
    return self

  def __exit__(self, *args):
    self._pool.__exit__(*args)
    self.shutdown()

  def _request_handler(self, *args, **kwargs):
    # Acts as the "handler class" argument of ThreadingTCPServer: build the
    # RequestHandler manually so master/additional_args are injected before
    # BaseRequestHandler.__init__ runs handle().
    handler = object.__new__(RequestHandler)
    handler.master = self._master
    handler.additional_args = self._additional_args
    handler.__init__(*args, **kwargs)

  def address(self):
    """Returns the (host, port) pair the server is bound to."""
    return self._server.server_address

  def serve(self):
    """Starts serving requests in a background thread.

    Raises RuntimeError if the server is already running.
    """
    if self._thread and self._thread.is_alive():
      raise RuntimeError('BuildServer already/still running.')
    self._thread = threading.Thread(target=self._server.serve_forever)
    self._thread.start()

  def shutdown(self, wait=True):
    """Stops the serve loop and closes the listening socket.

    wait (bool): Join the serving thread before returning.
    """
    self._server.shutdown()
    # Bugfix: also close the listening socket, otherwise its file
    # descriptor leaks until the process exits (see socketserver docs:
    # shutdown() only stops the loop, server_close() releases the socket).
    self._server.server_close()
    if wait and self._thread:
      self._thread.join()
| 33.538462 | 91 | 0.681401 |
import concurrent.futures
import json
import shlex
import socket
import socketserver
import struct
import threading
class JsonifyProxy:
def __init__(self, obj, **kwargs):
self._obj = obj
self._kwargs = kwargs
def to_json(self, *args, **kwargs):
kwargs.update(self._kwargs)
return self._obj.to_json(*args, **kwargs)
class RequestHandler(socketserver.BaseRequestHandler):
master = None
additional_args = None
def handle(self):
try:
while True:
data = self.request.recv(4)
if len(data) == 0: break
request_size = struct.unpack('!I', data)[0]
request = json.loads(self.request.recv(request_size).decode('utf8'))
if 'reload_build_server' in request:
self.master.reload()
response = {'status': 'ok'}
elif not all(x in request for x in ('target', 'operator', 'build_set')):
response = {'error': 'BadRequest'}
else:
try:
target = self.master.targets[request['target']]
operator = target.operators[request['operator']]
bset = operator.build_sets[request['build_set']]
except KeyError:
response = {'error': 'DoesNotExist'}
else:
proxy = JsonifyProxy(operator, build_sets=[bset])
proxy = JsonifyProxy(target, operators=[proxy])
data = {
'target': proxy.to_json(),
'hash': bset.compute_hash(),
'additional_args': self._get_additional_args(target, operator, bset)
}
response = {'data': data}
response = json.dumps(response).encode('utf8')
self.request.sendall(struct.pack('!I', len(response)))
self.request.sendall(response)
self.request.close()
except ConnectionResetError:
pass
def _get_additional_args(self, target: 'Target', operator: 'Operator', bset: 'BuildSet'):
if bset.additional_args:
return shlex.split(bset.additional_args)
return []
class BuildServer:
def __init__(self, master, additional_args=None):
self._master = master
self._additional_args = additional_args or {}
self._server = socketserver.ThreadingTCPServer(('localhost', 0), self._request_handler)
self._server.timeout = 0.5
self._thread = None
self._pool = concurrent.futures.ThreadPoolExecutor(max_workers=10)
def __enter__(self):
self._pool.__enter__()
self.serve()
return self
def __exit__(self, *args):
self._pool.__exit__(*args)
self.shutdown()
def _request_handler(self, *args, **kwargs):
handler = object.__new__(RequestHandler)
handler.master = self._master
handler.additional_args = self._additional_args
handler.__init__(*args, **kwargs)
def address(self):
return self._server.server_address
def serve(self):
if self._thread and self._thread.is_alive():
raise RuntimeError('BuildServer already/still running.')
self._thread = threading.Thread(target=self._server.serve_forever)
self._thread.start()
def shutdown(self, wait=True):
self._server.shutdown()
if wait and self._thread:
self._thread.join()
| true | true |
f7fd400b7a0645a6dee49c1abec5594c1c4a2fc8 | 30,384 | py | Python | elements_sdk/models/parameters.py | elements-storage/elements-sdk-python | 39c365fe079dcd5928c5fe1bbaa67389bd5a3d81 | [
"MIT"
] | 6 | 2020-11-16T23:15:18.000Z | 2022-03-14T03:56:12.000Z | elements_sdk/models/parameters.py | elements-storage/elements-sdk-python | 39c365fe079dcd5928c5fe1bbaa67389bd5a3d81 | [
"MIT"
] | 1 | 2021-07-28T13:03:49.000Z | 2021-08-25T12:24:01.000Z | elements_sdk/models/parameters.py | elements-storage/elements-sdk-python | 39c365fe079dcd5928c5fe1bbaa67389bd5a3d81 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
ELEMENTS API
The version of the OpenAPI document: 2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from elements_sdk.configuration import Configuration
class Parameters(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'analytics': 'bool',
'branding_css': 'str',
'branding_logo': 'str',
'external_url': 'str',
'file_manager_recycle_bin': 'bool',
'https_redirect': 'str',
'language': 'str',
'ltfs_default_restore_to_original_location': 'bool',
'ltfs_default_search_directories': 'bool',
'ltfs_library_address': 'str',
'mail_styling': 'dict(str, str)',
'media_auto_play': 'bool',
'media_auto_proxy': 'bool',
'media_auto_scan': 'bool',
'media_auto_transport': 'bool',
'media_auto_veritone_upload': 'bool',
'media_default_custom_field_type': 'str',
'media_default_delete_behaviour': 'str',
'media_force_show_deleted': 'bool',
'media_keep_selection_when_browsing': 'bool',
'media_recycle_bin': 'bool',
'ntp_offer_sync': 'bool',
'otp_policy': 'str',
'tasks_run_scheduled': 'bool',
'users_default_permissions': 'str',
'workspaces_folder_template_path': 'str',
'workspaces_path': 'str'
}
attribute_map = {
'analytics': 'analytics',
'branding_css': 'branding_css',
'branding_logo': 'branding_logo',
'external_url': 'external_url',
'file_manager_recycle_bin': 'file_manager_recycle_bin',
'https_redirect': 'https_redirect',
'language': 'language',
'ltfs_default_restore_to_original_location': 'ltfs_default_restore_to_original_location',
'ltfs_default_search_directories': 'ltfs_default_search_directories',
'ltfs_library_address': 'ltfs_library_address',
'mail_styling': 'mail_styling',
'media_auto_play': 'media_auto_play',
'media_auto_proxy': 'media_auto_proxy',
'media_auto_scan': 'media_auto_scan',
'media_auto_transport': 'media_auto_transport',
'media_auto_veritone_upload': 'media_auto_veritone_upload',
'media_default_custom_field_type': 'media_default_custom_field_type',
'media_default_delete_behaviour': 'media_default_delete_behaviour',
'media_force_show_deleted': 'media_force_show_deleted',
'media_keep_selection_when_browsing': 'media_keep_selection_when_browsing',
'media_recycle_bin': 'media_recycle_bin',
'ntp_offer_sync': 'ntp_offer_sync',
'otp_policy': 'otp_policy',
'tasks_run_scheduled': 'tasks_run_scheduled',
'users_default_permissions': 'users_default_permissions',
'workspaces_folder_template_path': 'workspaces_folder_template_path',
'workspaces_path': 'workspaces_path'
}
def __init__(self, analytics=None, branding_css=None, branding_logo=None, external_url=None, file_manager_recycle_bin=None, https_redirect=None, language=None, ltfs_default_restore_to_original_location=None, ltfs_default_search_directories=None, ltfs_library_address=None, mail_styling=None, media_auto_play=None, media_auto_proxy=None, media_auto_scan=None, media_auto_transport=None, media_auto_veritone_upload=None, media_default_custom_field_type=None, media_default_delete_behaviour=None, media_force_show_deleted=None, media_keep_selection_when_browsing=None, media_recycle_bin=None, ntp_offer_sync=None, otp_policy=None, tasks_run_scheduled=None, users_default_permissions=None, workspaces_folder_template_path=None, workspaces_path=None, local_vars_configuration=None): # noqa: E501
"""Parameters - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._analytics = None
self._branding_css = None
self._branding_logo = None
self._external_url = None
self._file_manager_recycle_bin = None
self._https_redirect = None
self._language = None
self._ltfs_default_restore_to_original_location = None
self._ltfs_default_search_directories = None
self._ltfs_library_address = None
self._mail_styling = None
self._media_auto_play = None
self._media_auto_proxy = None
self._media_auto_scan = None
self._media_auto_transport = None
self._media_auto_veritone_upload = None
self._media_default_custom_field_type = None
self._media_default_delete_behaviour = None
self._media_force_show_deleted = None
self._media_keep_selection_when_browsing = None
self._media_recycle_bin = None
self._ntp_offer_sync = None
self._otp_policy = None
self._tasks_run_scheduled = None
self._users_default_permissions = None
self._workspaces_folder_template_path = None
self._workspaces_path = None
self.discriminator = None
if analytics is not None:
self.analytics = analytics
if branding_css is not None:
self.branding_css = branding_css
if branding_logo is not None:
self.branding_logo = branding_logo
self.external_url = external_url
if file_manager_recycle_bin is not None:
self.file_manager_recycle_bin = file_manager_recycle_bin
self.https_redirect = https_redirect
if language is not None:
self.language = language
if ltfs_default_restore_to_original_location is not None:
self.ltfs_default_restore_to_original_location = ltfs_default_restore_to_original_location
if ltfs_default_search_directories is not None:
self.ltfs_default_search_directories = ltfs_default_search_directories
self.ltfs_library_address = ltfs_library_address
if mail_styling is not None:
self.mail_styling = mail_styling
if media_auto_play is not None:
self.media_auto_play = media_auto_play
if media_auto_proxy is not None:
self.media_auto_proxy = media_auto_proxy
if media_auto_scan is not None:
self.media_auto_scan = media_auto_scan
if media_auto_transport is not None:
self.media_auto_transport = media_auto_transport
if media_auto_veritone_upload is not None:
self.media_auto_veritone_upload = media_auto_veritone_upload
if media_default_custom_field_type is not None:
self.media_default_custom_field_type = media_default_custom_field_type
if media_default_delete_behaviour is not None:
self.media_default_delete_behaviour = media_default_delete_behaviour
self.media_force_show_deleted = media_force_show_deleted
if media_keep_selection_when_browsing is not None:
self.media_keep_selection_when_browsing = media_keep_selection_when_browsing
if media_recycle_bin is not None:
self.media_recycle_bin = media_recycle_bin
if ntp_offer_sync is not None:
self.ntp_offer_sync = ntp_offer_sync
if otp_policy is not None:
self.otp_policy = otp_policy
if tasks_run_scheduled is not None:
self.tasks_run_scheduled = tasks_run_scheduled
if users_default_permissions is not None:
self.users_default_permissions = users_default_permissions
if workspaces_folder_template_path is not None:
self.workspaces_folder_template_path = workspaces_folder_template_path
if workspaces_path is not None:
self.workspaces_path = workspaces_path
@property
def analytics(self):
"""Gets the analytics of this Parameters. # noqa: E501
:return: The analytics of this Parameters. # noqa: E501
:rtype: bool
"""
return self._analytics
@analytics.setter
def analytics(self, analytics):
"""Sets the analytics of this Parameters.
:param analytics: The analytics of this Parameters. # noqa: E501
:type: bool
"""
self._analytics = analytics
@property
def branding_css(self):
"""Gets the branding_css of this Parameters. # noqa: E501
:return: The branding_css of this Parameters. # noqa: E501
:rtype: str
"""
return self._branding_css
@branding_css.setter
def branding_css(self, branding_css):
"""Sets the branding_css of this Parameters.
:param branding_css: The branding_css of this Parameters. # noqa: E501
:type: str
"""
self._branding_css = branding_css
@property
def branding_logo(self):
"""Gets the branding_logo of this Parameters. # noqa: E501
:return: The branding_logo of this Parameters. # noqa: E501
:rtype: str
"""
return self._branding_logo
@branding_logo.setter
def branding_logo(self, branding_logo):
"""Sets the branding_logo of this Parameters.
:param branding_logo: The branding_logo of this Parameters. # noqa: E501
:type: str
"""
self._branding_logo = branding_logo
@property
def external_url(self):
"""Gets the external_url of this Parameters. # noqa: E501
http://host/ # noqa: E501
:return: The external_url of this Parameters. # noqa: E501
:rtype: str
"""
return self._external_url
@external_url.setter
def external_url(self, external_url):
"""Sets the external_url of this Parameters.
http://host/ # noqa: E501
:param external_url: The external_url of this Parameters. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
external_url is not None and len(external_url) > 1023):
raise ValueError("Invalid value for `external_url`, length must be less than or equal to `1023`") # noqa: E501
self._external_url = external_url
@property
def file_manager_recycle_bin(self):
"""Gets the file_manager_recycle_bin of this Parameters. # noqa: E501
Recycle bins are usually either in Workspace/Share or Volume folder # noqa: E501
:return: The file_manager_recycle_bin of this Parameters. # noqa: E501
:rtype: bool
"""
return self._file_manager_recycle_bin
@file_manager_recycle_bin.setter
def file_manager_recycle_bin(self, file_manager_recycle_bin):
"""Sets the file_manager_recycle_bin of this Parameters.
Recycle bins are usually either in Workspace/Share or Volume folder # noqa: E501
:param file_manager_recycle_bin: The file_manager_recycle_bin of this Parameters. # noqa: E501
:type: bool
"""
self._file_manager_recycle_bin = file_manager_recycle_bin
@property
def https_redirect(self):
"""Gets the https_redirect of this Parameters. # noqa: E501
:return: The https_redirect of this Parameters. # noqa: E501
:rtype: str
"""
return self._https_redirect
@https_redirect.setter
def https_redirect(self, https_redirect):
"""Sets the https_redirect of this Parameters.
:param https_redirect: The https_redirect of this Parameters. # noqa: E501
:type: str
"""
allowed_values = [None,"domain", "on"] # noqa: E501
if self.local_vars_configuration.client_side_validation and https_redirect not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `https_redirect` ({0}), must be one of {1}" # noqa: E501
.format(https_redirect, allowed_values)
)
self._https_redirect = https_redirect
@property
def language(self):
"""Gets the language of this Parameters. # noqa: E501
:return: The language of this Parameters. # noqa: E501
:rtype: str
"""
return self._language
@language.setter
def language(self, language):
"""Sets the language of this Parameters.
:param language: The language of this Parameters. # noqa: E501
:type: str
"""
allowed_values = ["en", "fr", "de", "ru"] # noqa: E501
if self.local_vars_configuration.client_side_validation and language not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `language` ({0}), must be one of {1}" # noqa: E501
.format(language, allowed_values)
)
self._language = language
@property
def ltfs_default_restore_to_original_location(self):
"""Gets the ltfs_default_restore_to_original_location of this Parameters. # noqa: E501
:return: The ltfs_default_restore_to_original_location of this Parameters. # noqa: E501
:rtype: bool
"""
return self._ltfs_default_restore_to_original_location
@ltfs_default_restore_to_original_location.setter
def ltfs_default_restore_to_original_location(self, ltfs_default_restore_to_original_location):
"""Sets the ltfs_default_restore_to_original_location of this Parameters.
:param ltfs_default_restore_to_original_location: The ltfs_default_restore_to_original_location of this Parameters. # noqa: E501
:type: bool
"""
self._ltfs_default_restore_to_original_location = ltfs_default_restore_to_original_location
@property
def ltfs_default_search_directories(self):
"""Gets the ltfs_default_search_directories of this Parameters. # noqa: E501
:return: The ltfs_default_search_directories of this Parameters. # noqa: E501
:rtype: bool
"""
return self._ltfs_default_search_directories
@ltfs_default_search_directories.setter
def ltfs_default_search_directories(self, ltfs_default_search_directories):
"""Sets the ltfs_default_search_directories of this Parameters.
:param ltfs_default_search_directories: The ltfs_default_search_directories of this Parameters. # noqa: E501
:type: bool
"""
self._ltfs_default_search_directories = ltfs_default_search_directories
@property
def ltfs_library_address(self):
"""Gets the ltfs_library_address of this Parameters. # noqa: E501
:return: The ltfs_library_address of this Parameters. # noqa: E501
:rtype: str
"""
return self._ltfs_library_address
@ltfs_library_address.setter
def ltfs_library_address(self, ltfs_library_address):
"""Sets the ltfs_library_address of this Parameters.
:param ltfs_library_address: The ltfs_library_address of this Parameters. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
ltfs_library_address is not None and len(ltfs_library_address) > 255):
raise ValueError("Invalid value for `ltfs_library_address`, length must be less than or equal to `255`") # noqa: E501
self._ltfs_library_address = ltfs_library_address
@property
def mail_styling(self):
"""Gets the mail_styling of this Parameters. # noqa: E501
:return: The mail_styling of this Parameters. # noqa: E501
:rtype: dict(str, str)
"""
return self._mail_styling
@mail_styling.setter
def mail_styling(self, mail_styling):
"""Sets the mail_styling of this Parameters.
:param mail_styling: The mail_styling of this Parameters. # noqa: E501
:type: dict(str, str)
"""
self._mail_styling = mail_styling
@property
def media_auto_play(self):
"""Gets the media_auto_play of this Parameters. # noqa: E501
:return: The media_auto_play of this Parameters. # noqa: E501
:rtype: bool
"""
return self._media_auto_play
@media_auto_play.setter
def media_auto_play(self, media_auto_play):
"""Sets the media_auto_play of this Parameters.
:param media_auto_play: The media_auto_play of this Parameters. # noqa: E501
:type: bool
"""
self._media_auto_play = media_auto_play
@property
def media_auto_proxy(self):
"""Gets the media_auto_proxy of this Parameters. # noqa: E501
:return: The media_auto_proxy of this Parameters. # noqa: E501
:rtype: bool
"""
return self._media_auto_proxy
@media_auto_proxy.setter
def media_auto_proxy(self, media_auto_proxy):
"""Sets the media_auto_proxy of this Parameters.
:param media_auto_proxy: The media_auto_proxy of this Parameters. # noqa: E501
:type: bool
"""
self._media_auto_proxy = media_auto_proxy
@property
def media_auto_scan(self):
"""Gets the media_auto_scan of this Parameters. # noqa: E501
:return: The media_auto_scan of this Parameters. # noqa: E501
:rtype: bool
"""
return self._media_auto_scan
@media_auto_scan.setter
def media_auto_scan(self, media_auto_scan):
"""Sets the media_auto_scan of this Parameters.
:param media_auto_scan: The media_auto_scan of this Parameters. # noqa: E501
:type: bool
"""
self._media_auto_scan = media_auto_scan
@property
def media_auto_transport(self):
"""Gets the media_auto_transport of this Parameters. # noqa: E501
:return: The media_auto_transport of this Parameters. # noqa: E501
:rtype: bool
"""
return self._media_auto_transport
@media_auto_transport.setter
def media_auto_transport(self, media_auto_transport):
"""Sets the media_auto_transport of this Parameters.
:param media_auto_transport: The media_auto_transport of this Parameters. # noqa: E501
:type: bool
"""
self._media_auto_transport = media_auto_transport
@property
def media_auto_veritone_upload(self):
"""Gets the media_auto_veritone_upload of this Parameters. # noqa: E501
:return: The media_auto_veritone_upload of this Parameters. # noqa: E501
:rtype: bool
"""
return self._media_auto_veritone_upload
@media_auto_veritone_upload.setter
def media_auto_veritone_upload(self, media_auto_veritone_upload):
"""Sets the media_auto_veritone_upload of this Parameters.
:param media_auto_veritone_upload: The media_auto_veritone_upload of this Parameters. # noqa: E501
:type: bool
"""
self._media_auto_veritone_upload = media_auto_veritone_upload
@property
def media_default_custom_field_type(self):
"""Gets the media_default_custom_field_type of this Parameters. # noqa: E501
:return: The media_default_custom_field_type of this Parameters. # noqa: E501
:rtype: str
"""
return self._media_default_custom_field_type
@media_default_custom_field_type.setter
def media_default_custom_field_type(self, media_default_custom_field_type):
"""Sets the media_default_custom_field_type of this Parameters.
:param media_default_custom_field_type: The media_default_custom_field_type of this Parameters. # noqa: E501
:type: str
"""
allowed_values = ["file", "asset"] # noqa: E501
if self.local_vars_configuration.client_side_validation and media_default_custom_field_type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `media_default_custom_field_type` ({0}), must be one of {1}" # noqa: E501
.format(media_default_custom_field_type, allowed_values)
)
self._media_default_custom_field_type = media_default_custom_field_type
@property
def media_default_delete_behaviour(self):
"""Gets the media_default_delete_behaviour of this Parameters. # noqa: E501
:return: The media_default_delete_behaviour of this Parameters. # noqa: E501
:rtype: str
"""
return self._media_default_delete_behaviour
@media_default_delete_behaviour.setter
def media_default_delete_behaviour(self, media_default_delete_behaviour):
"""Sets the media_default_delete_behaviour of this Parameters.
:param media_default_delete_behaviour: The media_default_delete_behaviour of this Parameters. # noqa: E501
:type: str
"""
allowed_values = ["disk", "database", "completely"] # noqa: E501
if self.local_vars_configuration.client_side_validation and media_default_delete_behaviour not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `media_default_delete_behaviour` ({0}), must be one of {1}" # noqa: E501
.format(media_default_delete_behaviour, allowed_values)
)
self._media_default_delete_behaviour = media_default_delete_behaviour
@property
def media_force_show_deleted(self):
"""Gets the media_force_show_deleted of this Parameters. # noqa: E501
:return: The media_force_show_deleted of this Parameters. # noqa: E501
:rtype: bool
"""
return self._media_force_show_deleted
@media_force_show_deleted.setter
def media_force_show_deleted(self, media_force_show_deleted):
"""Sets the media_force_show_deleted of this Parameters.
:param media_force_show_deleted: The media_force_show_deleted of this Parameters. # noqa: E501
:type: bool
"""
self._media_force_show_deleted = media_force_show_deleted
@property
def media_keep_selection_when_browsing(self):
"""Gets the media_keep_selection_when_browsing of this Parameters. # noqa: E501
:return: The media_keep_selection_when_browsing of this Parameters. # noqa: E501
:rtype: bool
"""
return self._media_keep_selection_when_browsing
@media_keep_selection_when_browsing.setter
def media_keep_selection_when_browsing(self, media_keep_selection_when_browsing):
"""Sets the media_keep_selection_when_browsing of this Parameters.
:param media_keep_selection_when_browsing: The media_keep_selection_when_browsing of this Parameters. # noqa: E501
:type: bool
"""
self._media_keep_selection_when_browsing = media_keep_selection_when_browsing
@property
def media_recycle_bin(self):
"""Gets the media_recycle_bin of this Parameters. # noqa: E501
Recycle bin is usually in the .recycle-bin folder in the volume root # noqa: E501
:return: The media_recycle_bin of this Parameters. # noqa: E501
:rtype: bool
"""
return self._media_recycle_bin
@media_recycle_bin.setter
def media_recycle_bin(self, media_recycle_bin):
"""Sets the media_recycle_bin of this Parameters.
Recycle bin is usually in the .recycle-bin folder in the volume root # noqa: E501
:param media_recycle_bin: The media_recycle_bin of this Parameters. # noqa: E501
:type: bool
"""
self._media_recycle_bin = media_recycle_bin
@property
def ntp_offer_sync(self):
"""Gets the ntp_offer_sync of this Parameters. # noqa: E501
:return: The ntp_offer_sync of this Parameters. # noqa: E501
:rtype: bool
"""
return self._ntp_offer_sync
@ntp_offer_sync.setter
def ntp_offer_sync(self, ntp_offer_sync):
"""Sets the ntp_offer_sync of this Parameters.
:param ntp_offer_sync: The ntp_offer_sync of this Parameters. # noqa: E501
:type: bool
"""
self._ntp_offer_sync = ntp_offer_sync
@property
def otp_policy(self):
"""Gets the otp_policy of this Parameters. # noqa: E501
:return: The otp_policy of this Parameters. # noqa: E501
:rtype: str
"""
return self._otp_policy
@otp_policy.setter
def otp_policy(self, otp_policy):
"""Sets the otp_policy of this Parameters.
:param otp_policy: The otp_policy of this Parameters. # noqa: E501
:type: str
"""
allowed_values = ["admin-only", "self-service-setup-only", "self-service-all"] # noqa: E501
if self.local_vars_configuration.client_side_validation and otp_policy not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `otp_policy` ({0}), must be one of {1}" # noqa: E501
.format(otp_policy, allowed_values)
)
self._otp_policy = otp_policy
@property
def tasks_run_scheduled(self):
"""Gets the tasks_run_scheduled of this Parameters. # noqa: E501
:return: The tasks_run_scheduled of this Parameters. # noqa: E501
:rtype: bool
"""
return self._tasks_run_scheduled
@tasks_run_scheduled.setter
def tasks_run_scheduled(self, tasks_run_scheduled):
"""Sets the tasks_run_scheduled of this Parameters.
:param tasks_run_scheduled: The tasks_run_scheduled of this Parameters. # noqa: E501
:type: bool
"""
self._tasks_run_scheduled = tasks_run_scheduled
@property
def users_default_permissions(self):
"""Gets the users_default_permissions of this Parameters. # noqa: E501
Copy this value from an existing user # noqa: E501
:return: The users_default_permissions of this Parameters. # noqa: E501
:rtype: str
"""
return self._users_default_permissions
@users_default_permissions.setter
def users_default_permissions(self, users_default_permissions):
"""Sets the users_default_permissions of this Parameters.
Copy this value from an existing user # noqa: E501
:param users_default_permissions: The users_default_permissions of this Parameters. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
users_default_permissions is not None and len(users_default_permissions) > 255):
raise ValueError("Invalid value for `users_default_permissions`, length must be less than or equal to `255`") # noqa: E501
self._users_default_permissions = users_default_permissions
@property
def workspaces_folder_template_path(self):
"""Gets the workspaces_folder_template_path of this Parameters. # noqa: E501
:return: The workspaces_folder_template_path of this Parameters. # noqa: E501
:rtype: str
"""
return self._workspaces_folder_template_path
@workspaces_folder_template_path.setter
def workspaces_folder_template_path(self, workspaces_folder_template_path):
"""Sets the workspaces_folder_template_path of this Parameters.
:param workspaces_folder_template_path: The workspaces_folder_template_path of this Parameters. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
workspaces_folder_template_path is not None and len(workspaces_folder_template_path) > 255):
raise ValueError("Invalid value for `workspaces_folder_template_path`, length must be less than or equal to `255`") # noqa: E501
self._workspaces_folder_template_path = workspaces_folder_template_path
@property
def workspaces_path(self):
"""Gets the workspaces_path of this Parameters. # noqa: E501
:return: The workspaces_path of this Parameters. # noqa: E501
:rtype: str
"""
return self._workspaces_path
@workspaces_path.setter
def workspaces_path(self, workspaces_path):
"""Sets the workspaces_path of this Parameters.
:param workspaces_path: The workspaces_path of this Parameters. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
workspaces_path is not None and len(workspaces_path) > 255):
raise ValueError("Invalid value for `workspaces_path`, length must be less than or equal to `255`") # noqa: E501
self._workspaces_path = workspaces_path
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Parameters):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Parameters):
return True
return self.to_dict() != other.to_dict()
| 36 | 795 | 0.672525 |
import pprint
import re
import six
from elements_sdk.configuration import Configuration
class Parameters(object):
openapi_types = {
'analytics': 'bool',
'branding_css': 'str',
'branding_logo': 'str',
'external_url': 'str',
'file_manager_recycle_bin': 'bool',
'https_redirect': 'str',
'language': 'str',
'ltfs_default_restore_to_original_location': 'bool',
'ltfs_default_search_directories': 'bool',
'ltfs_library_address': 'str',
'mail_styling': 'dict(str, str)',
'media_auto_play': 'bool',
'media_auto_proxy': 'bool',
'media_auto_scan': 'bool',
'media_auto_transport': 'bool',
'media_auto_veritone_upload': 'bool',
'media_default_custom_field_type': 'str',
'media_default_delete_behaviour': 'str',
'media_force_show_deleted': 'bool',
'media_keep_selection_when_browsing': 'bool',
'media_recycle_bin': 'bool',
'ntp_offer_sync': 'bool',
'otp_policy': 'str',
'tasks_run_scheduled': 'bool',
'users_default_permissions': 'str',
'workspaces_folder_template_path': 'str',
'workspaces_path': 'str'
}
attribute_map = {
'analytics': 'analytics',
'branding_css': 'branding_css',
'branding_logo': 'branding_logo',
'external_url': 'external_url',
'file_manager_recycle_bin': 'file_manager_recycle_bin',
'https_redirect': 'https_redirect',
'language': 'language',
'ltfs_default_restore_to_original_location': 'ltfs_default_restore_to_original_location',
'ltfs_default_search_directories': 'ltfs_default_search_directories',
'ltfs_library_address': 'ltfs_library_address',
'mail_styling': 'mail_styling',
'media_auto_play': 'media_auto_play',
'media_auto_proxy': 'media_auto_proxy',
'media_auto_scan': 'media_auto_scan',
'media_auto_transport': 'media_auto_transport',
'media_auto_veritone_upload': 'media_auto_veritone_upload',
'media_default_custom_field_type': 'media_default_custom_field_type',
'media_default_delete_behaviour': 'media_default_delete_behaviour',
'media_force_show_deleted': 'media_force_show_deleted',
'media_keep_selection_when_browsing': 'media_keep_selection_when_browsing',
'media_recycle_bin': 'media_recycle_bin',
'ntp_offer_sync': 'ntp_offer_sync',
'otp_policy': 'otp_policy',
'tasks_run_scheduled': 'tasks_run_scheduled',
'users_default_permissions': 'users_default_permissions',
'workspaces_folder_template_path': 'workspaces_folder_template_path',
'workspaces_path': 'workspaces_path'
}
def __init__(self, analytics=None, branding_css=None, branding_logo=None, external_url=None, file_manager_recycle_bin=None, https_redirect=None, language=None, ltfs_default_restore_to_original_location=None, ltfs_default_search_directories=None, ltfs_library_address=None, mail_styling=None, media_auto_play=None, media_auto_proxy=None, media_auto_scan=None, media_auto_transport=None, media_auto_veritone_upload=None, media_default_custom_field_type=None, media_default_delete_behaviour=None, media_force_show_deleted=None, media_keep_selection_when_browsing=None, media_recycle_bin=None, ntp_offer_sync=None, otp_policy=None, tasks_run_scheduled=None, users_default_permissions=None, workspaces_folder_template_path=None, workspaces_path=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._analytics = None
self._branding_css = None
self._branding_logo = None
self._external_url = None
self._file_manager_recycle_bin = None
self._https_redirect = None
self._language = None
self._ltfs_default_restore_to_original_location = None
self._ltfs_default_search_directories = None
self._ltfs_library_address = None
self._mail_styling = None
self._media_auto_play = None
self._media_auto_proxy = None
self._media_auto_scan = None
self._media_auto_transport = None
self._media_auto_veritone_upload = None
self._media_default_custom_field_type = None
self._media_default_delete_behaviour = None
self._media_force_show_deleted = None
self._media_keep_selection_when_browsing = None
self._media_recycle_bin = None
self._ntp_offer_sync = None
self._otp_policy = None
self._tasks_run_scheduled = None
self._users_default_permissions = None
self._workspaces_folder_template_path = None
self._workspaces_path = None
self.discriminator = None
if analytics is not None:
self.analytics = analytics
if branding_css is not None:
self.branding_css = branding_css
if branding_logo is not None:
self.branding_logo = branding_logo
self.external_url = external_url
if file_manager_recycle_bin is not None:
self.file_manager_recycle_bin = file_manager_recycle_bin
self.https_redirect = https_redirect
if language is not None:
self.language = language
if ltfs_default_restore_to_original_location is not None:
self.ltfs_default_restore_to_original_location = ltfs_default_restore_to_original_location
if ltfs_default_search_directories is not None:
self.ltfs_default_search_directories = ltfs_default_search_directories
self.ltfs_library_address = ltfs_library_address
if mail_styling is not None:
self.mail_styling = mail_styling
if media_auto_play is not None:
self.media_auto_play = media_auto_play
if media_auto_proxy is not None:
self.media_auto_proxy = media_auto_proxy
if media_auto_scan is not None:
self.media_auto_scan = media_auto_scan
if media_auto_transport is not None:
self.media_auto_transport = media_auto_transport
if media_auto_veritone_upload is not None:
self.media_auto_veritone_upload = media_auto_veritone_upload
if media_default_custom_field_type is not None:
self.media_default_custom_field_type = media_default_custom_field_type
if media_default_delete_behaviour is not None:
self.media_default_delete_behaviour = media_default_delete_behaviour
self.media_force_show_deleted = media_force_show_deleted
if media_keep_selection_when_browsing is not None:
self.media_keep_selection_when_browsing = media_keep_selection_when_browsing
if media_recycle_bin is not None:
self.media_recycle_bin = media_recycle_bin
if ntp_offer_sync is not None:
self.ntp_offer_sync = ntp_offer_sync
if otp_policy is not None:
self.otp_policy = otp_policy
if tasks_run_scheduled is not None:
self.tasks_run_scheduled = tasks_run_scheduled
if users_default_permissions is not None:
self.users_default_permissions = users_default_permissions
if workspaces_folder_template_path is not None:
self.workspaces_folder_template_path = workspaces_folder_template_path
if workspaces_path is not None:
self.workspaces_path = workspaces_path
@property
def analytics(self):
return self._analytics
@analytics.setter
def analytics(self, analytics):
self._analytics = analytics
@property
def branding_css(self):
return self._branding_css
@branding_css.setter
def branding_css(self, branding_css):
self._branding_css = branding_css
@property
def branding_logo(self):
return self._branding_logo
@branding_logo.setter
def branding_logo(self, branding_logo):
self._branding_logo = branding_logo
@property
def external_url(self):
return self._external_url
@external_url.setter
def external_url(self, external_url):
if (self.local_vars_configuration.client_side_validation and
external_url is not None and len(external_url) > 1023):
raise ValueError("Invalid value for `external_url`, length must be less than or equal to `1023`")
self._external_url = external_url
@property
def file_manager_recycle_bin(self):
return self._file_manager_recycle_bin
@file_manager_recycle_bin.setter
def file_manager_recycle_bin(self, file_manager_recycle_bin):
self._file_manager_recycle_bin = file_manager_recycle_bin
@property
def https_redirect(self):
return self._https_redirect
@https_redirect.setter
def https_redirect(self, https_redirect):
allowed_values = [None,"domain", "on"]
if self.local_vars_configuration.client_side_validation and https_redirect not in allowed_values:
raise ValueError(
"Invalid value for `https_redirect` ({0}), must be one of {1}"
.format(https_redirect, allowed_values)
)
self._https_redirect = https_redirect
    # ------------------------------------------------------------------
    # Generated accessors (OpenAPI-client model style).  Every property is
    # backed by a private ``_<name>`` attribute; setters validate input
    # only when ``local_vars_configuration.client_side_validation`` is on.
    # ------------------------------------------------------------------
    # Installation UI language code.
    @property
    def language(self):
        """str: UI language; one of "en", "fr", "de", "ru"."""
        return self._language
    @language.setter
    def language(self, language):
        # Reject anything outside the supported language codes.
        allowed_values = ["en", "fr", "de", "ru"]
        if self.local_vars_configuration.client_side_validation and language not in allowed_values:
            raise ValueError(
                "Invalid value for `language` ({0}), must be one of {1}"
                .format(language, allowed_values)
            )
        self._language = language
    # LTFS restore default: restore to original location (no validation).
    @property
    def ltfs_default_restore_to_original_location(self):
        return self._ltfs_default_restore_to_original_location
    @ltfs_default_restore_to_original_location.setter
    def ltfs_default_restore_to_original_location(self, ltfs_default_restore_to_original_location):
        self._ltfs_default_restore_to_original_location = ltfs_default_restore_to_original_location
    # LTFS default search directories (no validation).
    @property
    def ltfs_default_search_directories(self):
        return self._ltfs_default_search_directories
    @ltfs_default_search_directories.setter
    def ltfs_default_search_directories(self, ltfs_default_search_directories):
        self._ltfs_default_search_directories = ltfs_default_search_directories
    # LTFS library address; validated to at most 255 characters.
    @property
    def ltfs_library_address(self):
        return self._ltfs_library_address
    @ltfs_library_address.setter
    def ltfs_library_address(self, ltfs_library_address):
        # None is allowed; only non-None values are length-checked.
        if (self.local_vars_configuration.client_side_validation and
                ltfs_library_address is not None and len(ltfs_library_address) > 255):
            raise ValueError("Invalid value for `ltfs_library_address`, length must be less than or equal to `255`")
        self._ltfs_library_address = ltfs_library_address
    # Outgoing-mail styling option (no validation).
    @property
    def mail_styling(self):
        return self._mail_styling
    @mail_styling.setter
    def mail_styling(self, mail_styling):
        self._mail_styling = mail_styling
    # media_auto_* toggles below are stored verbatim (no validation).
    @property
    def media_auto_play(self):
        return self._media_auto_play
    @media_auto_play.setter
    def media_auto_play(self, media_auto_play):
        self._media_auto_play = media_auto_play
    @property
    def media_auto_proxy(self):
        return self._media_auto_proxy
    @media_auto_proxy.setter
    def media_auto_proxy(self, media_auto_proxy):
        self._media_auto_proxy = media_auto_proxy
    @property
    def media_auto_scan(self):
        return self._media_auto_scan
    @media_auto_scan.setter
    def media_auto_scan(self, media_auto_scan):
        self._media_auto_scan = media_auto_scan
    @property
    def media_auto_transport(self):
        return self._media_auto_transport
    @media_auto_transport.setter
    def media_auto_transport(self, media_auto_transport):
        self._media_auto_transport = media_auto_transport
    @property
    def media_auto_veritone_upload(self):
        return self._media_auto_veritone_upload
    @media_auto_veritone_upload.setter
    def media_auto_veritone_upload(self, media_auto_veritone_upload):
        self._media_auto_veritone_upload = media_auto_veritone_upload
    # Default custom-field type; restricted to "file" or "asset".
    @property
    def media_default_custom_field_type(self):
        return self._media_default_custom_field_type
    @media_default_custom_field_type.setter
    def media_default_custom_field_type(self, media_default_custom_field_type):
        allowed_values = ["file", "asset"]
        if self.local_vars_configuration.client_side_validation and media_default_custom_field_type not in allowed_values:
            raise ValueError(
                "Invalid value for `media_default_custom_field_type` ({0}), must be one of {1}"
                .format(media_default_custom_field_type, allowed_values)
            )
        self._media_default_custom_field_type = media_default_custom_field_type
    # Default delete behaviour; restricted to "disk", "database" or "completely".
    @property
    def media_default_delete_behaviour(self):
        return self._media_default_delete_behaviour
    @media_default_delete_behaviour.setter
    def media_default_delete_behaviour(self, media_default_delete_behaviour):
        allowed_values = ["disk", "database", "completely"]
        if self.local_vars_configuration.client_side_validation and media_default_delete_behaviour not in allowed_values:
            raise ValueError(
                "Invalid value for `media_default_delete_behaviour` ({0}), must be one of {1}"
                .format(media_default_delete_behaviour, allowed_values)
            )
        self._media_default_delete_behaviour = media_default_delete_behaviour
    # Remaining media/NTP toggles are stored verbatim (no validation).
    @property
    def media_force_show_deleted(self):
        return self._media_force_show_deleted
    @media_force_show_deleted.setter
    def media_force_show_deleted(self, media_force_show_deleted):
        self._media_force_show_deleted = media_force_show_deleted
    @property
    def media_keep_selection_when_browsing(self):
        return self._media_keep_selection_when_browsing
    @media_keep_selection_when_browsing.setter
    def media_keep_selection_when_browsing(self, media_keep_selection_when_browsing):
        self._media_keep_selection_when_browsing = media_keep_selection_when_browsing
    @property
    def media_recycle_bin(self):
        return self._media_recycle_bin
    @media_recycle_bin.setter
    def media_recycle_bin(self, media_recycle_bin):
        self._media_recycle_bin = media_recycle_bin
    @property
    def ntp_offer_sync(self):
        return self._ntp_offer_sync
    @ntp_offer_sync.setter
    def ntp_offer_sync(self, ntp_offer_sync):
        self._ntp_offer_sync = ntp_offer_sync
    # One-time-password policy; restricted to the three listed modes.
    @property
    def otp_policy(self):
        return self._otp_policy
    @otp_policy.setter
    def otp_policy(self, otp_policy):
        allowed_values = ["admin-only", "self-service-setup-only", "self-service-all"]
        if self.local_vars_configuration.client_side_validation and otp_policy not in allowed_values:
            raise ValueError(
                "Invalid value for `otp_policy` ({0}), must be one of {1}"
                .format(otp_policy, allowed_values)
            )
        self._otp_policy = otp_policy
    # Scheduled-task execution toggle (no validation).
    @property
    def tasks_run_scheduled(self):
        return self._tasks_run_scheduled
    @tasks_run_scheduled.setter
    def tasks_run_scheduled(self, tasks_run_scheduled):
        self._tasks_run_scheduled = tasks_run_scheduled
    # Default permission string for new users; validated to <= 255 characters.
    @property
    def users_default_permissions(self):
        return self._users_default_permissions
    @users_default_permissions.setter
    def users_default_permissions(self, users_default_permissions):
        if (self.local_vars_configuration.client_side_validation and
                users_default_permissions is not None and len(users_default_permissions) > 255):
            raise ValueError("Invalid value for `users_default_permissions`, length must be less than or equal to `255`")
        self._users_default_permissions = users_default_permissions
    # Workspace folder-template path; validated to <= 255 characters.
    @property
    def workspaces_folder_template_path(self):
        return self._workspaces_folder_template_path
    @workspaces_folder_template_path.setter
    def workspaces_folder_template_path(self, workspaces_folder_template_path):
        if (self.local_vars_configuration.client_side_validation and
                workspaces_folder_template_path is not None and len(workspaces_folder_template_path) > 255):
            raise ValueError("Invalid value for `workspaces_folder_template_path`, length must be less than or equal to `255`")
        self._workspaces_folder_template_path = workspaces_folder_template_path
    # Workspaces root path; validated to <= 255 characters.
    @property
    def workspaces_path(self):
        return self._workspaces_path
    @workspaces_path.setter
    def workspaces_path(self, workspaces_path):
        if (self.local_vars_configuration.client_side_validation and
                workspaces_path is not None and len(workspaces_path) > 255):
            raise ValueError("Invalid value for `workspaces_path`, length must be less than or equal to `255`")
        self._workspaces_path = workspaces_path
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
    def to_str(self):
        """Return the pretty-printed string form of the model's dict."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """True when `other` is a Parameters with identical field values."""
        if not isinstance(other, Parameters):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """True when the models differ in type or in any field value."""
        if not isinstance(other, Parameters):
            return True
        return self.to_dict() != other.to_dict()
| true | true |
f7fd416df8bd42b82e718c3ef1f64e6c2ef9d427 | 3,746 | py | Python | Omdena Projects/hourly_merge.py | DANancy/Coding-Playground | b82e3689ccc4771ee59c3472db78333ba17671b9 | [
"MIT"
] | null | null | null | Omdena Projects/hourly_merge.py | DANancy/Coding-Playground | b82e3689ccc4771ee59c3472db78333ba17671b9 | [
"MIT"
] | null | null | null | Omdena Projects/hourly_merge.py | DANancy/Coding-Playground | b82e3689ccc4771ee59c3472db78333ba17671b9 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from datetime import datetime
def main(consumption_data, weather_data, public_holidays_data, service_location_id):
    """Merge hourly consumption, weather and public-holiday data into one frame.

    Parameters
    ----------
    consumption_data : pandas.DataFrame
        Columns 'date' (parseable as datetime) and 'consumption' (in Wh).
    weather_data : pandas.DataFrame
        Columns 'datetime', 'temp' (degrees C), 'humidity' (%), 'clouds' (%).
    public_holidays_data : pandas.DataFrame
        Columns 'day' (date string), 'holiday' ('Y' for holidays), 'holidayName'.
    service_location_id
        Identifier embedded into the suggested output file name.

    Returns
    -------
    tuple(str, pandas.DataFrame)
        Suggested CSV file name (with current timestamp) and the merged frame
        indexed by hourly 'datetime'.
    """
    # --- Consumption: hourly datetime index, convert Wh -> kWh ---
    df = consumption_data.astype({'date': 'datetime64[ns]'}).rename(columns={'date': 'datetime'}).set_index('datetime')
    df = pd.DataFrame(df['consumption'])
    df = df.asfreq('1H')
    # kWh is the more common metric than Wh.
    df['consumption'] = df['consumption'] / 1000
    df.rename(columns={'consumption': 'consumption_kWh'}, inplace=True)
    # --- Calendar features derived from the index ---
    df['date'] = df.index.strftime('%Y-%m-%d')
    df['year'] = df.index.year
    df['dayOfYear'] = df.index.dayofyear
    df['month'] = df.index.month
    df['monthName'] = df.index.month_name()
    df['week'] = df.index.isocalendar().week
    df['day'] = df.index.day
    df['dayName'] = df.index.day_name()
    df['hour'] = df.index.hour
    df['minute'] = df.index.minute
    df['dayOfWeek'] = df.index.dayofweek
    df['weekend'] = df['dayOfWeek'].apply(lambda x: 1 if x >= 5 else 0)
    df['time'] = df.index.time
    df['dayOfMonth'] = df.index.strftime('%m-%d')
    df['hourMinute'] = df.index.strftime('%H:%M')
    # Six 4-hour sessions per day: 1=late night ... 6=night.
    bins = [0, 4, 8, 12, 16, 20, 24]
    labels = [1, 2, 3, 4, 5, 6]
    df['session'] = pd.cut(df['hour'], bins=bins, labels=labels, include_lowest=True)

    def season_df(row):
        # Southern-hemisphere seasons.  The original `month == 12 | month == 1`
        # chain was an operator-precedence bug (`|` binds tighter than `==`),
        # which made every row fall through to season 1; membership tests are
        # the intended grouping.
        if row['month'] in (12, 1, 2):
            return 2  # Summer
        elif row['month'] in (3, 4, 5):
            return 3  # Autumn
        elif row['month'] in (6, 7, 8):
            return 4  # Winter
        else:
            return 1  # Spring
    df['season'] = df.apply(season_df, axis=1)
    # --- Weather: same hourly index; scale percentages to [0, 1] for ML ---
    weather_df = weather_data.astype({'datetime': 'datetime64[ns]'})
    weather_df = weather_df[['temp', 'humidity', 'clouds', 'datetime']].set_index('datetime')
    weather_df = weather_df.asfreq('1H')
    weather_df['clouds'] = weather_df['clouds'] / 100
    weather_df.rename(columns={'clouds': 'cloud_cover'}, inplace=True)
    # Temperature is in degrees C; rename with units.
    weather_df.rename(columns={'temp': 'temp_degreeC'}, inplace=True)
    # Humidity is relative humidity as a %.
    weather_df['humidity'] = weather_df['humidity'] / 100
    weather_df.rename(columns={'humidity': 'rel_humidity'}, inplace=True)
    # --- Holidays: copy-select instead of mutating the caller's frame ---
    holiday_df = public_holidays_data[['day', 'holiday', 'holidayName']].rename(columns={'day': 'date'})
    # --- Merge all datasets on the hourly index, then on the date column ---
    combined_df = df.join(weather_df)
    combined_df['date'] = pd.to_datetime(combined_df['date'], utc=False)
    holiday_df['date'] = pd.to_datetime(holiday_df['date'], utc=False)
    combined_df = pd.merge(combined_df.reset_index(), holiday_df)
    combined_df = combined_df.rename(columns={'index': 'datetime'}).set_index('datetime')
    # Holiday 'Y' -> 1, anything else (incl. NaN) -> 0.
    combined_df['holiday'] = np.where(combined_df['holiday'] == 'Y', 1, 0)
    # Working day = not a weekend and not a holiday.
    combined_df['workingDay'] = np.where(np.logical_and(combined_df['weekend'] == 0, combined_df['holiday'] == 0), 1, 0)
    new_time = str(int(datetime.now().timestamp()))
    file_name = f'merged_{service_location_id}_timestamp_{new_time}.csv'
    return file_name, combined_df
| 42.089888 | 118 | 0.628938 | import pandas as pd
import numpy as np
from datetime import datetime
def main(consumption_data, weather_data, public_holidays_data, service_location_id):
df = consumption_data.astype({'date':'datetime64[ns]'}).rename(columns={'date':'datetime'}).set_index('datetime')
df = pd.DataFrame(df['consumption'])
df = df.asfreq('1H')
df['consumption'] = df['consumption']/1000
df.rename(columns={'consumption':'consumption_kWh'}, inplace=True)
df['date'] = df.index.strftime('%Y-%m-%d')
df['year'] = df.index.year
df['dayOfYear'] = df.index.dayofyear
df['month'] = df.index.month
df['monthName'] = df.index.month_name()
df['week'] = df.index.isocalendar().week
df['day'] = df.index.day
df['dayName'] = df.index.day_name()
df['hour'] = df.index.hour
df['minute'] = df.index.minute
df['dayOfWeek'] = df.index.dayofweek
df['weekend'] = df['dayOfWeek'].apply(lambda x: 1 if x >= 5 else 0)
df['time'] = df.index.time
df['dayOfMonth'] = df.index.strftime('%m-%d')
df['hourMinute'] = df.index.strftime('%H:%M')
bins = [0,4,8,12,16,20,24]
labels = [1, 2,3,4,5,6]
df['session'] = pd.cut(df['hour'], bins=bins, labels=labels, include_lowest=True)
def season_df(df):
if df['month'] == 12 | df['month'] == 1 | df['month'] == 2:
return 2
elif df['month'] == 3 | df['month'] == 4 | df['month'] == 5:
return 3
elif df['month'] == 6 | df['month'] == 7 | df['month'] == 8:
return 4
else:
return 1
df['season'] = df.apply(season_df, axis = 1)
weather_df = weather_data.astype({'datetime':'datetime64[ns]'})
weather_df = weather_df[['temp', 'humidity', 'clouds','datetime']].set_index('datetime')
weather_df = weather_df.asfreq('1H')
weather_df['clouds'] = weather_df['clouds']/100
weather_df.rename(columns={'clouds':'cloud_cover'}, inplace=True)
weather_df.rename(columns={'temp':'temp_degreeC'}, inplace=True)
weather_df['humidity'] = weather_df['humidity']/100
weather_df.rename(columns={'humidity':'rel_humidity'}, inplace=True)
holiday_df = public_holidays_data
holiday_df = holiday_df[['day','holiday','holidayName']]
holiday_df.rename(columns = {'day':'date'},inplace=True)
combined_df = df.join(weather_df)
combined_df['date'] = pd.to_datetime(combined_df['date'], utc = False)
holiday_df['date'] = pd.to_datetime(holiday_df['date'], utc = False)
combined_df = pd.merge(combined_df.reset_index(), holiday_df)
combined_df = combined_df.rename(columns={'index':'datetime'}).set_index('datetime')
combined_df['holiday'] = np.where(combined_df['holiday']=='Y',1,0)
combined_df['workingDay'] = np.where(np.logical_and(combined_df['weekend']==0, combined_df['holiday']==0),1,0)
today = datetime.now()
new_time = str(int((today).timestamp()))
file_name = f'merged_{service_location_id}_timestamp_{new_time}.csv'
return file_name, combined_df
| true | true |
f7fd41d9446d61e4cdaed0640ec94a97e63efe89 | 5,198 | py | Python | tests/integration/shell/master.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | 2 | 2015-09-21T14:13:30.000Z | 2016-02-12T11:33:46.000Z | tests/integration/shell/master.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | 1 | 2019-09-06T13:57:28.000Z | 2019-09-06T13:57:28.000Z | tests/integration/shell/master.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | 2 | 2017-01-05T16:14:59.000Z | 2019-01-31T23:15:25.000Z | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
tests.integration.shell.master
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
from __future__ import absolute_import
import os
import yaml
import signal
import shutil
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
import integration.utils
from integration.utils import testprogram
import salt.utils
class MasterTest(integration.ShellCase, testprogram.TestProgramCase, integration.ShellCaseCommonTestsMixIn):
    """Integration tests covering salt-master start-up and exit statuses."""

    _call_binary_ = 'salt-master'

    def test_issue_7754(self):
        '''
        A ``file://`` URI configured as ``log_file`` must not cause the master
        to create a literal ``file:`` directory under the config dir (#7754).
        '''
        old_cwd = os.getcwd()
        config_dir = os.path.join(integration.TMP, 'issue-7754')
        if not os.path.isdir(config_dir):
            os.makedirs(config_dir)
        os.chdir(config_dir)
        config_file_name = 'master'
        pid_path = os.path.join(config_dir, '{0}.pid'.format(config_file_name))
        with salt.utils.fopen(self.get_config_file_path(config_file_name), 'r') as fhr:
            # safe_load builds plain Python objects only; yaml.load without an
            # explicit Loader is deprecated and can construct arbitrary objects.
            config = yaml.safe_load(fhr.read())
        # Point the master at a scratch root and shift the ports so this run
        # cannot collide with the suite's primary master daemon.
        config['root_dir'] = config_dir
        config['log_file'] = 'file:///tmp/log/LOG_LOCAL3'
        config['ret_port'] = config['ret_port'] + 10
        config['publish_port'] = config['publish_port'] + 10
        with salt.utils.fopen(os.path.join(config_dir, config_file_name), 'w') as fhw:
            fhw.write(
                yaml.dump(config, default_flow_style=False)
            )
        self.run_script(
            self._call_binary_,
            '--config-dir {0} --pid-file {1} -l debug'.format(
                config_dir,
                pid_path
            ),
            timeout=5,
            catch_stderr=True,
            with_retcode=True
        )
        # Kill the daemon if the short timed run left it behind.
        if os.path.exists(pid_path):
            with salt.utils.fopen(pid_path) as fhr:
                try:
                    os.kill(int(fhr.read()), signal.SIGKILL)
                except OSError:
                    pass
        try:
            self.assertFalse(os.path.isdir(os.path.join(config_dir, 'file:')))
        finally:
            self.chdir(old_cwd)
            if os.path.isdir(config_dir):
                shutil.rmtree(config_dir)

    def _assert_master_exit_status(self, master, args, status_name, message):
        # Shared driver for the exit-status tests below: run the daemon with
        # `args` and assert it terminated with the named sysexits code.
        stdout, stderr, status = master.run(
            args=args,
            catch_stderr=True,
            with_retcode=True,
        )
        self.assert_exit_status(
            status, status_name,
            message=message,
            stdout=stdout,
            stderr=integration.utils.decode_byte_list(stderr)
        )

    def test_exit_status_unknown_user(self):
        '''
        Ensure correct exit status when the master is configured to run as an unknown user.
        '''
        master = testprogram.TestDaemonSaltMaster(
            name='unknown_user',
            configs={'master': {'map': {'user': 'some_unknown_user_xyz'}}},
            parent_dir=self._test_dir,
        )
        # Call setup here to ensure config and script exist
        master.setup()
        self._assert_master_exit_status(
            master, ['-d'], 'EX_NOUSER', 'unknown user not on system')
        # Although the start-up should fail, call shutdown() to set the internal
        # _shutdown flag and avoid the registered atexit calls to cause timeout
        # exceptions and respective traceback
        master.shutdown()

    # pylint: disable=invalid-name
    def test_exit_status_unknown_argument(self):
        '''
        Ensure correct exit status when an unknown argument is passed to salt-master.
        '''
        master = testprogram.TestDaemonSaltMaster(
            name='unknown_argument',
            parent_dir=self._test_dir,
        )
        # Call setup here to ensure config and script exist
        master.setup()
        self._assert_master_exit_status(
            master, ['-d', '--unknown-argument'], 'EX_USAGE', 'unknown argument')
        # Although the start-up should fail, call shutdown() to set the internal
        # _shutdown flag and avoid the registered atexit calls to cause timeout
        # exceptions and respective traceback
        master.shutdown()

    def test_exit_status_correct_usage(self):
        '''
        Ensure correct exit status when salt-master starts correctly.
        '''
        master = testprogram.TestDaemonSaltMaster(
            name='correct_usage',
            parent_dir=self._test_dir,
        )
        # Call setup here to ensure config and script exist
        master.setup()
        self._assert_master_exit_status(
            master, ['-d'], 'EX_OK', 'correct usage')
        master.shutdown()
# Allow running this test module directly, outside the full suite runner.
if __name__ == '__main__':
    integration.run_tests(MasterTest)
| 32.08642 | 108 | 0.590227 |
from __future__ import absolute_import
import os
import yaml
import signal
import shutil
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
import integration
import integration.utils
from integration.utils import testprogram
import salt.utils
class MasterTest(integration.ShellCase, testprogram.TestProgramCase, integration.ShellCaseCommonTestsMixIn):
_call_binary_ = 'salt-master'
def test_issue_7754(self):
old_cwd = os.getcwd()
config_dir = os.path.join(integration.TMP, 'issue-7754')
if not os.path.isdir(config_dir):
os.makedirs(config_dir)
os.chdir(config_dir)
config_file_name = 'master'
pid_path = os.path.join(config_dir, '{0}.pid'.format(config_file_name))
with salt.utils.fopen(self.get_config_file_path(config_file_name), 'r') as fhr:
config = yaml.load(fhr.read())
config['root_dir'] = config_dir
config['log_file'] = 'file:///tmp/log/LOG_LOCAL3'
config['ret_port'] = config['ret_port'] + 10
config['publish_port'] = config['publish_port'] + 10
with salt.utils.fopen(os.path.join(config_dir, config_file_name), 'w') as fhw:
fhw.write(
yaml.dump(config, default_flow_style=False)
)
ret = self.run_script(
self._call_binary_,
'--config-dir {0} --pid-file {1} -l debug'.format(
config_dir,
pid_path
),
timeout=5,
catch_stderr=True,
with_retcode=True
)
if os.path.exists(pid_path):
with salt.utils.fopen(pid_path) as fhr:
try:
os.kill(int(fhr.read()), signal.SIGKILL)
except OSError:
pass
try:
self.assertFalse(os.path.isdir(os.path.join(config_dir, 'file:')))
finally:
self.chdir(old_cwd)
if os.path.isdir(config_dir):
shutil.rmtree(config_dir)
def test_exit_status_unknown_user(self):
master = testprogram.TestDaemonSaltMaster(
name='unknown_user',
configs={'master': {'map': {'user': 'some_unknown_user_xyz'}}},
parent_dir=self._test_dir,
)
master.setup()
stdout, stderr, status = master.run(
args=['-d'],
catch_stderr=True,
with_retcode=True,
)
self.assert_exit_status(
status, 'EX_NOUSER',
message='unknown user not on system',
stdout=stdout,
stderr=integration.utils.decode_byte_list(stderr)
)
master.shutdown()
def test_exit_status_unknown_argument(self):
master = testprogram.TestDaemonSaltMaster(
name='unknown_argument',
parent_dir=self._test_dir,
)
master.setup()
stdout, stderr, status = master.run(
args=['-d', '--unknown-argument'],
catch_stderr=True,
with_retcode=True,
)
self.assert_exit_status(
status, 'EX_USAGE',
message='unknown argument',
stdout=stdout,
stderr=integration.utils.decode_byte_list(stderr)
)
master.shutdown()
def test_exit_status_correct_usage(self):
master = testprogram.TestDaemonSaltMaster(
name='correct_usage',
parent_dir=self._test_dir,
)
master.setup()
stdout, stderr, status = master.run(
args=['-d'],
catch_stderr=True,
with_retcode=True,
)
self.assert_exit_status(
status, 'EX_OK',
message='correct usage',
stdout=stdout,
stderr=integration.utils.decode_byte_list(stderr)
)
master.shutdown()
if __name__ == '__main__':
integration.run_tests(MasterTest)
| true | true |
f7fd42a89bf93e0aeff0ec7ab6f06aeaea6b42e9 | 1,154 | py | Python | ProcessScripts/DevsetTransform.py | peterzheng98/fuzzy-system | 6ae14714c73d9a70b4d4c0a27e9da0d54a0fe5a8 | [
"MIT"
] | null | null | null | ProcessScripts/DevsetTransform.py | peterzheng98/fuzzy-system | 6ae14714c73d9a70b4d4c0a27e9da0d54a0fe5a8 | [
"MIT"
] | null | null | null | ProcessScripts/DevsetTransform.py | peterzheng98/fuzzy-system | 6ae14714c73d9a70b4d4c0a27e9da0d54a0fe5a8 | [
"MIT"
] | null | null | null | import pandas as pd
import sys
from collections import Counter
from tqdm import tqdm
import json
if __name__ == '__main__':
filepath = '../datasets/tokenized/in_domain_dev.tsv'
output_word_cab = '../datasets/tokenized/wordlist.txt'
df = pd.read_csv(filepath, sep='\t', header=0)
word_list_cnt = open(output_word_cab, 'r').readlines()
word_list_dict = {d.split('\t')[0]: i for i, d in enumerate(word_list_cnt)}
bar1 = tqdm(desc='Transform sentences', total=len(df))
sentence_List = []
label_List = []
for i in range(len(df)):
label1, verdict, human, sentences = df.iloc[i]
label_List.append(human * 2 + verdict)
word_sentence_list = sentences.split(' ')
word_ap = []
for word in word_sentence_list:
if word in word_list_dict.keys():
word_ap.append(word_list_dict[word])
else:
word_ap.append(len(word_list_dict))
sentence_List.append(json.dumps(word_ap))
bar1.update()
df = pd.DataFrame({'data': sentence_List, 'label': label_List})
df.to_csv('../datasets/tokenized/in_domain_dev.reformed.csv')
| 33.941176 | 79 | 0.646447 | import pandas as pd
import sys
from collections import Counter
from tqdm import tqdm
import json
if __name__ == '__main__':
filepath = '../datasets/tokenized/in_domain_dev.tsv'
output_word_cab = '../datasets/tokenized/wordlist.txt'
df = pd.read_csv(filepath, sep='\t', header=0)
word_list_cnt = open(output_word_cab, 'r').readlines()
word_list_dict = {d.split('\t')[0]: i for i, d in enumerate(word_list_cnt)}
bar1 = tqdm(desc='Transform sentences', total=len(df))
sentence_List = []
label_List = []
for i in range(len(df)):
label1, verdict, human, sentences = df.iloc[i]
label_List.append(human * 2 + verdict)
word_sentence_list = sentences.split(' ')
word_ap = []
for word in word_sentence_list:
if word in word_list_dict.keys():
word_ap.append(word_list_dict[word])
else:
word_ap.append(len(word_list_dict))
sentence_List.append(json.dumps(word_ap))
bar1.update()
df = pd.DataFrame({'data': sentence_List, 'label': label_List})
df.to_csv('../datasets/tokenized/in_domain_dev.reformed.csv')
| true | true |
f7fd42bd1bdd15a643d253b8ce19570f564b7368 | 759 | py | Python | cdk/kesher_service_cdk/service_stack/kesher_stack.py | CyberArkForTheCommunity/Kesher-Backend | 51a86ac1901c08ba932e81f5c0cd2ba81b05d8e6 | [
"MIT"
] | 1 | 2021-05-13T10:58:06.000Z | 2021-05-13T10:58:06.000Z | cdk/kesher_service_cdk/service_stack/kesher_stack.py | CyberArkForTheCommunity/Kesher-Backend | 51a86ac1901c08ba932e81f5c0cd2ba81b05d8e6 | [
"MIT"
] | null | null | null | cdk/kesher_service_cdk/service_stack/kesher_stack.py | CyberArkForTheCommunity/Kesher-Backend | 51a86ac1901c08ba932e81f5c0cd2ba81b05d8e6 | [
"MIT"
] | 2 | 2021-04-06T15:28:16.000Z | 2021-05-13T23:02:59.000Z | import getpass
from aws_cdk import core
from kesher_service_cdk.service_stack.kesher_construct import KesherServiceEnvironment
from kesher_service_cdk.service_stack.stack_utils import get_stack_name
from .auth_construct import KesherAuth
from .db_construct import DatabaseConstruct
class KesherStack(core.Stack):
    """Root CDK stack wiring auth, service environment and database constructs."""
    # pylint: disable=redefined-builtin
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # Auth first: the Cognito user-pool ARN it exposes is consumed by
        # both constructs created below, so the order matters.
        self.kesher_auth = KesherAuth(self, f"{get_stack_name()}Auth")
        self.kesher_service_env = KesherServiceEnvironment(self, "Service", self.kesher_auth.user_pool.user_pool_arn)
        # NOTE(review): the attribute is named `vpc` but holds a
        # DatabaseConstruct — confirm intent before renaming.
        self.vpc = DatabaseConstruct(self, "db", self.kesher_auth.user_pool.user_pool_arn)
| 37.95 | 117 | 0.778656 | import getpass
from aws_cdk import core
from kesher_service_cdk.service_stack.kesher_construct import KesherServiceEnvironment
from kesher_service_cdk.service_stack.stack_utils import get_stack_name
from .auth_construct import KesherAuth
from .db_construct import DatabaseConstruct
class KesherStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
self.kesher_auth = KesherAuth(self, f"{get_stack_name()}Auth")
self.kesher_service_env = KesherServiceEnvironment(self, "Service", self.kesher_auth.user_pool.user_pool_arn)
self.vpc = DatabaseConstruct(self, "db", self.kesher_auth.user_pool.user_pool_arn)
| true | true |
f7fd43200a0198becb084621acb7b8574ad5ef45 | 6,077 | py | Python | libs/parametric_models.py | kamieen03/style-transfer-server | 91727ec62080215a0b870ce043faf0657137b84b | [
"BSD-2-Clause"
] | null | null | null | libs/parametric_models.py | kamieen03/style-transfer-server | 91727ec62080215a0b870ce043faf0657137b84b | [
"BSD-2-Clause"
] | null | null | null | libs/parametric_models.py | kamieen03/style-transfer-server | 91727ec62080215a0b870ce043faf0657137b84b | [
"BSD-2-Clause"
] | null | null | null | import torch
import torch.nn as nn
class encoder3(nn.Module):
    """VGG-style 3-level encoder; `W` scales channel widths, `v2` narrows the stem.

    Attribute names (conv1, reflecPad1, ...) are load-bearing: saved
    checkpoints key their weights on them, so do not rename.
    NOTE(review): the `reflecPad*` attributes are actually ZeroPad2d layers;
    the names are kept for checkpoint compatibility.
    """
    def __init__(self, W, v2):
        super(encoder3,self).__init__() # W - width multiplier
        # Spatial-size comments below assume a 224x224 input — TODO confirm.
        # 224 x 224
        self.conv1 = nn.Conv2d(3,3,1,1,0)
        self.reflecPad1 = nn.ZeroPad2d((1,1,1,1))
        # 226 x 226
        self.conv2 = nn.Conv2d(3,32 if v2 else int(64*W),3,1,0)
        self.relu2 = nn.ReLU(inplace=True)
        # 224 x 224
        self.reflecPad3 = nn.ZeroPad2d((1,1,1,1))
        self.conv3 = nn.Conv2d(32 if v2 else int(64*W),int(64*W),3,1,0)
        self.relu3 = nn.ReLU(inplace=True)
        # 224 x 224
        self.maxPool = nn.MaxPool2d(kernel_size=2,stride=2,return_indices = False)
        # 112 x 112
        self.reflecPad4 = nn.ZeroPad2d((1,1,1,1))
        self.conv4 = nn.Conv2d(int(64*W),int(128*W),3,1,0)
        self.relu4 = nn.ReLU(inplace=True)
        # 112 x 112
        self.reflecPad5 = nn.ZeroPad2d((1,1,1,1))
        self.conv5 = nn.Conv2d(int(128*W),int(128*W),3,1,0)
        self.relu5 = nn.ReLU(inplace=True)
        # 112 x 112
        self.maxPool2 = nn.MaxPool2d(kernel_size=2,stride=2,return_indices = False)
        # 56 x 56
        self.reflecPad6 = nn.ZeroPad2d((1,1,1,1))
        self.conv6 = nn.Conv2d(int(128*W),int(256*W),3,1,0)
        self.relu6 = nn.ReLU(inplace=True)
        # 56 x 56
    def forward(self,x):
        # Input is expected in [0, 255]; scale to [0, 1] before the network.
        x = x / 255.0
        out = self.conv1(x)
        out = self.reflecPad1(out)
        out = self.conv2(out)
        out = self.relu2(out)
        out = self.reflecPad3(out)
        out = self.conv3(out)
        pool1 = self.relu3(out)
        out = self.maxPool(pool1)
        out = self.reflecPad4(out)
        out = self.conv4(out)
        out = self.relu4(out)
        out = self.reflecPad5(out)
        out = self.conv5(out)
        pool2 = self.relu5(out)
        out = self.maxPool2(pool2)
        out = self.reflecPad6(out)
        out = self.conv6(out)
        out = self.relu6(out)
        # Deepest feature map only; pool1/pool2 are not returned.
        return out
class decoder3(nn.Module):
    """Mirror of encoder3: upsamples the deepest features back to an RGB image.

    Attribute names key the checkpoint weights — do not rename.
    """
    def __init__(self, W, v2):
        super(decoder3,self).__init__()
        # decoder; spatial-size comments assume a 224x224 source image.
        self.reflecPad7 = nn.ZeroPad2d((1,1,1,1))
        self.conv7 = nn.Conv2d(int(256*W),int(128*W),3,1,0)
        self.relu7 = nn.ReLU(inplace=True)
        # 56 x 56
        self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
        # 112 x 112
        self.reflecPad8 = nn.ZeroPad2d((1,1,1,1))
        self.conv8 = nn.Conv2d(int(128*W),int(128*W),3,1,0)
        self.relu8 = nn.ReLU(inplace=True)
        # 112 x 112
        self.reflecPad9 = nn.ZeroPad2d((1,1,1,1))
        self.conv9 = nn.Conv2d(int(128*W),int(64*W),3,1,0)
        self.relu9 = nn.ReLU(inplace=True)
        self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
        # 224 x 224
        self.reflecPad10 = nn.ZeroPad2d((1,1,1,1))
        self.conv10 = nn.Conv2d(int(64*W),32 if v2 else int(64*W),3,1,0)
        self.relu10 = nn.ReLU(inplace=True)
        self.reflecPad11 = nn.ZeroPad2d((1,1,1,1))
        self.conv11 = nn.Conv2d(32 if v2 else int(64*W),3,3,1,0)
    def forward(self,x):
        # NOTE(review): `output` is never used — dead local kept for
        # byte-compatibility; safe to remove in a behavioural change.
        output = {}
        out = self.reflecPad7(x)
        out = self.conv7(out)
        out = self.relu7(out)
        out = self.unpool(out)
        out = self.reflecPad8(out)
        out = self.conv8(out)
        out = self.relu8(out)
        out = self.reflecPad9(out)
        out = self.conv9(out)
        out = self.relu9(out)
        out = self.unpool2(out)
        out = self.reflecPad10(out)
        out = self.conv10(out)
        out = self.relu10(out)
        out = self.reflecPad11(out)
        out = self.conv11(out)
        # Clamp to [0, 1] then rescale to the [0, 255] image range used by
        # the encoder's input convention.
        out = out.clamp(0,1)*255
        return out
class CNN(nn.Module):
    """Compresses a feature map into a matrixSize x matrixSize transform matrix.

    NOTE(review): `forward` hard-codes batch 1, 32 channels and a 144x256
    feature map (`view(1,32,-1)`, `.div(144*256)`), so it only works with
    matrixSize=32 and that exact input resolution — presumably a deliberate
    export-time specialisation (see the commented-out dynamic-size code);
    confirm before generalising.
    """
    def __init__(self,W,matrixSize=32):
        super(CNN,self).__init__()
        # Input: 256*W channels; reduce down to matrixSize channels.
        self.convs = nn.Sequential(nn.Conv2d(int(256*W),int(128*W),3,1,1),
                                   nn.ReLU(inplace=True),
                                   nn.Conv2d(int(128*W),int(64*W),3,1,1),
                                   nn.ReLU(inplace=True),
                                   nn.Conv2d(int(64*W),matrixSize,3,1,1))
        # Maps the flattened Gram matrix to the output transform matrix.
        self.fc = nn.Linear(matrixSize*matrixSize,matrixSize*matrixSize)
    def forward(self,x):
        out = self.convs(x)
        # Flatten spatial dims; hard-coded batch=1, channels=32 (see class note).
        #b,c,h,w = out.size()
        out = out.view(1,32, -1)
        # Gram matrix of the compressed features, normalised by the fixed
        # spatial size 144*256.
        out = torch.bmm(out,out.transpose(1,2)).div(144*256)
        out = out.view(1,-1)
        return self.fc(out)
class MulLayer(nn.Module):
    """Linear style-transfer layer: whitens content features and applies a
    learned style transform (per "Learning Linear Transformations for Fast
    Image and Video Style Transfer"-style models).

    NOTE(review): like CNN.forward, this hard-codes batch 1, 64 feature
    channels and a 256x144 feature map in the `view` calls — only valid for
    that exact configuration; confirm before reuse.
    """
    def __init__(self,W,matrixSize=32):
        super(MulLayer,self).__init__()
        self.snet = CNN(W,matrixSize)   # predicts the style transform
        self.cnet = CNN(W,matrixSize)   # predicts the content transform
        self.matrixSize = matrixSize
        # 1x1 convs that project features down to / up from matrixSize channels.
        self.compress = nn.Conv2d(int(256*W),matrixSize,1,1,0)
        self.unzip = nn.Conv2d(matrixSize,int(256*W),1,1,0)
        self.transmatrix = None  # cached last combined transform
    def forward(self, cF, sF, alpha=1.0, trans=True):
        # cF: content features, sF: style features; alpha blends style into
        # content statistics.  NOTE(review): `trans` is accepted but unused.
        # Center content features (hard-coded batch=1, 64 channels).
        cFF = cF.view(1, 64, -1)
        cMean = torch.mean(cFF,dim=2,keepdim=True)
        cMean = cMean.unsqueeze(3)
        cF = cF - cMean
        # Center style features the same way.
        sFF = sF.view(1, 64, -1)
        sMean = torch.mean(sFF,dim=2,keepdim=True)
        sMean = sMean.unsqueeze(3)
        sF = sF - sMean
        # Blend centered style with centered content by alpha.
        sF = sF * alpha + (1-alpha) * cF
        compress_content = self.compress(cF)
        compress_content = compress_content.view(1,32,-1)
        # Predict per-image transform matrices and combine them.
        cMatrix = self.cnet(cF)
        sMatrix = self.snet(sF)
        sMatrix = sMatrix.view(1,self.matrixSize,self.matrixSize)
        cMatrix = cMatrix.view(1,self.matrixSize,self.matrixSize)
        self.transmatrix = torch.bmm(sMatrix,cMatrix)
        # Apply transform in compressed space, expand back, re-add style mean.
        transfeature = torch.bmm(self.transmatrix,compress_content).view(1,32,256,144)
        out = self.unzip(transfeature.view(1,32,256,144))
        out = out + sMean
        return out
| 32.497326 | 86 | 0.552082 | import torch
import torch.nn as nn
class encoder3(nn.Module):
def __init__(self, W, v2):
super(encoder3,self).__init__()
self.conv1 = nn.Conv2d(3,3,1,1,0)
self.reflecPad1 = nn.ZeroPad2d((1,1,1,1))
self.conv2 = nn.Conv2d(3,32 if v2 else int(64*W),3,1,0)
self.relu2 = nn.ReLU(inplace=True)
self.reflecPad3 = nn.ZeroPad2d((1,1,1,1))
self.conv3 = nn.Conv2d(32 if v2 else int(64*W),int(64*W),3,1,0)
self.relu3 = nn.ReLU(inplace=True)
self.maxPool = nn.MaxPool2d(kernel_size=2,stride=2,return_indices = False)
self.reflecPad4 = nn.ZeroPad2d((1,1,1,1))
self.conv4 = nn.Conv2d(int(64*W),int(128*W),3,1,0)
self.relu4 = nn.ReLU(inplace=True)
self.reflecPad5 = nn.ZeroPad2d((1,1,1,1))
self.conv5 = nn.Conv2d(int(128*W),int(128*W),3,1,0)
self.relu5 = nn.ReLU(inplace=True)
self.maxPool2 = nn.MaxPool2d(kernel_size=2,stride=2,return_indices = False)
self.reflecPad6 = nn.ZeroPad2d((1,1,1,1))
self.conv6 = nn.Conv2d(int(128*W),int(256*W),3,1,0)
self.relu6 = nn.ReLU(inplace=True)
def forward(self,x):
x = x / 255.0
out = self.conv1(x)
out = self.reflecPad1(out)
out = self.conv2(out)
out = self.relu2(out)
out = self.reflecPad3(out)
out = self.conv3(out)
pool1 = self.relu3(out)
out = self.maxPool(pool1)
out = self.reflecPad4(out)
out = self.conv4(out)
out = self.relu4(out)
out = self.reflecPad5(out)
out = self.conv5(out)
pool2 = self.relu5(out)
out = self.maxPool2(pool2)
out = self.reflecPad6(out)
out = self.conv6(out)
out = self.relu6(out)
return out
class decoder3(nn.Module):
    """Mirror of the 3-stage encoder: maps int(256*W)-channel feature maps
    back to an RGB image with values in [0, 255].

    Args:
        W: channel-width multiplier applied to the base channel counts.
        v2: if True, the last decoder stage uses a fixed 32-channel
            bottleneck instead of int(64*W) channels.

    NOTE(review): attributes are named "reflecPad*" but use nn.ZeroPad2d —
    presumably changed from reflection padding at some point; names are kept
    so pretrained state_dicts still load.
    """
    def __init__(self, W, v2):
        super(decoder3, self).__init__()
        self.reflecPad7 = nn.ZeroPad2d((1, 1, 1, 1))
        self.conv7 = nn.Conv2d(int(256 * W), int(128 * W), 3, 1, 0)
        self.relu7 = nn.ReLU(inplace=True)
        self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
        self.reflecPad8 = nn.ZeroPad2d((1, 1, 1, 1))
        self.conv8 = nn.Conv2d(int(128 * W), int(128 * W), 3, 1, 0)
        self.relu8 = nn.ReLU(inplace=True)
        self.reflecPad9 = nn.ZeroPad2d((1, 1, 1, 1))
        self.conv9 = nn.Conv2d(int(128 * W), int(64 * W), 3, 1, 0)
        self.relu9 = nn.ReLU(inplace=True)
        self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
        self.reflecPad10 = nn.ZeroPad2d((1, 1, 1, 1))
        self.conv10 = nn.Conv2d(int(64 * W), 32 if v2 else int(64 * W), 3, 1, 0)
        self.relu10 = nn.ReLU(inplace=True)
        self.reflecPad11 = nn.ZeroPad2d((1, 1, 1, 1))
        self.conv11 = nn.Conv2d(32 if v2 else int(64 * W), 3, 3, 1, 0)

    def forward(self, x):
        """Decode feature maps to an image; output spatial size is 4x input."""
        # (removed an unused `output = {}` local that was never referenced)
        out = self.reflecPad7(x)
        out = self.conv7(out)
        out = self.relu7(out)
        out = self.unpool(out)  # 2x nearest-neighbour upsample
        out = self.reflecPad8(out)
        out = self.conv8(out)
        out = self.relu8(out)
        out = self.reflecPad9(out)
        out = self.conv9(out)
        out = self.relu9(out)
        out = self.unpool2(out)  # second 2x upsample
        out = self.reflecPad10(out)
        out = self.conv10(out)
        out = self.relu10(out)
        out = self.reflecPad11(out)
        out = self.conv11(out)
        # Clamp to [0, 1] and rescale to 8-bit pixel range.
        out = out.clamp(0, 1) * 255
        return out
class CNN(nn.Module):
    """Predict a (matrixSize x matrixSize) transformation matrix from features.

    The input feature map (int(256*W) channels) is reduced to `matrixSize`
    channels, its Gram matrix is computed, and a fully-connected layer maps
    the flattened Gram matrix to the flattened transformation matrix.

    Args:
        W: channel-width multiplier; input must have int(256*W) channels.
        matrixSize: edge length of the square transformation matrix.
    """
    def __init__(self, W, matrixSize=32):
        super(CNN, self).__init__()
        self.matrixSize = matrixSize
        self.convs = nn.Sequential(
            nn.Conv2d(int(256 * W), int(128 * W), 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(int(128 * W), int(64 * W), 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(int(64 * W), matrixSize, 3, 1, 1))
        self.fc = nn.Linear(matrixSize * matrixSize, matrixSize * matrixSize)

    def forward(self, x):
        batch = x.size(0)
        feat = self.convs(x)
        # Flatten spatial dims to (B, matrixSize, H*W). The original
        # hard-coded 32 and batch 1 here, which broke any non-default
        # matrixSize and multi-image batches.
        feat = feat.view(batch, self.matrixSize, -1)
        # Gram matrix of the compressed features.
        # NOTE(review): the 144*256 normaliser assumes a 256x144 feature map;
        # kept as-is for backward compatibility with pretrained weights.
        gram = torch.bmm(feat, feat.transpose(1, 2)).div(144 * 256)
        return self.fc(gram.view(batch, -1))
class MulLayer(nn.Module):
    """Linear feature-transfer module: predicts transformation matrices from
    content (cF) and style (sF) features and applies their product to the
    compressed content features.

    NOTE(review): batch size 1, 64 feature channels (i.e. W=0.25) and a
    256x144 spatial size are hard-coded in forward() — confirm before using
    other widths, batch sizes or resolutions.
    """
    def __init__(self,W,matrixSize=32):
        super(MulLayer,self).__init__()
        self.snet = CNN(W,matrixSize)  # predicts the style matrix
        self.cnet = CNN(W,matrixSize)  # predicts the content matrix
        self.matrixSize = matrixSize
        # 1x1 convs projecting features down to / up from matrixSize channels.
        self.compress = nn.Conv2d(int(256*W),matrixSize,1,1,0)
        self.unzip = nn.Conv2d(matrixSize,int(256*W),1,1,0)
        self.transmatrix = None  # last computed transform, kept for inspection
    def forward(self, cF, sF, alpha=1.0, trans=True):
        # NOTE(review): the `trans` flag is currently unused.
        # Zero-center the content features channel-wise.
        cFF = cF.view(1, 64, -1)
        cMean = torch.mean(cFF,dim=2,keepdim=True)
        cMean = cMean.unsqueeze(3)
        cF = cF - cMean
        # Zero-center the style features channel-wise.
        sFF = sF.view(1, 64, -1)
        sMean = torch.mean(sFF,dim=2,keepdim=True)
        sMean = sMean.unsqueeze(3)
        sF = sF - sMean
        # Blend style with content by alpha (alpha=1 -> pure style features).
        sF = sF * alpha + (1-alpha) * cF
        compress_content = self.compress(cF)
        compress_content = compress_content.view(1,32,-1)
        cMatrix = self.cnet(cF)
        sMatrix = self.snet(sF)
        sMatrix = sMatrix.view(1,self.matrixSize,self.matrixSize)
        cMatrix = cMatrix.view(1,self.matrixSize,self.matrixSize)
        # Combined transform = style matrix @ content matrix.
        self.transmatrix = torch.bmm(sMatrix,cMatrix)
        # Hard-coded 256x144 spatial shape — see class NOTE above.
        transfeature = torch.bmm(self.transmatrix,compress_content).view(1,32,256,144)
        out = self.unzip(transfeature.view(1,32,256,144))
        # Re-apply the style channel means.
        out = out + sMean
        return out
| true | true |
f7fd43c48d9247f178140699008c87759857dff9 | 4,687 | py | Python | selfdrive/debug/mpc/tune_longitudinal.py | Neptos/openpilot | 01914a1a91ade18bd7aead99e7d1bf38cd22ad89 | [
"MIT"
] | 43 | 2019-10-23T13:02:21.000Z | 2021-09-09T04:41:49.000Z | selfdrive/debug/mpc/tune_longitudinal.py | Neptos/openpilot | 01914a1a91ade18bd7aead99e7d1bf38cd22ad89 | [
"MIT"
] | 16 | 2021-04-01T00:28:36.000Z | 2021-05-22T15:37:07.000Z | selfdrive/debug/mpc/tune_longitudinal.py | Neptos/openpilot | 01914a1a91ade18bd7aead99e7d1bf38cd22ad89 | [
"MIT"
] | 28 | 2019-04-23T09:19:26.000Z | 2022-03-14T04:01:09.000Z | #! /usr/bin/env python
# type: ignore
import numpy as np
import matplotlib.pyplot as plt
from selfdrive.controls.lib.longitudinal_mpc import libmpc_py
from selfdrive.controls.lib.drive_helpers import MPC_COST_LONG
# plot longitudinal MPC trajectory by defining boundary conditions:
# ego and lead vehicle states. Use this script to tune MPC costs.
def RW(v_ego, v_l):
    """Desired following gap [m] given ego speed v_ego and lead speed v_l.

    Combines a time-headway term (TR seconds) with the difference of the
    two vehicles' braking distances at deceleration G.
    """
    TR = 1.8
    G = 9.81
    ego_headway = v_ego * TR
    closing_headway = (v_l - v_ego) * TR
    ego_brake = v_ego * v_ego / (2 * G)
    lead_brake = v_l * v_l / (2 * G)
    return ego_headway - closing_headway + ego_brake - lead_brake


def NORM_RW_ERROR(v_ego, v_l, p):
    """Signed error between the desired gap (plus a 4 m offset) and gap p."""
    desired_gap = RW(v_ego, v_l) + 4.0
    return desired_gap - p
# Boundary conditions for the simulation: ego and lead vehicle states.
v_ego = 20.0
a_ego = 0
x_lead = 10.0
v_lead = 20.0
a_lead = -3.0
a_lead_tau = 0.

# Alternative scenario kept for reference:
# v_ego = 7.02661012716
# a_ego = -1.26143024772
# x_lead = 29.625 + 20
# v_lead = 0.725235462189 + 1
# a_lead = -1.00025629997
# a_lead_tau = 2.90729817665

#min_a_lead_tau = (a_lead**2 * math.pi) / (2 * (v_lead + 0.01)**2)
min_a_lead_tau = 0.0

print(a_lead_tau, min_a_lead_tau)
a_lead_tau = max(a_lead_tau, min_a_lead_tau)

ffi, libmpc = libmpc_py.get_libmpc(1)
libmpc.init(MPC_COST_LONG.TTC, MPC_COST_LONG.DISTANCE, MPC_COST_LONG.ACCELERATION, MPC_COST_LONG.JERK)
libmpc.init_with_simulation(v_ego, x_lead, v_lead, a_lead, a_lead_tau)

cur_state = ffi.new("state_t *")
cur_state[0].x_ego = 0.0
cur_state[0].v_ego = v_ego
cur_state[0].a_ego = a_ego
cur_state[0].x_l = x_lead
cur_state[0].v_l = v_lead

mpc_solution = ffi.new("log_t *")

# Iterate the solver a few times so it converges from the initial guess.
for _ in range(10):
    print(libmpc.run_mpc(cur_state, mpc_solution, a_lead_tau, a_lead))

for i in range(21):
    print("t: %.2f\t x_e: %.2f\t v_e: %.2f\t a_e: %.2f\t" % (mpc_solution[0].t[i], mpc_solution[0].x_ego[i], mpc_solution[0].v_ego[i], mpc_solution[0].a_ego[i]))
    print("x_l: %.2f\t v_l: %.2f\t \t" % (mpc_solution[0].x_l[i], mpc_solution[0].v_l[i]))

# Non-uniform time grid matching the MPC horizon.
t = np.hstack([np.arange(0., 1.0, 0.2), np.arange(1.0, 10.1, 0.6)])

# map() returns a lazy iterator on Python 3, so the solution arrays must be
# materialized before indexing or plotting (the original `map(...)[-1]`
# raised a TypeError under Python 3).
x_ego = np.array(list(map(float, mpc_solution[0].x_ego)))
x_l = np.array(list(map(float, mpc_solution[0].x_l)))
print(x_ego[-1])
print(x_l[-1] - x_ego[-1])

plt.figure(figsize=(8, 8))

plt.subplot(4, 1, 1)
plt.plot(t, x_ego)
plt.plot(t, x_l)
plt.legend(['ego', 'lead'])
plt.title('x')
plt.grid()

plt.subplot(4, 1, 2)
v_ego = np.array(list(map(float, mpc_solution[0].v_ego)))
v_l = np.array(list(map(float, mpc_solution[0].v_l)))
plt.plot(t, v_ego)
plt.plot(t, v_l)
plt.legend(['ego', 'lead'])
plt.ylim([-1, max(max(v_ego), max(v_l))])
plt.title('v')
plt.grid()

plt.subplot(4, 1, 3)
plt.plot(t, list(map(float, mpc_solution[0].a_ego)))
plt.plot(t, list(map(float, mpc_solution[0].a_l)))
plt.legend(['ego', 'lead'])
plt.title('a')
plt.grid()

plt.subplot(4, 1, 4)
d_l = x_l - x_ego
desired = 4.0 + RW(v_ego, v_l)
plt.plot(t, d_l)
plt.plot(t, desired, '--')
plt.ylim(-1, max(max(desired), max(d_l)))
plt.legend(['relative distance', 'desired distance'])
plt.grid()

plt.show()
# c1 = np.exp(0.3 * NORM_RW_ERROR(v_ego, v_l, d_l))
# c2 = np.exp(4.5 - d_l)
# print(c1)
# print(c2)
# plt.figure()
# plt.plot(t, c1, label="NORM_RW_ERROR")
# plt.plot(t, c2, label="penalty function")
# plt.legend()
# ## OLD MPC
# a_lead_tau = 1.5
# a_lead_tau = max(a_lead_tau, -a_lead / (v_lead + 0.01))
# ffi, libmpc = libmpc_py.get_libmpc(1)
# libmpc.init(MPC_COST_LONG.TTC, MPC_COST_LONG.DISTANCE, MPC_COST_LONG.ACCELERATION, MPC_COST_LONG.JERK)
# libmpc.init_with_simulation(v_ego, x_lead, v_lead, a_lead, a_lead_tau)
# cur_state = ffi.new("state_t *")
# cur_state[0].x_ego = 0.0
# cur_state[0].v_ego = v_ego
# cur_state[0].a_ego = a_ego
# cur_state[0].x_lead = x_lead
# cur_state[0].v_lead = v_lead
# cur_state[0].a_lead = a_lead
# mpc_solution = ffi.new("log_t *")
# for _ in range(10):
# print libmpc.run_mpc(cur_state, mpc_solution, a_lead_tau)
# t = np.hstack([np.arange(0., 1.0, 0.2), np.arange(1.0, 10.1, 0.6)])
# print(map(float, mpc_solution[0].x_ego)[-1])
# print(map(float, mpc_solution[0].x_lead)[-1] - map(float, mpc_solution[0].x_ego)[-1])
# plt.subplot(4, 2, 2)
# plt.plot(t, map(float, mpc_solution[0].x_ego))
# plt.plot(t, map(float, mpc_solution[0].x_lead))
# plt.legend(['ego', 'lead'])
# plt.title('x')
# plt.subplot(4, 2, 4)
# plt.plot(t, map(float, mpc_solution[0].v_ego))
# plt.plot(t, map(float, mpc_solution[0].v_lead))
# plt.legend(['ego', 'lead'])
# plt.title('v')
# plt.subplot(4, 2, 6)
# plt.plot(t, map(float, mpc_solution[0].a_ego))
# plt.plot(t, map(float, mpc_solution[0].a_lead))
# plt.legend(['ego', 'lead'])
# plt.title('a')
# plt.subplot(4, 2, 8)
# plt.plot(t, np.array(map(float, mpc_solution[0].x_lead)) - np.array(map(float, mpc_solution[0].x_ego)))
# plt.show()
| 27.733728 | 159 | 0.671858 |
import numpy as np
import matplotlib.pyplot as plt
from selfdrive.controls.lib.longitudinal_mpc import libmpc_py
from selfdrive.controls.lib.drive_helpers import MPC_COST_LONG
def RW(v_ego, v_l):
TR = 1.8
G = 9.81
return (v_ego * TR - (v_l - v_ego) * TR + v_ego*v_ego/(2*G) - v_l*v_l / (2*G))
def NORM_RW_ERROR(v_ego, v_l, p):
return (RW(v_ego, v_l) + 4.0 - p)
v_ego = 20.0
a_ego = 0
x_lead = 10.0
v_lead = 20.0
a_lead = -3.0
a_lead_tau = 0.
min_a_lead_tau = 0.0
print(a_lead_tau, min_a_lead_tau)
a_lead_tau = max(a_lead_tau, min_a_lead_tau)
ffi, libmpc = libmpc_py.get_libmpc(1)
libmpc.init(MPC_COST_LONG.TTC, MPC_COST_LONG.DISTANCE, MPC_COST_LONG.ACCELERATION, MPC_COST_LONG.JERK)
libmpc.init_with_simulation(v_ego, x_lead, v_lead, a_lead, a_lead_tau)
cur_state = ffi.new("state_t *")
cur_state[0].x_ego = 0.0
cur_state[0].v_ego = v_ego
cur_state[0].a_ego = a_ego
cur_state[0].x_l = x_lead
cur_state[0].v_l = v_lead
mpc_solution = ffi.new("log_t *")
for _ in range(10):
print(libmpc.run_mpc(cur_state, mpc_solution, a_lead_tau, a_lead))
for i in range(21):
print("t: %.2f\t x_e: %.2f\t v_e: %.2f\t a_e: %.2f\t" % (mpc_solution[0].t[i], mpc_solution[0].x_ego[i], mpc_solution[0].v_ego[i], mpc_solution[0].a_ego[i]))
print("x_l: %.2f\t v_l: %.2f\t \t" % (mpc_solution[0].x_l[i], mpc_solution[0].v_l[i]))
t = np.hstack([np.arange(0., 1.0, 0.2), np.arange(1.0, 10.1, 0.6)])
print(map(float, mpc_solution[0].x_ego)[-1])
print(map(float, mpc_solution[0].x_l)[-1] - map(float, mpc_solution[0].x_ego)[-1])
plt.figure(figsize=(8, 8))
plt.subplot(4, 1, 1)
x_l = np.array(map(float, mpc_solution[0].x_l))
plt.plot(t, map(float, mpc_solution[0].x_ego))
plt.plot(t, x_l)
plt.legend(['ego', 'lead'])
plt.title('x')
plt.grid()
plt.subplot(4, 1, 2)
v_ego = np.array(map(float, mpc_solution[0].v_ego))
v_l = np.array(map(float, mpc_solution[0].v_l))
plt.plot(t, v_ego)
plt.plot(t, v_l)
plt.legend(['ego', 'lead'])
plt.ylim([-1, max(max(v_ego), max(v_l))])
plt.title('v')
plt.grid()
plt.subplot(4, 1, 3)
plt.plot(t, map(float, mpc_solution[0].a_ego))
plt.plot(t, map(float, mpc_solution[0].a_l))
plt.legend(['ego', 'lead'])
plt.title('a')
plt.grid()
plt.subplot(4, 1, 4)
d_l = np.array(map(float, mpc_solution[0].x_l)) - np.array(map(float, mpc_solution[0].x_ego))
desired = 4.0 + RW(v_ego, v_l)
plt.plot(t, d_l)
plt.plot(t, desired, '--')
plt.ylim(-1, max(max(desired), max(d_l)))
plt.legend(['relative distance', 'desired distance'])
plt.grid()
plt.show()
| true | true |
f7fd43dd07736ba78ef3bcbcd4e5ba0fe1971312 | 142 | py | Python | Task/JSON/Python/json-2.py | mullikine/RosettaCodeData | 4f0027c6ce83daa36118ee8b67915a13cd23ab67 | [
"Info-ZIP"
] | 5 | 2021-01-29T20:08:05.000Z | 2022-03-22T06:16:05.000Z | Task/JSON/Python/json-2.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | null | null | null | Task/JSON/Python/json-2.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | 1 | 2018-11-09T22:08:40.000Z | 2018-11-09T22:08:40.000Z | >>> true = True; false = False; null = None
>>> data = eval('{ "foo": 1, "bar": [10, "apples"] }')
>>> data
{'foo': 1, 'bar': [10, 'apples']}
| 28.4 | 54 | 0.478873 | >>> true = True; false = False; null = None
>>> data = eval('{ "foo": 1, "bar": [10, "apples"] }')
>>> data
{'foo': 1, 'bar': [10, 'apples']}
| false | true |
f7fd456591c952286c4147c312e1a8e9ae09e578 | 9,950 | py | Python | QQ_History.py | ZhangJun2017/QQ-History-Backup | 434cdea878798fe7c107a580b12dfbdba288db4d | [
"MIT"
] | null | null | null | QQ_History.py | ZhangJun2017/QQ-History-Backup | 434cdea878798fe7c107a580b12dfbdba288db4d | [
"MIT"
] | null | null | null | QQ_History.py | ZhangJun2017/QQ-History-Backup | 434cdea878798fe7c107a580b12dfbdba288db4d | [
"MIT"
] | null | null | null | import hashlib
import sqlite3
import time
import os
import traceback
import json
import base64
from proto.RichMsg_pb2 import PicRec
from proto.RichMsg_pb2 import Elem
# CRC64 lookup table, built lazily on the first crc64() call.
_crc64_init = False
_crc64_table = [0] * 256


def crc64(s):
    """CRC64 of the characters of *s* (used to derive image cache filenames).

    Returns a signed Python int; negative values are expected because the
    accumulator starts at -1 and uses arithmetic right shifts.
    """
    global _crc64_init
    if not _crc64_init:
        # Build the 256-entry table: 8 rounds of the reflected polynomial.
        for byte in range(256):
            crc = byte
            for _ in range(8):
                if crc & 1:
                    crc = (crc >> 1) ^ -7661587058870466123
                else:
                    crc >>= 1
            _crc64_table[byte] = crc
        _crc64_init = True
    value = -1
    for ch in s:
        value = _crc64_table[(ord(ch) ^ value) & 255] ^ (value >> 8)
    return value
class QQoutput():
    """Exports one QQ chat history (friend or group) from the app's SQLite
    databases to an HTML transcript named "<qq>.html".

    Args:
        path: root of the extracted QQ data directory (contains files/ and
            databases/; vendor layouts f/ and db/ are renamed on the fly).
        qq_self: the account's own QQ number (database filename stem).
        qq: the peer QQ number (friend) or group number (troop).
        mode: 1 for a friend chat, anything else for a group (troop) chat.
        emoji: 1 renders the "new" PNG emoji set, otherwise the "old" GIF set.
        with_img: if False, picture/mixed/share messages are dropped entirely.
        combine_img: if True, images are inlined as base64 data URIs.
    """
    def __init__(self, path, qq_self, qq, mode, emoji, with_img, combine_img):
        self.db_path = path
        self.key = self.get_key()  # XOR key used to decrypt database fields
        db = os.path.join(path, "databases", qq_self + ".db")
        self.c1 = sqlite3.connect(db).cursor()
        # The "slowtable" database holds older messages for the same account.
        db = os.path.join(path, "databases", "slowtable_" + qq_self + ".db")
        self.c2 = sqlite3.connect(db).cursor()
        self.qq_self = qq_self
        self.qq = qq
        self.mode = mode
        self.emoji = emoji
        self.with_img = with_img
        self.combine_img = combine_img
        self.num_to_name = {}  # QQ number -> display name
        self.emoji_map = self.map_new_emoji()
    def decrypt(self, data, msg_type=-1000):
        """XOR-decrypt a database field with the account key.

        str input is decrypted and returned directly; bytes input is
        decrypted and then decoded according to msg_type (-1000 plain text,
        -2000 picture, -1035 mixed text+picture, -5008 share card).
        Returns None for rich types when with_img is off or for unknown types.
        """
        msg = b''
        if type(data) == bytes:
            msg = b''
            for i in range(0, len(data)):
                msg += bytes([data[i] ^ ord(self.key[i % len(self.key)])])
        elif type(data) == str:
            msg = ''
            for i in range(0, len(data)):
                msg += chr(ord(data[i]) ^ ord(self.key[i % len(self.key)]))
            return msg
        if msg_type == -1000:
            try:
                return msg.decode('utf-8')
            except:
                # print(msg)
                pass
            return '[decode error]'
        if not self.with_img:
            return None
        elif msg_type == -2000:
            return self.decode_pic(msg)
        elif msg_type == -1035:
            return self.decode_mix_msg(msg)
        elif msg_type == -5008:
            return self.decode_share_url(msg)
        # for debug
        # return '[unknown msg_type {}]'.format(msg_type)
        return None
    def add_emoji(self, msg):
        """Replace \\x14<code> emoji markers in msg with <img> tags, or a
        textual [emoji:n] placeholder when the code is unknown."""
        pos = msg.find('\x14')
        while pos != -1:
            lastpos = pos
            num = ord(msg[pos + 1])
            if str(num) in self.emoji_map:
                index = self.emoji_map[str(num)]
                if self.emoji == 1:
                    filename = "new/s" + index + ".png"
                else:
                    filename = "old/" + index + ".gif"
                emoticon_path = os.path.join('emoticon', filename)
                if self.combine_img:
                    emoticon_path = self.get_base64_from_pic(emoticon_path)
                msg = msg.replace(
                    msg[pos:pos + 2], '<img src="{}" alt="{}" />'.format(emoticon_path, index))
            else:
                msg = msg.replace(msg[pos:pos + 2],
                                  '[emoji:{}]'.format(str(num)))
            pos = msg.find('\x14')
            if pos == lastpos:
                # No progress made (replacement failed); bail out to avoid
                # an infinite loop.
                break
        return msg
    def message(self):
        """Read, decrypt and time-stamp all messages of the selected chat.

        Returns a list of [send_time, msg_type, sender_qq, text] entries in
        chronological order; undecodable/suppressed messages are skipped.
        """
        # mode=1 friend
        # mode=2 troop
        num = self.qq.encode("utf-8")
        # Table names embed the upper-case MD5 of the peer/group number.
        md5num = hashlib.md5(num).hexdigest().upper()
        if self.mode == 1:
            cmd = "select msgData,senderuin,time,msgtype from mr_friend_{}_New order by time".format(
                md5num)
            self.get_friends()
        else:
            cmd = "select msgData,senderuin,time,msgtype from mr_troop_{}_New order by time".format(
                md5num)
            # print('Groups {} -> {}'.format(num, md5num))
            self.get_troop_members()
        cursors = self.fill_cursors(cmd)
        allmsg = []
        for cs in cursors:
            for row in cs:
                msgdata = row[0]
                if not msgdata:
                    continue
                uin = row[1]
                ltime = time.localtime(row[2])
                sendtime = time.strftime("%Y-%m-%d %H:%M:%S", ltime)
                msg_type = row[3]
                msg_final = self.decrypt(msgdata, msg_type)
                if msg_final is None:
                    continue
                allmsg.append(
                    [sendtime, msg_type, self.decrypt(uin), msg_final])
        return allmsg
    def get_friends(self):
        """Populate num_to_name with friend QQ number -> remark name."""
        cmd = "SELECT uin, remark FROM Friends"
        cursors = self.fill_cursors(cmd)
        for cs in cursors:
            for row in cs:
                num = self.decrypt(row[0])
                name = self.decrypt(row[1])
                self.num_to_name[num] = name
    def get_troop_members(self):
        """Populate num_to_name with member QQ number -> nickname for the
        selected group (group-specific nick preferred over the global one)."""
        cmd = "SELECT troopuin, memberuin, friendnick, troopnick FROM TroopMemberInfo"
        cursors = self.fill_cursors(cmd)
        for cs in cursors:
            for row in cs:
                if self.decrypt(row[0]) != self.qq:
                    continue
                num = self.decrypt(row[1])
                name = self.decrypt(row[3]) or self.decrypt(row[2])
                self.num_to_name[num] = name
    def fill_cursors(self, cmd):
        """Execute cmd on both databases and return the resulting cursors."""
        cursors = []
        # slowtable might not contain related message, so just skip it
        try:
            cursors.append(self.c2.execute(cmd))
        except:
            pass
        cursors.append(self.c1.execute(cmd))
        return cursors
    def output(self):
        """Render all messages of the chat into "<qq>.html"."""
        name1 = "我"
        file = str(self.qq) + ".html"
        f2 = open(file, "w", encoding="utf-8")
        f2.write(
            "<head><meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" /></head>"
        )
        allmsg = self.message()
        f2.write("<div style='white-space: pre-line'>")
        for ts, _, uid, msg in allmsg:
            if not msg:
                continue
            if uid == str(self.qq_self):
                # Own messages are right-aligned.
                f2.write("<p align='right'>")
                f2.write("<font color=\"green\">")
                f2.write(ts)
                f2.write("</font>-----<font color=\"blue\"><b>")
                f2.write(name1)
                f2.write("</font></b></br>")
            else:
                f2.write("<p align='left'>")
                f2.write("<font color=\"blue\"><b>")
                f2.write(self.num_to_name.get(uid) or uid)
                f2.write("</b></font>-----<font color=\"green\">")
                f2.write(ts)
                f2.write("</font></br>")
            f2.write(self.add_emoji(msg))
            f2.write("</br></br>")
            f2.write("</p>")
        f2.write("</div>")
    def get_key(self):
        """Read the per-account XOR key from files/kc."""
        self.unify_path()
        kc_path = os.path.join(self.db_path, "files", "kc")
        kc_file = open(kc_path, "r")
        return kc_file.read()
    # unify databases path of different phones
    def unify_path(self):
        """Rename vendor-specific directories (f/, db/) to the expected
        files/ and databases/ layout; raise OSError if files/kc is missing."""
        if os.path.isdir(os.path.join(self.db_path, "f")):
            os.rename(os.path.join(self.db_path, "f"),
                      os.path.join(self.db_path, "files"))
        if os.path.isdir(os.path.join(self.db_path, "db")):
            os.rename(os.path.join(self.db_path, "db"),
                      os.path.join(self.db_path, "databases"))
        if not os.path.isfile(os.path.join(self.db_path, "files", "kc")):
            raise OSError(
                "File not found. Please report your directory layout.")
    def map_new_emoji(self):
        """Build a map from the wire emoji code (AQLid) to the resource id
        used on disk (QSid for the new set, EMCode-100 for the old set)."""
        with open('./emoticon/face_config.json', encoding='utf-8') as f:
            emojis = json.load(f)
        new_emoji_map = {}
        for e in emojis['sysface']:
            if self.emoji == 1:
                new_emoji_map[e["AQLid"]] = e["QSid"]
            else:
                if len(e["EMCode"]) == 3:
                    new_emoji_map[e["AQLid"]] = str(int(e["EMCode"]) - 100)
        return new_emoji_map
    def get_base64_from_pic(self, path):
        """Return the file at path as a base64 data URI (labelled image/png)."""
        with open(path, "rb") as image_file:
            return (b'data:image/png;base64,' + base64.b64encode(image_file.read())).decode("utf-8")
    def decode_pic(self, data):
        """Decode a picture message: locate the cached image via the CRC64 of
        'chatimg:<md5>' and emit an <img> tag; fall back to a placeholder
        when the cache file is missing or parsing fails."""
        try:
            doc = PicRec()
            doc.ParseFromString(data)
            url = 'chatimg:' + doc.md5
            filename = hex(crc64(url))
            filename = 'Cache_' + filename.replace('0x', '')
            # Cache files are sharded into subdirectories by the last 3 chars.
            rel_path = os.path.join("./chatimg/", filename[-3:], filename)
            if os.path.exists(rel_path):
                w = 'auto' if doc.uint32_thumb_width == 0 else str(
                    doc.uint32_thumb_width)
                h = 'auto' if doc.uint32_thumb_height == 0 else str(
                    doc.uint32_thumb_height)
                if self.combine_img:
                    rel_path = self.get_base64_from_pic(rel_path)
                return '<img src="{}" width="{}" height="{}" />'.format(rel_path, w, h)
        except:
            pass
        return '[图片]'
    def decode_mix_msg(self, data):
        """Decode a mixed picture+text message; falls back to a placeholder."""
        try:
            doc = Elem()
            doc.ParseFromString(data)
            img_src = ''
            if doc.picMsg:
                img_src = self.decode_pic(doc.picMsg)
            return img_src + doc.textMsg.decode('utf-8')
        except:
            pass
        return '[混合消息]'
    def decode_share_url(self, msg):
        """Decode a share-card message (not implemented yet)."""
        # TODO
        return '[分享卡片]'
def main(db_path, qq_self, qq, mode, emoji, with_img, combine_img):
    """Export one chat to HTML; see QQoutput for the parameter meanings.

    On failure the exception and traceback are written to log.txt.
    Raises ValueError when the requested chat table does not exist (wrong
    QQ number / chat type), otherwise RuntimeError.
    """
    try:
        q = QQoutput(db_path, qq_self, qq, mode, emoji, with_img, combine_img)
        q.output()
    except Exception as e:
        with open('log.txt', 'w') as f:
            f.write(repr(e))
            f.write(traceback.format_exc())
        print(traceback.format_exc())
        # A missing mr_friend_*/mr_troop_* table means the QQ number or chat
        # type is wrong. Check the exception type directly instead of the
        # previous fragile repr()-prefix string comparison.
        if isinstance(e, sqlite3.OperationalError) and 'no such table' in str(e):
            raise ValueError("信息填入错误") from e
        else:
            # Was `raise BaseException(...)`, which escapes `except Exception`
            # handlers; RuntimeError is still caught by any BaseException catch.
            raise RuntimeError("Error! See log.txt") from e
| 34.075342 | 101 | 0.500905 | import hashlib
import sqlite3
import time
import os
import traceback
import json
import base64
from proto.RichMsg_pb2 import PicRec
from proto.RichMsg_pb2 import Elem
_crc64_init = False
_crc64_table = [0] * 256
def crc64(s):
global _crc64_init
if not _crc64_init:
for i in range(256):
bf = i
for j in range(8):
if bf & 1 != 0:
bf = bf >> 1 ^ -7661587058870466123
else:
bf >>= 1
_crc64_table[i] = bf
_crc64_init = True
v = -1
for i in range(len(s)):
v = _crc64_table[(ord(s[i]) ^ v) & 255] ^ v >> 8
return v
class QQoutput():
def __init__(self, path, qq_self, qq, mode, emoji, with_img, combine_img):
self.db_path = path
self.key = self.get_key()
db = os.path.join(path, "databases", qq_self + ".db")
self.c1 = sqlite3.connect(db).cursor()
db = os.path.join(path, "databases", "slowtable_" + qq_self + ".db")
self.c2 = sqlite3.connect(db).cursor()
self.qq_self = qq_self
self.qq = qq
self.mode = mode
self.emoji = emoji
self.with_img = with_img
self.combine_img = combine_img
self.num_to_name = {}
self.emoji_map = self.map_new_emoji()
def decrypt(self, data, msg_type=-1000):
msg = b''
if type(data) == bytes:
msg = b''
for i in range(0, len(data)):
msg += bytes([data[i] ^ ord(self.key[i % len(self.key)])])
elif type(data) == str:
msg = ''
for i in range(0, len(data)):
msg += chr(ord(data[i]) ^ ord(self.key[i % len(self.key)]))
return msg
if msg_type == -1000:
try:
return msg.decode('utf-8')
except:
pass
return '[decode error]'
if not self.with_img:
return None
elif msg_type == -2000:
return self.decode_pic(msg)
elif msg_type == -1035:
return self.decode_mix_msg(msg)
elif msg_type == -5008:
return self.decode_share_url(msg)
return None
def add_emoji(self, msg):
pos = msg.find('\x14')
while pos != -1:
lastpos = pos
num = ord(msg[pos + 1])
if str(num) in self.emoji_map:
index = self.emoji_map[str(num)]
if self.emoji == 1:
filename = "new/s" + index + ".png"
else:
filename = "old/" + index + ".gif"
emoticon_path = os.path.join('emoticon', filename)
if self.combine_img:
emoticon_path = self.get_base64_from_pic(emoticon_path)
msg = msg.replace(
msg[pos:pos + 2], '<img src="{}" alt="{}" />'.format(emoticon_path, index))
else:
msg = msg.replace(msg[pos:pos + 2],
'[emoji:{}]'.format(str(num)))
pos = msg.find('\x14')
if pos == lastpos:
break
return msg
def message(self):
num = self.qq.encode("utf-8")
md5num = hashlib.md5(num).hexdigest().upper()
if self.mode == 1:
cmd = "select msgData,senderuin,time,msgtype from mr_friend_{}_New order by time".format(
md5num)
self.get_friends()
else:
cmd = "select msgData,senderuin,time,msgtype from mr_troop_{}_New order by time".format(
md5num)
self.get_troop_members()
cursors = self.fill_cursors(cmd)
allmsg = []
for cs in cursors:
for row in cs:
msgdata = row[0]
if not msgdata:
continue
uin = row[1]
ltime = time.localtime(row[2])
sendtime = time.strftime("%Y-%m-%d %H:%M:%S", ltime)
msg_type = row[3]
msg_final = self.decrypt(msgdata, msg_type)
if msg_final is None:
continue
allmsg.append(
[sendtime, msg_type, self.decrypt(uin), msg_final])
return allmsg
def get_friends(self):
cmd = "SELECT uin, remark FROM Friends"
cursors = self.fill_cursors(cmd)
for cs in cursors:
for row in cs:
num = self.decrypt(row[0])
name = self.decrypt(row[1])
self.num_to_name[num] = name
def get_troop_members(self):
cmd = "SELECT troopuin, memberuin, friendnick, troopnick FROM TroopMemberInfo"
cursors = self.fill_cursors(cmd)
for cs in cursors:
for row in cs:
if self.decrypt(row[0]) != self.qq:
continue
num = self.decrypt(row[1])
name = self.decrypt(row[3]) or self.decrypt(row[2])
self.num_to_name[num] = name
def fill_cursors(self, cmd):
cursors = []
try:
cursors.append(self.c2.execute(cmd))
except:
pass
cursors.append(self.c1.execute(cmd))
return cursors
def output(self):
name1 = "我"
file = str(self.qq) + ".html"
f2 = open(file, "w", encoding="utf-8")
f2.write(
"<head><meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" /></head>"
)
allmsg = self.message()
f2.write("<div style='white-space: pre-line'>")
for ts, _, uid, msg in allmsg:
if not msg:
continue
if uid == str(self.qq_self):
f2.write("<p align='right'>")
f2.write("<font color=\"green\">")
f2.write(ts)
f2.write("</font>-----<font color=\"blue\"><b>")
f2.write(name1)
f2.write("</font></b></br>")
else:
f2.write("<p align='left'>")
f2.write("<font color=\"blue\"><b>")
f2.write(self.num_to_name.get(uid) or uid)
f2.write("</b></font>-----<font color=\"green\">")
f2.write(ts)
f2.write("</font></br>")
f2.write(self.add_emoji(msg))
f2.write("</br></br>")
f2.write("</p>")
f2.write("</div>")
def get_key(self):
self.unify_path()
kc_path = os.path.join(self.db_path, "files", "kc")
kc_file = open(kc_path, "r")
return kc_file.read()
def unify_path(self):
if os.path.isdir(os.path.join(self.db_path, "f")):
os.rename(os.path.join(self.db_path, "f"),
os.path.join(self.db_path, "files"))
if os.path.isdir(os.path.join(self.db_path, "db")):
os.rename(os.path.join(self.db_path, "db"),
os.path.join(self.db_path, "databases"))
if not os.path.isfile(os.path.join(self.db_path, "files", "kc")):
raise OSError(
"File not found. Please report your directory layout.")
def map_new_emoji(self):
with open('./emoticon/face_config.json', encoding='utf-8') as f:
emojis = json.load(f)
new_emoji_map = {}
for e in emojis['sysface']:
if self.emoji == 1:
new_emoji_map[e["AQLid"]] = e["QSid"]
else:
if len(e["EMCode"]) == 3:
new_emoji_map[e["AQLid"]] = str(int(e["EMCode"]) - 100)
return new_emoji_map
def get_base64_from_pic(self, path):
with open(path, "rb") as image_file:
return (b'data:image/png;base64,' + base64.b64encode(image_file.read())).decode("utf-8")
def decode_pic(self, data):
try:
doc = PicRec()
doc.ParseFromString(data)
url = 'chatimg:' + doc.md5
filename = hex(crc64(url))
filename = 'Cache_' + filename.replace('0x', '')
rel_path = os.path.join("./chatimg/", filename[-3:], filename)
if os.path.exists(rel_path):
w = 'auto' if doc.uint32_thumb_width == 0 else str(
doc.uint32_thumb_width)
h = 'auto' if doc.uint32_thumb_height == 0 else str(
doc.uint32_thumb_height)
if self.combine_img:
rel_path = self.get_base64_from_pic(rel_path)
return '<img src="{}" width="{}" height="{}" />'.format(rel_path, w, h)
except:
pass
return '[图片]'
def decode_mix_msg(self, data):
try:
doc = Elem()
doc.ParseFromString(data)
img_src = ''
if doc.picMsg:
img_src = self.decode_pic(doc.picMsg)
return img_src + doc.textMsg.decode('utf-8')
except:
pass
return '[混合消息]'
def decode_share_url(self, msg):
return '[分享卡片]'
def main(db_path, qq_self, qq, mode, emoji, with_img, combine_img):
try:
q = QQoutput(db_path, qq_self, qq, mode, emoji, with_img, combine_img)
q.output()
except Exception as e:
with open('log.txt', 'w') as f:
f.write(repr(e))
f.write(traceback.format_exc())
print(traceback.format_exc())
if repr(e).split(":")[0] == "OperationalError('no such table":
raise ValueError("信息填入错误")
else:
raise BaseException("Error! See log.txt")
| true | true |
f7fd45ebe328c5bd4d613f500e5bdba837dad285 | 1,427 | py | Python | cosmic_ray/plugins.py | rob-smallshire/cosmic-ray | 4fd751b38eee30568f8366e09452d7aa60be4e26 | [
"MIT"
] | null | null | null | cosmic_ray/plugins.py | rob-smallshire/cosmic-ray | 4fd751b38eee30568f8366e09452d7aa60be4e26 | [
"MIT"
] | null | null | null | cosmic_ray/plugins.py | rob-smallshire/cosmic-ray | 4fd751b38eee30568f8366e09452d7aa60be4e26 | [
"MIT"
] | null | null | null | """Query and retrieve the various plugins in Cosmic Ray.
"""
from stevedore import driver, ExtensionManager
def get_operator(name):
    """Get an operator class from a plugin.

    Args:
        name: The name of the plugin containing the operator class.

    Returns: The operator *class object* (i.e. not an instance) provided by the
        plugin named `name`.
    """
    return ExtensionManager('cosmic_ray.operators')[name].plugin
def operator_names():
    """Return the names of every registered operator plugin."""
    manager = ExtensionManager('cosmic_ray.operators')
    return manager.names()
def get_test_runner(name, test_args):
    """Load and instantiate the test-runner plugin called *name*.

    The plugin is constructed with ``test_args`` as its single argument.
    """
    manager = driver.DriverManager(
        'cosmic_ray.test_runners',
        name,
        invoke_on_load=True,
        invoke_args=(test_args,))
    return manager.driver
def test_runner_names():
    """Return the names of every registered test-runner plugin."""
    manager = ExtensionManager('cosmic_ray.test_runners')
    return manager.names()
def get_execution_engine(name):
    """Load and instantiate the execution-engine plugin called *name*."""
    manager = driver.DriverManager(
        'cosmic_ray.execution_engines',
        name,
        invoke_on_load=True)
    return manager.driver
def execution_engine_names():
    """Return the names of every registered execution-engine plugin."""
    manager = ExtensionManager('cosmic_ray.execution_engines')
    return manager.names()
| 26.425926 | 79 | 0.698669 |
from stevedore import driver, ExtensionManager
def get_operator(name):
return ExtensionManager('cosmic_ray.operators')[name].plugin
def operator_names():
return ExtensionManager('cosmic_ray.operators').names()
def get_test_runner(name, test_args):
test_runner_manager = driver.DriverManager(
namespace='cosmic_ray.test_runners',
name=name,
invoke_on_load=True,
invoke_args=(test_args,),
)
return test_runner_manager.driver
def test_runner_names():
return ExtensionManager('cosmic_ray.test_runners').names()
def get_execution_engine(name):
manager = driver.DriverManager(
namespace='cosmic_ray.execution_engines',
name=name,
invoke_on_load=True)
return manager.driver
def execution_engine_names():
return ExtensionManager('cosmic_ray.execution_engines').names()
| true | true |
f7fd46a0ac1d951e4a0e0eedb61910a84f7a37fd | 3,575 | py | Python | desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/styles/colors.py | kokosing/hue | 2307f5379a35aae9be871e836432e6f45138b3d9 | [
"Apache-2.0"
] | 28 | 2021-07-23T16:08:55.000Z | 2022-03-15T16:19:32.000Z | desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/styles/colors.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 4 | 2021-03-11T04:02:00.000Z | 2022-03-27T08:31:56.000Z | desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/styles/colors.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 11 | 2021-07-15T04:40:27.000Z | 2022-03-19T14:01:12.000Z | from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
import re
from openpyxl.compat import safe_string, basestring
from openpyxl.descriptors import Descriptor, Typed
from .hashable import HashableObject
from openpyxl.descriptors import String, Bool, MinMax, Integer
# Default Color Index as per 18.8.27 of ECMA Part 4
COLOR_INDEX = (
    '00000000', '00FFFFFF', '00FF0000', '0000FF00', '000000FF', #0-4
    '00FFFF00', '00FF00FF', '0000FFFF', '00000000', '00FFFFFF', #5-9
    '00FF0000', '0000FF00', '000000FF', '00FFFF00', '00FF00FF', #10-14
    '0000FFFF', '00800000', '00008000', '00000080', '00808000', #15-19
    '00800080', '00008080', '00C0C0C0', '00808080', '009999FF', #20-24
    '00993366', '00FFFFCC', '00CCFFFF', '00660066', '00FF8080', #25-29
    '000066CC', '00CCCCFF', '00000080', '00FF00FF', '00FFFF00', #30-34
    '0000FFFF', '00800080', '00800000', '00008080', '000000FF', #35-39
    '0000CCFF', '00CCFFFF', '00CCFFCC', '00FFFF99', '0099CCFF', #40-44
    '00FF99CC', '00CC99FF', '00FFCC99', '003366FF', '0033CCCC', #45-49
    '0099CC00', '00FFCC00', '00FF9900', '00FF6600', '00666699', #50-54
    '00969696', '00003366', '00339966', '00003300', '00333300', #55-59
    '00993300', '00993366', '00333399', '00333333', 'System Foreground', 'System Background' #60-64
)

BLACK = COLOR_INDEX[0]
WHITE = COLOR_INDEX[1]
RED = COLOR_INDEX[2]
# Indexes 8-15 are legacy duplicates of 0-7, so the dark variants must come
# from the 16-19 range. The previous indexes (8, 10, 9) actually resolved to
# black, red and white respectively; DARKYELLOW (19) was already correct.
DARKRED = COLOR_INDEX[16]
BLUE = COLOR_INDEX[4]
DARKBLUE = COLOR_INDEX[18]
GREEN = COLOR_INDEX[3]
DARKGREEN = COLOR_INDEX[17]
YELLOW = COLOR_INDEX[5]
DARKYELLOW = COLOR_INDEX[19]

# Matches a 6-digit (RGB) or 8-digit (aRGB) hexadecimal color value.
aRGB_REGEX = re.compile("^([A-Fa-f0-9]{8}|[A-Fa-f0-9]{6})$")
class RGB(Typed):
    """Descriptor for aRGB hex values.

    A six-digit RGB value is accepted and padded with an opaque "00" alpha
    prefix before being stored.
    """

    expected_type = basestring

    def __set__(self, instance, value):
        if aRGB_REGEX.match(value) is None:
            raise ValueError("Colors must be aRGB hex values")
        if len(value) == 6:
            value = "00" + value
        super(RGB, self).__set__(instance, value)
class Color(HashableObject):
    """Named colors for use in styles.

    Exactly one of rgb / indexed / theme / auto is active, recorded in
    ``type``; ``value`` reads or writes whichever attribute is active.
    """
    tagname = "color"
    rgb = RGB()
    indexed = Integer()
    auto = Bool()
    theme = Integer()
    tint = MinMax(min=-1, max=1, expected_type=float)
    type = String()
    __fields__ = ('rgb', 'indexed', 'auto', 'theme', 'tint', 'type')
    # `index` is accepted as a legacy alias for `indexed`.
    def __init__(self, rgb=BLACK, indexed=None, auto=None, theme=None, tint=0.0, index=None, type='rgb'):
        if index is not None:
            indexed = index
        # Precedence: indexed > theme > auto > rgb (the default).
        if indexed is not None:
            self.type = 'indexed'
            self.indexed = indexed
        elif theme is not None:
            self.type = 'theme'
            self.theme = theme
        elif auto is not None:
            self.type = 'auto'
            self.auto = auto
        else:
            self.rgb = rgb
            self.type = 'rgb'
        self.tint = tint
    @property
    def value(self):
        # The value of whichever color attribute is currently active.
        return getattr(self, self.type)
    @value.setter
    def value(self, value):
        setattr(self, self.type, value)
    def __iter__(self):
        # Yield (attr, string-value) pairs for XML serialization; tint is
        # only emitted when it differs from the default 0.
        attrs = [(self.type, self.value)]
        if self.tint != 0:
            attrs.append(('tint', self.tint))
        for k, v in attrs:
            yield k, safe_string(v)
    @property
    def index(self):
        # legacy
        return self.value
class ColorDescriptor(Typed):
    """Typed descriptor that also accepts a bare aRGB hex string, which is
    coerced to ``Color(rgb=value)`` before the type check runs."""
    expected_type = Color
    def __set__(self, instance, value):
        if isinstance(value, basestring):
            value = Color(rgb=value)
        super(ColorDescriptor, self).__set__(instance, value)
| 29.791667 | 105 | 0.612587 | from __future__ import absolute_import
import re
from openpyxl.compat import safe_string, basestring
from openpyxl.descriptors import Descriptor, Typed
from .hashable import HashableObject
from openpyxl.descriptors import String, Bool, MinMax, Integer
COLOR_INDEX = (
'00000000', '00FFFFFF', '00FF0000', '0000FF00', '000000FF',
'00FFFF00', '00FF00FF', '0000FFFF', '00000000', '00FFFFFF',
'00FF0000', '0000FF00', '000000FF', '00FFFF00', '00FF00FF',
'0000FFFF', '00800000', '00008000', '00000080', '00808000',
'00800080', '00008080', '00C0C0C0', '00808080', '009999FF',
'00993366', '00FFFFCC', '00CCFFFF', '00660066', '00FF8080',
'000066CC', '00CCCCFF', '00000080', '00FF00FF', '00FFFF00',
'0000FFFF', '00800080', '00800000', '00008080', '000000FF',
'0000CCFF', '00CCFFFF', '00CCFFCC', '00FFFF99', '0099CCFF',
'00FF99CC', '00CC99FF', '00FFCC99', '003366FF', '0033CCCC',
'0099CC00', '00FFCC00', '00FF9900', '00FF6600', '00666699',
'00969696', '00003366', '00339966', '00003300', '00333300',
'00993300', '00993366', '00333399', '00333333', 'System Foreground', 'System Background'
)
BLACK = COLOR_INDEX[0]
WHITE = COLOR_INDEX[1]
RED = COLOR_INDEX[2]
DARKRED = COLOR_INDEX[8]
BLUE = COLOR_INDEX[4]
DARKBLUE = COLOR_INDEX[10]
GREEN = COLOR_INDEX[3]
DARKGREEN = COLOR_INDEX[9]
YELLOW = COLOR_INDEX[5]
DARKYELLOW = COLOR_INDEX[19]
aRGB_REGEX = re.compile("^([A-Fa-f0-9]{8}|[A-Fa-f0-9]{6})$")
class RGB(Typed):
expected_type = basestring
def __set__(self, instance, value):
m = aRGB_REGEX.match(value)
if m is None:
raise ValueError("Colors must be aRGB hex values")
if len(value) == 6:
value = "00" + value
super(RGB, self).__set__(instance, value)
class Color(HashableObject):
tagname = "color"
rgb = RGB()
indexed = Integer()
auto = Bool()
theme = Integer()
tint = MinMax(min=-1, max=1, expected_type=float)
type = String()
__fields__ = ('rgb', 'indexed', 'auto', 'theme', 'tint', 'type')
def __init__(self, rgb=BLACK, indexed=None, auto=None, theme=None, tint=0.0, index=None, type='rgb'):
if index is not None:
indexed = index
if indexed is not None:
self.type = 'indexed'
self.indexed = indexed
elif theme is not None:
self.type = 'theme'
self.theme = theme
elif auto is not None:
self.type = 'auto'
self.auto = auto
else:
self.rgb = rgb
self.type = 'rgb'
self.tint = tint
@property
def value(self):
return getattr(self, self.type)
@value.setter
def value(self, value):
setattr(self, self.type, value)
def __iter__(self):
attrs = [(self.type, self.value)]
if self.tint != 0:
attrs.append(('tint', self.tint))
for k, v in attrs:
yield k, safe_string(v)
@property
def index(self):
return self.value
class ColorDescriptor(Typed):
expected_type = Color
def __set__(self, instance, value):
if isinstance(value, basestring):
value = Color(rgb=value)
super(ColorDescriptor, self).__set__(instance, value)
| true | true |
f7fd48ace02257350871586614269671db6c47ba | 1,362 | py | Python | useintest/tests/modules/gitlab/test_gitlab.py | wtsi-hgi/startfortest | 426343c0ff340d4d83575cdafe2c4184707e7693 | [
"MIT"
] | 1 | 2019-06-18T20:56:42.000Z | 2019-06-18T20:56:42.000Z | useintest/tests/modules/gitlab/test_gitlab.py | wtsi-hgi/useintest | 426343c0ff340d4d83575cdafe2c4184707e7693 | [
"MIT"
] | 3 | 2017-09-21T12:14:44.000Z | 2018-02-19T11:18:47.000Z | useintest/tests/modules/gitlab/test_gitlab.py | wtsi-hgi/useintest | 426343c0ff340d4d83575cdafe2c4184707e7693 | [
"MIT"
] | null | null | null | import json
import unittest
from abc import ABCMeta
import requests
from useintest.modules.gitlab.gitlab import GitLabServiceController, gitlab_service_controllers
from useintest.services.models import DockerisedServiceWithUsers, User
from testhelpers import TypeUsedInTest, create_tests, get_classes_to_test
from useintest.tests.services.common import TestServiceControllerSubclass
class _TestGitLabBaseServiceController(
TestServiceControllerSubclass[TypeUsedInTest, DockerisedServiceWithUsers[User]], metaclass=ABCMeta):
"""
Tests for `GitLabBaseServiceController`.
"""
def test_start(self):
service = self._start_service()
response = requests.post(f"http://localhost:{service.ports[80]}/api/v3/session", data={
"login": service.root_user.username, "password": service.root_user.password})
print(response.text)
response_payload = json.loads(response.text)
self.assertEqual(service.root_user.username, response_payload["username"])
# Setup tests
globals().update(create_tests(_TestGitLabBaseServiceController, get_classes_to_test(
gitlab_service_controllers, GitLabServiceController)))
# Fix for stupidity of test runners
del _TestGitLabBaseServiceController, TestServiceControllerSubclass, create_tests, get_classes_to_test
if __name__ == "__main__":
unittest.main()
| 37.833333 | 108 | 0.788546 | import json
import unittest
from abc import ABCMeta
import requests
from useintest.modules.gitlab.gitlab import GitLabServiceController, gitlab_service_controllers
from useintest.services.models import DockerisedServiceWithUsers, User
from testhelpers import TypeUsedInTest, create_tests, get_classes_to_test
from useintest.tests.services.common import TestServiceControllerSubclass
class _TestGitLabBaseServiceController(
TestServiceControllerSubclass[TypeUsedInTest, DockerisedServiceWithUsers[User]], metaclass=ABCMeta):
def test_start(self):
service = self._start_service()
response = requests.post(f"http://localhost:{service.ports[80]}/api/v3/session", data={
"login": service.root_user.username, "password": service.root_user.password})
print(response.text)
response_payload = json.loads(response.text)
self.assertEqual(service.root_user.username, response_payload["username"])
globals().update(create_tests(_TestGitLabBaseServiceController, get_classes_to_test(
gitlab_service_controllers, GitLabServiceController)))
del _TestGitLabBaseServiceController, TestServiceControllerSubclass, create_tests, get_classes_to_test
if __name__ == "__main__":
unittest.main()
| true | true |
f7fd48d6b9d7089fc3a110dcb04e669d5c8523e7 | 716 | py | Python | demo.py | Peiiii/weatherChina | 5ae50de41ec5ead988402eeb5acab2c33c2944bb | [
"MIT"
] | 1 | 2021-04-14T15:50:10.000Z | 2021-04-14T15:50:10.000Z | demo.py | Peiiii/weatherChina | 5ae50de41ec5ead988402eeb5acab2c33c2944bb | [
"MIT"
] | 1 | 2021-03-16T14:08:58.000Z | 2021-04-13T03:54:57.000Z | demo.py | Peiiii/weatherChina | 5ae50de41ec5ead988402eeb5acab2c33c2944bb | [
"MIT"
] | 1 | 2021-11-20T07:08:43.000Z | 2021-11-20T07:08:43.000Z | import weatherChina
# 快速使用
# 主城区
weather=weatherChina.getWeatherByNames('上海', '上海')
weather=weatherChina.getWeatherByNames('安徽', '合肥', '合肥')
# 其它
weather=weatherChina.getWeatherByNames('上海', '浦东')
weather=weatherChina.getWeatherByNames('西藏', '日喀则', '萨迦')
#
print(weather)
# 查看所有地区、以及每个区域的id信息
regionData=weatherChina.getRegionData()
print(regionData)
# 查询地区的id
regionId=weatherChina.getRegionId('北京', '朝阳')
regionId=weatherChina.getRegionId('安徽', '合肥', '合肥')
print(regionId)
# 根据id查询近7日天气
weather=weatherChina.getWeatherById(regionId)
print(weather)
# {'lowTemp': ['3', '5', '0', '1', '3', '4', '2'], 'highTemp': ['14', '15', '10', '9', '14', '17', '18'], 'detail': ['多云', '小雨', '小雨转晴', '晴', '多云转晴', '晴', '晴']}
| 26.518519 | 160 | 0.674581 | import weatherChina
weather=weatherChina.getWeatherByNames('上海', '上海')
weather=weatherChina.getWeatherByNames('安徽', '合肥', '合肥')
weather=weatherChina.getWeatherByNames('上海', '浦东')
weather=weatherChina.getWeatherByNames('西藏', '日喀则', '萨迦')
print(weather)
regionData=weatherChina.getRegionData()
print(regionData)
regionId=weatherChina.getRegionId('北京', '朝阳')
regionId=weatherChina.getRegionId('安徽', '合肥', '合肥')
print(regionId)
weather=weatherChina.getWeatherById(regionId)
print(weather)
| true | true |
f7fd4ab22da9fa5150ecadd4ca176a283d22a091 | 3,566 | py | Python | chapter_6/diffall.py | bimri/programming_python | ba52ccd18b9b4e6c5387bf4032f381ae816b5e77 | [
"MIT"
] | null | null | null | chapter_6/diffall.py | bimri/programming_python | ba52ccd18b9b4e6c5387bf4032f381ae816b5e77 | [
"MIT"
] | null | null | null | chapter_6/diffall.py | bimri/programming_python | ba52ccd18b9b4e6c5387bf4032f381ae816b5e77 | [
"MIT"
] | null | null | null | "Comparing Directory Trees"
'Finding Tree Differences'
"""
################################################################################
Usage: "python diffall.py dir1 dir2".
Recursive directory tree comparison: report unique files that exist in only
dir1 or dir2, report files of the same name in dir1 and dir2 with differing
contents, report instances of same name but different type in dir1 and dir2,
and do the same for all subdirectories of the same names in and below dir1
and dir2. A summary of diffs appears at end of output, but search redirected
output for "DIFF" and "unique" strings for further details. New: (3E) limit
reads to 1M for large files, (3E) catch same name=file/dir, (4E) avoid extra
os.listdir() calls in dirdiff.comparedirs() by passing results here along.
################################################################################
"""
import os, dirdiffs
blocksize = 1024 * 1024 # up to 1M per read
def intersect(seq1, seq2):
"""
Return all items in both seq1 and seq2;
a set(seq1) & set(seq2) woud work too, but sets are randomly
ordered, so any platform-dependent directory order would be lost
"""
return [item for item in seq1 if item in seq2]
def comparetrees(dir1, dir2, diffs, verbose=False):
"""
Compare all subdirectories and files in two directory trees;
uses binary files to prevent Unicode decoding and endline transforms,
as trees might contain arbitrary binary files as well as arbitrary text;
may need bytes listdir arg for undecodable filenames on some platforms
"""
# compare file name lists
print('-' * 20)
names1 = os.listdir(dir1)
names2 = os.listdir(dir2)
if not dirdiffs.comparedirs(dir1, dir2, names1, names2):
diffs.append('unique files at %s - %s' % (dir1, dir2))
print('Comparing contents')
common = intersect(names1, names2)
missed = common[:]
# compared contents of files in common
for name in common:
path1 = os.path.join(dir1, name)
path2 = os.path.join(dir2, name)
if os.path.isfile(path1) and os.path.isfile(path2):
missed.remove(name)
file1 = open(path1, 'rb')
file2 = open(path2, 'rb')
while True:
bytes1 = file1.read(blocksize)
bytes2 = file2.read(blocksize)
if (not bytes1) and (not bytes2):
if verbose: print(name, 'matches')
break
if bytes1 != bytes2:
diffs.append('files differ at %s - %s' % (path1, path2))
print(name, 'DIFFERS')
break
# recur to compare directories in common
for name in common:
path1 = os.path.join(dir1, name)
path2 = os.path.join(dir2, name)
if os.path.isdir(path1) and os.path.isdir(path2):
missed.remove(name)
comparetrees(path1, path2, diffs, verbose)
# same name but not both files or dirs?
for name in missed:
diffs.append('files missed at %s - %s: %s' % (dir1, dir2, name))
print(name, 'DIFFERS')
if __name__ == '__main__':
dir1, dir2 = dirdiffs.getargs()
diffs = []
comparetrees(dir1, dir2, diffs, True) # changes diffs in-place
print('=' * 40) # walk, report diffs list
if not diffs:
print('No diffs found.')
else:
print('Diffs found:', len(diffs))
for diff in diffs: print('-', diff)
| 37.93617 | 85 | 0.586652 |
import os, dirdiffs
blocksize = 1024 * 1024
def intersect(seq1, seq2):
return [item for item in seq1 if item in seq2]
def comparetrees(dir1, dir2, diffs, verbose=False):
print('-' * 20)
names1 = os.listdir(dir1)
names2 = os.listdir(dir2)
if not dirdiffs.comparedirs(dir1, dir2, names1, names2):
diffs.append('unique files at %s - %s' % (dir1, dir2))
print('Comparing contents')
common = intersect(names1, names2)
missed = common[:]
for name in common:
path1 = os.path.join(dir1, name)
path2 = os.path.join(dir2, name)
if os.path.isfile(path1) and os.path.isfile(path2):
missed.remove(name)
file1 = open(path1, 'rb')
file2 = open(path2, 'rb')
while True:
bytes1 = file1.read(blocksize)
bytes2 = file2.read(blocksize)
if (not bytes1) and (not bytes2):
if verbose: print(name, 'matches')
break
if bytes1 != bytes2:
diffs.append('files differ at %s - %s' % (path1, path2))
print(name, 'DIFFERS')
break
for name in common:
path1 = os.path.join(dir1, name)
path2 = os.path.join(dir2, name)
if os.path.isdir(path1) and os.path.isdir(path2):
missed.remove(name)
comparetrees(path1, path2, diffs, verbose)
for name in missed:
diffs.append('files missed at %s - %s: %s' % (dir1, dir2, name))
print(name, 'DIFFERS')
if __name__ == '__main__':
dir1, dir2 = dirdiffs.getargs()
diffs = []
comparetrees(dir1, dir2, diffs, True)
print('=' * 40)
if not diffs:
print('No diffs found.')
else:
print('Diffs found:', len(diffs))
for diff in diffs: print('-', diff)
| true | true |
f7fd4ad24afe06422bc497d69f3c9d9e1412ca33 | 125 | py | Python | project/server/main/modules/api/routes/user/__init__.py | ardikabs/flask-server-template | e1dfb33323cc89f6163d604007263b73ec5b6e12 | [
"MIT"
] | 1 | 2019-01-15T10:33:04.000Z | 2019-01-15T10:33:04.000Z | project/server/main/modules/api/routes/user/__init__.py | ardikabs/flask-server-template | e1dfb33323cc89f6163d604007263b73ec5b6e12 | [
"MIT"
] | null | null | null | project/server/main/modules/api/routes/user/__init__.py | ardikabs/flask-server-template | e1dfb33323cc89f6163d604007263b73ec5b6e12 | [
"MIT"
] | null | null | null |
from flask_restplus import Namespace
api = Namespace("users", description="User Related Operation")
from . import resource | 20.833333 | 62 | 0.792 |
from flask_restplus import Namespace
api = Namespace("users", description="User Related Operation")
from . import resource | true | true |
f7fd4b12c53e66fb958f8caf63bbc2ade57a36d4 | 263 | py | Python | Stack_Using_List.py | thegautamkumarjaiswal/Data_Structures_in-_Python | 5ca83b278aaa13b3eee9e8109aad97909545b523 | [
"Apache-2.0"
] | null | null | null | Stack_Using_List.py | thegautamkumarjaiswal/Data_Structures_in-_Python | 5ca83b278aaa13b3eee9e8109aad97909545b523 | [
"Apache-2.0"
] | null | null | null | Stack_Using_List.py | thegautamkumarjaiswal/Data_Structures_in-_Python | 5ca83b278aaa13b3eee9e8109aad97909545b523 | [
"Apache-2.0"
] | null | null | null | # python stack using list #
my_Stack = [10, 12, 13, 11, 33, 24, 56, 78, 13, 56, 31, 32, 33, 10, 15] # array #
print(my_Stack)
print(my_Stack.pop())
# think python simple just pop and push #
print(my_Stack.pop())
print(my_Stack.pop())
print(my_Stack.pop())
| 21.916667 | 85 | 0.65019 | my_Stack = [10, 12, 13, 11, 33, 24, 56, 78, 13, 56, 31, 32, 33, 10, 15]
print(my_Stack)
print(my_Stack.pop())
print(my_Stack.pop())
print(my_Stack.pop())
print(my_Stack.pop())
| true | true |
f7fd4b7de32a29a5b63794d096a3a939c1dc6f09 | 2,859 | py | Python | bots/bot.py | HendrikCrause/hallite2-submission | fef9d836f393f7aa15768e6a424c52ff60702933 | [
"MIT"
] | null | null | null | bots/bot.py | HendrikCrause/hallite2-submission | fef9d836f393f7aa15768e6a424c52ff60702933 | [
"MIT"
] | null | null | null | bots/bot.py | HendrikCrause/hallite2-submission | fef9d836f393f7aa15768e6a424c52ff60702933 | [
"MIT"
] | null | null | null | import argparse
import json
import hlt
import operator
import logging
from rnn.rnn import DeepRecurrentNeuralNet
class Bot:
def __init__(self, name='bot', structure=None):
self.game = hlt.Game(name)
self.brain = DeepRecurrentNeuralNet(structure)
def play(self):
while True:
game_map = self.game.update_map()
command_queue = []
for ship in game_map.get_me().all_ships():
ship_command = False
if ship.docking_status != ship.DockingStatus.UNDOCKED:
continue
for planet in game_map.all_planets():
if planet.is_owned():
continue
if ship.can_dock(planet):
ship_command = ship.dock(planet)
break
if not ship_command:
self.brain.reset()
result = [0, 0]
for feature_list in self.sorted_feature_matrix_by_distance(game_map, ship):
result = self.brain.step(feature_list)
speed = int(abs(result[0]) * hlt.constants.MAX_SPEED)
angle = int(round(abs(result[1]) * 360))
ship_command = ship.thrust(speed, angle)
command_queue.append(ship_command)
self.game.send_command_queue(command_queue)
def sorted_feature_matrix_by_distance(self, game_map, ship):
features = [self.create_feature_list(ship, ship.owner)]
distances = game_map.nearby_entities_by_distance(ship)
sorted_distances = sorted(distances.items(), key=operator.itemgetter(0))
for distance, entities in sorted_distances:
for entity in entities:
features.append(self.create_feature_list(entity, ship.owner))
return features
@staticmethod
def create_feature_list(entity, me):
out = [entity.x, entity.y, entity.radius, entity.health]
if entity.owner == me:
out += [1, 0]
elif entity.owner is None:
out += [0, 0]
else:
out += [0, 1]
if isinstance(entity, hlt.entity.Planet):
out += [1, 0]
elif isinstance(entity, hlt.entity.Ship):
out += [0, 1]
else:
out += [0, 0]
return out
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--uuid", help="Bot uuid which should correspond to a saved structure file")
args = parser.parse_args()
name = args.uuid
try:
with open('temp/' + args.uuid + '.bot') as file:
structure = json.loads(file.read())
except Exception as e:
logging.error(e)
name = 'RandomBot'
structure = None
Bot(name, structure).play()
| 30.741935 | 100 | 0.564183 | import argparse
import json
import hlt
import operator
import logging
from rnn.rnn import DeepRecurrentNeuralNet
class Bot:
def __init__(self, name='bot', structure=None):
self.game = hlt.Game(name)
self.brain = DeepRecurrentNeuralNet(structure)
def play(self):
while True:
game_map = self.game.update_map()
command_queue = []
for ship in game_map.get_me().all_ships():
ship_command = False
if ship.docking_status != ship.DockingStatus.UNDOCKED:
continue
for planet in game_map.all_planets():
if planet.is_owned():
continue
if ship.can_dock(planet):
ship_command = ship.dock(planet)
break
if not ship_command:
self.brain.reset()
result = [0, 0]
for feature_list in self.sorted_feature_matrix_by_distance(game_map, ship):
result = self.brain.step(feature_list)
speed = int(abs(result[0]) * hlt.constants.MAX_SPEED)
angle = int(round(abs(result[1]) * 360))
ship_command = ship.thrust(speed, angle)
command_queue.append(ship_command)
self.game.send_command_queue(command_queue)
def sorted_feature_matrix_by_distance(self, game_map, ship):
features = [self.create_feature_list(ship, ship.owner)]
distances = game_map.nearby_entities_by_distance(ship)
sorted_distances = sorted(distances.items(), key=operator.itemgetter(0))
for distance, entities in sorted_distances:
for entity in entities:
features.append(self.create_feature_list(entity, ship.owner))
return features
@staticmethod
def create_feature_list(entity, me):
out = [entity.x, entity.y, entity.radius, entity.health]
if entity.owner == me:
out += [1, 0]
elif entity.owner is None:
out += [0, 0]
else:
out += [0, 1]
if isinstance(entity, hlt.entity.Planet):
out += [1, 0]
elif isinstance(entity, hlt.entity.Ship):
out += [0, 1]
else:
out += [0, 0]
return out
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--uuid", help="Bot uuid which should correspond to a saved structure file")
args = parser.parse_args()
name = args.uuid
try:
with open('temp/' + args.uuid + '.bot') as file:
structure = json.loads(file.read())
except Exception as e:
logging.error(e)
name = 'RandomBot'
structure = None
Bot(name, structure).play()
| true | true |
f7fd4b81687901bedc61a3a4d30ffa4e01308554 | 7,643 | py | Python | creastephGAN2.py | Sup3Legacy/TIPE | 7e01cef869183c4d609c45d5fcf0bb371a9579f5 | [
"BSD-3-Clause"
] | null | null | null | creastephGAN2.py | Sup3Legacy/TIPE | 7e01cef869183c4d609c45d5fcf0bb371a9579f5 | [
"BSD-3-Clause"
] | null | null | null | creastephGAN2.py | Sup3Legacy/TIPE | 7e01cef869183c4d609c45d5fcf0bb371a9579f5 | [
"BSD-3-Clause"
] | 1 | 2020-06-28T06:07:17.000Z | 2020-06-28T06:07:17.000Z | from __future__ import print_function, division
import os
import random
import argparse
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import torchvision.utils as vutils
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.datasets as dSet
from IPython.display import HTML
import matplotlib.animation as animation
from torchvision import transforms, utils
import datetime
seed = 123321
#random.seed(seed)
#torch.manual_seed(int(datetime.datetime.now().strftime("%H%M%S")))
##Hyperparamètres
ABSOLUTE = 'D:/Documents/Prepa/TIPE'
pathImage = ABSOLUTE + '/Images/Creasteph/'
pathModels = ABSOLUTE + "/Models/"
batchSize = 4 #10 pour Mosa et Mosa2 et 4 pour Mosa3
imSize = 64 #Ok 128 pour Mosa et Mosa2 et Mosa3
channelsNumber = 3 #Couleurs !
inputSize = 100 #Entrée du générateur 100 pour Mosa, 5000 pour Mosa2 et Mosa3 et Mosa4
featuresGenerator = 64 #64 pour Mosa, Mosa2 et Mosa3, 128 pour Mosa4
featuresDiscriminator = 64 #De même
learningRate = 0.0002 #0.0002 pour Mosa, Mosa2 Mosa3
beta1 = 0.5
setImages = dSet.ImageFolder(root = pathImage, transform = transforms.Compose([transforms.RandomCrop((imSize, imSize)), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ColorJitter(hue = 0.5), transforms.ToTensor()]))
imagesLoader = torch.utils.data.DataLoader(setImages, batch_size = batchSize, shuffle = True, num_workers=0)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def weightsInit(m):
classname = m.__class__.__name__
if classname.find('conv') != -1:
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0)
## générateur
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
self.main = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d( inputSize, featuresGenerator * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(featuresGenerator * 8),
nn.ReLU(True),
# state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(featuresGenerator * 8, featuresGenerator * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(featuresGenerator * 4),
nn.ReLU(True),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d( featuresGenerator * 4, featuresGenerator * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(featuresGenerator * 2),
nn.ReLU(True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d( featuresGenerator * 2, featuresGenerator, 4, 2, 1, bias=False),
nn.BatchNorm2d(featuresGenerator),
nn.ReLU(True),
# state size. (ngf) x 32 x 32
nn.ConvTranspose2d( featuresGenerator, channelsNumber, 4, 2, 1, bias=False),
nn.Tanh()
# state size. (nc) x 64 x 64
)
def forward(self, input):
return self.main(input)
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.main = nn.Sequential(
# input is (nc) x 64 x 64
nn.Conv2d(channelsNumber, featuresDiscriminator, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf) x 32 x 32
nn.Conv2d(featuresDiscriminator, featuresDiscriminator * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(featuresDiscriminator * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*2) x 16 x 16
nn.Conv2d(featuresDiscriminator * 2, featuresDiscriminator * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(featuresDiscriminator * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*4) x 8 x 8
nn.Conv2d(featuresDiscriminator * 4, featuresDiscriminator * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(featuresDiscriminator * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 4 x 4
nn.Conv2d(featuresDiscriminator * 8, 1, 4, 1, 0, bias=False),
nn.Sigmoid()
)
def forward(self, input):
return self.main(input)
netG = Generator().to(device)
netG.apply(weightsInit)
netD = Discriminator().to(device)
netD.apply(weightsInit)
criterion = nn.BCELoss()
fixedNoise = torch.randn(1, inputSize, 1, 1, device = device)
realLabel = 1
fakeLabel = 0
optimD = optim.Adam(netD.parameters(), lr = learningRate, betas = (beta1, 0.999))
optimG = optim.Adam(netG.parameters(), lr = learningRate, betas = (beta1, 0.999))
imgList = []
GLoss = []
DLoss = []
def train(number):
iters = 0
for epoch in range(number):
for i, data in enumerate(imagesLoader, 0):
netD.zero_grad()
real_cpu = data[0].to(device)
b_size = real_cpu.size(0)
label = torch.full((b_size,), realLabel, device = device)
output = netD(real_cpu).view(-1)
errD_real = criterion(output, label)
errD_real.backward()
D_x = output.mean().item()
noise = torch.randn(b_size, inputSize, 1, 1, device = device)
fake = netG(noise)
label.fill_(fakeLabel)
output = netD(fake.detach()).view(-1)
errD_fake = criterion(output, label)
errD_fake.backward()
D_G_z1 = output.mean().item()
errD = errD_real + errD_fake
optimD.step()
netG.zero_grad()
label.fill_(realLabel)
output = netD(fake).view(-1)
errG = criterion(output, label)
errG.backward()
D_G_z2 = output.mean().item()
optimG.step()
if i % 50 == 0:
print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
% (epoch, number, i, len(imagesLoader),
errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
GLoss.append(errG.item())
DLoss.append(errD.item())
if (iters % 500 == 0) or ((epoch == number) and (i == len(imagesLoader)-1)):
with torch.no_grad():
fake = netG(fixedNoise).detach().cpu()
imgList.append(vutils.make_grid(fake, padding=2, normalize=True))
iters += 1
def show():
fig = plt.figure(figsize=(10,10))
plt.axis("off")
ims = [[plt.imshow(np.transpose(i,(1,2,0)), animated=True)] for i in imgList]
ani = animation.ArtistAnimation(fig, ims, interval=1000, repeat_delay=1000, blit=True)
HTML(ani.to_jshtml())
plt.show()
def clear():
imgList = []
def test():
w = 5
h = 5
fig = plt.figure(figsize = (10,10))
lol = torch.randn(25, inputSize, 1, 1, device = device)
image = netG(lol).detach().cpu()
for i in range(image.size()[0]):
fig.add_subplot(w, h, i + 1)
lel = (image[i].numpy().transpose((1, 2, 0)) * 255).astype(np.uint8)
lel = np.roll(lel, np.random.randint(0, 3), 2)
plt.imshow(lel)
plt.show()
def saveModel(nom):
torch.save(netD.state_dict(), pathModels + 'D-' + nom + '.pt')
torch.save(netG.state_dict(), pathModels + 'G-' + nom + '.pt')
def loadModel(nom):
netD.load_state_dict(torch.load(pathModels + 'D-' + nom + '.pt'))
netG.load_state_dict(torch.load(pathModels + 'G-' + nom + '.pt'))
| 36.395238 | 247 | 0.61154 | from __future__ import print_function, division
import os
import random
import argparse
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import torchvision.utils as vutils
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.datasets as dSet
from IPython.display import HTML
import matplotlib.animation as animation
from torchvision import transforms, utils
import datetime
seed = 123321
Documents/Prepa/TIPE'
pathImage = ABSOLUTE + '/Images/Creasteph/'
pathModels = ABSOLUTE + "/Models/"
batchSize = 4
imSize = 64
channelsNumber = 3
inputSize = 100
featuresGenerator = 64
featuresDiscriminator = 64
learningRate = 0.0002
beta1 = 0.5
setImages = dSet.ImageFolder(root = pathImage, transform = transforms.Compose([transforms.RandomCrop((imSize, imSize)), transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.ColorJitter(hue = 0.5), transforms.ToTensor()]))
imagesLoader = torch.utils.data.DataLoader(setImages, batch_size = batchSize, shuffle = True, num_workers=0)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def weightsInit(m):
classname = m.__class__.__name__
if classname.find('conv') != -1:
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0)
rator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
self.main = nn.Sequential(
nn.ConvTranspose2d( inputSize, featuresGenerator * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(featuresGenerator * 8),
nn.ReLU(True),
nn.ConvTranspose2d(featuresGenerator * 8, featuresGenerator * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(featuresGenerator * 4),
nn.ReLU(True),
nn.ConvTranspose2d( featuresGenerator * 4, featuresGenerator * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(featuresGenerator * 2),
nn.ReLU(True),
nn.ConvTranspose2d( featuresGenerator * 2, featuresGenerator, 4, 2, 1, bias=False),
nn.BatchNorm2d(featuresGenerator),
nn.ReLU(True),
nn.ConvTranspose2d( featuresGenerator, channelsNumber, 4, 2, 1, bias=False),
nn.Tanh()
)
def forward(self, input):
return self.main(input)
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.main = nn.Sequential(
nn.Conv2d(channelsNumber, featuresDiscriminator, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(featuresDiscriminator, featuresDiscriminator * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(featuresDiscriminator * 2),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(featuresDiscriminator * 2, featuresDiscriminator * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(featuresDiscriminator * 4),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(featuresDiscriminator * 4, featuresDiscriminator * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(featuresDiscriminator * 8),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(featuresDiscriminator * 8, 1, 4, 1, 0, bias=False),
nn.Sigmoid()
)
def forward(self, input):
return self.main(input)
netG = Generator().to(device)
netG.apply(weightsInit)
netD = Discriminator().to(device)
netD.apply(weightsInit)
criterion = nn.BCELoss()
fixedNoise = torch.randn(1, inputSize, 1, 1, device = device)
realLabel = 1
fakeLabel = 0
optimD = optim.Adam(netD.parameters(), lr = learningRate, betas = (beta1, 0.999))
optimG = optim.Adam(netG.parameters(), lr = learningRate, betas = (beta1, 0.999))
imgList = []
GLoss = []
DLoss = []
def train(number):
iters = 0
for epoch in range(number):
for i, data in enumerate(imagesLoader, 0):
netD.zero_grad()
real_cpu = data[0].to(device)
b_size = real_cpu.size(0)
label = torch.full((b_size,), realLabel, device = device)
output = netD(real_cpu).view(-1)
errD_real = criterion(output, label)
errD_real.backward()
D_x = output.mean().item()
noise = torch.randn(b_size, inputSize, 1, 1, device = device)
fake = netG(noise)
label.fill_(fakeLabel)
output = netD(fake.detach()).view(-1)
errD_fake = criterion(output, label)
errD_fake.backward()
D_G_z1 = output.mean().item()
errD = errD_real + errD_fake
optimD.step()
netG.zero_grad()
label.fill_(realLabel)
output = netD(fake).view(-1)
errG = criterion(output, label)
errG.backward()
D_G_z2 = output.mean().item()
optimG.step()
if i % 50 == 0:
print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
% (epoch, number, i, len(imagesLoader),
errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
GLoss.append(errG.item())
DLoss.append(errD.item())
if (iters % 500 == 0) or ((epoch == number) and (i == len(imagesLoader)-1)):
with torch.no_grad():
fake = netG(fixedNoise).detach().cpu()
imgList.append(vutils.make_grid(fake, padding=2, normalize=True))
iters += 1
def show():
fig = plt.figure(figsize=(10,10))
plt.axis("off")
ims = [[plt.imshow(np.transpose(i,(1,2,0)), animated=True)] for i in imgList]
ani = animation.ArtistAnimation(fig, ims, interval=1000, repeat_delay=1000, blit=True)
HTML(ani.to_jshtml())
plt.show()
def clear():
imgList = []
def test():
w = 5
h = 5
fig = plt.figure(figsize = (10,10))
lol = torch.randn(25, inputSize, 1, 1, device = device)
image = netG(lol).detach().cpu()
for i in range(image.size()[0]):
fig.add_subplot(w, h, i + 1)
lel = (image[i].numpy().transpose((1, 2, 0)) * 255).astype(np.uint8)
lel = np.roll(lel, np.random.randint(0, 3), 2)
plt.imshow(lel)
plt.show()
def saveModel(nom):
torch.save(netD.state_dict(), pathModels + 'D-' + nom + '.pt')
torch.save(netG.state_dict(), pathModels + 'G-' + nom + '.pt')
def loadModel(nom):
netD.load_state_dict(torch.load(pathModels + 'D-' + nom + '.pt'))
netG.load_state_dict(torch.load(pathModels + 'G-' + nom + '.pt'))
| true | true |
f7fd4c8ccfaeb89edbcb3c758d67ae754d88a617 | 133 | py | Python | app.py | tim-coutinho/cl | 3103c3e8dda5250aa68c1a335dfd9db106a4c302 | [
"MIT"
] | null | null | null | app.py | tim-coutinho/cl | 3103c3e8dda5250aa68c1a335dfd9db106a4c302 | [
"MIT"
] | null | null | null | app.py | tim-coutinho/cl | 3103c3e8dda5250aa68c1a335dfd9db106a4c302 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from aws_cdk.core import App
from cdk.cl_stack import ClStack
app = App()
ClStack(app, "cl2")
app.synth()
| 12.090909 | 32 | 0.714286 |
from aws_cdk.core import App
from cdk.cl_stack import ClStack
app = App()
ClStack(app, "cl2")
app.synth()
| true | true |
f7fd4cfa842adfb38c1908a61c81749ee72bf13b | 2,384 | py | Python | Python/substring-with-concatenation-of-all-words.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | Python/substring-with-concatenation-of-all-words.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | Python/substring-with-concatenation-of-all-words.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | # Time: O((m + n) * k), where m is string length, n is dictionary size, k is word length
# Space: O(n * k)
import collections
class Solution(object):
def findSubstring(self, s, words):
"""
:type s: str
:type words: List[str]
:rtype: List[int]
"""
if not words:
return []
result, m, n, k = [], len(s), len(words), len(words[0])
if m < n*k:
return result
lookup = collections.defaultdict(int)
for i in words:
lookup[i] += 1 # Space: O(n * k)
for i in range(k): # Time: O(k)
left, count = i, 0
tmp = collections.defaultdict(int)
for j in range(i, m-k+1, k): # Time: O(m / k)
s1 = s[j:j+k] # Time: O(k)
if s1 in lookup:
tmp[s1] += 1
count += 1
while tmp[s1] > lookup[s1]:
tmp[s[left:left+k]] -= 1
count -= 1
left += k
if count == n:
result.append(left)
else:
tmp = collections.defaultdict(int)
count = 0
left = j+k
return result
# Time: O(m * n * k), where m is string length, n is dictionary size, k is word length
# Space: O(n * k)
class Solution2(object):
def findSubstring(self, s, words):
"""
:type s: str
:type words: List[str]
:rtype: List[int]
"""
result, m, n, k = [], len(s), len(words), len(words[0])
if m < n*k:
return result
lookup = collections.defaultdict(int)
for i in words:
lookup[i] += 1 # Space: O(n * k)
for i in range(m+1-k*n): # Time: O(m)
cur, j = collections.defaultdict(int), 0
while j < n: # Time: O(n)
word = s[i+j*k:i+j*k+k] # Time: O(k)
if word not in lookup:
break
cur[word] += 1
if cur[word] > lookup[word]:
break
j += 1
if j == n:
result.append(i)
return result
| 30.177215 | 89 | 0.395973 |
import collections
class Solution(object):
def findSubstring(self, s, words):
if not words:
return []
result, m, n, k = [], len(s), len(words), len(words[0])
if m < n*k:
return result
lookup = collections.defaultdict(int)
for i in words:
lookup[i] += 1
for i in range(k):
left, count = i, 0
tmp = collections.defaultdict(int)
for j in range(i, m-k+1, k):
s1 = s[j:j+k]
if s1 in lookup:
tmp[s1] += 1
count += 1
while tmp[s1] > lookup[s1]:
tmp[s[left:left+k]] -= 1
count -= 1
left += k
if count == n:
result.append(left)
else:
tmp = collections.defaultdict(int)
count = 0
left = j+k
return result
class Solution2(object):
def findSubstring(self, s, words):
result, m, n, k = [], len(s), len(words), len(words[0])
if m < n*k:
return result
lookup = collections.defaultdict(int)
for i in words:
lookup[i] += 1
for i in range(m+1-k*n):
cur, j = collections.defaultdict(int), 0
while j < n:
word = s[i+j*k:i+j*k+k]
if word not in lookup:
break
cur[word] += 1
if cur[word] > lookup[word]:
break
j += 1
if j == n:
result.append(i)
return result
| true | true |
f7fd4d573e3b3b03754376eed6b380355b9f8567 | 1,960 | py | Python | opencadd/tests/compounds/standardization/test_remove_salts.py | Allend95/opencadd | 1fde238e3cf8e5e47e8266a504d9df0196505e97 | [
"MIT"
] | 1 | 2020-11-11T17:24:41.000Z | 2020-11-11T17:24:41.000Z | opencadd/tests/compounds/standardization/test_remove_salts.py | Allend95/opencadd | 1fde238e3cf8e5e47e8266a504d9df0196505e97 | [
"MIT"
] | null | null | null | opencadd/tests/compounds/standardization/test_remove_salts.py | Allend95/opencadd | 1fde238e3cf8e5e47e8266a504d9df0196505e97 | [
"MIT"
] | null | null | null | """
test for the module `remove_salts`
"""
import pytest
import sys
import rdkit
from rdkit import Chem
from opencadd.compounds.standardization.remove_salts import remove_salts
def _evaluation_mol_generator(test_smiles=None, test_inchi=None):
    """Create mol objects directly with RDKit's own parsers for evaluation.

    SMILES input takes precedence over InChI; returns None when neither is given.
    """
    if test_smiles is None:
        if test_inchi is not None:
            return Chem.MolFromInchi(test_inchi)
        return None
    return Chem.MolFromSmiles(test_smiles)
def _molecule_test(test_inchi=None, test_smiles=None):
    """Strip salts from the given structure and return the result as an InChI string."""
    mol = _evaluation_mol_generator(test_inchi=test_inchi, test_smiles=test_smiles)
    stripped = remove_salts(mol)
    return Chem.MolToInchi(stripped)
def test_structure():
    """Only C(C(=O)[O-])(Cc1n[n-]nn1)(C[NH3+])(C[N+](=O)[O-] should be
    left after stripping salts.
    """
    observed = _molecule_test(
        test_smiles="C(C(=O)[O-])(Cc1n[n-]nn1)(C[NH3+])(C[N+](=O)[O-].CCCCCCCCCCCCCCCCCC(=O)O.OCC(O)C1OC(=O)C(=C1O)O)"
    )
    expected = "InChI=1S/C6H10N6O4/c7-2-6(5(13)14,3-12(15)16)1-4-8-10-11-9-4/h1-3,7H2,(H2,8,9,10,11,13,14)/p-1"
    assert observed == expected
def test_single_salts():
    """All salt fragments should be detected and stripped."""
    observed = _molecule_test(
        test_smiles="[Al].N.[Ba].[Bi].Br.[Ca].Cl.F.I.[K].[Li].[Mg].[Na].[Ag].[Sr].S.O.[Zn]"
    )
    assert observed == ""
def test_complex_salts():
    """Complex salts, contained in salts.tsv should be detected."""
    observed = _molecule_test(test_smiles="OC(C(O)C(=O)O)C(=O)O.O=C1NS(=O)(=O)c2ccccc12")
    assert observed == ""
def test_custom_dictionary():
    """Configuration of a custom dictionary, by defining one, should
    work.
    """
    mol = _evaluation_mol_generator(test_smiles="[Al].N.[Ba].[Bi]")
    stripped = remove_salts(mol, dictionary=False, defnData="[Al]")
    assert Chem.MolToInchi(stripped) == "InChI=1S/Ba.Bi.H3N.2H/h;;1H3;;"
| 27.222222 | 122 | 0.60102 | import pytest
import sys
import rdkit
from rdkit import Chem
from opencadd.compounds.standardization.remove_salts import remove_salts
def _evaluation_mol_generator(test_smiles=None, test_inchi=None):
if test_smiles is not None:
return Chem.MolFromSmiles(test_smiles)
if test_inchi is not None:
return Chem.MolFromInchi(test_inchi)
def _molecule_test(test_inchi=None, test_smiles=None):
return Chem.MolToInchi(
remove_salts(
_evaluation_mol_generator(test_inchi=test_inchi, test_smiles=test_smiles)
)
)
def test_structure():
assert (
_molecule_test(
test_smiles="C(C(=O)[O-])(Cc1n[n-]nn1)(C[NH3+])(C[N+](=O)[O-].CCCCCCCCCCCCCCCCCC(=O)O.OCC(O)C1OC(=O)C(=C1O)O)"
)
== "InChI=1S/C6H10N6O4/c7-2-6(5(13)14,3-12(15)16)1-4-8-10-11-9-4/h1-3,7H2,(H2,8,9,10,11,13,14)/p-1"
)
def test_single_salts():
assert (
_molecule_test(
test_smiles="[Al].N.[Ba].[Bi].Br.[Ca].Cl.F.I.[K].[Li].[Mg].[Na].[Ag].[Sr].S.O.[Zn]"
)
== ""
)
def test_complex_salts():
assert (
_molecule_test(test_smiles="OC(C(O)C(=O)O)C(=O)O.O=C1NS(=O)(=O)c2ccccc12") == ""
)
def test_custom_dictionary():
assert (
Chem.MolToInchi(
remove_salts(
_evaluation_mol_generator(test_smiles="[Al].N.[Ba].[Bi]"),
dictionary=False,
defnData="[Al]",
)
)
== "InChI=1S/Ba.Bi.H3N.2H/h;;1H3;;"
)
| true | true |
f7fd4efe5bd9f0ff30646b471aa32628dc9936d9 | 16,379 | py | Python | lain_admin_cli/node.py | laincloud/lainctl | bbf662f1a0c322cfaa67d2b137516732918df692 | [
"MIT"
] | 3 | 2016-05-16T12:36:10.000Z | 2017-08-02T09:31:51.000Z | lain_admin_cli/node.py | laincloud/lainctl | bbf662f1a0c322cfaa67d2b137516732918df692 | [
"MIT"
] | 11 | 2016-07-29T03:25:36.000Z | 2018-01-16T04:28:29.000Z | lain_admin_cli/node.py | laincloud/lainctl | bbf662f1a0c322cfaa67d2b137516732918df692 | [
"MIT"
] | 9 | 2016-05-09T08:35:28.000Z | 2017-08-02T09:31:54.000Z | # -*- coding: utf-8 -*-
from argh import CommandError
from argh.decorators import arg, expects_obj
from lain_admin_cli.helpers import Node as NodeInfo
from lain_admin_cli.helpers import (
yes_or_no, info, warn, error, RemoveException, AddNodeException, _yellow,
TwoLevelCommandBase, run_ansible_cmd
)
from subprocess import check_output, check_call, Popen, STDOUT, PIPE
import requests
import signal
import json
import os
import sys
import time
from lain_admin_cli.utils.health import NodeHealth
def sigint_handler(signum, frame):
    # Deliberately a no-op: node operations (add/remove/clean) must not be
    # interrupted half-way, or etcd would be left with stale markers under
    # /lain/nodes/*.
    pass
# Swallow both termination signals for the lifetime of the process.
signal.signal(signal.SIGTERM, sigint_handler)
signal.signal(signal.SIGINT, sigint_handler)
class Node(TwoLevelCommandBase):
    """`lain node` command group: manage cluster membership.

    Cluster state lives in etcd under /lain/nodes/*, where keys have the
    form "<name>:<ip>:<ssh_port>"; most subcommands shell out to etcdctl
    and to ansible playbooks via run_ansible_cmd.

    Fixes vs. the previous revision:
    - remove(): ``Node(target)`` instantiated this CLI command class, which
      has no .name/.ssh_port attributes later read by drift_swarm_manager;
      it now builds a NodeInfo like every other subcommand.
    - clean(): only the last node's /lain/nodes/clean marker was removed,
      leaking the others; all markers are now cleaned up.
    - print statements normalized to the function-call form already used
      elsewhere in this module.
    """
    @classmethod
    def subcommands(self):
        # Registered by TwoLevelCommandBase as `lain node <subcommand>`.
        return [self.list, self.inspect, self.add, self.remove, self.clean,
                self.maintain, self.health, self.change_labels]
    @classmethod
    def namespace(self):
        return "node"
    @classmethod
    def help_message(self):
        return "lain node operations"
    @classmethod
    def __list_node_group(self, group):
        """Return {name: NodeInfo} for every node under /lain/nodes/<group>."""
        output = check_output(['etcdctl', 'ls', '/lain/nodes/%s' % group])
        nodes = {}
        for line in output.splitlines():
            tmp = NodeInfo()
            # etcd keys look like /lain/nodes/<group>/<name>:<ip>:<port>
            tmp.name, tmp.ip, tmp.ssh_port = line.split('/')[-1].split(':')
            nodes[tmp.name] = tmp
        return nodes
    @classmethod
    def list(self):
        """list all the nodes(name and ip) in lain"""
        check_output(['etcdctl', 'ls', '/lain/nodes/nodes'])
        nodes = self.__list_node_group('nodes')
        # The column margin is 2 spaces
        min_width = 2 + max(8, *(len(node.name) for node in nodes.values()))
        row_fmt = "%-{min_width}s%s".format(min_width=min_width)
        print(row_fmt % ("NODENAME", "IP"))
        for node in nodes.values():
            print(row_fmt % (node.name, node.ip))
    @classmethod
    @arg('node')
    def inspect(self, node):
        """
        inspect a node, nodename or nodeip should be given.
        info is got from etcd.
        """
        check_output(['etcdctl', 'ls', '/lain/nodes/nodes'])
        all_nodes = self.__list_node_group('nodes')
        for item in all_nodes.values():
            if node == item.name or node == item.ip:
                etcd_members = self.__list_node_group('etcd-members')
                swarm_members = self.__list_node_group('swarm-managers')
                managers = self.__list_node_group('managers')
                # NOTE(review): the membership checks below test against
                # name-keyed dicts, so inspecting by IP always reports
                # False for these flags — confirm whether that is intended.
                print(json.dumps({
                    "name": item.name,
                    "ip": item.ip,
                    "ssh_port": item.ssh_port,
                    "docker_device": item.docker_device,
                    "is_lain_managers": node in managers,
                    "is_etcd_member": node in etcd_members,
                    "is_swarm_manager": node in swarm_members,
                    "labels": self.__get_node_labels(item.ip),
                }, indent=4))
                return
        raise CommandError("Unkown node name %s" % node)
    @classmethod
    def __get_node_labels(self, node_ip):
        """Fetch the docker daemon labels from the node's docker API (port 2375)."""
        r = requests.get('http://{}:2375/info'.format(node_ip), timeout=5)
        labels = r.json()['Labels']
        if labels is None:
            return []
        return labels
    @classmethod
    @expects_obj
    @arg('nodes', nargs='+', help="the nodes need to add [example: node2:192.168.77.22]")
    @arg('-p', '--playbooks', required=True)
    @arg('-P', '--ssh-port', default=22, help="SSH port of the node to be added")
    @arg('-l', '--labels', nargs='+', default="", help="The labels added to docker daemon in the node. [example: disk=ssd]")
    @arg('-d', '--docker-device', default="", help="The block device use for docker's devicemapper storage."
         "docker will run on loop-lvm if this is not given, which is not proposed")
    def add(self, args):
        """add a new node to lain"""
        try:
            # Start with an empty container so the finally-block below is
            # safe even if validation raises before any node is marked.
            nodes = dict()
            nodes = self.__check_nodes_validation(args.nodes)
            port = args.ssh_port
            for name, ip in nodes:
                # Mark the node as pending under /lain/nodes/new so the
                # playbook can discover it, then install our SSH key.
                check_call(['etcdctl', 'set',
                            '/lain/nodes/new/%s:%s:%s' % (name, ip, port),
                            ip])
                copy_public_key(ip)
            if run_addnode_ansible(args):
                error("run add node ansible failed")
                return
            # Bootstrap succeeded: promote each node to /lain/nodes/nodes.
            for name, ip in nodes:
                node_data = json.dumps({'name': name,
                                        'ip': ip,
                                        'ssh_port': port,
                                        'docker_device': args.docker_device})
                check_call(['etcdctl', 'set',
                            '/lain/nodes/nodes/%s:%s:%s' % (name, ip, port),
                            node_data])
        except Exception as e:
            error(str(e))
        finally:
            # Always drop the pending markers, whether or not we succeeded.
            for name, ip in nodes:
                check_call(
                    ['etcdctl', 'rm', '/lain/nodes/new/%s:%s:%s' % (name, ip, port)])
    @classmethod
    def __check_nodes_validation(self, nodes):
        """Parse and validate "name:ip" arguments; raise AddNodeException on problems."""
        try:
            nodes = [x.split(':') for x in nodes]
            if len(set(n[0] for n in nodes)) != len(nodes):
                raise AddNodeException("There are duplicate node names")
            if len(set(n[1] for n in nodes)) != len(nodes):
                raise AddNodeException("There are duplicate node IPs")
            if os.getuid() != 0:
                raise AddNodeException(
                    "Need run add-node script with root privilege please.")
            duplicates = self.__check_existing(nodes)
            if duplicates:
                raise AddNodeException(
                    "Some nodes already exist in the cluster: " + ", ".join(duplicates))
        except ValueError:
            raise AddNodeException("the value of param nodes is wrong")
        except IndexError:
            raise AddNodeException(
                "error parse param nodes, needs like 'node2:192.168.77.22'")
        return nodes
    @classmethod
    def __check_existing(self, nodes):
        """Return the set of names/IPs in *nodes* already registered in etcd."""
        duplicates = set()
        output = check_output(['etcdctl', 'ls', '/lain/nodes/nodes'])
        for line in output.splitlines():
            key = line.split('/')[-1]
            name, ip, port = key.split(':')
            for node_name, node_ip in nodes:
                if node_name == name:
                    duplicates.add(node_name)
                elif node_ip == ip:
                    duplicates.add(node_ip)
        return duplicates
    @classmethod
    @arg('-p', '--playbooks', required=True)
    @arg('-t', '--target')
    @arg('nodename')
    def remove(self, nodename, target="", playbooks=""):
        """
        remove a node in lain, --target is only useful when swarm manager running on this node.
        """
        node = NodeInfo(nodename)
        # Fix: the previous revision built Node(target) — the CLI command
        # class — whose instances lack the .name/.ssh_port attributes that
        # drift_swarm_manager reads; a NodeInfo is what is needed here.
        target = NodeInfo(target) if target != "" else None
        key = "%s:%s:%s" % (node.name, node.ip, node.ssh_port)
        output = check_output(
            ['etcdctl', 'ls', '/lain/nodes/nodes'], stderr=STDOUT)
        if len(output.splitlines()) == 1:
            error("%s is the last node of lain, can not be removed" %
                  output.splitlines()[0].split('/')[-1])
            return
        check_output(['etcdctl', 'set', '/lain/nodes/removing/%s' %
                      key, ""], stderr=STDOUT)
        try:
            assert_etcd_member(node.name)  # check if the node is a etcd member
            info("Remove the lain node %s" % node.name)
            if not yes_or_no("Are you sure?", default='no', color=_yellow):
                raise(RemoveException("Action was canceled"))
            # restart a new swarm manager if a swarm mansger on this node
            drift_swarm_manager(playbooks, node, target)
            remove_node_containers(node.name)
            if run_removenode_ansible(playbooks):
                error("run remove node ansible failed")
                return
            # remove maintain for node
            self.maintain(node.name, True)
            check_call(['etcdctl', 'rm', '/lain/nodes/nodes/%s' %
                        key], stderr=STDOUT)  # remove the node from etcd
        except RemoveException as e:
            error(str(e))
        finally:
            check_output(
                ['etcdctl', 'rm', '/lain/nodes/removing/%s' % key], stderr=STDOUT)
        return
    @classmethod
    @arg('-p', '--playbooks', required=True)
    @arg('nodes', nargs='+')
    def clean(self, nodes, playbooks=""):
        """
        clean node will clean lain node, remove some useless images,
        each container on the node will retain at most 3 latest images on the node.
        """
        keys = []
        for node in nodes:
            node_info = NodeInfo(node)
            key = "%s:%s:%s" % (
                node_info.name, node_info.ip, node_info.ssh_port)
            check_output(['etcdctl', 'set', '/lain/nodes/clean/%s' % key, node_info.ip],
                         stderr=STDOUT)
            keys.append(key)
        run_cleannode_ansible(playbooks)
        # Fix: the previous revision removed only the *last* key, leaking
        # /lain/nodes/clean markers whenever more than one node was given.
        for key in keys:
            check_output(['etcdctl', 'rm', '/lain/nodes/clean/%s' % key])
    @classmethod
    @arg('nodename')
    @arg('-r', '--remove', help="whether removing deployment constraint on the specified node")
    def maintain(self, nodename, remove=False):
        """
        maintain node will disable or enable deployment onto the maintained node.
        """
        node = NodeInfo(nodename)
        base_url = "http://deployd.lain:9003/api/constraints"
        operator = "Remove" if remove else "Add"
        # Both branches target the same constraint resource; only the HTTP
        # verb differs (PATCH adds the constraint, DELETE lifts it).
        url = base_url + "?type=node&value=%s" % node.name
        if not remove:
            info("PATCH %s" % url)
            resp = requests.patch(url)
        else:
            info("DELETE %s" % url)
            resp = requests.delete(url)
        if resp.status_code >= 300:
            error("%s constraint on node %s fail: %s" %
                  (operator, node.name, resp.text))
        else:
            info("%s constraint on node %s success." % (operator, node.name))
    @classmethod
    def health(cls):
        """Run the node health checks (delegates to NodeHealth)."""
        health = NodeHealth()
        health.run()
    @classmethod
    @arg('nodes', nargs='+')
    @arg('-c', '--change-type', choices=['add', 'delete'], required=True)
    @arg('-l', '--labels', nargs='+', type=str, required=True,
         help='the labels to add, for example: k1=v1 k2=v2')
    @arg('-p', '--playbooks', required=True)
    def change_labels(self, nodes, change_type="", labels=[], playbooks=""):
        """
        change labels of nodes, add/delete operations are supported
        """
        # Validate and normalize "k=v" label arguments; reject malformed
        # entries and duplicate keys up front.
        normlized_labels = {}
        for x in labels:
            ys = x.split('=')
            if len(ys) != 2 or ys[0].strip() == '' or ys[1].strip() == '':
                error('{} is not $k=$v format'.format(x))
                sys.exit(1)
            key, value = ys[0].strip(), ys[1].strip()
            if key in normlized_labels:
                error('duplicate key {}'.format(key))
                sys.exit(1)
            normlized_labels[key] = value
        diff_labels = ['{}={}'.format(k, v)
                       for k, v in normlized_labels.items()]
        node_infos = {}
        try:
            for node in nodes:
                node_info = NodeInfo(node)
                if node_info.ip in node_infos:
                    error('duplicate node {}:{}'.
                          format(node_info.name, node_info.ip))
                    sys.exit(1)
                node_infos[node_info.ip] = node_info
        except Exception as e:
            error("Exception: {}.".format(e))
            sys.exit(1)
        try:
            # Mark each node under /lain/nodes/changing-labels so the
            # playbook knows which hosts to touch.
            for _, node_info in node_infos.items():
                key = "{}:{}:{}".format(node_info.name, node_info.ip,
                                        node_info.ssh_port)
                check_output(['etcdctl', 'set',
                              '/lain/nodes/changing-labels/{}'.format(key),
                              node_info.ip], stderr=STDOUT)
            run_change_labels_ansible(playbooks, change_type, diff_labels)
        except Exception as e:
            error("Exception: {}.".format(e))
            sys.exit(1)
        finally:
            # Always clear the markers, success or not.
            check_output(['etcdctl', 'rm', '--recursive',
                          '/lain/nodes/changing-labels'], stderr=STDOUT)
def run_addnode_ansible(args):
    """Run site.yaml against the new_nodes group to bootstrap the added nodes."""
    envs = dict(target='new_nodes',
                allow_restart_docker='yes',
                # this ensures the removal of existing key.json
                adding_node_mode='yes')
    if args.docker_device:
        envs['docker_device'] = args.docker_device
    if args.labels:
        envs['node_labels'] = args.labels
    return run_ansible_cmd(args.playbooks, envs, file_name='site.yaml')
def run_change_labels_ansible(playbooks_path, change_type, diff_labels):
    """Apply the node-change-labels role to nodes marked for label changes."""
    envs = dict(target='changing_labels_nodes',
                role='node-change-labels',
                change_type=change_type,
                diff_labels=diff_labels)
    return run_ansible_cmd(playbooks_path, envs)
def run_cleannode_ansible(playbooks_path):
    """Apply the node-clean role to the nodes marked under /lain/nodes/clean."""
    envs = dict(target='clean_nodes', role='node-clean')
    return run_ansible_cmd(playbooks_path, envs)
def run_removenode_ansible(playbooks_path):
    """Apply the remove-node role to the nodes marked under /lain/nodes/removing."""
    envs = dict(target='removing_nodes', role='remove-node')
    return run_ansible_cmd(playbooks_path, envs)
def drift_swarm_manager(playbooks_path, rm_node, target):
    """If *rm_node* hosts a swarm manager, move that role to *target*.

    No-op when rm_node is not registered under /lain/nodes/swarm-managers.
    Raises RemoveException when a target is required but missing.
    """
    is_swarm_manager, key = False, "%s:%s:%s" % (
        rm_node.name, rm_node.ip, rm_node.ssh_port)
    output = check_output(['etcdctl', 'ls', '/lain/nodes/swarm-managers'])
    for line in output.splitlines():
        if line.split('/')[-1] == key:
            is_swarm_manager = True
            break
    if not is_swarm_manager:
        return
    if not target:
        raise(RemoveException("%s is a swarm manager node,"
                              "target required to drift the swarm manager,"
                              "run `remove-node clear -t[--target] ...`" % rm_node.name))
    # Re-point the manager registration in etcd, then rerun the swarm role
    # so a manager process is started on the new node.
    # NOTE(review): the new key uses "<name>:<ssh_port>" (no ip), unlike the
    # "<name>:<ip>:<port>" keys elsewhere — confirm this is intentional.
    check_call(['etcdctl', 'rm', '/lain/nodes/swarm-managers/%s' % key])
    check_call(['etcdctl', 'set', '/lain/nodes/swarm-managers/%s:%s' %
                (target.name, target.ssh_port), ""])
    envs = dict()
    envs['target'] = 'nodes'
    envs['role'] = 'swarm'
    info('The removed node is a swarm manager, now start a swarm manager on another node.')
    run_ansible_cmd(playbooks_path, envs)
def remove_node_containers(nodename):
    """Ask deployd to drift containers off *nodename*, then poll until swarm
    reports no non-portal containers left on that node.
    """
    url = "http://deployd.lain:9003/api/nodes?node=%s" % nodename
    # Call deployd api
    info("DELETE %s" % url)
    resp = requests.delete(url)
    if resp.status_code >= 300:
        error("Deployd remove node api response a error, %s." % resp.text)
    # waiting for deployd complete
    print(">>>(need some minutes)Waiting for deployd drift %s's containers" % nodename)
    while True:
        try:
            # List swarm-scheduled containers on the node and filter out
            # portal containers, which are expected to stay.
            output = Popen(['docker', '-H', 'swarm.lain:2376', 'ps',
                            '-a', '-f', 'node=%s' % nodename, '-f',
                            'label=com.docker.swarm.id'], stdout=PIPE)
            exclude_portals = check_output(
                ['grep', '-v', '.portal.portal'], stdin=output.stdout)
            # The -1 presumably discounts the `docker ps` header row — confirm.
            containers = len(exclude_portals.splitlines()) - 1
            if containers > 0:
                warn("%d containers in node %s need to drift" %
                     (containers, nodename))
                time.sleep(3)
            else:
                info("all containers in node %s drifted successed" % nodename)
                break
        except Exception as e:
            # grep exits non-zero when every line is filtered out, which makes
            # check_output raise; treat any failure as transient and retry.
            info('check containers info with err:%s' % e)
            time.sleep(3)
def assert_etcd_member(rm_node):
    """Raise RemoveException if *rm_node* is still a member of the etcd cluster."""
    member_list = check_output(['etcdctl', 'member', 'list'])
    node_name = rm_node.split(':')[0]
    for member_line in member_list.splitlines():
        # Each line looks like "<id>: name=<name> peerURLs=..." — pull out the name.
        member_name = member_line.split()[1].split('=')[1]
        if member_name == node_name:
            raise RemoveException("%s is a etcd member, you should remove it from "
                                  "etcd cluster before remove it from lain" % rm_node)
def copy_public_key(ip):
    """Install lain's public SSH key on root@*ip* via ssh-copy-id.

    Raises CalledProcessError if ssh-copy-id fails.
    """
    cmd = ['sudo', 'ssh-copy-id', '-i', '/root/.ssh/lain.pub']
    cmd += ['root@%s' % ip]
    # Fix: the previous call passed two positional arguments to info(),
    # which everywhere else in this module takes a single pre-formatted
    # string; format the message first for consistency.
    info('run cmd: %s' % ' '.join(cmd))
    check_output(cmd)
| 37.14059 | 124 | 0.550095 |
from argh import CommandError
from argh.decorators import arg, expects_obj
from lain_admin_cli.helpers import Node as NodeInfo
from lain_admin_cli.helpers import (
yes_or_no, info, warn, error, RemoveException, AddNodeException, _yellow,
TwoLevelCommandBase, run_ansible_cmd
)
from subprocess import check_output, check_call, Popen, STDOUT, PIPE
import requests
import signal
import json
import os
import sys
import time
from lain_admin_cli.utils.health import NodeHealth
def sigint_handler(signum, frame):
pass
signal.signal(signal.SIGTERM, sigint_handler)
signal.signal(signal.SIGINT, sigint_handler)
class Node(TwoLevelCommandBase):
@classmethod
def subcommands(self):
return [self.list, self.inspect, self.add, self.remove, self.clean,
self.maintain, self.health, self.change_labels]
@classmethod
def namespace(self):
return "node"
@classmethod
def help_message(self):
return "lain node operations"
@classmethod
def __list_node_group(self, group):
output = check_output(['etcdctl', 'ls', '/lain/nodes/%s' % group])
nodes = {}
for line in output.splitlines():
tmp = NodeInfo()
tmp.name, tmp.ip, tmp.ssh_port = line.split('/')[-1].split(':')
nodes[tmp.name] = tmp
return nodes
@classmethod
def list(self):
"""list all the nodes(name and ip) in lain"""
check_output(['etcdctl', 'ls', '/lain/nodes/nodes'])
nodes = self.__list_node_group('nodes')
min_width = 2 + max(8, *(len(node.name) for node in nodes.values()))
row_fmt = "%-{min_width}s%s".format(min_width=min_width)
print row_fmt % ("NODENAME", "IP")
for node in nodes.values():
print row_fmt % (node.name, node.ip)
@classmethod
@arg('node')
def inspect(self, node):
"""
inspect a node, nodename or nodeip should be given.
info is got from etcd.
"""
check_output(['etcdctl', 'ls', '/lain/nodes/nodes'])
all_nodes = self.__list_node_group('nodes')
for item in all_nodes.values():
if node == item.name or node == item.ip:
etcd_members = self.__list_node_group('etcd-members')
swarm_members = self.__list_node_group('swarm-managers')
managers = self.__list_node_group('managers')
print json.dumps({
"name": item.name,
"ip": item.ip,
"ssh_port": item.ssh_port,
"docker_device": item.docker_device,
"is_lain_managers": node in managers,
"is_etcd_member": node in etcd_members,
"is_swarm_manager": node in swarm_members,
"labels": self.__get_node_labels(item.ip),
}, indent=4)
return
raise CommandError("Unkown node name %s" % node)
@classmethod
def __get_node_labels(self, node_ip):
r = requests.get('http://{}:2375/info'.format(node_ip), timeout=5)
labels = r.json()['Labels']
if labels is None:
return []
return labels
@classmethod
@expects_obj
@arg('nodes', nargs='+', help="the nodes need to add [example: node2:192.168.77.22]")
@arg('-p', '--playbooks', required=True)
@arg('-P', '--ssh-port', default=22, help="SSH port of the node to be added")
@arg('-l', '--labels', nargs='+', default="", help="The labels added to docker daemon in the node. [example: disk=ssd]")
@arg('-d', '--docker-device', default="", help="The block device use for docker's devicemapper storage."
"docker will run on loop-lvm if this is not given, which is not proposed")
def add(self, args):
"""add a new node to lain"""
try:
nodes = dict()
nodes = self.__check_nodes_validation(args.nodes)
port = args.ssh_port
for name, ip in nodes:
check_call(['etcdctl', 'set',
'/lain/nodes/new/%s:%s:%s' % (name, ip, port),
ip])
copy_public_key(ip)
if run_addnode_ansible(args):
error("run add node ansible failed")
return
for name, ip in nodes:
node_data = json.dumps({'name': name,
'ip': ip,
'ssh_port': port,
'docker_device': args.docker_device})
check_call(['etcdctl', 'set',
'/lain/nodes/nodes/%s:%s:%s' % (name, ip, port),
node_data])
except Exception as e:
error(str(e))
finally:
for name, ip in nodes:
check_call(
['etcdctl', 'rm', '/lain/nodes/new/%s:%s:%s' % (name, ip, port)])
@classmethod
def __check_nodes_validation(self, nodes):
try:
nodes = [x.split(':') for x in nodes]
if len(set(n[0] for n in nodes)) != len(nodes):
raise AddNodeException("There are duplicate node names")
if len(set(n[1] for n in nodes)) != len(nodes):
raise AddNodeException("There are duplicate node IPs")
if os.getuid() != 0:
raise AddNodeException(
"Need run add-node script with root privilege please.")
duplicates = self.__check_existing(nodes)
if duplicates:
raise AddNodeException(
"Some nodes already exist in the cluster: " + ", ".join(duplicates))
except ValueError:
raise AddNodeException("the value of param nodes is wrong")
except IndexError:
raise AddNodeException(
"error parse param nodes, needs like 'node2:192.168.77.22'")
return nodes
@classmethod
def __check_existing(self, nodes):
duplicates = set()
output = check_output(['etcdctl', 'ls', '/lain/nodes/nodes'])
for line in output.splitlines():
key = line.split('/')[-1]
name, ip, port = key.split(':')
for node_name, node_ip in nodes:
if node_name == name:
duplicates.add(node_name)
elif node_ip == ip:
duplicates.add(node_ip)
return duplicates
@classmethod
@arg('-p', '--playbooks', required=True)
@arg('-t', '--target')
@arg('nodename')
def remove(self, nodename, target="", playbooks=""):
"""
remove a node in lain, --target is only useful when swarm manager running on this node.
"""
node = NodeInfo(nodename)
target = Node(target) if target != "" else None
key = "%s:%s:%s" % (node.name, node.ip, node.ssh_port)
output = check_output(
['etcdctl', 'ls', '/lain/nodes/nodes'], stderr=STDOUT)
if len(output.splitlines()) == 1:
error("%s is the last node of lain, can not be removed" %
output.splitlines()[0].split('/')[-1])
return
check_output(['etcdctl', 'set', '/lain/nodes/removing/%s' %
key, ""], stderr=STDOUT)
try:
assert_etcd_member(node.name) # check if the node is a etcd member
info("Remove the lain node %s" % node.name)
if not yes_or_no("Are you sure?", default='no', color=_yellow):
raise(RemoveException("Action was canceled"))
# restart a new swarm manager if a swarm mansger on this node
drift_swarm_manager(playbooks, node, target)
remove_node_containers(node.name)
if run_removenode_ansible(playbooks):
error("run remove node ansible failed")
return
# remove maintain for node
self.maintain(node.name, True)
check_call(['etcdctl', 'rm', '/lain/nodes/nodes/%s' %
key], stderr=STDOUT) # remove the node from etcd
except RemoveException as e:
error(str(e))
finally:
check_output(
['etcdctl', 'rm', '/lain/nodes/removing/%s' % key], stderr=STDOUT)
return
@classmethod
@arg('-p', '--playbooks', required=True)
@arg('nodes', nargs='+')
def clean(self, nodes, playbooks=""):
"""
clean node will clean lain node, remove some useless images,
each container on the node will retain at most 3 latest images on the node.
"""
for node in nodes:
node_info = NodeInfo(node)
key = "%s:%s:%s" % (
node_info.name, node_info.ip, node_info.ssh_port)
check_output(['etcdctl', 'set', '/lain/nodes/clean/%s' % key, node_info.ip],
stderr=STDOUT)
run_cleannode_ansible(playbooks)
check_output(['etcdctl', 'rm', '/lain/nodes/clean/%s' % key])
@classmethod
@arg('nodename')
@arg('-r', '--remove', help="whether removing deployment constraint on the specified node")
def maintain(self, nodename, remove=False):
"""
maintain node will disable or enable deployment onto the maintained node.
"""
node = NodeInfo(nodename)
base_url = "http://deployd.lain:9003/api/constraints"
operator = "Remove" if remove else "Add"
if not remove:
url = base_url + "?type=node&value=%s" % node.name
info("PATCH %s" % url)
resp = requests.patch(url)
else:
url = base_url + "?type=node&value=%s" % node.name
info("DELETE %s" % url)
resp = requests.delete(url)
if resp.status_code >= 300:
error("%s constraint on node %s fail: %s" %
(operator, node.name, resp.text))
else:
info("%s constraint on node %s success." % (operator, node.name))
@classmethod
def health(cls):
health = NodeHealth()
health.run()
@classmethod
@arg('nodes', nargs='+')
@arg('-c', '--change-type', choices=['add', 'delete'], required=True)
@arg('-l', '--labels', nargs='+', type=str, required=True,
help='the labels to add, for example: k1=v1 k2=v2')
@arg('-p', '--playbooks', required=True)
def change_labels(self, nodes, change_type="", labels=[], playbooks=""):
"""
change labels of nodes, add/delete operations are supported
"""
normlized_labels = {}
for x in labels:
ys = x.split('=')
if len(ys) != 2 or ys[0].strip() == '' or ys[1].strip() == '':
error('{} is not $k=$v format'.format(x))
sys.exit(1)
key, value = ys[0].strip(), ys[1].strip()
if key in normlized_labels:
error('duplicate key {}'.format(key))
sys.exit(1)
normlized_labels[key] = value
diff_labels = ['{}={}'.format(k, v)
for k, v in normlized_labels.items()]
node_infos = {}
try:
for node in nodes:
node_info = NodeInfo(node)
if node_info.ip in node_infos:
error('duplicate node {}:{}'.
format(node_info.name, node_info.ip))
sys.exit(1)
node_infos[node_info.ip] = node_info
except Exception as e:
error("Exception: {}.".format(e))
sys.exit(1)
try:
for _, node_info in node_infos.items():
key = "{}:{}:{}".format(node_info.name, node_info.ip,
node_info.ssh_port)
check_output(['etcdctl', 'set',
'/lain/nodes/changing-labels/{}'.format(key),
node_info.ip], stderr=STDOUT)
run_change_labels_ansible(playbooks, change_type, diff_labels)
except Exception as e:
error("Exception: {}.".format(e))
sys.exit(1)
finally:
check_output(['etcdctl', 'rm', '--recursive',
'/lain/nodes/changing-labels'], stderr=STDOUT)
def run_addnode_ansible(args):
envs = {
'target': 'new_nodes',
'allow_restart_docker': 'yes',
'adding_node_mode': 'yes' # this ensures the removal of existing key.json
}
if args.docker_device:
envs['docker_device'] = args.docker_device
if args.labels:
envs['node_labels'] = args.labels
return run_ansible_cmd(args.playbooks, envs, file_name='site.yaml')
def run_change_labels_ansible(playbooks_path, change_type, diff_labels):
envs = {
'target': 'changing_labels_nodes',
'role': 'node-change-labels',
'change_type': change_type,
'diff_labels': diff_labels
}
return run_ansible_cmd(playbooks_path, envs)
def run_cleannode_ansible(playbooks_path):
envs = {
'target': 'clean_nodes',
'role': 'node-clean'
}
return run_ansible_cmd(playbooks_path, envs)
def run_removenode_ansible(playbooks_path):
envs = {
'target': 'removing_nodes',
'role': 'remove-node',
}
return run_ansible_cmd(playbooks_path, envs)
def drift_swarm_manager(playbooks_path, rm_node, target):
is_swarm_manager, key = False, "%s:%s:%s" % (
rm_node.name, rm_node.ip, rm_node.ssh_port)
output = check_output(['etcdctl', 'ls', '/lain/nodes/swarm-managers'])
for line in output.splitlines():
if line.split('/')[-1] == key:
is_swarm_manager = True
break
if not is_swarm_manager:
return
if not target:
raise(RemoveException("%s is a swarm manager node,"
"target required to drift the swarm manager,"
"run `remove-node clear -t[--target] ...`" % rm_node.name))
check_call(['etcdctl', 'rm', '/lain/nodes/swarm-managers/%s' % key])
check_call(['etcdctl', 'set', '/lain/nodes/swarm-managers/%s:%s' %
(target.name, target.ssh_port), ""])
envs = dict()
envs['target'] = 'nodes'
envs['role'] = 'swarm'
info('The removed node is a swarm manager, now start a swarm manager on another node.')
run_ansible_cmd(playbooks_path, envs)
def remove_node_containers(nodename):
url = "http://deployd.lain:9003/api/nodes?node=%s" % nodename
# Call deployd api
info("DELETE %s" % url)
resp = requests.delete(url)
if resp.status_code >= 300:
error("Deployd remove node api response a error, %s." % resp.text)
# waiting for deployd complete
print(">>>(need some minutes)Waiting for deployd drift %s's containers" % nodename)
while True:
try:
output = Popen(['docker', '-H', 'swarm.lain:2376', 'ps',
'-a', '-f', 'node=%s' % nodename, '-f',
'label=com.docker.swarm.id'], stdout=PIPE)
exclude_portals = check_output(
['grep', '-v', '.portal.portal'], stdin=output.stdout)
containers = len(exclude_portals.splitlines()) - 1
if containers > 0:
warn("%d containers in node %s need to drift" %
(containers, nodename))
time.sleep(3)
else:
info("all containers in node %s drifted successed" % nodename)
break
except Exception as e:
info('check containers info with err:%s' % e)
time.sleep(3)
def assert_etcd_member(rm_node):
    """Raise RemoveException if *rm_node* is still a member of the etcd
    cluster.

    The member must be removed from etcd itself before lain will remove
    the node.
    """
    node_name = rm_node.split(':')[0]
    member_list = check_output(['etcdctl', 'member', 'list'])
    for member_line in member_list.splitlines():
        # The second whitespace-separated field is parsed as name=<value>.
        registered_name = member_line.split()[1].split('=')[1]
        if registered_name == node_name:
            raise RemoveException("%s is a etcd member, you should remove it from "
                                  "etcd cluster before remove it from lain" % rm_node)
def copy_public_key(ip):
    """Install the lain public key onto root@*ip* via ssh-copy-id (sudo)."""
    cmd = ['sudo', 'ssh-copy-id', '-i', '/root/.ssh/lain.pub', 'root@%s' % ip]
    info('run cmd: %s', ' '.join(cmd))
    check_output(cmd)
| false | true |
f7fd517df867da1766ccaf1ad8a6cc764b67f22e | 1,305 | py | Python | Advance_Python/Chapter 13. asyncio/5. thread_asyncio.py | pyforspider/LearningLog | ac5988d7fbb0d07d6e7485f9050250af5bcba089 | [
"MIT"
] | null | null | null | Advance_Python/Chapter 13. asyncio/5. thread_asyncio.py | pyforspider/LearningLog | ac5988d7fbb0d07d6e7485f9050250af5bcba089 | [
"MIT"
] | 18 | 2020-02-12T01:18:12.000Z | 2022-03-12T00:42:15.000Z | Advance_Python/Chapter 13. asyncio/5. thread_asyncio.py | pyforspider/LearningLog | ac5988d7fbb0d07d6e7485f9050250af5bcba089 | [
"MIT"
] | null | null | null | # 使用多线程: 在协程中集成阻塞io
import asyncio
import socket
import time
from concurrent.futures.thread import ThreadPoolExecutor
from urllib.parse import urlparse
def get_url(url):
    """Fetch *url* over a blocking raw socket and return the response body.

    Deliberately uses blocking socket I/O: it is meant to be handed to a
    thread pool via loop.run_in_executor() by the asyncio driver below.

    :param url: an http:// URL; an explicit port in the URL is honored.
    :return: the decoded response body (everything after the header block).
    """
    parsed = urlparse(url)
    path = parsed.path or "/"      # empty path means the site root
    host = parsed.netloc           # host[:port], echoed in the Host header
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # Bug fix: connect with hostname/port split out -- connecting to
        # (netloc, 80) breaks for URLs that carry an explicit port.
        client.connect((parsed.hostname, parsed.port or 80))
        # sendall() retries on partial writes, unlike a single send().
        # Connection:close makes the server close the socket when done,
        # which terminates the recv loop below.
        client.sendall(
            "GET {} HTTP/1.1\r\nHost:{}\r\nConnection:close\r\n\r\n".format(
                path, host).encode("utf8"))
        data = b""
        while True:
            chunk = client.recv(1024)
            if not chunk:
                break              # peer closed: response is complete
            data += chunk
    finally:
        # Always release the socket, even if connect/recv raises.
        client.close()
    # Body is everything after the blank line separating the headers.
    body = data.decode("utf-8").split("\r\n\r\n")[1]
    print(body)
    return body
if __name__ == '__main__':
    # Driver: fan blocking get_url() calls out over a thread pool so the
    # asyncio event loop itself never blocks on socket I/O.
    started = time.time()
    loop = asyncio.get_event_loop()
    # Default-sized pool; pass max_workers to cap the thread count.
    pool = ThreadPoolExecutor()
    tasks = [
        # run_in_executor(executor, func, *args) wraps the blocking call
        # in an awaitable task object.
        loop.run_in_executor(
            pool, get_url, "http://shop.projectsedu.com/goods/{}/".format(i))
        for i in range(2, 20)
    ]
    loop.run_until_complete(asyncio.wait(tasks))
    print("last time :{}".format(time.time() - started))
| 25.096154 | 104 | 0.696552 |
import asyncio
import socket
import time
from concurrent.futures.thread import ThreadPoolExecutor
from urllib.parse import urlparse
def get_url(url):
    """Fetch *url* with a blocking raw socket and return the response body.

    NOTE(review): connects to port 80 unconditionally, so a URL carrying an
    explicit port would fail -- confirm callers only pass plain http URLs.
    """
    url = urlparse(url)
    path = url.path
    host = url.netloc
    if path == "":
        path = "/"
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, 80))
    # Minimal HTTP/1.1 request; Connection:close makes the server close
    # the socket when done, which ends the recv loop below.
    client.send("GET {} HTTP/1.1\r\nHost:{}\r\nConnection:close\r\n\r\n".format(path, host).encode("utf8"))
    data = b""
    while True:
        d = client.recv(1024)
        if d:
            data += d
        else:
            break  # peer closed the connection: response complete
    client.close()
    # Body is everything after the blank line separating the headers.
    print(data.decode("utf-8").split("\r\n\r\n")[1])
    return data.decode("utf-8").split("\r\n\r\n")[1]
if __name__ == '__main__':
    # Driver: fan the blocking get_url() calls out over a thread pool so
    # they run concurrently under the asyncio event loop.
    start_time = time.time()
    loop = asyncio.get_event_loop()
    executor = ThreadPoolExecutor()
    tasks = []
    for i in range(2, 20):
        url = "http://shop.projectsedu.com/goods/{}/".format(i)
        # run_in_executor() returns an awaitable task for the blocking call.
        task = loop.run_in_executor(executor, get_url, url)
        tasks.append(task)
    loop.run_until_complete(asyncio.wait(tasks))
    print("last time :{}".format(time.time()-start_time))
| true | true |
f7fd5247a8ab3f3bdbbbda1cef1fc41df8863351 | 556 | py | Python | day-06/part-2/jon.py | lypnol/adventofcode-2021 | 8ba277d698e8c59ca9cd554acc135473f5964b87 | [
"MIT"
] | 6 | 2021-11-29T15:32:27.000Z | 2021-12-10T12:24:26.000Z | day-06/part-2/jon.py | lypnol/adventofcode-2021 | 8ba277d698e8c59ca9cd554acc135473f5964b87 | [
"MIT"
] | 9 | 2021-11-29T15:38:04.000Z | 2021-12-13T14:54:16.000Z | day-06/part-2/jon.py | lypnol/adventofcode-2021 | 8ba277d698e8c59ca9cd554acc135473f5964b87 | [
"MIT"
] | 3 | 2021-12-02T19:11:44.000Z | 2021-12-22T20:52:47.000Z | from tool.runners.python import SubmissionPy
class JonSubmission(SubmissionPy):
    def run(self, s):
        """Count lanternfish after 256 days (AoC 2021 day 6, part 2).

        Keeps one counter per timer value (0..8) instead of one entry per
        fish, so each simulated day costs O(9) work.
        """
        counts = [0] * 9
        for timer in s.strip().split(","):
            counts[int(timer)] += 1
        head = 0  # circular index of today's timer-0 bucket
        for _ in range(256):
            # Spawning fish re-enter at timer 6 ((head+7) mod 9 after the
            # rotation); their offspring land at timer 8, which is exactly
            # where head's old slot ends up -- so only one bucket changes.
            counts[(head + 7) % 9] += counts[head]
            head = (head + 1) % 9
        return sum(counts)
def test_jon():
    """
    Run `python -m pytest ./day-06/part-2/jon.py` to test the submission.
    """
    # Sample from the puzzle statement: 5 fish grow to 26984457539 in 256 days.
    sample_input = "3,4,3,1,2"
    assert JonSubmission().run(sample_input) == 26984457539
| 17.935484 | 73 | 0.476619 | from tool.runners.python import SubmissionPy
class JonSubmission(SubmissionPy):
    def run(self, s):
        """Solve AoC 2021 day 6 part 2: lanternfish count after 256 days.

        Buckets fish by timer value (0..8) and rotates a circular index
        instead of simulating individual fish.
        """
        l = [0]*9
        for x in s.strip().split(","):
            l[int(x)] += 1
        shift = 0  # circular index of the bucket whose timer is 0 today
        for _ in range(256):
            # shift-2 is (shift+7) mod 9 via negative indexing: spawning
            # fish reset to timer 6, while their offspring land where the
            # old bucket ends up (timer 8) after the rotation.
            l[shift-2] += l[shift]
            shift = (shift+1) % 9
        return sum(l)
def test_jon():
    """Check the submission against the puzzle's sample input."""
    assert (
        JonSubmission().run(
            """
3,4,3,1,2
""".strip()
        )
        == 26984457539
    )
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.