code
stringlengths
22
1.05M
apis
listlengths
1
3.31k
extract_api
stringlengths
75
3.25M
""" TODO: What kind of syntax is handled in the Preview "language" type? Raw? """ import keyword import wx import wx.stc as stc if wx.Platform == '__WXMSW__': faces = { 'times': 'Times New Roman', 'mono' : 'Courier New', 'helv' : 'Arial', 'other': 'Comic Sans MS', 'size' : 10, 'size2': 8, } elif wx.Platform == '__WXMAC__': faces = { 'times': 'Times New Roman', 'mono' : 'Monaco', 'helv' : 'Arial', 'other': 'Comic Sans MS', 'size' : 12, 'size2': 10, } else: faces = { 'times': 'Times', 'mono' : 'Courier', 'helv' : 'Helvetica', 'other': 'new century schoolbook', 'size' : 12, 'size2': 10, } class StyledTextCtrl(stc.StyledTextCtrl): """docstring for StyledTextCtrl""" fold_symbols = 2 def __init__(self, parent, ID, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0): stc.StyledTextCtrl.__init__(self, parent, ID, pos, size, style) def Lexer(self, lexer): # Non-namespace method if lexer == 'python': self.__Python__() elif lexer == 'C++': self.__Cpp__() elif lexer == 'XRC': self.__XRC__() elif lexer == 'lisp': self.__Lisp__() elif lexer == 'perl': self.__Perl__() elif lexer == 'preview': # Raw code? self.__Preview__() def __Python__(self): self.SetLexer(stc.STC_LEX_PYTHON) self.CmdKeyAssign(ord('B'), stc.STC_SCMOD_CTRL, stc.STC_CMD_ZOOMIN) self.CmdKeyAssign(ord('N'), stc.STC_SCMOD_CTRL, stc.STC_CMD_ZOOMOUT) self.SetLexer(stc.STC_LEX_PYTHON) self.SetKeyWords(0, " ".join(keyword.kwlist)) self.SetProperty("fold", "1") self.SetProperty("tab.timmy.whinge.level", "1") self.SetMargins(0,0) self.SetViewWhiteSpace(False) # self.SetBufferedDraw(False) # self.SetViewEOL(True) # self.SetEOLMode(stc.STC_EOL_CRLF) # self.SetUseAntiAliasing(True) self.SetEdgeMode(stc.STC_EDGE_BACKGROUND) self.SetEdgeColumn(78) # Setup a margin to hold fold markers # self.SetFoldFlags(16) ### WHAT IS THIS VALUE? WHAT ARE THE OTHER FLAGS? DOES IT MATTER? 
self.SetMarginType(1, stc.STC_MARGIN_NUMBER) self.SetMarginMask(1, 0) self.SetMarginWidth(1, 20) self.SetMarginSensitive(2, True) if self.fold_symbols == 0: # Arrow pointing right for contracted folders, arrow pointing down for expanded self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPEN, stc.STC_MARK_ARROWDOWN, "black", "black") self.MarkerDefine(stc.STC_MARKNUM_FOLDER, stc.STC_MARK_ARROW, "black", "black") self.MarkerDefine(stc.STC_MARKNUM_FOLDERSUB, stc.STC_MARK_EMPTY, "black", "black") self.MarkerDefine(stc.STC_MARKNUM_FOLDERTAIL, stc.STC_MARK_EMPTY, "black", "black") self.MarkerDefine(stc.STC_MARKNUM_FOLDEREND, stc.STC_MARK_EMPTY, "white", "black") self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPENMID, stc.STC_MARK_EMPTY, "white", "black") self.MarkerDefine(stc.STC_MARKNUM_FOLDERMIDTAIL, stc.STC_MARK_EMPTY, "white", "black") elif self.fold_symbols == 1: # Plus for contracted folders, minus for expanded self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPEN, stc.STC_MARK_MINUS, "white", "black") self.MarkerDefine(stc.STC_MARKNUM_FOLDER, stc.STC_MARK_PLUS, "white", "black") self.MarkerDefine(stc.STC_MARKNUM_FOLDERSUB, stc.STC_MARK_EMPTY, "white", "black") self.MarkerDefine(stc.STC_MARKNUM_FOLDERTAIL, stc.STC_MARK_EMPTY, "white", "black") self.MarkerDefine(stc.STC_MARKNUM_FOLDEREND, stc.STC_MARK_EMPTY, "white", "black") self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPENMID, stc.STC_MARK_EMPTY, "white", "black") self.MarkerDefine(stc.STC_MARKNUM_FOLDERMIDTAIL, stc.STC_MARK_EMPTY, "white", "black") elif self.fold_symbols == 2: # Like a flattened tree control using circular headers and curved joins self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPEN, stc.STC_MARK_CIRCLEMINUS, "white", "#404040") self.MarkerDefine(stc.STC_MARKNUM_FOLDER, stc.STC_MARK_CIRCLEPLUS, "white", "#404040") self.MarkerDefine(stc.STC_MARKNUM_FOLDERSUB, stc.STC_MARK_VLINE, "white", "#404040") self.MarkerDefine(stc.STC_MARKNUM_FOLDERTAIL, stc.STC_MARK_LCORNERCURVE, "white", "#404040") self.MarkerDefine(stc.STC_MARKNUM_FOLDEREND, 
stc.STC_MARK_CIRCLEPLUSCONNECTED, "white", "#404040") self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPENMID, stc.STC_MARK_CIRCLEMINUSCONNECTED, "white", "#404040") self.MarkerDefine(stc.STC_MARKNUM_FOLDERMIDTAIL, stc.STC_MARK_TCORNERCURVE, "white", "#404040") elif self.fold_symbols == 3: # Like a flattened tree control using square headers self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPEN, stc.STC_MARK_BOXMINUS, "white", "#808080") self.MarkerDefine(stc.STC_MARKNUM_FOLDER, stc.STC_MARK_BOXPLUS, "white", "#808080") self.MarkerDefine(stc.STC_MARKNUM_FOLDERSUB, stc.STC_MARK_VLINE, "white", "#808080") self.MarkerDefine(stc.STC_MARKNUM_FOLDERTAIL, stc.STC_MARK_LCORNER, "white", "#808080") self.MarkerDefine(stc.STC_MARKNUM_FOLDEREND, stc.STC_MARK_BOXPLUSCONNECTED, "white", "#808080") self.MarkerDefine(stc.STC_MARKNUM_FOLDEROPENMID, stc.STC_MARK_BOXMINUSCONNECTED, "white", "#808080") self.MarkerDefine(stc.STC_MARKNUM_FOLDERMIDTAIL, stc.STC_MARK_TCORNER, "white", "#808080") #self.Bind(stc.EVT_STC_UPDATEUI, self.OnUpdateUI) #self.Bind(stc.EVT_STC_MARGINCLICK, self.OnMarginClick) #self.Bind(wx.EVT_KEY_DOWN, self.OnKeyPressed) # Make some styles, The lexer defines what each style is used for, we # just have to define what each style looks like. This set is adapted from # Scintilla sample property files. 
# Global default styles for all languages self.StyleSetSpec(stc.STC_STYLE_DEFAULT, "face:%(helv)s,size:%(size)d" % faces) self.StyleClearAll() # Reset all to be like the default # Global default styles for all languages self.StyleSetSpec(stc.STC_STYLE_DEFAULT, "face:%(helv)s,size:%(size)d" % faces) self.StyleSetSpec(stc.STC_STYLE_LINENUMBER, "back:#C0C0C0,face:%(helv)s,size:%(size2)d" % faces) self.StyleSetSpec(stc.STC_STYLE_CONTROLCHAR, "face:%(other)s" % faces) self.StyleSetSpec(stc.STC_STYLE_BRACELIGHT, "fore:#FFFFFF,back:#0000FF,bold") self.StyleSetSpec(stc.STC_STYLE_BRACEBAD, "fore:#000000,back:#FF0000,bold") # Python styles # Default self.StyleSetSpec(stc.STC_P_DEFAULT, "fore:#000000,face:%(helv)s,size:%(size)d" % faces) # Comments self.StyleSetSpec(stc.STC_P_COMMENTLINE, "fore:#007F00,face:%(other)s,size:%(size)d" % faces) # Number self.StyleSetSpec(stc.STC_P_NUMBER, "fore:#007F7F,size:%(size)d" % faces) # String self.StyleSetSpec(stc.STC_P_STRING, "fore:#7F007F,face:%(helv)s,size:%(size)d" % faces) # Single quoted string self.StyleSetSpec(stc.STC_P_CHARACTER, "fore:#7F007F,face:%(helv)s,size:%(size)d" % faces) # Keyword self.StyleSetSpec(stc.STC_P_WORD, "fore:#00007F,bold,size:%(size)d" % faces) # Triple quotes self.StyleSetSpec(stc.STC_P_TRIPLE, "fore:#7F0000,size:%(size)d" % faces) # Triple double quotes self.StyleSetSpec(stc.STC_P_TRIPLEDOUBLE, "fore:#7F0000,size:%(size)d" % faces) # Class name definition self.StyleSetSpec(stc.STC_P_CLASSNAME, "fore:#0000FF,bold,underline,size:%(size)d" % faces) # Function or method name definition self.StyleSetSpec(stc.STC_P_DEFNAME, "fore:#007F7F,bold,size:%(size)d" % faces) # Operators self.StyleSetSpec(stc.STC_P_OPERATOR, "bold,size:%(size)d" % faces) # Identifiers self.StyleSetSpec(stc.STC_P_IDENTIFIER, "fore:#000000,face:%(helv)s,size:%(size)d" % faces) # Comment-blocks self.StyleSetSpec(stc.STC_P_COMMENTBLOCK, "fore:#7F7F7F,size:%(size)d" % faces) # End of line where string is not closed 
self.StyleSetSpec(stc.STC_P_STRINGEOL, "fore:#000000,face:%(mono)s,back:#E0C0E0,eol,size:%(size)d" % faces) self.SetCaretForeground("BLUE") def __Cpp__(self): self.StyleClearAll() self.SetLexer(stc.STC_LEX_CPP) self.StyleSetSpec(stc.STC_C_COMMENT, 'fore:#408060,size:%(size)d' % faces) self.StyleSetSpec(stc.STC_C_COMMENTLINE, 'fore:#408060,size:%(size)d' % faces) self.StyleSetSpec(stc.STC_C_COMMENTDOC, 'fore:#408060,size:%(size)d' % faces) self.StyleSetSpec(stc.STC_C_NUMBER, 'fore:#0076AE,size:%(size)d' % faces) self.StyleSetSpec(stc.STC_C_WORD, 'bold,fore:#800056,size:%(size)d' % faces) self.StyleSetSpec(stc.STC_C_STRING, 'fore:#2a00ff,size:%(size)d' % faces) self.StyleSetSpec(stc.STC_C_PREPROCESSOR, 'bold,fore:#800056,size:%(size)d' % faces) self.StyleSetSpec(stc.STC_C_OPERATOR, 'bold,size:%(size)d' % faces) self.StyleSetSpec(stc.STC_C_STRINGEOL, 'back:#FFD5FF,size:%(size)d' % faces) def __XRC__(self): pass def __Lisp__(self): self.StyleClearAll() self.SetLexer(stc.STC_LEX_LISP) self.SetKeyWords(0, "abort abs access acons acos acosh add-method adjoin " "adjust-array adjustable-array-p alist allocate-instance " "alpha-char-p alphanumericp and append apply applyhook apropos " "apropos-list aref arithmetic-error arithmetic-error-operands " "arithmetic-error-operation array array-dimension " "array-dimension-limit array-dimensions array-displacement " "array-element-type array-has-fill-pointer-p " "array-in-bounds-p array-rank array-rank-limit " "array-row-major-index array-total-size " "array-total-size-limit arrayp ash asin asinh assert assoc " "assoc-if assoc-if-not atan atanh atom backquote baktrace " "base-char base-string bignum bignums bit bit-and bit-andc1 " "bit-andc2 bit-eqv bit-ior bit-nand bit-nor bit-not bit-orc1 " "bit-orc2 bit-vector bit-vector-p bit-xor block boole boole-1 " "boole-2 boole-and boole-andc1 boole-andc2 boole-c1 boole-c2 " "boole-clr boole-eqv boole-ior boole-nand boole-nor boole-orc1 " "boole-orc2 boole-set boole-xor boolean both-case-p 
boundp " "break broadcast-stream broadcast-stream-streams " "built-in-class butlast byte byte-position byte-size caaaar " "caaadr caaar caadar caaddr caadr caar cadaar cadadr cadar " "caddar cadddr caddr cadr call-arguments-limit call-method " "call-next-method capitalize car case catch ccase cdaaar " "cdaadr cdaar cdadar cdaddr cdadr cdar cddaar cddadr cddar " "cdddar cddddr cdddr cddr cdr ceil-error ceil-error-name " "ceiling cerror change-class char char-bit char-bits " "char-bits-limit char-code char-code-limit char-control-bit " "char-downcase char-equal char-font char-font-limit " "char-greaterp char-hyper-bit char-int char-lessp " "char-meta-bit char-name char-not-equal char-not-greaterp " "char-not-lessp char-super-bit char-upcase char/= char<= char= " "char>= character characterp check-type cirhash cis class " "class-name class-of clear-input clear-output close code-char " "coerce commonp compilation-speed compile compile-file " "compile-file-pathname compiled-function compiled-function-p " "compiler-let compiler-macro compiler-macro-function " "complement complex complexp compute-applicable-methods " "compute-restarts concatenate concatenated-stream " "concatenated-stream-streams cond condition conjugate cons " "consp constantly constantp continue control-error copy " "copy-list copy-pprint-dispatch copy-readtable copy-seq " "copy-structure copy-symbol copy-tree cos cosh count count-if " "count-if-not ctypecase debug decf declaim declaration declare " "decode-float decode-universal-time defclass defconstant " "defgeneric define-compiler-macro define-condition " "define-method-combination define-modify-macro " "define-setf-expander define-setf-method define-symbol-macro " "defmacro defmethod defpackage defparameter defsetf defstruct " "deftype defun defvar delete delete-duplicates delete-file " "delete-if delete-if-not delete-package denominator " "deposite-field describe describe-object destructuring-bind " "digit-char digit-char-p directory 
directory-namestring " "disassemble division-by-zero do do* do-all-symbols " "do-external-symbols do-symbols dolist dotimes double-float " "double-float-epsilon double-float-negative-epsilion dpb " "dribble dynamic-extent ecase echo-stream " "echo-stream-input-stream echo-stream-output-stream ed eigth " "elt encode-universal-time end-of-file endp enough-namestring " "ensure-directories-exist ensure-generic-function eq eql equal " "equalp error errset etypecase eval eval-when evalhook evenp " "every exp export expt extend-char fboundp fceiling " "fdefinition fflor fifth file-author file-error " "file-error-pathname file-length file-namestring file-position " "file-stream file-string-length file-write-date fill " "fill-pointer find find-all-symbols find-class find-if " "find-if-not find-method find-package find-restart find-symbol " "finish-output first fixnum flet float float-digits " "float-precision float-radix float-sign floating-point-inexact " "floating-point-invalid-operation floating-point-underflow " "floatp floor fmakunbound force-output format formatter fourth " "fresh-line fround ftruncate ftype funcall function " "function-keywords function-lambda-expression functionp gbitp " "gcd generic-function gensym gentemp get get-decoded-time " "get-dispatched-macro-character get-internal-real-time " "get-internal-run-time get-macro-character " "get-output-stream-string get-properties get-setf-expansion " "get-setf-method get-universial-time getf gethash go " "graphic-char-p handler-bind handler-case hash hash-table " "hash-table-count hash-table-p hash-table-rehash-size " "hash-table-rehash-threshold hash-table-size hash-table-test " "host-namestring identity if if-exists ignorable ignore " "ignore-errors imagpart import in-package incf " "initialize-instance inline input-stream-p inspect int-char " "integer integer-decode-float integer-length integerp " "interactive-stream-p intern internal-time-units-per-second " "intersection invalid-method-error 
invoke-debugger " "invoke-restart invoke-restart-interactively isqrt keyword " "keywordp l labels lambda lambda-list-keywords " "lambda-parameters-limit last lcm ldb ldb-test ldiff " "least-negative-double-float least-negative-long-float " "least-negative-normalized-double-float " "least-negative-normalized-long-float " "least-negative-normalized-short-font " "least-negative-normalized-single-font " "least-negative-short-font least-negative-single-font " "least-positive-double-float least-positive-long-float " "least-positive-normalized-double-float " "least-positive-normalized-long-float " "least-positive-normalized-short-float " "least-positive-normalized-single-float " "least-positive-short-float least-positive-single-float length " "let let* lisp lisp-implementation-type " "lisp-implementation-version list list* " "list-all-packages list-lenght listen listp load " "load-logical-pathname-translation load-time-value locally " "log logand logandc1 logandc2 logbitp logcount logeqv " "logical-pathname logical-pathname-translations logior lognand " "lognor lognot logorc1 logorc2 logtest logxor long-float " "long-float-epsilon long-float-negative-epsilon long-site-name " "loop loop-finish lower-case-p machine-instance machine-type " "machine-version macro-function macroexpand macroexpand-1 " "macroexpand-l macrolet make make-array make-broadcast-stream " "make-char make-concatenated-stream make-condition " "make-dispatch-macro-character make-echo-stream " "make-hash-table make-instance make-instances-obsolete " "make-list make-load-form make-load-form-saving-slots " "make-method make-package make-pathname make-random-state " "make-sequence make-string make-string-input-stream " "make-string-output-stream make-symbol make-synonym-stream " "make-two-way-stream makunbound map map-into mapc mapcan " "mapcar mapcon maphash mapl maplist mask-field max member " "member-if member-if-not merge merge-pathname merge-pathnames " "method method-combination method-combination-error 
" "method-qualifiers min minusp mismatch mod " "most-negative-double-float most-negative-fixnum " "most-negative-long-float most-negative-short-float " "most-negative-single-float most-positive-fixnum " "most-positive-long-float most-positive-short-float " "most-positive-single-float muffle-warning " "multiple-value-bind multiple-value-call multiple-value-limit " "multiple-value-list multiple-value-prog1 multiple-value-seteq " "multiple-value-setq name name-char namestring nbutlast nconc " "next-method-p nil nintersection ninth no-applicable-method " "no-next-method not notany notevery notinline nreconc nreverse " "nset-difference nset-exclusive-or nstring nstring-capitalize " "nstring-downcase nstring-upcase nstubst-if-not nsublis nsubst " "nsubst-if nth nth-value nthcdr null number numberp numerator " "nunion oddp open open-stream-p optimize or otherwise " "output-stream-p package package-error package-error-package " "package-name package-nicknames package-shadowing-symbols " "package-use-list package-used-by-list packagep pairlis " "parse-error parse-integer parse-namestring pathname " "pathname-device pathname-directory pathname-host " "pathname-match-p pathname-name pathname-type " "pathname-version pathnamep peek-char phase pi plist plusp pop " "position position-if position-if-not pprint pprint-dispatch " "pprint-exit-if-list-exhausted pprint-fill pprint-indent " "pprint-linear pprint-logical-block pprint-newline pprint-pop " "pprint-tab pprint-tabular prin1 prin1-to-string princ " "princ-to-string print print-not-readable " "print-not-readable-object print-object probe-file proclaim " "prog prog* prog1 prog2 progn program-error progv provide " "psetf psetq push pushnew putprop quote random random-state " "random-state-p rassoc rassoc-if rassoc-if-not ration rational " "rationalize rationalp read read-byte read-car-no-hang " "read-char read-delimited-list read-eval-print " "read-from-string read-line read-preserving-whitespace " "read-squence reader-error 
readtable readtable-case readtablep " "real realp realpart reduce reinitialize-instance rem remf " "remhash remove remove-duplicates remove-if " "remove-if-not remove-method remprop rename-file " "rename-package replace require rest restart restart-bind " "restart-case restart-name return return-from revappend " "reverse room rotatef round row-major-aref rplaca rplacd " "safety satisfies sbit scale-float schar search second " "sequence serious-condition set set-char-bit set-difference " "set-dispatched-macro-character set-exclusive-or " "set-macro-character set-pprint-dispatch " "set-syntax-from-char setf setq seventh shadow " "shadowing-import shared-initialize shiftf short-float " "short-float-epsilon short-float-negative-epsilon " "short-site-name signal signed-byte signum simple-array " "simple-base-string simple-bit-vector- simple-bit-vector-p " "simple-condition simple-condition-format-arguments " "simple-condition-format-control simple-error simple-string " "simple-string-p simple-type-error simple-vector " "simple-vector-p simple-warning sin single-float " "single-float-epsilon single-float-negative-epsilon sinh " "sixth sleep slot-boundp slot-exists-p slot-makunbound " "slot-missing slot-unbound slot-value software-type " "software-version some sort space special special-form-p " "special-operator-p speed sqrt stable-sort standard " "standard-char standard-char-p standard-class " "standard-generic-function standard-method standard-object " "step storage-condition store-value stream stream-element-type " "stream-error stream-error-stream stream-external-format " "streamp streamup string string-capitalize string-char " "string-char-p string-downcase string-equal string-greaterp " "string-left-trim string-lessp string-not-equal " "string-not-greaterp string-not-lessp string-right-strim " "string-right-trim string-stream string-trim string-upcase " "string/= string< string<= string= string> string>= stringp " "structure structure-class structure-object 
style-warning " "sublim sublis subseq subsetp subst subst-if subst-if-not " "substitute substitute-if substitute-if-not subtypep svref " "sxhash symbol symbol-function symbol-macrolet symbol-name " "symbol-package symbol-plist symbol-value symbolp " "synonym-stream synonym-stream-symbol sys system t tagbody " "tailp tan tanh tenth terpri the third throw time trace " "translate-logical-pathname translate-pathname tree-equal " "truename truncase truncate two-way-stream " "two-way-stream-input-stream two-way-stream-output-stream " "type type-error type-error-datnum type-error-expected-type " "type-of typecase typep unbound-slot unbound-slot-instance " "unbound-variable undefined-function unexport unintern union " "unless unread unread-char unsigned-byte untrace unuse-package " "unwind-protect update-instance-for-different-class " "update-instance-for-redefined-class " "upgraded-array-element-type upgraded-complex-part-type " "upper-case-p use-package use-value user user-homedir-pathname " "value value-list values vector vector-pop vector-push " "vector-push-extend vectorp warn warning when " "wild-pathname-p with-accessors with-compilation-unit " "with-condition-restarts with-hash-table-iterator " "with-input-from-string with-open-file with-open-stream " "with-output-to-string with-package-iterator " "with-simple-restart with-slots with-standard-io-syntax write " "write-byte write-char write-line write-sequence" ) self.SetKeyWords(1, ":abort :adjustable :append :array :base :case :circle " ":conc-name :constructor :copier :count :create :default " ":device :directory :displaced-index-offset :displaced-to " ":element-type :end :end1 :end2 :error :escape :external " ":from-end :gensym :host :include :if-does-not-exist " ":if-exists :index :inherited :internal :initial-contents " ":initial-element :initial-offset :initial-value :input " ":io :junk-allowed :key :length :level :name :named " ":new-version :nicknames :output :ouput=file :overwrite " ":predicate 
:preserve-whitespace :pretty :print " ":print-function :probe :radix :read-only :rehash-size " ":rehash-threshold :rename :size :rename-and-delete :start " ":start1 :start2 :stream :supersede :test :test-not :use " ":verbose :version") self.SetLexer(stc.STC_LEX_LISP) self.StyleSetSpec(stc.STC_LISP_COMMENT, "fore:#007F00,face:%(other)s,size:%(size)d" % faces) self.StyleSetSpec(stc.STC_LISP_DEFAULT, "fore:#808080,face:%(helv)s,size:%(size)d" % faces) self.StyleSetSpec(stc.STC_LISP_IDENTIFIER, "fore:#007F7F,bold,size:%(size)d" % faces) self.StyleSetSpec(stc.STC_LISP_KEYWORD, "fore:#7F007F,face:%(helv)s,size:%(size)d" % faces) self.StyleSetSpec(stc.STC_LISP_KEYWORD_KW, "fore:#7F007F,face:%(helv)s,size:%(size)d" % faces) self.StyleSetSpec(stc.STC_LISP_MULTI_COMMENT, "fore:#7F7F7F,size:%(size)d" % faces) self.StyleSetSpec(stc.STC_LISP_NUMBER, "fore:#007F7F,size:%(size)d" % faces) self.StyleSetSpec(stc.STC_LISP_OPERATOR, "bold,size:%(size)d" % faces) self.StyleSetSpec(stc.STC_LISP_SPECIAL, "fore:#000000,face:%(helv)s,size:%(size)d" % faces) self.StyleSetSpec(stc.STC_LISP_STRING, "fore:#7F0000,size:%(size)d" % faces) self.StyleSetSpec(stc.STC_LISP_STRINGEOL, "fore:#0000FF,bold,underline,size:%(size)d" % faces) self.StyleSetSpec(stc.STC_LISP_SYMBOL, "fore:#00007F,bold,size:%(size)d" % faces) def __Perl__(self): self.SetLexer(stc.STC_LEX_PERL) pass def __Preview__(self): pass
[ "wx.stc.StyledTextCtrl.__init__" ]
[((1059, 1122), 'wx.stc.StyledTextCtrl.__init__', 'stc.StyledTextCtrl.__init__', (['self', 'parent', 'ID', 'pos', 'size', 'style'], {}), '(self, parent, ID, pos, size, style)\n', (1086, 1122), True, 'import wx.stc as stc\n')]
import alignments
import re
import read
import binaryIO
import math
import os
import preprocess
import time


class Compressor:
    """Compresses a sorted SAM alignment file into a compact binary format.

    The file is processed bundle-by-bundle (clusters of overlapping reads);
    each bundle is compressed independently and appended to a temporary file,
    then index information and bundle-spanning buckets are written to the
    final output ahead of the bundle data.
    """

    aligned = None  # current alignments.Alignments bundle state

    # Backend selector for compressString():
    # 0 - zlib
    # 1 - lzma
    # 2 - bz2
    compressMethod = 0

    covSize = 0    # bytes of compressed output attributable to coverage data
    totalSize = 0  # total bytes of (pre-compression) payload seen

    def __init__(self, frag_len_cutoff):
        """Pick the compression backend and record the fragment-length cutoff.

        :param frag_len_cutoff: max paired fragment length; falsy means
            "derive it later from the preprocessor" (see compress()).
        """
        if self.compressMethod == 0:
            self.zlib = __import__('zlib')
        elif self.compressMethod == 1:
            self.lzma = __import__('lzma')
        elif self.compressMethod == 2:
            self.bz2 = __import__('bz2')

        if frag_len_cutoff:
            print('Set fragment length cutoff to %d' % frag_len_cutoff)
        # Always store the attribute (even when falsy) so compress() can
        # detect the "not set" case without an AttributeError.
        self.frag_len_cutoff = frag_len_cutoff

    def compress(self, samFilename, compressedFilename, gtf, min_filename, frag_len_z_cutoff, split_diff_strands, split_discordant):
        '''
        Compresses the alignments to 2 files, one for unspliced and one for spliced

        file_prefix: Prefix for all output file names
        '''
        self.p = preprocess.Preprocessor(samFilename, frag_len_z_cutoff, split_diff_strands)
        if not self.frag_len_cutoff:
            # No cutoff given at construction time: use the one estimated
            # by the preprocessor.
            self.frag_len_cutoff = self.p.frag_len_cutoff
        print('Using fragment length cutoff of ' + str(self.frag_len_cutoff))

        if split_diff_strands:
            print('Splitting mates on different strands')
        else:
            print('Not splitting mates on different strands')

        if split_discordant:
            print('Splitting discordant')
        else:
            print('Not splitting discordant')

        # Reads on different strands that should be unpaired
        self.diff_strand_unpaired = self.p.unpaired
        del self.p

        # Read header
        header = ''
        with open(samFilename, 'r') as f:
            for line in f:
                if line[0] == '@':
                    header += line
                else:
                    break
        self.chromosomes = self.parseSAMHeader(header)

        self.aligned = alignments.Alignments(self.chromosomes, self.frag_len_cutoff, split_discordant)

        if gtf:
            self.aligned.gtf_exons = self.parseGTF(gtf, self.aligned.chromOffsets)

        self.compressByBundle(samFilename, compressedFilename, min_filename)

        print('Approximately %d / %d = %f%% of compressed file is coverage' % (self.covSize, self.totalSize, 100.0*float(self.covSize)/float(self.totalSize)))
        print('Finished compressing')

    def compressByBundle(self, input_name, compressed_name, intermediate_name=None):
        '''
        Read a sorted SAM file and compress in segments determined by clusters of reads

        :param input_name: path to the sorted SAM input
        :param compressed_name: path for the final compressed output
        :param intermediate_name: optional path for an intermediate SAM copy
        :return: None
        '''
        # If coverage is 0 for at least this many bases, end a potential gene
        overlapRadius = 50

        spliced_index = []  # compressed length of each bundle in temp.bin
        bundles = []        # exon sets, one per bundle

        first = True   # still before the first bundle flush?
        bundle_id = 0
        read_id = 0

        diff_strand_unpaired_id = 0
        num_diff_strand_unpaired = len(self.diff_strand_unpaired)

        with open(input_name, 'r') as filehandle:
            alignment_idx = 0  # running index of processed alignment lines
            for line in filehandle:
                # Check if header line
                if line[0] == '@':
                    continue

                row = line.strip().split('\t')
                if row[2] == '*':
                    # HISAT includes unmapped reads at the end of the file; we just skip them
                    continue

                if row[2] not in self.chromosomes[0]:
                    print('Error! Chromosome ' + str(row[2]) + ' not found!')
                    exit()

                # Starting position of this read (global offset across chromosomes)
                start = self.aligned.chromOffsets[row[2]] + int(row[3])

                if self.aligned.gene_bounds and start > (self.aligned.gene_bounds[-1] + overlapRadius):
                    # Compress most recent bundle
                    self.aligned.finalizeExons()
                    self.aligned.finalizeUnmatched()
                    self.aligned.finalize_cross_bundle_reads()

                    bundle_id += 1
                    bundles.append(self.aligned.exons)

                    # Write to intermediate file
                    if intermediate_name:
                        if first:
                            # If it's the first bundle, write the header as well
                            with open(intermediate_name, 'w') as f1:
                                read_id = self.aligned.writeSAM(f1, self.aligned.unpaired, self.aligned.paired, True, False, read_id)
                        else:
                            with open(intermediate_name, 'a') as f1:
                                read_id = self.aligned.writeSAM(f1, self.aligned.unpaired, self.aligned.paired, False, False, read_id)

                    junctions, maxReadLen = self.aligned.computeBuckets()
                    self.sortedJuncs = sorted(junctions.keys())

                    # Compress bundle to temporary file
                    if first:
                        mode = 'wb'
                    else:
                        mode = 'ab'
                    with open('temp.bin', mode) as f:
                        l = self.compressBundle(junctions, maxReadLen, f)
                        spliced_index.append(l)

                    # Start new bundle
                    self.aligned.resetBundle()
                    self.aligned.exons.add(start)
                    first = False

                # Process read
                if row[5] == '*':
                    # HISAT occasionally prints * as the cigar string when it is identical to its mate
                    exons = None
                else:
                    exons = self.parseCigar(row[5], int(row[3]))

                # find XS (strand) and NH values in the optional fields
                strand = None
                NH = 1
                for r in row[11:len(row)]:
                    if r[0:5] == 'XS:A:' or r[0:5] == 'XS:a:':
                        strand = r[5]
                    elif r[0:3] == 'NH:':
                        NH = int(r[5:])

                flags = int(row[1])
                if flags & 4:
                    # Read is unmapped
                    continue

                r = read.Read(row[2], int(row[3]), exons, strand, NH)
                if row[6] == '*' or (flags & 8):
                    paired = False
                elif diff_strand_unpaired_id < num_diff_strand_unpaired and alignment_idx == self.diff_strand_unpaired[diff_strand_unpaired_id]:
                    # This alignment was flagged by the preprocessor as a
                    # different-strand mate that must be treated as unpaired.
                    paired = False
                    diff_strand_unpaired_id += 1
                else:
                    paired = True
                r.bundle = bundle_id
                r.pairOffset = int(row[7])
                if row[6] == '=':
                    r.pairChrom = row[2]
                else:
                    r.pairChrom = row[6]
                self.aligned.processRead(row[0], r, paired)

                alignment_idx += 1

        # Compress final cluster
        self.aligned.finalizeExons()
        self.aligned.finalizeUnmatched()
        self.aligned.finalize_cross_bundle_reads()
        bundle_id += 1
        bundles.append(self.aligned.exons)

        # Write to intermediate file
        if intermediate_name:
            if first:
                # If it's the first bundle, write the header as well
                with open(intermediate_name, 'w') as f1:
                    read_id = self.aligned.writeSAM(f1, self.aligned.unpaired, self.aligned.paired, True, False, read_id)
                first = False
            else:
                with open(intermediate_name, 'a') as f1:
                    read_id = self.aligned.writeSAM(f1, self.aligned.unpaired, self.aligned.paired, False, False, read_id)

        junctions, maxReadLen = self.aligned.computeBuckets()
        self.sortedJuncs = sorted(junctions.keys())

        # Compress bundle to temporary file
        if first:
            mode = 'wb'
        else:
            mode = 'ab'
        with open('temp.bin', mode) as f:
            l = self.compressBundle(junctions, maxReadLen, f)
            spliced_index.append(l)

        leftovers = 0
        for k, v in self.aligned.cross_bundle_reads.items():
            leftovers += len(v)
        print('%d cross-bundle reads unmatched' % leftovers)

        bundle_lens = [c[-1]-c[0] for c in bundles]
        print('Minimum bundle length: %d' % min(bundle_lens))
        print('Maximum bundle length: %d' % max(bundle_lens))
        print('Average bundle length: %d' % (sum(bundle_lens) / len(bundle_lens)))

        # Write index information and append spliced and unspliced files
        with open(compressed_name, 'wb') as f:
            s = binaryIO.writeChroms(self.chromosomes)
            s += binaryIO.writeClusters(bundles)
            s += binaryIO.writeList(spliced_index)
            f.write(s)

            # Compress bundle-spanning buckets
            self.compressCrossBundle(self.aligned.cross_bundle_buckets, self.aligned.max_cross_bundle_read_len, bundle_id, f)

            # Move contents of temporary file to output file
            with open('temp.bin', 'rb') as f2:
                f.write(f2.read())

        os.remove('temp.bin')

    def compressBundle(self, junctions, maxReadLen, filehandle):
        """Serialize, compress and write one bundle's junction buckets.

        :param junctions: dict mapping junction keys to bucket data
        :param maxReadLen: longest read length in the bundle
        :param filehandle: binary file open for appending
        :return: number of bytes written for this bundle
        """
        # Determine the number of bytes for read lengths
        readLenBytes = binaryIO.findNumBytes(maxReadLen)

        cluster = binaryIO.valToBinary(1, readLenBytes)
        cluster += binaryIO.writeJunctionsList(self.sortedJuncs, 2)
        self.totalSize += len(cluster)

        junc_string = b''
        for j in self.sortedJuncs:
            s, c, t = binaryIO.writeJunction(readLenBytes, junctions[j])
            self.covSize += c
            self.totalSize += t
            junc_string += s

        cluster += junc_string

        # Write to file
        start = filehandle.tell()
        filehandle.write(self.compressString(cluster))

        # return length of cluster in file
        return filehandle.tell() - start

    def compressCrossBundle(self, cross_bundle_buckets, maxReadLen, num_bundles, filehandle):
        '''
        Compress the bundle-spanning buckets

        :param cross_bundle_buckets: dict of buckets spanning bundle boundaries
        :param maxReadLen: longest read length among the buckets
        :param num_bundles: total number of bundles (sizes the bundle-id field)
        :param filehandle: binary output file
        '''
        readLenBytes = binaryIO.findNumBytes(maxReadLen)
        bundleIdBytes = binaryIO.findNumBytes(num_bundles)

        buckets_sorted = sorted(cross_bundle_buckets.keys())
        if len(buckets_sorted) > 0:
            print('%d cross-bundle buckets' % len(buckets_sorted))
            pos = filehandle.tell()

            # Buckets are compressed in fixed-size chunks so a reader can
            # seek to a chunk without decompressing everything.
            chunk_size = 20
            num_chunks = math.ceil(len(buckets_sorted) / chunk_size)
            chunk_lens = [0] * num_chunks

            index = binaryIO.valToBinary(4, len(buckets_sorted))
            index += binaryIO.valToBinary(2, chunk_size)
            index += binaryIO.valToBinary(1, readLenBytes)
            index += binaryIO.writeCrossBundleBucketNames(bundleIdBytes, cross_bundle_buckets, buckets_sorted)
            self.totalSize += len(index)

            main = b''
            chunk = b''
            chunk_id = 0

            for i in range(len(buckets_sorted)):
                b = buckets_sorted[i]
                ch, c, t = binaryIO.writeCrossBundleBucket(readLenBytes, cross_bundle_buckets[b])
                chunk += ch
                self.covSize += c
                self.totalSize += t

                if (i+1) % chunk_size == 0:
                    compressed = self.compressString(chunk)
                    chunk_lens[chunk_id] = len(compressed)
                    chunk_id += 1
                    main += compressed
                    chunk = b''

            # Flush the final partial chunk, if any
            if len(chunk) > 0:
                compressed = self.compressString(chunk)
                chunk_lens[chunk_id] = len(compressed)
                main += compressed

            index += binaryIO.writeList(chunk_lens)
            index = self.compressString(index)
            length = len(index)
            numBytes = binaryIO.findNumBytes(length)
            binaryIO.writeVal(filehandle, 1, numBytes)
            binaryIO.writeVal(filehandle, numBytes, length)
            filehandle.write(index)
            filehandle.write(main)

            print('Compressed size: %d' % (filehandle.tell() - pos))
        else:
            # No cross-bundle buckets: write a minimal (1-byte, zero-length)
            # index so the reader still finds a well-formed section.
            binaryIO.writeVal(filehandle, 1, 1)
            binaryIO.writeVal(filehandle, 1, 0)

    def parseCigar(self, cigar, offset):
        '''
        Parse the cigar string starting at the given index of the genome
        Returns a list of offsets for each exonic region of the read
        [(start1, end1), (start2, end2), ...]
        '''
        exons = []
        newExon = True

        # Parse cigar string (raw strings so \D is a regex class, not an escape)
        match = re.search(r'\D', cigar)
        while match:
            index = match.start()
            length = int(cigar[:index])
            if cigar[index] == 'N':
                # Separates contiguous exons, so set boolean to start a new one
                newExon = True
            elif cigar[index] == 'M':
                # If in the middle of a contiguous exon, append the length to it, otherwise start a new exon
                if newExon:
                    exons.append([offset, offset+length])
                    newExon = False
                else:
                    exons[-1][1] += length
            elif cigar[index] == 'D':
                # If in the middle of a contiguous exon, append the deleted length to it
                if not newExon:
                    exons[-1][1] += length

            # Skip soft clipping (soft-clipped bases consume no reference)
            if not cigar[index] == 'S':
                offset += length
            cigar = cigar[index+1:]
            match = re.search(r'\D', cigar)

        return exons

    def parseSAMHeader(self, header):
        """Extract chromosome names and lengths from SAM @SQ header lines.

        :param header: concatenated SAM header lines
        :return: [chromNames, chromLens] in the order they appear in the header
        """
        chromNames = []
        chromLens = []

        for line in header.split('\n'):
            if line[0:3] == '@SQ':
                row = line.strip().split('\t')
                chromNames.append(row[1][3:])
                chromLens.append(int(row[2][3:]))
        return [chromNames, chromLens]

    def parseGTF(self, gtf, chromOffsets):
        """Collect sorted global exon boundary positions from a GTF file.

        :param gtf: path to the GTF annotation
        :param chromOffsets: dict mapping chromosome name to its global offset
        :return: sorted list of unique exon start/end positions (global coords)
        """
        exons = set()
        with open(gtf, 'r') as f:
            for line in f:
                row = line.rstrip().split('\t')
                if row[2] == 'exon':
                    exons.add(int(row[3]) + chromOffsets[row[0]])
                    exons.add(int(row[4]) + chromOffsets[row[0]])
        return sorted(list(exons))

    def compressString(self, s):
        '''
        Use a predefined python library to compress the given string.
        Return the compressed string
        '''
        if self.compressMethod == 0:
            return self.zlib.compress(s)
        elif self.compressMethod == 1:
            return self.lzma.compress(s)
        elif self.compressMethod == 2:
            return self.bz2.compress(s)
[ "re.search", "binaryIO.writeChroms", "binaryIO.writeJunctionsList", "binaryIO.writeCrossBundleBucket", "binaryIO.writeList", "binaryIO.valToBinary", "binaryIO.writeVal", "binaryIO.writeCrossBundleBucketNames", "alignments.Alignments", "preprocess.Preprocessor", "binaryIO.writeClusters", "binar...
[((992, 1067), 'preprocess.Preprocessor', 'preprocess.Preprocessor', (['samFilename', 'frag_len_z_cutoff', 'split_diff_strands'], {}), '(samFilename, frag_len_z_cutoff, split_diff_strands)\n', (1015, 1067), False, 'import preprocess\n'), ((1981, 2060), 'alignments.Alignments', 'alignments.Alignments', (['self.chromosomes', 'self.frag_len_cutoff', 'split_discordant'], {}), '(self.chromosomes, self.frag_len_cutoff, split_discordant)\n', (2002, 2060), False, 'import alignments\n'), ((10235, 10256), 'os.remove', 'os.remove', (['"""temp.bin"""'], {}), "('temp.bin')\n", (10244, 10256), False, 'import os\n'), ((10404, 10437), 'binaryIO.findNumBytes', 'binaryIO.findNumBytes', (['maxReadLen'], {}), '(maxReadLen)\n', (10425, 10437), False, 'import binaryIO\n'), ((10456, 10493), 'binaryIO.valToBinary', 'binaryIO.valToBinary', (['(1)', 'readLenBytes'], {}), '(1, readLenBytes)\n', (10476, 10493), False, 'import binaryIO\n'), ((10513, 10561), 'binaryIO.writeJunctionsList', 'binaryIO.writeJunctionsList', (['self.sortedJuncs', '(2)'], {}), '(self.sortedJuncs, 2)\n', (10540, 10561), False, 'import binaryIO\n'), ((11489, 11522), 'binaryIO.findNumBytes', 'binaryIO.findNumBytes', (['maxReadLen'], {}), '(maxReadLen)\n', (11510, 11522), False, 'import binaryIO\n'), ((11548, 11582), 'binaryIO.findNumBytes', 'binaryIO.findNumBytes', (['num_bundles'], {}), '(num_bundles)\n', (11569, 11582), False, 'import binaryIO\n'), ((13946, 13969), 're.search', 're.search', (['"""\\\\D"""', 'cigar'], {}), "('\\\\D', cigar)\n", (13955, 13969), False, 'import re\n'), ((9746, 9784), 'binaryIO.writeChroms', 'binaryIO.writeChroms', (['self.chromosomes'], {}), '(self.chromosomes)\n', (9766, 9784), False, 'import binaryIO\n'), ((9802, 9833), 'binaryIO.writeClusters', 'binaryIO.writeClusters', (['bundles'], {}), '(bundles)\n', (9824, 9833), False, 'import binaryIO\n'), ((9851, 9884), 'binaryIO.writeList', 'binaryIO.writeList', (['spliced_index'], {}), '(spliced_index)\n', (9869, 9884), False, 'import 
binaryIO\n'), ((10841, 10891), 'binaryIO.writeJunction', 'binaryIO.writeJunction', (['readLenBytes', 'junctions[j]'], {}), '(readLenBytes, junctions[j])\n', (10863, 10891), False, 'import binaryIO\n'), ((12012, 12047), 'binaryIO.valToBinary', 'binaryIO.valToBinary', (['(2)', 'chunk_size'], {}), '(2, chunk_size)\n', (12032, 12047), False, 'import binaryIO\n'), ((12069, 12106), 'binaryIO.valToBinary', 'binaryIO.valToBinary', (['(1)', 'readLenBytes'], {}), '(1, readLenBytes)\n', (12089, 12106), False, 'import binaryIO\n'), ((12128, 12221), 'binaryIO.writeCrossBundleBucketNames', 'binaryIO.writeCrossBundleBucketNames', (['bundleIdBytes', 'cross_bundle_buckets', 'buckets_sorted'], {}), '(bundleIdBytes, cross_bundle_buckets,\n buckets_sorted)\n', (12164, 12221), False, 'import binaryIO\n'), ((13085, 13115), 'binaryIO.writeList', 'binaryIO.writeList', (['chunk_lens'], {}), '(chunk_lens)\n', (13103, 13115), False, 'import binaryIO\n'), ((13219, 13248), 'binaryIO.findNumBytes', 'binaryIO.findNumBytes', (['length'], {}), '(length)\n', (13240, 13248), False, 'import binaryIO\n'), ((13261, 13303), 'binaryIO.writeVal', 'binaryIO.writeVal', (['filehandle', '(1)', 'numBytes'], {}), '(filehandle, 1, numBytes)\n', (13278, 13303), False, 'import binaryIO\n'), ((13316, 13363), 'binaryIO.writeVal', 'binaryIO.writeVal', (['filehandle', 'numBytes', 'length'], {}), '(filehandle, numBytes, length)\n', (13333, 13363), False, 'import binaryIO\n'), ((13531, 13566), 'binaryIO.writeVal', 'binaryIO.writeVal', (['filehandle', '(1)', '(1)'], {}), '(filehandle, 1, 1)\n', (13548, 13566), False, 'import binaryIO\n'), ((13579, 13614), 'binaryIO.writeVal', 'binaryIO.writeVal', (['filehandle', '(1)', '(0)'], {}), '(filehandle, 1, 0)\n', (13596, 13614), False, 'import binaryIO\n'), ((14920, 14943), 're.search', 're.search', (['"""\\\\D"""', 'cigar'], {}), "('\\\\D', cigar)\n", (14929, 14943), False, 'import re\n'), ((12448, 12518), 'binaryIO.writeCrossBundleBucket', 'binaryIO.writeCrossBundleBucket', 
(['readLenBytes', 'cross_bundle_buckets[b]'], {}), '(readLenBytes, cross_bundle_buckets[b])\n', (12479, 12518), False, 'import binaryIO\n')]
import sys
import csv
import glob
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("lp_files", help="The label pair need to be converted")
parser.add_argument("dir2pubmed", help="The directory to the place for pubmed articles")
parser.add_argument('out_path', help="The path to store finaltraining data")
args = parser.parse_args()


def main():
    """Convert label-pair files into relation rows and token-level NER files.

    For each label-pair file matched by ``args.lp_files``, every line
    (gene1, gene2, relation, pubmed id) is joined with the first line of the
    corresponding PubMed article to emit a tab-separated relation row.
    Afterwards, one NER file tags every token of each encountered article as
    B-GENE (with its gene-list index) or O.
    """
    lp_files = sorted(glob.glob(args.lp_files))
    outpath = args.out_path
    pubids = []          # article ids in first-seen order (drives NER output order)
    seen_pubids = set()  # O(1) membership test alongside the ordered list

    for lp_file in lp_files:
        print(lp_file)
        print(lp_file[lp_file.find('labelPair'):])
        out_name = outpath + lp_file[9 + lp_file.find('labelPair'):] + '.csv'
        # Context managers close both files even if a line fails to parse.
        with open(lp_file) as lp, open(out_name, 'w') as out_file:
            writer = csv.writer(out_file, delimiter='\t')
            for line in lp:
                fields = line.strip('\n').split('\t')
                gene1, gene2, rel, pubmedid = fields[0], fields[1], fields[2], fields[3]
                # Read the article once per line; the original re-opened and
                # re-read the same file three times.
                with open(args.dir2pubmed + pubmedid) as article:
                    text = article.readline().strip('\n')
                tokens = text.split()
                if pubmedid not in seen_pubids:
                    seen_pubids.add(pubmedid)
                    pubids.append(pubmedid)
                gene1_occ = [i for i, w in enumerate(tokens) if w == gene1]
                gene1_start = ":".join(str(i) for i in gene1_occ)
                gene1_end = ":".join(str(i + 1) for i in gene1_occ)
                gene2_occ = [i for i, w in enumerate(tokens) if w == gene2]
                gene2_start = ":".join(str(i) for i in gene2_occ)
                gene2_end = ":".join(str(i + 1) for i in gene2_occ)
                writer.writerow([gene1, 'Gene', gene1, gene1_start, gene1_end,
                                 gene2, 'Gene', gene2, gene2_start, gene2_end,
                                 pubmedid, rel, text])

    # NOTE(review): cancerlist is read but never used downstream; kept so a
    # missing rel.txt still fails loudly as before — confirm it can be removed.
    with open(args.dir2pubmed + 'rel.txt') as f:
        cancerlist = [line.strip('\n').split('_') for line in f]
    with open(args.dir2pubmed + 'gene_list.txt') as f:
        genelist = [line.strip('\n').split('\t')[0] for line in f]

    # First-occurrence index per gene, matching list.index() semantics but O(1).
    gene_index = {}
    for i, g in enumerate(genelist):
        gene_index.setdefault(g, i)

    if 'Train' in args.lp_files:
        ner_name = outpath + 'ner_train.txt'
    else:
        ner_name = outpath + 'ner_test.txt'
    with open(ner_name, 'w') as nerfile:
        for pubid in pubids:
            with open(args.dir2pubmed + pubid) as article:
                content = article.readline().strip('\n').split()
            for w in content:
                if w in gene_index:
                    nerfile.write('%s\t%s\t%s\t%s\n' % (w, 'B-GENE', str(gene_index[w]), pubid))
                else:
                    nerfile.write('%s\t%s\t%s\t%s\n' % (w, 'O', '-1', pubid))
            nerfile.write('\n')


if __name__ == '__main__':
    main()
[ "csv.writer", "glob.glob", "argparse.ArgumentParser" ]
[((59, 84), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (82, 84), False, 'import argparse\n'), ((389, 413), 'glob.glob', 'glob.glob', (['args.lp_files'], {}), '(args.lp_files)\n', (398, 413), False, 'import glob\n'), ((691, 727), 'csv.writer', 'csv.writer', (['out_file'], {'delimiter': '"""\t"""'}), "(out_file, delimiter='\\t')\n", (701, 727), False, 'import csv\n')]
""" Copyright (c) 2017-2020 ABBYY Production LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
"""

import neoml.PythonWrapper as PythonWrapper
from .Dnn import Layer
from neoml.Utils import check_input_layers


class RepeatSequence(Layer):
    """A layer that emits its input sequences repeated a fixed number of times.

    Layer inputs
    ---------
    #1: a sequence of objects. The dimensions:

    - **BatchLength** is the sequence length
    - **BatchWidth** * **ListSize** is the number of sequences in the set
    - **Height** * **Width** * **Depth** * **Channels** is the size of each object

    Layer outputs
    ---------
    #1: the same sequence repeated repeat_count times. The dimensions:

    - **BatchLength** is repeat_count times larger than the input's **BatchLength**
    - all other dimensions are the same as for the input

    Parameters
    ---------
    input_layer : (object, int)
        The input layer and the number of its output. If no number is
        specified, the first output will be connected.
    repeat_count : int, > 0
        The number of repetitions.
    name : str, default=None
        The layer name.
    """

    def __init__(self, input_layer, repeat_count, name=None):
        # Adopting an already-built internal layer object: wrap it as-is.
        if type(input_layer) is PythonWrapper.RepeatSequence:
            super().__init__(input_layer)
            return

        source_layers, source_outputs = check_input_layers(input_layer, 1)

        wrapped = PythonWrapper.RepeatSequence(str(name), source_layers[0],
                                               int(source_outputs[0]), int(repeat_count))
        super().__init__(wrapped)

    @property
    def repeat_count(self):
        """The number of times each input sequence is repeated.
        """
        return self._internal.get_repeat_count()

    @repeat_count.setter
    def repeat_count(self, repeat_count):
        """Sets the number of repetitions.
        """
        self._internal.set_repeat_count(int(repeat_count))
[ "neoml.Utils.check_input_layers" ]
[((1934, 1968), 'neoml.Utils.check_input_layers', 'check_input_layers', (['input_layer', '(1)'], {}), '(input_layer, 1)\n', (1952, 1968), False, 'from neoml.Utils import check_input_layers\n')]
#! /usr/bin/env python # Copyright 2018 The Yawn Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Trains and exports a saved-model based on some test data.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from data.stochastic_quantized_sine_wave import get_numpy_data from model.wavenet_model import WaveNetModel # TODO: Figure out some util file for this function. 
def data_format_to_shape( batch_length=None, sequence_length=None, channel_length=None, data_format='channels_first' ): """.""" shape = [batch_length, None, None] channel_axis = 1 if data_format == 'channels_first' else 2 sequence_axis = 2 if data_format == 'channels_first' else 1 shape[sequence_axis] = sequence_length shape[channel_axis] = channel_length return tuple(shape) def main(FLAGS): """.""" input_channels = 1 label_channels = 1 quantization = 64 scale = 4 num_mixtures = 2 filters = 8 initial_kernel = 8 kernel_size = 2 dilation_powers = [0, 1, 2, 3, 4, 5, 6] dilations = [kernel_size**power for power in dilation_powers] # data, data_labels, bins = get_numpy_data('../data.npy', quantization) data, data_labels, bins = get_numpy_data(2000, quantization, scale=scale) dataset_size = len(data) mock_epochs = 2000 assert np.all(np.diff(bins) > 0.0) model = WaveNetModel( filters=filters, initial_kernel=initial_kernel, kernel_size=kernel_size, dilations=dilations, quantization=quantization, num_mixtures=num_mixtures, bins=bins, data_format='channels_last', version='mixture' ) batch_size = 2 sequence_length = model.receptive_field + min(512, model.receptive_field) sampled_data = [] sampled_labels = [] for i in range(mock_epochs*dataset_size//sequence_length): index = np.random.randint(dataset_size-sequence_length) sampled_data.append(data[index:index+sequence_length]) sampled_labels.append(data_labels[index:index+sequence_length]) data = np.array(sampled_data) data_labels = np.array(sampled_labels) data = data.reshape( data_format_to_shape(-1, sequence_length, input_channels, data_format=model.data_format) ) data_labels = data_labels.reshape(-1, sequence_length, label_channels) config = tf.ConfigProto() config.gpu_options.allow_growth = True classifier = tf.estimator.Estimator( model_dir=FLAGS.model_dir, model_fn=model.model_fn, params=dict( learning_rate=1e-4, add_summaries=True ), config=tf.estimator.RunConfig(session_config=config) ) classifier.train( 
input_fn=tf.estimator.inputs.numpy_input_fn( data, data_labels, batch_size=batch_size, shuffle=True, num_epochs=2 ) ) def serving_input_receiver_fn(): features = tf.placeholder( dtype=tf.float32, shape=data_format_to_shape( None, 1+model.receptive_field, input_channels, data_format=model.data_format ), name='inputs' ) return tf.estimator.export.TensorServingInputReceiver( features=features, receiver_tensors=features ) classifier.export_savedmodel( export_dir_base='/tmp/wavenet', serving_input_receiver_fn=serving_input_receiver_fn ) return 0 if __name__ == '__main__': import argparse tf.logging.set_verbosity(tf.logging.INFO) parser = argparse.ArgumentParser() parser.add_argument( '--model_dir', type=str, metavar=dir, default=None, help='Estimator model directory.' ) args = parser.parse_args() exit(main(args))
[ "tensorflow.estimator.RunConfig", "argparse.ArgumentParser", "data.stochastic_quantized_sine_wave.get_numpy_data", "model.wavenet_model.WaveNetModel", "tensorflow.logging.set_verbosity", "numpy.diff", "tensorflow.estimator.inputs.numpy_input_fn", "numpy.array", "numpy.random.randint", "tensorflow....
[((1939, 1986), 'data.stochastic_quantized_sine_wave.get_numpy_data', 'get_numpy_data', (['(2000)', 'quantization'], {'scale': 'scale'}), '(2000, quantization, scale=scale)\n', (1953, 1986), False, 'from data.stochastic_quantized_sine_wave import get_numpy_data\n'), ((2092, 2324), 'model.wavenet_model.WaveNetModel', 'WaveNetModel', ([], {'filters': 'filters', 'initial_kernel': 'initial_kernel', 'kernel_size': 'kernel_size', 'dilations': 'dilations', 'quantization': 'quantization', 'num_mixtures': 'num_mixtures', 'bins': 'bins', 'data_format': '"""channels_last"""', 'version': '"""mixture"""'}), "(filters=filters, initial_kernel=initial_kernel, kernel_size=\n kernel_size, dilations=dilations, quantization=quantization,\n num_mixtures=num_mixtures, bins=bins, data_format='channels_last',\n version='mixture')\n", (2104, 2324), False, 'from model.wavenet_model import WaveNetModel\n'), ((2809, 2831), 'numpy.array', 'np.array', (['sampled_data'], {}), '(sampled_data)\n', (2817, 2831), True, 'import numpy as np\n'), ((2850, 2874), 'numpy.array', 'np.array', (['sampled_labels'], {}), '(sampled_labels)\n', (2858, 2874), True, 'import numpy as np\n'), ((3093, 3109), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (3107, 3109), True, 'import tensorflow as tf\n'), ((4246, 4287), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (4270, 4287), True, 'import tensorflow as tf\n'), ((4302, 4327), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4325, 4327), False, 'import argparse\n'), ((2614, 2663), 'numpy.random.randint', 'np.random.randint', (['(dataset_size - sequence_length)'], {}), '(dataset_size - sequence_length)\n', (2631, 2663), True, 'import numpy as np\n'), ((3912, 4008), 'tensorflow.estimator.export.TensorServingInputReceiver', 'tf.estimator.export.TensorServingInputReceiver', ([], {'features': 'features', 'receiver_tensors': 'features'}), '(features=features,\n 
receiver_tensors=features)\n', (3958, 4008), True, 'import tensorflow as tf\n'), ((2058, 2071), 'numpy.diff', 'np.diff', (['bins'], {}), '(bins)\n', (2065, 2071), True, 'import numpy as np\n'), ((3373, 3418), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'session_config': 'config'}), '(session_config=config)\n', (3395, 3418), True, 'import tensorflow as tf\n'), ((3465, 3573), 'tensorflow.estimator.inputs.numpy_input_fn', 'tf.estimator.inputs.numpy_input_fn', (['data', 'data_labels'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_epochs': '(2)'}), '(data, data_labels, batch_size=batch_size,\n shuffle=True, num_epochs=2)\n', (3499, 3573), True, 'import tensorflow as tf\n')]
""" Created on June 19th, 2017 @author: rouxpn """ from __future__ import division, print_function, unicode_literals, absolute_import import warnings warnings.simplefilter('default', DeprecationWarning) import os import re from decimal import Decimal class DecayParser(): """ Parses the PHISICS xml decay file and replaces the nominal values by the perturbed values. """ def __init__(self, inputFiles, workingDir, **pertDict): """ Constructor @ In, inputFiles, string, .dat Decay file. @ In, workingDir, string, absolute path to the working directory @ In, pertDict, dictionary, dictionary of perturbed variables @ Out, None """ self.allDecayList = [] # All possible types of decay for actinides and FP self.isotopeClassifier = {} # String, FP or Actinide self.decayModeNumbering = {} # Gives the column number of each decay type self.isotopeParsed = ['Actinide', 'FP'] self.listedDict = {} # Nested dictionary of perturbed variables self.inputFiles = inputFiles self.pertDict = self.scientificNotation(pertDict) # open the unperturbed file openInputFile = open(self.inputFiles, "r") lines = openInputFile.readlines() openInputFile.close() self.characterizeLibrary(lines) self.fileReconstruction() self.printInput(workingDir,lines) def matrixPrinter(self, line, outfile, atomicNumber): """ The xml files is split into two categories: hardcopied lines (banner, column labels etc.) that cannot be modified by RAVEN variable definition, and matrix lines that can be modified by RAVEN variable definition. This method treats the matrix lines, and print them into the perturbed file. 
@ In, line, list, unperturbed input file line @ In, outfile, file object, output file in file object format @ In, atomicNumber, integer, indicates if the isotope parsed is an actinide (0) or a fission product (1) @ Out, None """ line = line.upper().split() line[0] = re.sub(r'(.*?)(\w+)(-)(\d+M?)', r'\1\2\4', line[0]) # remove isotope dashes for isotopeID in self.listedDict.iterkeys(): if line[0] == isotopeID: typeOfDecayPerturbed = [] typeOfDecayPerturbed = self.listedDict.get(isotopeID, {}).keys() for i in range(len(typeOfDecayPerturbed)): try: if self.isotopeClassifier.get(isotopeID) == self.isotopeParsed[0]: # it means the isotope is an actinide line[self.decayModeNumbering.get(atomicNumber).get(typeOfDecayPerturbed[i])] = str(self.listedDict.get(isotopeID).get(typeOfDecayPerturbed[i])) elif self.isotopeClassifier.get(isotopeID) == self.isotopeParsed[1]: # it means the isotope is a FP line[self.decayModeNumbering.get(atomicNumber).get(typeOfDecayPerturbed[i])] = str(self.listedDict.get(isotopeID).get(typeOfDecayPerturbed[i])) except (KeyError, TypeError): raise KeyError('you used the decay mode' + str(typeOfDecayPerturbed) +'Check if the decay mode ' + str(typeOfDecayPerturbed) +'exist in the decay library. You can also check if you perturbed dictionary is under the format |DECAY|DECAYMODE|ISOTOPEID.') if any('ACTINIDES' in s for s in line): flag = self.isotopeParsed[0] elif any('FPRODUCTS' in s for s in line): flag = self.isotopeParsed[1] try: if self.isotopeClassifier[line[0]] == atomicNumber: line[0] = "{0:<7s}".format(line[0]) i = 1 while i <= len(self.decayModeNumbering[atomicNumber]): line[i] = "{0:<11s}".format(line[i]) i = i + 1 outfile.writelines(' ' + ''.join( line[0:len(self.decayModeNumbering[atomicNumber]) + 1]) + "\n") except KeyError: # happens for all the unperturbed isotopes pass def hardcopyPrinter(self, atomicNumber, lines): """ The files are split into two categories: hardcopied lines (banner, column labels etc.) 
that cannot be modified by RAVEN variable definition, and matrix lines that can be modified by RAVEN variable definition. This method treats the hardcopied lines, and then call the matrix line handler method. @ In, atomicNumber, integer, indicates if the isotope parsed is an actinide (0) or a fission product (1) @ In, lines, list, unperturbed input file lines @ Out, None """ flag = 0 count = 0 with open(self.inputFiles, 'a+') as outfile: for line in lines: # if the line is blank, ignore it if not line.split(): continue if re.match(r'(.*?)' + atomicNumber + 's', line.strip()) and atomicNumber == self.isotopeParsed[0]: flag = 2 # if the word Actinides is found outfile.writelines(line) if re.match(r'(.*?)' + atomicNumber + 'roducts', line.strip())and atomicNumber == self.isotopeParsed[1]: flag = 1 # if the word FProducts is found outfile.writelines(line) if flag == 2: # for the actinides decay section if re.match(r'(.*?)\s+\w+(\W)\s+\w+(\W)', line) and any( s in 'BETA' for s in line.split()) and atomicNumber == self.isotopeParsed[0] and count == 0: count = count + 1 outfile.writelines(line) self.matrixPrinter(line, outfile, atomicNumber) if flag == 1: #for the FP decay section if re.match(r'(.*?)\s+\w+(\W)\s+\w+(\W)', line) and any( s in 'BETA' for s in line.split()) and atomicNumber == self.isotopeParsed[1]: outfile.writelines(line) self.matrixPrinter(line, outfile, atomicNumber) def characterizeLibrary(self,lines): """ Characterizes the structure of the library. Teaches the type of decay available for the actinide family and FP family. 
@ In, lines, list, unperturbed input file lines @ Out, None """ concatenateDecayList = [] for line in lines: if re.match(r'(.*?)Actinides', line): typeOfIsotopeParsed = self.isotopeParsed[0] elif re.match(r'(.*?)FProducts', line): typeOfIsotopeParsed = self.isotopeParsed[1] if ( re.match(r'(.*?)\w+(\W?)\s+\w+(\W?)\s+\w', line) and any(s in "BETA" for s in line) ): # create dynamic column detector, the search for 'BETA' ensures this is the label line. count = 0 # reset the counter and the dictionary numbering if new colum sequence is detected numbering = {} decayList = [] line = re.sub(r'(Yy?)ield', r'', line) # Remove the word 'yield' in the decay type lines splitStringDecayType = line.upper().split( ) # Split the words into individual strings for decayType in splitStringDecayType: # replace + and * by strings decayList.append( decayType.replace('*', 'S').replace('+', 'PLUS').replace( '_', '')) concatenateDecayList = concatenateDecayList + decayList # concatenate all the possible decay type (including repetition among actinides and FP) self.allDecayList = list(set(concatenateDecayList)) for i in range(len(decayList)): count = count + 1 numbering[decayList[ i]] = count # assign the column position of the given decay types if typeOfIsotopeParsed == self.isotopeParsed[0]: self.decayModeNumbering[self.isotopeParsed[0]] = numbering if typeOfIsotopeParsed == self.isotopeParsed[1]: self.decayModeNumbering[self.isotopeParsed[1]] = numbering if re.match(r'(.*?)\D+(-?)\d+(M?)\s+\d', line): splitString = line.upper().split() for i, decayConstant in enumerate(splitString): try: splitString[i] = float(decayConstant) except ValueError: pass # the element is a string (isotope tag). 
It can be ignored splitString[0] = re.sub( r'(.*?)(\w+)(-)(\d+M?)', r'\1\2\4', splitString[ 0]) # remove the dash if it the key (isotope ID) contains it if typeOfIsotopeParsed == self.isotopeParsed[0]: self.isotopeClassifier[splitString[0]] = self.isotopeParsed[0] elif typeOfIsotopeParsed == self.isotopeParsed[1]: self.isotopeClassifier[splitString[0]] = self.isotopeParsed[1] def scientificNotation(self, pertDict): """ Converts the numerical values into a scientific notation. @ In, pertDict, dictionary, perturbed variables @ Out, pertDict, dictionary, perturbed variables in scientific format """ for key, value in pertDict.iteritems(): pertDict[key] = '%.3E' % Decimal(str(value)) return pertDict def fileReconstruction(self): """ Converts the formatted dictionary pertdict -> {'DECAY|ALPHA|U235':1.30}. into a dictionary of dictionaries that has the format -> {'DECAY':{'ALPHA':{'U235'1.30}}} @ In, None @ Out, None """ perturbedIsotopes = [] for key in self.pertDict.iterkeys(): splittedDecayKeywords = key.split('|') perturbedIsotopes.append(splittedDecayKeywords[2]) for i in range(len(perturbedIsotopes)): self.listedDict[perturbedIsotopes[i]] = {} for decayTypeKey, decayValue in self.pertDict.iteritems(): decayKeyWords = decayTypeKey.split('|') for i in range(len(self.allDecayList)): self.listedDict[decayKeyWords[2]][decayKeyWords[1]] = decayValue def printInput(self, workingDir,lines): """ Prints out the pertubed decay library into a file. The workflow is: Open a new file with a dummy name; parse the unperturbed library; print the line in the dummy, replace with perturbed variables if necessary. Change the name of the dummy file. 
@ In, workingDir, string, path to working directory @ In, lines, list, unperturbed input file lines @ Out, None """ if os.path.exists(self.inputFiles): os.remove(self.inputFiles) # remove the file if was already existing for atomicNumber in self.isotopeParsed: self.hardcopyPrinter(atomicNumber, lines) with open(self.inputFiles, 'a') as outfile: outfile.writelines(' end')
[ "os.path.exists", "re.match", "warnings.simplefilter", "re.sub", "os.remove" ]
[((150, 202), 'warnings.simplefilter', 'warnings.simplefilter', (['"""default"""', 'DeprecationWarning'], {}), "('default', DeprecationWarning)\n", (171, 202), False, 'import warnings\n'), ((2020, 2074), 're.sub', 're.sub', (['"""(.*?)(\\\\w+)(-)(\\\\d+M?)"""', '"""\\\\1\\\\2\\\\4"""', 'line[0]'], {}), "('(.*?)(\\\\w+)(-)(\\\\d+M?)', '\\\\1\\\\2\\\\4', line[0])\n", (2026, 2074), False, 'import re\n'), ((9936, 9967), 'os.path.exists', 'os.path.exists', (['self.inputFiles'], {}), '(self.inputFiles)\n', (9950, 9967), False, 'import os\n'), ((5981, 6013), 're.match', 're.match', (['"""(.*?)Actinides"""', 'line'], {}), "('(.*?)Actinides', line)\n", (5989, 6013), False, 'import re\n'), ((7610, 7656), 're.match', 're.match', (['"""(.*?)\\\\D+(-?)\\\\d+(M?)\\\\s+\\\\d"""', 'line'], {}), "('(.*?)\\\\D+(-?)\\\\d+(M?)\\\\s+\\\\d', line)\n", (7618, 7656), False, 'import re\n'), ((9975, 10001), 'os.remove', 'os.remove', (['self.inputFiles'], {}), '(self.inputFiles)\n', (9984, 10001), False, 'import os\n'), ((6079, 6111), 're.match', 're.match', (['"""(.*?)FProducts"""', 'line'], {}), "('(.*?)FProducts', line)\n", (6087, 6111), False, 'import re\n'), ((6187, 6241), 're.match', 're.match', (['"""(.*?)\\\\w+(\\\\W?)\\\\s+\\\\w+(\\\\W?)\\\\s+\\\\w"""', 'line'], {}), "('(.*?)\\\\w+(\\\\W?)\\\\s+\\\\w+(\\\\W?)\\\\s+\\\\w', line)\n", (6195, 6241), False, 'import re\n'), ((6542, 6571), 're.sub', 're.sub', (['"""(Yy?)ield"""', '""""""', 'line'], {}), "('(Yy?)ield', '', line)\n", (6548, 6571), False, 'import re\n'), ((7950, 8011), 're.sub', 're.sub', (['"""(.*?)(\\\\w+)(-)(\\\\d+M?)"""', '"""\\\\1\\\\2\\\\4"""', 'splitString[0]'], {}), "('(.*?)(\\\\w+)(-)(\\\\d+M?)', '\\\\1\\\\2\\\\4', splitString[0])\n", (7956, 8011), False, 'import re\n'), ((5039, 5088), 're.match', 're.match', (['"""(.*?)\\\\s+\\\\w+(\\\\W)\\\\s+\\\\w+(\\\\W)"""', 'line'], {}), "('(.*?)\\\\s+\\\\w+(\\\\W)\\\\s+\\\\w+(\\\\W)', line)\n", (5047, 5088), False, 'import re\n'), ((5410, 5459), 're.match', 're.match', 
(['"""(.*?)\\\\s+\\\\w+(\\\\W)\\\\s+\\\\w+(\\\\W)"""', 'line'], {}), "('(.*?)\\\\s+\\\\w+(\\\\W)\\\\s+\\\\w+(\\\\W)', line)\n", (5418, 5459), False, 'import re\n')]
from selenium import webdriver from selenium.webdriver.chrome.options import Options from selenium.webdriver.support.ui import Select import unittest import re import sys import os # first thing first. We have to create product, just to make sure there is atleast 1 product available # to assign endpoints to when creating or editing any. # importing Product_unit_test as a module # set relative path dir_path = os.path.dirname(os.path.realpath(__file__)) try: # First Try for python 3 import importlib.util product_unit_test_module = importlib.util.spec_from_file_location("Product_unit_test", os.path.join(dir_path, 'Product_unit_test.py')) # using ',' allows python to determine the type of separator to use. product_unit_test = importlib.util.module_from_spec(product_unit_test_module) product_unit_test_module.loader.exec_module(product_unit_test) except: # This will work for python2 if above fails import imp product_unit_test = imp.load_source('Product_unit_test', os.path.join(dir_path, 'Product_unit_test.py')) class EndpointTest(unittest.TestCase): def setUp(self): # Initialize the driver # When used with Travis, chromdriver is stored in the same # directory as the unit tests self.options = Options() self.options.add_argument("--headless") self.driver = webdriver.Chrome('chromedriver', chrome_options=self.options) # Allow a little time for the driver to initialize self.driver.implicitly_wait(30) # Set the base address of the dojo self.base_url = "http://localhost:8080/" self.verificationErrors = [] self.accept_next_alert = True def login_page(self): # Make a member reference to the driver driver = self.driver # Navigate to the login page driver.get(self.base_url + "login") # Good practice to clear the entry before typing driver.find_element_by_id("id_username").clear() # These credentials will be used by Travis when testing new PRs # They will not work when testing on your own build # Be sure to change them before submitting a PR 
driver.find_element_by_id("id_username").send_keys(os.environ['DD_ADMIN_USER']) driver.find_element_by_id("id_password").clear() driver.find_element_by_id("id_password").send_keys(os.environ['DD_ADMIN_PASSWORD']) # "Click" the but the login button driver.find_element_by_css_selector("button.btn.btn-success").click() return driver def test_create_endpoint(self): # Login to the site. # Username and password will be gotten from environ driver = self.login_page() # Navigate to the Endpoint page driver.get(self.base_url + "endpoint") # "Click" the dropdown button to see options driver.find_element_by_id("dropdownMenu1").click() # "Click" the New Endpoint driver.find_element_by_link_text("New Endpoint").click() # Keep a good practice of clearing field before entering value # Endpoints driver.find_element_by_id("id_endpoint").clear() driver.find_element_by_id("id_endpoint").send_keys("moving.com.rnd") # Select product to assign endpoint to Select(driver.find_element_by_id("id_product")).select_by_visible_text("QA Test") # submit driver.find_element_by_css_selector("input.btn.btn-primary").click() # Query the site to determine if the finding has been added productTxt = driver.find_element_by_tag_name("BODY").text # Assert ot the query to dtermine status of failure self.assertTrue(re.search(r'Endpoint added successfully', productTxt)) def test_edit_endpoint(self): # Login to the site. 
Password will have to be modified # to match an admin password in your own container driver = self.login_page() # Navigate to the endpoint page driver.get(self.base_url + "endpoint") # Select one of the previously created endpoint to edit driver.find_element_by_link_text("moving.com.rnd").click() # "Click" the dropdown button to see options driver.find_element_by_id("dropdownMenu1").click() # "Click" the Edit Endpoint driver.find_element_by_link_text("Edit Endpoint").click() # Clear the old endpoint host name driver.find_element_by_id("id_host").clear() # Fill in the endpoint host name driver.find_element_by_id("id_host").send_keys("/rnd.moving.com") # Fill in port for endpoint driver.find_element_by_id("id_port").clear() driver.find_element_by_id("id_port").send_keys("8080") # "Click" the submit button to complete the transaction driver.find_element_by_css_selector("input.btn.btn-primary").click() # Query the site to determine if the product has been added productTxt = driver.find_element_by_tag_name("BODY").text # Assert ot the query to dtermine status of failure self.assertTrue(re.search(r'Endpoint updated successfully', productTxt)) def test_delete_endpoint(self): # Login to the site. 
Password will have to be modified # to match an admin password in your own container driver = self.login_page() # Navigate to the endpoint page driver.get(self.base_url + "endpoint") # Select one of the previously created endpoint to delete driver.find_element_by_link_text("/rnd.moving.com").click() # "Click" the dropdown button to see options driver.find_element_by_id("dropdownMenu1").click() # "Click" the Delete Endpoint driver.find_element_by_link_text("Delete Endpoint").click() # "Click" the delete button to complete the transaction driver.find_element_by_css_selector("button.btn.btn-danger").click() # Query the site to determine if the product has been added productTxt = driver.find_element_by_tag_name("BODY").text # Assert ot the query to dtermine status of failure self.assertTrue(re.search(r'Endpoint and relationships removed.', productTxt)) def tearDown(self): self.driver.quit() self.assertEqual([], self.verificationErrors) def suite(): suite = unittest.TestSuite() # Add each test the the suite to be run # success and failure is output by the test suite.addTest(product_unit_test.ProductTest('test_create_product')) suite.addTest(EndpointTest('test_create_endpoint')) suite.addTest(EndpointTest('test_edit_endpoint')) suite.addTest(EndpointTest('test_delete_endpoint')) suite.addTest(product_unit_test.ProductTest('test_delete_product')) return suite if __name__ == "__main__": runner = unittest.TextTestRunner(descriptions=True, failfast=True) ret = not runner.run(suite()).wasSuccessful() sys.exit(ret)
[ "unittest.TestSuite", "selenium.webdriver.chrome.options.Options", "selenium.webdriver.Chrome", "os.path.join", "os.path.realpath", "sys.exit", "unittest.TextTestRunner", "re.search" ]
[((430, 456), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (446, 456), False, 'import os\n'), ((6343, 6363), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (6361, 6363), False, 'import unittest\n'), ((6825, 6882), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'descriptions': '(True)', 'failfast': '(True)'}), '(descriptions=True, failfast=True)\n', (6848, 6882), False, 'import unittest\n'), ((6937, 6950), 'sys.exit', 'sys.exit', (['ret'], {}), '(ret)\n', (6945, 6950), False, 'import sys\n'), ((614, 660), 'os.path.join', 'os.path.join', (['dir_path', '"""Product_unit_test.py"""'], {}), "(dir_path, 'Product_unit_test.py')\n", (626, 660), False, 'import os\n'), ((1288, 1297), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (1295, 1297), False, 'from selenium.webdriver.chrome.options import Options\n'), ((1368, 1429), 'selenium.webdriver.Chrome', 'webdriver.Chrome', (['"""chromedriver"""'], {'chrome_options': 'self.options'}), "('chromedriver', chrome_options=self.options)\n", (1384, 1429), False, 'from selenium import webdriver\n'), ((1018, 1064), 'os.path.join', 'os.path.join', (['dir_path', '"""Product_unit_test.py"""'], {}), "(dir_path, 'Product_unit_test.py')\n", (1030, 1064), False, 'import os\n'), ((3697, 3749), 're.search', 're.search', (['"""Endpoint added successfully"""', 'productTxt'], {}), "('Endpoint added successfully', productTxt)\n", (3706, 3749), False, 'import re\n'), ((5098, 5152), 're.search', 're.search', (['"""Endpoint updated successfully"""', 'productTxt'], {}), "('Endpoint updated successfully', productTxt)\n", (5107, 5152), False, 'import re\n'), ((6147, 6207), 're.search', 're.search', (['"""Endpoint and relationships removed."""', 'productTxt'], {}), "('Endpoint and relationships removed.', productTxt)\n", (6156, 6207), False, 'import re\n')]
""" Class :py:class:`CMWDBMain` is a QWidget for calibman =========================================================== Usage :: See test_CMWDBMain() at the end See: - :class:`CMWDBMain` - :class:`CMWMainTabs` - :class:`CMConfigParameters` - `graphqt documentation <https://lcls-psana.github.io/graphqt/py-modindex.html>`_. Created on 2017-02-01 by <NAME> Adopted for LCLS2 on 2018-02-26 by <NAME> """ import logging logger = logging.getLogger(__name__) from PyQt5.QtWidgets import QApplication, QWidget, QSplitter, QVBoxLayout, QTextEdit from PyQt5.QtCore import Qt from psana.graphqt.CMConfigParameters import cp from psana.graphqt.CMWDBDocs import CMWDBDocs from psana.graphqt.CMWDBDocEditor import CMWDBDocEditor from psana.graphqt.CMWDBTree import CMWDBTree from psana.graphqt.CMWDBControl import CMWDBControl class CMWDBMain(QWidget): _name = 'CMWDBMain' def __init__(self, parent=None): QWidget.__init__(self, parent=parent) cp.cmwdbmain = self self.wbuts = CMWDBControl(parent=self) self.wtree = CMWDBTree() self.wdocs = CMWDBDocs() self.wdoce = CMWDBDocEditor() # Horizontal splitter widget self.hspl = QSplitter(Qt.Horizontal) self.hspl.addWidget(self.wtree) self.hspl.addWidget(self.wdocs) self.hspl.addWidget(self.wdoce) # Vertical splitter widget self.vspl = QSplitter(Qt.Vertical) self.vspl.addWidget(self.wbuts) self.vspl.addWidget(self.hspl) # Main box layout self.mbox = QVBoxLayout() self.mbox.addWidget(self.vspl) self.setLayout(self.mbox) self.set_style() self.set_tool_tips() self.connect_signals_to_slots() def connect_signals_to_slots(self): pass def on_but_tabs_clicked_test(self): logger.debug('on_but_tabs_clicked') def proc_parser(self, parser=None): self.parser=parser if parser is None: return return def set_tool_tips(self): pass #self.butStop.setToolTip('Not implemented yet...') def set_hsplitter_sizes(self, s0=None, s1=None, s2=None): _s0 = cp.cdb_hsplitter0.value() if s0 is None else s0 _s1 = cp.cdb_hsplitter1.value() if s1 is None else s1 _s2 = 
cp.cdb_hsplitter2.value() if s2 is None else s2 self.hspl.setSizes((_s0, _s1, _s2)) def set_hsplitter_size2(self, s2=0): _s0, _s1, _s2 = self.hsplitter_sizes() self.set_hsplitter_sizes(_s0, _s1+_s2-s2, s2 ) def hsplitter_sizes(self): return self.hspl.sizes() #[0] def save_hsplitter_sizes(self): """Save hsplitter sizes in configuration parameters. """ s0, s1, s2 = self.hsplitter_sizes() msg = 'Save h-splitter sizes %d %d %d' % (s0, s1, s2) logger.debug(msg) cp.cdb_hsplitter0.setValue(s0) cp.cdb_hsplitter1.setValue(s1) cp.cdb_hsplitter2.setValue(s2) def set_style(self): self.layout().setContentsMargins(0,0,0,0) self.wtree.setMinimumWidth(100) self.wtree.setMaximumWidth(600) self.set_hsplitter_sizes() def closeEvent(self, e): logger.debug('%s.closeEvent' % self._name) self.on_save() QWidget.closeEvent(self, e) def view_hide_tabs(self): #self.set_tabs_visible(not self.tab_bar.isVisible()) #self.wbuts.tab_bar.setVisible(not self.tab_bar.isVisible()) self.wbuts.view_hide_tabs() def key_usage(self): return 'Keys:'\ '\n V - view/hide tabs'\ '\n' if __name__ == "__main__": def keyPressEvent(self, e): #logger.debug('keyPressEvent, key=', e.key()) logger.info('%s.keyPressEvent, key=%d' % (self._name, e.key())) if e.key() == Qt.Key_Escape: self.close() elif e.key() == Qt.Key_V: self.view_hide_tabs() else: logger.debug(self.key_usage()) def on_save(self): self.save_hsplitter_sizes() if __name__ == "__main__": def test_CMWDBMain(): import sys logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s: %(message)s', level=logging.DEBUG) app = QApplication(sys.argv) w = CMWDBMain() w.setMinimumSize(600, 300) w.show() app.exec_() del w del app if __name__ == "__main__": test_CMWDBMain() # EOF
[ "logging.getLogger", "psana.graphqt.CMConfigParameters.cp.cdb_hsplitter2.setValue", "logging.basicConfig", "psana.graphqt.CMConfigParameters.cp.cdb_hsplitter0.value", "psana.graphqt.CMWDBTree.CMWDBTree", "psana.graphqt.CMWDBDocs.CMWDBDocs", "psana.graphqt.CMWDBDocEditor.CMWDBDocEditor", "PyQt5.QtWidge...
[((449, 476), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (466, 476), False, 'import logging\n'), ((939, 976), 'PyQt5.QtWidgets.QWidget.__init__', 'QWidget.__init__', (['self'], {'parent': 'parent'}), '(self, parent=parent)\n', (955, 976), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QSplitter, QVBoxLayout, QTextEdit\n'), ((1027, 1052), 'psana.graphqt.CMWDBControl.CMWDBControl', 'CMWDBControl', ([], {'parent': 'self'}), '(parent=self)\n', (1039, 1052), False, 'from psana.graphqt.CMWDBControl import CMWDBControl\n'), ((1074, 1085), 'psana.graphqt.CMWDBTree.CMWDBTree', 'CMWDBTree', ([], {}), '()\n', (1083, 1085), False, 'from psana.graphqt.CMWDBTree import CMWDBTree\n'), ((1107, 1118), 'psana.graphqt.CMWDBDocs.CMWDBDocs', 'CMWDBDocs', ([], {}), '()\n', (1116, 1118), False, 'from psana.graphqt.CMWDBDocs import CMWDBDocs\n'), ((1140, 1156), 'psana.graphqt.CMWDBDocEditor.CMWDBDocEditor', 'CMWDBDocEditor', ([], {}), '()\n', (1154, 1156), False, 'from psana.graphqt.CMWDBDocEditor import CMWDBDocEditor\n'), ((1215, 1239), 'PyQt5.QtWidgets.QSplitter', 'QSplitter', (['Qt.Horizontal'], {}), '(Qt.Horizontal)\n', (1224, 1239), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QSplitter, QVBoxLayout, QTextEdit\n'), ((1416, 1438), 'PyQt5.QtWidgets.QSplitter', 'QSplitter', (['Qt.Vertical'], {}), '(Qt.Vertical)\n', (1425, 1438), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QSplitter, QVBoxLayout, QTextEdit\n'), ((1565, 1578), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (1576, 1578), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QSplitter, QVBoxLayout, QTextEdit\n'), ((2883, 2913), 'psana.graphqt.CMConfigParameters.cp.cdb_hsplitter0.setValue', 'cp.cdb_hsplitter0.setValue', (['s0'], {}), '(s0)\n', (2909, 2913), False, 'from psana.graphqt.CMConfigParameters import cp\n'), ((2922, 2952), 'psana.graphqt.CMConfigParameters.cp.cdb_hsplitter1.setValue', 'cp.cdb_hsplitter1.setValue', 
(['s1'], {}), '(s1)\n', (2948, 2952), False, 'from psana.graphqt.CMConfigParameters import cp\n'), ((2961, 2991), 'psana.graphqt.CMConfigParameters.cp.cdb_hsplitter2.setValue', 'cp.cdb_hsplitter2.setValue', (['s2'], {}), '(s2)\n', (2987, 2991), False, 'from psana.graphqt.CMConfigParameters import cp\n'), ((3297, 3324), 'PyQt5.QtWidgets.QWidget.closeEvent', 'QWidget.closeEvent', (['self', 'e'], {}), '(self, e)\n', (3315, 3324), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QSplitter, QVBoxLayout, QTextEdit\n'), ((4151, 4254), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(name)s %(levelname)s: %(message)s"""', 'level': 'logging.DEBUG'}), "(format=\n '%(asctime)s %(name)s %(levelname)s: %(message)s', level=logging.DEBUG)\n", (4170, 4254), False, 'import logging\n'), ((4260, 4282), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (4272, 4282), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QSplitter, QVBoxLayout, QTextEdit\n'), ((2199, 2224), 'psana.graphqt.CMConfigParameters.cp.cdb_hsplitter0.value', 'cp.cdb_hsplitter0.value', ([], {}), '()\n', (2222, 2224), False, 'from psana.graphqt.CMConfigParameters import cp\n'), ((2261, 2286), 'psana.graphqt.CMConfigParameters.cp.cdb_hsplitter1.value', 'cp.cdb_hsplitter1.value', ([], {}), '()\n', (2284, 2286), False, 'from psana.graphqt.CMConfigParameters import cp\n'), ((2323, 2348), 'psana.graphqt.CMConfigParameters.cp.cdb_hsplitter2.value', 'cp.cdb_hsplitter2.value', ([], {}), '()\n', (2346, 2348), False, 'from psana.graphqt.CMConfigParameters import cp\n')]
import inspect from collections import defaultdict from typing import Dict, List, NamedTuple from .core import PluginFinder, PluginSpec from .discovery import PackagePathPluginFinder class EntryPoint(NamedTuple): name: str value: str group: str EntryPointDict = Dict[str, List[str]] def discover_entry_points(finder: PluginFinder) -> EntryPointDict: """ Creates a dictionary for the entry_points attribute of setuptools' setup(), where keys are stevedore plugin namespaces, and values are lists of "name = module:object" pairs. :return: an entry_point dictionary """ return to_entry_point_dict([spec_to_entry_point(spec) for spec in finder.find_plugins()]) def to_entry_point_dict(eps: List[EntryPoint]) -> EntryPointDict: result = defaultdict(list) names = defaultdict(set) # book-keeping to check duplicates for ep in eps: if ep.name in names[ep.group]: raise ValueError("Duplicate entry point %s %s" % (ep.group, ep.name)) result[ep.group].append("%s=%s" % (ep.name, ep.value)) names[ep.group].add(ep.name) return result def spec_to_entry_point(spec: PluginSpec) -> EntryPoint: module = inspect.getmodule(spec.factory).__name__ name = spec.factory.__name__ path = f"{module}:{name}" return EntryPoint(group=spec.namespace, name=spec.name, value=path) def find_plugins(where=".", exclude=(), include=("*",)) -> EntryPointDict: """ Utility for setup.py that collects all plugins from the specified path, and creates a dictionary for entry_points. For example: setup( entry_points=find_plugins() ) """ return discover_entry_points( PackagePathPluginFinder(where=where, exclude=exclude, include=include) )
[ "inspect.getmodule", "collections.defaultdict" ]
[((782, 799), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (793, 799), False, 'from collections import defaultdict\n'), ((812, 828), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (823, 828), False, 'from collections import defaultdict\n'), ((1198, 1229), 'inspect.getmodule', 'inspect.getmodule', (['spec.factory'], {}), '(spec.factory)\n', (1215, 1229), False, 'import inspect\n')]
# AUTOGENERATED! DO NOT EDIT! File to edit: 01_dataloader.ipynb (unless otherwise specified). __all__ = ['MNIST_NORMALIZATION', 'AmbiguousMNIST', 'FastMNIST', 'DirtyMNIST'] # Cell import os from typing import IO, Any, Callable, Dict, List, Optional, Tuple, Union from urllib.error import URLError import torch from torchvision.datasets.mnist import MNIST, VisionDataset from torchvision.datasets.utils import download_url, extract_archive, verify_str_arg from torchvision.transforms import Compose, Normalize, ToTensor # Cell MNIST_NORMALIZATION = Normalize((0.1307,), (0.3081,)) # Cell # based on torchvision.datasets.mnist.py (https://github.com/pytorch/vision/blob/37eb37a836fbc2c26197dfaf76d2a3f4f39f15df/torchvision/datasets/mnist.py) class AmbiguousMNIST(VisionDataset): """ Ambiguous-MNIST Dataset Please cite: @article{mukhoti2021deterministic, title={Deterministic Neural Networks with Appropriate Inductive Biases Capture Epistemic and Aleatoric Uncertainty}, author={<NAME> and Kirsch, Andreas and <NAME>, Joost and Torr, <NAME> and Gal, Yarin}, journal={arXiv preprint arXiv:2102.11582}, year={2021} } Args: root (string): Root directory of dataset where ``MNIST/processed/training.pt`` and ``MNIST/processed/test.pt`` exist. train (bool, optional): If True, creates dataset from ``training.pt``, otherwise from ``test.pt``. download (bool, optional): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed version. E.g, ``transforms.RandomCrop`` target_transform (callable, optional): A function/transform that takes in the target and transforms it. normalize (bool, optional): Normalize the samples. 
device: Device to use (pass `num_workers=0, pin_memory=False` to the DataLoader for max throughput) """ mirrors = ["http://github.com/BlackHC/ddu_dirty_mnist/releases/download/data-v1.0.0/"] resources = dict( data=("amnist_samples.pt", "4f7865093b1d28e34019847fab917722"), targets=("amnist_labels.pt", "3bfc055a9f91a76d8d493e8b898c3c95"), ) def __init__( self, root: str, *, train: bool = True, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, download: bool = False, normalize: bool = True, noise_stddev=0.05, device=None, ): super().__init__(root, transform=transform, target_transform=target_transform) self.train = train # training set or test set if download: self.download() self.data = torch.load(self.resource_path("data"), map_location=device) if normalize: self.data = self.data.sub_(0.1307).div_(0.3081) self.targets = torch.load(self.resource_path("targets"), map_location=device) # Each sample has `num_multi_labels` many labels. num_multi_labels = self.targets.shape[1] # Flatten the multi-label dataset into a single-label dataset with samples repeated x `num_multi_labels` many times self.data = self.data.expand(-1, num_multi_labels, 28, 28).reshape(-1, 1, 28, 28) self.targets = self.targets.reshape(-1) data_range = slice(None, 60000) if self.train else slice(60000, None) self.data = self.data[data_range] if noise_stddev > 0.0: self.data += torch.randn_like(self.data) * noise_stddev self.targets = self.targets[data_range] def __getitem__(self, index: int) -> Tuple[torch.Tensor, int]: """ Args: index (int): Index Returns: tuple: (image, target) where target is index of the target class. 
""" img, target = self.data[index], self.targets[index] if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img, target def __len__(self) -> int: return len(self.data) @property def data_folder(self) -> str: return os.path.join(self.root, self.__class__.__name__) def resource_path(self, name): return os.path.join(self.data_folder, self.resources[name][0]) def _check_exists(self) -> bool: return all(os.path.exists(self.resource_path(name)) for name in self.resources) def download(self) -> None: """Download the data if it doesn't exist in data_folder already.""" if self._check_exists(): return os.makedirs(self.data_folder, exist_ok=True) # download files for filename, md5 in self.resources.values(): for mirror in self.mirrors: url = "{}{}".format(mirror, filename) try: print("Downloading {}".format(url)) download_url(url, root=self.data_folder, filename=filename, md5=md5) except URLError as error: print("Failed to download (trying next):\n{}".format(error)) continue except: raise finally: print() break else: raise RuntimeError("Error downloading {}".format(filename)) print("Done!") # Cell class FastMNIST(MNIST): """ FastMNIST, based on https://tinyurl.com/pytorch-fast-mnist. It's like MNIST (<http://yann.lecun.com/exdb/mnist/>) but faster. Args: root (string): Root directory of dataset where ``MNIST/processed/training.pt`` and ``MNIST/processed/test.pt`` exist. train (bool, optional): If True, creates dataset from ``training.pt``, otherwise from ``test.pt``. download (bool, optional): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed version. E.g, ``transforms.RandomCrop`` target_transform (callable, optional): A function/transform that takes in the target and transforms it. 
normalize (bool, optional): Normalize the samples. device: Device to use (pass `num_workers=0, pin_memory=False` to the DataLoader for max throughput). """ def __init__(self, *args, normalize, noise_stddev=0.05, device, **kwargs): super().__init__(*args, **kwargs) # Scale data to [0,1] self.data = self.data.unsqueeze(1).float().div(255) # Put both data and targets on GPU in advance self.data, self.targets = self.data.to(device), self.targets.to(device) if normalize: self.data = self.data.sub_(0.1307).div_(0.3081) if noise_stddev > 0.0: self.data += torch.randn_like(self.data) * noise_stddev def __getitem__(self, index: int) -> Tuple[torch.Tensor, int]: """ Args: index (int): Index Returns: tuple: (image, target) where target is index of the target class. """ img, target = self.data[index], self.targets[index] if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img, target # Cell def DirtyMNIST( root: str, *, train: bool = True, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, download: bool = False, normalize=True, noise_stddev=0.05, device=None, ): """ DirtyMNIST Please cite: @article{mukhoti2021deterministic, title={Deterministic Neural Networks with Appropriate Inductive Biases Capture Epistemic and Aleatoric Uncertainty}, author={<NAME> and Kirsch, Andreas and <NAME>, Joost and <NAME> and <NAME>}, journal={arXiv preprint arXiv:2102.11582}, year={2021} } Args: root (string): Root directory of dataset where ``MNIST/processed/training.pt`` and ``MNIST/processed/test.pt`` exist. train (bool, optional): If True, creates dataset from ``training.pt``, otherwise from ``test.pt``. download (bool, optional): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. 
transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed version. E.g, ``transforms.RandomCrop`` target_transform (callable, optional): A function/transform that takes in the target and transforms it. normalize (bool, optional): Normalize the samples. device: Device to use (pass `num_workers=0, pin_memory=False` to the DataLoader for max throughput). """ mnist_dataset = FastMNIST( root=root, train=train, transform=transform, target_transform=target_transform, download=download, normalize=normalize, noise_stddev=noise_stddev, device=device, ) amnist_dataset = AmbiguousMNIST( root=root, train=train, transform=transform, target_transform=target_transform, download=download, normalize=normalize, noise_stddev=noise_stddev, device=device, ) return torch.utils.data.ConcatDataset([mnist_dataset, amnist_dataset])
[ "torch.utils.data.ConcatDataset", "os.makedirs", "os.path.join", "torchvision.datasets.utils.download_url", "torch.randn_like", "torchvision.transforms.Normalize" ]
[((554, 585), 'torchvision.transforms.Normalize', 'Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (563, 585), False, 'from torchvision.transforms import Compose, Normalize, ToTensor\n'), ((9926, 9989), 'torch.utils.data.ConcatDataset', 'torch.utils.data.ConcatDataset', (['[mnist_dataset, amnist_dataset]'], {}), '([mnist_dataset, amnist_dataset])\n', (9956, 9989), False, 'import torch\n'), ((4411, 4459), 'os.path.join', 'os.path.join', (['self.root', 'self.__class__.__name__'], {}), '(self.root, self.__class__.__name__)\n', (4423, 4459), False, 'import os\n'), ((4511, 4566), 'os.path.join', 'os.path.join', (['self.data_folder', 'self.resources[name][0]'], {}), '(self.data_folder, self.resources[name][0])\n', (4523, 4566), False, 'import os\n'), ((4864, 4908), 'os.makedirs', 'os.makedirs', (['self.data_folder'], {'exist_ok': '(True)'}), '(self.data_folder, exist_ok=True)\n', (4875, 4908), False, 'import os\n'), ((3697, 3724), 'torch.randn_like', 'torch.randn_like', (['self.data'], {}), '(self.data)\n', (3713, 3724), False, 'import torch\n'), ((7253, 7280), 'torch.randn_like', 'torch.randn_like', (['self.data'], {}), '(self.data)\n', (7269, 7280), False, 'import torch\n'), ((5180, 5248), 'torchvision.datasets.utils.download_url', 'download_url', (['url'], {'root': 'self.data_folder', 'filename': 'filename', 'md5': 'md5'}), '(url, root=self.data_folder, filename=filename, md5=md5)\n', (5192, 5248), False, 'from torchvision.datasets.utils import download_url, extract_archive, verify_str_arg\n')]
#!/usr/bin/python # TrueRNG Read - Simple Example # <NAME> # 8/21/2016 # # Requires Python 2.7, pyserial # On Linux - may need to be root or set /dev/tty port permissions to 666 # # Python 2.7.xx is available here: https://www.python.org/ # Install Pyserial package with: python -m pip install pyserial import serial import time from serial.tools import list_ports # Size of block for each loop blocksize=102400 # Number of loops numloops=10 # Print our header print('TrueRNG Data Read Example') print('http://ubld.it') print('==================================================') # Create ports variable as dictionary ports=dict() # Call list_ports to get com port info ports_avaiable = list(list_ports.comports()) # Set default of None for com port rng_com_port = None # Loop on all available ports to find TrueRNG for temp in ports_avaiable: if temp[1].startswith("TrueRNG"): print('Found: ' + str(temp)) if rng_com_port == None: # always chooses the 1st TrueRNG found rng_com_port=str(temp[0]) # Print which port we're using print('Using com port: ' + str(rng_com_port)) # Print block size and number of loops print('Block Size: ' + str(blocksize) + ' Bytes') print('Number of loops: ' + str(numloops)) print('Total size: ' + str(blocksize * numloops) + ' Bytes') print('Writing to: random.bin') print('==================================================') # Open/create the file random.bin in the current directory with 'write binary' fp=open('random.bin','wb') # Print an error if we can't open the file if fp==None: print('Error Opening File!') # Try to setup and open the comport try: ser = serial.Serial(port=rng_com_port,timeout=10) # timeout set at 10 seconds in case the read fails except: print('Port Not Usable!') print('Do you have permissions set to read ' + rng_com_port + ' ?') # Open the serial port if it isn't open if(ser.isOpen() == False): ser.open() # Set Data Terminal Ready to start flow ser.setDTR(True) # This clears the receive buffer so we aren't using buffered data 
ser.flushInput() # Keep track of total bytes read totalbytes=0 # Loop for _ in range(numloops): # Try to read the port and record the time before and after try: before = time.time() # in microseconds x=ser.read(blocksize) # read bytes from serial port after = time.time() # in microseconds except: print('Read Failed!!!') break # Update total bytes read totalbytes +=len(x) # If we were able to open the file, write to disk if fp !=0: fp.write(x) # Calculate the rate rate=float(blocksize) / ((after-before)*1000.0) print(str(totalbytes) + ' Bytes Read at ' + '{:6.2f}'.format(rate) + ' Kbytes/s') # Close the serial port ser.close() # If the file is open then close it if fp != 0: fp.close()
[ "serial.tools.list_ports.comports", "serial.Serial", "time.time" ]
[((701, 722), 'serial.tools.list_ports.comports', 'list_ports.comports', ([], {}), '()\n', (720, 722), False, 'from serial.tools import list_ports\n'), ((1686, 1730), 'serial.Serial', 'serial.Serial', ([], {'port': 'rng_com_port', 'timeout': '(10)'}), '(port=rng_com_port, timeout=10)\n', (1699, 1730), False, 'import serial\n'), ((2289, 2300), 'time.time', 'time.time', ([], {}), '()\n', (2298, 2300), False, 'import time\n'), ((2400, 2411), 'time.time', 'time.time', ([], {}), '()\n', (2409, 2411), False, 'import time\n')]
from io import BytesIO import matplotlib.pyplot as plt from user_database import data, MONTHS, get_city_temperature, get_city_humidity, CITIES def get_main_image(): """Rendering the scatter chart""" yearly_temp = [] yearly_hum = [] for city in data: yearly_temp.append(sum(get_city_temperature(city))/12) yearly_hum.append(sum(get_city_humidity(city))/12) plt.clf() plt.scatter(yearly_hum, yearly_temp, alpha=0.5) plt.title('Yearly Average Temperature/Humidity') plt.xlim(70, 95) plt.ylabel('Yearly Average Temperature') plt.xlabel('Yearly Average Relative Humidity') for i, txt in enumerate(CITIES): plt.annotate(txt, (yearly_hum[i], yearly_temp[i])) img = BytesIO() plt.savefig(img) img.seek(0) return img def get_city_image(city_id): """Rendering line charts with city specific data""" city = data.get(city_id) city_temp = get_city_temperature(city) city_hum = get_city_humidity(city) plt.clf() plt.plot(MONTHS, city_temp, color='blue', linewidth=2.5, linestyle='-') plt.ylabel('Mean Daily Temperature', color='blue') plt.yticks(color='blue') plt.twinx() plt.plot(MONTHS, city_hum, color='red', linewidth=2.5, linestyle='-') plt.ylabel('Average Relative Humidity', color='red') plt.yticks(color='red') plt.title(city.city_name) img = BytesIO() plt.savefig(img) img.seek(0) return img
[ "matplotlib.pyplot.savefig", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.clf", "io.BytesIO", "matplotlib.pyplot.plot", "matplotlib.pyplot.twinx", "user_database.data.get", "user_database.get_city_temperature", "user_database.get_city_humidity", "matplotlib.pyplot.y...
[((396, 405), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (403, 405), True, 'import matplotlib.pyplot as plt\n'), ((410, 457), 'matplotlib.pyplot.scatter', 'plt.scatter', (['yearly_hum', 'yearly_temp'], {'alpha': '(0.5)'}), '(yearly_hum, yearly_temp, alpha=0.5)\n', (421, 457), True, 'import matplotlib.pyplot as plt\n'), ((462, 510), 'matplotlib.pyplot.title', 'plt.title', (['"""Yearly Average Temperature/Humidity"""'], {}), "('Yearly Average Temperature/Humidity')\n", (471, 510), True, 'import matplotlib.pyplot as plt\n'), ((515, 531), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(70)', '(95)'], {}), '(70, 95)\n', (523, 531), True, 'import matplotlib.pyplot as plt\n'), ((536, 576), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Yearly Average Temperature"""'], {}), "('Yearly Average Temperature')\n", (546, 576), True, 'import matplotlib.pyplot as plt\n'), ((581, 627), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Yearly Average Relative Humidity"""'], {}), "('Yearly Average Relative Humidity')\n", (591, 627), True, 'import matplotlib.pyplot as plt\n'), ((736, 745), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (743, 745), False, 'from io import BytesIO\n'), ((750, 766), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img'], {}), '(img)\n', (761, 766), True, 'import matplotlib.pyplot as plt\n'), ((896, 913), 'user_database.data.get', 'data.get', (['city_id'], {}), '(city_id)\n', (904, 913), False, 'from user_database import data, MONTHS, get_city_temperature, get_city_humidity, CITIES\n'), ((930, 956), 'user_database.get_city_temperature', 'get_city_temperature', (['city'], {}), '(city)\n', (950, 956), False, 'from user_database import data, MONTHS, get_city_temperature, get_city_humidity, CITIES\n'), ((972, 995), 'user_database.get_city_humidity', 'get_city_humidity', (['city'], {}), '(city)\n', (989, 995), False, 'from user_database import data, MONTHS, get_city_temperature, get_city_humidity, CITIES\n'), ((1001, 1010), 'matplotlib.pyplot.clf', 'plt.clf', 
([], {}), '()\n', (1008, 1010), True, 'import matplotlib.pyplot as plt\n'), ((1015, 1086), 'matplotlib.pyplot.plot', 'plt.plot', (['MONTHS', 'city_temp'], {'color': '"""blue"""', 'linewidth': '(2.5)', 'linestyle': '"""-"""'}), "(MONTHS, city_temp, color='blue', linewidth=2.5, linestyle='-')\n", (1023, 1086), True, 'import matplotlib.pyplot as plt\n'), ((1091, 1141), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean Daily Temperature"""'], {'color': '"""blue"""'}), "('Mean Daily Temperature', color='blue')\n", (1101, 1141), True, 'import matplotlib.pyplot as plt\n'), ((1146, 1170), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'color': '"""blue"""'}), "(color='blue')\n", (1156, 1170), True, 'import matplotlib.pyplot as plt\n'), ((1175, 1186), 'matplotlib.pyplot.twinx', 'plt.twinx', ([], {}), '()\n', (1184, 1186), True, 'import matplotlib.pyplot as plt\n'), ((1191, 1260), 'matplotlib.pyplot.plot', 'plt.plot', (['MONTHS', 'city_hum'], {'color': '"""red"""', 'linewidth': '(2.5)', 'linestyle': '"""-"""'}), "(MONTHS, city_hum, color='red', linewidth=2.5, linestyle='-')\n", (1199, 1260), True, 'import matplotlib.pyplot as plt\n'), ((1265, 1317), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Average Relative Humidity"""'], {'color': '"""red"""'}), "('Average Relative Humidity', color='red')\n", (1275, 1317), True, 'import matplotlib.pyplot as plt\n'), ((1322, 1345), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'color': '"""red"""'}), "(color='red')\n", (1332, 1345), True, 'import matplotlib.pyplot as plt\n'), ((1350, 1375), 'matplotlib.pyplot.title', 'plt.title', (['city.city_name'], {}), '(city.city_name)\n', (1359, 1375), True, 'import matplotlib.pyplot as plt\n'), ((1387, 1396), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1394, 1396), False, 'from io import BytesIO\n'), ((1401, 1417), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img'], {}), '(img)\n', (1412, 1417), True, 'import matplotlib.pyplot as plt\n'), ((674, 724), 'matplotlib.pyplot.annotate', 
'plt.annotate', (['txt', '(yearly_hum[i], yearly_temp[i])'], {}), '(txt, (yearly_hum[i], yearly_temp[i]))\n', (686, 724), True, 'import matplotlib.pyplot as plt\n'), ((300, 326), 'user_database.get_city_temperature', 'get_city_temperature', (['city'], {}), '(city)\n', (320, 326), False, 'from user_database import data, MONTHS, get_city_temperature, get_city_humidity, CITIES\n'), ((362, 385), 'user_database.get_city_humidity', 'get_city_humidity', (['city'], {}), '(city)\n', (379, 385), False, 'from user_database import data, MONTHS, get_city_temperature, get_city_humidity, CITIES\n')]
"""Represent a class to generate a meme.""" from PIL import Image, ImageDraw, ImageFont from os import makedirs from random import randint from textwrap import fill class MemeGenerator: """ A class to generate a meme. The following responsibilities are defined under this class: - Loading of an image from a disk. - Transforming the image by resizing to a maximum width of 500px while maintaining the input aspect ratio. - Adding a caption to the image with a body and author to a random location on the image. """ def __init__(self, output_dir: str): """ Create a new `MemeGenerator`. Parameters ---------- `output_dir`: str output directory to save the generated meme. """ self.output_dir = output_dir makedirs(self.output_dir, exist_ok=True) def validate_width(self, width: int) -> ValueError: """ Assert whether desired width of the image does not exceed 500px. Raise `ValueError` if width greater than 500px. Parameters ---------- `width`: int width of the image. """ if width > 500: raise ValueError('Width of the image cannot exceed 500px.') def make_meme(self, img_path: str, text: str, author: str, width: int = 500) -> str: """ Return path of the saved image of the meme after adding caption to it. Parameters ---------- `img_path`: str path of the original image. `text`: str quote of the meme. `author`: str author of the meme. `width`: int desired width of the image, default = 500px. """ # Opening the image. img = Image.open(img_path) # Try-except block to handle instances where width > 500px. try: self.validate_width(width) except ValueError as val_err: print(val_err) else: # Resizing the image proportionate to the given width. ratio = width / float(img.size[0]) height = int(ratio * float(img.size[1])) img = img.resize((width, height), Image.NEAREST) # Adding the caption to the image. 
caption = fill(f'{text} - {author}', width=40) font = ImageFont.truetype('./_data/font/Candara.ttf', size=20) draw = ImageDraw.Draw(img) draw.text(xy=(randint(10, 40), randint(20, 70)), text=caption, font=font, fill='white') # Saving the image to the output directory. output_path = f'{self.output_dir}/{randint(0, 1000)}.png' try: img.save(output_path) except Exception as e: print(e) return output_path
[ "PIL.Image.open", "os.makedirs", "PIL.ImageFont.truetype", "textwrap.fill", "PIL.ImageDraw.Draw", "random.randint" ]
[((832, 872), 'os.makedirs', 'makedirs', (['self.output_dir'], {'exist_ok': '(True)'}), '(self.output_dir, exist_ok=True)\n', (840, 872), False, 'from os import makedirs\n'), ((1815, 1835), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1825, 1835), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2326, 2362), 'textwrap.fill', 'fill', (['f"""{text} - {author}"""'], {'width': '(40)'}), "(f'{text} - {author}', width=40)\n", (2330, 2362), False, 'from textwrap import fill\n'), ((2378, 2433), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""./_data/font/Candara.ttf"""'], {'size': '(20)'}), "('./_data/font/Candara.ttf', size=20)\n", (2396, 2433), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2450, 2469), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (2464, 2469), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2738, 2754), 'random.randint', 'randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (2745, 2754), False, 'from random import randint\n'), ((2492, 2507), 'random.randint', 'randint', (['(10)', '(40)'], {}), '(10, 40)\n', (2499, 2507), False, 'from random import randint\n'), ((2531, 2546), 'random.randint', 'randint', (['(20)', '(70)'], {}), '(20, 70)\n', (2538, 2546), False, 'from random import randint\n')]
import json from flask import jsonify, request, make_response, render_template from flask.blueprints import Blueprint from app.daos import tenant_dao from app.firebase_utils import * from app.services import user_service from app.user_dao import user_dao users_bp = Blueprint("users_bp", __name__) @users_bp.route("/users", methods=["GET"]) def get_multiple(): accessible_roles = ["SUPER_ADMIN","TENANT_ADMIN"] returned_value = have_claims(request.headers.get("Authorization"),accessible_roles) if returned_value["have_access"]: params = user_service.url_args_to_query_params_dict(request.args, False) values_for_return = user_dao.get_users( returned_value, params['filters'], params['sort'], params['range'] ) return user_service.response(values_for_return['data'],values_for_return['count'], 200) else: return user_service.response(status_code=403) @users_bp.route("/users", methods=["POST"]) def create_tenant_admin(): accessible_roles = ["SUPER_ADMIN","TENANT_ADMIN"] returned_value = have_claims(request.headers.get("Authorization"),accessible_roles) if returned_value["have_access"]: data = request.json success = user_dao.create_user(**data) if success: return user_service.response(success,status_code=200) else: return user_service.response(status_code=500) else: return user_service.response(status_code=403) @users_bp.route("/users/<id>", methods=["DELETE"]) def del_tenant(id): accessible_roles = ["SUPER_ADMIN","TENANT_ADMIN"] returned_value = have_claims(request.headers.get("Authorization"),accessible_roles) if returned_value["have_access"]: '''success = user_dao.delete_user(id) if success: return user_service.response() ''' return user_service.response(user_dao.update_user(have_claims, id, {"role":"", "tenantId": ""})) return user_service.response(status_code=403) @users_bp.route("/users/<id>", methods=["PUT"]) def update_tenant_admin(id): accessible_roles = ["SUPER_ADMIN", "TENANT_ADMIN", "USER"] returned_value = 
have_claims(request.headers.get("Authorization"),accessible_roles) if returned_value["have_access"]: data = request.json success = user_dao.update_user(returned_value, id, data) if success: return user_service.response(success,status_code=200) else: return user_service.response(status_code=500) return user_service.response(status_code=403) @users_bp.route("/users/<id>", methods=["GET"]) def get_one_user(id): accessible_roles = ["SUPER_ADMIN","TENANT_ADMIN", "USER"] returned_value = have_claims(request.headers.get("Authorization"),accessible_roles) if returned_value["have_access"]: return user_service.response(user_dao.get_one(returned_value, id)) else: return user_service.response(status_code=403)
[ "app.user_dao.user_dao.update_user", "app.services.user_service.response", "app.user_dao.user_dao.get_users", "flask.blueprints.Blueprint", "app.user_dao.user_dao.create_user", "app.user_dao.user_dao.get_one", "app.services.user_service.url_args_to_query_params_dict", "flask.request.headers.get" ]
[((275, 306), 'flask.blueprints.Blueprint', 'Blueprint', (['"""users_bp"""', '__name__'], {}), "('users_bp', __name__)\n", (284, 306), False, 'from flask.blueprints import Blueprint\n'), ((2040, 2078), 'app.services.user_service.response', 'user_service.response', ([], {'status_code': '(403)'}), '(status_code=403)\n', (2061, 2078), False, 'from app.services import user_service\n'), ((2621, 2659), 'app.services.user_service.response', 'user_service.response', ([], {'status_code': '(403)'}), '(status_code=403)\n', (2642, 2659), False, 'from app.services import user_service\n'), ((463, 499), 'flask.request.headers.get', 'request.headers.get', (['"""Authorization"""'], {}), "('Authorization')\n", (482, 499), False, 'from flask import jsonify, request, make_response, render_template\n'), ((575, 638), 'app.services.user_service.url_args_to_query_params_dict', 'user_service.url_args_to_query_params_dict', (['request.args', '(False)'], {}), '(request.args, False)\n', (617, 638), False, 'from app.services import user_service\n'), ((668, 758), 'app.user_dao.user_dao.get_users', 'user_dao.get_users', (['returned_value', "params['filters']", "params['sort']", "params['range']"], {}), "(returned_value, params['filters'], params['sort'],\n params['range'])\n", (686, 758), False, 'from app.user_dao import user_dao\n'), ((840, 925), 'app.services.user_service.response', 'user_service.response', (["values_for_return['data']", "values_for_return['count']", '(200)'], {}), "(values_for_return['data'], values_for_return['count'],\n 200)\n", (861, 925), False, 'from app.services import user_service\n'), ((948, 986), 'app.services.user_service.response', 'user_service.response', ([], {'status_code': '(403)'}), '(status_code=403)\n', (969, 986), False, 'from app.services import user_service\n'), ((1151, 1187), 'flask.request.headers.get', 'request.headers.get', (['"""Authorization"""'], {}), "('Authorization')\n", (1170, 1187), False, 'from flask import jsonify, request, make_response, 
render_template\n'), ((1293, 1321), 'app.user_dao.user_dao.create_user', 'user_dao.create_user', ([], {}), '(**data)\n', (1313, 1321), False, 'from app.user_dao import user_dao\n'), ((1511, 1549), 'app.services.user_service.response', 'user_service.response', ([], {'status_code': '(403)'}), '(status_code=403)\n', (1532, 1549), False, 'from app.services import user_service\n'), ((1714, 1750), 'flask.request.headers.get', 'request.headers.get', (['"""Authorization"""'], {}), "('Authorization')\n", (1733, 1750), False, 'from flask import jsonify, request, make_response, render_template\n'), ((2258, 2294), 'flask.request.headers.get', 'request.headers.get', (['"""Authorization"""'], {}), "('Authorization')\n", (2277, 2294), False, 'from flask import jsonify, request, make_response, render_template\n'), ((2400, 2446), 'app.user_dao.user_dao.update_user', 'user_dao.update_user', (['returned_value', 'id', 'data'], {}), '(returned_value, id, data)\n', (2420, 2446), False, 'from app.user_dao import user_dao\n'), ((2826, 2862), 'flask.request.headers.get', 'request.headers.get', (['"""Authorization"""'], {}), "('Authorization')\n", (2845, 2862), False, 'from flask import jsonify, request, make_response, render_template\n'), ((3019, 3057), 'app.services.user_service.response', 'user_service.response', ([], {'status_code': '(403)'}), '(status_code=403)\n', (3040, 3057), False, 'from app.services import user_service\n'), ((1363, 1410), 'app.services.user_service.response', 'user_service.response', (['success'], {'status_code': '(200)'}), '(success, status_code=200)\n', (1384, 1410), False, 'from app.services import user_service\n'), ((1445, 1483), 'app.services.user_service.response', 'user_service.response', ([], {'status_code': '(500)'}), '(status_code=500)\n', (1466, 1483), False, 'from app.services import user_service\n'), ((1961, 2028), 'app.user_dao.user_dao.update_user', 'user_dao.update_user', (['have_claims', 'id', "{'role': '', 'tenantId': ''}"], {}), "(have_claims, 
id, {'role': '', 'tenantId': ''})\n", (1981, 2028), False, 'from app.user_dao import user_dao\n'), ((2488, 2535), 'app.services.user_service.response', 'user_service.response', (['success'], {'status_code': '(200)'}), '(success, status_code=200)\n', (2509, 2535), False, 'from app.services import user_service\n'), ((2570, 2608), 'app.services.user_service.response', 'user_service.response', ([], {'status_code': '(500)'}), '(status_code=500)\n', (2591, 2608), False, 'from app.services import user_service\n'), ((2956, 2992), 'app.user_dao.user_dao.get_one', 'user_dao.get_one', (['returned_value', 'id'], {}), '(returned_value, id)\n', (2972, 2992), False, 'from app.user_dao import user_dao\n')]
#/usr/bin/env python import codecs import os import sys from setuptools import setup, find_packages if 'publish' in sys.argv: os.system('python setup.py sdist upload') sys.exit() read = lambda filepath: codecs.open(filepath, 'r', 'utf-8').read() # Dynamically calculate the version based on redator.VERSION. version = __import__('redator').get_version() setup( name='django-redator', version=version, description=( 'Django Redator (sic) is a application for the Django Web Framework to ' 'help you integrate Redactor <http://imperavi.com/redactor/>, a ' 'beautiful and easy-to-use WYSIWYG HTML editor, into your projects.' ), long_description=read(os.path.join(os.path.dirname(__file__), 'README.rst')), keywords = 'django app wysiwyg editor redactor', author='<NAME>, <NAME>', author_email='<EMAIL>', maintainer='<NAME>', maintainer_email='<EMAIL>', license='BSD License', url='https://bitbucket.org/semente/django-redator/', download_url='https://bitbucket.org/semente/django-redator/downloads/', packages=find_packages(), zip_safe=False, include_package_data=True, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Software Development :: Libraries :: Python Modules', ], )
[ "setuptools.find_packages", "os.path.dirname", "sys.exit", "os.system", "codecs.open" ]
[((134, 175), 'os.system', 'os.system', (['"""python setup.py sdist upload"""'], {}), "('python setup.py sdist upload')\n", (143, 175), False, 'import os\n'), ((180, 190), 'sys.exit', 'sys.exit', ([], {}), '()\n', (188, 190), False, 'import sys\n'), ((1104, 1119), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1117, 1119), False, 'from setuptools import setup, find_packages\n'), ((216, 251), 'codecs.open', 'codecs.open', (['filepath', '"""r"""', '"""utf-8"""'], {}), "(filepath, 'r', 'utf-8')\n", (227, 251), False, 'import codecs\n'), ((721, 746), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (736, 746), False, 'import os\n')]
"""Evaluate outcome (+CATE) of datasets""" from scipy.stats import spearmanr import numpy as np def safe_spearmanr(arr_a, arr_b): "Compute the spearman-R correlation, but 0 if all equal" if np.all(arr_a[0] == arr_a) or np.all(arr_b[0] == arr_b): return 0 return spearmanr(arr_a, arr_b).correlation def evaluate_outcome(model, dataset, k=5, n=1): """Evaluate the outcome of a model with a dataset Arguments --------- model: BaseModel model to be trained and evaluated on the dataset. dataset: BaseDataset Dataset on which the model is evaluated. k: int Number of folds n: int Number of iterations to evaluate over """ results = [] for _ in range(n): for train_data, test_data in dataset.kfolds(k=k): model.train(model.preprocess(train_data.standard_df)) model_outcome = model.predict_outcome(test_data.standard_df) if (np.all(model_outcome == model_outcome[0]) or np.all(test_data.outcome == test_data.outcome[0])): corr = 0 else: corr = spearmanr(model_outcome, test_data.outcome).correlation results.append(corr) return results def evaluate_performance(model, dataset, k=5, n=1): """Evaluate the outcome + CATE of a model with a dataset Arguments --------- model: BaseModel model to be trained and evaluated on the dataset. dataset: BaseDataset Dataset on which the model is evaluated. k: int Number of folds n: int Number of iterations to evaluate over """ cate_corr = [] outcome_corr = [] for _ in range(n): for train_data, test_data in dataset.kfolds(k=k): model.train(model.preprocess(train_data.standard_df)) test_df = test_data.standard_df cate = model.predict_cate(test_df) outcome = model.predict_outcome(test_df) cate_corr.append(safe_spearmanr(cate, test_data.cate)) outcome_corr.append(safe_spearmanr(outcome, test_data.outcome)) return cate_corr, outcome_corr
[ "scipy.stats.spearmanr", "numpy.all" ]
[((201, 226), 'numpy.all', 'np.all', (['(arr_a[0] == arr_a)'], {}), '(arr_a[0] == arr_a)\n', (207, 226), True, 'import numpy as np\n'), ((230, 255), 'numpy.all', 'np.all', (['(arr_b[0] == arr_b)'], {}), '(arr_b[0] == arr_b)\n', (236, 255), True, 'import numpy as np\n'), ((285, 308), 'scipy.stats.spearmanr', 'spearmanr', (['arr_a', 'arr_b'], {}), '(arr_a, arr_b)\n', (294, 308), False, 'from scipy.stats import spearmanr\n'), ((960, 1001), 'numpy.all', 'np.all', (['(model_outcome == model_outcome[0])'], {}), '(model_outcome == model_outcome[0])\n', (966, 1001), True, 'import numpy as np\n'), ((1025, 1074), 'numpy.all', 'np.all', (['(test_data.outcome == test_data.outcome[0])'], {}), '(test_data.outcome == test_data.outcome[0])\n', (1031, 1074), True, 'import numpy as np\n'), ((1143, 1186), 'scipy.stats.spearmanr', 'spearmanr', (['model_outcome', 'test_data.outcome'], {}), '(model_outcome, test_data.outcome)\n', (1152, 1186), False, 'from scipy.stats import spearmanr\n')]
import dht class DHT22Sensor: provides = ["temperature", "humidity"] def __init__(self, port): self._sensor = dht.DHT22(port) def readout(self): self._sensor.measure() return {"temperature": self._sensor.temperature(), "humidity": self._sensor.humidity()}
[ "dht.DHT22" ]
[((129, 144), 'dht.DHT22', 'dht.DHT22', (['port'], {}), '(port)\n', (138, 144), False, 'import dht\n')]
# -*- coding: utf-8 -*- """ Breadcrumb resolving ==================== """ try: from django.core.urlresolvers import Resolver404, get_resolver except ImportError: from django.urls import Resolver404, get_resolver from autobreadcrumbs.registry import breadcrumbs_registry class BreadcrumbRessource(object): """ Simple crumb ressource model to contain all datas about a ressource. """ def __init__(self, path, name, title, view_args, view_kwargs, link_type_settings={}): self.path = path self.name = name self.title = title self.view_args = view_args self.view_kwargs = view_kwargs self.link_type_settings = link_type_settings def __repr__(self): return "<BreadcrumbRessource: {0}>".format(self.name) def __str__(self): # NOTE: should be __unicode__() because passed paths can be unicode... # right ? return self.path class PathBreadcrumbResolver(object): """ Resolve given path as breadcrumbs Arguments: root_urlconf (string): Python path to an url conf file, usually ``settings.ROOT_URLCONF``. It will be used as the url map to resolve every given path. """ def __init__(self, root_urlconf): self.urlresolver = get_resolver(root_urlconf) def cut(self, path): """ Cut given path into segments Arguments: path (string): An url path like ``/foo/bar/``. Returns: list: List of path segments, each segment is a part of the url path starting from ``/`` and ending on the full path. Such as for ``/foo/bar/`` segments will be: :: - / - /foo - /foo/bar """ # Cut the path in segments segments = ['/'] tmp = '/' for item in path.split('/'): if item: tmp += item + '/' segments.append(tmp) return segments def format_title(self, value): """ Manage title format Arguments: name (string): Url name. value (string): Crumb value. Keyword Arguments: request (django.http.request.HttpRequest): Optional Django request object used with custom crumb function. If not given, crumb functions is ignored (so the crumb ressource still be available). Returns: string: Crumb title. 
""" title = value if value is None: return None # Force unicode on lazy translation else it will trigger an exception # with templates if hasattr(value, '_proxy____unicode_cast'): title = unicode(value) return title def get_current(self, elements): """ Return current Breadcrumb from elements. This is pretty simple as the current element is allways the last element (if element list is not empty). Arguments: elements (list): List of breadcrumb elements. Returns: BreadcrumbRessource or None: The last element from given ``elements`` if any, else None. """ if len(elements) > 0: return elements[-1] return None def resolve(self, path, request=None): """ Return resolved breadcrumbs datas from given path. Cut the path in segments and check each of them to find breadcrumb details if any. Crumb value can be a simple string, a Django lazy unicode or a tuple ``(title, custom_function)``. Crumb ``custom_function`` take url name and request object as arguments and will return ``False`` to ignore crumb (won't be in breadcrumbs) or ``True`` to keep crumb element. Arguments: path (string): An url path like ``/foo/bar/``. Keyword Arguments: request (django.http.request.HttpRequest): Optional Django request object used with custom crumb function. If not given, crumb functions will be ignored (so the crumb ressources still be available). Returns: Dict: Datas from resolved crumbs: * ``autobreadcrumbs_elements``: Resolved bread crumbs for each segment; * ``autobreadcrumbs_current``: Breadcrumb for current (the last one) path. 
""" breadcrumbs_elements = [] link_type_settings = {} path_segments = self.cut(path) # Resolve each segment for seg in path_segments: try: resolved = self.urlresolver.resolve(seg) except Resolver404: pass else: view_control = None namespace = resolved.namespace title = name = resolved.url_name if not name: continue if namespace: name = ':'.join([namespace, name]) # Ignore ressource without a crumb title if not breadcrumbs_registry.has_title(name): continue # Get defined title title = breadcrumbs_registry.get_title(name) # Custom function usage if isinstance(title, tuple) or isinstance(title, list): title, link_type_settings = title title = self.format_title(title) # Ignore element if empty if title is None: continue # Finally append the part to the knowed crumbs list breadcrumbs_elements.append( BreadcrumbRessource(seg, name, title, resolved.args, resolved.kwargs, link_type_settings) ) return { 'autobreadcrumbs_elements': breadcrumbs_elements, 'autobreadcrumbs_current': self.get_current(breadcrumbs_elements), }
[ "django.urls.get_resolver", "autobreadcrumbs.registry.breadcrumbs_registry.get_title", "autobreadcrumbs.registry.breadcrumbs_registry.has_title" ]
[((1291, 1317), 'django.urls.get_resolver', 'get_resolver', (['root_urlconf'], {}), '(root_urlconf)\n', (1303, 1317), False, 'from django.urls import Resolver404, get_resolver\n'), ((5314, 5350), 'autobreadcrumbs.registry.breadcrumbs_registry.get_title', 'breadcrumbs_registry.get_title', (['name'], {}), '(name)\n', (5344, 5350), False, 'from autobreadcrumbs.registry import breadcrumbs_registry\n'), ((5186, 5222), 'autobreadcrumbs.registry.breadcrumbs_registry.has_title', 'breadcrumbs_registry.has_title', (['name'], {}), '(name)\n', (5216, 5222), False, 'from autobreadcrumbs.registry import breadcrumbs_registry\n')]
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from BlockDataWrapper import BlockDataWrapper
from BlockInputsWrapper import BlockInputsWrapper
from BlockLinkerWrapper import BlockLinkerWrapper
from BlockRandomWrapper import BlockRandomWrapper
from BlockVoterWrapper import BlockVoterWrapper
from BlockForwardWrapper import BlockForwardWrapper
from BlockDistributeWrapper import BlockDistributeWrapper
from BlockTriggerWrapper import BlockTriggerWrapper
from BlockWriterWrapper import BlockWriterWrapper


class SpellbookWrapper():
    """Facade over the individual Spellbook block wrappers.

    Every accessor returns a fresh wrapper object bound to the same
    service url this instance was constructed with.
    """

    def __init__(self, url='http://bitcoinspellbook.appspot.com'):
        """Remember the base url of the spellbook service."""
        self.url = url

    def blockdata(self):
        """Return a BlockDataWrapper bound to this service url."""
        return BlockDataWrapper(self.url)

    def blockinputs(self):
        """Return a BlockInputsWrapper bound to this service url."""
        return BlockInputsWrapper(self.url)

    def blocklinker(self):
        """Return a BlockLinkerWrapper bound to this service url."""
        return BlockLinkerWrapper(self.url)

    def blockrandom(self):
        """Return a BlockRandomWrapper bound to this service url."""
        return BlockRandomWrapper(self.url)

    def blockvoter(self):
        """Return a BlockVoterWrapper bound to this service url."""
        return BlockVoterWrapper(self.url)

    def blockforward(self):
        """Return a BlockForwardWrapper bound to this service url."""
        return BlockForwardWrapper(self.url)

    def blockdistribute(self):
        """Return a BlockDistributeWrapper bound to this service url."""
        return BlockDistributeWrapper(self.url)

    def blocktrigger(self):
        """Return a BlockTriggerWrapper bound to this service url."""
        return BlockTriggerWrapper(self.url)

    def blockwriter(self):
        """Return a BlockWriterWrapper bound to this service url."""
        return BlockWriterWrapper(self.url)
[ "BlockDataWrapper.BlockDataWrapper", "BlockRandomWrapper.BlockRandomWrapper", "BlockVoterWrapper.BlockVoterWrapper", "BlockForwardWrapper.BlockForwardWrapper", "BlockDistributeWrapper.BlockDistributeWrapper", "BlockTriggerWrapper.BlockTriggerWrapper", "BlockLinkerWrapper.BlockLinkerWrapper", "BlockInp...
[((664, 690), 'BlockDataWrapper.BlockDataWrapper', 'BlockDataWrapper', (['self.url'], {}), '(self.url)\n', (680, 690), False, 'from BlockDataWrapper import BlockDataWrapper\n'), ((734, 762), 'BlockInputsWrapper.BlockInputsWrapper', 'BlockInputsWrapper', (['self.url'], {}), '(self.url)\n', (752, 762), False, 'from BlockInputsWrapper import BlockInputsWrapper\n'), ((806, 834), 'BlockLinkerWrapper.BlockLinkerWrapper', 'BlockLinkerWrapper', (['self.url'], {}), '(self.url)\n', (824, 834), False, 'from BlockLinkerWrapper import BlockLinkerWrapper\n'), ((878, 906), 'BlockRandomWrapper.BlockRandomWrapper', 'BlockRandomWrapper', (['self.url'], {}), '(self.url)\n', (896, 906), False, 'from BlockRandomWrapper import BlockRandomWrapper\n'), ((949, 976), 'BlockVoterWrapper.BlockVoterWrapper', 'BlockVoterWrapper', (['self.url'], {}), '(self.url)\n', (966, 976), False, 'from BlockVoterWrapper import BlockVoterWrapper\n'), ((1021, 1050), 'BlockForwardWrapper.BlockForwardWrapper', 'BlockForwardWrapper', (['self.url'], {}), '(self.url)\n', (1040, 1050), False, 'from BlockForwardWrapper import BlockForwardWrapper\n'), ((1098, 1130), 'BlockDistributeWrapper.BlockDistributeWrapper', 'BlockDistributeWrapper', (['self.url'], {}), '(self.url)\n', (1120, 1130), False, 'from BlockDistributeWrapper import BlockDistributeWrapper\n'), ((1175, 1204), 'BlockTriggerWrapper.BlockTriggerWrapper', 'BlockTriggerWrapper', (['self.url'], {}), '(self.url)\n', (1194, 1204), False, 'from BlockTriggerWrapper import BlockTriggerWrapper\n'), ((1248, 1276), 'BlockWriterWrapper.BlockWriterWrapper', 'BlockWriterWrapper', (['self.url'], {}), '(self.url)\n', (1266, 1276), False, 'from BlockWriterWrapper import BlockWriterWrapper\n')]
#!/usr/bin/python __author__ = 'kilroy' # (c) 2014, WasHere Consulting, Inc. # Written for Infinite Skills # need pycrypto package from Crypto.Cipher import AES # need PIL and stepic packages import Image, stepic import binascii # key has to be 16, 24 or 32 bytes long cryptObj = AES.new("This is my key42", AES.MODE_CBC, "16 character vec") # notice the spaces -- that's to pad it out to a multiple of 16 bytes plaintext = "This is some text we need to encrypt because it's very secret " ciphertext = cryptObj.encrypt(plaintext) # we need to convert to ASCII to store it nicely binval = binascii.b2a_base64(ciphertext) i = Image.open("bullpuppies.jpg") print("ASCII: ", binval) stego = stepic.encode(i, binval) stego.save("stegencrypt.bmp", "BMP") newim = Image.open("stegencrypt.bmp") data = stepic.decode(newim).rstrip('\n') print("What we have out: ", data) # convert from ASCII back to binary encrypted = binascii.a2b_base64(data) newcryptObj = AES.new("This is my key42", AES.MODE_CBC, "16 character vec") result = newcryptObj.decrypt(encrypted) print(result)
[ "binascii.b2a_base64", "Image.open", "Crypto.Cipher.AES.new", "binascii.a2b_base64", "stepic.decode", "stepic.encode" ]
[((284, 345), 'Crypto.Cipher.AES.new', 'AES.new', (['"""This is my key42"""', 'AES.MODE_CBC', '"""16 character vec"""'], {}), "('This is my key42', AES.MODE_CBC, '16 character vec')\n", (291, 345), False, 'from Crypto.Cipher import AES\n'), ((597, 628), 'binascii.b2a_base64', 'binascii.b2a_base64', (['ciphertext'], {}), '(ciphertext)\n', (616, 628), False, 'import binascii\n'), ((633, 662), 'Image.open', 'Image.open', (['"""bullpuppies.jpg"""'], {}), "('bullpuppies.jpg')\n", (643, 662), False, 'import Image, stepic\n'), ((697, 721), 'stepic.encode', 'stepic.encode', (['i', 'binval'], {}), '(i, binval)\n', (710, 721), False, 'import Image, stepic\n'), ((768, 797), 'Image.open', 'Image.open', (['"""stegencrypt.bmp"""'], {}), "('stegencrypt.bmp')\n", (778, 797), False, 'import Image, stepic\n'), ((923, 948), 'binascii.a2b_base64', 'binascii.a2b_base64', (['data'], {}), '(data)\n', (942, 948), False, 'import binascii\n'), ((964, 1025), 'Crypto.Cipher.AES.new', 'AES.new', (['"""This is my key42"""', 'AES.MODE_CBC', '"""16 character vec"""'], {}), "('This is my key42', AES.MODE_CBC, '16 character vec')\n", (971, 1025), False, 'from Crypto.Cipher import AES\n'), ((805, 825), 'stepic.decode', 'stepic.decode', (['newim'], {}), '(newim)\n', (818, 825), False, 'import Image, stepic\n')]
"""Finance Database view""" __docformat__ = "numpy" import os import pandas as pd from tabulate import tabulate from gamestonk_terminal import feature_flags as gtff from gamestonk_terminal.etf import financedatabase_model from gamestonk_terminal.helper_funcs import export_data def display_etf_by_name( name: str, limit: int, export: str = "", ): """Display a selection of ETFs based on name filtered by total assets. [Source: Finance Database] Parameters ---------- name: str Search by name to find ETFs matching the criteria. limit: int Limit of ETFs to display export: str Type of format to export data """ data = financedatabase_model.get_etfs_by_name(name) if not data: print("No data was found with that name\n") return tabulate_data = pd.DataFrame(data).T[ ["long_name", "family", "category", "total_assets"] ] tabulate_data_sorted = tabulate_data.sort_values(by="total_assets", ascending=False) tabulate_data_sorted["total_assets"] = tabulate_data_sorted["total_assets"] / 1e6 if gtff.USE_TABULATE_DF: print( tabulate( tabulate_data_sorted.iloc[:limit], showindex=True, headers=["Name", "Family", "Category", "Total Assets [M]"], floatfmt=".2f", tablefmt="fancy_grid", ), "\n", ) else: print(tabulate_data_sorted.iloc[:limit].to_string(), "\n") export_data(export, os.path.dirname(os.path.abspath(__file__)), "ln_fd", data) def display_etf_by_description( description: str, limit: int, export: str = "", ): """Display a selection of ETFs based on description filtered by total assets. [Source: Finance Database] Parameters ---------- description: str Search by description to find ETFs matching the criteria. 
limit: int Limit of ETFs to display export: str Type of format to export data """ data = financedatabase_model.get_etfs_by_description(description) if not data: print("No data was found with that description\n") return tabulate_data = pd.DataFrame(data).T[ ["long_name", "family", "category", "total_assets"] ] tabulate_data_sorted = tabulate_data.sort_values(by="total_assets", ascending=False) tabulate_data_sorted["total_assets"] = tabulate_data_sorted["total_assets"] / 1e6 if gtff.USE_TABULATE_DF: print( tabulate( tabulate_data_sorted.iloc[:limit], showindex=True, headers=["Name", "Family", "Category", "Total Assets [M]"], floatfmt=".2f", tablefmt="fancy_grid", ), "\n", ) else: print(tabulate_data_sorted.iloc[:limit].to_string(), "\n") export_data(export, os.path.dirname(os.path.abspath(__file__)), "ld", data) def display_etf_by_category( category: str, limit: int, export: str = "", ): """Display a selection of ETFs based on a category filtered by total assets. [Source: Finance Database] Parameters ---------- description: str Search by description to find ETFs matching the criteria. limit: int Limit of ETFs to display export: str Type of format to export data """ data = financedatabase_model.get_etfs_by_category(category) if not data: print("No data was found on that category\n") return tabulate_data = pd.DataFrame(data).T[ ["long_name", "family", "category", "total_assets"] ] tabulate_data_sorted = tabulate_data.sort_values(by="total_assets", ascending=False) tabulate_data_sorted["total_assets"] = tabulate_data_sorted["total_assets"] / 1e6 if gtff.USE_TABULATE_DF: print( tabulate( tabulate_data_sorted.iloc[:limit], showindex=True, headers=["Name", "Family", "Category", "Total Assets [M]"], floatfmt=".2f", tablefmt="fancy_grid", ), "\n", ) else: print(tabulate_data_sorted.iloc[:limit].to_string(), "\n") export_data( export, os.path.join(os.path.dirname(os.path.abspath(__file__)), "screener"), "sbc", data, )
[ "gamestonk_terminal.etf.financedatabase_model.get_etfs_by_description", "tabulate.tabulate", "gamestonk_terminal.etf.financedatabase_model.get_etfs_by_category", "gamestonk_terminal.etf.financedatabase_model.get_etfs_by_name", "pandas.DataFrame", "os.path.abspath" ]
[((689, 733), 'gamestonk_terminal.etf.financedatabase_model.get_etfs_by_name', 'financedatabase_model.get_etfs_by_name', (['name'], {}), '(name)\n', (727, 733), False, 'from gamestonk_terminal.etf import financedatabase_model\n'), ((2048, 2106), 'gamestonk_terminal.etf.financedatabase_model.get_etfs_by_description', 'financedatabase_model.get_etfs_by_description', (['description'], {}), '(description)\n', (2093, 2106), False, 'from gamestonk_terminal.etf import financedatabase_model\n'), ((3418, 3470), 'gamestonk_terminal.etf.financedatabase_model.get_etfs_by_category', 'financedatabase_model.get_etfs_by_category', (['category'], {}), '(category)\n', (3460, 3470), False, 'from gamestonk_terminal.etf import financedatabase_model\n'), ((839, 857), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (851, 857), True, 'import pandas as pd\n'), ((1159, 1326), 'tabulate.tabulate', 'tabulate', (['tabulate_data_sorted.iloc[:limit]'], {'showindex': '(True)', 'headers': "['Name', 'Family', 'Category', 'Total Assets [M]']", 'floatfmt': '""".2f"""', 'tablefmt': '"""fancy_grid"""'}), "(tabulate_data_sorted.iloc[:limit], showindex=True, headers=['Name',\n 'Family', 'Category', 'Total Assets [M]'], floatfmt='.2f', tablefmt=\n 'fancy_grid')\n", (1167, 1326), False, 'from tabulate import tabulate\n'), ((1560, 1585), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1575, 1585), False, 'import os\n'), ((2219, 2237), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (2231, 2237), True, 'import pandas as pd\n'), ((2539, 2706), 'tabulate.tabulate', 'tabulate', (['tabulate_data_sorted.iloc[:limit]'], {'showindex': '(True)', 'headers': "['Name', 'Family', 'Category', 'Total Assets [M]']", 'floatfmt': '""".2f"""', 'tablefmt': '"""fancy_grid"""'}), "(tabulate_data_sorted.iloc[:limit], showindex=True, headers=['Name',\n 'Family', 'Category', 'Total Assets [M]'], floatfmt='.2f', tablefmt=\n 'fancy_grid')\n", (2547, 2706), False, 'from 
tabulate import tabulate\n'), ((2940, 2965), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2955, 2965), False, 'import os\n'), ((3578, 3596), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (3590, 3596), True, 'import pandas as pd\n'), ((3898, 4065), 'tabulate.tabulate', 'tabulate', (['tabulate_data_sorted.iloc[:limit]'], {'showindex': '(True)', 'headers': "['Name', 'Family', 'Category', 'Total Assets [M]']", 'floatfmt': '""".2f"""', 'tablefmt': '"""fancy_grid"""'}), "(tabulate_data_sorted.iloc[:limit], showindex=True, headers=['Name',\n 'Family', 'Category', 'Total Assets [M]'], floatfmt='.2f', tablefmt=\n 'fancy_grid')\n", (3906, 4065), False, 'from tabulate import tabulate\n'), ((4329, 4354), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4344, 4354), False, 'import os\n')]
import random
import torch
import numpy as np
import time
import os
from model.net import Net
from model.loss import Loss
from torch.autograd import Variable
import itertools
import pandas as pd
from main.dataset import LunaDataSet
from torch.utils.data import DataLoader
from configs import VAL_PCT, TOTAL_EPOCHS, DEFAULT_LR, OUTPUT_PATH
from glob import glob


def get_lr(epoch):
    """Step learning-rate schedule: full LR for the first half of training,
    then 10x and 100x decays at 50% and 80% of TOTAL_EPOCHS."""
    if epoch <= TOTAL_EPOCHS * 0.5:
        lr = DEFAULT_LR
    elif epoch <= TOTAL_EPOCHS * 0.8:
        lr = 0.1 * DEFAULT_LR
    else:
        lr = 0.01 * DEFAULT_LR
    return lr


def train(data_loader, net, loss, epoch, optimizer, get_lr, save_dir='./models/'):
    """Run one training pass, periodically checkpointing to save_dir.

    Sets the optimizer LR from the schedule, runs forward/backward/step,
    collects per-batch loss metrics, and saves a checkpoint every 10 epochs.
    """
    print("****************training:*******************")
    start_time = time.time()
    net.train()
    lr = get_lr(epoch)
    # Apply the scheduled learning rate to every parameter group.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    metrics = []
    for i, (data, target, coord) in enumerate(data_loader):
        if torch.cuda.is_available():
            data = Variable(data.cuda())
            target = Variable(target.cuda())
            coord = Variable(coord.cuda())
        data = data.float()
        target = target.float()
        coord = coord.float()
        output = net(data, coord)
        loss_output = loss(output, target)
        optimizer.zero_grad()
        loss_output[0].backward()
        optimizer.step()
        # Detach the scalar loss so metrics holds plain numbers.
        loss_output[0] = loss_output[0].item()
        metrics.append(loss_output)
        # NOTE(review): this 'break' stops after the FIRST batch only —
        # looks like a debugging leftover; confirm before relying on
        # this script for real training.
        break
    metrics = np.asarray(metrics, np.float32)
    if epoch % 10 == 0:
        # Move weights to CPU before serialization so the checkpoint can be
        # loaded on machines without a GPU.
        net_state_dict = net.state_dict()
        for key in net_state_dict.keys():
            net_state_dict[key] = net_state_dict[key].cpu()
        torch.save({
            'epoch': epoch,
            'save_dir': save_dir,
            'model_state_dict': net_state_dict,
            'optimizer_state_dict': optimizer.state_dict(),
            'loss': np.mean(metrics[:, 0])}, os.path.join(save_dir, f'''{epoch}.ckpt'''))
    end_time = time.time()
    # Columns of `metrics` beyond 0-2 are presumably extra loss terms and
    # tp/fp/tn/fn counts produced by Loss — TODO confirm against model.loss.
    print(f'''Epoch {epoch} (lr {lr})''')
    print(f'''Train: tpr {100.0 * np.sum(metrics[:, 6]) / np.sum(metrics[:, 7])}, tnr {100.0 * np.sum(metrics[:, 8]) / np.sum(metrics[:, 9])}, total pos {np.sum(metrics[:, 7])}, total neg {np.sum(metrics[:, 9])}, time {end_time - start_time}''')
    print(f'''loss {np.mean(metrics[:, 0])}, classify loss {np.mean(metrics[:, 1])}, regress loss {np.mean(metrics[:, 2])}, {np.mean(metrics[:, 3])}, {np.mean(metrics[:, 4])}, {np.mean(metrics[:, 5])}''')


def validate(data_loader, net, loss):
    """Run one evaluation pass (no gradient updates) and print loss metrics."""
    print("****************validation:*******************")
    start_time = time.time()
    net.eval()
    metrics = []
    for i, (data, target, coord) in enumerate(data_loader):
        if torch.cuda.is_available():
            data = Variable(data.cuda())
            target = Variable(target.cuda())
            coord = Variable(coord.cuda())
        data = data.float()
        target = target.float()
        coord = coord.float()
        output = net(data, coord)
        loss_output = loss(output, target, train=False)
        loss_output[0] = loss_output[0].item()
        metrics.append(loss_output)
        # NOTE(review): same single-batch 'break' as in train() — confirm.
        break
    end_time = time.time()
    metrics = np.asarray(metrics, np.float32)
    print(f'''time {end_time - start_time}''')
    print(f'''loss {np.mean(metrics[:, 0])}, classify loss {np.mean(metrics[:, 1])}, regress loss {np.mean(metrics[:, 2])}, {np.mean(metrics[:, 3])}, {np.mean(metrics[:, 4])}, {np.mean(metrics[:, 5])}''')


def run(load_last_checkpoint=False):
    """Entry point: build model/optimizer, optionally resume from the latest
    checkpoint, split data by seriesuid into train/val, and train.

    Keyword Arguments:
        load_last_checkpoint (bool): when True, resume from the
            highest-numbered ``*.ckpt`` file in the models directory.
    """
    save_dir = f'{OUTPUT_PATH}/models/'
    os.makedirs(save_dir, exist_ok=True)
    neural_net = Net()
    loss_fn = Loss()
    optim = torch.optim.SGD(neural_net.parameters(), DEFAULT_LR, momentum=0.9, weight_decay=1e-4)
    starting_epoch = 0
    initial_loss = None
    if load_last_checkpoint:
        # Checkpoints are named "<epoch>.ckpt"; pick the numerically largest.
        model_paths = glob(f'''{save_dir}*.ckpt''')
        model_names = [int(i.split('/')[-1][:-5]) for i in model_paths]
        latest_model_path = f'''{save_dir}{max(model_names)}.ckpt'''
        print('loading latest model from:', latest_model_path)
        checkpoint = torch.load(latest_model_path)
        neural_net.load_state_dict(checkpoint['model_state_dict'])
        optim.load_state_dict(checkpoint['optimizer_state_dict'])
        starting_epoch = checkpoint['epoch']
        initial_loss = checkpoint['loss']
    if torch.cuda.is_available():
        neural_net = neural_net.cuda()
        loss_fn = loss_fn.cuda()
    print(f'''Training from epoch: {starting_epoch} towards: {TOTAL_EPOCHS}, with learning rate starting from: {get_lr(starting_epoch)}, and loss: {initial_loss}''')
    # Shuffle the whole meta table deterministically is NOT done here: the
    # sample(frac=1) below reshuffles rows on every call.
    meta = pd.read_csv(f'{OUTPUT_PATH}/augmented_meta.csv', index_col=0).sample(frac=1).reset_index(drop=True)
    # Group rows by scan (seriesuid) so one scan never straddles the
    # train/validation split.
    meta_group_by_series = meta.groupby(['seriesuid']).indices
    list_of_groups = [{i: list(meta_group_by_series[i])} for i in meta_group_by_series.keys()]
    # Seeded shuffle keeps the group-level split reproducible.
    random.Random(0).shuffle(list_of_groups)
    val_split = int(VAL_PCT * len(list_of_groups))
    val_indices = list(itertools.chain(*[list(i.values())[0] for i in list_of_groups[:val_split]]))
    train_indices = list(itertools.chain(*[list(i.values())[0] for i in list_of_groups[val_split:]]))
    ltd = LunaDataSet(train_indices, meta)
    lvd = LunaDataSet(val_indices, meta)
    train_loader = DataLoader(ltd, batch_size=1, shuffle=False)
    val_loader = DataLoader(lvd, batch_size=1, shuffle=False)
    for ep in range(starting_epoch, TOTAL_EPOCHS):
        train(train_loader, neural_net, loss_fn, ep, optim, get_lr, save_dir=save_dir)
        # NOTE(review): validate() receives train_loader and val_loader is
        # never used — this looks like a bug; confirm whether validation was
        # meant to run on val_loader.
        validate(train_loader, neural_net, loss_fn)


if __name__ == '__main__':
    run(load_last_checkpoint=False)
[ "numpy.mean", "os.makedirs", "torch.utils.data.DataLoader", "random.Random", "pandas.read_csv", "torch.load", "numpy.asarray", "os.path.join", "model.net.Net", "numpy.sum", "torch.cuda.is_available", "model.loss.Loss", "time.time", "glob.glob", "main.dataset.LunaDataSet" ]
[((726, 737), 'time.time', 'time.time', ([], {}), '()\n', (735, 737), False, 'import time\n'), ((1470, 1501), 'numpy.asarray', 'np.asarray', (['metrics', 'np.float32'], {}), '(metrics, np.float32)\n', (1480, 1501), True, 'import numpy as np\n'), ((1967, 1978), 'time.time', 'time.time', ([], {}), '()\n', (1976, 1978), False, 'import time\n'), ((2589, 2600), 'time.time', 'time.time', ([], {}), '()\n', (2598, 2600), False, 'import time\n'), ((3156, 3167), 'time.time', 'time.time', ([], {}), '()\n', (3165, 3167), False, 'import time\n'), ((3183, 3214), 'numpy.asarray', 'np.asarray', (['metrics', 'np.float32'], {}), '(metrics, np.float32)\n', (3193, 3214), True, 'import numpy as np\n'), ((3550, 3586), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (3561, 3586), False, 'import os\n'), ((3604, 3609), 'model.net.Net', 'Net', ([], {}), '()\n', (3607, 3609), False, 'from model.net import Net\n'), ((3624, 3630), 'model.loss.Loss', 'Loss', ([], {}), '()\n', (3628, 3630), False, 'from model.loss import Loss\n'), ((4339, 4364), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4362, 4364), False, 'import torch\n'), ((5181, 5213), 'main.dataset.LunaDataSet', 'LunaDataSet', (['train_indices', 'meta'], {}), '(train_indices, meta)\n', (5192, 5213), False, 'from main.dataset import LunaDataSet\n'), ((5224, 5254), 'main.dataset.LunaDataSet', 'LunaDataSet', (['val_indices', 'meta'], {}), '(val_indices, meta)\n', (5235, 5254), False, 'from main.dataset import LunaDataSet\n'), ((5274, 5318), 'torch.utils.data.DataLoader', 'DataLoader', (['ltd'], {'batch_size': '(1)', 'shuffle': '(False)'}), '(ltd, batch_size=1, shuffle=False)\n', (5284, 5318), False, 'from torch.utils.data import DataLoader\n'), ((5336, 5380), 'torch.utils.data.DataLoader', 'DataLoader', (['lvd'], {'batch_size': '(1)', 'shuffle': '(False)'}), '(lvd, batch_size=1, shuffle=False)\n', (5346, 5380), False, 'from torch.utils.data import 
DataLoader\n'), ((945, 970), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (968, 970), False, 'import torch\n'), ((2706, 2731), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2729, 2731), False, 'import torch\n'), ((3827, 3852), 'glob.glob', 'glob', (['f"""{save_dir}*.ckpt"""'], {}), "(f'{save_dir}*.ckpt')\n", (3831, 3852), False, 'from glob import glob\n'), ((4082, 4111), 'torch.load', 'torch.load', (['latest_model_path'], {}), '(latest_model_path)\n', (4092, 4111), False, 'import torch\n'), ((1906, 1945), 'os.path.join', 'os.path.join', (['save_dir', 'f"""{epoch}.ckpt"""'], {}), "(save_dir, f'{epoch}.ckpt')\n", (1918, 1945), False, 'import os\n'), ((4877, 4893), 'random.Random', 'random.Random', (['(0)'], {}), '(0)\n', (4890, 4893), False, 'import random\n'), ((1881, 1903), 'numpy.mean', 'np.mean', (['metrics[:, 0]'], {}), '(metrics[:, 0])\n', (1888, 1903), True, 'import numpy as np\n'), ((2186, 2207), 'numpy.sum', 'np.sum', (['metrics[:, 7]'], {}), '(metrics[:, 7])\n', (2192, 2207), True, 'import numpy as np\n'), ((2221, 2242), 'numpy.sum', 'np.sum', (['metrics[:, 9]'], {}), '(metrics[:, 9])\n', (2227, 2242), True, 'import numpy as np\n'), ((2287, 2309), 'numpy.mean', 'np.mean', (['metrics[:, 0]'], {}), '(metrics[:, 0])\n', (2294, 2309), True, 'import numpy as np\n'), ((2327, 2349), 'numpy.mean', 'np.mean', (['metrics[:, 1]'], {}), '(metrics[:, 1])\n', (2334, 2349), True, 'import numpy as np\n'), ((2377, 2399), 'numpy.mean', 'np.mean', (['metrics[:, 2]'], {}), '(metrics[:, 2])\n', (2384, 2399), True, 'import numpy as np\n'), ((2403, 2425), 'numpy.mean', 'np.mean', (['metrics[:, 3]'], {}), '(metrics[:, 3])\n', (2410, 2425), True, 'import numpy as np\n'), ((2429, 2451), 'numpy.mean', 'np.mean', (['metrics[:, 4]'], {}), '(metrics[:, 4])\n', (2436, 2451), True, 'import numpy as np\n'), ((2455, 2477), 'numpy.mean', 'np.mean', (['metrics[:, 5]'], {}), '(metrics[:, 5])\n', (2462, 2477), True, 'import numpy as np\n'), 
((3282, 3304), 'numpy.mean', 'np.mean', (['metrics[:, 0]'], {}), '(metrics[:, 0])\n', (3289, 3304), True, 'import numpy as np\n'), ((3322, 3344), 'numpy.mean', 'np.mean', (['metrics[:, 1]'], {}), '(metrics[:, 1])\n', (3329, 3344), True, 'import numpy as np\n'), ((3372, 3394), 'numpy.mean', 'np.mean', (['metrics[:, 2]'], {}), '(metrics[:, 2])\n', (3379, 3394), True, 'import numpy as np\n'), ((3398, 3420), 'numpy.mean', 'np.mean', (['metrics[:, 3]'], {}), '(metrics[:, 3])\n', (3405, 3420), True, 'import numpy as np\n'), ((3424, 3446), 'numpy.mean', 'np.mean', (['metrics[:, 4]'], {}), '(metrics[:, 4])\n', (3431, 3446), True, 'import numpy as np\n'), ((3450, 3472), 'numpy.mean', 'np.mean', (['metrics[:, 5]'], {}), '(metrics[:, 5])\n', (3457, 3472), True, 'import numpy as np\n'), ((2079, 2100), 'numpy.sum', 'np.sum', (['metrics[:, 7]'], {}), '(metrics[:, 7])\n', (2085, 2100), True, 'import numpy as np\n'), ((2151, 2172), 'numpy.sum', 'np.sum', (['metrics[:, 9]'], {}), '(metrics[:, 9])\n', (2157, 2172), True, 'import numpy as np\n'), ((4615, 4676), 'pandas.read_csv', 'pd.read_csv', (['f"""{OUTPUT_PATH}/augmented_meta.csv"""'], {'index_col': '(0)'}), "(f'{OUTPUT_PATH}/augmented_meta.csv', index_col=0)\n", (4626, 4676), True, 'import pandas as pd\n'), ((2055, 2076), 'numpy.sum', 'np.sum', (['metrics[:, 6]'], {}), '(metrics[:, 6])\n', (2061, 2076), True, 'import numpy as np\n'), ((2127, 2148), 'numpy.sum', 'np.sum', (['metrics[:, 8]'], {}), '(metrics[:, 8])\n', (2133, 2148), True, 'import numpy as np\n')]
import json
from typing import Any, Dict

import requests


def get_uuid_message() -> str:
    """Fetch a fresh UUID from httpbin and wrap it in a greeting string."""
    resp = requests.get("https://httpbin.org/uuid")
    fetched_uuid = resp.json()["uuid"]
    return f"Hello, CDK! Here is your UUID: {fetched_uuid}"


def lambda_handler(event: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
    """AWS Lambda entry point.

    Logs the incoming event and responds with a plain-text greeting
    containing a freshly fetched UUID.
    """
    print(f"request: {json.dumps(event)}")
    body_text = get_uuid_message()
    return {
        "statusCode": 200,
        "headers": {"Content-Type": "text/plain"},
        "body": body_text,
    }
[ "json.dumps", "requests.get" ]
[((402, 442), 'requests.get', 'requests.get', (['"""https://httpbin.org/uuid"""'], {}), "('https://httpbin.org/uuid')\n", (414, 442), False, 'import requests\n'), ((168, 185), 'json.dumps', 'json.dumps', (['event'], {}), '(event)\n', (178, 185), False, 'import json\n')]
#!/usr/bin/python3

"""Plot histograms of images. Possible nans and infinities are ignored."""

import argparse
from collections import OrderedDict
import logging

import numpy as np
import pylab as pl
from scipy import interpolate

import dwi.files
import dwi.util


def parse_args():
    """Parse command-line arguments."""
    p = argparse.ArgumentParser(description=__doc__)
    p.add_argument('--verbose', '-v', action='count',
                   help='increase verbosity')
    p.add_argument('--input', nargs='+',
                   help='input files')
    p.add_argument('--param', type=int, nargs='*',
                   help='image parameter index to use')
    p.add_argument('--fig', required=True,
                   help='output figure file')
    p.add_argument('--smooth', action='store_true',
                   help='smoothen the histogram by spline interpolation')
    return p.parse_args()


def histogram(a, m1=None, m2=None, inclusive=True, bins='doane'):
    """Create histogram from data between (m1, m2), with bin centers.

    Parameters:
        a: array-like input data.
        m1, m2: optional lower/upper bounds; data outside is discarded.
        inclusive: whether the bounds themselves are kept.
        bins: binning strategy passed to numpy.histogram.

    Returns:
        Tuple (hist, bin_centers, min, max) of the clipped data.
    """
    a = np.asarray(a)
    if m1 is not None:
        a = a[a >= m1] if inclusive else a[a > m1]
    if m2 is not None:
        a = a[a <= m2] if inclusive else a[a < m2]
    mn, mx = a.min(), a.max()
    hist, bin_edges = np.histogram(a, bins=bins, density=False)
    # Midpoints of consecutive edges give one center per bin.
    bin_centers = [np.mean(t) for t in zip(bin_edges, bin_edges[1:])]
    return hist, bin_centers, mn, mx


def smoothen(x, y):
    """Smoothen histogram by cubic B-spline interpolation.

    Note: scipy.interpolate.spline was removed in SciPy 1.0; this uses the
    documented replacement make_interp_spline, which requires x to be
    increasing (bin centers are).
    """
    x_smooth = np.linspace(min(x), max(x), 300)
    y_smooth = interpolate.make_interp_spline(x, y, k=3)(x_smooth)
    y_smooth[y_smooth < 0] = 0  # Don't let it dive negative.
    return x_smooth, y_smooth


def plot_histograms(Histograms, outfile, smooth=False):
    """Plot subfigures, each having several histograms bundled together.

    Parameters:
        Histograms: mapping of (param, range-string) to a list of histogram
            tuples as produced by histogram().
        outfile: output figure path.
        smooth: spline-smoothen each curve before plotting.
    """
    # Grid size: one row per parameter, one column per range.
    nrows = len({x[0] for x in Histograms})
    ncols = len({x[1] for x in Histograms})
    fig = pl.figure(figsize=(ncols * 6, nrows * 6))
    for i, ((param, rng), histograms) in enumerate(Histograms.items(), 1):
        if histograms:
            fig.add_subplot(nrows, ncols, i)
            minmin, maxmax = None, None
            for hist, bins, mn, mx in histograms:
                x, y = bins, hist
                if smooth:
                    x, y = smoothen(x, y)
                pl.plot(x, y)
                # Track the global data range across all curves in the cell.
                if minmin is None:
                    minmin = mn
                if maxmax is None:
                    maxmax = mx
                minmin = min(minmin, mn)
                maxmax = max(maxmax, mx)
            pl.title(f'{param}; {len(histograms)}; {rng}; '
                     f'[{minmin:.5g}, {maxmax:.5g}]')
    logging.info('Plotting to %s...', outfile)
    pl.savefig(outfile, bbox_inches='tight')
    pl.close()


def add_histograms(hists, path, img, param, ranges, verbose):
    """Add histograms for a file.

    Clips the image to its bounding box, drops non-finite values, then for
    each percentile range appends a histogram into ``hists`` keyed by
    (param, str(range)). A list range means inclusive bounds, a tuple means
    exclusive bounds.
    """
    original_shape, original_size = img.shape, img.size
    img = img[dwi.util.bbox(img)]
    img = img[np.isfinite(img)]
    if np.any(img < 0):
        logging.warning('Image contains negatives: %s', path)
    if verbose:
        print(f'Read {original_shape}, {img.dtype}, '
              f'{img.size / original_size:.1%}, {np.mean(img):.4g}, '
              f'{dwi.util.fivenums(img)}, {param}, {path}')
    for rng in ranges:
        # List => inclusive bounds, tuple => exclusive bounds. Any other
        # range type would be a programming error, so fail loudly.
        if isinstance(rng, list):
            incl = True
        elif isinstance(rng, tuple):
            incl = False
        else:
            raise TypeError(f'Unsupported range type: {rng!r}')
        m1, m2 = np.percentile(img, rng)
        key = param, str(rng)
        hists.setdefault(key, []).append(histogram(img, m1, m2, incl))


def main():
    """Main."""
    args = parse_args()
    logging.basicConfig(level=logging.INFO)
    ranges = [[0, 100], (0, 100), [0, 99], (1, 95)]
    hists = OrderedDict()
    for path in args.input:
        img, attrs = dwi.files.read_pmap(path, params=args.param,
                                          dtype=np.float32)
        for i, param in enumerate(attrs['parameters']):
            add_histograms(hists, path, img[..., i], param, ranges,
                           args.verbose)
    plot_histograms(hists, args.fig, smooth=args.smooth)


if __name__ == '__main__':
    main()
[ "logging.basicConfig", "numpy.mean", "numpy.histogram", "collections.OrderedDict", "argparse.ArgumentParser", "pylab.plot", "pylab.savefig", "numpy.asarray", "logging.warning", "numpy.any", "pylab.close", "pylab.figure", "numpy.isfinite", "scipy.interpolate.spline", "numpy.percentile", ...
[((334, 378), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (357, 378), False, 'import argparse\n'), ((1056, 1069), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (1066, 1069), True, 'import numpy as np\n'), ((1346, 1387), 'numpy.histogram', 'np.histogram', (['a'], {'bins': 'bins', 'density': '(False)'}), '(a, bins=bins, density=False)\n', (1358, 1387), True, 'import numpy as np\n'), ((1610, 1644), 'scipy.interpolate.spline', 'interpolate.spline', (['x', 'y', 'x_smooth'], {}), '(x, y, x_smooth)\n', (1628, 1644), False, 'from scipy import interpolate\n'), ((2017, 2058), 'pylab.figure', 'pl.figure', ([], {'figsize': '(ncols * 6, nrows * 6)'}), '(figsize=(ncols * 6, nrows * 6))\n', (2026, 2058), True, 'import pylab as pl\n'), ((2928, 2970), 'logging.info', 'logging.info', (['"""Plotting to %s..."""', 'outfile'], {}), "('Plotting to %s...', outfile)\n", (2940, 2970), False, 'import logging\n'), ((2975, 3015), 'pylab.savefig', 'pl.savefig', (['outfile'], {'bbox_inches': '"""tight"""'}), "(outfile, bbox_inches='tight')\n", (2985, 3015), True, 'import pylab as pl\n'), ((3020, 3030), 'pylab.close', 'pl.close', ([], {}), '()\n', (3028, 3030), True, 'import pylab as pl\n'), ((3261, 3276), 'numpy.any', 'np.any', (['(img < 0)'], {}), '(img < 0)\n', (3267, 3276), True, 'import numpy as np\n'), ((4151, 4190), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (4170, 4190), False, 'import logging\n'), ((4256, 4269), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4267, 4269), False, 'from collections import OrderedDict\n'), ((1407, 1417), 'numpy.mean', 'np.mean', (['t'], {}), '(t)\n', (1414, 1417), True, 'import numpy as np\n'), ((3236, 3252), 'numpy.isfinite', 'np.isfinite', (['img'], {}), '(img)\n', (3247, 3252), True, 'import numpy as np\n'), ((3321, 3374), 'logging.warning', 'logging.warning', (['"""Image contains negatives: %s"""', 
'path'], {}), "('Image contains negatives: %s', path)\n", (3336, 3374), False, 'import logging\n'), ((3733, 3756), 'numpy.percentile', 'np.percentile', (['img', 'rng'], {}), '(img, rng)\n', (3746, 3756), True, 'import numpy as np\n'), ((2506, 2519), 'pylab.plot', 'pl.plot', (['x', 'y'], {}), '(x, y)\n', (2513, 2519), True, 'import pylab as pl\n'), ((3494, 3506), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (3501, 3506), True, 'import numpy as np\n')]
from django.db import models
from django.contrib.auth.models import (
    AbstractBaseUser,
    BaseUserManager,
    PermissionsMixin,
)


class UserManager(BaseUserManager):
    """Manager that creates user accounts keyed by email address."""

    def create_user(self, email, password=None, **extra_fields):
        """Create, save and return a regular user.

        Raises ValueError when no email is supplied.
        """
        if not email:
            raise ValueError('No valid email provided')
        normalized = self.normalize_email(email)
        user = self.model(email=normalized, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, password):
        """Create, save and return a superuser (staff + superuser flags set)."""
        superuser = self.create_user(email, password)
        superuser.is_staff = True
        superuser.is_superuser = True
        superuser.save(using=self._db)
        return superuser


class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model that supports using email instead of username"""

    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)

    objects = UserManager()

    # Authenticate with the email field rather than a username.
    USERNAME_FIELD = 'email'


class TestUser(models.Model):
    """Delme asap"""

    field = models.EmailField(max_length=255, unique=True)
[ "django.db.models.EmailField", "django.db.models.CharField", "django.db.models.BooleanField" ]
[((827, 873), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(255)', 'unique': '(True)'}), '(max_length=255, unique=True)\n', (844, 873), False, 'from django.db import models\n'), ((889, 921), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (905, 921), False, 'from django.db import models\n'), ((937, 970), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (956, 970), False, 'from django.db import models\n'), ((986, 1020), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1005, 1020), False, 'from django.db import models\n'), ((1143, 1189), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(255)', 'unique': '(True)'}), '(max_length=255, unique=True)\n', (1160, 1189), False, 'from django.db import models\n')]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Alex
# @Date:   2015-11-16 19:15:59
# @Last Modified by:   Alex
# @Last Modified time: 2015-12-28 22:28:23
from django.db import models
from Inventationery.core.models import TimeStampedModel
from Inventationery.apps.Customer.models import CustomerModel
from Inventationery.apps.Inventory.models import ItemModel, LocationModel
from Inventationery.apps.Payments.models import PaymentModel, PaymModeModel
from Inventationery.core.models import Countries


# Function: Get currency codes from Countries model
def Get_CurrencyCodes():
    """Return the list of currency codes stored in the Countries model.

    Bug fix: the previous version initialised ``Codes_list`` as a tuple
    and then called ``Codes_list.append()`` with no argument, so it
    always raised (tuples have no ``append``) and never returned a value.
    """
    currency_codes = Countries.objects.values('currency_code')
    return [row['currency_code'] for row in currency_codes]


# Function: Get new sequence number for Sales Order
def Get_SalesId():
    """Build the next sequential sales id, e.g. ``OV-00042``.

    Numbers continue from the most recently created order; the first
    order gets ``OV-00001``.
    """
    prefix = 'OV-'
    try:
        last = SalesOrderModel.objects.latest('created')
    except Exception:  # no orders yet, or the table does not exist
        last = None
    if last:
        # Bug fix: ``filter(unicode.isdigit, ...)`` is Python-2 only
        # (``unicode`` is undefined on Python 3); extract digits portably.
        digits = ''.join(ch for ch in last.SalesId if ch.isdigit())
        next_number = int(digits) + 1
        return str(prefix + str(next_number).zfill(5))
    else:
        return str(prefix + str(1).zfill(5))


# Class: Model for Sales Order
# ---------------------------------------------------------------------------
class SalesOrderModel(TimeStampedModel):

    # SALES TYPE OPTIONS
    SALES_ORDER = 'SO'
    RESTORED_ORDER = 'RO'
    SALES_TYPE = (
        (SALES_ORDER, 'Orden de venta'),
        (RESTORED_ORDER, 'Orden devuelta'),
    )
    # SALES STATUS OPTIONS
    OPEN = 'OPE'
    # BACKORDER = 'BAC'
    REDUCED = 'RED'
    INVOICED = 'INV'
    CASHED = 'CAS'
    CANCELED = 'CAN'
    REDUCED_CASHED = 'RCA'
    SALES_STATUS = (
        (OPEN, 'Abierta'),
        # (BACKORDER, 'Back order'),
        (REDUCED, 'Reducido'),
        (INVOICED, 'Facturado'),
        (CASHED, 'Cobrado'),
        (CANCELED, 'Cancelado'),
        (REDUCED_CASHED, 'Reducido/Cobrado'),
    )
    # DOCUMENT STATE OPTIONS
    # NOTE(review): this rebinds OPEN (previously 'OPE' above), so the
    # SalesStatus field's default becomes 'Abierto', which is NOT one of
    # the SALES_STATUS choice keys. Renaming either constant would change
    # the class API, so the inconsistency is only flagged here — confirm
    # the intended default before relying on choice validation.
    OPEN = 'Abierto'
    PROCESS = 'Proceso'
    CLOSED = 'Cerrado'
    DOC_STATE = (
        (OPEN, 'Abierto'),
        (PROCESS, 'En proceso'),
        (CLOSED, 'Cerrado'),
    )
    # DELIVERY MODE
    HOME = 'HOM'
    BRANCH = 'BRA'
    DLV_MODE = (
        (HOME, 'A domicilio'),
        (BRANCH, 'En sucursal'),
    )

    SalesId = models.CharField(
        max_length=45, default=Get_SalesId, unique=True)
    SalesName = models.CharField(max_length=100)
    SalesType = models.CharField(
        max_length=50, choices=SALES_TYPE, default=SALES_ORDER)
    SalesStatus = models.CharField(
        max_length=100, default=OPEN, choices=SALES_STATUS)
    # DocumentStatus: pending implementation
    WorkerSalesPlacer = models.CharField(
        max_length=100, blank=True, null=True)  # DirParty-Name
    LanguageCode = models.CharField(
        max_length=5, default='es-mx')  # DirParty-LanguageCode
    DeliveryName = models.CharField(
        max_length=200, blank=True, null=True)  # CustModel-get_PrimaryAddress
    DeliveryDate = models.DateField(blank=True, null=True)
    ConfirmedDlv = models.DateField(blank=True, null=True)
    DlvMode = models.CharField(max_length=20, default=HOME, choices=DLV_MODE)
    CurrencyCode = models.CharField(
        default='MXN', max_length=3)  # CustModel-CurrencyCode
    # Catalog of 30-day payment terms
    Payment = models.ForeignKey(PaymentModel, blank=True, null=True)
    # Catalog of payment methods
    PaymMode = models.ForeignKey(PaymModeModel, null=True, blank=True)
    Remarks = models.TextField(
        default=None, blank=True, null=True)
    SubTotal = models.DecimalField(
        max_digits=20, decimal_places=2, blank=True, null=True)
    Total = models.DecimalField(
        max_digits=20, decimal_places=2, blank=True, null=True)
    Paid = models.DecimalField(
        max_digits=20, decimal_places=2, blank=True, null=True)
    Balance = models.DecimalField(
        max_digits=20, decimal_places=2, blank=True, null=True)
    Enabled = models.BooleanField(default=True)
    DocumentState = models.CharField(
        max_length=20, choices=DOC_STATE, default=CLOSED)
    Customer = models.ForeignKey(CustomerModel, default=None)
    Location = models.ForeignKey(LocationModel, blank=True, null=True)

    def __unicode__(self):
        return "{0}".format(self.SalesId)


# Class: Model for a Sales Order line item
# ---------------------------------------------------------------------------
class SalesLineModel(TimeStampedModel):

    # SALES STATUS OPTIONS
    BACKORDER = 'BAC'
    REDUCED = 'RED'
    INVOICED = 'INV'
    CASHED = 'CAS'
    CANCELED = 'CAN'
    SALES_STATUS = (
        (BACKORDER, 'Back order'),
        (REDUCED, 'Reducido'),
        (INVOICED, 'Facturado'),
        (CASHED, 'Cobrado'),
        (CANCELED, 'Cancelado'),
    )

    ItemId = models.ForeignKey(ItemModel, blank=True, null=True)
    ItemName = models.CharField(max_length=50, blank=True, null=True)
    SalesQty = models.PositiveIntegerField(blank=True, null=True)
    SalesUnit = models.CharField(max_length=10, blank=True, null=True)
    SalesPrice = models.DecimalField(
        blank=True, null=True, max_digits=10, decimal_places=2)
    LineDisc = models.DecimalField(
        max_digits=10, decimal_places=2, blank=True, null=True)
    LinePercent = models.DecimalField(
        max_digits=10, decimal_places=2, blank=True, null=True)
    LineAmount = models.DecimalField(
        max_digits=20, decimal_places=2, blank=True, null=True)
    SalesLineStatus = models.CharField(max_length=100,
                                       default=BACKORDER,
                                       choices=SALES_STATUS,
                                       blank=True, null=True)
    LineNum = models.PositiveSmallIntegerField(blank=True, null=True)
    SalesOrder = models.ForeignKey(
        SalesOrderModel, null=True, blank=True)
[ "django.db.models.DateField", "django.db.models.TextField", "django.db.models.ForeignKey", "django.db.models.BooleanField", "django.db.models.DecimalField", "django.db.models.PositiveIntegerField", "Inventationery.core.models.Countries.objects.values", "django.db.models.PositiveSmallIntegerField", "...
[((609, 650), 'Inventationery.core.models.Countries.objects.values', 'Countries.objects.values', (['"""currency_code"""'], {}), "('currency_code')\n", (633, 650), False, 'from Inventationery.core.models import Countries\n'), ((2264, 2329), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(45)', 'default': 'Get_SalesId', 'unique': '(True)'}), '(max_length=45, default=Get_SalesId, unique=True)\n', (2280, 2329), False, 'from django.db import models\n'), ((2355, 2387), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (2371, 2387), False, 'from django.db import models\n'), ((2404, 2476), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'choices': 'SALES_TYPE', 'default': 'SALES_ORDER'}), '(max_length=50, choices=SALES_TYPE, default=SALES_ORDER)\n', (2420, 2476), False, 'from django.db import models\n'), ((2504, 2572), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'default': 'OPEN', 'choices': 'SALES_STATUS'}), '(max_length=100, default=OPEN, choices=SALES_STATUS)\n', (2520, 2572), False, 'from django.db import models\n'), ((2639, 2694), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, blank=True, null=True)\n', (2655, 2694), False, 'from django.db import models\n'), ((2740, 2787), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(5)', 'default': '"""es-mx"""'}), "(max_length=5, default='es-mx')\n", (2756, 2787), False, 'from django.db import models\n'), ((2841, 2896), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'blank': '(True)', 'null': '(True)'}), '(max_length=200, blank=True, null=True)\n', (2857, 2896), False, 'from django.db import models\n'), ((2957, 2996), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2973, 
2996), False, 'from django.db import models\n'), ((3016, 3055), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3032, 3055), False, 'from django.db import models\n'), ((3070, 3133), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'default': 'HOME', 'choices': 'DLV_MODE'}), '(max_length=20, default=HOME, choices=DLV_MODE)\n', (3086, 3133), False, 'from django.db import models\n'), ((3153, 3198), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""MXN"""', 'max_length': '(3)'}), "(default='MXN', max_length=3)\n", (3169, 3198), False, 'from django.db import models\n'), ((3290, 3344), 'django.db.models.ForeignKey', 'models.ForeignKey', (['PaymentModel'], {'blank': '(True)', 'null': '(True)'}), '(PaymentModel, blank=True, null=True)\n', (3307, 3344), False, 'from django.db import models\n'), ((3392, 3447), 'django.db.models.ForeignKey', 'models.ForeignKey', (['PaymModeModel'], {'null': '(True)', 'blank': '(True)'}), '(PaymModeModel, null=True, blank=True)\n', (3409, 3447), False, 'from django.db import models\n'), ((3462, 3515), 'django.db.models.TextField', 'models.TextField', ([], {'default': 'None', 'blank': '(True)', 'null': '(True)'}), '(default=None, blank=True, null=True)\n', (3478, 3515), False, 'from django.db import models\n'), ((3540, 3615), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(20)', 'decimal_places': '(2)', 'blank': '(True)', 'null': '(True)'}), '(max_digits=20, decimal_places=2, blank=True, null=True)\n', (3559, 3615), False, 'from django.db import models\n'), ((3637, 3712), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(20)', 'decimal_places': '(2)', 'blank': '(True)', 'null': '(True)'}), '(max_digits=20, decimal_places=2, blank=True, null=True)\n', (3656, 3712), False, 'from django.db import models\n'), ((3733, 3808), 'django.db.models.DecimalField', 
'models.DecimalField', ([], {'max_digits': '(20)', 'decimal_places': '(2)', 'blank': '(True)', 'null': '(True)'}), '(max_digits=20, decimal_places=2, blank=True, null=True)\n', (3752, 3808), False, 'from django.db import models\n'), ((3832, 3907), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(20)', 'decimal_places': '(2)', 'blank': '(True)', 'null': '(True)'}), '(max_digits=20, decimal_places=2, blank=True, null=True)\n', (3851, 3907), False, 'from django.db import models\n'), ((3932, 3965), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (3951, 3965), False, 'from django.db import models\n'), ((3986, 4052), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'choices': 'DOC_STATE', 'default': 'CLOSED'}), '(max_length=20, choices=DOC_STATE, default=CLOSED)\n', (4002, 4052), False, 'from django.db import models\n'), ((4078, 4124), 'django.db.models.ForeignKey', 'models.ForeignKey', (['CustomerModel'], {'default': 'None'}), '(CustomerModel, default=None)\n', (4095, 4124), False, 'from django.db import models\n'), ((4141, 4196), 'django.db.models.ForeignKey', 'models.ForeignKey', (['LocationModel'], {'blank': '(True)', 'null': '(True)'}), '(LocationModel, blank=True, null=True)\n', (4158, 4196), False, 'from django.db import models\n'), ((4752, 4803), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ItemModel'], {'blank': '(True)', 'null': '(True)'}), '(ItemModel, blank=True, null=True)\n', (4769, 4803), False, 'from django.db import models\n'), ((4819, 4873), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'blank': '(True)', 'null': '(True)'}), '(max_length=50, blank=True, null=True)\n', (4835, 4873), False, 'from django.db import models\n'), ((4889, 4939), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (4916, 4939), 
False, 'from django.db import models\n'), ((4956, 5010), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'blank': '(True)', 'null': '(True)'}), '(max_length=10, blank=True, null=True)\n', (4972, 5010), False, 'from django.db import models\n'), ((5028, 5103), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'blank': '(True)', 'null': '(True)', 'max_digits': '(10)', 'decimal_places': '(2)'}), '(blank=True, null=True, max_digits=10, decimal_places=2)\n', (5047, 5103), False, 'from django.db import models\n'), ((5128, 5203), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(10)', 'decimal_places': '(2)', 'blank': '(True)', 'null': '(True)'}), '(max_digits=10, decimal_places=2, blank=True, null=True)\n', (5147, 5203), False, 'from django.db import models\n'), ((5231, 5306), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(10)', 'decimal_places': '(2)', 'blank': '(True)', 'null': '(True)'}), '(max_digits=10, decimal_places=2, blank=True, null=True)\n', (5250, 5306), False, 'from django.db import models\n'), ((5333, 5408), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(20)', 'decimal_places': '(2)', 'blank': '(True)', 'null': '(True)'}), '(max_digits=20, decimal_places=2, blank=True, null=True)\n', (5352, 5408), False, 'from django.db import models\n'), ((5440, 5540), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'default': 'BACKORDER', 'choices': 'SALES_STATUS', 'blank': '(True)', 'null': '(True)'}), '(max_length=100, default=BACKORDER, choices=SALES_STATUS,\n blank=True, null=True)\n', (5456, 5540), False, 'from django.db import models\n'), ((5707, 5762), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (5739, 5762), False, 'from django.db import models\n'), ((5780, 5837), 'django.db.models.ForeignKey', 
'models.ForeignKey', (['SalesOrderModel'], {'null': '(True)', 'blank': '(True)'}), '(SalesOrderModel, null=True, blank=True)\n', (5797, 5837), False, 'from django.db import models\n')]
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Executes benchmark testing for 3D Unet model."""
# pylint: disable=line-too-long
from __future__ import print_function

import functools
import os
import time
from typing import Optional

from absl import flags
import tensorflow as tf  # pylint: disable=g-bad-import-order

from official.benchmark import benchmark_wrappers
from official.benchmark import keras_benchmark
from official.benchmark import owner_utils
from official.vision.segmentation import unet_main as unet_training_lib
from official.vision.segmentation import unet_model as unet_model_lib

# Accuracy band the final validation accuracy must fall into for the
# benchmark run to be reported as healthy.
UNET3D_MIN_ACCURACY = 0.90
UNET3D_MAX_ACCURACY = 0.98
# GCS locations of the training/eval shards and the model config.
UNET_TRAINING_FILES = 'gs://mlcompass-data/unet3d/train_data/*'
UNET_EVAL_FILES = 'gs://mlcompass-data/unet3d/eval_data/*'
UNET_MODEL_CONFIG_FILE = 'gs://mlcompass-data/unet3d/config/unet_config.yaml'

FLAGS = flags.FLAGS


class Unet3DAccuracyBenchmark(keras_benchmark.KerasBenchmark):
  """Benchmark accuracy tests for UNet3D model in Keras."""

  def __init__(self,
               output_dir: Optional[str] = None,
               root_data_dir: Optional[str] = None,
               **kwargs):
    """A benchmark class.

    Args:
      output_dir: directory where to output e.g. log files
      root_data_dir: directory under which to look for dataset
      **kwargs: arbitrary named arguments. This is needed to make the
        constructor forward compatible in case PerfZero provides more
        named arguments before updating the constructor.
    """
    flag_methods = [unet_training_lib.define_unet3d_flags]  # registers the UNet3D absl flags

    self.training_file_pattern = UNET_TRAINING_FILES
    self.eval_file_pattern = UNET_EVAL_FILES
    # TODO(hongjunchoi): Create and use shared config file instead.
    self.config_file = UNET_MODEL_CONFIG_FILE
    super(Unet3DAccuracyBenchmark, self).__init__(
        output_dir=output_dir, flag_methods=flag_methods)

  def _set_benchmark_parameters(self, experiment_name):
    """Overrides training parameters for benchmark tests."""
    FLAGS.model_dir = self._get_model_dir(experiment_name)
    FLAGS.mode = 'train'
    FLAGS.training_file_pattern = self.training_file_pattern
    FLAGS.eval_file_pattern = self.eval_file_pattern
    FLAGS.config_file = self.config_file
    # Learning-rate schedule and epoch count shortened for benchmarking.
    FLAGS.lr_init_value = 0.00005
    FLAGS.lr_decay_rate = 0.5
    FLAGS.epochs = 3

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(self,
                                experiment_name: str,
                                min_accuracy: float = UNET3D_MIN_ACCURACY,
                                max_accuracy: float = UNET3D_MAX_ACCURACY,
                                distribution_strategy: str = 'tpu',
                                epochs: int = 10,
                                steps: int = 0,
                                epochs_between_evals: int = 1,
                                dtype: str = 'float32',
                                enable_xla: bool = False,
                                run_eagerly: bool = False):
    """Runs and reports the benchmark given the provided configuration."""
    params = unet_training_lib.extract_params(FLAGS)
    strategy = unet_training_lib.create_distribution_strategy(params)

    input_dtype = params.dtype
    if input_dtype == 'float16' or input_dtype == 'bfloat16':
      # Enable Keras mixed precision matching the requested input dtype.
      policy = tf.keras.mixed_precision.experimental.Policy(
          'mixed_bfloat16' if input_dtype == 'bfloat16' else 'mixed_float16')
      tf.keras.mixed_precision.experimental.set_policy(policy)

    stats = {}
    # Wall time covers model construction plus the full training run.
    start_time_sec = time.time()
    with strategy.scope():
      unet_model = unet_model_lib.build_unet_model(params)
      history = unet_training_lib.train(
          params, strategy, unet_model,
          functools.partial(unet_training_lib.get_train_dataset, params),
          functools.partial(unet_training_lib.get_eval_dataset, params))

      # Report the last recorded eval/train accuracy of the run.
      stats['accuracy_top_1'] = history.history['val_metric_accuracy'][-1]
      stats['training_accuracy_top_1'] = history.history['metric_accuracy'][-1]
    wall_time_sec = time.time() - start_time_sec

    super(Unet3DAccuracyBenchmark, self)._report_benchmark(
        stats,
        wall_time_sec,
        top_1_min=min_accuracy,
        top_1_max=max_accuracy,
        total_batch_size=params.train_batch_size)

  def _get_model_dir(self, folder_name):
    """Returns the per-experiment model directory under output_dir."""
    return os.path.join(self.output_dir, folder_name)

  @owner_utils.Owner('tf-model-garden')
  def benchmark_4x4_tpu_bf16(self):
    """Test Keras model with 4x4 TPU, bfloat16."""
    # NOTE(review): experiment_name says 'fp16' but dtype is bfloat16 —
    # the string is kept since it names the on-disk model_dir.
    experiment_name = 'benchmark_4x4_tpu_fp16'
    self._setup()
    self._set_benchmark_parameters(experiment_name)
    self._run_and_report_benchmark(
        experiment_name=experiment_name,
        dtype='bfloat16',
        distribution_strategy='tpu')

  @owner_utils.Owner('tf-graph-compiler')
  def benchmark_4x4_tpu_bf16_mlir(self):
    """Test Keras model with 4x4 TPU, bfloat16 and MLIR enabled."""
    experiment_name = 'benchmark_4x4_tpu_fp16_mlir'
    tf.config.experimental.enable_mlir_bridge()
    self._setup()
    self._set_benchmark_parameters(experiment_name)
    self._run_and_report_benchmark(
        experiment_name=experiment_name,
        dtype='bfloat16',
        distribution_strategy='tpu')


if __name__ == '__main__':
  tf.test.main()
[ "official.benchmark.owner_utils.Owner", "official.vision.segmentation.unet_main.extract_params", "tensorflow.keras.mixed_precision.experimental.Policy", "official.vision.segmentation.unet_model.build_unet_model", "official.vision.segmentation.unet_main.create_distribution_strategy", "os.path.join", "ten...
[((5102, 5138), 'official.benchmark.owner_utils.Owner', 'owner_utils.Owner', (['"""tf-model-garden"""'], {}), "('tf-model-garden')\n", (5119, 5138), False, 'from official.benchmark import owner_utils\n'), ((5483, 5521), 'official.benchmark.owner_utils.Owner', 'owner_utils.Owner', (['"""tf-graph-compiler"""'], {}), "('tf-graph-compiler')\n", (5500, 5521), False, 'from official.benchmark import owner_utils\n'), ((5968, 5982), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (5980, 5982), True, 'import tensorflow as tf\n'), ((3815, 3854), 'official.vision.segmentation.unet_main.extract_params', 'unet_training_lib.extract_params', (['FLAGS'], {}), '(FLAGS)\n', (3847, 3854), True, 'from official.vision.segmentation import unet_main as unet_training_lib\n'), ((3870, 3924), 'official.vision.segmentation.unet_main.create_distribution_strategy', 'unet_training_lib.create_distribution_strategy', (['params'], {}), '(params)\n', (3916, 3924), True, 'from official.vision.segmentation import unet_main as unet_training_lib\n'), ((4258, 4269), 'time.time', 'time.time', ([], {}), '()\n', (4267, 4269), False, 'import time\n'), ((5055, 5097), 'os.path.join', 'os.path.join', (['self.output_dir', 'folder_name'], {}), '(self.output_dir, folder_name)\n', (5067, 5097), False, 'import os\n'), ((5683, 5726), 'tensorflow.config.experimental.enable_mlir_bridge', 'tf.config.experimental.enable_mlir_bridge', ([], {}), '()\n', (5724, 5726), True, 'import tensorflow as tf\n'), ((4034, 4151), 'tensorflow.keras.mixed_precision.experimental.Policy', 'tf.keras.mixed_precision.experimental.Policy', (["('mixed_bfloat16' if input_dtype == 'bfloat16' else 'mixed_float16')"], {}), "('mixed_bfloat16' if \n input_dtype == 'bfloat16' else 'mixed_float16')\n", (4078, 4151), True, 'import tensorflow as tf\n'), ((4164, 4220), 'tensorflow.keras.mixed_precision.experimental.set_policy', 'tf.keras.mixed_precision.experimental.set_policy', (['policy'], {}), '(policy)\n', (4212, 4220), True, 'import 
tensorflow as tf\n'), ((4316, 4355), 'official.vision.segmentation.unet_model.build_unet_model', 'unet_model_lib.build_unet_model', (['params'], {}), '(params)\n', (4347, 4355), True, 'from official.vision.segmentation import unet_model as unet_model_lib\n'), ((4760, 4771), 'time.time', 'time.time', ([], {}), '()\n', (4769, 4771), False, 'import time\n'), ((4447, 4509), 'functools.partial', 'functools.partial', (['unet_training_lib.get_train_dataset', 'params'], {}), '(unet_training_lib.get_train_dataset, params)\n', (4464, 4509), False, 'import functools\n'), ((4521, 4582), 'functools.partial', 'functools.partial', (['unet_training_lib.get_eval_dataset', 'params'], {}), '(unet_training_lib.get_eval_dataset, params)\n', (4538, 4582), False, 'import functools\n')]
try:
    import subprocess32 as sp
except ImportError:
    # Bug fix: the original caught only ModuleNotFoundError, so a plain
    # ImportError from subprocess32 (e.g. a broken extension, or an
    # interpreter predating ModuleNotFoundError) escaped instead of
    # falling back.  ModuleNotFoundError subclasses ImportError, so this
    # is strictly more robust with identical behavior otherwise.
    import subprocess as sp
import shlex


def run_command(cmd):
    """Run *cmd* (a shell-style command string) without invoking a shell.

    The string is tokenised with ``shlex.split`` and executed directly,
    which avoids shell-injection issues.

    Returns:
        (stdout, stderr) of the finished process, both as bytes.
    """
    completed = sp.run(shlex.split(cmd), stdout=sp.PIPE, stderr=sp.PIPE)
    return completed.stdout, completed.stderr
[ "shlex.split" ]
[((142, 158), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (153, 158), False, 'import shlex\n')]
from tletools import TLE
import matplotlib.pyplot as plt
import numpy as np
import math
from math import *
import sys
import datetime as dt
import tleToKeplerianElements as ttke


# Radius from the central body's focus to the satellite for the given TLE.
def distanceToCentralBody(tle, curr_trueA):
    """Return the orbital radius at true anomaly *curr_trueA* (radians)."""
    _, _, _, ecc, _, _, semi_major, _, _ = ttke.tleToKepler(tle)
    # Eccentric anomaly from the true anomaly, wrapped into [0, 2*pi).
    ecc_anom = math.atan2(math.sqrt(1 - ecc ** 2) * math.sin(curr_trueA),
                          ecc + math.cos(curr_trueA))
    ecc_anom %= 2 * math.pi
    # Conic-section radius: r = a * (1 - e * cos(E)).
    return semi_major * (1 - ecc * math.cos(ecc_anom))


# In-plane cartesian position of the satellite at one instant.
def getOrbitalCartesianCoords(tle, curr_trueA):
    """Return (x, y) in the orbital plane for true anomaly *curr_trueA*."""
    radius = distanceToCentralBody(tle, curr_trueA)
    return radius * math.cos(curr_trueA), radius * math.sin(curr_trueA)
[ "tleToKeplerianElements.tleToKepler" ]
[((337, 358), 'tleToKeplerianElements.tleToKepler', 'ttke.tleToKepler', (['tle'], {}), '(tle)\n', (353, 358), True, 'import tleToKeplerianElements as ttke\n')]
from tf_models.utils import train, save_model


def train_and_save(name: str, corpus: str, pos_label: str, root: str = ""):
    """Train a classifier on *corpus* and persist the model and vectorizer.

    *pos_label* is the label treated as positive; *root* is the output
    subdirectory passed through to training and saving.
    """
    print(f"Start training {name}...")
    model, _, vectorizer = train(corpus, pos_label, root)
    save_model(model, vectorizer, name, root)


if __name__ == "__main__":
    # Train intent model
    # fmt: off
    train_and_save(
        name="intent", corpus="intent_corpus.csv", pos_label="weather", root="intent"
    )
    # fmt: on

    # Train flow control model
    train_and_save(
        name="flow_control",
        corpus="flow_control_corpus.csv",
        pos_label="continue",
        root="flow_control",
    )
[ "tf_models.utils.train", "tf_models.utils.save_model" ]
[((196, 226), 'tf_models.utils.train', 'train', (['corpus', 'pos_label', 'root'], {}), '(corpus, pos_label, root)\n', (201, 226), False, 'from tf_models.utils import train, save_model\n'), ((231, 269), 'tf_models.utils.save_model', 'save_model', (['mlp_model', 'vec', 'name', 'root'], {}), '(mlp_model, vec, name, root)\n', (241, 269), False, 'from tf_models.utils import train, save_model\n')]
"""empty message Revision ID: f1896d92dddc Revises: <PASSWORD> Create Date: 2020-08-21 22:08:42.863607 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '<KEY>' down_revision = '<PASSWORD>' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('bills', sa.Column('created', sa.Date(), nullable=True)) op.add_column('paychecks', sa.Column('created', sa.Date(), nullable=True)) op.add_column('savings_goals', sa.Column('created', sa.Date(), nullable=True)) op.add_column('total_savings_log', sa.Column('created', sa.Date(), nullable=True)) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column('total_savings_log', 'created') op.drop_column('savings_goals', 'created') op.drop_column('paychecks', 'created') op.drop_column('bills', 'created') # ### end Alembic commands ###
[ "alembic.op.drop_column", "sqlalchemy.Date" ]
[((817, 863), 'alembic.op.drop_column', 'op.drop_column', (['"""total_savings_log"""', '"""created"""'], {}), "('total_savings_log', 'created')\n", (831, 863), False, 'from alembic import op\n'), ((868, 910), 'alembic.op.drop_column', 'op.drop_column', (['"""savings_goals"""', '"""created"""'], {}), "('savings_goals', 'created')\n", (882, 910), False, 'from alembic import op\n'), ((915, 953), 'alembic.op.drop_column', 'op.drop_column', (['"""paychecks"""', '"""created"""'], {}), "('paychecks', 'created')\n", (929, 953), False, 'from alembic import op\n'), ((958, 992), 'alembic.op.drop_column', 'op.drop_column', (['"""bills"""', '"""created"""'], {}), "('bills', 'created')\n", (972, 992), False, 'from alembic import op\n'), ((417, 426), 'sqlalchemy.Date', 'sa.Date', ([], {}), '()\n', (424, 426), True, 'import sqlalchemy as sa\n'), ((496, 505), 'sqlalchemy.Date', 'sa.Date', ([], {}), '()\n', (503, 505), True, 'import sqlalchemy as sa\n'), ((579, 588), 'sqlalchemy.Date', 'sa.Date', ([], {}), '()\n', (586, 588), True, 'import sqlalchemy as sa\n'), ((666, 675), 'sqlalchemy.Date', 'sa.Date', ([], {}), '()\n', (673, 675), True, 'import sqlalchemy as sa\n')]
import subprocess

# Baseline TF-M commit that this build script checks out and builds.
commit_sha_base = 'c855573fd1acadf2dd3b1cdc1e4581cd49c77f05'

cmake_command = 'cmake -S . -B cmake_build -DTFM_PLATFORM=arm/mps2/an521 \
            -DTFM_TOOLCHAIN_FILE=toolchain_GNUARM.cmake \
            -DCMAKE_BUILD_TYPE=Release \
            -DTFM_PROFILE=profile_small'

build_command = 'cmake --build cmake_build -- install -j32'


def sys_run(command):
    """Run *command* through the shell, echoing its output line by line.

    stderr is merged into stdout so everything appears in order.
    Returns the (exhausted) stdout pipe, matching the original interface;
    current callers ignore the return value.

    NOTE: shell=True is acceptable here because every command passed in
    is a hard-coded literal, never user input.
    """
    p = subprocess.Popen(command, shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # Stream lines as the child produces them instead of buffering the
    # whole output with readlines().
    for line in p.stdout:
        print(line)
    p.wait()
    return p.stdout


sys_run('git checkout ' + commit_sha_base)
sys_run('rm -rf cmake_build')
sys_run(cmake_command)
sys_run(build_command)
[ "subprocess.Popen" ]
[((701, 793), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(command, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.STDOUT)\n', (717, 793), False, 'import subprocess\n')]
from collections import defaultdict import copy class CooSparseMatrix: def _prepare_coords(self, coords): i, j = tuple(map(int, coords)) if 0 > i or i >= self.Shape[0] or 0 > j or j >= self.Shape[1]: raise TypeError return i, j def __get_copy(self): return copy.deepcopy(self) def __init__(self, ijx_list, shape=None): self.data = defaultdict(dict) for *coords, x in ijx_list: i, j = tuple(map(int, coords)) if (i, j) in self.data: raise TypeError self.data[i][j] = x self.Shape = shape def __getitem__(self, coords): if len(coords) == 2: i, j = self._prepare_coords(coords) return self.data.get(coords[0], dict()).get(coords[1], 0.) i, _ = self._prepare_coords((coords[0], 0)) return CooSparseMatrix([(i, j, v) for j, v in self.data[i].items()], Shape=(1, self.Shape[1])) def __setitem__(self, coords, value): i, j = self._prepare_coords(coords) if value == 0: if i in self.data and j in self.data[i]: del self.data[i][j] else: self.data[i][j] = value def __add__(self, other): if self.Shape != other.Shape: raise TypeError res = self.__get_copy() for i, d in other.data.items(): if i not in res.data: res.data[i] = copy.copy(other.data[i]) continue for j, v in d.items(): cur_val = res.data[i].get(j, 0) + v if cur_val == 0: res.data[i].pop(j, None) else: res.data[i][j] = cur_val if len(res.data[i]) == 0: del res.data[i] return res def __mul__(self, value): if value == 0: return CooSparseMatrix([], Shape=self.Shape) res = self.__get_copy() for i in self.data.keys(): for j in self.data[i].keys(): res.data[i][j] *= value return res def __rmul__(self, value): return self * value def __sub__(self, other): return self + other * -1 def __deepcopy__(self, memo): res = CooSparseMatrix([], shape=self.Shape) res.data = copy.deepcopy(self.data) return res def __setattr__(self, attr, value): if attr == 'shape': if not isinstance(value, tuple): raise TypeError if len(value) != 2 or type(value[0]) is not int or type(value[1]) is not int: raise TypeError if value[0] * value[1] != self.Shape[0] * 
self.Shape[1]: raise TypeError res = CooSparseMatrix([], value) for i, d in self.data.items(): for j, v in d.items(): pos = i * self.Shape[1] + j res[pos // value[1], pos % value[1]] = v self.Shape = value self.data = res.data elif attr == 'T': raise AttributeError else: self.__dict__[attr] = value def __getattr__(self, attr): if attr == 'shape': return self.Shape elif attr == 'T': res = CooSparseMatrix([], self.Shape[::-1]) for i, d in self.data.items(): for j, v in d.items(): res[j, i] = v return res else: return self.__dict__[attr]
[ "copy.copy", "collections.defaultdict", "copy.deepcopy" ]
[((312, 331), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (325, 331), False, 'import copy\n'), ((399, 416), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (410, 416), False, 'from collections import defaultdict\n'), ((2360, 2384), 'copy.deepcopy', 'copy.deepcopy', (['self.data'], {}), '(self.data)\n', (2373, 2384), False, 'import copy\n'), ((1476, 1500), 'copy.copy', 'copy.copy', (['other.data[i]'], {}), '(other.data[i])\n', (1485, 1500), False, 'import copy\n')]
# coding=utf-8 # """ Copyright (c) 2020, <NAME>. All rights reserved. license: BSD 3-Clause License, see LICENSE for more details. """ from zm.pyutils import struct class AnyStrKey(object): """ Any amount of string keys""" __slots__ = () def __eq__(self, other): if not isinstance(other, AnyStrKey): # don't attempt to compare against unrelated types return NotImplemented # pragma: no cover return True def __hash__(self): # necessary for instances to behave sanely in dicts and sets. return hash(self.__class__) ANYSTR_KEY = AnyStrKey() _PATHS_SCHEME_DICT_VARS = { 'incl' : { 'type': ('str', 'list-of-strs') }, 'excl' : { 'type': ('str', 'list-of-strs') }, 'ignorecase' : { 'type': 'bool' }, 'startdir' : { 'type': 'str' }, } PATHS_SCHEME = { 'type' : ('str', 'list', 'dict'), 'dict' : { 'vars' : _PATHS_SCHEME_DICT_VARS, }, 'list' : { 'vars-type' : ('str', 'dict'), 'dict-vars' : _PATHS_SCHEME_DICT_VARS, }, 'traits' : ['complex-path'], } ConfNode = struct('ConfNode', 'val, traits')
[ "zm.pyutils.struct" ]
[((1114, 1147), 'zm.pyutils.struct', 'struct', (['"""ConfNode"""', '"""val, traits"""'], {}), "('ConfNode', 'val, traits')\n", (1120, 1147), False, 'from zm.pyutils import struct\n')]
from __future__ import absolute_import import logging log = logging.getLogger(__name__) from ..message import Message from . import register @register class ok_1(Message): ''' Define the ``OK`` message (revision 1) for acknowledging successful handling of a previous message. The ``content`` fragment of for this message is empty. ''' msgtype = 'OK' revision = 1 @classmethod def create(cls, request_id, **metadata): ''' Create an ``OK`` message Args: request_id (str) : The message ID for the message the precipitated the OK. Any additional keyword arguments will be put into the message ``metadata`` fragment as-is. ''' header = cls.create_header(request_id=request_id) return cls(header, metadata, {})
[ "logging.getLogger" ]
[((61, 88), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (78, 88), False, 'import logging\n')]
#!/usr/bin/env python # Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. '''Copies the liblouis braille translation tables to a destination.''' import liblouis_list_tables import optparse import os import shutil def LinkOrCopyFiles(sources, dest_dir): def LinkOrCopyOneFile(src, dst): if os.path.exists(dst): os.unlink(dst) try: os.link(src, dst) except: shutil.copy(src, dst) if not os.path.exists(dest_dir): os.makedirs(dest_dir) for source in sources: LinkOrCopyOneFile(source, os.path.join(dest_dir, os.path.basename(source))) def WriteDepfile(depfile, infiles): stampfile = depfile + '.stamp' with open(stampfile, 'w'): os.utime(stampfile, None) content = '%s: %s' % (stampfile, ' '.join(infiles)) open(depfile, 'w').write(content) def main(): parser = optparse.OptionParser(description=__doc__) parser.add_option('-D', '--directory', dest='directories', action='append', help='Where to search for table files') parser.add_option('-e', '--extra_file', dest='extra_files', action='append', default=[], help='Extra liblouis table file to process') parser.add_option('-d', '--dest_dir', action='store', metavar='DIR', help=('Destination directory. 
Used when translating ' + 'input paths to output paths and when copying ' 'files.')) parser.add_option('--depfile', metavar='FILENAME', help=('Store .d style dependencies in FILENAME and touch ' 'FILENAME.stamp after copying the files')) options, args = parser.parse_args() if len(args) != 1: parser.error('Expecting exactly one argument') if not options.directories: parser.error('At least one --directory option must be specified') if not options.dest_dir: parser.error('At least one --dest_dir option must be specified') files = liblouis_list_tables.GetTableFiles(args[0], options.directories, options.extra_files) LinkOrCopyFiles(files, options.dest_dir) if options.depfile: WriteDepfile(options.depfile, files) if __name__ == '__main__': main()
[ "os.path.exists", "liblouis_list_tables.GetTableFiles", "os.makedirs", "optparse.OptionParser", "os.utime", "os.unlink", "shutil.copy", "os.path.basename", "os.link" ]
[((937, 979), 'optparse.OptionParser', 'optparse.OptionParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (958, 979), False, 'import optparse\n'), ((2052, 2142), 'liblouis_list_tables.GetTableFiles', 'liblouis_list_tables.GetTableFiles', (['args[0]', 'options.directories', 'options.extra_files'], {}), '(args[0], options.directories, options.\n extra_files)\n', (2086, 2142), False, 'import liblouis_list_tables\n'), ((409, 428), 'os.path.exists', 'os.path.exists', (['dst'], {}), '(dst)\n', (423, 428), False, 'import os\n'), ((534, 558), 'os.path.exists', 'os.path.exists', (['dest_dir'], {}), '(dest_dir)\n', (548, 558), False, 'import os\n'), ((564, 585), 'os.makedirs', 'os.makedirs', (['dest_dir'], {}), '(dest_dir)\n', (575, 585), False, 'import os\n'), ((795, 820), 'os.utime', 'os.utime', (['stampfile', 'None'], {}), '(stampfile, None)\n', (803, 820), False, 'import os\n'), ((436, 450), 'os.unlink', 'os.unlink', (['dst'], {}), '(dst)\n', (445, 450), False, 'import os\n'), ((466, 483), 'os.link', 'os.link', (['src', 'dst'], {}), '(src, dst)\n', (473, 483), False, 'import os\n'), ((502, 523), 'shutil.copy', 'shutil.copy', (['src', 'dst'], {}), '(src, dst)\n', (513, 523), False, 'import shutil\n'), ((664, 688), 'os.path.basename', 'os.path.basename', (['source'], {}), '(source)\n', (680, 688), False, 'import os\n')]
# -*- coding: utf-8 -*_ # # Copyright (c) 2020, Pureport, Inc. # All Rights Reserved from __future__ import absolute_import from click import ( option, Choice ) from pureport_client.helpers import format_date from pureport_client.commands import ( CommandBase, AccountsMixin ) EVENT_TYPES = ('USER_LOGIN', 'USER_FORGOT_PASSWORD', 'API_LOGIN', 'ACCOUNT_CREATE', 'ACCOUNT_UPDATE', 'ACCOUNT_DELETE', 'ACCOUNT_BILLING_CREATE', 'ACCOUNT_BILLING_UPDATE', 'ACCOUNT_BILLING_DELETE', 'NETWORK_CREATE', 'NETWORK_UPDATE', 'NETWORK_DELETE', 'CONNECTION_CREATE', 'CONNECTION_UPDATE', 'CONNECTION_DELETE', 'GATEWAY_CREATE', 'GATEWAY_UPDATE', 'GATEWAY_DELETE', 'API_KEY_CREATE', 'API_KEY_UPDATE', 'API_KEY_DELETE', 'ROLE_CREATE', 'ROLE_UPDATE', 'ROLE_DELETE', 'USER_CREATE', 'USER_UPDATE', 'USER_DELETE', 'USER_DOMAIN_CREATE', 'USER_DOMAIN_UPDATE', 'USER_DOMAIN_DELETE', 'PORT_CREATE', 'PORT_UPDATE', 'PORT_DELETE', 'MEMBER_INVITE_CREATE', 'MEMBER_INVITE_ACCEPT', 'MEMBER_INVITE_UPDATE', 'MEMBER_INVITE_DELETE', 'ACCOUNT_MEMBER_CREATE', 'ACCOUNT_MEMBER_UPDATE', 'ACCOUNT_MEMBER_DELETE', 'CONNECTION_STATE_CHANGE', 'GATEWAY_STATE_CHANGE', 'GATEWAY_BGP_STATUS_CHANGE', 'GATEWAY_IPSEC_STATUS_CHANGE', 'NOTIFICATION_CREATE', 'NOTIFICATION_UPDATE', 'NOTIFICATION_DELETE', 'TASK_CREATE', 'TASK_UPDATE', 'TASK_DELETE') SUBJECT_TYPES = ('ACCOUNT', 'CONNECTION', 'NETWORK', 'USER', 'USER_DOMAIN', 'ROLE', 'API_KEY', 'GATEWAY', 'NOTIFICATION', 'ACCOUNT_INVITE', 'ACCOUNT_BILLING', 'PORT', 'ACCOUNT_MEMBER', 'TASK') SORT_CHOICES = ('timestamp', 'eventType', 'subjectType', 'ipAddress', 'userAgent', 'source', 'result') class Command(AccountsMixin, CommandBase): """Display Pureport account audit log details """ @option('-pn', '--page_number', type=int, help='The page number for pagination.') @option('-ps', '--page_size', type=int, help='The page size for pagination.') @option('-s', '--sort', type=Choice(SORT_CHOICES), help='How should the data be sorted.') @option('-sd', '--sort_direction', type=Choice(['ASC', 
'DESC']), help='The direction the results will be sorted.') @option('-st', '--start_time', help='The start time for selecting results between a time range.') @option('-et', '--end_time', help='The end time for selecting results between a time range.') @option('-i', '--include_child_accounts', is_flag=True, help='If the results should include entries from child accounts.') @option('-ev', '--event_types', type=Choice(EVENT_TYPES), help='Limit the results to particular event types.') @option('-r', '--result', type=Choice(('SUCCESS', 'FAILURE')), help='If the result was successful or not.') @option('-pi', '--principal_id', help='The principal id, e.g. user or api key id.') @option('-ci', '--correlation_id', help='The correlation id, e.g. id of audit event to surface related events.') @option('-si', '--subject_id', help='The subject id, e.g. id of audit subject ' '(connection, network, etc.) to surface related events.') @option('-su', '--subject_type', type=Choice(SUBJECT_TYPES), help='The subject type') @option('-ics', '--include_child_subjects', is_flag=True, help='If the results should include entries from child subjects from the subject id.') def query(self, page_number=None, page_size=None, sort=None, sort_direction=None, start_time=None, end_time=None, include_child_accounts=None, event_types=None, result=None, principal_id=None, ip_address=None, correlation_id=None, subject_id=None, subject_type=None, include_child_subjects=None): """ Query the audit log for this account. 
\f :param int page_number: :param int page_size: :param str sort: :param str sort_direction: :param str start_time: formatted as 'YYYY-MM-DDT00:00:00.000Z' :param str end_time: formatted as 'YYYY-MM-DDT00:00:00.000Z' :param bool include_child_accounts: :param list[str] event_types: :param str result: :param str principal_id: :param str ip_address: :param str correlation_id: :param str subject_id: :param str subject_type: :param bool include_child_subjects: :rtype: Page[AuditEntry] :raises: .exception.ClientHttpError """ params = { 'pageNumber': page_number, 'pageSize': page_size, 'sort': sort, 'sortDirection': sort_direction, 'startTime': format_date(start_time), 'endTime': format_date(end_time), 'includeChildAccounts': include_child_accounts, 'eventTypes': event_types, 'result': result, 'principalId': principal_id, 'ipAddress': ip_address, 'correlationId': correlation_id, 'subjectId': subject_id, 'subjectType': subject_type, 'includeChildSubjects': include_child_subjects } kwargs = {'query': dict(((k, v) for k, v in params.items() if v))} return self.__call__('get', 'auditLog', **kwargs)
[ "click.option", "click.Choice", "pureport_client.helpers.format_date" ]
[((2079, 2164), 'click.option', 'option', (['"""-pn"""', '"""--page_number"""'], {'type': 'int', 'help': '"""The page number for pagination."""'}), "('-pn', '--page_number', type=int, help='The page number for pagination.'\n )\n", (2085, 2164), False, 'from click import option, Choice\n'), ((2165, 2241), 'click.option', 'option', (['"""-ps"""', '"""--page_size"""'], {'type': 'int', 'help': '"""The page size for pagination."""'}), "('-ps', '--page_size', type=int, help='The page size for pagination.')\n", (2171, 2241), False, 'from click import option, Choice\n'), ((2484, 2585), 'click.option', 'option', (['"""-st"""', '"""--start_time"""'], {'help': '"""The start time for selecting results between a time range."""'}), "('-st', '--start_time', help=\n 'The start time for selecting results between a time range.')\n", (2490, 2585), False, 'from click import option, Choice\n'), ((2598, 2695), 'click.option', 'option', (['"""-et"""', '"""--end_time"""'], {'help': '"""The end time for selecting results between a time range."""'}), "('-et', '--end_time', help=\n 'The end time for selecting results between a time range.')\n", (2604, 2695), False, 'from click import option, Choice\n'), ((2708, 2834), 'click.option', 'option', (['"""-i"""', '"""--include_child_accounts"""'], {'is_flag': '(True)', 'help': '"""If the results should include entries from child accounts."""'}), "('-i', '--include_child_accounts', is_flag=True, help=\n 'If the results should include entries from child accounts.')\n", (2714, 2834), False, 'from click import option, Choice\n'), ((3098, 3185), 'click.option', 'option', (['"""-pi"""', '"""--principal_id"""'], {'help': '"""The principal id, e.g. user or api key id."""'}), "('-pi', '--principal_id', help=\n 'The principal id, e.g. user or api key id.')\n", (3104, 3185), False, 'from click import option, Choice\n'), ((3198, 3314), 'click.option', 'option', (['"""-ci"""', '"""--correlation_id"""'], {'help': '"""The correlation id, e.g. 
id of audit event to surface related events."""'}), "('-ci', '--correlation_id', help=\n 'The correlation id, e.g. id of audit event to surface related events.')\n", (3204, 3314), False, 'from click import option, Choice\n'), ((3327, 3470), 'click.option', 'option', (['"""-si"""', '"""--subject_id"""'], {'help': '"""The subject id, e.g. id of audit subject (connection, network, etc.) to surface related events."""'}), "('-si', '--subject_id', help=\n 'The subject id, e.g. id of audit subject (connection, network, etc.) to surface related events.'\n )\n", (3333, 3470), False, 'from click import option, Choice\n'), ((3600, 3753), 'click.option', 'option', (['"""-ics"""', '"""--include_child_subjects"""'], {'is_flag': '(True)', 'help': '"""If the results should include entries from child subjects from the subject id."""'}), "('-ics', '--include_child_subjects', is_flag=True, help=\n 'If the results should include entries from child subjects from the subject id.'\n )\n", (3606, 3753), False, 'from click import option, Choice\n'), ((5025, 5048), 'pureport_client.helpers.format_date', 'format_date', (['start_time'], {}), '(start_time)\n', (5036, 5048), False, 'from pureport_client.helpers import format_date\n'), ((5073, 5094), 'pureport_client.helpers.format_date', 'format_date', (['end_time'], {}), '(end_time)\n', (5084, 5094), False, 'from pureport_client.helpers import format_date\n'), ((2275, 2295), 'click.Choice', 'Choice', (['SORT_CHOICES'], {}), '(SORT_CHOICES)\n', (2281, 2295), False, 'from click import option, Choice\n'), ((2392, 2415), 'click.Choice', 'Choice', (["['ASC', 'DESC']"], {}), "(['ASC', 'DESC'])\n", (2398, 2415), False, 'from click import option, Choice\n'), ((2883, 2902), 'click.Choice', 'Choice', (['EVENT_TYPES'], {}), '(EVENT_TYPES)\n', (2889, 2902), False, 'from click import option, Choice\n'), ((3004, 3034), 'click.Choice', 'Choice', (["('SUCCESS', 'FAILURE')"], {}), "(('SUCCESS', 'FAILURE'))\n", (3010, 3034), False, 'from click import option, 
Choice\n'), ((3535, 3556), 'click.Choice', 'Choice', (['SUBJECT_TYPES'], {}), '(SUBJECT_TYPES)\n', (3541, 3556), False, 'from click import option, Choice\n')]
import os from tqdm import tqdm import numpy as np import pandas as pd import os import pdb import cv2 import time import json import torch import random import scipy import logging import traceback import numpy as np from datetime import datetime # from config import HOME from tensorboard_logger import log_value, log_images from matplotlib import pyplot as plt plt.switch_backend("agg") def logger_init(save_folder): mkdir(save_folder) logging.basicConfig( filename=os.path.join(save_folder, "log.txt"), filemode="a", level=logging.DEBUG, format="%(asctime)s %(message)s", datefmt="%H:%M:%S", ) console = logging.StreamHandler() logger = logging.getLogger(__name__) logger.addHandler(console) return logger def plot_ROC(roc, targets, predictions, phase, epoch, folder): roc_plot_folder = os.path.join(folder, "ROC_plots") mkdir(os.path.join(roc_plot_folder)) fpr, tpr, thresholds = roc_curve(targets, predictions) roc_plot_name = "ROC_%s_%s_%0.4f" % (phase, epoch, roc) roc_plot_path = os.path.join(roc_plot_folder, roc_plot_name + ".jpg") fig = plt.figure(figsize=(10, 5)) plt.plot([0, 1], [0, 1], linestyle="--") plt.plot(fpr, tpr, marker=".") plt.legend(["diagonal-line", roc_plot_name]) fig.savefig(roc_plot_path, bbox_inches="tight", pad_inches=0) plt.close(fig) # see footnote [1] plot = cv2.imread(roc_plot_path) log_images(roc_plot_name, [plot], epoch) def print_time(log, start, string): diff = time.time() - start log(string + ": %02d:%02d" % (diff // 60, diff % 60)) def iter_log(log, phase, epoch, iteration, epoch_size, loss, start): diff = time.time() - start log( "%s epoch: %d (%d/%d) loss: %.4f || %02d:%02d", phase, epoch, iteration, epoch_size, loss.item(), diff // 60, diff % 60, ) def mkdir(folder): if not os.path.exists(folder): os.mkdir(folder) def save_hyperparameters(trainer, remark): hp_file = os.path.join(trainer.save_folder, "parameters.txt") time_now = datetime.now() augmentations = trainer.dataloaders["train"].dataset.transforms.transforms # pdb.set_trace() 
string_to_write = ( f"Time: {time_now}\n" + f"model_name: {trainer.model_name}\n" + f"train_df_name: {trainer.train_df_name}\n" #+ f"images_folder: {trainer.images_folder}\n" + f"resume: {trainer.resume}\n" + f"pretrained: {trainer.pretrained}\n" + f"pretrained_path: {trainer.pretrained_path}\n" + f"folder: {trainer.folder}\n" + f"fold: {trainer.fold}\n" + f"total_folds: {trainer.total_folds}\n" + f"num_samples: {trainer.num_samples}\n" + f"sampling class weights: {trainer.class_weights}\n" + f"size: {trainer.size}\n" + f"top_lr: {trainer.top_lr}\n" + f"base_lr: {trainer.base_lr}\n" + f"num_workers: {trainer.num_workers}\n" + f"batchsize: {trainer.batch_size}\n" + f"momentum: {trainer.momentum}\n" + f"mean: {trainer.mean}\n" + f"std: {trainer.std}\n" + f"start_epoch: {trainer.start_epoch}\n" + f"augmentations: {augmentations}\n" + f"criterion: {trainer.criterion}\n" + f"optimizer: {trainer.optimizer}\n" + f"remark: {remark}\n" ) with open(hp_file, "a") as f: f.write(string_to_write) print(string_to_write) def seed_pytorch(seed=69): random.seed(seed) os.environ["PYTHONHASHSEED"] = str(seed) np.random.seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True
[ "logging.getLogger", "os.path.exists", "logging.StreamHandler", "matplotlib.pyplot.plot", "os.path.join", "tensorboard_logger.log_images", "random.seed", "time.time", "matplotlib.pyplot.close", "datetime.datetime.now", "matplotlib.pyplot.figure", "numpy.random.seed", "os.mkdir", "matplotli...
[((366, 391), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (384, 391), True, 'from matplotlib import pyplot as plt\n'), ((668, 691), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (689, 691), False, 'import logging\n'), ((705, 732), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (722, 732), False, 'import logging\n'), ((870, 903), 'os.path.join', 'os.path.join', (['folder', '"""ROC_plots"""'], {}), "(folder, 'ROC_plots')\n", (882, 903), False, 'import os\n'), ((1084, 1137), 'os.path.join', 'os.path.join', (['roc_plot_folder', "(roc_plot_name + '.jpg')"], {}), "(roc_plot_folder, roc_plot_name + '.jpg')\n", (1096, 1137), False, 'import os\n'), ((1148, 1175), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (1158, 1175), True, 'from matplotlib import pyplot as plt\n'), ((1180, 1220), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'linestyle': '"""--"""'}), "([0, 1], [0, 1], linestyle='--')\n", (1188, 1220), True, 'from matplotlib import pyplot as plt\n'), ((1225, 1255), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'marker': '"""."""'}), "(fpr, tpr, marker='.')\n", (1233, 1255), True, 'from matplotlib import pyplot as plt\n'), ((1260, 1304), 'matplotlib.pyplot.legend', 'plt.legend', (["['diagonal-line', roc_plot_name]"], {}), "(['diagonal-line', roc_plot_name])\n", (1270, 1304), True, 'from matplotlib import pyplot as plt\n'), ((1375, 1389), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (1384, 1389), True, 'from matplotlib import pyplot as plt\n'), ((1422, 1447), 'cv2.imread', 'cv2.imread', (['roc_plot_path'], {}), '(roc_plot_path)\n', (1432, 1447), False, 'import cv2\n'), ((1452, 1492), 'tensorboard_logger.log_images', 'log_images', (['roc_plot_name', '[plot]', 'epoch'], {}), '(roc_plot_name, [plot], epoch)\n', (1462, 1492), False, 'from tensorboard_logger import log_value, 
log_images\n'), ((2063, 2114), 'os.path.join', 'os.path.join', (['trainer.save_folder', '"""parameters.txt"""'], {}), "(trainer.save_folder, 'parameters.txt')\n", (2075, 2114), False, 'import os\n'), ((2130, 2144), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2142, 2144), False, 'from datetime import datetime\n'), ((3525, 3542), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3536, 3542), False, 'import random\n'), ((3592, 3612), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3606, 3612), True, 'import numpy as np\n'), ((3617, 3645), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (3639, 3645), False, 'import torch\n'), ((914, 943), 'os.path.join', 'os.path.join', (['roc_plot_folder'], {}), '(roc_plot_folder)\n', (926, 943), False, 'import os\n'), ((1542, 1553), 'time.time', 'time.time', ([], {}), '()\n', (1551, 1553), False, 'import time\n'), ((1703, 1714), 'time.time', 'time.time', ([], {}), '()\n', (1712, 1714), False, 'import time\n'), ((1955, 1977), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (1969, 1977), False, 'import os\n'), ((1987, 2003), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (1995, 2003), False, 'import os\n'), ((489, 525), 'os.path.join', 'os.path.join', (['save_folder', '"""log.txt"""'], {}), "(save_folder, 'log.txt')\n", (501, 525), False, 'import os\n')]
#!/usr/bin/env python import logging import sys import imp import os from flask import Flask, request, abort, g app = Flask(__name__) @app.route('/healthz', methods=['GET']) def healthz(): return "", 200, { 'Content-Type': 'text/plain' } def configure_logging(logLevel): global app root = logging.getLogger() root.setLevel(logLevel) ch = logging.StreamHandler(sys.stdout) ch.setLevel(logLevel) ch.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')) app.logger.addHandler(ch) configure_logging(logging.DEBUG) app.logger.info("Listening port 8080 ...") app.run(host='0.0.0.0', port=8080)
[ "logging.getLogger", "logging.Formatter", "logging.StreamHandler", "flask.Flask" ]
[((121, 136), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (126, 136), False, 'from flask import Flask, request, abort, g\n'), ((306, 325), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (323, 325), False, 'import logging\n'), ((363, 396), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (384, 396), False, 'import logging\n'), ((443, 505), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(levelname)s - %(message)s')\n", (460, 505), False, 'import logging\n')]
#!/usr/bin/env python3 """Design probes for genome capture. This is the main executable of CATCH for probe design. """ import argparse import importlib import logging import os import random from catch import coverage_analysis from catch import probe from catch.filter import adapter_filter from catch.filter import duplicate_filter from catch.filter import fasta_filter from catch.filter import n_expansion_filter from catch.filter import near_duplicate_filter from catch.filter import polya_filter from catch.filter import probe_designer from catch.filter import reverse_complement_filter from catch.filter import set_cover_filter from catch.utils import cluster from catch.utils import ncbi_neighbors from catch.utils import seq_io, version, log __author__ = '<NAME> <<EMAIL>>' def main(args): logger = logging.getLogger(__name__) # Set NCBI API key if args.ncbi_api_key: ncbi_neighbors.ncbi_api_key = args.ncbi_api_key # Read the genomes from FASTA sequences genomes_grouped = [] genomes_grouped_names = [] for ds in args.dataset: if ds.startswith('collection:'): # Process a collection of datasets collection_name = ds[len('collection:'):] try: collection = importlib.import_module( 'catch.datasets.collections.' 
+ collection_name) except ImportError: raise ValueError("Unknown dataset collection %s" % collection_name) for name, dataset in collection.import_all(): genomes_grouped += [seq_io.read_dataset_genomes(dataset)] genomes_grouped_names += [name] elif ds.startswith('download:'): # Download a FASTA for an NCBI taxonomic ID taxid = ds[len('download:'):] if args.write_taxid_acc: taxid_fn = os.path.join(args.write_taxid_acc, str(taxid) + '.txt') else: taxid_fn = None if '-' in taxid: taxid, segment = taxid.split('-') else: segment = None ds_fasta_tf = ncbi_neighbors.construct_fasta_for_taxid(taxid, segment=segment, write_to=taxid_fn) genomes_grouped += [seq_io.read_genomes_from_fasta(ds_fasta_tf.name)] genomes_grouped_names += ['taxid:' + str(taxid)] ds_fasta_tf.close() elif os.path.isfile(ds): # Process a custom fasta file with sequences genomes_grouped += [seq_io.read_genomes_from_fasta(ds)] genomes_grouped_names += [os.path.basename(ds)] else: # Process an individual dataset try: dataset = importlib.import_module( 'catch.datasets.' 
+ ds) except ImportError: raise ValueError("Unknown file or dataset '%s'" % ds) genomes_grouped += [seq_io.read_dataset_genomes(dataset)] genomes_grouped_names += [ds] if (args.limit_target_genomes and args.limit_target_genomes_randomly_with_replacement): raise Exception(("Cannot --limit-target-genomes and " "--limit-target-genomes-randomly-with-replacement at " "the same time")) elif args.limit_target_genomes: genomes_grouped = [genomes[:args.limit_target_genomes] for genomes in genomes_grouped] elif args.limit_target_genomes_randomly_with_replacement: k = args.limit_target_genomes_randomly_with_replacement genomes_grouped = [random.choices(genomes, k=k) for genomes in genomes_grouped] # Store the FASTA paths of blacklisted genomes blacklisted_genomes_fasta = [] if args.blacklist_genomes: for bg in args.blacklist_genomes: if os.path.isfile(bg): # Process a custom fasta file with sequences blacklisted_genomes_fasta += [bg] else: # Process an individual dataset try: dataset = importlib.import_module( 'catch.datasets.' 
+ bg) except ImportError: raise ValueError("Unknown file or dataset '%s'" % bg) for fp in dataset.fasta_paths: blacklisted_genomes_fasta += [fp] # Setup and verify parameters related to probe length if not args.lcf_thres: args.lcf_thres = args.probe_length if args.probe_stride > args.probe_length: logger.warning(("PROBE_STRIDE (%d) is greater than PROBE_LENGTH " "(%d), which is usually undesirable and may lead " "to undefined behavior"), args.probe_stride, args.probe_length) if args.lcf_thres > args.probe_length: logger.warning(("LCF_THRES (%d) is greater than PROBE_LENGTH " "(%d), which is usually undesirable and may lead " "to undefined behavior"), args.lcf_thres, args.probe_length) if args.island_of_exact_match > args.probe_length: logger.warning(("ISLAND_OF_EXACT_MATCH (%d) is greater than " "PROBE_LENGTH (%d), which is usually undesirable " "and may lead to undefined behavior"), args.island_of_exact_match, args.probe_length) # Setup and verify parameters related to k-mer length in probe map if args.kmer_probe_map_k: # Check that k is sufficiently small if args.kmer_probe_map_k > args.probe_length: raise Exception(("KMER_PROBE_MAP_K (%d) exceeds PROBE_LENGTH " "(%d), which is not permitted") % (args.kmer_probe_map_k, args.probe_length)) # Use this value for the SetCoverFilter, AdapterFilter, and # the Analyzer kmer_probe_map_k_scf = args.kmer_probe_map_k kmer_probe_map_k_af = args.kmer_probe_map_k kmer_probe_map_k_analyzer = args.kmer_probe_map_k else: if args.probe_length <= 20: logger.warning(("PROBE_LENGTH (%d) is small; you may want to " "consider setting --kmer-probe-map-k to be " "small as well in order to be more sensitive " "in mapping candidate probes to target sequence"), args.probe_length) # Use a default k of 20 for the SetCoverFilter and AdapterFilter, # and 10 for the Analyzer since we would like to be more sensitive # (potentially at the cost of slower runtime) for the latter kmer_probe_map_k_scf = 20 kmer_probe_map_k_af = 20 
kmer_probe_map_k_analyzer = 10 # Set the maximum number of processes in multiprocessing pools if args.max_num_processes: probe.set_max_num_processes_for_probe_finding_pools( args.max_num_processes) cluster.set_max_num_processes_for_creating_distance_matrix( args.max_num_processes) # Raise exceptions or warn based on use of adapter arguments if args.add_adapters: if not (args.adapter_a or args.adapter_b): logger.warning(("Adapter sequences will be added, but default " "sequences will be used; to provide adapter " "sequences, use --adapter-a and --adapter-b")) else: if args.adapter_a or args.adapter_b: raise Exception(("Adapter sequences were provided with " "--adapter-a and --adapter-b, but --add-adapters is required " "to add adapter sequences onto the ends of probes")) # Do not allow both --small-seq-skip and --small-seq-min, since they # have different intentions if args.small_seq_skip is not None and args.small_seq_min is not None: raise Exception(("Both --small-seq-skip and --small-seq-min were " "specified, but both cannot be used together")) # Check arguments involving clustering if args.cluster_and_design_separately and args.identify: raise Exception(("Cannot use --cluster-and-design-separately with " "--identify, because clustering collapses genome groupings into " "one")) if args.cluster_from_fragments and not args.cluster_and_design_separately: raise Exception(("Cannot use --cluster-from-fragments without also " "setting --cluster-and-design-separately")) # Check for whether a custom hybridization function was provided if args.custom_hybridization_fn: custom_cover_range_fn = tuple(args.custom_hybridization_fn) else: custom_cover_range_fn = None if args.custom_hybridization_fn_tolerant: custom_cover_range_tolerant_fn = tuple(args.custom_hybridization_fn_tolerant) else: custom_cover_range_tolerant_fn = None # Setup the filters # The filters we use are, in order: filters = [] # [Optional] # Fasta filter (ff) -- leave out candidate probes if 
args.filter_from_fasta: ff = fasta_filter.FastaFilter(args.filter_from_fasta, skip_reverse_complements=True) filters += [ff] # [Optional] # Poly(A) filter (paf) -- leave out probes with stretches of 'A' or 'T' if args.filter_polya: polya_length, polya_mismatches = args.filter_polya if polya_length > args.probe_length: logger.warning(("Length of poly(A) stretch to filter (%d) is " "greater than PROBE_LENGTH (%d), which is usually " "undesirable"), polya_length, args.probe_length) if polya_length < 10: logger.warning(("Length of poly(A) stretch to filter (%d) is " "short, and may lead to many probes being " "filtered"), polya_length) if polya_mismatches > 10: logger.warning(("Number of mismatches to tolerate when searching " "for poly(A) stretches (%d) is high, and may " "lead to many probes being filtered"), polya_mismatches) paf = polya_filter.PolyAFilter(polya_length, polya_mismatches) filters += [paf] # Duplicate filter (df) -- condense all candidate probes that # are identical down to one; this is not necessary for # correctness, as the set cover filter achieves the same task # implicitly, but it does significantly lower runtime by # decreasing the input size to the set cover filter # Near duplicate filter (ndf) -- condense candidate probes that # are near-duplicates down to one using locality-sensitive # hashing; like the duplicate filter, this is not necessary # but can significantly lower runtime and reduce memory usage # (even more than the duplicate filter) if (args.filter_with_lsh_hamming is not None and args.filter_with_lsh_minhash is not None): raise Exception(("Cannot use both --filter-with-lsh-hamming " "and --filter-with-lsh-minhash")) if args.filter_with_lsh_hamming is not None: if args.filter_with_lsh_hamming > args.mismatches: logger.warning(("Setting FILTER_WITH_LSH_HAMMING (%d) to be greater " "than MISMATCHES (%d) may cause the probes to achieve less " "than the desired coverage"), args.filter_with_lsh_hamming, args.mismatches) ndf = 
near_duplicate_filter.NearDuplicateFilterWithHammingDistance( args.filter_with_lsh_hamming, args.probe_length) filters += [ndf] elif args.filter_with_lsh_minhash is not None: ndf = near_duplicate_filter.NearDuplicateFilterWithMinHash( args.filter_with_lsh_minhash) filters += [ndf] else: df = duplicate_filter.DuplicateFilter() filters += [df] # Set cover filter (scf) -- solve the problem by treating it as # an instance of the set cover problem scf = set_cover_filter.SetCoverFilter( mismatches=args.mismatches, lcf_thres=args.lcf_thres, island_of_exact_match=args.island_of_exact_match, mismatches_tolerant=args.mismatches_tolerant, lcf_thres_tolerant=args.lcf_thres_tolerant, island_of_exact_match_tolerant=args.island_of_exact_match_tolerant, custom_cover_range_fn=custom_cover_range_fn, custom_cover_range_tolerant_fn=custom_cover_range_tolerant_fn, identify=args.identify, blacklisted_genomes=blacklisted_genomes_fasta, coverage=args.coverage, cover_extension=args.cover_extension, cover_groupings_separately=args.cover_groupings_separately, kmer_probe_map_k=kmer_probe_map_k_scf, kmer_probe_map_use_native_dict=args.use_native_dict_when_finding_tolerant_coverage) filters += [scf] # [Optional] # Adapter filter (af) -- add adapters to both the 5' and 3' ends # of each probe if args.add_adapters: # Set default adapter sequences, if not provided if args.adapter_a: adapter_a = tuple(args.adapter_a) else: adapter_a = ('ATACGCCATGCTGGGTCTCC', 'CGTACTTGGGAGTCGGCCAT') if args.adapter_b: adapter_b = tuple(args.adapter_b) else: adapter_b = ('AGGCCCTGGCTGCTGATATG', 'GACCTTTTGGGACAGCGGTG') af = adapter_filter.AdapterFilter(adapter_a, adapter_b, mismatches=args.mismatches, lcf_thres=args.lcf_thres, island_of_exact_match=\ args.island_of_exact_match, custom_cover_range_fn=\ custom_cover_range_fn, kmer_probe_map_k=kmer_probe_map_k_af) filters += [af] # [Optional] # N expansion filter (nef) -- expand Ns in probe sequences # to avoid ambiguity if args.expand_n is not None: nef = 
n_expansion_filter.NExpansionFilter( limit_n_expansion_randomly=args.expand_n) filters += [nef] # [Optional] # Reverse complement (rc) -- add the reverse complement of each # probe that remains if args.add_reverse_complements: rc = reverse_complement_filter.ReverseComplementFilter() filters += [rc] # If requested, don't apply the set cover filter if args.skip_set_cover: filter_before_scf = filters[filters.index(scf) - 1] filters.remove(scf) # Define parameters for clustering sequences if args.cluster_and_design_separately: cluster_threshold = args.cluster_and_design_separately if args.skip_set_cover: cluster_merge_after = filter_before_scf else: cluster_merge_after = scf cluster_fragment_length = args.cluster_from_fragments else: cluster_threshold = None cluster_merge_after = None cluster_fragment_length = None # Design the probes pb = probe_designer.ProbeDesigner(genomes_grouped, filters, probe_length=args.probe_length, probe_stride=args.probe_stride, allow_small_seqs=args.small_seq_min, seq_length_to_skip=args.small_seq_skip, cluster_threshold=cluster_threshold, cluster_merge_after=cluster_merge_after, cluster_fragment_length=cluster_fragment_length) pb.design() # Write the final probes to the file args.output_probes seq_io.write_probe_fasta(pb.final_probes, args.output_probes) if (args.print_analysis or args.write_analysis_to_tsv or args.write_sliding_window_coverage or args.write_probe_map_counts_to_tsv): analyzer = coverage_analysis.Analyzer( pb.final_probes, args.mismatches, args.lcf_thres, genomes_grouped, genomes_grouped_names, island_of_exact_match=args.island_of_exact_match, custom_cover_range_fn=custom_cover_range_fn, cover_extension=args.cover_extension, kmer_probe_map_k=kmer_probe_map_k_analyzer, rc_too=args.add_reverse_complements) analyzer.run() if args.write_analysis_to_tsv: analyzer.write_data_matrix_as_tsv( args.write_analysis_to_tsv) if args.write_sliding_window_coverage: analyzer.write_sliding_window_coverage( args.write_sliding_window_coverage) 
        if args.write_probe_map_counts_to_tsv:
            analyzer.write_probe_map_counts(
                args.write_probe_map_counts_to_tsv)
        if args.print_analysis:
            analyzer.print_analysis()
    else:
        # Just print the number of probes
        print(len(pb.final_probes))


if __name__ == "__main__":
    # Script entry point: build the command-line interface, parse the
    # arguments, configure logging, and dispatch to main()
    parser = argparse.ArgumentParser()

    # Input data
    parser.add_argument('dataset', nargs='+',
        help=("One or more target datasets (e.g., one per species). Each "
              "dataset can be specified in one of multiple ways. (a) If "
              "dataset is in the format 'download:TAXID', then CATCH downloads "
              "from NCBI all whole genomes for the NCBI taxonomy with id "
              "TAXID, and uses these sequences as input. (b) If dataset is "
              "a path to a FASTA file, then its sequences are read and used "
              "as input. (c) Otherwise, it is assumed that this is a label "
              "for a dataset included in this package (e.g., 'zika'). If "
              "the label starts with 'collection:' (e.g., 'collection:viruses"
              "_with_human_host'), then this reads from an available "
              "collection of datasets. For segmented viruses, the format "
              "for NCBI downloads can also be 'download:TAXID-SEGMENT'."))

    # Outputting probes
    parser.add_argument('-o', '--output-probes', required=True,
        help=("The file to which all final probes should be "
              "written; they are written in FASTA format"))

    # Outputting downloaded data
    parser.add_argument('--write-taxid-acc',
        help=("If 'download:' labels are used in datasets, write downloaded "
              "accessions to a file in this directory. Accessions are written "
              "to WRITE_TAXID_ACC/TAXID.txt"))

    # Parameters on probe length and stride
    parser.add_argument('-pl', '--probe-length', type=int, default=100,
        help=("(Optional) Make probes be PROBE_LENGTH nt long"))
    parser.add_argument('-ps', '--probe-stride', type=int, default=50,
        help=("(Optional) Generate candidate probes from the input "
              "that are separated by PROBE_STRIDE nt"))

    # Parameters governing probe hybridization
    parser.add_argument('-m', '--mismatches', type=int, default=0,
        help=("(Optional) Allow for MISMATCHES mismatches when determining "
              "whether a probe covers a sequence"))
    parser.add_argument('-l', '--lcf-thres', type=int,
        help=("(Optional) Say that a portion of a probe covers a portion "
              "of a sequence if the two share a substring with at most "
              "MISMATCHES mismatches that has length >= LCF_THRES "
              "nt; if unspecified, this is set to PROBE_LENGTH"))
    parser.add_argument('--island-of-exact-match', type=int, default=0,
        help=("(Optional) When determining whether a probe covers a "
              "sequence, require that there be an exact match (i.e., "
              "no mismatches) of length at least ISLAND_OF_EXACT_"
              "MATCH nt between a portion of the probe and a portion "
              "of the sequence"))

    # Custom function (dynamically loaded) to determine probe hybridization
    # When set, this makes values of the above arguments (--mismatches,
    # --lcf-thres, and --island-of-exact-match) meaningless
    parser.add_argument('--custom-hybridization-fn', nargs=2,
        help=("(Optional) Args: <PATH> <FUNC>; PATH is a path to a Python "
              "module (.py file) and FUNC is a string giving the name of "
              "a function in that module. FUNC provides a custom model of "
              "hybridization between a probe and target sequence to use in "
              "the probe set design. If this is set, the arguments "
              "--mismatches, --lcf-thres, and --island-of-exact-match are "
              "not used because these are meant for the default model of "
              "hybridization. The function FUNC in PATH is dynamically "
              "loaded to use when determining whether a probe hybridizes to "
              "a target sequence (and, if so, what portion). FUNC must "
              "accept the following arguments in order, though it "
              "may choose to ignore some values: (1) array giving sequence "
              "of a probe; (2) str giving subsequence of target sequence to "
              "which the probe may hybridize, of the same length as the "
              "given probe sequence; (3) int giving the position in the "
              "probe (equivalently, the target subsequence) of the start "
              "of a k-mer around which the probe and target subsequence "
              "are anchored (the probe and target subsequence are aligned "
              "using this k-mer as an anchor); (4) int giving the end "
              "position (exclusive) of the anchor k-mer; (5) int giving the "
              "full length of the probe (the probe provided in (1) may be "
              "cutoff on an end if it extends further than where the "
              "target sequence ends); (6) int giving the full length of the "
              "target sequence of which the subsequence in (2) is part. "
              "FUNC must return None if it deems that the probe does not "
              "hybridize to the target subsequence; otherwise, it must "
              "return a tuple (start, end) where start is an int giving "
              "the start position in the probe (equivalently, in the "
              "target subsequence) at which the probe will hybridize to "
              "the target subsequence, and end is an int (exclusive) giving "
              "the end position of the hybridization."))

    # Desired coverage of target genomes
    def check_coverage(val):
        # argparse type converter: accept either a fraction in [0,1] or
        # an int > 1 giving a number of bp; reject everything else
        fval = float(val)
        ival = int(fval)
        if fval >= 0 and fval <= 1:
            # a float in [0,1] giving fractional coverage
            return fval
        elif fval > 1 and fval == ival:
            # an int > 1 giving number of bp to cover
            return ival
        else:
            raise argparse.ArgumentTypeError(("%s is an invalid coverage "
                "value") % val)
    parser.add_argument('-c', '--coverage', type=check_coverage,
        default=1.0,
        help=("If this is a float in [0,1], it gives the fraction of "
              "each target genome that must be covered by the selected "
              "probes; if this is an int > 1, it gives the number of "
              "bp of each target genome that must be covered by the "
              "selected probes"))

    # Amount of cover extension to assume
    parser.add_argument('-e', '--cover-extension', type=int, default=0,
        help=("Extend the coverage of each side of a probe by COVER_EXTENSION "
              "nt. That is, a probe covers a region that consists of the "
              "portion of a sequence it hybridizes to, as well as this "
              "number of nt on each side of that portion. This is useful "
              "in modeling hybrid selection, where a probe hybridizes to"
              "a fragment that includes the region targeted by the probe, "
              "along with surrounding portions of the sequence. Increasing "
              "its value should reduce the number of probes required to "
              "achieve the desired coverage."))

    # Differential identification and blacklisting
    parser.add_argument('-i', '--identify', dest="identify",
        action="store_true",
        help=("Design probes meant to make it possible to identify "
              "nucleic acid from a particular input dataset against "
              "the other datasets; when set, the coverage should "
              "generally be small"))
    parser.add_argument('--blacklist-genomes', nargs='+',
        help=("One or more blacklisted genomes; penalize probes based "
              "on how much of each of these genomes they cover. If "
              "the value is a path to a file, then that file is treated "
              "as a FASTA file and its sequences are read. Otherwise, "
              "it is assumed that this is a label for a dataset included "
              "in this package (e.g., 'zika')."))
    parser.add_argument('-mt', '--mismatches-tolerant', type=int,
        help=("(Optional) A more tolerant value for 'mismatches'; "
              "this should be greater than the value of MISMATCHES. "
              "Allows for capturing more possible hybridizations "
              "(i.e., more sensitivity) when designing probes for "
              "identification or when genomes are blacklisted."))
    parser.add_argument('-lt', '--lcf-thres-tolerant', type=int,
        help=("(Optional) A more tolerant value for 'lcf_thres'; "
              "this should be less than LCF_THRES. "
              "Allows for capturing more possible hybridizations "
              "(i.e., more sensitivity) when designing probes for "
              "identification or when genomes are blacklisted."))
    parser.add_argument('--island-of-exact-match-tolerant', type=int,
        default=0,
        help=("(Optional) A more tolerant value for 'island_of_"
              "exact_match'; this should be less than ISLAND_OF_ "
              "EXACT_MATCH. Allows for capturing more "
              "possible hybridizations (i.e., more sensitivity) "
              "when designing probes for identification or when "
              "genomes are blacklisted."))
    parser.add_argument('--custom-hybridization-fn-tolerant', nargs=2,
        help=("(Optional) A more tolerant model than the one "
              "implemented in custom_hybridization_fn. This should capture "
              "more possible hybridizations (i.e., be more sensitive) "
              "when designing probes for identification or when genomes "
              "are blacklisted. See --custom-hybridization-fn for details "
              "of how this function should be implemented and provided."))

    # Outputting coverage analyses
    parser.add_argument('--print-analysis', dest="print_analysis",
        action="store_true",
        help="Print analysis of the probe set's coverage")
    parser.add_argument('--write-analysis-to-tsv',
        help=("(Optional) The file to which to write a TSV-formatted matrix "
              "of the probe set's coverage analysis"))
    parser.add_argument('--write-sliding-window-coverage',
        help=("(Optional) The file to which to write the average coverage "
              "achieved by the probe set within sliding windows of each "
              "target genome"))
    parser.add_argument('--write-probe-map-counts-to-tsv',
        help=("(Optional) The file to which to write a TSV-formatted list of "
              "the number of sequences each probe maps to. This explicitly "
              "does not count reverse complements."))

    # Accepting probes as input and skipping set cover process
    parser.add_argument('--filter-from-fasta',
        help=("(Optional) A FASTA file from which to select candidate probes. "
              "Before running any other filters, keep only the candidate "
              "probes that are equal to sequences in the file and remove "
              "all probes not equal to any of these sequences. This, by "
              "default, ignores sequences in the file whose header contains "
              "the string 'reverse complement'; that is, if there is some "
              "probe with sequence S, it may be filtered out (even if there "
              "is a sequence S in the file) if the header of S in the file "
              "contains 'reverse complement'. This is useful if we already "
              "have probes decided by the set cover filter, but simply "
              "want to process them further by, e.g., adding adapters or "
              "running a coverage analysis. For example, if we have already "
              "run the time-consuming set cover filter and have a FASTA "
              "containing those probes, we can provide a path to that "
              "FASTA file for this argument, and also provide the "
              "--skip-set-cover argument, in order to add adapters to "
              "those probes without having to re-run the set cover filter."))
    parser.add_argument('--skip-set-cover', dest="skip_set_cover",
        action="store_true",
        help=("Skip the set cover filter; this is useful when we "
              "wish to see the probes generated from only the "
              "duplicate and reverse complement filters, to gauge "
              "the effects of the set cover filter"))

    # Adding adapters
    parser.add_argument('--add-adapters', dest="add_adapters",
        action="store_true",
        help=("Add adapters to the ends of probes; to specify adapter "
              "sequences, use --adapter-a and --adapter-b"))
    parser.add_argument('--adapter-a', nargs=2,
        help=("(Optional) Args: <X> <Y>; Custom A adapter to use; two ordered "
              "where X is the A adapter sequence to place on the 5' end of "
              "a probe and Y is the A adapter sequence to place on the 3' "
              "end of a probe"))
    parser.add_argument('--adapter-b', nargs=2,
        help=("(Optional) Args: <X> <Y>; Custom B adapter to use; two ordered "
              "where X is the B adapter sequence to place on the 5' end of "
              "a probe and Y is the B adapter sequence to place on the 3' "
              "end of a probe"))

    # Filtering poly(A) sequence from probes
    parser.add_argument('--filter-polya', nargs=2, type=int,
        help=("(Optional) Args: <X> <Y> (integers); do not output any probe "
              "that contains a stretch of X or more 'A' bases, tolerating "
              "up to Y mismatches (and likewise for 'T' bases)"))

    # Adjusting probe output
    parser.add_argument('--add-reverse-complements',
        dest="add_reverse_complements", action="store_true",
        help=("Add to the output the reverse complement of each probe"))
    parser.add_argument('--expand-n', nargs='?', type=int,
        default=None, const=3,
        help=("Expand each probe so that 'N' bases are replaced by real "
              "bases; for example, the probe 'ANA' would be replaced "
              "with the probes 'AAA', 'ATA', 'ACA', and 'AGA'; this is "
              "done combinatorially across all 'N' bases in a probe, and "
              "thus the number of new probes grows exponentially with the "
              "number of 'N' bases in a probe. If followed by a command- "
              "line argument (INT), this only expands at most INT randomly "
              "selected N bases, and the rest are replaced with random "
              "unambiguous bases (default INT is 3)."))

    # Limiting input
    parser.add_argument('--limit-target-genomes', type=int,
        help=("(Optional) Use only the first LIMIT_TARGET_GENOMES target "
              "genomes in the dataset"))
    parser.add_argument('--limit-target-genomes-randomly-with-replacement',
        type=int,
        help=("(Optional) Randomly select LIMIT_TARGET_GENOMES_RANDOMLY_"
              "WITH_REPLACMENT target genomes in the dataset with "
              "replacement"))

    # Clustering input sequences
    def check_cluster_and_design_separately(val):
        # argparse type converter: accept an average nucleotide
        # dissimilarity in (0, 0.5]
        fval = float(val)
        if fval > 0 and fval <= 0.5:
            # a float in (0,0.5]
            return fval
        else:
            raise argparse.ArgumentTypeError(("%s is an invalid average "
                "nucleotide dissimilarity") % val)
    parser.add_argument('--cluster-and-design-separately',
        type=check_cluster_and_design_separately,
        help=("(Optional) If set, cluster all input sequences using their "
              "MinHash signatures, design probes separately on each cluster, "
              "and combine the resulting probes. This can significantly lower "
              "runtime and memory usage, but may lead to a suboptimal "
              "solution. The value CLUSTER_AND_DESIGN_SEPARATELY gives the "
              "inter-cluster distance threshold to merge clusters (1-ANI, "
              "where ANI is average nucleotide identity); higher values "
              "result in fewer clusters, and thus longer runtime. Values "
              "must be in (0,0.5], and generally should be around 0.1 or "
              "0.2. When used, this creates a separate genome for each "
              "input sequence -- it collapses all sequences, across both "
              "groups and genomes, into one list of sequences in one group. "
              "Therefore, genomes will not be grouped as specified in the "
              "input and sequences will not be grouped by genome, and "
              "differential identification is not supported"))
    parser.add_argument('--cluster-from-fragments', type=int,
        help=("(Optional) If set, break all sequences into sequences of "
              "length CLUSTER_FROM_FRAGMENTS nt, and cluster these fragments. "
              "This can be useful for improving runtime on input with "
              "especially large genomes, in which probes for different "
              "fragments can be designed separately. Values should generally "
              "be around 10,000. For this to be used, "
              "--cluster-and-design-separately must also be set."))

    # Filter candidate probes with LSH
    parser.add_argument('--filter-with-lsh-hamming', type=int,
        help=("(Optional) If set, filter candidate probes for near-"
              "duplicates using LSH with a family of hash functions that "
              "works with Hamming distance. FILTER_WITH_LSH_HAMMING gives "
              "the maximum Hamming distance at which to call near-"
              "duplicates; it should be commensurate with (but not greater "
              "than) MISMATCHES. Using this may significantly improve "
              "runtime and reduce memory usage by reducing the number of "
              "candidate probes to consider, but may lead to a slightly "
              "sub-optimal solution. It may also, particularly with "
              "relatively high values of FILTER_WITH_LSH_HAMMING, cause "
              "coverage obtained for each genome to be slightly less than "
              "the desired coverage (COVERAGE) when that desired coverage "
              "is the complete genome; it is recommended to also use "
              "--print-analysis or --write-analysis-to-tsv with this "
              "to see the coverage that is obtained."))
    def check_filter_with_lsh_minhash(val):
        # argparse type converter: accept a Jaccard distance in [0, 1]
        fval = float(val)
        if fval >= 0.0 and fval <= 1.0:
            # a float in [0,1]
            return fval
        else:
            raise argparse.ArgumentTypeError(("%s is an invalid Jaccard "
                "distance") % val)
    parser.add_argument('--filter-with-lsh-minhash',
        type=check_filter_with_lsh_minhash,
        help=("(Optional) If set, filter candidate probes for near-"
              "duplicates using LSH with a MinHash family. "
              "FILTER_WITH_LSH_MINHASH gives the maximum Jaccard distance "
              "(1 minus Jaccard similarity) at which to call near-duplicates; "
              "the Jaccard similarity is calculated by treating each probe "
              "as a set of overlapping 10-mers. Its value should be "
              "commensurate with parameter values determining whether a probe "
              "hybridizes to a target sequence, but this can be difficult "
              "to measure compared to the input for --filter-with-lsh-hamming. "
              "However, this allows more sensitivity in near-duplicate "
              "detection than --filter-with-lsh-hamming (e.g., if near-"
              "duplicates should involve probes shifted relative to each "
              "other). The same caveats mentioned in help for "
              "--filter-with-lsh-hamming also apply here. Values of "
              "FILTER_WITH_LSH_MINHASH above ~0.7 may start to require "
              "significant memory and runtime for near-duplicate detection."))

    # Miscellaneous technical adjustments
    parser.add_argument('--cover-groupings-separately',
        dest="cover_groupings_separately", action="store_true",
        help=("Run a separate instance of set cover with the target genomes "
              "from each grouping and pool (union) the resulting probes. "
              "When set, the software will run faster than when not set, but "
              "it may yield more probes than when it is not set."))
    parser.add_argument('--small-seq-skip', type=int,
        help=("(Optional) Do not create candidate probes from sequences "
              "whose length is <= SMALL_SEQ_SKIP. If set to (PROBE_LENGTH - "
              "1), this avoids the error raised when sequences are less "
              "than the probe length"))
    parser.add_argument('--small-seq-min', type=int,
        help=("(Optional) If set, allow sequences as input that are "
              "shorter than PROBE_LENGTH (when not set, the program will "
              "error on such input). SMALL_SEQ_MIN is the "
              "minimum sequence length that should be accepted as input. "
              "When a sequence is less than PROBE_LENGTH, a candidate "
              "probe is created that is equal to the sequence; thus, "
              "the output probes may have different lengths. Note that, "
              "when this is set, it might be a good idea to also set "
              "LCF_THRES to be a value smaller than PROBE_LENGTH -- "
              "e.g., the length of the shortest input sequence; otherwise, "
              "when a probe of length p_l is mapped to a sequence of length "
              "s_l, then lcf_thres is treated as being min(LCF_THRES, p_l, "
              "s_l) so that a probe is able to 'cover' a sequence shorter "
              "than the probe and so that a probe shorter than lcf_thres "
              "is able to 'cover' a sequence"))
    def check_max_num_processes(val):
        # argparse type converter: accept any int >= 1
        ival = int(val)
        if ival >= 1:
            return ival
        else:
            raise argparse.ArgumentTypeError(("MAX_NUM_PROCESSES must be "
                "an int >= 1"))
    parser.add_argument('--max-num-processes', type=check_max_num_processes,
        help=("(Optional) An int >= 1 that gives the maximum number of "
              "processes to use in multiprocessing pools; uses min(number "
              "of CPUs in the system, MAX_NUM_PROCESSES) processes"))
    parser.add_argument('--kmer-probe-map-k', type=int,
        help=("(Optional) Use this value (KMER_PROBE_LENGTH_K) as the "
              "k-mer length when constructing a map of k-mers to the probes "
              "that contain these k-mers. This map is used when mapping "
              "candidate probes to target sequences and the k-mers serve "
              "as seeds for calculating whether a candidate probe 'covers' "
              "a subsequence. The value should be sufficiently less than "
              "PROBE_LENGTH so that it can find mappings even when the "
              "candidate probe and target sequence are divergent. In "
              "particular, CATCH will try to find a value k >= "
              "KMER_PROBE_LENGTH_K (by default, >=20) such that k divides "
              "PROBE_LENGTH and k < PROBE_LENGTH / MISMATCHES (if "
              "MISMATCHES=0, then k=PROBE_LENGTH). It will then use this "
              "k as the k-mer length in mappings; if no such k exists, it "
              "will use a randomized approach with KMER_PROBE_LENGTH_K as "
              "the k-mer length. If --custom-hybridization-fn is set, "
              "it will always use the randomized approach with "
              "KMER_PROBE_LENGTH_K (by default, 20) as the k-mer length."))
    parser.add_argument('--use-native-dict-when-finding-tolerant-coverage',
        dest="use_native_dict_when_finding_tolerant_coverage",
        action="store_true",
        help=("When finding probe coverage for blacklisting and "
              "identification (i.e., when using tolerant parameters), "
              "use a native Python dict as the kmer_probe_map across "
              "processes, rather than the primitives in SharedKmerProbeMap "
              "that are more suited to sharing across processes. Depending "
              "on the input (particularly if there are many candidate probes) "
              "this may result in substantial memory usage; but it may provide "
              "an improvement in runtime when there are relatively few "
              "candidate probes and a very large blacklisted input"))
    parser.add_argument('--ncbi-api-key',
        help=("API key to use for NCBI e-utils. Using this increases the "
              "limit on requests/second and may prevent an IP address "
              "from being block due to too many requests"))

    # Log levels and version
    parser.add_argument('--debug', dest="log_level",
        action="store_const", const=logging.DEBUG,
        default=logging.WARNING,
        help=("Debug output"))
    parser.add_argument('--verbose', dest="log_level",
        action="store_const", const=logging.INFO,
        help=("Verbose output"))
    parser.add_argument('-V', '--version', action='version',
        version=version.get_version())

    args = parser.parse_args()
    log.configure_logging(args.log_level)
    main(args)
[ "logging.getLogger", "catch.utils.seq_io.write_probe_fasta", "catch.coverage_analysis.Analyzer", "catch.filter.near_duplicate_filter.NearDuplicateFilterWithMinHash", "catch.utils.version.get_version", "random.choices", "catch.filter.reverse_complement_filter.ReverseComplementFilter", "catch.filter.n_e...
[((816, 843), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (833, 843), False, 'import logging\n'), ((12299, 13067), 'catch.filter.set_cover_filter.SetCoverFilter', 'set_cover_filter.SetCoverFilter', ([], {'mismatches': 'args.mismatches', 'lcf_thres': 'args.lcf_thres', 'island_of_exact_match': 'args.island_of_exact_match', 'mismatches_tolerant': 'args.mismatches_tolerant', 'lcf_thres_tolerant': 'args.lcf_thres_tolerant', 'island_of_exact_match_tolerant': 'args.island_of_exact_match_tolerant', 'custom_cover_range_fn': 'custom_cover_range_fn', 'custom_cover_range_tolerant_fn': 'custom_cover_range_tolerant_fn', 'identify': 'args.identify', 'blacklisted_genomes': 'blacklisted_genomes_fasta', 'coverage': 'args.coverage', 'cover_extension': 'args.cover_extension', 'cover_groupings_separately': 'args.cover_groupings_separately', 'kmer_probe_map_k': 'kmer_probe_map_k_scf', 'kmer_probe_map_use_native_dict': 'args.use_native_dict_when_finding_tolerant_coverage'}), '(mismatches=args.mismatches, lcf_thres=args.\n lcf_thres, island_of_exact_match=args.island_of_exact_match,\n mismatches_tolerant=args.mismatches_tolerant, lcf_thres_tolerant=args.\n lcf_thres_tolerant, island_of_exact_match_tolerant=args.\n island_of_exact_match_tolerant, custom_cover_range_fn=\n custom_cover_range_fn, custom_cover_range_tolerant_fn=\n custom_cover_range_tolerant_fn, identify=args.identify,\n blacklisted_genomes=blacklisted_genomes_fasta, coverage=args.coverage,\n cover_extension=args.cover_extension, cover_groupings_separately=args.\n cover_groupings_separately, kmer_probe_map_k=kmer_probe_map_k_scf,\n kmer_probe_map_use_native_dict=args.\n use_native_dict_when_finding_tolerant_coverage)\n', (12330, 13067), False, 'from catch.filter import set_cover_filter\n'), ((15477, 15818), 'catch.filter.probe_designer.ProbeDesigner', 'probe_designer.ProbeDesigner', (['genomes_grouped', 'filters'], {'probe_length': 'args.probe_length', 'probe_stride': 'args.probe_stride', 
'allow_small_seqs': 'args.small_seq_min', 'seq_length_to_skip': 'args.small_seq_skip', 'cluster_threshold': 'cluster_threshold', 'cluster_merge_after': 'cluster_merge_after', 'cluster_fragment_length': 'cluster_fragment_length'}), '(genomes_grouped, filters, probe_length=args.\n probe_length, probe_stride=args.probe_stride, allow_small_seqs=args.\n small_seq_min, seq_length_to_skip=args.small_seq_skip,\n cluster_threshold=cluster_threshold, cluster_merge_after=\n cluster_merge_after, cluster_fragment_length=cluster_fragment_length)\n', (15505, 15818), False, 'from catch.filter import probe_designer\n'), ((16147, 16208), 'catch.utils.seq_io.write_probe_fasta', 'seq_io.write_probe_fasta', (['pb.final_probes', 'args.output_probes'], {}), '(pb.final_probes, args.output_probes)\n', (16171, 16208), False, 'from catch.utils import seq_io, version, log\n'), ((17493, 17518), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (17516, 17518), False, 'import argparse\n'), ((43054, 43091), 'catch.utils.log.configure_logging', 'log.configure_logging', (['args.log_level'], {}), '(args.log_level)\n', (43075, 43091), False, 'from catch.utils import seq_io, version, log\n'), ((7023, 7098), 'catch.probe.set_max_num_processes_for_probe_finding_pools', 'probe.set_max_num_processes_for_probe_finding_pools', (['args.max_num_processes'], {}), '(args.max_num_processes)\n', (7074, 7098), False, 'from catch import probe\n'), ((7120, 7207), 'catch.utils.cluster.set_max_num_processes_for_creating_distance_matrix', 'cluster.set_max_num_processes_for_creating_distance_matrix', (['args.max_num_processes'], {}), '(args.\n max_num_processes)\n', (7178, 7207), False, 'from catch.utils import cluster\n'), ((9271, 9350), 'catch.filter.fasta_filter.FastaFilter', 'fasta_filter.FastaFilter', (['args.filter_from_fasta'], {'skip_reverse_complements': '(True)'}), '(args.filter_from_fasta, skip_reverse_complements=True)\n', (9295, 9350), False, 'from catch.filter import fasta_filter\n'), 
((10415, 10471), 'catch.filter.polya_filter.PolyAFilter', 'polya_filter.PolyAFilter', (['polya_length', 'polya_mismatches'], {}), '(polya_length, polya_mismatches)\n', (10439, 10471), False, 'from catch.filter import polya_filter\n'), ((11757, 11871), 'catch.filter.near_duplicate_filter.NearDuplicateFilterWithHammingDistance', 'near_duplicate_filter.NearDuplicateFilterWithHammingDistance', (['args.filter_with_lsh_hamming', 'args.probe_length'], {}), '(args.\n filter_with_lsh_hamming, args.probe_length)\n', (11817, 11871), False, 'from catch.filter import near_duplicate_filter\n'), ((13686, 13937), 'catch.filter.adapter_filter.AdapterFilter', 'adapter_filter.AdapterFilter', (['adapter_a', 'adapter_b'], {'mismatches': 'args.mismatches', 'lcf_thres': 'args.lcf_thres', 'island_of_exact_match': 'args.island_of_exact_match', 'custom_cover_range_fn': 'custom_cover_range_fn', 'kmer_probe_map_k': 'kmer_probe_map_k_af'}), '(adapter_a, adapter_b, mismatches=args.\n mismatches, lcf_thres=args.lcf_thres, island_of_exact_match=args.\n island_of_exact_match, custom_cover_range_fn=custom_cover_range_fn,\n kmer_probe_map_k=kmer_probe_map_k_af)\n', (13714, 13937), False, 'from catch.filter import adapter_filter\n'), ((14446, 14523), 'catch.filter.n_expansion_filter.NExpansionFilter', 'n_expansion_filter.NExpansionFilter', ([], {'limit_n_expansion_randomly': 'args.expand_n'}), '(limit_n_expansion_randomly=args.expand_n)\n', (14481, 14523), False, 'from catch.filter import n_expansion_filter\n'), ((14726, 14777), 'catch.filter.reverse_complement_filter.ReverseComplementFilter', 'reverse_complement_filter.ReverseComplementFilter', ([], {}), '()\n', (14775, 14777), False, 'from catch.filter import reverse_complement_filter\n'), ((16389, 16737), 'catch.coverage_analysis.Analyzer', 'coverage_analysis.Analyzer', (['pb.final_probes', 'args.mismatches', 'args.lcf_thres', 'genomes_grouped', 'genomes_grouped_names'], {'island_of_exact_match': 'args.island_of_exact_match', 
'custom_cover_range_fn': 'custom_cover_range_fn', 'cover_extension': 'args.cover_extension', 'kmer_probe_map_k': 'kmer_probe_map_k_analyzer', 'rc_too': 'args.add_reverse_complements'}), '(pb.final_probes, args.mismatches, args.lcf_thres,\n genomes_grouped, genomes_grouped_names, island_of_exact_match=args.\n island_of_exact_match, custom_cover_range_fn=custom_cover_range_fn,\n cover_extension=args.cover_extension, kmer_probe_map_k=\n kmer_probe_map_k_analyzer, rc_too=args.add_reverse_complements)\n', (16415, 16737), False, 'from catch import coverage_analysis\n'), ((3930, 3948), 'os.path.isfile', 'os.path.isfile', (['bg'], {}), '(bg)\n', (3944, 3948), False, 'import os\n'), ((11970, 12057), 'catch.filter.near_duplicate_filter.NearDuplicateFilterWithMinHash', 'near_duplicate_filter.NearDuplicateFilterWithMinHash', (['args.filter_with_lsh_minhash'], {}), '(args.\n filter_with_lsh_minhash)\n', (12022, 12057), False, 'from catch.filter import near_duplicate_filter\n'), ((12114, 12148), 'catch.filter.duplicate_filter.DuplicateFilter', 'duplicate_filter.DuplicateFilter', ([], {}), '()\n', (12146, 12148), False, 'from catch.filter import duplicate_filter\n'), ((32935, 33025), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["('%s is an invalid average nucleotide dissimilarity' % val)"], {}), "(\n '%s is an invalid average nucleotide dissimilarity' % val)\n", (32961, 33025), False, 'import argparse\n'), ((36262, 36331), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["('%s is an invalid Jaccard distance' % val)"], {}), "('%s is an invalid Jaccard distance' % val)\n", (36288, 36331), False, 'import argparse\n'), ((39726, 39793), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""MAX_NUM_PROCESSES must be an int >= 1"""'], {}), "('MAX_NUM_PROCESSES must be an int >= 1')\n", (39752, 39793), False, 'import argparse\n'), ((42994, 43015), 'catch.utils.version.get_version', 'version.get_version', ([], {}), '()\n', (43013, 43015), False, 
'from catch.utils import seq_io, version, log\n'), ((1267, 1339), 'importlib.import_module', 'importlib.import_module', (["('catch.datasets.collections.' + collection_name)"], {}), "('catch.datasets.collections.' + collection_name)\n", (1290, 1339), False, 'import importlib\n'), ((2177, 2265), 'catch.utils.ncbi_neighbors.construct_fasta_for_taxid', 'ncbi_neighbors.construct_fasta_for_taxid', (['taxid'], {'segment': 'segment', 'write_to': 'taxid_fn'}), '(taxid, segment=segment, write_to=\n taxid_fn)\n', (2217, 2265), False, 'from catch.utils import ncbi_neighbors\n'), ((2469, 2487), 'os.path.isfile', 'os.path.isfile', (['ds'], {}), '(ds)\n', (2483, 2487), False, 'import os\n'), ((23385, 23452), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["('%s is an invalid coverage value' % val)"], {}), "('%s is an invalid coverage value' % val)\n", (23411, 23452), False, 'import argparse\n'), ((1604, 1640), 'catch.utils.seq_io.read_dataset_genomes', 'seq_io.read_dataset_genomes', (['dataset'], {}), '(dataset)\n', (1631, 1640), False, 'from catch.utils import seq_io, version, log\n'), ((2313, 2361), 'catch.utils.seq_io.read_genomes_from_fasta', 'seq_io.read_genomes_from_fasta', (['ds_fasta_tf.name'], {}), '(ds_fasta_tf.name)\n', (2343, 2361), False, 'from catch.utils import seq_io, version, log\n'), ((3667, 3695), 'random.choices', 'random.choices', (['genomes'], {'k': 'k'}), '(genomes, k=k)\n', (3681, 3695), False, 'import random\n'), ((4178, 4225), 'importlib.import_module', 'importlib.import_module', (["('catch.datasets.' + bg)"], {}), "('catch.datasets.' 
+ bg)\n", (4201, 4225), False, 'import importlib\n'), ((2578, 2612), 'catch.utils.seq_io.read_genomes_from_fasta', 'seq_io.read_genomes_from_fasta', (['ds'], {}), '(ds)\n', (2608, 2612), False, 'from catch.utils import seq_io, version, log\n'), ((2652, 2672), 'os.path.basename', 'os.path.basename', (['ds'], {}), '(ds)\n', (2668, 2672), False, 'import os\n'), ((2775, 2822), 'importlib.import_module', 'importlib.import_module', (["('catch.datasets.' + ds)"], {}), "('catch.datasets.' + ds)\n", (2798, 2822), False, 'import importlib\n'), ((2986, 3022), 'catch.utils.seq_io.read_dataset_genomes', 'seq_io.read_dataset_genomes', (['dataset'], {}), '(dataset)\n', (3013, 3022), False, 'from catch.utils import seq_io, version, log\n')]
""" Module that contains some example filters """ import numpy as np import matplotlib.pyplot as plt from graph import Node, Edge, Graph from resistor import Resistor from capacitor import Capacitor from diode import Diode from opamp import Opamp from wire import Wire from units import Units from filter import Filter def make_mxr_distortion_filter(): """ Return the MXR filter from: http://www.premierguitar.com/articles/mxr-distortion-plus-mods-1 Returns: Filter object """ probes = [] graph = Graph() # Knobs gain_param = 0.5 mix_param = 0.1 # Input / output node_in = Node(graph, fixed=True, source=True, label="Vin") node_out = Node(graph, output=True, label="Vout") # Supply node_4_5 = Node(graph, value=4.5, fixed=True, source=True, label="4.5V") node_gnd = Node(graph, value=0, fixed=True, source=True, label="GND") # Probe Vin probes.append(node_in) # Op amp plus section edge = Edge(graph, node_in, node_gnd, label="I1") capacitor = Capacitor(graph, .001 * Units.u, node_in, node_gnd, edge) graph.add_component(capacitor) node = Node(graph, label="V1") edge = Edge(graph, node_in, node, label="I2") #capacitor = Capacitor(graph, .01 * Units.u, node_in, node, edge) #graph.add_component(capacitor) wire = Wire(graph, node_in, node, edge) graph.add_component(wire) node_plus = Node(graph, label="V+") edge = Edge(graph, node, node_plus, label="I3") resistor = Resistor(graph, 10 * Units.K, node, node_plus, edge) graph.add_component(resistor) edge = Edge(graph, node_plus, node_4_5, label="I4") resistor = Resistor(graph, 1 * Units.M, node_plus, node_4_5, edge) graph.add_component(resistor) # Op amp minus section node = Node(graph, label="V2") edge = Edge(graph, node, node_gnd, label="I5") resistor = Resistor(graph, gain_param * (1 * Units.M), node, node_gnd, edge) graph.add_component(resistor) node_1 = Node(graph, label="V3") edge = Edge(graph, node, node_1, label="I6") resistor = Resistor(graph, 4.7 * Units.K, node, node_1, edge) graph.add_component(resistor) node_minus = 
Node(graph, label="V-") edge = Edge(graph, node_1, node_minus, label="I7") #capacitor = Capacitor(graph, 0.047 * Units.u, node_1, node_minus, edge) #graph.add_component(capacitor) wire = Wire(graph, node_1, node_minus, edge) graph.add_component(wire) # Op amp node_output = Node(graph, source=True, label="Vo") op_amp = Opamp(graph, node_a=node_minus, node_b=node_plus, node_out=node_output) graph.add_component(op_amp) edge = Edge(graph, node_minus, node_output, label="I8") resistor = Resistor(graph, 1 * Units.M, node_minus, node_output, edge) graph.add_component(resistor) # Op amp output node = Node(graph, label="V4") edge = Edge(graph, node_output, node, label="I9") capacitor = Capacitor(graph, 1 * Units.u, node_output, node, edge) graph.add_component(capacitor) node_1 = Node(graph, label="V5") edge = Edge(graph, node, node_1, label="I10") resistor = Resistor(graph, 10 * Units.K, node, node_1, edge) graph.add_component(resistor) edge = Edge(graph, node_1, node_gnd, label="I11") diode1 = Diode(graph, node_a=node_1, node_b=node_gnd, edge_i=edge) graph.add_component(diode1) edge = Edge(graph, node_gnd, node_1, label="I12") diode2 = Diode(graph, node_a=node_gnd, node_b=node_1, edge_i=edge) graph.add_component(diode2) edge = Edge(graph, node_1, node_gnd, label="I13") capacitor = Capacitor(graph, .001 * Units.u, node_1, node_gnd, edge) graph.add_component(capacitor) # Output potentiometer edge = Edge(graph, node_1, node_out, label="I14") resistor = Resistor(graph, mix_param * (10 * Units.K), node_1, node_out, edge) graph.add_component(resistor) edge = Edge(graph, node_out, node_gnd, label="I15") resistor = Resistor(graph, (1 - mix_param) * (10 * Units.K), node_out, node_gnd, edge) graph.add_component(resistor) # Probe Vout probes.append(node_out) mxr_filter = Filter(graph, node_in, node_out, probes=probes) return mxr_filter
[ "capacitor.Capacitor", "diode.Diode", "resistor.Resistor", "opamp.Opamp", "filter.Filter", "graph.Graph", "graph.Edge", "graph.Node", "wire.Wire" ]
[((548, 555), 'graph.Graph', 'Graph', ([], {}), '()\n', (553, 555), False, 'from graph import Node, Edge, Graph\n'), ((646, 695), 'graph.Node', 'Node', (['graph'], {'fixed': '(True)', 'source': '(True)', 'label': '"""Vin"""'}), "(graph, fixed=True, source=True, label='Vin')\n", (650, 695), False, 'from graph import Node, Edge, Graph\n'), ((711, 749), 'graph.Node', 'Node', (['graph'], {'output': '(True)', 'label': '"""Vout"""'}), "(graph, output=True, label='Vout')\n", (715, 749), False, 'from graph import Node, Edge, Graph\n'), ((779, 840), 'graph.Node', 'Node', (['graph'], {'value': '(4.5)', 'fixed': '(True)', 'source': '(True)', 'label': '"""4.5V"""'}), "(graph, value=4.5, fixed=True, source=True, label='4.5V')\n", (783, 840), False, 'from graph import Node, Edge, Graph\n'), ((856, 914), 'graph.Node', 'Node', (['graph'], {'value': '(0)', 'fixed': '(True)', 'source': '(True)', 'label': '"""GND"""'}), "(graph, value=0, fixed=True, source=True, label='GND')\n", (860, 914), False, 'from graph import Node, Edge, Graph\n'), ((997, 1039), 'graph.Edge', 'Edge', (['graph', 'node_in', 'node_gnd'], {'label': '"""I1"""'}), "(graph, node_in, node_gnd, label='I1')\n", (1001, 1039), False, 'from graph import Node, Edge, Graph\n'), ((1056, 1114), 'capacitor.Capacitor', 'Capacitor', (['graph', '(0.001 * Units.u)', 'node_in', 'node_gnd', 'edge'], {}), '(graph, 0.001 * Units.u, node_in, node_gnd, edge)\n', (1065, 1114), False, 'from capacitor import Capacitor\n'), ((1161, 1184), 'graph.Node', 'Node', (['graph'], {'label': '"""V1"""'}), "(graph, label='V1')\n", (1165, 1184), False, 'from graph import Node, Edge, Graph\n'), ((1196, 1234), 'graph.Edge', 'Edge', (['graph', 'node_in', 'node'], {'label': '"""I2"""'}), "(graph, node_in, node, label='I2')\n", (1200, 1234), False, 'from graph import Node, Edge, Graph\n'), ((1352, 1384), 'wire.Wire', 'Wire', (['graph', 'node_in', 'node', 'edge'], {}), '(graph, node_in, node, edge)\n', (1356, 1384), False, 'from wire import Wire\n'), ((1432, 
1455), 'graph.Node', 'Node', (['graph'], {'label': '"""V+"""'}), "(graph, label='V+')\n", (1436, 1455), False, 'from graph import Node, Edge, Graph\n'), ((1467, 1507), 'graph.Edge', 'Edge', (['graph', 'node', 'node_plus'], {'label': '"""I3"""'}), "(graph, node, node_plus, label='I3')\n", (1471, 1507), False, 'from graph import Node, Edge, Graph\n'), ((1523, 1575), 'resistor.Resistor', 'Resistor', (['graph', '(10 * Units.K)', 'node', 'node_plus', 'edge'], {}), '(graph, 10 * Units.K, node, node_plus, edge)\n', (1531, 1575), False, 'from resistor import Resistor\n'), ((1622, 1666), 'graph.Edge', 'Edge', (['graph', 'node_plus', 'node_4_5'], {'label': '"""I4"""'}), "(graph, node_plus, node_4_5, label='I4')\n", (1626, 1666), False, 'from graph import Node, Edge, Graph\n'), ((1682, 1737), 'resistor.Resistor', 'Resistor', (['graph', '(1 * Units.M)', 'node_plus', 'node_4_5', 'edge'], {}), '(graph, 1 * Units.M, node_plus, node_4_5, edge)\n', (1690, 1737), False, 'from resistor import Resistor\n'), ((1811, 1834), 'graph.Node', 'Node', (['graph'], {'label': '"""V2"""'}), "(graph, label='V2')\n", (1815, 1834), False, 'from graph import Node, Edge, Graph\n'), ((1846, 1885), 'graph.Edge', 'Edge', (['graph', 'node', 'node_gnd'], {'label': '"""I5"""'}), "(graph, node, node_gnd, label='I5')\n", (1850, 1885), False, 'from graph import Node, Edge, Graph\n'), ((1901, 1966), 'resistor.Resistor', 'Resistor', (['graph', '(gain_param * (1 * Units.M))', 'node', 'node_gnd', 'edge'], {}), '(graph, gain_param * (1 * Units.M), node, node_gnd, edge)\n', (1909, 1966), False, 'from resistor import Resistor\n'), ((2015, 2038), 'graph.Node', 'Node', (['graph'], {'label': '"""V3"""'}), "(graph, label='V3')\n", (2019, 2038), False, 'from graph import Node, Edge, Graph\n'), ((2050, 2087), 'graph.Edge', 'Edge', (['graph', 'node', 'node_1'], {'label': '"""I6"""'}), "(graph, node, node_1, label='I6')\n", (2054, 2087), False, 'from graph import Node, Edge, Graph\n'), ((2103, 2153), 'resistor.Resistor', 
'Resistor', (['graph', '(4.7 * Units.K)', 'node', 'node_1', 'edge'], {}), '(graph, 4.7 * Units.K, node, node_1, edge)\n', (2111, 2153), False, 'from resistor import Resistor\n'), ((2206, 2229), 'graph.Node', 'Node', (['graph'], {'label': '"""V-"""'}), "(graph, label='V-')\n", (2210, 2229), False, 'from graph import Node, Edge, Graph\n'), ((2242, 2285), 'graph.Edge', 'Edge', (['graph', 'node_1', 'node_minus'], {'label': '"""I7"""'}), "(graph, node_1, node_minus, label='I7')\n", (2246, 2285), False, 'from graph import Node, Edge, Graph\n'), ((2410, 2447), 'wire.Wire', 'Wire', (['graph', 'node_1', 'node_minus', 'edge'], {}), '(graph, node_1, node_minus, edge)\n', (2414, 2447), False, 'from wire import Wire\n'), ((2510, 2546), 'graph.Node', 'Node', (['graph'], {'source': '(True)', 'label': '"""Vo"""'}), "(graph, source=True, label='Vo')\n", (2514, 2546), False, 'from graph import Node, Edge, Graph\n'), ((2560, 2631), 'opamp.Opamp', 'Opamp', (['graph'], {'node_a': 'node_minus', 'node_b': 'node_plus', 'node_out': 'node_output'}), '(graph, node_a=node_minus, node_b=node_plus, node_out=node_output)\n', (2565, 2631), False, 'from opamp import Opamp\n'), ((2676, 2724), 'graph.Edge', 'Edge', (['graph', 'node_minus', 'node_output'], {'label': '"""I8"""'}), "(graph, node_minus, node_output, label='I8')\n", (2680, 2724), False, 'from graph import Node, Edge, Graph\n'), ((2740, 2799), 'resistor.Resistor', 'Resistor', (['graph', '(1 * Units.M)', 'node_minus', 'node_output', 'edge'], {}), '(graph, 1 * Units.M, node_minus, node_output, edge)\n', (2748, 2799), False, 'from resistor import Resistor\n'), ((2866, 2889), 'graph.Node', 'Node', (['graph'], {'label': '"""V4"""'}), "(graph, label='V4')\n", (2870, 2889), False, 'from graph import Node, Edge, Graph\n'), ((2901, 2943), 'graph.Edge', 'Edge', (['graph', 'node_output', 'node'], {'label': '"""I9"""'}), "(graph, node_output, node, label='I9')\n", (2905, 2943), False, 'from graph import Node, Edge, Graph\n'), ((2960, 3014), 
'capacitor.Capacitor', 'Capacitor', (['graph', '(1 * Units.u)', 'node_output', 'node', 'edge'], {}), '(graph, 1 * Units.u, node_output, node, edge)\n', (2969, 3014), False, 'from capacitor import Capacitor\n'), ((3064, 3087), 'graph.Node', 'Node', (['graph'], {'label': '"""V5"""'}), "(graph, label='V5')\n", (3068, 3087), False, 'from graph import Node, Edge, Graph\n'), ((3099, 3137), 'graph.Edge', 'Edge', (['graph', 'node', 'node_1'], {'label': '"""I10"""'}), "(graph, node, node_1, label='I10')\n", (3103, 3137), False, 'from graph import Node, Edge, Graph\n'), ((3153, 3202), 'resistor.Resistor', 'Resistor', (['graph', '(10 * Units.K)', 'node', 'node_1', 'edge'], {}), '(graph, 10 * Units.K, node, node_1, edge)\n', (3161, 3202), False, 'from resistor import Resistor\n'), ((3249, 3291), 'graph.Edge', 'Edge', (['graph', 'node_1', 'node_gnd'], {'label': '"""I11"""'}), "(graph, node_1, node_gnd, label='I11')\n", (3253, 3291), False, 'from graph import Node, Edge, Graph\n'), ((3305, 3362), 'diode.Diode', 'Diode', (['graph'], {'node_a': 'node_1', 'node_b': 'node_gnd', 'edge_i': 'edge'}), '(graph, node_a=node_1, node_b=node_gnd, edge_i=edge)\n', (3310, 3362), False, 'from diode import Diode\n'), ((3407, 3449), 'graph.Edge', 'Edge', (['graph', 'node_gnd', 'node_1'], {'label': '"""I12"""'}), "(graph, node_gnd, node_1, label='I12')\n", (3411, 3449), False, 'from graph import Node, Edge, Graph\n'), ((3463, 3520), 'diode.Diode', 'Diode', (['graph'], {'node_a': 'node_gnd', 'node_b': 'node_1', 'edge_i': 'edge'}), '(graph, node_a=node_gnd, node_b=node_1, edge_i=edge)\n', (3468, 3520), False, 'from diode import Diode\n'), ((3565, 3607), 'graph.Edge', 'Edge', (['graph', 'node_1', 'node_gnd'], {'label': '"""I13"""'}), "(graph, node_1, node_gnd, label='I13')\n", (3569, 3607), False, 'from graph import Node, Edge, Graph\n'), ((3624, 3681), 'capacitor.Capacitor', 'Capacitor', (['graph', '(0.001 * Units.u)', 'node_1', 'node_gnd', 'edge'], {}), '(graph, 0.001 * Units.u, node_1, node_gnd, 
edge)\n', (3633, 3681), False, 'from capacitor import Capacitor\n'), ((3755, 3797), 'graph.Edge', 'Edge', (['graph', 'node_1', 'node_out'], {'label': '"""I14"""'}), "(graph, node_1, node_out, label='I14')\n", (3759, 3797), False, 'from graph import Node, Edge, Graph\n'), ((3813, 3880), 'resistor.Resistor', 'Resistor', (['graph', '(mix_param * (10 * Units.K))', 'node_1', 'node_out', 'edge'], {}), '(graph, mix_param * (10 * Units.K), node_1, node_out, edge)\n', (3821, 3880), False, 'from resistor import Resistor\n'), ((3927, 3971), 'graph.Edge', 'Edge', (['graph', 'node_out', 'node_gnd'], {'label': '"""I15"""'}), "(graph, node_out, node_gnd, label='I15')\n", (3931, 3971), False, 'from graph import Node, Edge, Graph\n'), ((3987, 4062), 'resistor.Resistor', 'Resistor', (['graph', '((1 - mix_param) * (10 * Units.K))', 'node_out', 'node_gnd', 'edge'], {}), '(graph, (1 - mix_param) * (10 * Units.K), node_out, node_gnd, edge)\n', (3995, 4062), False, 'from resistor import Resistor\n'), ((4161, 4208), 'filter.Filter', 'Filter', (['graph', 'node_in', 'node_out'], {'probes': 'probes'}), '(graph, node_in, node_out, probes=probes)\n', (4167, 4208), False, 'from filter import Filter\n')]
from imageai.Prediction import ImagePrediction import cv2 import os import time import RPi.GPIO as gpio def my_prediction(img_path, prob): result = {} execution_path = os.getcwd() prediction = ImagePrediction() prediction.setModelTypeAsResNet() prediction.setModelPath(os.path.join(execution_path, "./data/resnet50.h5")) # I rename a model to simple name prediction.loadModel() predictions, probabilities = prediction.predictImage(os.path.join(execution_path, img_path), result_count=5) for eachPrediction, eachProbability in zip(predictions, probabilities): if( eachProbability >= prob ): result[eachPrediction] = eachProbability # print(eachPrediction , " : " , eachProbability) return result def image_process(): # The device number is cahangable cap = cv2.VideoCapture(0) while True: # Read Video capture in realtime ret, frame = cap.read() cv2.imshow("RPI CAM", frame) # Get Signal from pin11 # You can change the pin number here! pir = gpio.input(11) # If PIR sensor detected movement if pir == 1: cv2.imwrite('output.png', frame) break if cv2.waitKey(1) & 0xFF == ord('q'): break cap.release() cv2.destroyAllWindows() def verify_keyword(sample_word): code = '100' keywords = ['bottle', 'can', 'water', 'plastic', 'wine'] gate1 = ['bottle', 'water', 'wine'] gate2 = ['can'] for sample in sample_word.keys(): for keyword in keywords: if keyword in sample: if keyword in gate1: # Send signal to MOTOR here! 
# You can add code overhere code = '110' elif keyword in gate2: # Optional # For sending signal to another MOTOR code = '101' print('FOUND:', keyword, 'in', sample) return code if __name__ == '__main__': gpio.setwarnings(False) gpio.setmode(gpio.BOARD) gpio.setup(11, gpio.IN) print("AI is processing...") # Capture an image and save to disk image_process() # Send sample frame from cam to my_prediction function result = my_prediction('./output.png', 1) # Display result from my_prediction function print(result) # Find keyword in result code = verify_keyword(result) print("code to sensors:", code)
[ "cv2.imwrite", "imageai.Prediction.ImagePrediction", "RPi.GPIO.setup", "RPi.GPIO.setwarnings", "os.path.join", "os.getcwd", "cv2.imshow", "cv2.destroyAllWindows", "cv2.VideoCapture", "RPi.GPIO.input", "cv2.waitKey", "RPi.GPIO.setmode" ]
[((177, 188), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (186, 188), False, 'import os\n'), ((206, 223), 'imageai.Prediction.ImagePrediction', 'ImagePrediction', ([], {}), '()\n', (221, 223), False, 'from imageai.Prediction import ImagePrediction\n'), ((837, 856), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (853, 856), False, 'import cv2\n'), ((1308, 1331), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1329, 1331), False, 'import cv2\n'), ((2044, 2067), 'RPi.GPIO.setwarnings', 'gpio.setwarnings', (['(False)'], {}), '(False)\n', (2060, 2067), True, 'import RPi.GPIO as gpio\n'), ((2072, 2096), 'RPi.GPIO.setmode', 'gpio.setmode', (['gpio.BOARD'], {}), '(gpio.BOARD)\n', (2084, 2096), True, 'import RPi.GPIO as gpio\n'), ((2101, 2124), 'RPi.GPIO.setup', 'gpio.setup', (['(11)', 'gpio.IN'], {}), '(11, gpio.IN)\n', (2111, 2124), True, 'import RPi.GPIO as gpio\n'), ((290, 340), 'os.path.join', 'os.path.join', (['execution_path', '"""./data/resnet50.h5"""'], {}), "(execution_path, './data/resnet50.h5')\n", (302, 340), False, 'import os\n'), ((461, 499), 'os.path.join', 'os.path.join', (['execution_path', 'img_path'], {}), '(execution_path, img_path)\n', (473, 499), False, 'import os\n'), ((955, 983), 'cv2.imshow', 'cv2.imshow', (['"""RPI CAM"""', 'frame'], {}), "('RPI CAM', frame)\n", (965, 983), False, 'import cv2\n'), ((1077, 1091), 'RPi.GPIO.input', 'gpio.input', (['(11)'], {}), '(11)\n', (1087, 1091), True, 'import RPi.GPIO as gpio\n'), ((1168, 1200), 'cv2.imwrite', 'cv2.imwrite', (['"""output.png"""', 'frame'], {}), "('output.png', frame)\n", (1179, 1200), False, 'import cv2\n'), ((1231, 1245), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1242, 1245), False, 'import cv2\n')]
import re import csv import nltk """docstring for twitterClean""" def __init__(self): super(twitterClean, self).__init__() def renameUser(corpus): _new = [] for _temp in corpus: _temp = re.sub( r'(^|[^@\w])@(\w{1,15})\b','',_temp) _new.append(_temp) return _new def removeHashtag(corpus): _new = [] for _temp in corpus: _temp = re.sub(r'#(\w+)', '', _temp) _new.append(_temp) return _new def removeURL(corpus): _new = [] for _temp in corpus: _temp = re.sub(r'http:\S+', '', _temp, flags=re.MULTILINE) _temp = re.sub(r'https:\S+', '', _temp, flags=re.MULTILINE) _new.append(_temp) return _new def removeEmoticon(corpus): _new = [] emoticons_str = r"(?:[:=;B\-][oO\"\_\-]?[\-D\)\]\(\]/\\Op3]{2,3})" for _temp in corpus: _temp.replace(emoticons_str, '') _temp = re.sub(r'[^\x00-\x7F]', '', _temp) _new.append(_temp) return _new def getTweetData(filename="dataset/Indonesian_Tweets.tsv"): #Gain large corpus of tweets toFeed = [] rawSentence = [] with open(filename, 'rU') as csvfile: spamreader = csv.reader(csvfile, delimiter='\n', quotechar='|') for spam in spamreader: rawSentence.append(spam) corpusSentence =[] for individualSentence in rawSentence: if individualSentence == []: pass else: corpusSentence.append(individualSentence[0]) # corpusSentence = self.text.removeAll(corpusSentence) _temp = removeURL(corpusSentence) _temp = renameUser(_temp) _temp = removeHashtag(_temp) _temp = removeEmoticon(_temp) for sentences in _temp: token = nltk.wordpunct_tokenize(sentences.lower()) toFeed.append(token) return toFeed
[ "re.sub", "csv.reader" ]
[((231, 278), 're.sub', 're.sub', (['"""(^|[^@\\\\w])@(\\\\w{1,15})\\\\b"""', '""""""', '_temp'], {}), "('(^|[^@\\\\w])@(\\\\w{1,15})\\\\b', '', _temp)\n", (237, 278), False, 'import re\n'), ((411, 439), 're.sub', 're.sub', (['"""#(\\\\w+)"""', '""""""', '_temp'], {}), "('#(\\\\w+)', '', _temp)\n", (417, 439), False, 'import re\n'), ((563, 613), 're.sub', 're.sub', (['"""http:\\\\S+"""', '""""""', '_temp'], {'flags': 're.MULTILINE'}), "('http:\\\\S+', '', _temp, flags=re.MULTILINE)\n", (569, 613), False, 'import re\n'), ((630, 681), 're.sub', 're.sub', (['"""https:\\\\S+"""', '""""""', '_temp'], {'flags': 're.MULTILINE'}), "('https:\\\\S+', '', _temp, flags=re.MULTILINE)\n", (636, 681), False, 'import re\n'), ((922, 957), 're.sub', 're.sub', (['"""[^\\\\x00-\\\\x7F]"""', '""""""', '_temp'], {}), "('[^\\\\x00-\\\\x7F]', '', _temp)\n", (928, 957), False, 'import re\n'), ((1195, 1245), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '"""\n"""', 'quotechar': '"""|"""'}), "(csvfile, delimiter='\\n', quotechar='|')\n", (1205, 1245), False, 'import csv\n')]
from beem.utils import formatTimeString, resolve_authorperm, construct_authorperm, addTzInfo from beem.nodelist import NodeList from beem.comment import Comment from beem import Steem from beem.account import Account from beem.instance import set_shared_steem_instance from beem.blockchain import Blockchain import time import json import os import math import dataset import random from datetime import date, datetime, timedelta from dateutil.parser import parse from beem.constants import STEEM_100_PERCENT from steemrewarding.post_storage import PostsTrx from steemrewarding.command_storage import CommandsTrx from steemrewarding.vote_rule_storage import VoteRulesTrx from steemrewarding.pending_vote_storage import PendingVotesTrx from steemrewarding.config_storage import ConfigurationDB from steemrewarding.vote_storage import VotesTrx from steemrewarding.vote_log_storage import VoteLogTrx from steemrewarding.failed_vote_log_storage import FailedVoteLogTrx from steemrewarding.broadcast_vote_storage import BroadcastVoteTrx from steemrewarding.utils import isfloat, upvote_comment, valid_age, upvote_comment_without_check from steemrewarding.version import version as rewardingversion from steemrewarding.account_storage import AccountsDB from steemrewarding.version import version as rewarding_version import dataset if __name__ == "__main__": config_file = 'config.json' if not os.path.isfile(config_file): raise Exception("config.json is missing!") else: with open(config_file) as json_data_file: config_data = json.load(json_data_file) # print(config_data) databaseConnector = config_data["databaseConnector"] wallet_password = config_data["wallet_password"] posting_auth_acc = config_data["posting_auth_acc"] voting_round_sec = config_data["voting_round_sec"] start_prep_time = time.time() db = dataset.connect(databaseConnector) # Create keyStorage print("Start upvote_post_comments_timebased.py") nobroadcast = False # nobroadcast = True postTrx = PostsTrx(db) votesTrx = VotesTrx(db) 
voteRulesTrx = VoteRulesTrx(db) confStorage = ConfigurationDB(db) pendingVotesTrx = PendingVotesTrx(db) voteLogTrx = VoteLogTrx(db) failedVoteLogTrx = FailedVoteLogTrx(db) accountsTrx = AccountsDB(db) broadcastVoteTrx = BroadcastVoteTrx(db) conf_setup = confStorage.get() # last_post_block = conf_setup["last_post_block"] nodes = NodeList() # nodes.update_nodes(weights={"block": 1}) try: nodes.update_nodes() except: print("could not update nodes") node_list = nodes.get_nodes(exclude_limited=False) stm = Steem(node=node_list, num_retries=5, call_num_retries=3, timeout=15, nobroadcast=nobroadcast) stm.wallet.unlock(wallet_password) last_voter = None print("Start apply new timebased votes") voter_counter = 0 delete_pending_votes = [] rc_sp_to_low_account_list = [] vote_counter = 0 vote_count = 0 for pending_vote in pendingVotesTrx.get_command_list_timed(): settings = None voter_acc = None author, permlink = resolve_authorperm(pending_vote["authorperm"]) if pending_vote["voter"] in rc_sp_to_low_account_list: continue age_min = (datetime.utcnow() - pending_vote["comment_timestamp"]).total_seconds() / 60 maximum_vote_delay_min = pending_vote["maximum_vote_delay_min"] if age_min < pending_vote["vote_delay_min"] - voting_round_sec / 2.0 / 60 - 3: # print("%s is not ready yet - %.2f min should be %.2f" % (pending_vote["authorperm"], age_min, pending_vote["vote_delay_min"])) continue if settings is None: settings = accountsTrx.get(pending_vote["voter"]) if settings is None: voter_acc = Account(pending_vote["voter"], steem_instance=stm) print("update %s - did not exists" % pending_vote["voter"]) posting_auth = False for a in voter_acc["posting"]["account_auths"]: if a[0] == posting_auth_acc: posting_auth = True if pending_vote["voter"] == posting_auth_acc: posting_auth = True accountsTrx.upsert({"name": pending_vote["voter"], "vp_update":datetime.utcnow(), "vp": voter_acc.vp, "down_vp": voter_acc.get_downvoting_power(), "sp": voter_acc.sp, "rc": 
voter_acc.get_rc_manabar()["current_mana"] / 1e9, "last_update": datetime.utcnow(), "posting_auth_acc": posting_auth}) pause_votes_below_vp = 0 settings = accountsTrx.get(pending_vote["voter"]) elif settings["sp"] is None or settings["vp"] is None or settings["last_update"] is None or settings["rc"] is None or settings["posting_auth_acc"] is None: print("update %s - None" % pending_vote["voter"]) voter_acc = Account(pending_vote["voter"], steem_instance=stm) posting_auth = False for a in voter_acc["posting"]["account_auths"]: if a[0] == posting_auth_acc: posting_auth = True if pending_vote["voter"] == posting_auth_acc: posting_auth = True accountsTrx.upsert({"name": pending_vote["voter"], "vp_update":datetime.utcnow(), "vp": voter_acc.vp, "down_vp": voter_acc.get_downvoting_power(), "sp": voter_acc.sp, "rc": voter_acc.get_rc_manabar()["current_mana"] / 1e9, "last_update": datetime.utcnow(), "posting_auth_acc": posting_auth}) settings = accountsTrx.get(pending_vote["voter"]) elif (datetime.utcnow() - settings["last_update"]).total_seconds() / 60 > 1: print("update %s - last update was before %f s" % (pending_vote["voter"], (datetime.utcnow() - settings["last_update"]).total_seconds())) voter_acc = Account(pending_vote["voter"], steem_instance=stm) posting_auth = False for a in voter_acc["posting"]["account_auths"]: if a[0] == posting_auth_acc: posting_auth = True if pending_vote["voter"] == posting_auth_acc: posting_auth = True accountsTrx.upsert({"name": pending_vote["voter"], "vp_update":datetime.utcnow(), "vp": voter_acc.vp, "down_vp": voter_acc.get_downvoting_power(), "sp": voter_acc.sp, "rc": voter_acc.get_rc_manabar()["current_mana"] / 1e9, "last_update": datetime.utcnow(), "posting_auth_acc": posting_auth}) settings = accountsTrx.get(pending_vote["voter"]) if pending_vote["vote_weight"] > 0: pause_votes_below_vp = settings["pause_votes_below_vp"] vp = settings["vp"] else: pause_votes_below_vp = settings["pause_down_votes_below_down_vp"] vp = 
settings["down_vp"] vp_update = settings["last_update"] if vp_update is not None: diff_in_seconds = ((datetime.utcnow()) - (vp_update)).total_seconds() regenerated_vp = diff_in_seconds * 10000 / 432000 / 100 vp = vp + regenerated_vp #down_vp = down_vp + regenerated_vp if vp > 100: vp = 100 #if down_vp > 100: # down_vp = 100 if vp < pause_votes_below_vp: failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "Voting is paused (VP = %.2f %%, which below pause_votes_below_vp of %.2f %%)" % (vp, pause_votes_below_vp), "timestamp": datetime.utcnow(), "vote_weight": pending_vote["vote_weight"], "vote_delay_min": pending_vote["vote_delay_min"], "min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"], "main_post": pending_vote["main_post"]}) delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]}) continue # print("time vote %.2f s - %d votes" % (time.time() - start_prep_time, vote_count)) if (pending_vote["vote_weight"] is None or pending_vote["vote_weight"] == 0) and (pending_vote["vote_sbd"] is None or float(pending_vote["vote_sbd"]) <= 0): # voter_acc = Account(pending_vote["voter"], steem_instance=stm) failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "vote_weight was set to zero. 
(%s %% and %s $)" % (pending_vote["vote_weight"], pending_vote["vote_sbd"]), "timestamp": datetime.utcnow(), "vote_weight": pending_vote["vote_weight"], "vote_delay_min": pending_vote["vote_delay_min"], "min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"], "main_post": pending_vote["main_post"]}) delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]}) continue if maximum_vote_delay_min < 0: maximum_vote_delay_min = 9360 if age_min > maximum_vote_delay_min + voting_round_sec / 60: # voter_acc = Account(pending_vote["voter"], steem_instance=stm) failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "post is older than %.2f min." % (maximum_vote_delay_min), "timestamp": datetime.utcnow(), "vote_weight": pending_vote["vote_weight"], "vote_delay_min": pending_vote["vote_delay_min"], "min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"], "main_post": pending_vote["main_post"]}) delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]}) continue voter_counter += 1 # voter_acc = Account(pending_vote["voter"], steem_instance=stm) if settings["sp"] < 0.1: failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "Could not vot %s, as Steem Power is almost zero." 
% (pending_vote["authorperm"]), "timestamp": datetime.utcnow(), "vote_weight": pending_vote["vote_weight"], "vote_delay_min": pending_vote["vote_delay_min"], "min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"], "main_post": pending_vote["main_post"]}) delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]}) print("Could not process %s - sp < 0.1" % pending_vote["authorperm"]) rc_sp_to_low_account_list.append(pending_vote["voter"]) continue if settings["rc"] < 0.5: failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "Could not vot %s, as RC is almost zero." % (pending_vote["authorperm"]), "timestamp": datetime.utcnow(), "vote_weight": pending_vote["vote_weight"], "vote_delay_min": pending_vote["vote_delay_min"], "min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"], "main_post": pending_vote["main_post"]}) delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]}) print("Could not process %s - rc to low" % pending_vote["authorperm"]) rc_sp_to_low_account_list.append(pending_vote["voter"]) continue vote_weight = pending_vote["vote_weight"] if vote_weight is None or vote_weight == 0: voter_acc = Account(pending_vote["voter"], steem_instance=stm) vote_weight = voter_acc.get_vote_pct_for_SBD(float(pending_vote["vote_sbd"])) / 100. 
if vote_weight > 100: vote_weight = 100 elif vote_weight < 0.01: failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "vote_weight was set to zero.", "timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"], "min_vp": pending_vote["min_vp"], "vp": voter_acc.vp, "vote_when_vp_reached": pending_vote["vote_when_vp_reached"], "main_post": pending_vote["main_post"]}) delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]}) continue age_hour = ((datetime.utcnow()) - pending_vote["created"]).total_seconds() / 60 / 60 if age_hour > 156: failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "post is older than 6.5 days.", "timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"], "min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"], "main_post": pending_vote["main_post"]}) delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]}) continue if vp < pending_vote["min_vp"]: failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "Voting power is %.2f %%, which is to low. 
(min_vp is %.2f %%)" % (vp, pending_vote["min_vp"]), "timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"], "min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"], "main_post": pending_vote["main_post"]}) delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]}) continue if pending_vote["max_votes_per_day"] > -1: if settings is None: settings = accountsTrx.get(pending_vote["voter"]) if settings is not None: sliding_time_window = settings["sliding_time_window"] else: sliding_time_window = True votes_24h_before = voteLogTrx.get_votes_per_day(pending_vote["voter"], author, sliding_time_window) if votes_24h_before >= pending_vote["max_votes_per_day"]: failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "The author was already upvoted %d in the last 24h (max_votes_per_day is %d)." 
% (votes_24h_before, pending_vote["max_votes_per_day"]), "timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"], "min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"], "main_post": pending_vote["main_post"]}) delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]}) continue if pending_vote["max_votes_per_week"] > -1: if settings is None: settings = accountsTrx.get(pending_vote["voter"]) if settings is not None: sliding_time_window = settings["sliding_time_window"] else: sliding_time_window = True votes_168h_before = voteLogTrx.get_votes_per_week(pending_vote["voter"], author, sliding_time_window) if votes_168h_before >= pending_vote["max_votes_per_week"]: failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "The author was already upvoted %d in the last 7 days (max_votes_per_week is %d)." 
% (votes_168h_before, pending_vote["max_votes_per_week"]), "timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"], "min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"],"vote_when_vp_reached": pending_vote["vote_when_vp_reached"], "main_post": pending_vote["main_post"]}) delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]}) continue if pending_vote["vp_scaler"] > 0: vote_weight *= 1 - ((100 - vp) / 100 * pending_vote["vp_scaler"]) if abs(vote_weight) < 0.02: error_msg = "Vote weight is zero or below zero (%.2f %%)" % vote_weight failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": error_msg, "timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"], "min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"],"vote_when_vp_reached": pending_vote["vote_when_vp_reached"], "main_post": pending_vote["main_post"]}) delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]}) continue cnt = 0 c = None while c is None and cnt < 5: cnt += 1 try: c = Comment(pending_vote["authorperm"], use_tags_api=True, steem_instance=stm) c.refresh() except: nodelist = NodeList() nodelist.update_nodes() stm = Steem(node=nodelist.get_nodes(), num_retries=5, call_num_retries=3, timeout=15, nobroadcast=nobroadcast) time.sleep(1) if cnt == 5: print("Could not read %s" % (pending_vote["authorperm"])) failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "Could not process %s" % (pending_vote["authorperm"]), "timestamp": datetime.utcnow(), "vote_weight": pending_vote["vote_weight"], "vote_delay_min": 
pending_vote["vote_delay_min"], "min_vp": pending_vote["min_vp"], "vp": vp, "vote_when_vp_reached": pending_vote["vote_when_vp_reached"], "main_post": pending_vote["main_post"]}) delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]}) print("Could not process %s" % pending_vote["authorperm"]) continue votes_list = votesTrx.get_authorperm_votes(pending_vote["authorperm"]) try: if pending_vote["max_net_votes"] >= 0 and pending_vote["max_net_votes"] < len(votes_list): failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "The number of post/comment votes (%d) is higher than max_net_votes (%d)." % (len(votes_list), pending_vote["max_net_votes"]), "timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"], "min_vp": pending_vote["min_vp"], "vp": vp, "vote_when_vp_reached": pending_vote["vote_when_vp_reached"], "main_post": pending_vote["main_post"]}) delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]}) continue except: continue if False and pending_vote["max_pending_payout"] >= 0 and pending_vote["max_pending_payout"] < float(c["pending_payout_value"]): failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "The pending payout of post/comment votes (%.2f) is higher than max_pending_payout (%.2f)." 
% (float(c["pending_payout_value"]), pending_vote["max_pending_payout"]), "timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"], "min_vp": pending_vote["min_vp"], "vp": vp, "vote_when_vp_reached": pending_vote["vote_when_vp_reached"], "main_post": pending_vote["main_post"]}) delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]}) continue # check for max votes per day/week already_voted = False for v in votes_list: if pending_vote["voter"] == v["voter"]: already_voted = True if not settings["posting_auth_acc"] or already_voted: if already_voted: error_msg = "already voted." else: error_msg = "posting authority is missing" failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": error_msg, "timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"], "min_vp": pending_vote["min_vp"], "vp": vp, "vote_when_vp_reached": pending_vote["vote_when_vp_reached"], "main_post": pending_vote["main_post"]}) delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]}) continue # sucess = upvote_comment(c, pending_vote["voter"], vote_weight) if False: reply_message = upvote_comment_without_check(c, pending_vote["voter"], vote_weight) if reply_message is not None: vote_count += 1 if pending_vote["leave_comment"]: try: if settings is None: settings = accountsTrx.get(pending_vote["voter"]) if settings is not None and "upvote_comment" in settings and settings["upvote_comment"] is not None: json_metadata = {'app': 'rewarding/%s' % (rewarding_version)} reply_body = settings["upvote_comment"] reply_body = reply_body.replace("{{name}}", "@%s" % c["author"] ).replace("{{voter}}", "@%s" % pending_vote["voter"]) c.reply(reply_body, 
author=pending_vote["voter"], meta=json_metadata) except: print("Could not leave comment!") voteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "author": c["author"], "timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"], "voted_after_min": age_min, "vp": vp, "vote_when_vp_reached": pending_vote["vote_when_vp_reached"], "trail_vote": pending_vote["trail_vote"], "main_post": pending_vote["main_post"], "voter_to_follow": pending_vote["voter_to_follow"]}) expiration = formatTimeString(reply_message["expiration"]).replace(tzinfo=None) delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]}) else: expiration = datetime.utcnow() broadcastVoteTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"], "min_vp": pending_vote["min_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"], "main_post": pending_vote["main_post"], "author": c["author"], "voted_after_min": 0, "created": datetime.utcnow(), "vp": settings["vp"], "down_vp": settings["down_vp"], "maximum_vote_delay_min": pending_vote["maximum_vote_delay_min"], "comment_timestamp": pending_vote["comment_timestamp"], "trail_vote": pending_vote["trail_vote"], "voter_to_follow": pending_vote["voter_to_follow"], "leave_comment": pending_vote["leave_comment"], "vote_timestamp": pending_vote["comment_timestamp"] + timedelta(seconds=pending_vote["vote_delay_min"]/60), "max_votes_per_day": pending_vote["max_votes_per_day"], "max_votes_per_week": pending_vote["max_votes_per_week"]}) delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]}) for pending_vote in delete_pending_votes: 
pendingVotesTrx.delete(pending_vote["authorperm"], pending_vote["voter"], pending_vote["vote_when_vp_reached"]) delete_pending_votes = [] print("%d voter have been checked!" % voter_counter) print("time vote %.2f s - %d votes" % (time.time() - start_prep_time, vote_count))
[ "steemrewarding.config_storage.ConfigurationDB", "time.sleep", "beem.nodelist.NodeList", "steemrewarding.utils.upvote_comment_without_check", "steemrewarding.post_storage.PostsTrx", "datetime.timedelta", "steemrewarding.broadcast_vote_storage.BroadcastVoteTrx", "beem.utils.resolve_authorperm", "stee...
[((1880, 1891), 'time.time', 'time.time', ([], {}), '()\n', (1889, 1891), False, 'import time\n'), ((1901, 1935), 'dataset.connect', 'dataset.connect', (['databaseConnector'], {}), '(databaseConnector)\n', (1916, 1935), False, 'import dataset\n'), ((2081, 2093), 'steemrewarding.post_storage.PostsTrx', 'PostsTrx', (['db'], {}), '(db)\n', (2089, 2093), False, 'from steemrewarding.post_storage import PostsTrx\n'), ((2109, 2121), 'steemrewarding.vote_storage.VotesTrx', 'VotesTrx', (['db'], {}), '(db)\n', (2117, 2121), False, 'from steemrewarding.vote_storage import VotesTrx\n'), ((2141, 2157), 'steemrewarding.vote_rule_storage.VoteRulesTrx', 'VoteRulesTrx', (['db'], {}), '(db)\n', (2153, 2157), False, 'from steemrewarding.vote_rule_storage import VoteRulesTrx\n'), ((2176, 2195), 'steemrewarding.config_storage.ConfigurationDB', 'ConfigurationDB', (['db'], {}), '(db)\n', (2191, 2195), False, 'from steemrewarding.config_storage import ConfigurationDB\n'), ((2218, 2237), 'steemrewarding.pending_vote_storage.PendingVotesTrx', 'PendingVotesTrx', (['db'], {}), '(db)\n', (2233, 2237), False, 'from steemrewarding.pending_vote_storage import PendingVotesTrx\n'), ((2255, 2269), 'steemrewarding.vote_log_storage.VoteLogTrx', 'VoteLogTrx', (['db'], {}), '(db)\n', (2265, 2269), False, 'from steemrewarding.vote_log_storage import VoteLogTrx\n'), ((2293, 2313), 'steemrewarding.failed_vote_log_storage.FailedVoteLogTrx', 'FailedVoteLogTrx', (['db'], {}), '(db)\n', (2309, 2313), False, 'from steemrewarding.failed_vote_log_storage import FailedVoteLogTrx\n'), ((2332, 2346), 'steemrewarding.account_storage.AccountsDB', 'AccountsDB', (['db'], {}), '(db)\n', (2342, 2346), False, 'from steemrewarding.account_storage import AccountsDB\n'), ((2370, 2390), 'steemrewarding.broadcast_vote_storage.BroadcastVoteTrx', 'BroadcastVoteTrx', (['db'], {}), '(db)\n', (2386, 2390), False, 'from steemrewarding.broadcast_vote_storage import BroadcastVoteTrx\n'), ((2494, 2504), 'beem.nodelist.NodeList', 
'NodeList', ([], {}), '()\n', (2502, 2504), False, 'from beem.nodelist import NodeList\n'), ((2712, 2809), 'beem.Steem', 'Steem', ([], {'node': 'node_list', 'num_retries': '(5)', 'call_num_retries': '(3)', 'timeout': '(15)', 'nobroadcast': 'nobroadcast'}), '(node=node_list, num_retries=5, call_num_retries=3, timeout=15,\n nobroadcast=nobroadcast)\n', (2717, 2809), False, 'from beem import Steem\n'), ((1400, 1427), 'os.path.isfile', 'os.path.isfile', (['config_file'], {}), '(config_file)\n', (1414, 1427), False, 'import os\n'), ((3188, 3234), 'beem.utils.resolve_authorperm', 'resolve_authorperm', (["pending_vote['authorperm']"], {}), "(pending_vote['authorperm'])\n", (3206, 3234), False, 'from beem.utils import formatTimeString, resolve_authorperm, construct_authorperm, addTzInfo\n'), ((1566, 1591), 'json.load', 'json.load', (['json_data_file'], {}), '(json_data_file)\n', (1575, 1591), False, 'import json\n'), ((3922, 3972), 'beem.account.Account', 'Account', (["pending_vote['voter']"], {'steem_instance': 'stm'}), "(pending_vote['voter'], steem_instance=stm)\n", (3929, 3972), False, 'from beem.account import Account\n'), ((13040, 13090), 'beem.account.Account', 'Account', (["pending_vote['voter']"], {'steem_instance': 'stm'}), "(pending_vote['voter'], steem_instance=stm)\n", (13047, 13090), False, 'from beem.account import Account\n'), ((24588, 24655), 'steemrewarding.utils.upvote_comment_without_check', 'upvote_comment_without_check', (['c', "pending_vote['voter']", 'vote_weight'], {}), "(c, pending_vote['voter'], vote_weight)\n", (24616, 24655), False, 'from steemrewarding.utils import isfloat, upvote_comment, valid_age, upvote_comment_without_check\n'), ((5061, 5111), 'beem.account.Account', 'Account', (["pending_vote['voter']"], {'steem_instance': 'stm'}), "(pending_vote['voter'], steem_instance=stm)\n", (5068, 5111), False, 'from beem.account import Account\n'), ((19833, 19907), 'beem.comment.Comment', 'Comment', (["pending_vote['authorperm']"], 
{'use_tags_api': '(True)', 'steem_instance': 'stm'}), "(pending_vote['authorperm'], use_tags_api=True, steem_instance=stm)\n", (19840, 19907), False, 'from beem.comment import Comment\n'), ((26485, 26502), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (26500, 26502), False, 'from datetime import date, datetime, timedelta\n'), ((26964, 26981), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (26979, 26981), False, 'from datetime import date, datetime, timedelta\n'), ((4417, 4434), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4432, 4434), False, 'from datetime import date, datetime, timedelta\n'), ((4624, 4641), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4639, 4641), False, 'from datetime import date, datetime, timedelta\n'), ((6088, 6138), 'beem.account.Account', 'Account', (["pending_vote['voter']"], {'steem_instance': 'stm'}), "(pending_vote['voter'], steem_instance=stm)\n", (6095, 6138), False, 'from beem.account import Account\n'), ((7893, 7910), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (7908, 7910), False, 'from datetime import date, datetime, timedelta\n'), ((9105, 9122), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (9120, 9122), False, 'from datetime import date, datetime, timedelta\n'), ((10158, 10175), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (10173, 10175), False, 'from datetime import date, datetime, timedelta\n'), ((11144, 11161), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (11159, 11161), False, 'from datetime import date, datetime, timedelta\n'), ((12154, 12171), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (12169, 12171), False, 'from datetime import date, datetime, timedelta\n'), ((14368, 14385), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (14383, 14385), False, 'from datetime import date, datetime, timedelta\n'), ((15266, 15283), 
'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (15281, 15283), False, 'from datetime import date, datetime, timedelta\n'), ((19127, 19144), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (19142, 19144), False, 'from datetime import date, datetime, timedelta\n'), ((19983, 19993), 'beem.nodelist.NodeList', 'NodeList', ([], {}), '()\n', (19991, 19993), False, 'from beem.nodelist import NodeList\n'), ((20178, 20191), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (20188, 20191), False, 'import time\n'), ((20502, 20519), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (20517, 20519), False, 'from datetime import date, datetime, timedelta\n'), ((22679, 22696), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (22694, 22696), False, 'from datetime import date, datetime, timedelta\n'), ((23863, 23880), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (23878, 23880), False, 'from datetime import date, datetime, timedelta\n'), ((27445, 27499), 'datetime.timedelta', 'timedelta', ([], {'seconds': "(pending_vote['vote_delay_min'] / 60)"}), "(seconds=pending_vote['vote_delay_min'] / 60)\n", (27454, 27499), False, 'from datetime import date, datetime, timedelta\n'), ((28135, 28146), 'time.time', 'time.time', ([], {}), '()\n', (28144, 28146), False, 'import time\n'), ((3347, 3364), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (3362, 3364), False, 'from datetime import date, datetime, timedelta\n'), ((5474, 5491), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (5489, 5491), False, 'from datetime import date, datetime, timedelta\n'), ((5681, 5698), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (5696, 5698), False, 'from datetime import date, datetime, timedelta\n'), ((7258, 7275), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (7273, 7275), False, 'from datetime import date, datetime, timedelta\n'), ((16660, 16677), 
'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (16675, 16677), False, 'from datetime import date, datetime, timedelta\n'), ((18095, 18112), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (18110, 18112), False, 'from datetime import date, datetime, timedelta\n'), ((21633, 21650), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (21648, 21650), False, 'from datetime import date, datetime, timedelta\n'), ((25715, 25732), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (25730, 25732), False, 'from datetime import date, datetime, timedelta\n'), ((26172, 26217), 'beem.utils.formatTimeString', 'formatTimeString', (["reply_message['expiration']"], {}), "(reply_message['expiration'])\n", (26188, 26217), False, 'from beem.utils import formatTimeString, resolve_authorperm, construct_authorperm, addTzInfo\n'), ((6506, 6523), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (6521, 6523), False, 'from datetime import date, datetime, timedelta\n'), ((6713, 6730), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (6728, 6730), False, 'from datetime import date, datetime, timedelta\n'), ((13497, 13514), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (13512, 13514), False, 'from datetime import date, datetime, timedelta\n'), ((14073, 14090), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (14088, 14090), False, 'from datetime import date, datetime, timedelta\n'), ((5843, 5860), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (5858, 5860), False, 'from datetime import date, datetime, timedelta\n'), ((6001, 6018), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (6016, 6018), False, 'from datetime import date, datetime, timedelta\n')]
from typing import Iterable, Any
import unittest

from zensols.persist import ReadOnlyStash, PreemptiveStash


class RangeStash(ReadOnlyStash):
    """Read-only stash whose keys are the integers of a range.

    The ``keyed`` and ``loaded`` flags record whether :meth:`keys` and
    :meth:`load` were ever invoked, so tests can verify lazy access.
    """

    def __init__(self, n: int, end: int = None):
        super().__init__()
        self.n = n
        self.end = end
        self.keyed = False
        self.loaded = False

    def load(self, name: str) -> Any:
        """Return ``name`` itself when it is a valid key, else ``None``."""
        self.loaded = True
        return name if self.exists(name) else None

    def keys(self) -> Iterable[str]:
        """Return the key range: ``range(n)`` or ``range(n, end)``."""
        self.keyed = True
        return range(self.n) if self.end is None else range(self.n, self.end)

    def exists(self, name: str) -> bool:
        """Return whether ``name`` falls inside this stash's key range."""
        value = int(name)
        if self.end is None:
            # single-bound form: valid keys are 0 .. n-1
            if value >= self.n:
                return False
        elif value < self.n or value >= self.end:
            return False
        return True


class TestPreemptiveStash(unittest.TestCase):
    """Verify that ``PreemptiveStash`` touches its delegate lazily."""

    def setUp(self):
        self.rs = RangeStash(3)
        self.pe = PreemptiveStash(self.rs)

    def test_data_first(self):
        # nothing has been accessed yet
        self.assertFalse(self.rs.keyed)
        self.assertFalse(self.rs.loaded)
        # iterating materializes every (key, value) pair
        self.assertEqual(((0, 0), (1, 1), (2, 2)), tuple(self.pe))
        self.assertTrue(self.pe.has_data)
        self.assertTrue(self.rs.keyed)
        self.assertTrue(self.rs.loaded)

    def test_has_data_first(self):
        self.assertFalse(self.rs.keyed)
        self.assertFalse(self.rs.loaded)
        # has_data consults keys() but must not trigger load()
        self.assertTrue(self.pe.has_data)
        self.assertTrue(self.rs.keyed)
        self.assertFalse(self.rs.loaded)
[ "zensols.persist.PreemptiveStash" ]
[((985, 1009), 'zensols.persist.PreemptiveStash', 'PreemptiveStash', (['self.rs'], {}), '(self.rs)\n', (1000, 1009), False, 'from zensols.persist import ReadOnlyStash, PreemptiveStash\n')]
from sqlite3 import connect
##<NAME>

# NOTE(review): every query now uses DB-API "?" parameter substitution.
# The original built SQL by concatenating raw input(), which broke on
# quotes and was an SQL-injection hole.  Connections are also closed in
# `finally` so they no longer leak when a query raises.


def show_menu():
    """Print the numbered list of available actions."""
    print("\n------")
    print("MENU:")
    print("_____")
    print("1. Add a student")
    print("2. Find a student")
    print("3. Add a course")
    print("4. Find a course")
    print("5. Enroll a student")
    print("6. Find Course(s) of a Student")
    print("7. Find Student(s) of a Course")
    print("8. Quit\n")


def add_student():
    """Prompt for a student id and name and insert the row."""
    conn = connect('training.db')
    try:
        curs = conn.cursor()
        x = input("\nEnter student id...")
        y = input("Enter student name...")
        curs.execute(
            "insert into student (studentid, studentname) values (?, ?);",
            (x, y))
        conn.commit()
        print("\nStudent " + x + "/" + y + " added successfully!")
    finally:
        conn.close()


def find_student():
    """Prompt for a (partial) name and print all matching students."""
    conn = connect('training.db')
    try:
        curs = conn.cursor()
        x = input("\nEnter student name...")
        curs.execute(
            "select * from student where studentname like ?;",
            ("%" + x + "%",))
        print("\nHere is the list...")
        for row in curs.fetchall():
            print(row)
    finally:
        conn.close()


def add_course():
    """Prompt for a course id and name and insert the row."""
    conn = connect('training.db')
    try:
        curs = conn.cursor()
        x = input("\nEnter course id...")
        y = input("Enter course name...")
        curs.execute(
            "insert into course (courseid, coursename) values (?, ?);",
            (x, y))
        conn.commit()
        print("\nCourse " + x + "/" + y + " added successfully!")
    finally:
        conn.close()


def find_course():
    """Prompt for a (partial) name and print all matching courses."""
    conn = connect('training.db')
    try:
        curs = conn.cursor()
        x = input("\nEnter course name...")
        curs.execute(
            "select * from course where coursename like ?;",
            ("%" + x + "%",))
        print("\nHere is the list...")
        for row in curs.fetchall():
            print(row)
    finally:
        conn.close()


def enroll_student():
    """Prompt for a student id and course id and record the enrollment."""
    conn = connect('training.db')
    try:
        curs = conn.cursor()
        x = input("\nEnter student id...")
        y = input("\nEnter course id...")
        curs.execute(
            "insert into enrollment(studentid, courseid) values (?, ?);",
            (x, y))
        conn.commit()
        print("\nEnrollment of student " + x + "in course" + y + "is successful")
    finally:
        conn.close()


def find_a_course_for_student():
    """Print every (student name, course name) pair for one student id."""
    conn = connect('training.db')
    try:
        curs = conn.cursor()
        x = input("\nEnter student id...")
        curs.execute(
            "select student.studentname, course.coursename "
            "from student, course, enrollment "
            "where student.studentid = enrollment.studentid "
            "and course.courseid = enrollment.courseid "
            "and enrollment.studentid = ?;",
            (x,))
        print("\nHere is the list...")
        for row in curs.fetchall():
            print(row)
    finally:
        conn.close()


def find_student_for_course():
    """Print every (student name, course name) pair for one course id."""
    conn = connect('training.db')
    try:
        curs = conn.cursor()
        x = input("\nEnter course id...")
        curs.execute(
            "select student.studentname, course.coursename "
            "from student, course, enrollment "
            "where student.studentid = enrollment.studentid "
            "and course.courseid = enrollment.courseid "
            "and enrollment.courseid = ?;",
            (x,))
        print("\nHere is the list...")
        for row in curs.fetchall():
            print(row)
    finally:
        conn.close()


print("\nWelcome To Training Application!!!")

# Dispatch table replaces the original eight-way if/elif chain.
_ACTIONS = {
    "1": add_student,
    "2": find_student,
    "3": add_course,
    "4": find_course,
    "5": enroll_student,
    "6": find_a_course_for_student,
    "7": find_student_for_course,
}

indicator = True
while indicator:
    show_menu()
    option = input("Select an option....")
    if option == "8":
        indicator = False  ##raise SystemExit (alternative to indicator = False)
    elif option in _ACTIONS:
        try:
            _ACTIONS[option]()
        except Exception:
            # narrowed from bare `except:` so Ctrl-C / SystemExit still work
            print("Error: function is unsuccessful")
[ "sqlite3.connect" ]
[((418, 440), 'sqlite3.connect', 'connect', (['"""training.db"""'], {}), "('training.db')\n", (425, 440), False, 'from sqlite3 import connect\n'), ((772, 794), 'sqlite3.connect', 'connect', (['"""training.db"""'], {}), "('training.db')\n", (779, 794), False, 'from sqlite3 import connect\n'), ((1097, 1119), 'sqlite3.connect', 'connect', (['"""training.db"""'], {}), "('training.db')\n", (1104, 1119), False, 'from sqlite3 import connect\n'), ((1444, 1466), 'sqlite3.connect', 'connect', (['"""training.db"""'], {}), "('training.db')\n", (1451, 1466), False, 'from sqlite3 import connect\n'), ((1770, 1792), 'sqlite3.connect', 'connect', (['"""training.db"""'], {}), "('training.db')\n", (1777, 1792), False, 'from sqlite3 import connect\n'), ((2146, 2168), 'sqlite3.connect', 'connect', (['"""training.db"""'], {}), "('training.db')\n", (2153, 2168), False, 'from sqlite3 import connect\n'), ((2626, 2648), 'sqlite3.connect', 'connect', (['"""training.db"""'], {}), "('training.db')\n", (2633, 2648), False, 'from sqlite3 import connect\n')]
import numpy as np
from scipy.ndimage import convolve, maximum_filter


def gauss2d(sigma, fsize):
    """
    Create a 2D Gaussian filter

    Args:
        sigma: width of the Gaussian filter
        fsize: (w, h) dimensions of the filter

    Returns:
        *normalized* Gaussian filter as (h, w) np.array
    """
    width, height = fsize
    # sample the kernel at pixel centers, symmetric about 0
    xs = np.arange(-width / 2 + 0.5, width / 2)
    ys = np.arange(-height / 2 + 0.5, height / 2)
    grid_x, grid_y = np.meshgrid(xs, ys, sparse=True)
    kernel = np.exp(-(grid_x ** 2 + grid_y ** 2) / (2 * sigma ** 2))
    return kernel / kernel.sum()


def derivative_filters():
    """
    Create derivative filters for x and y direction

    Returns:
        fx: derivative filter in x direction
        fy: derivative filter in y direction
    """
    fx = np.array([[0.5, 0.0, -0.5]])
    return fx, fx.T


def compute_hessian(img, gauss, fx, fy):
    """
    Compute elements of the Hessian matrix

    Args:
        img: input image
        gauss: Gaussian filter
        fx: derivative filter in x direction
        fy: derivative filter in y direction

    Returns:
        I_xx: (h, w) np.array of 2nd derivatives in x direction
        I_yy: (h, w) np.array of 2nd derivatives in y direction
        I_xy: (h, w) np.array of 2nd derivatives in x-y direction
    """
    border = "mirror"
    # smooth first, then take first and second derivatives
    smoothed = convolve(img, gauss, mode=border, cval=0)
    d_x = convolve(smoothed, fx, mode=border, cval=0)
    d_y = convolve(smoothed, fy, mode=border, cval=0)
    I_xx = convolve(d_x, fx, mode=border, cval=0)
    I_xy = convolve(d_x, fy, mode=border, cval=0)
    I_yy = convolve(d_y, fy, mode=border, cval=0)
    return I_xx, I_yy, I_xy


def compute_criterion(I_xx, I_yy, I_xy, sigma):
    """
    Compute criterion function

    Args:
        I_xx: (h, w) np.array of 2nd derivatives in x direction
        I_yy: (h, w) np.array of 2nd derivatives in y direction
        I_xy: (h, w) np.array of 2nd derivatives in x-y direction
        sigma: scaling factor

    Returns:
        criterion: (h, w) np.array of scaled determinant of Hessian matrix
    """
    # scale-normalized determinant of the Hessian
    determinant = I_xx * I_yy - I_xy * I_xy
    return sigma ** 4 * determinant


def nonmaxsuppression(criterion, threshold):
    """
    Apply non-maximum suppression to criterion values
    and return Hessian interest points

    Args:
        criterion: (h, w) np.array of criterion function values
        threshold: criterion threshold

    Returns:
        rows: (n,) np.array with y-positions of interest points
        cols: (n,) np.array with x-positions of interest points
    """
    # a pixel survives if it exceeds the threshold and is the maximum
    # of its 5x5 neighborhood
    local_max = maximum_filter(criterion, (5, 5), mode="mirror")
    is_peak = np.logical_and(local_max > threshold, criterion >= local_max)
    # suppress responses within 5 pixels of the image border
    interior = np.zeros_like(is_peak)
    interior[5:-5, 5:-5] = is_peak[5:-5, 5:-5]
    return np.nonzero(interior)
[ "numpy.logical_and", "scipy.ndimage.convolve", "numpy.exp", "numpy.array", "numpy.sum", "numpy.nonzero", "scipy.ndimage.maximum_filter", "numpy.meshgrid", "numpy.zeros_like", "numpy.arange" ]
[((339, 369), 'numpy.arange', 'np.arange', (['(-m / 2 + 0.5)', '(m / 2)'], {}), '(-m / 2 + 0.5, m / 2)\n', (348, 369), True, 'import numpy as np\n'), ((378, 408), 'numpy.arange', 'np.arange', (['(-n / 2 + 0.5)', '(n / 2)'], {}), '(-n / 2 + 0.5, n / 2)\n', (387, 408), True, 'import numpy as np\n'), ((422, 452), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'sparse': '(True)'}), '(x, y, sparse=True)\n', (433, 452), True, 'import numpy as np\n'), ((461, 508), 'numpy.exp', 'np.exp', (['(-(xx ** 2 + yy ** 2) / (2 * sigma ** 2))'], {}), '(-(xx ** 2 + yy ** 2) / (2 * sigma ** 2))\n', (467, 508), True, 'import numpy as np\n'), ((739, 765), 'numpy.array', 'np.array', (['[[0.5, 0, -0.5]]'], {}), '([[0.5, 0, -0.5]])\n', (747, 765), True, 'import numpy as np\n'), ((1358, 1397), 'scipy.ndimage.convolve', 'convolve', (['img', 'gauss'], {'mode': 'mode', 'cval': '(0)'}), '(img, gauss, mode=mode, cval=0)\n', (1366, 1397), False, 'from scipy.ndimage import convolve, maximum_filter\n'), ((1435, 1471), 'scipy.ndimage.convolve', 'convolve', (['img', 'fx'], {'mode': 'mode', 'cval': '(0)'}), '(img, fx, mode=mode, cval=0)\n', (1443, 1471), False, 'from scipy.ndimage import convolve, maximum_filter\n'), ((1484, 1520), 'scipy.ndimage.convolve', 'convolve', (['img', 'fy'], {'mode': 'mode', 'cval': '(0)'}), '(img, fy, mode=mode, cval=0)\n', (1492, 1520), False, 'from scipy.ndimage import convolve, maximum_filter\n'), ((1560, 1596), 'scipy.ndimage.convolve', 'convolve', (['I_x', 'fx'], {'mode': 'mode', 'cval': '(0)'}), '(I_x, fx, mode=mode, cval=0)\n', (1568, 1596), False, 'from scipy.ndimage import convolve, maximum_filter\n'), ((1610, 1646), 'scipy.ndimage.convolve', 'convolve', (['I_x', 'fy'], {'mode': 'mode', 'cval': '(0)'}), '(I_x, fy, mode=mode, cval=0)\n', (1618, 1646), False, 'from scipy.ndimage import convolve, maximum_filter\n'), ((1660, 1696), 'scipy.ndimage.convolve', 'convolve', (['I_y', 'fy'], {'mode': 'mode', 'cval': '(0)'}), '(I_y, fy, mode=mode, cval=0)\n', (1668, 1696), 
False, 'from scipy.ndimage import convolve, maximum_filter\n'), ((2727, 2775), 'scipy.ndimage.maximum_filter', 'maximum_filter', (['criterion', '(5, 5)'], {'mode': '"""mirror"""'}), "(criterion, (5, 5), mode='mirror')\n", (2741, 2775), False, 'from scipy.ndimage import convolve, maximum_filter\n'), ((2799, 2868), 'numpy.logical_and', 'np.logical_and', (['(criterion_max > threshold)', '(criterion >= criterion_max)'], {}), '(criterion_max > threshold, criterion >= criterion_max)\n', (2813, 2868), True, 'import numpy as np\n'), ((2881, 2912), 'numpy.zeros_like', 'np.zeros_like', (['criterion_thresh'], {}), '(criterion_thresh)\n', (2894, 2912), True, 'import numpy as np\n'), ((2982, 2998), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (2992, 2998), True, 'import numpy as np\n'), ((524, 533), 'numpy.sum', 'np.sum', (['g'], {}), '(g)\n', (530, 533), True, 'import numpy as np\n')]
from builtins import range
from builtins import object
import numpy as np

from comp411.layers import *
from comp411.layer_utils import *


class ThreeLayerNet(object):
    """
    A three-layer fully-connected neural network with Leaky ReLU nonlinearity
    and softmax loss that uses a modular layer design.

    Architecture: affine - leakyrelu - affine - leakyrelu - affine - softmax.
    Input dimension D, hidden dimensions (H1, H2), classification over C
    classes.  Learnable parameters live in self.params (name -> np.array);
    optimization is driven externally by a Solver object.
    """

    def __init__(self, input_dim=3*32*32, hidden_dim=(64, 32), num_classes=10,
                 weight_scale=1e-3, reg=0.0, alpha=1e-3):
        """
        Initialize a new network.

        Inputs:
        - input_dim: An integer giving the size of the input
        - hidden_dim: A tuple giving the size of the first and second hidden
          layer respectively
        - num_classes: An integer giving the number of classes to classify
        - weight_scale: Scalar giving the standard deviation for random
          initialization of the weights.
        - reg: Scalar giving L2 regularization strength.
        - alpha: negative slope of Leaky ReLU layers
        """
        self.params = {}
        self.reg = reg
        self.alpha = alpha
        # Gaussian-initialized weights, zero biases, for the three layers.
        self.params['W1'] = weight_scale * np.random.randn(input_dim, hidden_dim[0])
        self.params['b1'] = np.zeros(hidden_dim[0])
        self.params['W2'] = weight_scale * np.random.randn(hidden_dim[0], hidden_dim[1])
        self.params['b2'] = np.zeros(hidden_dim[1])
        self.params['W3'] = weight_scale * np.random.randn(hidden_dim[1], num_classes)
        self.params['b3'] = np.zeros(num_classes)

    def loss(self, X, y=None):
        """
        Compute loss and gradient for a minibatch of data.

        Inputs:
        - X: Array of input data of shape (N, d_1, ..., d_k)
        - y: Array of labels, of shape (N,). y[i] gives the label for X[i].

        Returns:
        If y is None: scores of shape (N, C).
        Otherwise: (loss, grads) where grads maps parameter names to
        gradients of the loss with respect to those parameters.
        """
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']
        W3, b3 = self.params['W3'], self.params['b3']

        # Forward pass: affine-lrelu -> affine-lrelu -> affine.
        h1, cache1 = affine_lrelu_forward(X, W1, b1, {"alpha": self.alpha})
        h2, cache2 = affine_lrelu_forward(h1, W2, b2, {"alpha": self.alpha})
        scores, cache3 = affine_forward(h2, W3, b3)

        # Test mode: just return class scores.
        if y is None:
            return scores

        loss, dscores = softmax_loss(scores, y)
        # L2 regularization with the conventional 0.5 factor.
        loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3))

        grads = {}
        dh2, grads['W3'], grads['b3'] = affine_backward(dscores, cache3)
        dh1, grads['W2'], grads['b2'] = affine_lrelu_backward(dh2, cache2)
        _, grads['W1'], grads['b1'] = affine_lrelu_backward(dh1, cache1)
        grads['W3'] += self.reg * W3
        grads['W2'] += self.reg * W2
        grads['W1'] += self.reg * W1
        return loss, grads


class FullyConnectedNet(object):
    """
    A fully-connected neural network with an arbitrary number of hidden
    layers, LeakyReLU nonlinearities, a softmax loss, and optional dropout.

    For a network with L layers the architecture is

      {affine - leakyrelu - [dropout]} x (L - 1) - affine - softmax

    where dropout is optional and the {...} block repeats L - 1 times.
    Learnable parameters are stored in self.params and learned with Solver.
    """

    def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,
                 dropout=1, reg=0.0, alpha=1e-2, weight_scale=1e-2,
                 dtype=np.float32, seed=None):
        """
        Initialize a new FullyConnectedNet.

        Inputs:
        - hidden_dims: A list of integers giving the size of each hidden layer.
        - input_dim: An integer giving the size of the input.
        - num_classes: An integer giving the number of classes to classify.
        - dropout: Scalar between 0 and 1 giving dropout strength. If
          dropout=1 then the network should not use dropout at all.
        - reg: Scalar giving L2 regularization strength.
        - alpha: negative slope of Leaky ReLU layers
        - weight_scale: Scalar giving the standard deviation for random
          initialization of the weights.
        - dtype: A numpy datatype object; all computations will be performed
          using this datatype. float32 is faster but less accurate, so you
          should use float64 for numeric gradient checking.
        - seed: If not None, then pass this random seed to the dropout
          layers, making them deterministic for gradient checking.
        """
        self.use_dropout = dropout != 1
        self.reg = reg
        self.alpha = alpha
        self.num_layers = 1 + len(hidden_dims)
        self.dtype = dtype
        self.params = {}

        # Per-layer weights W1..WL and biases b1..bL, Gaussian / zero init.
        dims = np.hstack((input_dim, hidden_dims, num_classes))
        for layer in range(self.num_layers):
            self.params['W%d' % (layer + 1)] = weight_scale * np.random.randn(dims[layer], dims[layer + 1])
            self.params['b%d' % (layer + 1)] = np.zeros(dims[layer + 1])

        # One shared dropout_param dict tells every dropout layer its
        # probability and train/test mode.
        self.dropout_param = {}
        if self.use_dropout:
            self.dropout_param = {'mode': 'train', 'p': dropout}
            if seed is not None:
                self.dropout_param['seed'] = seed

        # Cast all parameters to the requested datatype.
        for k, v in self.params.items():
            self.params[k] = v.astype(dtype)

    def loss(self, X, y=None):
        """
        Compute loss and gradient for the fully-connected net.

        Input / output: Same as ThreeLayerNet above.
        """
        X = X.astype(self.dtype)
        mode = 'test' if y is None else 'train'
        if self.use_dropout:
            self.dropout_param['mode'] = mode

        # BUGFIX: the original relied on the forward loop's leaked index `i`
        # after the loop ended; with hidden_dims == [] that loop never ran
        # and loss() raised NameError.  Use explicit layer indices instead.
        num_hidden = self.num_layers - 1
        last = num_hidden + 1  # 1-based index of the final affine layer

        # ---- forward pass -------------------------------------------------
        out = X
        caches = []
        l2 = 0.0
        for layer in range(num_hidden):
            out, cache = affine_lrelu_forward(
                out,
                self.params['W%d' % (layer + 1)],
                self.params['b%d' % (layer + 1)],
                {"alpha": self.alpha})
            caches.append(cache)
            if self.use_dropout:
                out, cache = dropout_forward(out, self.dropout_param)
                caches.append(cache)
            l2 += np.sum(self.params['W%d' % (layer + 1)] ** 2)

        scores, cache = affine_forward(
            out, self.params['W%d' % last], self.params['b%d' % last])
        caches.append(cache)
        l2 += np.sum(self.params['W%d' % last] ** 2)
        l2 *= 0.5 * self.reg

        if mode == 'test':
            return scores

        # ---- backward pass ------------------------------------------------
        grads = {}
        loss, dout = softmax_loss(scores, y)
        loss += l2

        dout, grads['W%d' % last], grads['b%d' % last] = \
            affine_backward(dout, caches.pop())
        grads['W%d' % last] += self.reg * self.params['W%d' % last]

        for layer in reversed(range(num_hidden)):
            if self.use_dropout:
                dout = dropout_backward(dout, caches.pop())
            dout, grads['W%d' % (layer + 1)], grads['b%d' % (layer + 1)] = \
                affine_lrelu_backward(dout, caches.pop())
            grads['W%d' % (layer + 1)] += self.reg * self.params['W%d' % (layer + 1)]

        return loss, grads
[ "numpy.hstack", "numpy.sum", "builtins.range", "numpy.zeros", "numpy.random.randn" ]
[((2953, 2976), 'numpy.zeros', 'np.zeros', (['hidden_dim[0]'], {}), '(hidden_dim[0])\n', (2961, 2976), True, 'import numpy as np\n'), ((3005, 3028), 'numpy.zeros', 'np.zeros', (['hidden_dim[1]'], {}), '(hidden_dim[1])\n', (3013, 3028), True, 'import numpy as np\n'), ((3057, 3078), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', (3065, 3078), True, 'import numpy as np\n'), ((10193, 10241), 'numpy.hstack', 'np.hstack', (['(input_dim, hidden_dims, num_classes)'], {}), '((input_dim, hidden_dims, num_classes))\n', (10202, 10241), True, 'import numpy as np\n'), ((10260, 10282), 'builtins.range', 'range', (['self.num_layers'], {}), '(self.num_layers)\n', (10265, 10282), False, 'from builtins import range\n'), ((12712, 12729), 'builtins.range', 'range', (['hidden_num'], {}), '(hidden_num)\n', (12717, 12729), False, 'from builtins import range\n'), ((13348, 13389), 'numpy.sum', 'np.sum', (["(self.params['W%d' % (i + 1)] ** 2)"], {}), "(self.params['W%d' % (i + 1)] ** 2)\n", (13354, 13389), True, 'import numpy as np\n'), ((2710, 2751), 'numpy.random.randn', 'np.random.randn', (['input_dim', 'hidden_dim[0]'], {}), '(input_dim, hidden_dim[0])\n', (2725, 2751), True, 'import numpy as np\n'), ((2794, 2839), 'numpy.random.randn', 'np.random.randn', (['hidden_dim[0]', 'hidden_dim[1]'], {}), '(hidden_dim[0], hidden_dim[1])\n', (2809, 2839), True, 'import numpy as np\n'), ((2882, 2925), 'numpy.random.randn', 'np.random.randn', (['hidden_dim[1]', 'num_classes'], {}), '(hidden_dim[1], num_classes)\n', (2897, 2925), True, 'import numpy as np\n'), ((10421, 10442), 'numpy.zeros', 'np.zeros', (['dims[i + 1]'], {}), '(dims[i + 1])\n', (10429, 10442), True, 'import numpy as np\n'), ((13084, 13125), 'numpy.sum', 'np.sum', (["(self.params['W%d' % (i + 1)] ** 2)"], {}), "(self.params['W%d' % (i + 1)] ** 2)\n", (13090, 13125), True, 'import numpy as np\n'), ((6601, 6616), 'numpy.sum', 'np.sum', (['(W3 * W3)'], {}), '(W3 * W3)\n', (6607, 6616), True, 'import numpy as np\n'), 
((10342, 10379), 'numpy.random.randn', 'np.random.randn', (['dims[i]', 'dims[i + 1]'], {}), '(dims[i], dims[i + 1])\n', (10357, 10379), True, 'import numpy as np\n'), ((6565, 6580), 'numpy.sum', 'np.sum', (['(W1 * W1)'], {}), '(W1 * W1)\n', (6571, 6580), True, 'import numpy as np\n'), ((6583, 6598), 'numpy.sum', 'np.sum', (['(W2 * W2)'], {}), '(W2 * W2)\n', (6589, 6598), True, 'import numpy as np\n')]
import os
from socket import *
from time import ctime

HOST = ''
PORT = 9733
BUFSIZ = 1024
ADDR = (HOST, PORT)

# Every artifact produced for one recording, by filename suffix.  The
# original repeated the same exists/remove/send stanza six times.
SUFFIXES = (
    ".csv",
    ".pdf",
    "_Y-axis.png",
    "_Z-axis.png",
    "_X-axis.png",
    "_combined.png",
)

tcpSerSock = socket(AF_INET, SOCK_STREAM)
tcpSerSock.bind(ADDR)
tcpSerSock.listen(32)

while True:
    print('waiting for connection...')
    tcpCliSock, addr = tcpSerSock.accept()
    print('...connected from:', addr)
    try:
        data = tcpCliSock.recv(BUFSIZ)
        filename = data.decode("utf-8")
        # Delete each artifact in turn.  On the first missing file reply
        # "0" and stop, mirroring the original per-file early exit.
        for suffix in SUFFIXES:
            path = filename + suffix
            if os.path.exists(path):
                os.remove(path)
            else:
                tcpCliSock.send("0".encode())
                break
        else:
            # all six files removed
            tcpCliSock.send("file finish".encode())
    finally:
        # BUGFIX: the original `continue`d without closing on the "0"
        # path, leaking one client socket per failed request.
        tcpCliSock.close()

tcpSerSock.close()  # unreachable (loop never exits); kept for symmetry
[ "os.path.exists", "os.remove" ]
[((426, 459), 'os.path.exists', 'os.path.exists', (["(filename + '.csv')"], {}), "(filename + '.csv')\n", (440, 459), False, 'import os\n'), ((576, 609), 'os.path.exists', 'os.path.exists', (["(filename + '.pdf')"], {}), "(filename + '.pdf')\n", (590, 609), False, 'import os\n'), ((722, 767), 'os.path.exists', 'os.path.exists', (["(filename + '_' + 'Y-axis.png')"], {}), "(filename + '_' + 'Y-axis.png')\n", (736, 767), False, 'import os\n'), ((888, 933), 'os.path.exists', 'os.path.exists', (["(filename + '_' + 'Z-axis.png')"], {}), "(filename + '_' + 'Z-axis.png')\n", (902, 933), False, 'import os\n'), ((1054, 1099), 'os.path.exists', 'os.path.exists', (["(filename + '_' + 'X-axis.png')"], {}), "(filename + '_' + 'X-axis.png')\n", (1068, 1099), False, 'import os\n'), ((1224, 1271), 'os.path.exists', 'os.path.exists', (["(filename + '_' + 'combined.png')"], {}), "(filename + '_' + 'combined.png')\n", (1238, 1271), False, 'import os\n'), ((470, 498), 'os.remove', 'os.remove', (["(filename + '.csv')"], {}), "(filename + '.csv')\n", (479, 498), False, 'import os\n'), ((620, 648), 'os.remove', 'os.remove', (["(filename + '.pdf')"], {}), "(filename + '.pdf')\n", (629, 648), False, 'import os\n'), ((776, 816), 'os.remove', 'os.remove', (["(filename + '_' + 'Y-axis.png')"], {}), "(filename + '_' + 'Y-axis.png')\n", (785, 816), False, 'import os\n'), ((942, 982), 'os.remove', 'os.remove', (["(filename + '_' + 'Z-axis.png')"], {}), "(filename + '_' + 'Z-axis.png')\n", (951, 982), False, 'import os\n'), ((1108, 1148), 'os.remove', 'os.remove', (["(filename + '_' + 'X-axis.png')"], {}), "(filename + '_' + 'X-axis.png')\n", (1117, 1148), False, 'import os\n'), ((1280, 1322), 'os.remove', 'os.remove', (["(filename + '_' + 'combined.png')"], {}), "(filename + '_' + 'combined.png')\n", (1289, 1322), False, 'import os\n')]
# SPDX-FileCopyrightText: 2021 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: MIT

import setuptools

# Project home; reused for every project URL below.
_GITHUB_URL = "https://github.com/pico-wizard/pico-wizard"

setuptools.setup(
    name="pico-wizard",
    version="0.1.0",
    author="<NAME>",
    author_email="<EMAIL>",
    description="A Post Installation COnfiguration tool",
    long_description="A Post Installation COnfiguration tool for Linux OSes",
    long_description_content_type="text/plain",
    scripts=["files/pico-wizard-script-runner"],
    entry_points={
        "console_scripts": [
            "pico-wizard = PicoWizard.__main__:__main__",
        ]
    },
    url=_GITHUB_URL,
    project_urls={
        "Bug Tracker": _GITHUB_URL + "/issues",
        "Documentation": _GITHUB_URL,
        "Source Code": _GITHUB_URL,
    },
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
    ],
    license="MIT",
    install_requires=[
        ### Pyside2 needs to be installed from manjaro repository
        ### pip doesnt provide prebuilt arm64
        # "pyside2"
    ],
    python_requires=">=3.6",
    package_data={
        "": [
            "*.qml",
            "**/*.qml",
            "**/*.svg",
            "**/*.svg.license",
            "**/*.sh",
            "**/qmldir",
            "PicoWizard/**/*.svg",
        ]
    },
    include_package_data=True,
)
[ "setuptools.find_packages" ]
[((878, 904), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (902, 904), False, 'import setuptools\n')]
import torch import torch.nn as nn import torch.nn.functional as F from ..decoder import ConvDecoder from ..encoder import build_encoder from ..modules import conv, deconv from ..similarity import CorrelationLayer from ..utils import warp from .build import MODEL_REGISTRY @MODEL_REGISTRY.register() class PWCNet(nn.Module): """ Implementation of the paper `PWC-Net: CNNs for Optical Flow Using Pyramid, Warping, and Cost Volume <https://arxiv.org/abs/1709.02371>`_ Parameters ---------- cfg : :class:`CfgNode` Configuration for the model """ def __init__(self, cfg): super(PWCNet, self).__init__() self.cfg = cfg self.encoder = build_encoder(cfg.ENCODER) self.correlation_layer = CorrelationLayer( pad_size=cfg.SIMILARITY.PAD_SIZE, max_displacement=cfg.SIMILARITY.MAX_DISPLACEMENT, ) search_range = (2 * cfg.SIMILARITY.MAX_DISPLACEMENT + 1) ** 2 self.decoder_layers = nn.ModuleList() decoder_cfg = cfg.DECODER.CONFIG self.up_feature_layers = nn.ModuleList() for i in range(len(decoder_cfg)): if i == 0: concat_channels = search_range else: concat_channels = ( search_range + decoder_cfg[i] + cfg.SIMILARITY.MAX_DISPLACEMENT ) self.decoder_layers.append( ConvDecoder( config=decoder_cfg, to_flow=True, concat_channels=concat_channels, ) ) self.up_feature_layers.append( deconv( concat_channels + sum(decoder_cfg), 2, kernel_size=4, stride=2, padding=1, ) ) self.deconv_layers = nn.ModuleList() for i in range(len(decoder_cfg)): self.deconv_layers.append(deconv(2, 2, kernel_size=4, stride=2, padding=1)) self.dc_conv = nn.ModuleList( [ conv( search_range + cfg.SIMILARITY.MAX_DISPLACEMENT + decoder_cfg[-1] + sum(decoder_cfg), 128, kernel_size=3, stride=1, padding=1, dilation=1, ), ] ) self.dc_conv.append( conv( decoder_cfg[0], decoder_cfg[0], kernel_size=3, stride=1, padding=2, dilation=2, ) ) padding = 4 dilation = 4 for i in range(len(decoder_cfg) - 2): self.dc_conv.append( conv( decoder_cfg[i], decoder_cfg[i + 1], kernel_size=3, stride=1, padding=padding, dilation=dilation, ) ) padding *= 
2 dilation *= 2 self.dc_conv.append( conv( decoder_cfg[3], decoder_cfg[4], kernel_size=3, stride=1, padding=1, dilation=1, ) ) self.dc_conv.append( nn.Conv2d(32, 2, kernel_size=3, stride=1, padding=1, bias=True) ) self.dc_conv = nn.Sequential(*self.dc_conv) self._init_weights() def _init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): nn.init.kaiming_normal_(m.weight.data, mode="fan_in") if m.bias is not None: m.bias.data.zero_() def _corr_relu(self, features1, features2): corr = self.correlation_layer(features1, features2) return F.leaky_relu(corr, negative_slope=0.1) def forward(self, img1, img2): """ Performs forward pass of the network Parameters ---------- img1 : torch.Tensor Image to predict flow from img2 : torch.Tensor Image to predict flow to Returns ------- torch.Tensor Flow from img1 to img2 """ H, W = img1.shape[-2:] feature_pyramid1 = self.encoder(img1) feature_pyramid2 = self.encoder(img2) up_flow, up_features = None, None up_flow_scale = 0.625 flow_preds = [] for i in range(len(self.decoder_layers)): if i == 0: corr = self._corr_relu(feature_pyramid1[i], feature_pyramid2[i]) concatenated_features = corr else: warped_features = warp(feature_pyramid2[i], up_flow * up_flow_scale) up_flow_scale *= 2 corr = self._corr_relu(feature_pyramid1[i], warped_features) concatenated_features = torch.cat( [corr, feature_pyramid1[i], up_flow, up_features], dim=1 ) flow, features = self.decoder_layers[i](concatenated_features) flow_preds.append(flow) up_flow = self.deconv_layers[i](flow) up_features = self.up_feature_layers[i](features) flow_preds.reverse() flow_preds[0] += self.dc_conv(features) if self.training: return flow_preds else: flow = flow_preds[0] if self.cfg.INTERPOLATE_FLOW: H_, W_ = flow.shape[-2:] flow = F.interpolate( flow, img1.shape[-2:], mode="bilinear", align_corners=True ) flow_u = flow[:, 0, :, :] * (W / W_) flow_v = flow[:, 1, :, :] * (H / H_) flow = torch.stack([flow_u, flow_v], dim=1) 
if self.cfg.FLOW_SCALE_FACTOR is not None: flow *= self.cfg.FLOW_SCALE_FACTOR return flow
[ "torch.nn.functional.leaky_relu", "torch.nn.ModuleList", "torch.nn.Sequential", "torch.stack", "torch.nn.init.kaiming_normal_", "torch.nn.Conv2d", "torch.nn.functional.interpolate", "torch.cat" ]
[((997, 1012), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1010, 1012), True, 'import torch.nn as nn\n'), ((1088, 1103), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1101, 1103), True, 'import torch.nn as nn\n'), ((1908, 1923), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1921, 1923), True, 'import torch.nn as nn\n'), ((3566, 3594), 'torch.nn.Sequential', 'nn.Sequential', (['*self.dc_conv'], {}), '(*self.dc_conv)\n', (3579, 3594), True, 'import torch.nn as nn\n'), ((4042, 4080), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['corr'], {'negative_slope': '(0.1)'}), '(corr, negative_slope=0.1)\n', (4054, 4080), True, 'import torch.nn.functional as F\n'), ((3469, 3532), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), '(32, 2, kernel_size=3, stride=1, padding=1, bias=True)\n', (3478, 3532), True, 'import torch.nn as nn\n'), ((3783, 3836), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight.data'], {'mode': '"""fan_in"""'}), "(m.weight.data, mode='fan_in')\n", (3806, 3836), True, 'import torch.nn as nn\n'), ((5129, 5196), 'torch.cat', 'torch.cat', (['[corr, feature_pyramid1[i], up_flow, up_features]'], {'dim': '(1)'}), '([corr, feature_pyramid1[i], up_flow, up_features], dim=1)\n', (5138, 5196), False, 'import torch\n'), ((5752, 5825), 'torch.nn.functional.interpolate', 'F.interpolate', (['flow', 'img1.shape[-2:]'], {'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(flow, img1.shape[-2:], mode='bilinear', align_corners=True)\n", (5765, 5825), True, 'import torch.nn.functional as F\n'), ((5993, 6029), 'torch.stack', 'torch.stack', (['[flow_u, flow_v]'], {'dim': '(1)'}), '([flow_u, flow_v], dim=1)\n', (6004, 6029), False, 'import torch\n')]
# -*- coding: utf-8 -*- import pathlib from setuptools import setup def read(file_name): file_path = pathlib.Path(__file__).parent / file_name return file_path.read_text('utf-8') setup( name='cibopath', version='0.1.0', author='<NAME>', author_email='<EMAIL>', maintainer='<NAME>', maintainer_email='<EMAIL>', license='BSD', url='https://github.com/hackebrot/cibopath', description='Search Cookiecutters on GitHub.', long_description=read('README.rst'), packages=[ 'cibopath', ], package_dir={'cibopath': 'cibopath'}, include_package_data=True, zip_safe=False, install_requires=[ 'click', 'aiohttp', ], entry_points={ 'console_scripts': [ 'cibopath = cibopath.cli:main', ] }, classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3 :: Only', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: Implementation :: CPython', ], keywords=['cookiecutter', 'web scraping', 'asyncio', 'command-line'], )
[ "pathlib.Path" ]
[((109, 131), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (121, 131), False, 'import pathlib\n')]
from django.test import TestCase from django.contrib.auth import get_user_model class ModelTestCase(TestCase): def test_create_user_with_email_successful(self): """Test creating a new user with email is successful""" email = "<EMAIL>" password = "<PASSWORD>" user = get_user_model().objects.create_user(email=email, password=password) self.assertEqual(user.email, email) self.assertTrue(user.check_password(password)) def test_new_user_email_normalized(self): """Test creating a new user with email is normalized""" email = "<EMAIL>" user = get_user_model().objects.create_user(email=email, password='<PASSWORD>') self.assertEqual(user.email, email.lower()) def test_new_user_invalid_email(self): """Test creating a new user with invalid email raise exception""" with self.assertRaises(ValueError): get_user_model().objects.create_user(email=None, password='<PASSWORD>') def test_new_user_is_superuser(self): """Test creating a new super user""" user = get_user_model().objects.create_superuser( email="<EMAIL>", password="<PASSWORD>") self.assertTrue(user.is_superuser) self.assertTrue(user.is_staff)
[ "django.contrib.auth.get_user_model" ]
[((306, 322), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (320, 322), False, 'from django.contrib.auth import get_user_model\n'), ((679, 695), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (693, 695), False, 'from django.contrib.auth import get_user_model\n'), ((1255, 1271), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1269, 1271), False, 'from django.contrib.auth import get_user_model\n'), ((1031, 1047), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1045, 1047), False, 'from django.contrib.auth import get_user_model\n')]
# coding: utf-8 import matplotlib import matplotlib.pyplot as plt import matplotlib.ticker as mticker import numpy as np def card_num_distribution(): """Plot `Std-CardNumDistribution.png`.""" total = np.fromstring('12 14 12 12 13 12 12 12 12 12 12 14 12', sep=' ') jb = np.fromstring('0 6 6 6 6 8 9 11 11 10 7 3 2', sep=' ') jn = np.fromstring('7 1 6 6 0 3 3 1 1 2 5 9 6', sep=' ') zb = np.fromstring('5 7 0 0 7 1 0 0 0 0 0 2 4', sep=' ') jb /= total jn /= total zb /= total x = np.arange(1, 14, 1) xlabels = 'A 2 3 4 5 6 7 8 9 10 J Q K'.split() plt.plot(x, jb, '*-', color='k', label='εŸΊζœ¬η‰Œ') plt.plot(x, jn, 'o-', color='b', label='ι”¦ε›Šη‰Œ') plt.plot(x, zb, '+-', color='r', label='θ£…ε€‡η‰Œ') plt.legend() plt.grid() plt.ylim(ymin=-0.01, ymax=1.01) ax = plt.gca() ax.yaxis.set_major_formatter(mticker.PercentFormatter(1.0)) plt.xticks(x, xlabels) plt.show() def card_suit_distribution(): """Plot `Std-CardSuitDistribution.png`.""" jb = np.fromstring('14 22 20 29', sep=' ') jn = np.fromstring('16 15 14 5', sep=' ') zb = np.fromstring('10 3 6 7', sep=' ') total = np.fromstring('40 40 40 41', sep=' ') jb /= total jn /= total zb /= total x = np.arange(1, 5, 1) xlabels = '黑摃 纒摃 θ‰θŠ± 方片'.split() plt.bar(x - 0.2, jb, color='k', width=0.2, label='εŸΊζœ¬η‰Œ') plt.bar(x, jn, color='b', width=0.2, label='ι”¦ε›Šη‰Œ') plt.bar(x + 0.2, zb, color='r', width=0.2, label='θ£…ε€‡η‰Œ') plt.legend() plt.grid() ax = plt.gca() ax.yaxis.set_major_formatter(mticker.PercentFormatter(1.0)) plt.xticks(x, xlabels) plt.show() def main(): matplotlib.rc('font',**{ 'sans-serif': 'Microsoft YaHei' }) # card_num_distribution() card_suit_distribution() if __name__ == '__main__': main()
[ "matplotlib.pyplot.grid", "matplotlib.pyplot.xticks", "numpy.arange", "matplotlib.ticker.PercentFormatter", "matplotlib.pyplot.gca", "matplotlib.pyplot.plot", "matplotlib.pyplot.bar", "matplotlib.rc", "matplotlib.pyplot.ylim", "numpy.fromstring", "matplotlib.pyplot.legend", "matplotlib.pyplot....
[((210, 274), 'numpy.fromstring', 'np.fromstring', (['"""12 14 12 12 13 12 12 12 12 12 12 14 12"""'], {'sep': '""" """'}), "('12 14 12 12 13 12 12 12 12 12 12 14 12', sep=' ')\n", (223, 274), True, 'import numpy as np\n'), ((285, 339), 'numpy.fromstring', 'np.fromstring', (['"""0 6 6 6 6 8 9 11 11 10 7 3 2"""'], {'sep': '""" """'}), "('0 6 6 6 6 8 9 11 11 10 7 3 2', sep=' ')\n", (298, 339), True, 'import numpy as np\n'), ((349, 400), 'numpy.fromstring', 'np.fromstring', (['"""7 1 6 6 0 3 3 1 1 2 5 9 6"""'], {'sep': '""" """'}), "('7 1 6 6 0 3 3 1 1 2 5 9 6', sep=' ')\n", (362, 400), True, 'import numpy as np\n'), ((410, 461), 'numpy.fromstring', 'np.fromstring', (['"""5 7 0 0 7 1 0 0 0 0 0 2 4"""'], {'sep': '""" """'}), "('5 7 0 0 7 1 0 0 0 0 0 2 4', sep=' ')\n", (423, 461), True, 'import numpy as np\n'), ((520, 539), 'numpy.arange', 'np.arange', (['(1)', '(14)', '(1)'], {}), '(1, 14, 1)\n', (529, 539), True, 'import numpy as np\n'), ((595, 640), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'jb', '"""*-"""'], {'color': '"""k"""', 'label': '"""εŸΊζœ¬η‰Œ"""'}), "(x, jb, '*-', color='k', label='εŸΊζœ¬η‰Œ')\n", (603, 640), True, 'import matplotlib.pyplot as plt\n'), ((645, 690), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'jn', '"""o-"""'], {'color': '"""b"""', 'label': '"""ι”¦ε›Šη‰Œ"""'}), "(x, jn, 'o-', color='b', label='ι”¦ε›Šη‰Œ')\n", (653, 690), True, 'import matplotlib.pyplot as plt\n'), ((695, 740), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'zb', '"""+-"""'], {'color': '"""r"""', 'label': '"""θ£…ε€‡η‰Œ"""'}), "(x, zb, '+-', color='r', label='θ£…ε€‡η‰Œ')\n", (703, 740), True, 'import matplotlib.pyplot as plt\n'), ((746, 758), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (756, 758), True, 'import matplotlib.pyplot as plt\n'), ((763, 773), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (771, 773), True, 'import matplotlib.pyplot as plt\n'), ((778, 809), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(-0.01)', 'ymax': '(1.01)'}), 
'(ymin=-0.01, ymax=1.01)\n', (786, 809), True, 'import matplotlib.pyplot as plt\n'), ((819, 828), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (826, 828), True, 'import matplotlib.pyplot as plt\n'), ((897, 919), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', 'xlabels'], {}), '(x, xlabels)\n', (907, 919), True, 'import matplotlib.pyplot as plt\n'), ((925, 935), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (933, 935), True, 'import matplotlib.pyplot as plt\n'), ((1024, 1061), 'numpy.fromstring', 'np.fromstring', (['"""14 22 20 29"""'], {'sep': '""" """'}), "('14 22 20 29', sep=' ')\n", (1037, 1061), True, 'import numpy as np\n'), ((1071, 1107), 'numpy.fromstring', 'np.fromstring', (['"""16 15 14 5"""'], {'sep': '""" """'}), "('16 15 14 5', sep=' ')\n", (1084, 1107), True, 'import numpy as np\n'), ((1117, 1151), 'numpy.fromstring', 'np.fromstring', (['"""10 3 6 7"""'], {'sep': '""" """'}), "('10 3 6 7', sep=' ')\n", (1130, 1151), True, 'import numpy as np\n'), ((1164, 1201), 'numpy.fromstring', 'np.fromstring', (['"""40 40 40 41"""'], {'sep': '""" """'}), "('40 40 40 41', sep=' ')\n", (1177, 1201), True, 'import numpy as np\n'), ((1260, 1278), 'numpy.arange', 'np.arange', (['(1)', '(5)', '(1)'], {}), '(1, 5, 1)\n', (1269, 1278), True, 'import numpy as np\n'), ((1320, 1375), 'matplotlib.pyplot.bar', 'plt.bar', (['(x - 0.2)', 'jb'], {'color': '"""k"""', 'width': '(0.2)', 'label': '"""εŸΊζœ¬η‰Œ"""'}), "(x - 0.2, jb, color='k', width=0.2, label='εŸΊζœ¬η‰Œ')\n", (1327, 1375), True, 'import matplotlib.pyplot as plt\n'), ((1380, 1429), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'jn'], {'color': '"""b"""', 'width': '(0.2)', 'label': '"""ι”¦ε›Šη‰Œ"""'}), "(x, jn, color='b', width=0.2, label='ι”¦ε›Šη‰Œ')\n", (1387, 1429), True, 'import matplotlib.pyplot as plt\n'), ((1434, 1489), 'matplotlib.pyplot.bar', 'plt.bar', (['(x + 0.2)', 'zb'], {'color': '"""r"""', 'width': '(0.2)', 'label': '"""θ£…ε€‡η‰Œ"""'}), "(x + 0.2, zb, color='r', width=0.2, 
label='θ£…ε€‡η‰Œ')\n", (1441, 1489), True, 'import matplotlib.pyplot as plt\n'), ((1495, 1507), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1505, 1507), True, 'import matplotlib.pyplot as plt\n'), ((1512, 1522), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1520, 1522), True, 'import matplotlib.pyplot as plt\n'), ((1532, 1541), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1539, 1541), True, 'import matplotlib.pyplot as plt\n'), ((1610, 1632), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', 'xlabels'], {}), '(x, xlabels)\n', (1620, 1632), True, 'import matplotlib.pyplot as plt\n'), ((1638, 1648), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1646, 1648), True, 'import matplotlib.pyplot as plt\n'), ((1667, 1725), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **{'sans-serif': 'Microsoft YaHei'})\n", (1680, 1725), False, 'import matplotlib\n'), ((862, 891), 'matplotlib.ticker.PercentFormatter', 'mticker.PercentFormatter', (['(1.0)'], {}), '(1.0)\n', (886, 891), True, 'import matplotlib.ticker as mticker\n'), ((1575, 1604), 'matplotlib.ticker.PercentFormatter', 'mticker.PercentFormatter', (['(1.0)'], {}), '(1.0)\n', (1599, 1604), True, 'import matplotlib.ticker as mticker\n')]
"""Test hyperparameter tuning.""" import pytest from nbaspa.model.tasks import ( LifelinesTuning, SegmentData, SurvivalData, XGBoostTuning, ) @pytest.fixture(scope="module") def survivaldata(data): """Create survival data for the hyperparameter tuning.""" pre = SurvivalData() df = pre.run(data) seg = SegmentData() segdata = seg.run(data=df, splits=[0.6, 0.2, 0.2], keys=["train", "tune", "stop"], seed=42) return segdata["train"], segdata["tune"], segdata["stop"] def test_lifelines_tuning(survivaldata): """Test running hyperparameter tuning with Lifelines.""" train, tune, _ = survivaldata # Drop nulls because of weirdness with fitting lifelines on random data train = train.dropna() tune = tune.dropna() tsk = LifelinesTuning() output = tsk.run( train_data=train, tune_data=[tune], max_evals=10, ) assert isinstance(output, dict) assert len(output["trials"].trials) <= 10 assert "l1_ratio" in output["best"] assert "penalizer" in output["best"] assert hasattr(tsk, "best_") assert hasattr(tsk, "metric_") def test_xgboost_tuning(survivaldata): """Test running hyperparameter tuning with XGBoost.""" train, tune, stop = survivaldata tsk = XGBoostTuning() output = tsk.run( train_data=train, tune_data=[tune], stopping_data=stop, max_evals=10, ) assert isinstance(output, dict) assert len(output["trials"].trials) <= 10 assert hasattr(tsk, "best_") assert hasattr(tsk, "metric_") assert "learning_rate" in output["best"] assert "subsample" in output["best"] assert "max_delta_step" in output["best"] assert "max_depth" in output["best"] assert "gamma" in output["best"] assert "reg_alpha" in output["best"] assert "reg_lambda" in output["best"] assert "colsample_bytree" in output["best"] assert "min_child_weight" in output["best"]
[ "nbaspa.model.tasks.SegmentData", "nbaspa.model.tasks.LifelinesTuning", "nbaspa.model.tasks.SurvivalData", "nbaspa.model.tasks.XGBoostTuning", "pytest.fixture" ]
[((162, 192), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (176, 192), False, 'import pytest\n'), ((289, 303), 'nbaspa.model.tasks.SurvivalData', 'SurvivalData', ([], {}), '()\n', (301, 303), False, 'from nbaspa.model.tasks import LifelinesTuning, SegmentData, SurvivalData, XGBoostTuning\n'), ((337, 350), 'nbaspa.model.tasks.SegmentData', 'SegmentData', ([], {}), '()\n', (348, 350), False, 'from nbaspa.model.tasks import LifelinesTuning, SegmentData, SurvivalData, XGBoostTuning\n'), ((785, 802), 'nbaspa.model.tasks.LifelinesTuning', 'LifelinesTuning', ([], {}), '()\n', (800, 802), False, 'from nbaspa.model.tasks import LifelinesTuning, SegmentData, SurvivalData, XGBoostTuning\n'), ((1283, 1298), 'nbaspa.model.tasks.XGBoostTuning', 'XGBoostTuning', ([], {}), '()\n', (1296, 1298), False, 'from nbaspa.model.tasks import LifelinesTuning, SegmentData, SurvivalData, XGBoostTuning\n')]
from domain.import_object import * from modules.defaultmodule import DefaultModule from domain.financialReports import FinancialPowerPlantReport from domain.powerplant import PowerPlant from domain.cashflow import CashFlow from domain.technologies import * import logging class CreatingFinancialReports(DefaultModule): def __init__(self, reps): super().__init__("Creating Financial Reports", reps) reps.dbrw.stage_init_financial_results_structure() def act(self): # fuelPriceMap = {} # for substance in self.reps.substances: # fuelPriceMap.update({substance: findLastKnownPriceForSubstance(substance)}) #TODO WHY findAllPowerPlantsWhichAreNotDismantledBeforeTick(self.reps.current_tick - 2) self.createFinancialReportsForPowerPlantsAndTick(self.reps.power_plants, self.reps.current_tick) print("finished financial report") def createFinancialReportsForNewInvestments(self): self.createFinancialReportsForPowerPlantsAndTick(self.reps.findAllPowerPlantsWithConstructionStartTimeInTick(self.reps.current_tick), self.reps.current_tick) def createFinancialReportsForPowerPlantsAndTick(self, plants, tick): # todo -> probably this is needed only for operational power plants financialPowerPlantReports = [] for plant in plants.values(): financialPowerPlantReport = FinancialPowerPlantReport(plant.name, self.reps) financialPowerPlantReport.setTime(tick) financialPowerPlantReport.setPowerPlant(plant.name) totalSupply = plant.getAwardedPowerinMWh() financialPowerPlantReport.setProduction(totalSupply) financialPowerPlantReport.setSpotMarketRevenue(plant.ReceivedMoneyinEUR) financialPowerPlantReport.setProfit(plant.Profit) financialPowerPlantReports.append(financialPowerPlantReport) self.reps.dbrw.stage_financial_results(financialPowerPlantReports) # if plant.getFuelMix() is None: # plant.setFuelMix(java.util.HashSet()) # for share in plant.getFuelMix(): # amount = share.getShare() * totalSupply # substance = share.getSubstance() # substanceCost = findLastKnownPriceForSubstance(substance) * 
amount # financialPowerPlantReport.setCommodityCosts(financialPowerPlantReport.getCommodityCosts() + substanceCost) #TODO add cash flows #cashFlows = self.reps.getCashFlowsForPowerPlant(plant, tick) #financialPowerPlantReport.setCo2Costs(self.calculateCO2CostsOfPowerPlant(cashFlows)) #financialPowerPlantReport.setVariableCosts(financialPowerPlantReport.getCommodityCosts() + financialPowerPlantReport.getCo2Costs()) #Determine fixed costs #financialPowerPlantReport.setFixedCosts(self.calculateFixedCostsOfPowerPlant(cashFlows)) #financialPowerPlantReport.setFixedOMCosts(self.calculateFixedOMCostsOfPowerPlant(cashFlows)) #financialPowerPlantReport.setStrategicReserveRevenue(self.calculateStrategicReserveRevenueOfPowerPlant(cashFlows)) #financialPowerPlantReport.setCapacityMarketRevenue(self.calculateCapacityMarketRevenueOfPowerPlant(cashFlows)) #financialPowerPlantReport.setCo2HedgingRevenue(self.calculateCO2HedgingRevenueOfPowerPlant(cashFlows)) #financialPowerPlantReport.setOverallRevenue(financialPowerPlantReport.getCapacityMarketRevenue() + financialPowerPlantReport.getCo2HedgingRevenue() + financialPowerPlantReport.getSpotMarketRevenue() + financialPowerPlantReport.getStrategicReserveRevenue()) # Calculate Full load hours #financialPowerPlantReport.setFullLoadHours(self.reps.calculateFullLoadHoursOfPowerPlant(plant, tick)) # # def calculateSpotMarketRevenueOfPowerPlant(self, cashFlows): # toReturn = cashFlows.stream().filter(lambda p : p.getType() == emlab.gen.domain.contract.CashFlow.ELECTRICITY_SPOT).collect(java.util.stream.Collectors.summarizingDouble(emlab.gen.domain.contract.CashFlow::getMoney)).getSum() # java.util.logging.Logger.getGlobal().finer("Income Spot " + toReturn) # return toReturn # # def calculateLongTermContractRevenueOfPowerPlant(self, cashFlows): # toReturn = cashFlows.stream().filter(lambda p : p.getType() == 
emlab.gen.domain.contract.CashFlow.ELECTRICITY_LONGTERM).collect(java.util.stream.Collectors.summarizingDouble(emlab.gen.domain.contract.CashFlow::getMoney)).getSum() # java.util.logging.Logger.getGlobal().finer("Income LT " + toReturn) # return toReturn # # def calculateStrategicReserveRevenueOfPowerPlant(self, cashFlows): # toReturn = cashFlows.stream().filter(lambda p : p.getType() == emlab.gen.domain.contract.CashFlow.STRRESPAYMENT).collect(java.util.stream.Collectors.summarizingDouble(emlab.gen.domain.contract.CashFlow::getMoney)).getSum() # java.util.logging.Logger.getGlobal().finer("Income strategic reserve " + toReturn) # return toReturn # # def calculateCapacityMarketRevenueOfPowerPlant(self, cashFlows): # toReturn = cashFlows.stream().filter(lambda p : p.getType() == emlab.gen.domain.contract.CashFlow.CAPMARKETPAYMENT).collect(java.util.stream.Collectors.summarizingDouble(emlab.gen.domain.contract.CashFlow::getMoney)).getSum() # java.util.logging.Logger.getGlobal().finer("Income Capacity market " + toReturn) # return toReturn # # def calculateCO2HedgingRevenueOfPowerPlant(self, cashFlows): # toReturn = cashFlows.stream().filter(lambda p : p.getType() == emlab.gen.domain.contract.CashFlow.CO2HEDGING).collect(java.util.stream.Collectors.summarizingDouble(emlab.gen.domain.contract.CashFlow::getMoney)).getSum() # java.util.logging.Logger.getGlobal().finer("Income CO2 Hedging" + toReturn) # return toReturn # # def calculateCO2CostsOfPowerPlant(self, list): # return list.stream().filter(lambda p : (p.getType() == emlab.gen.domain.contract.CashFlow.CO2TAX) or (p.getType() == emlab.gen.domain.contract.CashFlow.CO2AUCTION) or (p.getType() == emlab.gen.domain.contract.CashFlow.NATIONALMINCO2)).mapToDouble(lambda p : p.getMoney()).sum() # def calculateFixedCostsOfPowerPlant(self, list): # pass # #return list.stream().filter(lambda p : (p.getType() == CashFlow.FIXEDOMCOST) or (p.getType() == CashFlow.LOAN) or (p.getType() == CashFlow.DOWNPAYMENT)).mapToDouble(lambda 
p : p.getMoney()).sum() # # def calculateFixedOMCostsOfPowerPlant(self, list): # pass # #return list.stream().filter(lambda p : (p.getType() == CashFlow.FIXEDOMCOST)).mapToDouble(lambda p : p.getMoney()).sum()
[ "domain.financialReports.FinancialPowerPlantReport" ]
[((1384, 1432), 'domain.financialReports.FinancialPowerPlantReport', 'FinancialPowerPlantReport', (['plant.name', 'self.reps'], {}), '(plant.name, self.reps)\n', (1409, 1432), False, 'from domain.financialReports import FinancialPowerPlantReport\n')]
import pandas as pd def main(): load_path = "data/2_road_usages.csv" save_path = "data/3_road_usages.csv" df = pd.read_csv(load_path) street_names = df.nimi.unique() points_to_drop = [] for street in street_names: points = df[df['nimi'] == street].piste.unique() if len(points) > 1: print(street + " has the following measurement points: " + str(points)) for i in range(1, len(points)): points_to_drop.append(points[i]) print("\nDropping rows at measurement points " + str(points_to_drop)) df = df[-df['piste'].isin(points_to_drop)] df.to_csv(save_path, index=False) if __name__ == '__main__': main()
[ "pandas.read_csv" ]
[((126, 148), 'pandas.read_csv', 'pd.read_csv', (['load_path'], {}), '(load_path)\n', (137, 148), True, 'import pandas as pd\n')]
# -*- coding: utf-8 -*- from application.files.parsers.dsv import parse_csv, lines from application.files.parsers import ParseError import unittest from cStringIO import StringIO from hamcrest import assert_that, only_contains, is_, contains class ParseCsvTestCase(unittest.TestCase): def test_parse_csv(self): csv_stream = _string_io("a,b\nx,y\nq,w") data = parse_csv(csv_stream) assert_that(data, contains( ["a", "b"], ["x", "y"], ["q", "w"], )) def test_parse_empty_csv(self): csv_stream = _string_io("") data = _traverse(parse_csv(csv_stream)) assert_that(data, is_([])) def test_parse_utf8_data(self): csv = u"a,b\nΓ ,ΓΉ" csv_stream = _string_io(csv, "utf-8") data = parse_csv(csv_stream) assert_that(data, contains( ["a", "b"], [u"Γ ", u"ΓΉ"], )) def test_error_when_input_is_not_utf8(self): csv = u"a,b\nΓ ,ΓΉ" csv_stream = _string_io(csv, "iso-8859-1") self.assertRaises(ParseError, lambda csv_stream: _traverse(parse_csv(csv_stream)), csv_stream) def test_ignore_when_empty_row(self): csv = u"a,b\n,\nc,d" csv_stream = _string_io(csv, "utf-8") data = parse_csv(csv_stream) assert_that(data, only_contains( ["a", "b"], ["c", "d"], )) def test_accept_when_some_values_empty(self): csv = u"a,b\n,\nc,d\nc," csv_stream = _string_io(csv, "utf-8") data = parse_csv(csv_stream) assert_that(data, only_contains( ["a", "b"], ["c", "d"], ["c", ""], )) def test_ignore_comments(self): csv = u"# top comment\na,b\n# any random comment\nc,d" csv_stream = _string_io(csv, "utf-8") data = parse_csv(csv_stream) assert_that(data, only_contains( ["a", "b"], ["c", "d"], )) def test_ignore_values_in_comments_column(self): csv = u"a,comment,b\nc,d,e" csv_stream = _string_io(csv, "utf-8") data = parse_csv(csv_stream) assert_that(data, only_contains( ["a", "b"], ["c", "e"], )) def test_accept_csv_with_CR_as_line_separator(self): csv = u"prop1,prop2\rvalue 1,value 2" csv_stream = _string_io(csv, "utf-8") data = parse_csv(csv_stream) assert_that(data, only_contains( ["prop1", "prop2"], ["value 1", "value 
2"], )) def test_accept_csv_with_CRLF_as_line_separator(self): csv = u"prop1,prop2\r\nvalue 1,value 2" csv_stream = _string_io(csv, "utf-8") data = parse_csv(csv_stream) assert_that(data, only_contains( ["prop1", "prop2"], ["value 1", "value 2"], )) def test_preserve_newlines_in_quoted_values(self): csv = u"prop1,prop2\nvalue,\"value\nwith newline\"" csv_stream = _string_io(csv, "utf-8") data = parse_csv(csv_stream) assert_that(data, only_contains( ["prop1", "prop2"], ["value", "value\nwith newline"], )) def test_parsing_numbers_in_cells(self): csv = u"int,float,string\n12,12.1,a string" csv_stream = _string_io(csv, "utf-8") data = parse_csv(csv_stream) assert_that(data, only_contains( ["int", "float", "string"], [12, 12.1, "a string"], )) class LinesGeneratorTest(unittest.TestCase): def test_handles_CR_LF_and_CRLF(self): text = "1\n2\r3\r\n4" lines_list = list(lines(_string_io(text))) assert_that(lines_list, is_(["1\n", "2\r", "3\r\n", "4"])) def test_handles_emptylines(self): text = "q\n\rw\r\r\ne" lines_list = list(lines(_string_io(text))) assert_that(lines_list, is_(["q\n", "\r", "w\r", "\r\n", "e"])) def test_ignores_trailing_empty_line(self): text = "asd\n" lines_list = list(lines(_string_io(text))) assert_that(lines_list, is_(["asd\n"])) def _string_io(content, encoding=None): if encoding is not None: content = content.encode(encoding) return StringIO(content) def _traverse(content): return map(lambda rows: list(rows), content)
[ "cStringIO.StringIO", "hamcrest.is_", "application.files.parsers.dsv.parse_csv", "hamcrest.only_contains", "hamcrest.contains" ]
[((4325, 4342), 'cStringIO.StringIO', 'StringIO', (['content'], {}), '(content)\n', (4333, 4342), False, 'from cStringIO import StringIO\n'), ((386, 407), 'application.files.parsers.dsv.parse_csv', 'parse_csv', (['csv_stream'], {}), '(csv_stream)\n', (395, 407), False, 'from application.files.parsers.dsv import parse_csv, lines\n'), ((811, 832), 'application.files.parsers.dsv.parse_csv', 'parse_csv', (['csv_stream'], {}), '(csv_stream)\n', (820, 832), False, 'from application.files.parsers.dsv import parse_csv, lines\n'), ((1349, 1370), 'application.files.parsers.dsv.parse_csv', 'parse_csv', (['csv_stream'], {}), '(csv_stream)\n', (1358, 1370), False, 'from application.files.parsers.dsv import parse_csv, lines\n'), ((1618, 1639), 'application.files.parsers.dsv.parse_csv', 'parse_csv', (['csv_stream'], {}), '(csv_stream)\n', (1627, 1639), False, 'from application.files.parsers.dsv import parse_csv, lines\n'), ((1926, 1947), 'application.files.parsers.dsv.parse_csv', 'parse_csv', (['csv_stream'], {}), '(csv_stream)\n', (1935, 1947), False, 'from application.files.parsers.dsv import parse_csv, lines\n'), ((2201, 2222), 'application.files.parsers.dsv.parse_csv', 'parse_csv', (['csv_stream'], {}), '(csv_stream)\n', (2210, 2222), False, 'from application.files.parsers.dsv import parse_csv, lines\n'), ((2490, 2511), 'application.files.parsers.dsv.parse_csv', 'parse_csv', (['csv_stream'], {}), '(csv_stream)\n', (2499, 2511), False, 'from application.files.parsers.dsv import parse_csv, lines\n'), ((2803, 2824), 'application.files.parsers.dsv.parse_csv', 'parse_csv', (['csv_stream'], {}), '(csv_stream)\n', (2812, 2824), False, 'from application.files.parsers.dsv import parse_csv, lines\n'), ((3125, 3146), 'application.files.parsers.dsv.parse_csv', 'parse_csv', (['csv_stream'], {}), '(csv_stream)\n', (3134, 3146), False, 'from application.files.parsers.dsv import parse_csv, lines\n'), ((3439, 3460), 'application.files.parsers.dsv.parse_csv', 'parse_csv', (['csv_stream'], {}), 
'(csv_stream)\n', (3448, 3460), False, 'from application.files.parsers.dsv import parse_csv, lines\n'), ((435, 479), 'hamcrest.contains', 'contains', (["['a', 'b']", "['x', 'y']", "['q', 'w']"], {}), "(['a', 'b'], ['x', 'y'], ['q', 'w'])\n", (443, 479), False, 'from hamcrest import assert_that, only_contains, is_, contains\n'), ((627, 648), 'application.files.parsers.dsv.parse_csv', 'parse_csv', (['csv_stream'], {}), '(csv_stream)\n', (636, 648), False, 'from application.files.parsers.dsv import parse_csv, lines\n'), ((677, 684), 'hamcrest.is_', 'is_', (['[]'], {}), '([])\n', (680, 684), False, 'from hamcrest import assert_that, only_contains, is_, contains\n'), ((860, 894), 'hamcrest.contains', 'contains', (["['a', 'b']", "[u'Γ ', u'ΓΉ']"], {}), "(['a', 'b'], [u'Γ ', u'ΓΉ'])\n", (868, 894), False, 'from hamcrest import assert_that, only_contains, is_, contains\n'), ((1398, 1435), 'hamcrest.only_contains', 'only_contains', (["['a', 'b']", "['c', 'd']"], {}), "(['a', 'b'], ['c', 'd'])\n", (1411, 1435), False, 'from hamcrest import assert_that, only_contains, is_, contains\n'), ((1667, 1715), 'hamcrest.only_contains', 'only_contains', (["['a', 'b']", "['c', 'd']", "['c', '']"], {}), "(['a', 'b'], ['c', 'd'], ['c', ''])\n", (1680, 1715), False, 'from hamcrest import assert_that, only_contains, is_, contains\n'), ((1975, 2012), 'hamcrest.only_contains', 'only_contains', (["['a', 'b']", "['c', 'd']"], {}), "(['a', 'b'], ['c', 'd'])\n", (1988, 2012), False, 'from hamcrest import assert_that, only_contains, is_, contains\n'), ((2250, 2287), 'hamcrest.only_contains', 'only_contains', (["['a', 'b']", "['c', 'e']"], {}), "(['a', 'b'], ['c', 'e'])\n", (2263, 2287), False, 'from hamcrest import assert_that, only_contains, is_, contains\n'), ((2539, 2596), 'hamcrest.only_contains', 'only_contains', (["['prop1', 'prop2']", "['value 1', 'value 2']"], {}), "(['prop1', 'prop2'], ['value 1', 'value 2'])\n", (2552, 2596), False, 'from hamcrest import assert_that, only_contains, is_, 
contains\n'), ((2852, 2909), 'hamcrest.only_contains', 'only_contains', (["['prop1', 'prop2']", "['value 1', 'value 2']"], {}), "(['prop1', 'prop2'], ['value 1', 'value 2'])\n", (2865, 2909), False, 'from hamcrest import assert_that, only_contains, is_, contains\n'), ((3174, 3244), 'hamcrest.only_contains', 'only_contains', (["['prop1', 'prop2']", '[\'value\', """value\nwith newline"""]'], {}), '([\'prop1\', \'prop2\'], [\'value\', """value\nwith newline"""])\n', (3187, 3244), False, 'from hamcrest import assert_that, only_contains, is_, contains\n'), ((3488, 3553), 'hamcrest.only_contains', 'only_contains', (["['int', 'float', 'string']", "[12, 12.1, 'a string']"], {}), "(['int', 'float', 'string'], [12, 12.1, 'a string'])\n", (3501, 3553), False, 'from hamcrest import assert_that, only_contains, is_, contains\n'), ((3796, 3829), 'hamcrest.is_', 'is_', (["['1\\n', '2\\r', '3\\r\\n', '4']"], {}), "(['1\\n', '2\\r', '3\\r\\n', '4'])\n", (3799, 3829), False, 'from hamcrest import assert_that, only_contains, is_, contains\n'), ((3987, 4025), 'hamcrest.is_', 'is_', (["['q\\n', '\\r', 'w\\r', '\\r\\n', 'e']"], {}), "(['q\\n', '\\r', 'w\\r', '\\r\\n', 'e'])\n", (3990, 4025), False, 'from hamcrest import assert_that, only_contains, is_, contains\n'), ((4184, 4198), 'hamcrest.is_', 'is_', (["['asd\\n']"], {}), "(['asd\\n'])\n", (4187, 4198), False, 'from hamcrest import assert_that, only_contains, is_, contains\n'), ((1153, 1174), 'application.files.parsers.dsv.parse_csv', 'parse_csv', (['csv_stream'], {}), '(csv_stream)\n', (1162, 1174), False, 'from application.files.parsers.dsv import parse_csv, lines\n')]
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import json import warnings import pulumi import pulumi.runtime from typing import Union from .. import utilities, tables class Policy(pulumi.CustomResource): custom_rules: pulumi.Output[list] """ One or more `custom_rules` blocks as defined below. * `action` (`str`) - Type of action. * `matchConditions` (`list`) - One or more `match_conditions` blocks as defined below. * `matchValues` (`list`) - A list of match values. * `matchVariables` (`list`) - One or more `match_variables` blocks as defined below. * `selector` (`str`) - Describes field of the matchVariable collection * `variableName` (`str`) - The name of the Match Variable * `negationCondition` (`bool`) - Describes if this is negate condition or not * `operator` (`str`) - Describes operator to be matched. * `name` (`str`) - Gets name of the resource that is unique within a policy. This name can be used to access the resource. * `priority` (`float`) - Describes priority of the rule. Rules with a lower value will be evaluated before rules with a higher value. * `ruleType` (`str`) - Describes the type of rule. """ location: pulumi.Output[str] """ Resource location. Changing this forces a new resource to be created. """ managed_rules: pulumi.Output[dict] """ A `managed_rules` blocks as defined below. * `exclusions` (`list`) - One or more `exclusion` block defined below. * `matchVariable` (`str`) * `selector` (`str`) - Describes field of the matchVariable collection. * `selectorMatchOperator` (`str`) - Describes operator to be matched. Possible values: `Contains`, `EndsWith`, `Equals`, `EqualsAny`, `StartsWith`. * `managedRuleSets` (`list`) - One or more `managed_rule_set` block defined below. * `ruleGroupOverrides` (`list`) - One or more `rule_group_override` block defined below. 
* `disabledRules` (`list`) - One or more Rule ID's * `ruleGroupName` (`str`) - The name of the Rule Group * `type` (`str`) - The rule set type. * `version` (`str`) - The rule set version. """ name: pulumi.Output[str] """ The name of the policy. Changing this forces a new resource to be created. """ policy_settings: pulumi.Output[dict] """ A `policy_settings` block as defined below. * `enabled` (`bool`) - Describes if the policy is in enabled state or disabled state Defaults to `Enabled`. * `mode` (`str`) - Describes if it is in detection mode or prevention mode at the policy level Defaults to `Prevention`. """ resource_group_name: pulumi.Output[str] """ The name of the resource group. Changing this forces a new resource to be created. """ tags: pulumi.Output[dict] """ A mapping of tags to assign to the Web Application Firewall Policy. """ def __init__(__self__, resource_name, opts=None, custom_rules=None, location=None, managed_rules=None, name=None, policy_settings=None, resource_group_name=None, tags=None, __props__=None, __name__=None, __opts__=None): """ Manages a Azure Web Application Firewall Policy instance. 
## Example Usage ```python import pulumi import pulumi_azure as azure example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West US 2") example_policy = azure.waf.Policy("examplePolicy", resource_group_name=example_resource_group.name, location=example_resource_group.location, custom_rules=[ { "name": "Rule1", "priority": 1, "ruleType": "MatchRule", "match_conditions": [{ "match_variables": [{ "variableName": "RemoteAddr", }], "operator": "IPMatch", "negationCondition": False, "matchValues": [ "192.168.1.0/24", "10.0.0.0/24", ], }], "action": "Block", }, { "name": "Rule2", "priority": 2, "ruleType": "MatchRule", "match_conditions": [ { "match_variables": [{ "variableName": "RemoteAddr", }], "operator": "IPMatch", "negationCondition": False, "matchValues": ["192.168.1.0/24"], }, { "match_variables": [{ "variableName": "RequestHeaders", "selector": "UserAgent", }], "operator": "Contains", "negationCondition": False, "matchValues": ["Windows"], }, ], "action": "Block", }, ], policy_settings={ "enabled": True, "mode": "Prevention", }, managed_rules={ "exclusion": [ { "matchVariable": "RequestHeaderNames", "selector": "x-company-secret-header", "selectorMatchOperator": "Equals", }, { "matchVariable": "RequestCookieNames", "selector": "too-tasty", "selectorMatchOperator": "EndsWith", }, ], "managed_rule_set": [{ "type": "OWASP", "version": "3.1", "rule_group_override": [{ "ruleGroupName": "REQUEST-920-PROTOCOL-ENFORCEMENT", "disabledRules": [ "920300", "920440", ], }], }], }) ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[list] custom_rules: One or more `custom_rules` blocks as defined below. :param pulumi.Input[str] location: Resource location. Changing this forces a new resource to be created. :param pulumi.Input[dict] managed_rules: A `managed_rules` blocks as defined below. :param pulumi.Input[str] name: The name of the policy. 
Changing this forces a new resource to be created. :param pulumi.Input[dict] policy_settings: A `policy_settings` block as defined below. :param pulumi.Input[str] resource_group_name: The name of the resource group. Changing this forces a new resource to be created. :param pulumi.Input[dict] tags: A mapping of tags to assign to the Web Application Firewall Policy. The **custom_rules** object supports the following: * `action` (`pulumi.Input[str]`) - Type of action. * `matchConditions` (`pulumi.Input[list]`) - One or more `match_conditions` blocks as defined below. * `matchValues` (`pulumi.Input[list]`) - A list of match values. * `matchVariables` (`pulumi.Input[list]`) - One or more `match_variables` blocks as defined below. * `selector` (`pulumi.Input[str]`) - Describes field of the matchVariable collection * `variableName` (`pulumi.Input[str]`) - The name of the Match Variable * `negationCondition` (`pulumi.Input[bool]`) - Describes if this is negate condition or not * `operator` (`pulumi.Input[str]`) - Describes operator to be matched. * `name` (`pulumi.Input[str]`) - Gets name of the resource that is unique within a policy. This name can be used to access the resource. * `priority` (`pulumi.Input[float]`) - Describes priority of the rule. Rules with a lower value will be evaluated before rules with a higher value. * `ruleType` (`pulumi.Input[str]`) - Describes the type of rule. The **managed_rules** object supports the following: * `exclusions` (`pulumi.Input[list]`) - One or more `exclusion` block defined below. * `matchVariable` (`pulumi.Input[str]`) * `selector` (`pulumi.Input[str]`) - Describes field of the matchVariable collection. * `selectorMatchOperator` (`pulumi.Input[str]`) - Describes operator to be matched. Possible values: `Contains`, `EndsWith`, `Equals`, `EqualsAny`, `StartsWith`. * `managedRuleSets` (`pulumi.Input[list]`) - One or more `managed_rule_set` block defined below. 
* `ruleGroupOverrides` (`pulumi.Input[list]`) - One or more `rule_group_override` block defined below. * `disabledRules` (`pulumi.Input[list]`) - One or more Rule ID's * `ruleGroupName` (`pulumi.Input[str]`) - The name of the Rule Group * `type` (`pulumi.Input[str]`) - The rule set type. * `version` (`pulumi.Input[str]`) - The rule set version. The **policy_settings** object supports the following: * `enabled` (`pulumi.Input[bool]`) - Describes if the policy is in enabled state or disabled state Defaults to `Enabled`. * `mode` (`pulumi.Input[str]`) - Describes if it is in detection mode or prevention mode at the policy level Defaults to `Prevention`. """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['custom_rules'] = custom_rules __props__['location'] = location if managed_rules is None: raise TypeError("Missing required property 'managed_rules'") __props__['managed_rules'] = managed_rules __props__['name'] = name __props__['policy_settings'] = policy_settings if resource_group_name is None: raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['tags'] = tags super(Policy, __self__).__init__( 'azure:waf/policy:Policy', resource_name, __props__, opts) @staticmethod def get(resource_name, id, opts=None, custom_rules=None, location=None, 
managed_rules=None, name=None, policy_settings=None, resource_group_name=None, tags=None): """ Get an existing Policy resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param str id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[list] custom_rules: One or more `custom_rules` blocks as defined below. :param pulumi.Input[str] location: Resource location. Changing this forces a new resource to be created. :param pulumi.Input[dict] managed_rules: A `managed_rules` blocks as defined below. :param pulumi.Input[str] name: The name of the policy. Changing this forces a new resource to be created. :param pulumi.Input[dict] policy_settings: A `policy_settings` block as defined below. :param pulumi.Input[str] resource_group_name: The name of the resource group. Changing this forces a new resource to be created. :param pulumi.Input[dict] tags: A mapping of tags to assign to the Web Application Firewall Policy. The **custom_rules** object supports the following: * `action` (`pulumi.Input[str]`) - Type of action. * `matchConditions` (`pulumi.Input[list]`) - One or more `match_conditions` blocks as defined below. * `matchValues` (`pulumi.Input[list]`) - A list of match values. * `matchVariables` (`pulumi.Input[list]`) - One or more `match_variables` blocks as defined below. * `selector` (`pulumi.Input[str]`) - Describes field of the matchVariable collection * `variableName` (`pulumi.Input[str]`) - The name of the Match Variable * `negationCondition` (`pulumi.Input[bool]`) - Describes if this is negate condition or not * `operator` (`pulumi.Input[str]`) - Describes operator to be matched. * `name` (`pulumi.Input[str]`) - Gets name of the resource that is unique within a policy. This name can be used to access the resource. 
* `priority` (`pulumi.Input[float]`) - Describes priority of the rule. Rules with a lower value will be evaluated before rules with a higher value. * `ruleType` (`pulumi.Input[str]`) - Describes the type of rule. The **managed_rules** object supports the following: * `exclusions` (`pulumi.Input[list]`) - One or more `exclusion` block defined below. * `matchVariable` (`pulumi.Input[str]`) * `selector` (`pulumi.Input[str]`) - Describes field of the matchVariable collection. * `selectorMatchOperator` (`pulumi.Input[str]`) - Describes operator to be matched. Possible values: `Contains`, `EndsWith`, `Equals`, `EqualsAny`, `StartsWith`. * `managedRuleSets` (`pulumi.Input[list]`) - One or more `managed_rule_set` block defined below. * `ruleGroupOverrides` (`pulumi.Input[list]`) - One or more `rule_group_override` block defined below. * `disabledRules` (`pulumi.Input[list]`) - One or more Rule ID's * `ruleGroupName` (`pulumi.Input[str]`) - The name of the Rule Group * `type` (`pulumi.Input[str]`) - The rule set type. * `version` (`pulumi.Input[str]`) - The rule set version. The **policy_settings** object supports the following: * `enabled` (`pulumi.Input[bool]`) - Describes if the policy is in enabled state or disabled state Defaults to `Enabled`. * `mode` (`pulumi.Input[str]`) - Describes if it is in detection mode or prevention mode at the policy level Defaults to `Prevention`. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__["custom_rules"] = custom_rules __props__["location"] = location __props__["managed_rules"] = managed_rules __props__["name"] = name __props__["policy_settings"] = policy_settings __props__["resource_group_name"] = resource_group_name __props__["tags"] = tags return Policy(resource_name, opts=opts, __props__=__props__) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
[ "warnings.warn", "pulumi.ResourceOptions" ]
[((10083, 10158), 'warnings.warn', 'warnings.warn', (['"""explicit use of __name__ is deprecated"""', 'DeprecationWarning'], {}), "('explicit use of __name__ is deprecated', DeprecationWarning)\n", (10096, 10158), False, 'import warnings\n'), ((10241, 10340), 'warnings.warn', 'warnings.warn', (['"""explicit use of __opts__ is deprecated, use \'opts\' instead"""', 'DeprecationWarning'], {}), '("explicit use of __opts__ is deprecated, use \'opts\' instead",\n DeprecationWarning)\n', (10254, 10340), False, 'import warnings\n'), ((10409, 10433), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {}), '()\n', (10431, 10433), False, 'import pulumi\n'), ((15542, 15571), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'id': 'id'}), '(id=id)\n', (15564, 15571), False, 'import pulumi\n')]
# Execution time : 0.440223 seconds # Solution Explanation # We can simplily iterate through all the 4-digits numbers # Then generate all the permutation of this number and check # if the desirable sequence, distinct from the given one, is found import time width = 40 import itertools import math def solution(): isPrime = lambda p: p>=1000 and all(p%it!=0 for it in range(2,int(math.sqrt(p))+1)) for num in range(1488,10000): v = [int(''.join(ch for ch in it)) for it in itertools.permutations(str(num))] v.sort() for it1 in range(len(v)): for it2 in range(it1+1,len(v)): r = v[it2] - v[it1] if r > 0 and v[it1]!=1487 and v[it2]+r in v: if isPrime(v[it1]) and isPrime(v[it2]) and isPrime(v[it2]+r): return str(v[it1])+str(v[it2])+str(v[it2]+r) if __name__=="__main__": start_ = time.time() print(' Answer -> %s '.center(width,'-') % ( solution() )) print(' %f seconds '.center(width,'-') % ( time.time() - start_))
[ "math.sqrt", "time.time" ]
[((913, 924), 'time.time', 'time.time', ([], {}), '()\n', (922, 924), False, 'import time\n'), ((1035, 1046), 'time.time', 'time.time', ([], {}), '()\n', (1044, 1046), False, 'import time\n'), ((390, 402), 'math.sqrt', 'math.sqrt', (['p'], {}), '(p)\n', (399, 402), False, 'import math\n')]
# Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file or at # https://developers.google.com/open-source/licenses/bsd """Unit test for Hotlist creation servlet.""" from __future__ import print_function from __future__ import division from __future__ import absolute_import import mox import unittest import settings from framework import permissions from features import hotlistcreate from proto import site_pb2 from services import service_manager from testing import fake from testing import testing_helpers class HotlistCreateTest(unittest.TestCase): """Tests for the HotlistCreate servlet.""" def setUp(self): self.cnxn = 'fake cnxn' self.mr = testing_helpers.MakeMonorailRequest() self.services = service_manager.Services(project=fake.ProjectService(), user=fake.UserService(), issue=fake.IssueService(), features=fake.FeaturesService()) self.servlet = hotlistcreate.HotlistCreate('req', 'res', services=self.services) self.mox = mox.Mox() def tearDown(self): self.mox.UnsetStubs() self.mox.ResetAll() def CheckAssertBasePermissions( self, restriction, expect_admin_ok, expect_nonadmin_ok): old_hotlist_creation_restriction = settings.hotlist_creation_restriction settings.hotlist_creation_restriction = restriction mr = testing_helpers.MakeMonorailRequest( perms=permissions.GetPermissions(None, {}, None)) self.assertRaises( permissions.PermissionException, self.servlet.AssertBasePermission, mr) mr = testing_helpers.MakeMonorailRequest() if expect_admin_ok: self.servlet.AssertBasePermission(mr) else: self.assertRaises( permissions.PermissionException, self.servlet.AssertBasePermission, mr) mr = testing_helpers.MakeMonorailRequest( perms=permissions.GetPermissions(mr.auth.user_pb, {111}, None)) if expect_nonadmin_ok: self.servlet.AssertBasePermission(mr) else: self.assertRaises( permissions.PermissionException, self.servlet.AssertBasePermission, mr) 
settings.hotlist_creation_restriction = old_hotlist_creation_restriction def testAssertBasePermission(self): self.CheckAssertBasePermissions( site_pb2.UserTypeRestriction.ANYONE, True, True) self.CheckAssertBasePermissions( site_pb2.UserTypeRestriction.ADMIN_ONLY, True, False) self.CheckAssertBasePermissions( site_pb2.UserTypeRestriction.NO_ONE, False, False) def testGatherPageData(self): page_data = self.servlet.GatherPageData(self.mr) self.assertEqual('st6', page_data['user_tab_mode']) self.assertEqual('', page_data['initial_name']) self.assertEqual('', page_data['initial_summary']) self.assertEqual('', page_data['initial_description']) self.assertEqual('', page_data['initial_editors']) self.assertEqual('no', page_data['initial_privacy']) def testProcessFormData(self): self.servlet.services.user.TestAddUser('owner', 111) self.mr.auth.user_id = 111 post_data = fake.PostData(hotlistname=['Hotlist'], summary=['summ'], description=['hey'], editors=[''], is_private=['yes']) url = self.servlet.ProcessFormData(self.mr, post_data) self.assertTrue('/u/111/hotlists/Hotlist' in url) def testProcessFormData_OwnerInEditors(self): self.servlet.services.user.TestAddUser('owner_editor', 222) self.mr.auth.user_id = 222 self.mr.cnxn = 'fake cnxn' post_data = fake.PostData(hotlistname=['Hotlist-owner-editor'], summary=['summ'], description=['hi'], editors=['owner_editor'], is_private=['yes']) url = self.servlet.ProcessFormData(self.mr, post_data) self.assertTrue('/u/222/hotlists/Hotlist-owner-editor' in url) hotlists_by_id = self.servlet.services.features.LookupHotlistIDs( self.mr.cnxn, ['Hotlist-owner-editor'], [222]) self.assertTrue(('hotlist-owner-editor', 222) in hotlists_by_id) hotlist_id = hotlists_by_id[('hotlist-owner-editor', 222)] hotlist = self.servlet.services.features.GetHotlist( self.mr.cnxn, hotlist_id, use_cache=False) self.assertEqual(hotlist.owner_ids, [222]) self.assertEqual(hotlist.editor_ids, []) def testProcessFormData_RejectTemplateInvalid(self): mr = 
testing_helpers.MakeMonorailRequest() # invalid hotlist name and nonexistent editor post_data = fake.PostData(hotlistname=['123BadName'], summary=['summ'], description=['hey'], editors=['<EMAIL>'], is_private=['yes']) self.mox.StubOutWithMock(self.servlet, 'PleaseCorrect') self.servlet.PleaseCorrect( mr, initial_name = '123BadName', initial_summary='summ', initial_description='hey', initial_editors='<EMAIL>', initial_privacy='yes') self.mox.ReplayAll() url = self.servlet.ProcessFormData(mr, post_data) self.mox.VerifyAll() self.assertEqual(mr.errors.hotlistname, 'Invalid hotlist name') self.assertEqual(mr.errors.editors, 'One or more editor emails is not valid.') self.assertIsNone(url) def testProcessFormData_RejectTemplateMissing(self): mr = testing_helpers.MakeMonorailRequest() # missing name and summary post_data = fake.PostData() self.mox.StubOutWithMock(self.servlet, 'PleaseCorrect') self.servlet.PleaseCorrect(mr, initial_name = None, initial_summary=None, initial_description='', initial_editors='', initial_privacy=None) self.mox.ReplayAll() url = self.servlet.ProcessFormData(mr, post_data) self.mox.VerifyAll() self.assertEqual(mr.errors.hotlistname, 'Missing hotlist name') self.assertEqual(mr.errors.summary,'Missing hotlist summary') self.assertIsNone(url)
[ "testing.fake.UserService", "features.hotlistcreate.HotlistCreate", "testing.fake.PostData", "mox.Mox", "testing.fake.FeaturesService", "testing.testing_helpers.MakeMonorailRequest", "testing.fake.ProjectService", "framework.permissions.GetPermissions", "testing.fake.IssueService" ]
[((772, 809), 'testing.testing_helpers.MakeMonorailRequest', 'testing_helpers.MakeMonorailRequest', ([], {}), '()\n', (807, 809), False, 'from testing import testing_helpers\n'), ((1120, 1185), 'features.hotlistcreate.HotlistCreate', 'hotlistcreate.HotlistCreate', (['"""req"""', '"""res"""'], {'services': 'self.services'}), "('req', 'res', services=self.services)\n", (1147, 1185), False, 'from features import hotlistcreate\n'), ((1248, 1257), 'mox.Mox', 'mox.Mox', ([], {}), '()\n', (1255, 1257), False, 'import mox\n'), ((1788, 1825), 'testing.testing_helpers.MakeMonorailRequest', 'testing_helpers.MakeMonorailRequest', ([], {}), '()\n', (1823, 1825), False, 'from testing import testing_helpers\n'), ((3302, 3418), 'testing.fake.PostData', 'fake.PostData', ([], {'hotlistname': "['Hotlist']", 'summary': "['summ']", 'description': "['hey']", 'editors': "['']", 'is_private': "['yes']"}), "(hotlistname=['Hotlist'], summary=['summ'], description=['hey'\n ], editors=[''], is_private=['yes'])\n", (3315, 3418), False, 'from testing import fake\n'), ((3778, 3917), 'testing.fake.PostData', 'fake.PostData', ([], {'hotlistname': "['Hotlist-owner-editor']", 'summary': "['summ']", 'description': "['hi']", 'editors': "['owner_editor']", 'is_private': "['yes']"}), "(hotlistname=['Hotlist-owner-editor'], summary=['summ'],\n description=['hi'], editors=['owner_editor'], is_private=['yes'])\n", (3791, 3917), False, 'from testing import fake\n'), ((4652, 4689), 'testing.testing_helpers.MakeMonorailRequest', 'testing_helpers.MakeMonorailRequest', ([], {}), '()\n', (4687, 4689), False, 'from testing import testing_helpers\n'), ((4756, 4882), 'testing.fake.PostData', 'fake.PostData', ([], {'hotlistname': "['123BadName']", 'summary': "['summ']", 'description': "['hey']", 'editors': "['<EMAIL>']", 'is_private': "['yes']"}), "(hotlistname=['123BadName'], summary=['summ'], description=[\n 'hey'], editors=['<EMAIL>'], is_private=['yes'])\n", (4769, 4882), False, 'from testing import fake\n'), 
((5556, 5593), 'testing.testing_helpers.MakeMonorailRequest', 'testing_helpers.MakeMonorailRequest', ([], {}), '()\n', (5591, 5593), False, 'from testing import testing_helpers\n'), ((5641, 5656), 'testing.fake.PostData', 'fake.PostData', ([], {}), '()\n', (5654, 5656), False, 'from testing import fake\n'), ((863, 884), 'testing.fake.ProjectService', 'fake.ProjectService', ([], {}), '()\n', (882, 884), False, 'from testing import fake\n'), ((931, 949), 'testing.fake.UserService', 'fake.UserService', ([], {}), '()\n', (947, 949), False, 'from testing import fake\n'), ((1002, 1021), 'testing.fake.IssueService', 'fake.IssueService', ([], {}), '()\n', (1019, 1021), False, 'from testing import fake\n'), ((1077, 1099), 'testing.fake.FeaturesService', 'fake.FeaturesService', ([], {}), '()\n', (1097, 1099), False, 'from testing import fake\n'), ((1623, 1665), 'framework.permissions.GetPermissions', 'permissions.GetPermissions', (['None', '{}', 'None'], {}), '(None, {}, None)\n', (1649, 1665), False, 'from framework import permissions\n'), ((2082, 2138), 'framework.permissions.GetPermissions', 'permissions.GetPermissions', (['mr.auth.user_pb', '{111}', 'None'], {}), '(mr.auth.user_pb, {111}, None)\n', (2108, 2138), False, 'from framework import permissions\n')]
#!/usr/bin/env python3 # -*- coding: UTF-8 -*- # [Grove - 12 Key Capacitive I2C Touch Sensor V2] # (http://wiki.seeedstudio.com/Grove-12_Key_Capacitive_I2C_Touch_Sensor_V2-MPR121/) on I2C2 # [Grove – Speaker](http://wiki.seeedstudio.com/Grove-Speaker/) # on UART2 # [Grove - Chainable RGB LED X 2](http://wiki.seeedstudio.com/Grove-Chainable_RGB_LED/) # on A2 import time import wave import pyaudio from Captouch import MPR121 from RGBLed import P981X _SCALE_DEFS = [ 'do.wav', 're.wav', 'me.wav', 'fa.wav', 'so.wav', 'la.wav', 'ti.wav', 'do+.wav' ] def Play_Music(file): """Play WAV format music when the Mpr121 is pressed file:the Wav format music """ # define stream chunk chunk = 1024 # open a wav format music f = wave.open(file,"rb") # instantiate PyAudio p = pyaudio.PyAudio() #define callback function def callback(in_data, frame_count, time_info, status): data = f.readframes(frame_count) #the function will return pyaudio.paContinue when the Mpr121 is pressed if Mpr121Data[0] != 0: return (data,pyaudio.paContinue) return (data,pyaudio.paComplete) # open stream stream = p.open(format = p.get_format_from_width(f.getsampwidth()), channels = f.getnchannels(), rate = f.getframerate(), output = True, stream_callback=callback) #Start stream stream.start_stream() #Enter the while loop,when the Mpr121 is pressed while stream.is_active(): global Mpr121Data Mpr121Data = Mpr121.get() time.sleep(0.01) # stop stream stream.stop_stream() stream.close() f.close() # close PyAudio p.terminate() def main(): LED = P981X() global Mpr121 Mpr121 = MPR121() while True: GetMpr121 = Mpr121.get() Mpr121Result = GetMpr121[1] #Mpr121Result isn't empty when the Mpr121 is pressed if any(Mpr121Result) != False: #Check the which one button is pressed on Mpr12 then play #different music and turn on LED that will display different color for i in range(12): if(Mpr121Result[i] == 1): if i > 3 : LED.set(0,((i-4)&0x01)*255,((i-4)&0x02)*255,((i-4)&0x04)*255) 
LED.set(1,((i-4)&0x01)*255,((i-4)&0x02)*255,((i-4)&0x04)*255) Play_Music("/tmp/scale/%s"%_SCALE_DEFS[i-4]) else : LED.set(0,(i&0x01)*255,(i&0x02)*255,(i&0x04)*255) LED.set(1,(i&0x01)*255,(i&0x02)*255,(i&0x04)*255) if i == 0: LED.set(0,50,50,200) LED.set(1,50,50,200) Play_Music("/tmp/scale/%s"%_SCALE_DEFS[i]) else : LED.set(0,0,0,0) LED.set(1,0,0,0) time.sleep(0.05) if __name__ == "__main__": main()
[ "wave.open", "time.sleep", "Captouch.MPR121", "RGBLed.P981X", "pyaudio.PyAudio" ]
[((782, 803), 'wave.open', 'wave.open', (['file', '"""rb"""'], {}), "(file, 'rb')\n", (791, 803), False, 'import wave\n'), ((837, 854), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (852, 854), False, 'import pyaudio\n'), ((1847, 1854), 'RGBLed.P981X', 'P981X', ([], {}), '()\n', (1852, 1854), False, 'from RGBLed import P981X\n'), ((1886, 1894), 'Captouch.MPR121', 'MPR121', ([], {}), '()\n', (1892, 1894), False, 'from Captouch import MPR121\n'), ((1692, 1708), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (1702, 1708), False, 'import time\n'), ((3069, 3085), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (3079, 3085), False, 'import time\n')]
import glob, codecs #configfiles = glob.glob(r'C:\Users\sam\Desktop\**\*.txt', recursive=True) #fn="ojcpp/company/amazon_memo.txt" for fn in glob.glob("ojcpp/company/*.cpp",recursive=True): print(fn) ret=True try: data = open(fn, "r", encoding="gbk").read() open(fn, "w", encoding="utf-8").write(data) except: ret=False """ if ret: continue try: data = open(fn, "r", encoding="utf-8").read() open(fn, "w", encoding="utf-8").write(data) except: pass """
[ "glob.glob" ]
[((149, 197), 'glob.glob', 'glob.glob', (['"""ojcpp/company/*.cpp"""'], {'recursive': '(True)'}), "('ojcpp/company/*.cpp', recursive=True)\n", (158, 197), False, 'import glob, codecs\n')]
from pathlib import Path from typing import List import torch from transformers import AutoModel, AutoTokenizer from interprete.src.models.model import Model, ModelOutput from interprete.src.models.utils import to_cpu class GraphCodeBertModel(Model): def __init__(self, args=[], type="GraphCodeBert"): super().__init__(type) self.args = args path = Path(__file__).parent.absolute().resolve() print(path) tokenizer = AutoTokenizer.from_pretrained(f"{path}/graphcodebert-base") model = AutoModel.from_pretrained(f"{path}/graphcodebert-base") self.model = model self.tokenizer = tokenizer print("loaded CodeBert model and tokenizer") if torch.cuda.is_available(): self.model = self.model.cuda() @staticmethod def get_model(model_type, **kwargs): return GraphCodeBertModel(type=model_type) def bpe(self, code: str, max_positions=512) -> List[str]: inp = code inp = inp.replace("▁", "_") tokens = self.tokenizer.tokenize(inp) if len(tokens) > max_positions - 2: tokens = tokens[: max_positions - 2] tokens = list(map(lambda x: x.replace("Δ ", "▁"), tokens)) tokens[ 0 ] = f"▁{tokens[0]}" # first subtoken was not prefixed with special BPE symbol return tokens def __call__(self, bpe: List[str]): """ Returns: dict """ code = "".join(bpe).replace("▁", " ").strip() # "▁" symbol in code resulted in ['Δ ', 'Γ’', 'ΔΈ', 'Δ£'] tokens # inp = "hello i am Sergey" code_tokens = self.tokenizer.tokenize(code) tokens = [self.tokenizer.cls_token] + code_tokens + [self.tokenizer.sep_token] tokens_ids = torch.tensor(self.tokenizer.convert_tokens_to_ids(tokens))[None, :] # print("token_ids", tokens_ids.shape) max_idx = 512 if tokens_ids.shape[-1] > max_idx: # if input is too long, crop it tokens_ids = torch.cat( ( tokens_ids[..., :1], tokens_ids[..., 1 : max_idx - 1], tokens_ids[..., -1:], ), dim=-1, ) if torch.cuda.is_available(): tokens_ids = tokens_ids.cuda() # simply generate one code span with torch.no_grad(): generated_ids = self.model.forward( torch.tensor(tokens_ids), output_hidden_states=True ) 
bpes = list( map( lambda x: x.replace("Δ ", "▁"), self.tokenizer.convert_ids_to_tokens( tokens_ids[0], skip_special_tokens=True ), ) ) bpes[ 0 ] = f"▁{bpes[0]}" # in hugginface first subtoken was not prefixed with special BPE symbol features = to_cpu( list(list(map(lambda x: x[:, 1:-1, :], generated_ids.hidden_states))) ) tokens = tokens_ids[0].cpu().clone().numpy() tokens = tokens[1:-1] return ModelOutput(bpes, tokens, features) @staticmethod def get_embeddings_info() -> List[str]: """get identifiers for all embedding layer e.g. e1, e2, e3, ..., d1, d2, d3, ...""" return [f"e{i}" for i in range(13)] + [] # no decoder values
[ "transformers.AutoModel.from_pretrained", "pathlib.Path", "interprete.src.models.model.ModelOutput", "torch.tensor", "torch.cuda.is_available", "transformers.AutoTokenizer.from_pretrained", "torch.no_grad", "torch.cat" ]
[((464, 523), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['f"""{path}/graphcodebert-base"""'], {}), "(f'{path}/graphcodebert-base')\n", (493, 523), False, 'from transformers import AutoModel, AutoTokenizer\n'), ((540, 595), 'transformers.AutoModel.from_pretrained', 'AutoModel.from_pretrained', (['f"""{path}/graphcodebert-base"""'], {}), "(f'{path}/graphcodebert-base')\n", (565, 595), False, 'from transformers import AutoModel, AutoTokenizer\n'), ((723, 748), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (746, 748), False, 'import torch\n'), ((2271, 2296), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2294, 2296), False, 'import torch\n'), ((2036, 2135), 'torch.cat', 'torch.cat', (['(tokens_ids[..., :1], tokens_ids[..., 1:max_idx - 1], tokens_ids[..., -1:])'], {'dim': '(-1)'}), '((tokens_ids[..., :1], tokens_ids[..., 1:max_idx - 1], tokens_ids[\n ..., -1:]), dim=-1)\n', (2045, 2135), False, 'import torch\n'), ((2395, 2410), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2408, 2410), False, 'import torch\n'), ((3200, 3235), 'interprete.src.models.model.ModelOutput', 'ModelOutput', (['bpes', 'tokens', 'features'], {}), '(bpes, tokens, features)\n', (3211, 3235), False, 'from interprete.src.models.model import Model, ModelOutput\n'), ((2476, 2500), 'torch.tensor', 'torch.tensor', (['tokens_ids'], {}), '(tokens_ids)\n', (2488, 2500), False, 'import torch\n'), ((381, 395), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (385, 395), False, 'from pathlib import Path\n')]
import pygame from src.entities import Player from src.utils import DialogBox from src.map import MapManager class Game: def __init__(self): super().__init__() # creer la fenetre du jeu self.screen = pygame.display.set_mode((1280, 720)) pygame.display.set_caption("Irale - Le jeux video") self.running = True # generer le joueur self.player = Player() self.map_manager = MapManager(self.screen, self.player) self.dialog_box = DialogBox() # definir control def handle_input(self): pressed = pygame.key.get_pressed() up = pressed[pygame.K_z] down = pressed[pygame.K_s] right = pressed[pygame.K_d] left = pressed[pygame.K_q] esc = pressed[pygame.K_ESCAPE] if up & right: self.player.move_up() self.player.move_right() elif up & left: self.player.move_up() self.player.move_left() elif down & right: self.player.move_down() self.player.move_right() elif down & left: self.player.move_down() self.player.move_left() elif down: self.player.move_down() elif right: self.player.move_right() elif left: self.player.move_left() elif up: self.player.move_up() elif esc: self.running = False def update(self): self.map_manager.update() def run(self): clock = pygame.time.Clock() # boucle du jeu while self.running: self.player.save_location() self.handle_input() self.update() self.map_manager.draw() self.dialog_box.render(self.screen) pygame.display.flip() for event in pygame.event.get(): if event.type == pygame.QUIT: self.running = False elif event.type == pygame.KEYDOWN: if event.key == pygame.K_e: self.map_manager.check_npc_collisions(self.dialog_box) clock.tick(60)
[ "src.utils.DialogBox", "pygame.event.get", "pygame.display.set_mode", "pygame.display.flip", "pygame.time.Clock", "pygame.key.get_pressed", "src.entities.Player", "pygame.display.set_caption", "src.map.MapManager" ]
[((234, 270), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(1280, 720)'], {}), '((1280, 720))\n', (257, 270), False, 'import pygame\n'), ((279, 330), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Irale - Le jeux video"""'], {}), "('Irale - Le jeux video')\n", (305, 330), False, 'import pygame\n'), ((410, 418), 'src.entities.Player', 'Player', ([], {}), '()\n', (416, 418), False, 'from src.entities import Player\n'), ((446, 482), 'src.map.MapManager', 'MapManager', (['self.screen', 'self.player'], {}), '(self.screen, self.player)\n', (456, 482), False, 'from src.map import MapManager\n'), ((509, 520), 'src.utils.DialogBox', 'DialogBox', ([], {}), '()\n', (518, 520), False, 'from src.utils import DialogBox\n'), ((590, 614), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (612, 614), False, 'import pygame\n'), ((1542, 1561), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (1559, 1561), False, 'import pygame\n'), ((1809, 1830), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (1828, 1830), False, 'import pygame\n'), ((1857, 1875), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1873, 1875), False, 'import pygame\n')]
from gym.envs.registration import register from malpi.dkwm.gym_envs.dkwm_env import DKWMEnv register( id='dkwm-v0', entry_point='malpi.dkwm.gym_envs:DKWMEnv', )
[ "gym.envs.registration.register" ]
[((94, 159), 'gym.envs.registration.register', 'register', ([], {'id': '"""dkwm-v0"""', 'entry_point': '"""malpi.dkwm.gym_envs:DKWMEnv"""'}), "(id='dkwm-v0', entry_point='malpi.dkwm.gym_envs:DKWMEnv')\n", (102, 159), False, 'from gym.envs.registration import register\n')]
#!/bin/python3 import sys import math def isPrime(n): if n==2 or n==3 or n==5 or n==7 or n==11 or n==13 or n==13 or n==17 or n==19: return True upperLimit = math.ceil(math.sqrt(n))+1 for i in range(2,upperLimit): if n%i==0: return False return True t = int(input().strip()) for a0 in range(t): n = int(input().strip()) if isPrime(n): print(n) else: factors = [] upperLimit = math.ceil(math.sqrt(n)) + 1 for i in range(2,upperLimit): if n%i==0: factors.append(i) if i!=(n//i): factors.append(n//i) sorted(factors) for j in range(len(factors)-1,-1,-1): if isPrime(factors[j]): print(factors[j]) break
[ "math.sqrt" ]
[((185, 197), 'math.sqrt', 'math.sqrt', (['n'], {}), '(n)\n', (194, 197), False, 'import math\n'), ((468, 480), 'math.sqrt', 'math.sqrt', (['n'], {}), '(n)\n', (477, 480), False, 'import math\n')]
"""Support for OPCUA""" import logging import voluptuous as vol from opcua import Client, ua from homeassistant.const import ( ATTR_STATE, CONF_URL, CONF_NAME, CONF_TIMEOUT, CONF_USERNAME, CONF_PASSWORD, EVENT_HOMEASSISTANT_STOP, EVENT_HOMEASSISTANT_START, ) import homeassistant.helpers.config_validation as cv from homeassistant.helpers.discovery import load_platform from .const import ( OPCUA_DOMAIN as DOMAIN, DEFAULT_NAME, DEFAULT_TIMEOUT, CONF_SESSIONTIMEOUT, CONF_SECURETIMEOUT, CONF_SECURITYSTRING, CONF_URI, SERVICE_SET_VALUE, SERVICE_SET_ATTRIBUTE, SERVICE_READ_VALUE, SERVICE_CONNECT, SERVICE_CLOSE, ATTR_HUB, ATTR_NODEID, ATTR_VALUE, ) _LOGGER = logging.getLogger(__name__) BASE_SCHEMA = vol.Schema( { vol.Required(CONF_URL): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int, vol.Optional(CONF_SESSIONTIMEOUT, default=3600000): cv.positive_int, vol.Optional(CONF_SECURETIMEOUT, default=600000): cv.positive_int, vol.Optional(CONF_USERNAME, default=None): vol.Any(None, cv.string), vol.Optional(CONF_PASSWORD, default=None): vol.Any(None, cv.string), vol.Optional(CONF_SECURITYSTRING, default=None): vol.Any(None, cv.string), vol.Optional(CONF_URI, default=None): vol.Any(None, cv.string), } ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.All( cv.ensure_list, [ vol.Any(BASE_SCHEMA), ], ), }, extra=vol.ALLOW_EXTRA, ) SERVICE_SET_VALUE_SCHEMA = vol.Schema( { vol.Optional(ATTR_HUB, default=DEFAULT_NAME): cv.string, vol.Required(ATTR_NODEID): cv.string, vol.Required(ATTR_VALUE): vol.Any( float, int, str, cv.byte, cv.boolean, cv.time, ), } ) SERVICE_SET_ATTRIBUTE_SCHEMA = vol.Schema( { vol.Optional(ATTR_HUB, default=DEFAULT_NAME): cv.string, vol.Required(ATTR_NODEID): cv.string, vol.Required(ATTR_VALUE): vol.Any( float, int, str, cv.byte, cv.boolean, cv.time, ), } ) SERVICE_READ_VALUE_SCHEMA = vol.Schema( { vol.Optional(ATTR_HUB, default=DEFAULT_NAME): cv.string, vol.Required(ATTR_NODEID): 
cv.string, } ) SERVICE_CONNECT_SCHEMA = vol.Schema( { vol.Optional(ATTR_HUB, default=DEFAULT_NAME): cv.string, } ) def setup(hass, config): hass.data[DOMAIN] = hub_collect = {} for conf_hub in config[DOMAIN]: # create an instance of a opcua hub connection, i.e. to a opcua server hub_collect[conf_hub[CONF_NAME]] = OpcUAHub(conf_hub) # Return boolean to indicate that initialization was successful. def stop_opcua(event): """Stop opcua service.""" for client in hub_collect.values(): client.close() def start_opcua(event): """Start opcua service.""" for client in hub_collect.values(): client.connect() def set_value(service): """set opcua nodeid values.""" hub = service.data[ATTR_HUB] value = service.data[ATTR_VALUE] nodeid = service.data[ATTR_NODEID] hub_collect[hub].setvalues(nodeid, value) def set_attribute(service): """set opcua nodeid values.""" hub = service.data[ATTR_HUB] value = service.data[ATTR_VALUE] nodeid = service.data[ATTR_NODEID] hub_collect[hub].setattribute(nodeid, value) def read_value(service): """read opcua nodeid values.""" hub = service.data[ATTR_HUB] nodeid = service.data[ATTR_NODEID] #Trying to determine if we can even access this data gathered return hub_collect[hub].readvalues(nodeid) def connect(service): """ should be a called service to reconnect in the event the opcua target needs to restart and the socket drops. self.connect() """ hub = service.data[ATTR_HUB] hub_collect[hub].connect() def close(service): """ should be a called service to close the opcua connection gracefully. 
""" hub = service.data[ATTR_HUB] hub_collect[hub].close() # do not wait for EVENT_HOMEASSISTANT_START, setup opcua connection properties now for client in hub_collect.values(): client.setup() # register function to gracefully stop opcua connections hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_opcua) # register function to start opcua connections on HA statup hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_opcua) # Register service to write back values to opcua nodeids hass.services.register( DOMAIN, SERVICE_SET_VALUE, set_value, schema=SERVICE_SET_VALUE_SCHEMA, ) # Register service to write back values to opcua nodeids via set attributes hass.services.register( DOMAIN, SERVICE_SET_ATTRIBUTE, set_attribute, schema=SERVICE_SET_ATTRIBUTE_SCHEMA, ) # Register service to read opcua nodeids values on the fly hass.services.register( DOMAIN, SERVICE_READ_VALUE, read_value, schema=SERVICE_READ_VALUE_SCHEMA, ) # Register services for opcua target reconnection hass.services.register( DOMAIN, SERVICE_CONNECT, connect, schema=SERVICE_CONNECT_SCHEMA, ) # Register services for opcua connection closing hass.services.register( DOMAIN, SERVICE_CLOSE, close, schema=SERVICE_CONNECT_SCHEMA, ) return True class OpcUAHub: """ wrapper class for opcua.""" def __init__(self, client_config): """Initialize the opcua hub.""" # Set configuration variables self._client = None self._config_url = client_config[CONF_URL] self._config_name = client_config[CONF_NAME] self._config_timeout = client_config[CONF_TIMEOUT] self._config_sessiontimeout = client_config[CONF_SESSIONTIMEOUT] self._config_securetimeout = client_config[CONF_SECURETIMEOUT] self._config_username = client_config[CONF_USERNAME] self._config_password = client_config[CONF_PASSWORD] self._config_security = client_config[CONF_SECURITYSTRING] self._application_uri = client_config[CONF_URI] @property def name(self): """Return the name of this hub.""" return self._config_name def setup(self): """Set up opcua 
client.""" _LOGGER.info('Setting up Client parameters for: '+self._config_name) self._client = Client(self._config_url) # Setup timeouts self._client.timeout = self._config_timeout self._client.session_timeout = self._config_sessiontimeout self._client.secure_channel_timeout = self._config_securetimeout # setup URI and Security Type if self._application_uri is not None: self._client.application_uri = self._application_uri if self._config_security is not None: self._client.set_security_string(self._config_security) # Client Auth Setup if self._config_username is not None: self._client._username = self._config_username if self._config_password is not None: self._client._password = self._config_password # Attempt Device Connection # Wrapped in "try" due to socket critical error when OPCUA server rejects/tears down # the socket https://github.com/minix1234/hacore_opcua/issues/1 # Moved initial connection attemp to start_opcua #try: # self.connect() #except Exception as e: # _LOGGER.error(e) def close(self): """Disconnect client.""" try: self._client.disconnect() except Exception as e: _LOGGER.error(self._config_name +': Channel Close Error: '+ str(e)) def connect(self): """Connect client.""" try: self._client.connect() except Exception as e: _LOGGER.error(self._config_name +': Connection Error: '+ str(e)) def readvalues(self, nodeid): try: return self._client.get_node(nodeid).get_value() except Exception as e: _LOGGER.error(str(nodeid) +', Read Value Error: '+ str(e)) def setvalues(self, nodeid, value): try: node = self._client.get_node(nodeid) uatype = node.get_data_value().Value.VariantType node.set_value(value, uatype) except Exception as e: _LOGGER.error('Error: ' + str(e) + ' encountered when attempting to write a value of: '+str(value) +' to nodeid: '+ str(nodeid)) def setattribute(self, nodeid, value): try: node = self._client.get_node(nodeid) uatype = node.get_data_value().Value.VariantType datavalue = ua.DataValue(ua.Variant(value, uatype)) 
node.set_attribute(ua.AttributeIds.Value, datavalue) except Exception as e: _LOGGER.error('Error: ' + str(e) + ' encountered when attempting to write an attribute.ValueIds.Value of: '+str(value) +' to nodeid: '+ str(nodeid))
[ "logging.getLogger", "voluptuous.Required", "voluptuous.Any", "opcua.Client", "opcua.ua.Variant", "voluptuous.Optional" ]
[((758, 785), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (775, 785), False, 'import logging\n'), ((827, 849), 'voluptuous.Required', 'vol.Required', (['CONF_URL'], {}), '(CONF_URL)\n', (839, 849), True, 'import voluptuous as vol\n'), ((870, 915), 'voluptuous.Optional', 'vol.Optional', (['CONF_NAME'], {'default': 'DEFAULT_NAME'}), '(CONF_NAME, default=DEFAULT_NAME)\n', (882, 915), True, 'import voluptuous as vol\n'), ((936, 987), 'voluptuous.Optional', 'vol.Optional', (['CONF_TIMEOUT'], {'default': 'DEFAULT_TIMEOUT'}), '(CONF_TIMEOUT, default=DEFAULT_TIMEOUT)\n', (948, 987), True, 'import voluptuous as vol\n'), ((1014, 1064), 'voluptuous.Optional', 'vol.Optional', (['CONF_SESSIONTIMEOUT'], {'default': '(3600000)'}), '(CONF_SESSIONTIMEOUT, default=3600000)\n', (1026, 1064), True, 'import voluptuous as vol\n'), ((1091, 1139), 'voluptuous.Optional', 'vol.Optional', (['CONF_SECURETIMEOUT'], {'default': '(600000)'}), '(CONF_SECURETIMEOUT, default=600000)\n', (1103, 1139), True, 'import voluptuous as vol\n'), ((1166, 1207), 'voluptuous.Optional', 'vol.Optional', (['CONF_USERNAME'], {'default': 'None'}), '(CONF_USERNAME, default=None)\n', (1178, 1207), True, 'import voluptuous as vol\n'), ((1243, 1284), 'voluptuous.Optional', 'vol.Optional', (['CONF_PASSWORD'], {'default': 'None'}), '(CONF_PASSWORD, default=None)\n', (1255, 1284), True, 'import voluptuous as vol\n'), ((1320, 1367), 'voluptuous.Optional', 'vol.Optional', (['CONF_SECURITYSTRING'], {'default': 'None'}), '(CONF_SECURITYSTRING, default=None)\n', (1332, 1367), True, 'import voluptuous as vol\n'), ((1403, 1439), 'voluptuous.Optional', 'vol.Optional', (['CONF_URI'], {'default': 'None'}), '(CONF_URI, default=None)\n', (1415, 1439), True, 'import voluptuous as vol\n'), ((1209, 1233), 'voluptuous.Any', 'vol.Any', (['None', 'cv.string'], {}), '(None, cv.string)\n', (1216, 1233), True, 'import voluptuous as vol\n'), ((1286, 1310), 'voluptuous.Any', 'vol.Any', (['None', 'cv.string'], 
{}), '(None, cv.string)\n', (1293, 1310), True, 'import voluptuous as vol\n'), ((1369, 1393), 'voluptuous.Any', 'vol.Any', (['None', 'cv.string'], {}), '(None, cv.string)\n', (1376, 1393), True, 'import voluptuous as vol\n'), ((1441, 1465), 'voluptuous.Any', 'vol.Any', (['None', 'cv.string'], {}), '(None, cv.string)\n', (1448, 1465), True, 'import voluptuous as vol\n'), ((1731, 1775), 'voluptuous.Optional', 'vol.Optional', (['ATTR_HUB'], {'default': 'DEFAULT_NAME'}), '(ATTR_HUB, default=DEFAULT_NAME)\n', (1743, 1775), True, 'import voluptuous as vol\n'), ((1796, 1821), 'voluptuous.Required', 'vol.Required', (['ATTR_NODEID'], {}), '(ATTR_NODEID)\n', (1808, 1821), True, 'import voluptuous as vol\n'), ((1842, 1866), 'voluptuous.Required', 'vol.Required', (['ATTR_VALUE'], {}), '(ATTR_VALUE)\n', (1854, 1866), True, 'import voluptuous as vol\n'), ((1868, 1922), 'voluptuous.Any', 'vol.Any', (['float', 'int', 'str', 'cv.byte', 'cv.boolean', 'cv.time'], {}), '(float, int, str, cv.byte, cv.boolean, cv.time)\n', (1875, 1922), True, 'import voluptuous as vol\n'), ((2074, 2118), 'voluptuous.Optional', 'vol.Optional', (['ATTR_HUB'], {'default': 'DEFAULT_NAME'}), '(ATTR_HUB, default=DEFAULT_NAME)\n', (2086, 2118), True, 'import voluptuous as vol\n'), ((2139, 2164), 'voluptuous.Required', 'vol.Required', (['ATTR_NODEID'], {}), '(ATTR_NODEID)\n', (2151, 2164), True, 'import voluptuous as vol\n'), ((2185, 2209), 'voluptuous.Required', 'vol.Required', (['ATTR_VALUE'], {}), '(ATTR_VALUE)\n', (2197, 2209), True, 'import voluptuous as vol\n'), ((2211, 2265), 'voluptuous.Any', 'vol.Any', (['float', 'int', 'str', 'cv.byte', 'cv.boolean', 'cv.time'], {}), '(float, int, str, cv.byte, cv.boolean, cv.time)\n', (2218, 2265), True, 'import voluptuous as vol\n'), ((2413, 2457), 'voluptuous.Optional', 'vol.Optional', (['ATTR_HUB'], {'default': 'DEFAULT_NAME'}), '(ATTR_HUB, default=DEFAULT_NAME)\n', (2425, 2457), True, 'import voluptuous as vol\n'), ((2478, 2503), 'voluptuous.Required', 
'vol.Required', (['ATTR_NODEID'], {}), '(ATTR_NODEID)\n', (2490, 2503), True, 'import voluptuous as vol\n'), ((2576, 2620), 'voluptuous.Optional', 'vol.Optional', (['ATTR_HUB'], {'default': 'DEFAULT_NAME'}), '(ATTR_HUB, default=DEFAULT_NAME)\n', (2588, 2620), True, 'import voluptuous as vol\n'), ((6951, 6975), 'opcua.Client', 'Client', (['self._config_url'], {}), '(self._config_url)\n', (6957, 6975), False, 'from opcua import Client, ua\n'), ((1593, 1613), 'voluptuous.Any', 'vol.Any', (['BASE_SCHEMA'], {}), '(BASE_SCHEMA)\n', (1600, 1613), True, 'import voluptuous as vol\n'), ((9293, 9318), 'opcua.ua.Variant', 'ua.Variant', (['value', 'uatype'], {}), '(value, uatype)\n', (9303, 9318), False, 'from opcua import Client, ua\n')]
import threading import traceback import logging import requests from json.decoder import JSONDecodeError from ping3 import ping logging.basicConfig(level=logging.INFO) GATEWAY_IP = "192.168.100.1" STATIC_IP_MIN = 200 STATIC_IP_MAX = 254 lastDot = GATEWAY_IP.rfind(".") ipAddressBase = GATEWAY_IP[0:lastDot+1] threadLock = threading.Lock() availableForStaticIp = [] dchpNeedToReconfigure = [] def registerShellyFound(outputFile, ip, mac = "", type = "", ipv4_method ="", name = ""): threadLock.acquire() try: outputFile.write(ip + '\t' + mac + '\t' + type + '\t' + ipv4_method + '\t' + str(name) + '\n') if ipv4_method == "dhcp": dchpNeedToReconfigure.append(ip) finally: threadLock.release() def detectDevice(ipLast): ip = ipAddressBase + str(ipLast) if STATIC_IP_MIN < ipLast & ipLast < STATIC_IP_MAX: logging.debug('No Shelly on IP %s, pinging IP to check availability...', ip) pingResult = ping(ip) if pingResult == False: logging.debug("No device on IP %s, registering as available static IP", ip) availableForStaticIp.append(ipLast) else: logging.debug('Network device detected on IP %s, ping in %f sec.', ip, pingResult) return def detectShelly(ipLast, outputFile): try: ip = ipAddressBase + str(ipLast) logging.debug('Checking for Shelly at IP %s...', ip) url = "http://" + ip + "/settings" response = requests.get(url, timeout=10) if response.status_code != 200: detectDevice(ipLast) return json = response.json() device = json["device"] cloud = json["cloud"] cloud_enabled = cloud["enabled"] name = json["name"] mac = device["mac"] type = device["type"] wifi_sta = json["wifi_sta"] ipv4_method = wifi_sta["ipv4_method"] logging.info("Found: ip=%s, mac=%s, type=%s, name=%s, cloud=%d, ipv4_method=%s", ip, mac, type, name, cloud_enabled, ipv4_method) registerShellyFound(outputFile, ip, mac, type, ipv4_method, name) except JSONDecodeError: return except AttributeError: return except requests.ConnectionError as error: detectDevice(ipLast) return def configureStaticIp(currentIp, newIp, 
gatewayIp): try: # example: http://192.168.100.165/settings/sta?ipv4_method=static&ip=192.168.100.208&netmask=255.255.255.0&gateway=192.168.100.1 logging.info("Reconfiguring Shelly with DHCP on IP %s to new IP %s with gateway %s", currentIp, newIp, gatewayIp) url = "http://" + currentIp + "/settings/sta?ipv4_method=static&ip=" + newIp + "&netmask=255.255.255.0&gateway=" + gatewayIp response = requests.get(url, timeout=5) if response.status_code != 200: logging.error("Error reconfiguring %s error code %d", currentIp, response.status_code) return except Exception as e: logging.error(traceback.format_exc()) return def scanForShellys(): ipTableFile = open("shelly-ip-table.txt", "w", encoding="utf-8") threads = [] for c in range(2, 254): t = threading.Thread(target=detectShelly, args=(c, ipTableFile)) threads.append(t) t.start() for t in threads: t.join() ipTableFile.close() availableForStaticIp.sort() dchpNeedToReconfigure.sort() def reconfigureDhcpShellys(): for ipToReconfigure in dchpNeedToReconfigure: if availableForStaticIp.count == 0: logging.error("No more static IP slot available for %s. Stopping.", ipToReconfigure) break staticIpLast = availableForStaticIp.pop(0) staticIp = ipAddressBase + str(staticIpLast) configureStaticIp(ipToReconfigure, staticIp, GATEWAY_IP) scanForShellys() reconfigureDhcpShellys()
[ "logging.basicConfig", "traceback.format_exc", "logging.debug", "threading.Lock", "ping3.ping", "requests.get", "threading.Thread", "logging.info", "logging.error" ]
[((130, 169), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (149, 169), False, 'import logging\n'), ((326, 342), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (340, 342), False, 'import threading\n'), ((873, 949), 'logging.debug', 'logging.debug', (['"""No Shelly on IP %s, pinging IP to check availability..."""', 'ip'], {}), "('No Shelly on IP %s, pinging IP to check availability...', ip)\n", (886, 949), False, 'import logging\n'), ((971, 979), 'ping3.ping', 'ping', (['ip'], {}), '(ip)\n', (975, 979), False, 'from ping3 import ping\n'), ((1373, 1425), 'logging.debug', 'logging.debug', (['"""Checking for Shelly at IP %s..."""', 'ip'], {}), "('Checking for Shelly at IP %s...', ip)\n", (1386, 1425), False, 'import logging\n'), ((1488, 1517), 'requests.get', 'requests.get', (['url'], {'timeout': '(10)'}), '(url, timeout=10)\n', (1500, 1517), False, 'import requests\n'), ((1924, 2058), 'logging.info', 'logging.info', (['"""Found: ip=%s, mac=%s, type=%s, name=%s, cloud=%d, ipv4_method=%s"""', 'ip', 'mac', 'type', 'name', 'cloud_enabled', 'ipv4_method'], {}), "('Found: ip=%s, mac=%s, type=%s, name=%s, cloud=%d, ipv4_method=%s'\n , ip, mac, type, name, cloud_enabled, ipv4_method)\n", (1936, 2058), False, 'import logging\n'), ((2510, 2632), 'logging.info', 'logging.info', (['"""Reconfiguring Shelly with DHCP on IP %s to new IP %s with gateway %s"""', 'currentIp', 'newIp', 'gatewayIp'], {}), "(\n 'Reconfiguring Shelly with DHCP on IP %s to new IP %s with gateway %s',\n currentIp, newIp, gatewayIp)\n", (2522, 2632), False, 'import logging\n'), ((2776, 2804), 'requests.get', 'requests.get', (['url'], {'timeout': '(5)'}), '(url, timeout=5)\n', (2788, 2804), False, 'import requests\n'), ((3202, 3262), 'threading.Thread', 'threading.Thread', ([], {'target': 'detectShelly', 'args': '(c, ipTableFile)'}), '(target=detectShelly, args=(c, ipTableFile))\n', (3218, 3262), False, 'import threading\n'), ((1024, 1099), 
'logging.debug', 'logging.debug', (['"""No device on IP %s, registering as available static IP"""', 'ip'], {}), "('No device on IP %s, registering as available static IP', ip)\n", (1037, 1099), False, 'import logging\n'), ((1174, 1260), 'logging.debug', 'logging.debug', (['"""Network device detected on IP %s, ping in %f sec."""', 'ip', 'pingResult'], {}), "('Network device detected on IP %s, ping in %f sec.', ip,\n pingResult)\n", (1187, 1260), False, 'import logging\n'), ((2857, 2948), 'logging.error', 'logging.error', (['"""Error reconfiguring %s error code %d"""', 'currentIp', 'response.status_code'], {}), "('Error reconfiguring %s error code %d', currentIp, response.\n status_code)\n", (2870, 2948), False, 'import logging\n'), ((3578, 3666), 'logging.error', 'logging.error', (['"""No more static IP slot available for %s. Stopping."""', 'ipToReconfigure'], {}), "('No more static IP slot available for %s. Stopping.',\n ipToReconfigure)\n", (3591, 3666), False, 'import logging\n'), ((3012, 3034), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3032, 3034), False, 'import traceback\n')]
#! /usr/bin/python3 # # Copyright (c) 2020 <NAME> <<EMAIL>> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA import time import sys import fcntl import os import math from usb_2020 import * def toContinue(): answer = input('Continue [yY]? ') if (answer == 'y' or answer == 'Y'): return True else: return False def main(): # initalize the class try: usb2020 = usb_2020() print("USB-2020 device found.") except: print("No USB-2020 device found.") return # print out the calibration tables print('\nCalibration Analog Input Table:') for channel in range(usb2020.NCHAN): for gain in range(usb2020.NGAIN): print(' Channel =', channel, ' Range = ',gain, \ 'Slope =',format(usb2020.table_AIn[channel][gain].slope,'.5f'),\ 'Intercept =',format(usb2020.table_AIn[channel][gain].intercept,'5f')) # print last known calibration date: mdate = usb2020.CalDate() print('\nMFG Calibration date: ', mdate) print("wMaxPacketSize = ", usb2020.wMaxPacketSize) while True: print("\nUSB-2020 Testing") print("------------------") print("Hit 'b' to blink LED.") print("Hit 'B' to test BURSTIO.") print("Hit 'C' to test continous sampling") print("Hit 'd' to read/write digital port.") print("Hit 'e' to exit.") print("Hit 'i' to test analog input. 
(differential)") print("Hit 'I' to test analog input scan.") print("Hit 'M' for information.") print("Hit 'T' to get temperature") print("Hit 'r' to reset the device.") print("Hit 'S' to get status") print("Hit 's' to get serial number.") print("Hit 'v' to get version numbers") ch = input('\n') if ch == 'b': count = int(input('Enter number of times to blink: ')) usb2020.BlinkLED(count) elif ch == 'e': usb2020.udev.close() exit(0) elif ch == 'd': print("Testing Digital I/O ...") print("connect pins DIO[0-3] <--> DIO[4-7]") usb2020.DTristateW(0xf0) print("Digital Port Tristate Register = ", hex(usb2020.DTristateR())) while True: value = int(input('Enter a byte number [0-0xf]: '),16) & 0xf usb2020.DLatchW(value) value2 = usb2020.DLatchR() value3 = usb2020.DPort() >> 4 print("The number you entered: ", hex(value3), " Latched value: ", hex(value2)) if toContinue() != True: break elif ch == 'i': channel = int(input('Input channel [0-1]: ')) gain = int(input('Enter gain. 1 = +/-10V 2 = +/- 5V 3 = +/- 2V 4 = +/- 1V: ')) if gain == 1: gain = usb2020.BP_10V elif gain == 2: gain = usb2020.BP_5V elif gain == 3: gain = usb2020.BP_2V elif gain == 4: gain = usb2020.BP_1V else: print('Unknown gain choice.') break usb2020.AInConfigW(0, channel, gain, True) while True: try: value = usb2020.AIn(channel, gain) except ValueError as e: print(e) break print("AIn: %#x volt = %f" % (value, usb2020.volts(gain, value))) if toContinue() != True: break elif ch == 'I': print('Testing USB-2020 Analog Input Scan.') usb2020.AInScanStop() usb2020.AInScanClearFIFO() count = int(input('Enter total number of scans: ')) nRepeat = int(input('Enter number of repeats: ')) gain = int(input('Enter gain. 
1 = +/-10V 2 = +/- 5V 3 = +/- 2V 4 = +/- 1V: ')) frequency = float(input('Enter sampling frequency [Hz]: ')) nChan = int(input('Enter number of channels [1-2]: ')) for channel in range(nChan): if gain == 1: gain = usb2020.BP_10V elif gain == 2: gain = usb2020.BP_5V elif gain == 3: gain = usb2020.BP_2V elif gain == 4: gain = usb2020.BP_1V else: print('Unknown gain choice.') break usb2020.AInConfigW(channel, channel, gain) usb2020.AInConfigW(nChan-1, nChan-1, gain, True) for repeat in range(nRepeat): print('\n\n---------------------------------------') print('repeat: %d' % (repeat)) # mode = usb2020.VOLTAGE mode = 0 options = 0 usb2020.AInScanStart(count, 0, frequency, options, mode) data = usb2020.AInScanRead() print('Number of samples read = %d (should be %d)' % (len(data), count*nChan)) for i in range(count): print("%6d" % (i), end ='') for j in range(nChan): k = i*nChan + j if mode & usb2020.VOLTAGE: # data returned in volts print(", %8.4lf V" % data[k], end='') else: if data[k] >= 0xffd: print("DAC is saturated at +FS") elif data[k] <= 0x30: print("DAC is saturated at -FS") else: data[k] = int(round(data[k]*usb2020.table_AIn[j][gain].slope + usb2020.table_AIn[j][gain].intercept)) print(", %8.4lf V" % usb2020.volts(gain, data[k]), end='') print("") print("\n---------------------------------------\n") usb2020.AInScanStop() usb2020.AInScanClearFIFO() elif ch == 'C': print("Testing USB-2020 Analog Input Scan in continuous mode 2 channels") print("Hit any key to exit") frequency = float(input("Enter desired sampling frequency (greater than 1000): ")) usb2020.AInScanStop() nScans = 0 # for conitnuous mode nChan = 2 # 2 channels gain = usb2020.BP_10V for channel in range(nChan): usb2020.AInConfigW(channel, channel, gain) usb2020.AInConfigW(nChan-1, nChan-1, gain, lastElement=True) time.sleep(1) mode = usb2020.CONTINUOUS_READOUT options = 0 usb2020.AInScanStart(nScans, 0, frequency, options, mode) flag = fcntl.fcntl(sys.stdin, fcntl.F_GETFL) fcntl.fcntl(sys.stdin, 
fcntl.F_SETFL, flag|os.O_NONBLOCK) i = 0 while True: raw_data = usb2020.AInScanRead() if i%100 == 0: print('Scan =', i, 'samples returned =', len(raw_data)) i += 1 c = sys.stdin.readlines() if (len(c) != 0): break fcntl.fcntl(sys.stdin, fcntl.F_SETFL, flag) usb2020.AInScanStop() usb2020.AInScanClearFIFO() elif ch == 'B': print('Testing USB-2020 Analog Input Scan BURSTIO mode') usb2020.AInScanStop() usb2020.AInScanClearFIFO() nSamples = int(input('Enter number of samples (greater than or equal to 256, less than 64 MB and a multiple of 256): ')) channel = int(input('Input channel [0-1]: ')) frequency = float(input("Enter desired sampling frequency (greater than 1000): ")) gain = int(input('Enter gain. 1 = +/-10V 2 = +/- 5V 3 = +/- 2V 4 = +/- 1V: ')) if gain == 1: gain = usb2020.BP_10V elif gain == 2: gain = usb2020.BP_5V elif gain == 3: gain = usb2020.BP_2V elif gain == 4: gain = usb2020.BP_1V else: print('Unknown gain choice.') break usb2020.AInConfigW(0, channel, gain, lastElement=True) options = usb2020.DDR_RAM # options = (0x1 << 7) print('options = ', options) mode = 0x0 usb2020.AInScanStart(nSamples, 0, frequency, options, mode) data = usb2020.AInScanRead() print('Number of samples read = %d (should be %d)' % (len(data), nSamples)) usb2020.AInScanStop() usb2020.AInScanClearFIFO() elif ch == 'M': print("Manufacturer: %s" % usb2020.getManufacturer()) print("Product: %s" % usb2020.getProduct()) print("Serial No: %s" % usb2020.getSerialNumber()) elif ch == 'e': usb2020.udev.close() exit(0) elif ch == 'r': usb2020.Reset() elif ch == 'S': print('Status =', hex(usb2020.Status())) usb2020.printStatus() elif ch == 's': print("Serial No: %s" % usb2020.getSerialNumber()) elif ch == 'T': print("Internal temperature = %.2f deg C or %.2f deg " % (usb2020.Temperature(), usb2020.Temperature()*9./5. + 32.)) elif ch == 'v': print("FPGA version %s" % (usb2020.FPGAVersion())) if __name__ == "__main__": main()
[ "sys.stdin.readlines", "fcntl.fcntl", "time.sleep" ]
[((6410, 6423), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6420, 6423), False, 'import time\n'), ((6559, 6596), 'fcntl.fcntl', 'fcntl.fcntl', (['sys.stdin', 'fcntl.F_GETFL'], {}), '(sys.stdin, fcntl.F_GETFL)\n', (6570, 6596), False, 'import fcntl\n'), ((6603, 6662), 'fcntl.fcntl', 'fcntl.fcntl', (['sys.stdin', 'fcntl.F_SETFL', '(flag | os.O_NONBLOCK)'], {}), '(sys.stdin, fcntl.F_SETFL, flag | os.O_NONBLOCK)\n', (6614, 6662), False, 'import fcntl\n'), ((6918, 6961), 'fcntl.fcntl', 'fcntl.fcntl', (['sys.stdin', 'fcntl.F_SETFL', 'flag'], {}), '(sys.stdin, fcntl.F_SETFL, flag)\n', (6929, 6961), False, 'import fcntl\n'), ((6848, 6869), 'sys.stdin.readlines', 'sys.stdin.readlines', ([], {}), '()\n', (6867, 6869), False, 'import sys\n')]
#!/bin/python3 ''' USAGE: $ python warranty.py list.txt 1. Set "apikey" to the API key obtained from Dell TechDirect. 2. Create file with serial numbers, one per line, no line endings ''' import time import requests import fileinput import sys fileName = sys.argv[1] api_url = 'https://sandbox.api.dell.com/support/assetinfo/v4/getassetwarranty' headers = {"Content-Type":"application/x-www-form-urlencoded", "apikey":"<KEY>", "accept":"application/json"} with open(fileName, 'r') as serialNumber: for line in serialNumber: payload = {"ID":""} payload['ID'] = line # Actually make the request try: r = requests.post(api_url, headers=headers, data=payload).json() print('Serial:', payload['ID'], 'Expires', r['AssetWarrantyResponse'][0]['AssetEntitlementData'][0]['EndDate']) except: print('Invalid ID:', payload['ID']) pass # Too lazy to make it actually output a csv, this is good enough time.sleep(1) # Wait a sec before doing it again, so to not hit the API too quickly
[ "requests.post", "time.sleep" ]
[((1165, 1178), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1175, 1178), False, 'import time\n'), ((760, 813), 'requests.post', 'requests.post', (['api_url'], {'headers': 'headers', 'data': 'payload'}), '(api_url, headers=headers, data=payload)\n', (773, 813), False, 'import requests\n')]
#!/usr/bin/env python
"""Packaging script for the py-heat-magic IPython extension."""
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
from os import path

# Directory containing this script, resolved to an absolute path.
here = path.abspath(path.dirname(__file__))

# Use the README as the long description shown on PyPI.
with open(path.join(here, "README.rst")) as readme:
    long_description = readme.read()

_CLASSIFIERS = [
    # Intended Audience.
    "Intended Audience :: Developers",
    "Intended Audience :: Education",
    # License.
    "License :: OSI Approved :: MIT License",
    # Project maturity.
    "Development Status :: 3 - Alpha",
    # Operating Systems.
    "Operating System :: POSIX",
    # Supported Languages.
    "Programming Language :: Python :: 2.7",
    "Programming Language :: Python :: 3.4",
    "Programming Language :: Python :: 3.5",
    "Programming Language :: Python :: 3.6",
    # Topic tags.
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules",
]

_INSTALL_REQUIRES = [
    "numpy",
    "scipy",
    "matplotlib",
    "ipython",
    "jupyter",
    "pandas",
    "sympy",
    "nose",
    "py-heat",
]

setup(
    # Name of the module
    name="py-heat-magic",
    # Details
    version="0.0.2",
    description="py-heat as IPython magic",
    long_description=long_description,
    # The project's main homepage.
    url="https://github.com/csurfer/pyheatmagic",
    # Author details
    author="<NAME>",
    author_email="<EMAIL>",
    # License
    license="MIT",
    py_modules=["heat"],
    keywords="heatmap matplotlib profiling python IPython",
    classifiers=_CLASSIFIERS,
    install_requires=_INSTALL_REQUIRES,
)
[ "os.path.dirname", "os.path.join", "distutils.core.setup" ]
[((312, 1251), 'distutils.core.setup', 'setup', ([], {'name': '"""py-heat-magic"""', 'version': '"""0.0.2"""', 'description': '"""py-heat as IPython magic"""', 'long_description': 'long_description', 'url': '"""https://github.com/csurfer/pyheatmagic"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'py_modules': "['heat']", 'keywords': '"""heatmap matplotlib profiling python IPython"""', 'classifiers': "['Intended Audience :: Developers', 'Intended Audience :: Education',\n 'License :: OSI Approved :: MIT License',\n 'Development Status :: 3 - Alpha', 'Operating System :: POSIX',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Build Tools',\n 'Topic :: Software Development :: Libraries :: Python Modules']", 'install_requires': "['numpy', 'scipy', 'matplotlib', 'ipython', 'jupyter', 'pandas', 'sympy',\n 'nose', 'py-heat']"}), "(name='py-heat-magic', version='0.0.2', description=\n 'py-heat as IPython magic', long_description=long_description, url=\n 'https://github.com/csurfer/pyheatmagic', author='<NAME>', author_email\n ='<EMAIL>', license='MIT', py_modules=['heat'], keywords=\n 'heatmap matplotlib profiling python IPython', classifiers=[\n 'Intended Audience :: Developers', 'Intended Audience :: Education',\n 'License :: OSI Approved :: MIT License',\n 'Development Status :: 3 - Alpha', 'Operating System :: POSIX',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Build Tools',\n 'Topic :: Software Development :: Libraries :: Python Modules'],\n install_requires=['numpy', 'scipy', 'matplotlib', 'ipython', 'jupyter',\n 'pandas', 'sympy', 'nose', 'py-heat'])\n", (317, 1251), False, 'from distutils.core import 
setup\n'), ((159, 181), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (171, 181), False, 'from os import path\n'), ((242, 271), 'os.path.join', 'path.join', (['here', '"""README.rst"""'], {}), "(here, 'README.rst')\n", (251, 271), False, 'from os import path\n')]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Some simple tests for the autopaths package. You can run this file like this:

    ipython -i -- ~/repos/autopaths/test/test_file_path.py
"""
# Built-in modules #
import os, inspect

# Get current directory (works always) #
file_name = os.path.abspath(inspect.stack()[0][1])
this_dir = os.path.dirname(os.path.abspath(file_name)) + '/'

# All our example file system #
dummy_files = this_dir + 'dummy_file_system/'

# Internal modules #
from autopaths.dir_path import DirectoryPath

###############################################################################
def test_symlink():
    """Create a symlink 'one_link.txt' pointing at 'one.txt' in the fixture dir."""
    base = DirectoryPath(dummy_files)
    target = base['one.txt']
    target.link_to(base + 'one_link.txt')

###############################################################################
if __name__ == '__main__':
    test_symlink()
[ "os.path.abspath", "autopaths.dir_path.DirectoryPath", "inspect.stack" ]
[((646, 672), 'autopaths.dir_path.DirectoryPath', 'DirectoryPath', (['dummy_files'], {}), '(dummy_files)\n', (659, 672), False, 'from autopaths.dir_path import DirectoryPath\n'), ((357, 383), 'os.path.abspath', 'os.path.abspath', (['file_name'], {}), '(file_name)\n', (372, 383), False, 'import os, inspect\n'), ((305, 320), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (318, 320), False, 'import os, inspect\n')]
"""Register the SQLAlchemy store implementation as an rdflib plugin."""
from rdflib import plugin
from rdflib import store

# Make `Graph(store="SQLAlchemy")` resolve to rdflib_sqlalchemy's Store class.
_PLUGIN_NAME = "SQLAlchemy"
plugin.register(_PLUGIN_NAME, store.Store, "rdflib_sqlalchemy.store", _PLUGIN_NAME)
[ "rdflib.plugin.register" ]
[((52, 139), 'rdflib.plugin.register', 'plugin.register', (['"""SQLAlchemy"""', 'store.Store', '"""rdflib_sqlalchemy.store"""', '"""SQLAlchemy"""'], {}), "('SQLAlchemy', store.Store, 'rdflib_sqlalchemy.store',\n 'SQLAlchemy')\n", (67, 139), False, 'from rdflib import plugin\n')]
import difflib

import discord
from discord.ext import commands
from discord.ext.commands import CommandNotFound

intents = discord.Intents.all()
client = commands.Bot(command_prefix="+", intents=intents, help_command=None)


@client.event
async def on_ready():
    """Print a short banner once the bot is connected."""
    print("Bot Online")


@client.event
async def on_command_error(ctx: commands.Context, exc):
    """On an unknown command, suggest the closest match; ignore other errors."""
    if isinstance(exc, CommandNotFound):
        await send_command_suggestion(ctx, ctx.invoked_with)


async def send_command_suggestion(ctx: commands.Context, command_name: str) -> None:
    """Sends user similar commands if any can be found."""
    # Gather the names and aliases of every non-hidden command.
    candidates = []
    for cmd in client.walk_commands():
        if not cmd.hidden:
            candidates += (cmd.name, *cmd.aliases)

    matches = difflib.get_close_matches(command_name, candidates, 1)
    if not matches:
        return

    suggestion = matches[0]
    suggested = client.get_command(suggestion)
    if not suggested:
        return
    try:
        runnable = await suggested.can_run(ctx)
    except commands.errors.CommandError:
        return
    if not runnable:
        return

    embed = discord.Embed()
    embed.set_author(name="Did you mean:")
    # Echo the user's message with only the first occurrence of the
    # unknown command swapped for the suggestion.
    embed.description = ctx.message.content.replace(command_name, suggestion, 1)
    await ctx.send(embed=embed, delete_after=10.0)


client.run("TOKEN")
[ "discord.ext.commands.Bot", "discord.Embed", "difflib.get_close_matches", "discord.Intents.all" ]
[((124, 145), 'discord.Intents.all', 'discord.Intents.all', ([], {}), '()\n', (143, 145), False, 'import discord\n'), ((155, 223), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""+"""', 'intents': 'intents', 'help_command': 'None'}), "(command_prefix='+', intents=intents, help_command=None)\n", (167, 223), False, 'from discord.ext import commands\n'), ((801, 857), 'difflib.get_close_matches', 'difflib.get_close_matches', (['command_name', 'raw_commands', '(1)'], {}), '(command_name, raw_commands, 1)\n', (826, 857), False, 'import difflib\n'), ((1250, 1265), 'discord.Embed', 'discord.Embed', ([], {}), '()\n', (1263, 1265), False, 'import discord\n')]
import asyncio
import logging
import time
from typing import Optional, List

from hummingbot.core.data_type.user_stream_tracker_data_source import \
    UserStreamTrackerDataSource
from hummingbot.logger import HummingbotLogger
from hummingbot.connector.exchange.bitfinex.bitfinex_order_book import BitfinexOrderBook
from hummingbot.connector.exchange.bitfinex.bitfinex_websocket import BitfinexWebsocket
from hummingbot.connector.exchange.bitfinex.bitfinex_auth import BitfinexAuth
from hummingbot.connector.exchange.bitfinex.bitfinex_order_book_message import \
    BitfinexOrderBookMessage


class BitfinexAPIUserStreamDataSource(UserStreamTrackerDataSource):
    """Streams authenticated user events from the Bitfinex websocket into a queue."""

    # Seconds to wait before reconnecting after an unexpected websocket error.
    MESSAGE_TIMEOUT = 30.0

    _logger: Optional[HummingbotLogger] = None

    @classmethod
    def logger(cls) -> HummingbotLogger:
        """Return the class-level logger, creating it lazily on first use."""
        if cls._logger is None:
            cls._logger = logging.getLogger(__name__)
        return cls._logger

    def __init__(self,
                 bitfinex_auth: BitfinexAuth,
                 trading_pairs: Optional[List[str]] = None):
        """
        :param bitfinex_auth: credentials used to authenticate the websocket
        :param trading_pairs: pairs to track; defaults to an empty list
        """
        if trading_pairs is None:
            trading_pairs = []
        self._bitfinex_auth: BitfinexAuth = bitfinex_auth
        self._trading_pairs = trading_pairs
        # NOTE(review): _current_listen_key and _listen_for_user_stream_task
        # are initialized but never used within this class - presumably kept
        # for interface parity with other exchange connectors; confirm.
        self._current_listen_key = None
        self._listen_for_user_stream_task = None
        # NOTE(review): never updated inside this class, so last_recv_time
        # stays 0 unless a subclass or caller updates it - confirm intended.
        self._last_recv_time: float = 0
        super().__init__()

    @property
    def order_book_class(self):
        """Order book implementation used to parse messages from this source."""
        return BitfinexOrderBook

    @property
    def last_recv_time(self) -> float:
        """Timestamp of the last received message (see NOTE in __init__)."""
        return self._last_recv_time

    async def listen_for_user_stream(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
        """
        Connect, authenticate and forward transformed messages into *output*
        forever, reconnecting after MESSAGE_TIMEOUT seconds on any error.
        """
        while True:
            try:
                ws = await BitfinexWebsocket(self._bitfinex_auth).connect()
                await ws.authenticate()
                async for msg in ws.messages():
                    transformed_msg: BitfinexOrderBookMessage = self._transform_message_from_exchange(msg)
                    # Heartbeats / auth / info events transform to None and
                    # are dropped rather than queued.
                    if transformed_msg is None:
                        continue
                    else:
                        output.put_nowait(transformed_msg)
            except asyncio.CancelledError:
                # Propagate cancellation so the task can be shut down cleanly.
                raise
            except Exception:
                self.logger().error(
                    "Unexpected error with Bitfinex WebSocket connection. "
                    "Retrying after 30 seconds...",
                    exc_info=True,
                )
                await asyncio.sleep(self.MESSAGE_TIMEOUT)

    def _transform_message_from_exchange(self, msg) -> Optional[BitfinexOrderBookMessage]:
        """
        Wrap a raw websocket payload in a diff order-book message, stamped
        with the current time; returns None for heartbeat/auth/info events.
        """
        order_book_message: BitfinexOrderBookMessage = BitfinexOrderBook.diff_message_from_exchange(msg, time.time())
        if any([
            order_book_message.type_heartbeat,
            order_book_message.event_auth,
            order_book_message.event_info,
        ]):
            # skip unneeded events and types
            return
        return order_book_message
[ "logging.getLogger", "time.time", "hummingbot.connector.exchange.bitfinex.bitfinex_websocket.BitfinexWebsocket", "asyncio.sleep" ]
[((855, 882), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (872, 882), False, 'import logging\n'), ((2625, 2636), 'time.time', 'time.time', ([], {}), '()\n', (2634, 2636), False, 'import time\n'), ((2392, 2427), 'asyncio.sleep', 'asyncio.sleep', (['self.MESSAGE_TIMEOUT'], {}), '(self.MESSAGE_TIMEOUT)\n', (2405, 2427), False, 'import asyncio\n'), ((1664, 1702), 'hummingbot.connector.exchange.bitfinex.bitfinex_websocket.BitfinexWebsocket', 'BitfinexWebsocket', (['self._bitfinex_auth'], {}), '(self._bitfinex_auth)\n', (1681, 1702), False, 'from hummingbot.connector.exchange.bitfinex.bitfinex_websocket import BitfinexWebsocket\n')]
""" Auth route """ import requests from fastapi import APIRouter, HTTPException from fastapi.param_functions import Depends from sqlalchemy.orm import Session from config.database import get_database from config.logger import logger from config.settings import settings from server.controllers.auth import get_department_id, sign_jwt from server.schemas.users import Users router = APIRouter( prefix="/auth", ) @router.get("/callback/") async def fetch_user_details( code: str, session: Session = Depends(get_database) ): """ Handles the callback route and fetches the user details """ params = { "client_id": settings.client_id, "client_secret": settings.client_secret, "grant_type": "authorization_code", "code": code, "redirect_uri": settings.redirect_url, } try: token_response = requests.post( url=settings.token_endpoint, data=params ).json() logger.debug(token_response) headers = { "Authorization": "Bearer " + token_response["access_token"] } userdetails = requests.post( url=settings.resource_endpoint, headers=headers, ).json() if ( not session.query(Users) .filter_by(email=userdetails["email"]) .first() ): new_user = Users( name=userdetails["name"], email=userdetails["email"], mobile_number=userdetails["phoneNumber"], gender=userdetails["gender"], department_id=get_department_id(userdetails["email"]), fcm_token="<PASSWORD>", ) session.add(new_user) session.commit() session.close() jwt = sign_jwt(userdetails["email"], userdetails["name"]) logger.info(f'{userdetails["name"]} user logged in') return { "name": userdetails["name"], "email": userdetails["email"], "phoneNumber": userdetails["phoneNumber"], "gender": userdetails["gender"], "jwt": jwt["jwt_token"], } except Exception as exception: logger.error(f"/dauth failed with {exception}") raise HTTPException( status_code=500, detail="An unexpected error occurred while authentication", headers={ "X-Error": "An unexpected error occurred while authentication" }, ) from exception
[ "requests.post", "fastapi.HTTPException", "config.logger.logger.error", "server.controllers.auth.sign_jwt", "config.logger.logger.info", "fastapi.APIRouter", "config.logger.logger.debug", "server.controllers.auth.get_department_id", "fastapi.param_functions.Depends" ]
[((385, 410), 'fastapi.APIRouter', 'APIRouter', ([], {'prefix': '"""/auth"""'}), "(prefix='/auth')\n", (394, 410), False, 'from fastapi import APIRouter, HTTPException\n'), ((510, 531), 'fastapi.param_functions.Depends', 'Depends', (['get_database'], {}), '(get_database)\n', (517, 531), False, 'from fastapi.param_functions import Depends\n'), ((963, 991), 'config.logger.logger.debug', 'logger.debug', (['token_response'], {}), '(token_response)\n', (975, 991), False, 'from config.logger import logger\n'), ((1800, 1851), 'server.controllers.auth.sign_jwt', 'sign_jwt', (["userdetails['email']", "userdetails['name']"], {}), "(userdetails['email'], userdetails['name'])\n", (1808, 1851), False, 'from server.controllers.auth import get_department_id, sign_jwt\n'), ((1860, 1912), 'config.logger.logger.info', 'logger.info', (['f"""{userdetails[\'name\']} user logged in"""'], {}), '(f"{userdetails[\'name\']} user logged in")\n', (1871, 1912), False, 'from config.logger import logger\n'), ((2204, 2251), 'config.logger.logger.error', 'logger.error', (['f"""/dauth failed with {exception}"""'], {}), "(f'/dauth failed with {exception}')\n", (2216, 2251), False, 'from config.logger import logger\n'), ((2266, 2439), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(500)', 'detail': '"""An unexpected error occurred while authentication"""', 'headers': "{'X-Error': 'An unexpected error occurred while authentication'}"}), "(status_code=500, detail=\n 'An unexpected error occurred while authentication', headers={'X-Error':\n 'An unexpected error occurred while authentication'})\n", (2279, 2439), False, 'from fastapi import APIRouter, HTTPException\n'), ((870, 925), 'requests.post', 'requests.post', ([], {'url': 'settings.token_endpoint', 'data': 'params'}), '(url=settings.token_endpoint, data=params)\n', (883, 925), False, 'import requests\n'), ((1116, 1178), 'requests.post', 'requests.post', ([], {'url': 'settings.resource_endpoint', 'headers': 'headers'}), 
'(url=settings.resource_endpoint, headers=headers)\n', (1129, 1178), False, 'import requests\n'), ((1604, 1643), 'server.controllers.auth.get_department_id', 'get_department_id', (["userdetails['email']"], {}), "(userdetails['email'])\n", (1621, 1643), False, 'from server.controllers.auth import get_department_id, sign_jwt\n')]
import os
import re
import argparse
from collections import defaultdict

# Amazon FreeRTOS components whose source files carry a version header.
_AFR_COMPONENTS = [
    'demos',
    'freertos_kernel',
    os.path.join('libraries','abstractions','ble_hal'),
    os.path.join('libraries','abstractions','common_io'),
    os.path.join('libraries','abstractions','pkcs11'),
    os.path.join('libraries','abstractions','platform'),
    os.path.join('libraries','abstractions','posix'),
    os.path.join('libraries','abstractions','secure_sockets'),
    os.path.join('libraries','abstractions','wifi'),
    os.path.join('libraries','c_sdk','aws','defender'),
    os.path.join('libraries','c_sdk','aws','shadow'),
    os.path.join('libraries','c_sdk','standard','ble'),
    os.path.join('libraries','c_sdk','standard','common'),
    os.path.join('libraries','c_sdk','standard','https'),
    os.path.join('libraries','c_sdk','standard','mqtt'),
    os.path.join('libraries','c_sdk','standard','serializer'),
    os.path.join('libraries','freertos_plus','aws','greengrass'),
    os.path.join('libraries','freertos_plus','aws','ota'),
    os.path.join('libraries','freertos_plus','standard','crypto'),
    os.path.join('libraries','freertos_plus','standard','freertos_plus_posix'),
    os.path.join('libraries','freertos_plus','standard','freertos_plus_tcp'),
    os.path.join('libraries','freertos_plus','standard','pkcs11'),
    os.path.join('libraries','freertos_plus','standard','tls'),
    os.path.join('libraries','freertos_plus','standard','utils'),
    'tests'
]


def ask_question(question):
    '''
    Prompts the user with the given question and returns the stripped answer.
    '''
    answer = input('{}: '.format(question))
    return answer.strip()


def ask_multiple_choice_question(question, choices):
    '''
    Prompts the user with a numbered list of choices until a valid index is
    entered; returns the chosen index.
    '''
    while True:
        print('{}?'.format(question))
        for i in range(len(choices)):
            print('{}. {}'.format(i, choices[i]))
        try:
            user_choice = int(ask_question('Enter Choice'))
        except ValueError:
            # Non-numeric input - re-prompt.
            print('Incorrect choice. Please choose a number between 0 and {}'.format(len(choices) - 1))
            continue
        if user_choice in range(len(choices)):
            break
        else:
            print('Incorrect choice. Please choose a number between 0 and {}'.format(len(choices) - 1))
    return user_choice


def ask_yes_no_question(question):
    '''
    Prompts the user with a Y/N question until answered; returns 'yes' or 'no'.
    '''
    while True:
        answer = ask_question('{} (Y/N)'.format(question))
        if answer.lower() == 'y':
            answer = 'yes'
            break
        elif answer.lower() == 'n':
            answer = 'no'
            break
        else:
            print('Incorrect response. Please answer Y/N.')
    return answer


def print_file_list(file_list):
    '''
    Prints the given files and their detected version lines as an ASCII table.
    '''
    if not file_list:
        # Guard: max() below raises ValueError on an empty sequence.
        return
    version_line_list = []
    for file in file_list:
        version_number = extract_version_number_from_file(file)
        version_line_list.append(version_number[0] if version_number[0] is not None else 'Could not detect version')
    max_filepath_length = len(max(file_list, key=len))
    max_version_line_length = len(max(version_line_list, key=len))
    # Hoisted loop-invariant separator line.
    separator = '-' * (max_filepath_length + max_version_line_length + 7)
    print(separator)
    print('| {file:<{max_filepath_length}} | {version:<{max_version_line_length}} |'.format(file='File', max_filepath_length=max_filepath_length, version='Version Line', max_version_line_length=max_version_line_length))
    print(separator)
    for i in range(len(file_list)):
        print('| {file:<{max_filepath_length}} | {version:<{max_version_line_length}} |'.format(file=file_list[i], max_filepath_length=max_filepath_length, version=version_line_list[i], max_version_line_length=max_version_line_length))
    print(separator)
    print('\n')


def list_files_in_a_component(component, afr_path):
    '''
    Returns a list of all the files in a component.

    Paths are returned relative to afr_path; only .c/.h files are included,
    and 'portable', 'third_party' and hidden entries are skipped.
    '''
    list_of_files = []
    search_path = os.path.join(afr_path, component)
    for root, dirs, files in os.walk(search_path, topdown=True):
        # Do not search 'portable' and 'third_party' folders.
        dirs[:] = [d for d in dirs if d not in ['portable', 'third_party']]
        # Do not include hidden files and folders.
        dirs[:] = [d for d in dirs if not d[0] == '.']
        files = [f for f in files if not f[0] == '.']
        for f in files:
            if f.endswith('.c') or f.endswith('.h'):
                list_of_files.append(os.path.join(os.path.relpath(root, afr_path), f))
    return list_of_files


def extract_version_number_from_file(file_path):
    '''
    Extracts version number from the License header in a file.

    Returns a (version_line, version_number) tuple, or (None, None) when no
    version header could be found.
    '''
    with open(file_path, encoding='utf-8') as f:
        content = f.read()
    # Raw strings so \s, \* and \+ are regex escapes, not (deprecated)
    # string escapes that will become a SyntaxError in future Pythons.
    match = re.search(r'\s*\*\s*(FreeRTOS.*V(.*))', content, re.MULTILINE)
    # Is it a kernel file?
    # NOTE: the generic pattern above already matches these lines, so the
    # fallbacks below can never fire; kept for parity with the original.
    if match is None:
        match = re.search(r'\s*\*\s*(FreeRTOS Kernel.*V(.*))', content, re.MULTILINE)
    # Is it a FreeRTOS+TCP file?
    if match is None:
        match = re.search(r'\s*\*\s*(FreeRTOS\+TCP.*V(.*))', content, re.MULTILINE)
    return (match.group(1), match.group(2)) if match is not None else (None, None)


def update_version_number_in_files(file_paths, old_version_line, new_version_line):
    '''
    Replaces old_version_line with new_version_line in all the files
    specified by file_paths.
    '''
    for file_path in file_paths:
        with open(file_path, encoding='utf-8') as f:
            content = f.read()
        content = content.replace(old_version_line, new_version_line)
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write(content)


def update_version_number_in_a_component(component, afr_path):
    '''
    Updates version numbers in all the files of an AFR component based on
    user choices.
    '''
    # Group file paths by their (version_line, version_number) tuple.
    files_in_component = list_files_in_a_component(component, afr_path)
    version_numbers = defaultdict(list)
    for f in files_in_component:
        file_path = os.path.join(afr_path, f)
        version_number = extract_version_number_from_file(file_path)
        version_numbers[version_number].append(file_path)
    for key in version_numbers:
        old_version_line, old_version_number = key
        files_to_update = version_numbers[key]
        if old_version_line is None:
            # No version header detected - the user must fix these by hand.
            print('\nFailed to detect the version number in the following files:')
            while True:
                print_file_list(files_to_update)
                print('Please update the above files manually!')
                confirm = ask_yes_no_question('Done updating')
                if confirm == 'yes':
                    print_file_list(files_to_update)
                    looks_good = ask_yes_no_question('Does it look good')
                    if looks_good == 'yes':
                        break
        else:
            print('\n{} files have the following version: {}\n'.format(len(files_to_update), old_version_line))
            options = [
                'Update version number [i.e. update "{}"].'.format(old_version_number),
                'Update version line [i.e. update "{}"].'.format(old_version_line),
                'List files.',
                'Do not update.'
            ]
            while True:
                user_selected_option = ask_multiple_choice_question('What do you want to do', options)
                if user_selected_option == 0:
                    new_version_number = ask_question('Enter new version number')
                    new_version_line = old_version_line.replace(old_version_number, new_version_number)
                    print('Old version line: "{}". New version line: "{}".'.format(old_version_line, new_version_line))
                    confirm = ask_yes_no_question('Does it look good')
                    if confirm == 'yes':
                        update_version_number_in_files(files_to_update, old_version_line, new_version_line)
                        print('Updated version line to "{}".\n'.format(new_version_line))
                        break
                elif user_selected_option == 1:
                    new_version_line = ask_question('Enter new version line')
                    print('Old version line: "{}". New version line: "{}".'.format(old_version_line, new_version_line))
                    confirm = ask_yes_no_question('Does it look good')
                    if confirm == 'yes':
                        update_version_number_in_files(files_to_update, old_version_line, new_version_line)
                        print('Updated version line to "{}".\n'.format(new_version_line))
                        break
                elif user_selected_option == 2:
                    print_file_list(files_to_update)
                else:
                    print('Skipping update of {}.\n'.format(old_version_line))
                    break


def parse_arguments():
    '''
    Parses the command line arguments.
    '''
    # Description fixed: this script updates version numbers, it does not
    # generate checksums (old text was a copy-paste artifact).
    parser = argparse.ArgumentParser(description='FreeRTOS Version Number Updater')
    parser.add_argument('--afr', required=True, help='Location of the AFR Code.')
    args = parser.parse_args()
    return vars(args)


def main():
    '''
    Main entry point.
    '''
    args = parse_arguments()
    afr_path = args['afr']
    print('AFR Code: {}'.format(afr_path))
    for component in _AFR_COMPONENTS:
        print('\n---------------------------------------------')
        print('Component: {}'.format(component))
        print('---------------------------------------------\n')
        wanna_update_version = ask_yes_no_question('Do you want to update the component "{}"'.format(component))
        if wanna_update_version == 'yes':
            update_version_number_in_a_component(component, afr_path)


if __name__ == '__main__':
    main()
[ "argparse.ArgumentParser", "os.path.join", "collections.defaultdict", "os.path.relpath", "os.walk", "re.search" ]
[((134, 186), 'os.path.join', 'os.path.join', (['"""libraries"""', '"""abstractions"""', '"""ble_hal"""'], {}), "('libraries', 'abstractions', 'ble_hal')\n", (146, 186), False, 'import os\n'), ((190, 244), 'os.path.join', 'os.path.join', (['"""libraries"""', '"""abstractions"""', '"""common_io"""'], {}), "('libraries', 'abstractions', 'common_io')\n", (202, 244), False, 'import os\n'), ((248, 299), 'os.path.join', 'os.path.join', (['"""libraries"""', '"""abstractions"""', '"""pkcs11"""'], {}), "('libraries', 'abstractions', 'pkcs11')\n", (260, 299), False, 'import os\n'), ((303, 356), 'os.path.join', 'os.path.join', (['"""libraries"""', '"""abstractions"""', '"""platform"""'], {}), "('libraries', 'abstractions', 'platform')\n", (315, 356), False, 'import os\n'), ((360, 410), 'os.path.join', 'os.path.join', (['"""libraries"""', '"""abstractions"""', '"""posix"""'], {}), "('libraries', 'abstractions', 'posix')\n", (372, 410), False, 'import os\n'), ((414, 473), 'os.path.join', 'os.path.join', (['"""libraries"""', '"""abstractions"""', '"""secure_sockets"""'], {}), "('libraries', 'abstractions', 'secure_sockets')\n", (426, 473), False, 'import os\n'), ((477, 526), 'os.path.join', 'os.path.join', (['"""libraries"""', '"""abstractions"""', '"""wifi"""'], {}), "('libraries', 'abstractions', 'wifi')\n", (489, 526), False, 'import os\n'), ((530, 583), 'os.path.join', 'os.path.join', (['"""libraries"""', '"""c_sdk"""', '"""aws"""', '"""defender"""'], {}), "('libraries', 'c_sdk', 'aws', 'defender')\n", (542, 583), False, 'import os\n'), ((586, 637), 'os.path.join', 'os.path.join', (['"""libraries"""', '"""c_sdk"""', '"""aws"""', '"""shadow"""'], {}), "('libraries', 'c_sdk', 'aws', 'shadow')\n", (598, 637), False, 'import os\n'), ((640, 693), 'os.path.join', 'os.path.join', (['"""libraries"""', '"""c_sdk"""', '"""standard"""', '"""ble"""'], {}), "('libraries', 'c_sdk', 'standard', 'ble')\n", (652, 693), False, 'import os\n'), ((696, 752), 'os.path.join', 'os.path.join', 
(['"""libraries"""', '"""c_sdk"""', '"""standard"""', '"""common"""'], {}), "('libraries', 'c_sdk', 'standard', 'common')\n", (708, 752), False, 'import os\n'), ((755, 810), 'os.path.join', 'os.path.join', (['"""libraries"""', '"""c_sdk"""', '"""standard"""', '"""https"""'], {}), "('libraries', 'c_sdk', 'standard', 'https')\n", (767, 810), False, 'import os\n'), ((813, 867), 'os.path.join', 'os.path.join', (['"""libraries"""', '"""c_sdk"""', '"""standard"""', '"""mqtt"""'], {}), "('libraries', 'c_sdk', 'standard', 'mqtt')\n", (825, 867), False, 'import os\n'), ((870, 930), 'os.path.join', 'os.path.join', (['"""libraries"""', '"""c_sdk"""', '"""standard"""', '"""serializer"""'], {}), "('libraries', 'c_sdk', 'standard', 'serializer')\n", (882, 930), False, 'import os\n'), ((933, 996), 'os.path.join', 'os.path.join', (['"""libraries"""', '"""freertos_plus"""', '"""aws"""', '"""greengrass"""'], {}), "('libraries', 'freertos_plus', 'aws', 'greengrass')\n", (945, 996), False, 'import os\n'), ((999, 1055), 'os.path.join', 'os.path.join', (['"""libraries"""', '"""freertos_plus"""', '"""aws"""', '"""ota"""'], {}), "('libraries', 'freertos_plus', 'aws', 'ota')\n", (1011, 1055), False, 'import os\n'), ((1058, 1122), 'os.path.join', 'os.path.join', (['"""libraries"""', '"""freertos_plus"""', '"""standard"""', '"""crypto"""'], {}), "('libraries', 'freertos_plus', 'standard', 'crypto')\n", (1070, 1122), False, 'import os\n'), ((1125, 1202), 'os.path.join', 'os.path.join', (['"""libraries"""', '"""freertos_plus"""', '"""standard"""', '"""freertos_plus_posix"""'], {}), "('libraries', 'freertos_plus', 'standard', 'freertos_plus_posix')\n", (1137, 1202), False, 'import os\n'), ((1205, 1280), 'os.path.join', 'os.path.join', (['"""libraries"""', '"""freertos_plus"""', '"""standard"""', '"""freertos_plus_tcp"""'], {}), "('libraries', 'freertos_plus', 'standard', 'freertos_plus_tcp')\n", (1217, 1280), False, 'import os\n'), ((1283, 1347), 'os.path.join', 'os.path.join', 
(['"""libraries"""', '"""freertos_plus"""', '"""standard"""', '"""pkcs11"""'], {}), "('libraries', 'freertos_plus', 'standard', 'pkcs11')\n", (1295, 1347), False, 'import os\n'), ((1350, 1411), 'os.path.join', 'os.path.join', (['"""libraries"""', '"""freertos_plus"""', '"""standard"""', '"""tls"""'], {}), "('libraries', 'freertos_plus', 'standard', 'tls')\n", (1362, 1411), False, 'import os\n'), ((1414, 1477), 'os.path.join', 'os.path.join', (['"""libraries"""', '"""freertos_plus"""', '"""standard"""', '"""utils"""'], {}), "('libraries', 'freertos_plus', 'standard', 'utils')\n", (1426, 1477), False, 'import os\n'), ((4418, 4451), 'os.path.join', 'os.path.join', (['afr_path', 'component'], {}), '(afr_path, component)\n', (4430, 4451), False, 'import os\n'), ((4482, 4516), 'os.walk', 'os.walk', (['search_path'], {'topdown': '(True)'}), '(search_path, topdown=True)\n', (4489, 4516), False, 'import os\n'), ((6405, 6422), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6416, 6422), False, 'from collections import defaultdict\n'), ((9522, 9588), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""FreeRTOS Checksum Generator"""'}), "(description='FreeRTOS Checksum Generator')\n", (9545, 9588), False, 'import argparse\n'), ((5212, 5276), 're.search', 're.search', (['"""\\\\s*\\\\*\\\\s*(FreeRTOS.*V(.*))"""', 'content', 're.MULTILINE'], {}), "('\\\\s*\\\\*\\\\s*(FreeRTOS.*V(.*))', content, re.MULTILINE)\n", (5221, 5276), False, 'import re\n'), ((6527, 6552), 'os.path.join', 'os.path.join', (['afr_path', 'f'], {}), '(afr_path, f)\n', (6539, 6552), False, 'import os\n'), ((5351, 5422), 're.search', 're.search', (['"""\\\\s*\\\\*\\\\s*(FreeRTOS Kernel.*V(.*))"""', 'content', 're.MULTILINE'], {}), "('\\\\s*\\\\*\\\\s*(FreeRTOS Kernel.*V(.*))', content, re.MULTILINE)\n", (5360, 5422), False, 'import re\n'), ((5503, 5573), 're.search', 're.search', (['"""\\\\s*\\\\*\\\\s*(FreeRTOS\\\\+TCP.*V(.*))"""', 'content', 're.MULTILINE'], 
{}), "('\\\\s*\\\\*\\\\s*(FreeRTOS\\\\+TCP.*V(.*))', content, re.MULTILINE)\n", (5512, 5573), False, 'import re\n'), ((4945, 4976), 'os.path.relpath', 'os.path.relpath', (['root', 'afr_path'], {}), '(root, afr_path)\n', (4960, 4976), False, 'import os\n')]
import logging from pytorch_lightning.callbacks.base import Callback __all__ = ["Speed"] logger = logging.getLogger(__name__) class Speed(Callback): r""" Training speed callback, require 'simple' or 'advanced' profiler. """ def on_train_batch_end( self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx ): legacy_metrics = ( trainer.logger_connector.cached_results.legacy_batch_log_metrics ) legacy_metrics["iter"] = trainer.global_step legacy_metrics["epoch"] = trainer.current_epoch if not self.__has_profiler(trainer): # if not profiler provided, skip speed and batch_time. return # get training one batch time run_training_batch_time = trainer.profiler.recorded_durations[ "run_training_batch" ][-1] if hasattr(trainer.datamodule, "batch_size"): total_batch_size = ( trainer.datamodule.batch_size * trainer.world_size ) legacy_metrics["speed"] = ( 1.0 * total_batch_size / run_training_batch_time ) else: legacy_metrics["batch_time"] = run_training_batch_time def on_train_epoch_end(self, trainer, pl_module, *args, **kwargs): if not self.__has_profiler(trainer): return run_training_epoch_time = trainer.profiler.recorded_durations[ "run_training_epoch" ] if len(run_training_epoch_time) > 0 and hasattr( trainer.logger, "log_metrics" ): epoch_time = {"epoch_time": run_training_epoch_time[-1]} trainer.logger.log_metrics(epoch_time, step=trainer.current_epoch) def __has_profiler(self, trainer): return hasattr(trainer.profiler, "recorded_durations")
[ "logging.getLogger" ]
[((101, 128), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (118, 128), False, 'import logging\n')]
''' Created on Apr 18, 2018 @author: msanchez ''' from scraper.RequestScraper import RequestScraper from scraper.HTMLFilter import HTMLFilter from scraper.NewsFilter import NewsFilter from scraper.utilities.WebUtilities import WebUtilities class Scraper(object): ''' Full scrap operation. Downloads the request with an URL. Checks the HTTP status code. In case it's correct, proceeds with the scrap & filter operation. ''' def __init__(self): ''' Constructor ''' def scrap(self): web = self.__download() result = list() if(200 == web.status_code): scraper = RequestScraper(web) html_news_tags = scraper.scrap_news() cleaned_tags = self.__clean(html_news_tags) result = self.__filter(cleaned_tags) else: print("There was an error on download operation. Status code: ", str(web.status_code)) return result def __download(self): downloader = WebUtilities() return downloader.download("https://www.heraldo.es/") def __clean(self, html_tags): tag_filter = HTMLFilter(html_tags) return tag_filter.filter() def __filter(self, unfiltered_tags): matcher = NewsFilter(unfiltered_tags) return matcher.search()
[ "scraper.RequestScraper.RequestScraper", "scraper.HTMLFilter.HTMLFilter", "scraper.utilities.WebUtilities.WebUtilities", "scraper.NewsFilter.NewsFilter" ]
[((1013, 1027), 'scraper.utilities.WebUtilities.WebUtilities', 'WebUtilities', ([], {}), '()\n', (1025, 1027), False, 'from scraper.utilities.WebUtilities import WebUtilities\n'), ((1150, 1171), 'scraper.HTMLFilter.HTMLFilter', 'HTMLFilter', (['html_tags'], {}), '(html_tags)\n', (1160, 1171), False, 'from scraper.HTMLFilter import HTMLFilter\n'), ((1271, 1298), 'scraper.NewsFilter.NewsFilter', 'NewsFilter', (['unfiltered_tags'], {}), '(unfiltered_tags)\n', (1281, 1298), False, 'from scraper.NewsFilter import NewsFilter\n'), ((646, 665), 'scraper.RequestScraper.RequestScraper', 'RequestScraper', (['web'], {}), '(web)\n', (660, 665), False, 'from scraper.RequestScraper import RequestScraper\n')]
# -*- coding: utf-8 -*- from django import template from mezzanine.conf import settings from mezzanine_faq.models import FaqPage register = template.Library() @register.inclusion_tag('includes/faqlist.html') def faq_list(**kwargs): page = FaqPage.objects.get(**kwargs) return { 'page': page, 'faq_questions': page.faqquestion_set.all(), 'MEDIA_URL': settings.MEDIA_URL, } @register.inclusion_tag('includes/faqlist.html') def faq_last(**kwargs): page = FaqPage.objects.get(**kwargs) return { 'page': page, 'faq_questions': page.faqquestion_set.all().order_by('-id')[:1], 'MEDIA_URL': settings.MEDIA_URL, }
[ "mezzanine_faq.models.FaqPage.objects.get", "django.template.Library" ]
[((142, 160), 'django.template.Library', 'template.Library', ([], {}), '()\n', (158, 160), False, 'from django import template\n'), ((247, 276), 'mezzanine_faq.models.FaqPage.objects.get', 'FaqPage.objects.get', ([], {}), '(**kwargs)\n', (266, 276), False, 'from mezzanine_faq.models import FaqPage\n'), ((498, 527), 'mezzanine_faq.models.FaqPage.objects.get', 'FaqPage.objects.get', ([], {}), '(**kwargs)\n', (517, 527), False, 'from mezzanine_faq.models import FaqPage\n')]
#!/usr/bin/env python3 """ Quick script to read all training schools from data file and write them out again to e.g. update the formatting. """ import argparse from hsf_website_helpers.events.event import EventDatabase from hsf_website_helpers.util.cli import add_website_home_option def get_parser() -> argparse.ArgumentParser: d = ( "Quick script to read all training schools from data file and write " "them out again to e.g. update the formatting." ) parser = argparse.ArgumentParser(description=d) add_website_home_option(parser) return parser if __name__ == "__main__": parser = get_parser() args = parser.parse_args() path = args.home / "_data" / "training-schools.yml" if path.is_file(): edb = EventDatabase.from_file(path) print(f"Loaded {len(edb.events)} events from database.") else: print(f"Did not find database at {path}. Initializing empty one.") edb = EventDatabase() edb.write(path) print( "Reformated database. Please commit and submit a PR to add it to " "the webpage." )
[ "hsf_website_helpers.events.event.EventDatabase.from_file", "hsf_website_helpers.util.cli.add_website_home_option", "argparse.ArgumentParser", "hsf_website_helpers.events.event.EventDatabase" ]
[((497, 535), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'd'}), '(description=d)\n', (520, 535), False, 'import argparse\n'), ((540, 571), 'hsf_website_helpers.util.cli.add_website_home_option', 'add_website_home_option', (['parser'], {}), '(parser)\n', (563, 571), False, 'from hsf_website_helpers.util.cli import add_website_home_option\n'), ((769, 798), 'hsf_website_helpers.events.event.EventDatabase.from_file', 'EventDatabase.from_file', (['path'], {}), '(path)\n', (792, 798), False, 'from hsf_website_helpers.events.event import EventDatabase\n'), ((963, 978), 'hsf_website_helpers.events.event.EventDatabase', 'EventDatabase', ([], {}), '()\n', (976, 978), False, 'from hsf_website_helpers.events.event import EventDatabase\n')]
# a module that wraps some of the S3 commands import boto3 from botocore.exceptions import ClientError from boto3.s3.transfer import S3Transfer import re import os # check for existance of bucket def list_bucket(bucket_name, region): s3 = boto3.resource('s3', region) bucket = s3.Bucket(bucket_name) object_list = [] try: for key in bucket.objects.all(): print(key.key) object_list.append(key.key) except ClientError as e: #print('code: {}, msg: {}, op name: {}'.format([ # e.error_code, e.error_message, e.operation_name])) #print(e.msg) print(str(e)) print(e.response) except Exception as e: # other response Error keys: Code, Message, BucketName print(e.response['Error']['Code']) print(str(e)) print(e.response) print(e.response['ResponseMetadata']['HTTPStatusCode']) return object_list # get list of bucket contents def get_bucket_list(bucket_name, region): s3 = boto3.resource('s3', region) bucket = s3.Bucket(bucket_name) object_list = [] for key in bucket.objects.all(): object_list.append(key.key) return object_list # check bucket exists (efficient version) # NOTE: s3 bucket name space is for all AWS users # therefore need to also check that have rights to read & write (+list) def bucket_exists(bucket, region): s3 = boto3.resource('s3', region) exists = True try: s3.meta.client.head_bucket(Bucket=bucket) except ClientError as e: # If a client error is thrown, then check that it was a 404 error. # If it was a 404 error, then the bucket does not exist. error_code = int(e.response['Error']['Code']) if error_code == 404: exists = False return exists #upload a file def upload_file(bucket, region, source_file, dest_file): client = boto3.client('s3', region) transfer = S3Transfer(client) transfer.upload_file(source_file, bucket, dest_file) # determine next unique number def get_next_id(bucket_name, region, prefix): """ determines the next sequential numbering for a given folder prefix e.g. 
prefix is "01092015Tue-"; if a file exists in the folder, then it will be of the form "01092015Tue-xxx/somefilename.ext" - where xxx is some number"; if there is such a file, then next folder will be 01092015Tue-yyy - where yyy = xxx + 1; otherwise, next folder is 01092015Tue-1 Args: prefix: a string that represents the absolute folder name Returns: a string that represents the next folder name in the sequence """ # added () to get group pattern = re.compile(prefix + '([0-9]+)/') ids = get_bucket_list(bucket_name, region) next_num = 1 for name in ids: match = pattern.match(name) if match: # there is only one bracketed group - the number next_num = max(int(match.groups()[0]) + 1, next_num) result = prefix + str(next_num) # want to strip out any "directories" in path & just return id return result.split('/')[-1] # return a list of bucket objects that match a given prefix #TODO remove default bucket name def list_by_prefix(bucket_name, region, prefix=''): """ returns a list of names of bucket objects that start with a given prefix Args: bucket_name: string - the name of the s3 bucket prefix: string - the prefix of the name (key) of the bucket objects Returns: a list of objects whose name (key) starts with the given prefix """ s3 = boto3.resource('s3', region) bucket = s3.Bucket(bucket_name) names = [] # osi - object summary iterator for osi in bucket.objects.filter( Prefix=prefix): name = osi.key names.append(name) return names # determine if a given object key exists in the bucket def key_exists(bucket_name, region, key): """ indicates if a key (object name) is in the bucket Args: bucket_name: string - the name of the s3 bucket key: string - the name of the object key (file-name) Returns: True if key in bucket; False otherwise """ if key in list_by_prefix(bucket_name, region, key): return True return False def get_timing_info(bucket_name, region, prefix): """ gets the timing information for jobs - labelled start & finish Returns: a 3-tuple of (finish time, elapsed 
time string, task name string) """ s3 = boto3.resource('s3', region) bucket = s3.Bucket(bucket_name) start_dict = {} finish_dict = {} # osi - object summary iterator for osi in bucket.objects.filter( Prefix=prefix): name = osi.key last_mod = osi.last_modified if 'start' in name: start_dict[name] = last_mod if 'finish' in name: finish_dict[name] = last_mod results = [] for name, finish_time in finish_dict.items(): start_name = name.replace('finish', 'start') if start_name in start_dict: elapsed = str(finish_time - start_dict[start_name]) results.append((finish_time, elapsed, name.replace('finish', 'task').split('/')[-1]. split('.')[0])) return sorted(results) # download files matching regex def download_files(bucket_name, region, prefix='', suffix='', dest_dir=''): """ downloads files who's path & name match given prefix & suffix to specified dir Args: bucket_name: the name of the s3 bucket to download from prefix: string - start of full path the s3 file suffix: string - the end characters of the file (e.g. '.vcf') dest_dir: string - the (local) directory to which the files are downloaded """ # TODO better to raise ValueError?? assert (prefix or suffix), 'must have a value for either prefix or suffix' # get rid of '/' at end of dir if exists if dest_dir.endswith('/'): dest_dir = dest_dir[:-1] # create directory in case not exist if dest_dir: os.makedirs(dest_dir, exist_ok=True) else: # no dir provided - default to current dir dest_dir = '.' names = [] client = boto3.client('s3', region) transfer = S3Transfer(client) for name in list_by_prefix(bucket_name, region, prefix): if name.endswith(suffix): # remove any path from the file name fname = name.split('/').pop() # download the file transfer.download_file(bucket_name, name, dest_dir + '/' + fname)
[ "boto3.client", "os.makedirs", "re.compile", "boto3.s3.transfer.S3Transfer", "boto3.resource" ]
[((244, 272), 'boto3.resource', 'boto3.resource', (['"""s3"""', 'region'], {}), "('s3', region)\n", (258, 272), False, 'import boto3\n'), ((1017, 1045), 'boto3.resource', 'boto3.resource', (['"""s3"""', 'region'], {}), "('s3', region)\n", (1031, 1045), False, 'import boto3\n'), ((1408, 1436), 'boto3.resource', 'boto3.resource', (['"""s3"""', 'region'], {}), "('s3', region)\n", (1422, 1436), False, 'import boto3\n'), ((1898, 1924), 'boto3.client', 'boto3.client', (['"""s3"""', 'region'], {}), "('s3', region)\n", (1910, 1924), False, 'import boto3\n'), ((1940, 1958), 'boto3.s3.transfer.S3Transfer', 'S3Transfer', (['client'], {}), '(client)\n', (1950, 1958), False, 'from boto3.s3.transfer import S3Transfer\n'), ((2694, 2726), 're.compile', 're.compile', (["(prefix + '([0-9]+)/')"], {}), "(prefix + '([0-9]+)/')\n", (2704, 2726), False, 'import re\n'), ((3631, 3659), 'boto3.resource', 'boto3.resource', (['"""s3"""', 'region'], {}), "('s3', region)\n", (3645, 3659), False, 'import boto3\n'), ((4552, 4580), 'boto3.resource', 'boto3.resource', (['"""s3"""', 'region'], {}), "('s3', region)\n", (4566, 4580), False, 'import boto3\n'), ((6295, 6321), 'boto3.client', 'boto3.client', (['"""s3"""', 'region'], {}), "('s3', region)\n", (6307, 6321), False, 'import boto3\n'), ((6337, 6355), 'boto3.s3.transfer.S3Transfer', 'S3Transfer', (['client'], {}), '(client)\n', (6347, 6355), False, 'from boto3.s3.transfer import S3Transfer\n'), ((6146, 6182), 'os.makedirs', 'os.makedirs', (['dest_dir'], {'exist_ok': '(True)'}), '(dest_dir, exist_ok=True)\n', (6157, 6182), False, 'import os\n')]
from django.urls import path from . import views app_name = "app" urlpatterns = [ path('', views.index, name="index"), path('posts/', views.posts, name="posts"), path('categories/', views.categories, name="categories"), path('comments/', views.comments, name="comments"), path('users/', views.users, name="users"), path('test/', views.test, name="test"), path('login/', views.login, name="login"), path('logout/', views.logout, name="logout"), path('details/', views.details, name="details"), ]
[ "django.urls.path" ]
[((89, 124), 'django.urls.path', 'path', (['""""""', 'views.index'], {'name': '"""index"""'}), "('', views.index, name='index')\n", (93, 124), False, 'from django.urls import path\n'), ((130, 171), 'django.urls.path', 'path', (['"""posts/"""', 'views.posts'], {'name': '"""posts"""'}), "('posts/', views.posts, name='posts')\n", (134, 171), False, 'from django.urls import path\n'), ((177, 233), 'django.urls.path', 'path', (['"""categories/"""', 'views.categories'], {'name': '"""categories"""'}), "('categories/', views.categories, name='categories')\n", (181, 233), False, 'from django.urls import path\n'), ((239, 289), 'django.urls.path', 'path', (['"""comments/"""', 'views.comments'], {'name': '"""comments"""'}), "('comments/', views.comments, name='comments')\n", (243, 289), False, 'from django.urls import path\n'), ((295, 336), 'django.urls.path', 'path', (['"""users/"""', 'views.users'], {'name': '"""users"""'}), "('users/', views.users, name='users')\n", (299, 336), False, 'from django.urls import path\n'), ((342, 380), 'django.urls.path', 'path', (['"""test/"""', 'views.test'], {'name': '"""test"""'}), "('test/', views.test, name='test')\n", (346, 380), False, 'from django.urls import path\n'), ((386, 427), 'django.urls.path', 'path', (['"""login/"""', 'views.login'], {'name': '"""login"""'}), "('login/', views.login, name='login')\n", (390, 427), False, 'from django.urls import path\n'), ((433, 477), 'django.urls.path', 'path', (['"""logout/"""', 'views.logout'], {'name': '"""logout"""'}), "('logout/', views.logout, name='logout')\n", (437, 477), False, 'from django.urls import path\n'), ((483, 530), 'django.urls.path', 'path', (['"""details/"""', 'views.details'], {'name': '"""details"""'}), "('details/', views.details, name='details')\n", (487, 530), False, 'from django.urls import path\n')]
#!/usr/bin/env python # # Copyright (c) 2017 10X Genomics, Inc. All rights reserved. # import cellranger.analysis.io as analysis_io import cellranger.analysis.constants as analysis_constants import cellranger.h5_constants as h5_constants import cellranger.io as cr_io import cellranger.analysis.stats as analysis_stats import collections from irlb import irlb import numpy as np import os import tables # The RUNPCA stage attempts to run the PCA at this threshold, and if that # fails it reruns at zero. In the event thresholding prevents us from # returning the requested number of components and we are at this threshold # value, we throw an exception. DEFAULT_RUNPCA_THRESHOLD = 2 from sklearn.utils import sparsefuncs class MatrixRankTooSmallException(Exception): pass PCA = collections.namedtuple('PCA', ['transformed_pca_matrix', 'components', 'variance_explained', 'dispersion', 'features_selected']) def get_original_columns_used(cols_not_removed, cols_used_after_removal): """If a matrix is subset down to only have columns indexed by cols_not_removed, and then is further subset to only contain cols_used_after removal, in that order, than this method returns the index of which columns in the old matrix correspond the the columns in the new matrix.""" return [cols_not_removed[x] for x in cols_used_after_removal] def run_pca(matrix, pca_features=None, pca_bcs=None, n_pca_components=None, random_state=None, min_count_threshold=0): """ Run a PCA on the matrix using the IRLBA matrix factorization algorithm. Prior to the PCA analysis, the matrix is modified so that all barcodes/columns have the same counts, and then the counts are transformed by a log2(1+X) operation. If desired, only a subset of features (e.g. sample rows) can be selected for PCA analysis. Each feature is ranked by its dispersion relative to other features that have a similar mean count. The top `pca_features` as ranked by this method will then be used for the PCA. 
One can also select to subset number of barcodes to use (e.g. sample columns), but in this case they are simply randomly sampled. Args: matrix (CountMatrix): The matrix to perform PCA on. pca_features (int): Number of features to subset from matrix and use in PCA. The top pca_features ranked by dispersion are used pca_bcs (int): Number of barcodes to randomly sample for the matrix. n_pca_components (int): How many PCA components should be used. random_state (int): The seed for the RNG min_count_threshold (int): The minimum sum of each row/column for that row/column to be passed to PCA (this filter is prior to any subsetting that occurs). Returns: A PCA object """ if random_state is None: random_state=analysis_constants.RANDOM_STATE np.random.seed(0) # Threshold the rows/columns of matrix, will throw error if an empty matrix results. thresholded_matrix, _, thresholded_features = matrix.select_axes_above_threshold(min_count_threshold) # If requested, we can subsample some of the barcodes to get a smaller matrix for PCA pca_bc_indices = np.arange(thresholded_matrix.bcs_dim) if pca_bcs is None: pca_bcs = thresholded_matrix.bcs_dim pca_bc_indices = np.arange(thresholded_matrix.bcs_dim) elif pca_bcs < thresholded_matrix.bcs_dim: pca_bc_indices = np.sort(np.random.choice(np.arange(thresholded_matrix.bcs_dim), size=pca_bcs, replace=False)) elif pca_bcs > thresholded_matrix.bcs_dim: msg = ("You requested {} barcodes but the matrix after thresholding only " "included {}, so the smaller amount is being used.").format(pca_bcs, thresholded_matrix.bcs_dim) print(msg) pca_bcs = thresholded_matrix.bcs_dim pca_bc_indices = np.arange(thresholded_matrix.bcs_dim) # If requested, select fewer features to use by selecting the features with highest normalized dispersion if pca_features is None: pca_features = thresholded_matrix.features_dim elif pca_features > thresholded_matrix.features_dim: msg = ("You requested {} features but the matrix after thresholding only included {} features," 
"so the smaller amount is being used.").format(pca_features, thresholded_matrix.features_dim) print(msg) pca_features = thresholded_matrix.features_dim # Calc mean and variance of counts after normalizing # But don't transform to log space, in order to preserve the mean-variance relationship m = analysis_stats.normalize_by_umi(thresholded_matrix) # Get mean and variance of rows (mu, var) = analysis_stats.summarize_columns(m.T) dispersion = analysis_stats.get_normalized_dispersion(mu.squeeze(), var.squeeze()) # TODO set number of bins? pca_feature_indices = np.argsort(dispersion)[-pca_features:] # Now determine how many components. if n_pca_components is None: n_pca_components = analysis_constants.PCA_N_COMPONENTS_DEFAULT likely_matrix_rank = min(pca_features, pca_bcs) if likely_matrix_rank < n_pca_components: if min_count_threshold == DEFAULT_RUNPCA_THRESHOLD: # Kick back to run_pca stage so it can retry with no threshold, this is for historical reasons raise MatrixRankTooSmallException() else: print(("There are fewer nonzero features or barcodes ({}) than requested " "PCA components ({}); reducing the number of components.").format(likely_matrix_rank, n_pca_components)) n_pca_components = likely_matrix_rank if (likely_matrix_rank * 0.5) <= float(n_pca_components): print("Requested number of PCA components is large relative to the matrix size, an exact approach to matrix factorization may be faster.") # Note, after subsetting it is possible some rows/cols in pca_mat have counts below the threshold. # However, we are not performing a second thresholding as in practice subsetting is not used and we explain # that thresholding occurs prior to subsetting in the doc string. 
pca_mat = thresholded_matrix.select_barcodes(pca_bc_indices).select_features(pca_feature_indices) (pca_norm_mat, pca_center, pca_scale) = normalize_and_transpose(pca_mat) (u, d, v, _, _) = irlb(pca_norm_mat, n_pca_components, center=pca_center.squeeze(), scale=pca_scale.squeeze(), random_state=random_state) # make sure to project the matrix before centering, to avoid densification (full_norm_mat, full_center, full_scale) = normalize_and_transpose(matrix) sparsefuncs.inplace_column_scale(full_norm_mat, 1 / full_scale.squeeze()) # can have some zeros here # Get a coordinate map so we know which columns in the old matrix correspond to columns in the new org_cols_used = get_original_columns_used(thresholded_features, pca_feature_indices) transformed_irlba_matrix = full_norm_mat[:,org_cols_used].dot(v) - (full_center / full_scale)[:,org_cols_used].dot(v) irlba_components = np.zeros((n_pca_components, matrix.features_dim)) irlba_components[:,org_cols_used] = v.T # calc proportion of variance explained variance_sum = len(pca_feature_indices) # each feature has variance=1, mean=0 after normalization variance_explained = np.square(d)/((len(pca_bc_indices)-1) * variance_sum) features_selected = np.array([f.id for f in matrix.feature_ref.feature_defs])[org_cols_used] # Now project back up the dispersion to return. 
full_dispersion = np.empty(matrix.features_dim) full_dispersion[:] = np.nan full_dispersion[thresholded_features] = dispersion # sanity check dimensions assert transformed_irlba_matrix.shape == (matrix.bcs_dim, n_pca_components) assert irlba_components.shape == (n_pca_components, matrix.features_dim) assert variance_explained.shape == (n_pca_components,) return PCA(transformed_irlba_matrix, irlba_components, variance_explained, full_dispersion, features_selected) def normalize_and_transpose(matrix): matrix.tocsc() m = analysis_stats.normalize_by_umi(matrix) # Use log counts m.data = np.log2(1 + m.data) # Transpose m = m.T # compute centering (mean) and scaling (stdev) (c,v) = analysis_stats.summarize_columns(m) # TODO: Inputs to this function shouldn't have zero variance columns v[np.where(v == 0.0)] = 1.0 s = np.sqrt(v) return (m, c, s) def get_irlb_mem_gb_from_matrix_dim(nonzero_entries): irlba_mem_gb = round(np.ceil(1.0 * nonzero_entries / analysis_constants.NUM_IRLB_MATRIX_ENTRIES_PER_MEM_GB)) + analysis_constants.IRLB_BASE_MEM_GB return h5_constants.MATRIX_MEM_GB_MULTIPLIER * max(h5_constants.MIN_MEM_GB, irlba_mem_gb) def save_pca_csv(pca_map, matrix, base_dir): save_pca_csv_with_bc_feature(pca_map, matrix.bcs, matrix.feature_ref.feature_defs, base_dir) def save_pca_csv_with_bc_feature(pca_map, barcodes, features, base_dir): for n_components, pca in pca_map.iteritems(): n_components_dir = os.path.join(base_dir, '%d_components' % n_components) cr_io.makedirs(n_components_dir, allow_existing=True) matrix_fn = os.path.join(n_components_dir, 'projection.csv') n_columns = pca.transformed_pca_matrix.shape[1] assert n_columns <= n_components matrix_header = ['Barcode'] + ['PC-%d' % (i+1) for i in xrange(n_columns)] analysis_io.save_matrix_csv(matrix_fn, pca.transformed_pca_matrix, matrix_header, barcodes) # FBPCA presently provides 0-sized entries for the following PCA() member variables. 
# This allows us to distinguish FBPCA from IRLBA, and also avoids weird empty files. if pca.components.size > 0: components_fn = os.path.join(n_components_dir, 'components.csv') components_header = ['PC'] + [f.id for f in features] analysis_io.save_matrix_csv(components_fn, pca.components, components_header, range(1, n_components+1)) if pca.variance_explained.size > 0: variance_fn = os.path.join(n_components_dir, 'variance.csv') variance_header = ['PC','Proportion.Variance.Explained'] analysis_io.save_matrix_csv(variance_fn, pca.variance_explained, variance_header, range(1, n_components+1)) if pca.dispersion.size > 0: dispersion_fn = os.path.join(n_components_dir, 'dispersion.csv') dispersion_header = ['Feature','Normalized.Dispersion'] analysis_io.save_matrix_csv(dispersion_fn, pca.dispersion, dispersion_header, [f.id for f in features]) if pca.features_selected.size > 0: features_fn = os.path.join(n_components_dir, 'features_selected.csv') # TODO: there are two columns here, but only 1 entry in the header...BAD features_header = ['Feature'] analysis_io.save_matrix_csv(features_fn, pca.features_selected, features_header, range(1, len(pca.features_selected)+1)) def save_pca_h5(pca_map, f): group = f.create_group(f.root, analysis_constants.ANALYSIS_H5_PCA_GROUP) for n_components, pca in pca_map.iteritems(): analysis_io.save_h5(f, group, str(n_components), pca) def load_pca_from_h5(filename): """ Load just the PCA info from an analysis h5 """ with tables.open_file(filename, 'r') as f: group = f.root._v_groups[analysis_constants.ANALYSIS_H5_PCA_GROUP] # Just take the first PCA object, assuming we never have multiple for _, pca in analysis_io.load_h5_iter(group, PCA): return pca
[ "cellranger.analysis.io.load_h5_iter", "numpy.sqrt", "cellranger.analysis.io.save_matrix_csv", "numpy.argsort", "numpy.array", "numpy.arange", "numpy.where", "cellranger.io.makedirs", "numpy.empty", "numpy.random.seed", "numpy.ceil", "collections.namedtuple", "cellranger.analysis.stats.norma...
[((790, 922), 'collections.namedtuple', 'collections.namedtuple', (['"""PCA"""', "['transformed_pca_matrix', 'components', 'variance_explained', 'dispersion',\n 'features_selected']"], {}), "('PCA', ['transformed_pca_matrix', 'components',\n 'variance_explained', 'dispersion', 'features_selected'])\n", (812, 922), False, 'import collections\n'), ((2911, 2928), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2925, 2928), True, 'import numpy as np\n'), ((3237, 3274), 'numpy.arange', 'np.arange', (['thresholded_matrix.bcs_dim'], {}), '(thresholded_matrix.bcs_dim)\n', (3246, 3274), True, 'import numpy as np\n'), ((4639, 4690), 'cellranger.analysis.stats.normalize_by_umi', 'analysis_stats.normalize_by_umi', (['thresholded_matrix'], {}), '(thresholded_matrix)\n', (4670, 4690), True, 'import cellranger.analysis.stats as analysis_stats\n'), ((4743, 4780), 'cellranger.analysis.stats.summarize_columns', 'analysis_stats.summarize_columns', (['m.T'], {}), '(m.T)\n', (4775, 4780), True, 'import cellranger.analysis.stats as analysis_stats\n'), ((7113, 7162), 'numpy.zeros', 'np.zeros', (['(n_pca_components, matrix.features_dim)'], {}), '((n_pca_components, matrix.features_dim))\n', (7121, 7162), True, 'import numpy as np\n'), ((7605, 7634), 'numpy.empty', 'np.empty', (['matrix.features_dim'], {}), '(matrix.features_dim)\n', (7613, 7634), True, 'import numpy as np\n'), ((8151, 8190), 'cellranger.analysis.stats.normalize_by_umi', 'analysis_stats.normalize_by_umi', (['matrix'], {}), '(matrix)\n', (8182, 8190), True, 'import cellranger.analysis.stats as analysis_stats\n'), ((8226, 8245), 'numpy.log2', 'np.log2', (['(1 + m.data)'], {}), '(1 + m.data)\n', (8233, 8245), True, 'import numpy as np\n'), ((8339, 8374), 'cellranger.analysis.stats.summarize_columns', 'analysis_stats.summarize_columns', (['m'], {}), '(m)\n', (8371, 8374), True, 'import cellranger.analysis.stats as analysis_stats\n'), ((8489, 8499), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (8496, 8499), 
True, 'import numpy as np\n'), ((3369, 3406), 'numpy.arange', 'np.arange', (['thresholded_matrix.bcs_dim'], {}), '(thresholded_matrix.bcs_dim)\n', (3378, 3406), True, 'import numpy as np\n'), ((4922, 4944), 'numpy.argsort', 'np.argsort', (['dispersion'], {}), '(dispersion)\n', (4932, 4944), True, 'import numpy as np\n'), ((7379, 7391), 'numpy.square', 'np.square', (['d'], {}), '(d)\n', (7388, 7391), True, 'import numpy as np\n'), ((7457, 7514), 'numpy.array', 'np.array', (['[f.id for f in matrix.feature_ref.feature_defs]'], {}), '([f.id for f in matrix.feature_ref.feature_defs])\n', (7465, 7514), True, 'import numpy as np\n'), ((8454, 8472), 'numpy.where', 'np.where', (['(v == 0.0)'], {}), '(v == 0.0)\n', (8462, 8472), True, 'import numpy as np\n'), ((9115, 9169), 'os.path.join', 'os.path.join', (['base_dir', "('%d_components' % n_components)"], {}), "(base_dir, '%d_components' % n_components)\n", (9127, 9169), False, 'import os\n'), ((9178, 9231), 'cellranger.io.makedirs', 'cr_io.makedirs', (['n_components_dir'], {'allow_existing': '(True)'}), '(n_components_dir, allow_existing=True)\n', (9192, 9231), True, 'import cellranger.io as cr_io\n'), ((9253, 9301), 'os.path.join', 'os.path.join', (['n_components_dir', '"""projection.csv"""'], {}), "(n_components_dir, 'projection.csv')\n", (9265, 9301), False, 'import os\n'), ((9490, 9585), 'cellranger.analysis.io.save_matrix_csv', 'analysis_io.save_matrix_csv', (['matrix_fn', 'pca.transformed_pca_matrix', 'matrix_header', 'barcodes'], {}), '(matrix_fn, pca.transformed_pca_matrix,\n matrix_header, barcodes)\n', (9517, 9585), True, 'import cellranger.analysis.io as analysis_io\n'), ((11505, 11536), 'tables.open_file', 'tables.open_file', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (11521, 11536), False, 'import tables\n'), ((11714, 11750), 'cellranger.analysis.io.load_h5_iter', 'analysis_io.load_h5_iter', (['group', 'PCA'], {}), '(group, PCA)\n', (11738, 11750), True, 'import cellranger.analysis.io as 
analysis_io\n'), ((8601, 8692), 'numpy.ceil', 'np.ceil', (['(1.0 * nonzero_entries / analysis_constants.NUM_IRLB_MATRIX_ENTRIES_PER_MEM_GB)'], {}), '(1.0 * nonzero_entries / analysis_constants.\n NUM_IRLB_MATRIX_ENTRIES_PER_MEM_GB)\n', (8608, 8692), True, 'import numpy as np\n'), ((9865, 9913), 'os.path.join', 'os.path.join', (['n_components_dir', '"""components.csv"""'], {}), "(n_components_dir, 'components.csv')\n", (9877, 9913), False, 'import os\n'), ((10201, 10247), 'os.path.join', 'os.path.join', (['n_components_dir', '"""variance.csv"""'], {}), "(n_components_dir, 'variance.csv')\n", (10213, 10247), False, 'import os\n'), ((10536, 10584), 'os.path.join', 'os.path.join', (['n_components_dir', '"""dispersion.csv"""'], {}), "(n_components_dir, 'dispersion.csv')\n", (10548, 10584), False, 'import os\n'), ((10665, 10772), 'cellranger.analysis.io.save_matrix_csv', 'analysis_io.save_matrix_csv', (['dispersion_fn', 'pca.dispersion', 'dispersion_header', '[f.id for f in features]'], {}), '(dispersion_fn, pca.dispersion,\n dispersion_header, [f.id for f in features])\n', (10692, 10772), True, 'import cellranger.analysis.io as analysis_io\n'), ((10873, 10928), 'os.path.join', 'os.path.join', (['n_components_dir', '"""features_selected.csv"""'], {}), "(n_components_dir, 'features_selected.csv')\n", (10885, 10928), False, 'import os\n'), ((3905, 3942), 'numpy.arange', 'np.arange', (['thresholded_matrix.bcs_dim'], {}), '(thresholded_matrix.bcs_dim)\n', (3914, 3942), True, 'import numpy as np\n'), ((3504, 3541), 'numpy.arange', 'np.arange', (['thresholded_matrix.bcs_dim'], {}), '(thresholded_matrix.bcs_dim)\n', (3513, 3541), True, 'import numpy as np\n')]
#!/usr/bin/env python

"""
--------------------------------------------------------
IMPORT_IWORX reads and converts various IWORX datafiles
into a FieldTrip-type data structure.

Use as
    data, event = import_iworx(filename)
where the filename should point to a .mat or .txt datafile.

data has the following nested fields:
    .trial
    .time
    .label

event has the following nested fields:
    .type
    .sample
    .value

Copyright (C) 2022, <NAME>
--------------------------------------------------------
"""

import os
import scipy.io


def import_iworx(filename):
    """Read an IWORX recording and return ``(data, event)``.

    Parameters
    ----------
    filename : str
        Path to one of the recording's files: the ``.mat`` data file, the
        ``.txt`` header file, or the ``*_MarksData.txt`` marker file. The
        two companion files are located next to it automatically.

    Returns
    -------
    data : object
        FieldTrip-style container with ``.trial`` (list of transposed trial
        matrices), ``.time`` (list of time axes) and ``.label`` (channel
        labels from the header's first line).
    event : object
        FieldTrip-style container with ``.type``, ``.sample`` and ``.value``
        lists read from the marker file (best effort; empty if unreadable).
    """
    # split the input into directory, base name (no extension) and extension
    path = os.path.split(filename)[0]        # xxx/
    name = os.path.split(filename)[-1][:-4]  # xxx
    ext = os.path.splitext(filename)[-1]     # .xxx
    if ext != ".mat" and ext != ".txt":
        print("file extension should be either .mat or .txt for this function")

    hasmat = ext == ".mat"
    hastxt = ext == ".txt"
    # marker files are named <recording>_MarksData.txt
    hasmark = hastxt and name[-10:] == "_MarksData"

    # derive the three companion file names from whichever file was given
    if hasmark:
        datafile = os.path.join(path, name[:-10] + ".mat")
        headerfile = os.path.join(path, name[:-10] + ".txt")
        markerfile = filename
    else:
        datafile = os.path.join(path, name + ".mat")
        headerfile = os.path.join(path, name + ".txt")
        markerfile = os.path.join(path, name + "_MarksData.txt")

    # read the raw trial data; 'n' holds the trial count, 'b1'..'bn' the trials
    mat = scipy.io.loadmat(datafile)

    # initialize data structure
    class Data(object):
        """FieldTrip-style data container."""
        def __init__(self):
            self.trial = []
            self.time = []
            self.label = []

    # organize data structure
    data = Data()
    for t in range(mat["n"][0][0]):  # n is a variable contained by the mat file
        data.trial.append(mat["b" + str(t + 1)].T)
        # the first column of each trial matrix holds the time axis
        data.time.append(mat["b" + str(t + 1)][:, 0])

    # read the header information (channel labels on the first line)
    try:
        with open(headerfile) as f:
            contents = f.readlines()
            data.label = contents[0].split(" ")
    except Exception:  # was a bare except, which would also swallow KeyboardInterrupt
        print("could not read the header information")

    # initialize event structure
    class Event(object):
        """FieldTrip-style event container."""
        def __init__(self):
            self.type = []
            self.sample = []
            self.value = []

    # read the markers; the first line is a header and is skipped
    event = Event()
    try:
        with open(markerfile) as f:
            contents = f.readlines()
            for e in range(1, len(contents)):
                fields = contents[e].split(" ")  # split once instead of three times
                event.type.append(fields[0])
                event.sample.append(fields[1])
                event.value.append(fields[4])
    except Exception:
        print("could not read the marker information")

    return data, event
[ "os.path.join", "os.path.splitext", "os.path.split" ]
[((630, 653), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (643, 653), False, 'import os\n'), ((726, 752), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (742, 752), False, 'import os\n'), ((1158, 1197), 'os.path.join', 'os.path.join', (['path', "(name[:-10] + '.mat')"], {}), "(path, name[:-10] + '.mat')\n", (1170, 1197), False, 'import os\n'), ((1219, 1258), 'os.path.join', 'os.path.join', (['path', "(name[:-10] + '.txt')"], {}), "(path, name[:-10] + '.txt')\n", (1231, 1258), False, 'import os\n'), ((676, 699), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (689, 699), False, 'import os\n'), ((1335, 1368), 'os.path.join', 'os.path.join', (['path', "(name + '.mat')"], {}), "(path, name + '.mat')\n", (1347, 1368), False, 'import os\n'), ((1390, 1423), 'os.path.join', 'os.path.join', (['path', "(name + '.txt')"], {}), "(path, name + '.txt')\n", (1402, 1423), False, 'import os\n'), ((1445, 1488), 'os.path.join', 'os.path.join', (['path', "(name + '_MarksData.txt')"], {}), "(path, name + '_MarksData.txt')\n", (1457, 1488), False, 'import os\n')]
import arcpy
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB

# Script-tool parameters: a CSV of labelled tweets and one string to classify.
csv_path = arcpy.GetParameterAsText(0)
query_text = arcpy.GetParameterAsText(1)

tweets = pd.read_csv(csv_path)
labels = tweets['is_there_an_emotion_directed_at_a_brand_or_product']
messages = tweets['tweet_text']

# Drop rows whose tweet text is missing, keeping labels aligned with the text.
present = pd.notnull(messages)
messages_clean = messages[present]
labels_clean = labels[present]

# Bag-of-words representation of the training tweets.
vectorizer = CountVectorizer()
vectorizer.fit(messages_clean)
features = vectorizer.transform(messages_clean)

# Multinomial naive Bayes; scikit-learn's defaults are generally a sane
# starting point for text classification, so no hyperparameter tuning here.
classifier = MultinomialNB()
classifier.fit(features, labels_clean)

# Classify the input string and report the predicted label in the tool dialog.
arcpy.AddMessage(classifier.predict(vectorizer.transform([query_text])))
[ "pandas.read_csv", "sklearn.feature_extraction.text.CountVectorizer", "sklearn.naive_bayes.MultinomialNB", "arcpy.GetParameterAsText", "pandas.notnull" ]
[((152, 179), 'arcpy.GetParameterAsText', 'arcpy.GetParameterAsText', (['(0)'], {}), '(0)\n', (176, 179), False, 'import arcpy\n'), ((194, 221), 'arcpy.GetParameterAsText', 'arcpy.GetParameterAsText', (['(1)'], {}), '(1)\n', (218, 221), False, 'import arcpy\n'), ((228, 250), 'pandas.read_csv', 'pd.read_csv', (['input_csv'], {}), '(input_csv)\n', (239, 250), True, 'import pandas as pd\n'), ((432, 449), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '()\n', (447, 449), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((812, 827), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (825, 827), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((360, 376), 'pandas.notnull', 'pd.notnull', (['text'], {}), '(text)\n', (370, 376), True, 'import pandas as pd\n'), ((400, 416), 'pandas.notnull', 'pd.notnull', (['text'], {}), '(text)\n', (410, 416), True, 'import pandas as pd\n')]
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Unit tests for the gg static-site generator's HTML snippet builders.

Most tests compare a generator function's output against an exact HTML
string literal, once with the project `config` and once with defaults.
"""

import gg
from ggconfig import config

##############################################################################
# CONTENT SNIPPETS
##############################################################################

def test_logo_url():
    # With config the logo comes from the configured site; without, it is empty.
    assert gg.logo_url(config) == 'https://oliz.io/ggpy/static/gg.png'
    assert gg.logo_url() == ''

def test_pagetitle():
    # Page title equals the site name, or "page | site" for subpages.
    assert gg.pagetitle('Good Generator.py', config) == 'Good Generator.py'
    assert gg.pagetitle('Some Page', config) == 'Some Page | Good Generator.py'
    assert gg.pagetitle('Title with default config') == 'Title with default config'
    assert gg.pagetitle('') == ''
    assert gg.pagetitle() == ''
    assert gg.pagetitle('', config) == 'Good Generator.py'

def test_meta():
    # Special "__x__" tags are filtered out of the keywords meta tag.
    meta = gg.meta('oz', 'Nice text!', '__draft__, foo, __inline__, bar, tags, __no_header__')
    assert meta == \
'''<meta name="author" content="oz">
<meta name="description" content="Nice text!">
<meta name="keywords" content="foo, bar, tags">'''

def test_meta_single_special_tag():
    # A lone special tag yields no keywords meta tag at all.
    meta = gg.meta('oz', 'Nice text!', '__draft__')
    assert meta == \
'''<meta name="author" content="oz">
<meta name="description" content="Nice text!">'''

def test_opengraph():
    opengraph = gg.opengraph('Title!', 'https://oliz.io/ggpy/', 'Nice text!', '2020-02-20', config)
    assert opengraph == \
'''<meta property="og:title" content="Title!">
<meta property="og:type" content="article">
<meta property="og:url" content="https://oliz.io/ggpy/">
<meta property="og:description" content="Nice text!">
<meta property="og:image" content="https://oliz.io/ggpy/static/gg.png">
<meta property="og:locale" content="en-US">
<meta property="article:published_time" content="2020-02-20">'''
    # Without config there is no og:image line.
    opengraph_default_config = gg.opengraph('Title!', 'https://oliz.io/ggpy/', 'Nice text!', '2020-02-20')
    assert opengraph_default_config == \
'''<meta property="og:title" content="Title!">
<meta property="og:type" content="article">
<meta property="og:url" content="https://oliz.io/ggpy/">
<meta property="og:description" content="Nice text!">
<meta property="og:locale" content="en-US">
<meta property="article:published_time" content="2020-02-20">'''

def test_json_ld():
    # Double quotes inside fields must be escaped in the JSON-LD payload.
    json_ld = gg.json_ld('Title! "BAM!"', 'https://oliz.io/ggpy/', 'It says "BAM!"', config)
    assert json_ld == \
'''<script type="application/ld+json">
{"@context":"http://schema.org","@type":"WebSite","headline":"Title! \\"BAM!\\"","url":"https://oliz.io/ggpy/","name":"Good Generator.py","description":"It says \\"BAM!\\""}</script>'''
    # Without config the "name" field is omitted.
    json_ld_default_config = gg.json_ld('Title! "BAM!"', 'https://oliz.io/ggpy/', 'It says "BAM!"')
    assert json_ld_default_config == \
'''<script type="application/ld+json">
{"@context":"http://schema.org","@type":"WebSite","headline":"Title! \\"BAM!\\"","url":"https://oliz.io/ggpy/","description":"It says \\"BAM!\\""}</script>'''

def test_header():
    header = gg.header('https://example.com/logo.png', '<h1>Title!</h1>', '2021-03-27', config)
    assert header == \
'''<a href="https://oliz.io/ggpy"><img src="https://example.com/logo.png" class="avatar" /></a>
<div style="text-align:right;">
<h1>Title!</h1>
<small><a href="https://oliz.io/ggpy">Good Gen</a>, 2021-03-27</small>
</div>'''
    # Empty logo URL and no config: no avatar link, no site link in the byline.
    header_default_config = gg.header('', '<h1>Title!</h1>', '2021-03-27')
    assert header_default_config == \
'''<div style="text-align:right;">
<h1>Title!</h1>
<small>2021-03-27</small>
</div>'''

def test_post_header():
    post_header = gg.post_header('<h1 id="title">Title!</h1>', '2020-02-20', config)
    assert post_header == \
'''<div style="text-align:right;">
<h1 id="title">Title!</h1>
<small><a href="https://oliz.io/ggpy">Good Gen</a>, 2020-02-20</small>
</div>'''
    post_header_default_config = gg.post_header('<h1 id="title">Title!</h1>', '2020-02-20')
    assert post_header_default_config == \
'''<div style="text-align:right;">
<h1 id="title">Title!</h1>
<small>2020-02-20</small>
</div>'''

def test_footer_navigation():
    footer_nav = gg.footer_navigation()
    assert footer_nav == \
'''<a href="#" class="nav">top</a>
<a href="javascript:toggleTheme()" class="nav">🌓</a>
<a href="javascript:toggleFontSize()" class="nav">aA</a>'''

def test_about_and_social_icons():
    about_and_social = gg.about_and_social_icons(config)
    assert about_and_social == \
'''<a href="mailto:<EMAIL>" class="social">email</a>
<a href="https://nitter.net/" class="social">twitter</a>
<a href="https://github.com/ooz/ggpy" class="social">github</a>
<a href="https://oliz.io/about.html" class="social">about</a>'''
    # Without config there is nothing to link to.
    about_and_social_default_config = gg.about_and_social_icons()
    assert about_and_social_default_config == ''

def test_posts_index():
    '''Generate index without inlined posts.
    '''
    posts = gg.scan_posts(['.'])
    posts = [post for post in posts if gg.TAG_INLINE not in post['tags']]
    posts_index = gg.posts_index(posts)
    assert posts_index == \
'''<div>
<div class="card"><small class="social">2021-04-04</small><a href="test/features/meta.html"><b>Markdown Meta Data</b></a></div>
<div class="card"><small class="social">2018-03-17</small><a href="test/some-post.html"><b>Some Post</b></a></div>
<div class="card"><small class="social">1996-06-06</small><a href="test/features/"><b>Markdown Feature Test without &quot;quotes bug&quot;</b></a></div>
</div>'''

def test_posts_index_inline():
    '''Generate index with inlined posts.
    Four cases:
    1. Lots of content but not description -> details block with title as summary
    2. Lots of content with description -> details block with description as summary
    3. Has description but no content -> only show description
    4. Else -> show content directly
    '''
    posts = gg.scan_posts(['test/features/index-inline-posts/'])
    posts_index = gg.posts_index(posts)
    assert posts_index == \
'''<div>
<div class="card"><small class="social">2021-07-17</small>
<a href="little-inline-content-no-description.html"><b>Little inline content, no description</b></a>
<div>
<p>This shows directly on the card, without details+summary blocks.</p>
</div>
</div>
<div class="card"><small class="social">2021-07-17</small>
<a href="no-content-with-description.html"><b>No content, but with description</b></a>
<div>
Just some more minor text from the description
</div>
</div>
<div class="card"><small class="social">2021-07-17</small>
<a href="lots-of-content-with-description.html"><b>Lots of content, with description</b></a>
<details><summary>Click here to expand...</summary>
<ul>
<li>One</li>
<li>Two</li>
<li>Three</li>
<li>Four</li>
<li>Five</li>
<li>Six</li>
<li>Seven</li>
<li>Eight</li>
<li>Nine</li>
<li>Ten</li>
</ul>
<p>... and some more lines.</p>
</details>
</div>
<div class="card"><small class="social">2021-07-17</small>
<details><summary><a href="lots-of-content-no-description.html"><b>Lots of content, no description</b></a></summary>
<ul>
<li>One</li>
<li>Two</li>
<li>Three</li>
<li>Four</li>
<li>Five</li>
<li>Six</li>
<li>Seven</li>
<li>Eight</li>
<li>Nine</li>
<li>Ten</li>
</ul>
<p>... and some more lines.</p>
</details>
</div>
</div>'''

##############################################################################
# HTML SNIPPETS
##############################################################################

def test_html_opening_boilerplate():
    assert gg.html_opening_boilerplate() == \
'''<!DOCTYPE html>
<html lang="en-US">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<meta name="viewport" content="width=device-width,initial-scale=1">'''

def test_html_head_body_boilerplate():
    assert gg.html_head_body_boilerplate() == \
'''</head>
<body onload="initTheme()">'''

def test_html_tag_line():
    assert gg.html_tag_line('title', 'Nice!') == '<title>Nice!</title>'

def test_html_tag_block():
    assert gg.html_tag_block('footer', '<p>in closing</p>') == \
'''<footer>
<p>in closing</p>
</footer>'''

def test_html_tag_empty():
    # Self-closing tag rendered from (attribute, value) pairs.
    link_tag = gg.html_tag_empty('link', [('rel', 'canonical'), ('href','https://example.com')])
    assert link_tag == '<link rel="canonical" href="https://example.com">'
    # No attributes: the tag is omitted entirely.
    omit_empty_tag = gg.html_tag_empty('link', [])
    assert omit_empty_tag == ''

def test_html_closing_boilerplate():
    assert gg.html_closing_boilerplate() == \
'''</body>
</html>
'''

def test_inline_style():
    # Only smoke-checks that the key CSS selectors are present.
    style = gg.inline_style()
    assert 'body {' in style
    assert '.dark-mode' in style
    assert '.avatar' in style
    assert '.nav' in style
    assert '.social' in style

def test_inline_javascript():
    # Only smoke-checks that the theming functions are present.
    js = gg.inline_javascript()
    assert 'function toggleTheme' in js
    assert 'function initTheme' in js
[ "gg.html_opening_boilerplate", "gg.html_tag_block", "gg.scan_posts", "gg.inline_style", "gg.about_and_social_icons", "gg.footer_navigation", "gg.logo_url", "gg.posts_index", "gg.pagetitle", "gg.inline_javascript", "gg.opengraph", "gg.post_header", "gg.meta", "gg.html_closing_boilerplate", ...
[((803, 890), 'gg.meta', 'gg.meta', (['"""oz"""', '"""Nice text!"""', '"""__draft__, foo, __inline__, bar, tags, __no_header__"""'], {}), "('oz', 'Nice text!',\n '__draft__, foo, __inline__, bar, tags, __no_header__')\n", (810, 890), False, 'import gg\n'), ((1091, 1131), 'gg.meta', 'gg.meta', (['"""oz"""', '"""Nice text!"""', '"""__draft__"""'], {}), "('oz', 'Nice text!', '__draft__')\n", (1098, 1131), False, 'import gg\n'), ((1279, 1366), 'gg.opengraph', 'gg.opengraph', (['"""Title!"""', '"""https://oliz.io/ggpy/"""', '"""Nice text!"""', '"""2020-02-20"""', 'config'], {}), "('Title!', 'https://oliz.io/ggpy/', 'Nice text!', '2020-02-20',\n config)\n", (1291, 1366), False, 'import gg\n'), ((1803, 1878), 'gg.opengraph', 'gg.opengraph', (['"""Title!"""', '"""https://oliz.io/ggpy/"""', '"""Nice text!"""', '"""2020-02-20"""'], {}), "('Title!', 'https://oliz.io/ggpy/', 'Nice text!', '2020-02-20')\n", (1815, 1878), False, 'import gg\n'), ((2266, 2344), 'gg.json_ld', 'gg.json_ld', (['"""Title! "BAM!\\""""', '"""https://oliz.io/ggpy/"""', '"""It says "BAM!\\""""', 'config'], {}), '(\'Title! "BAM!"\', \'https://oliz.io/ggpy/\', \'It says "BAM!"\', config)\n', (2276, 2344), False, 'import gg\n'), ((2623, 2693), 'gg.json_ld', 'gg.json_ld', (['"""Title! "BAM!\\""""', '"""https://oliz.io/ggpy/"""', '"""It says "BAM!\\""""'], {}), '(\'Title! 
"BAM!"\', \'https://oliz.io/ggpy/\', \'It says "BAM!"\')\n', (2633, 2693), False, 'import gg\n'), ((2964, 3050), 'gg.header', 'gg.header', (['"""https://example.com/logo.png"""', '"""<h1>Title!</h1>"""', '"""2021-03-27"""', 'config'], {}), "('https://example.com/logo.png', '<h1>Title!</h1>', '2021-03-27',\n config)\n", (2973, 3050), False, 'import gg\n'), ((3323, 3369), 'gg.header', 'gg.header', (['""""""', '"""<h1>Title!</h1>"""', '"""2021-03-27"""'], {}), "('', '<h1>Title!</h1>', '2021-03-27')\n", (3332, 3369), False, 'import gg\n'), ((3538, 3604), 'gg.post_header', 'gg.post_header', (['"""<h1 id="title">Title!</h1>"""', '"""2020-02-20"""', 'config'], {}), '(\'<h1 id="title">Title!</h1>\', \'2020-02-20\', config)\n', (3552, 3604), False, 'import gg\n'), ((3809, 3867), 'gg.post_header', 'gg.post_header', (['"""<h1 id="title">Title!</h1>"""', '"""2020-02-20"""'], {}), '(\'<h1 id="title">Title!</h1>\', \'2020-02-20\')\n', (3823, 3867), False, 'import gg\n'), ((4057, 4079), 'gg.footer_navigation', 'gg.footer_navigation', ([], {}), '()\n', (4077, 4079), False, 'import gg\n'), ((4314, 4347), 'gg.about_and_social_icons', 'gg.about_and_social_icons', (['config'], {}), '(config)\n', (4339, 4347), False, 'import gg\n'), ((4658, 4685), 'gg.about_and_social_icons', 'gg.about_and_social_icons', ([], {}), '()\n', (4683, 4685), False, 'import gg\n'), ((4825, 4845), 'gg.scan_posts', 'gg.scan_posts', (["['.']"], {}), "(['.'])\n", (4838, 4845), False, 'import gg\n'), ((4938, 4959), 'gg.posts_index', 'gg.posts_index', (['posts'], {}), '(posts)\n', (4952, 4959), False, 'import gg\n'), ((5820, 5872), 'gg.scan_posts', 'gg.scan_posts', (["['test/features/index-inline-posts/']"], {}), "(['test/features/index-inline-posts/'])\n", (5833, 5872), False, 'import gg\n'), ((5891, 5912), 'gg.posts_index', 'gg.posts_index', (['posts'], {}), '(posts)\n', (5905, 5912), False, 'import gg\n'), ((8075, 8161), 'gg.html_tag_empty', 'gg.html_tag_empty', (['"""link"""', "[('rel', 'canonical'), ('href', 
'https://example.com')]"], {}), "('link', [('rel', 'canonical'), ('href',\n 'https://example.com')])\n", (8092, 8161), False, 'import gg\n'), ((8253, 8282), 'gg.html_tag_empty', 'gg.html_tag_empty', (['"""link"""', '[]'], {}), "('link', [])\n", (8270, 8282), False, 'import gg\n'), ((8460, 8477), 'gg.inline_style', 'gg.inline_style', ([], {}), '()\n', (8475, 8477), False, 'import gg\n'), ((8667, 8689), 'gg.inline_javascript', 'gg.inline_javascript', ([], {}), '()\n', (8687, 8689), False, 'import gg\n'), ((295, 314), 'gg.logo_url', 'gg.logo_url', (['config'], {}), '(config)\n', (306, 314), False, 'import gg\n'), ((366, 379), 'gg.logo_url', 'gg.logo_url', ([], {}), '()\n', (377, 379), False, 'import gg\n'), ((420, 461), 'gg.pagetitle', 'gg.pagetitle', (['"""Good Generator.py"""', 'config'], {}), "('Good Generator.py', config)\n", (432, 461), False, 'import gg\n'), ((496, 529), 'gg.pagetitle', 'gg.pagetitle', (['"""Some Page"""', 'config'], {}), "('Some Page', config)\n", (508, 529), False, 'import gg\n'), ((576, 617), 'gg.pagetitle', 'gg.pagetitle', (['"""Title with default config"""'], {}), "('Title with default config')\n", (588, 617), False, 'import gg\n'), ((660, 676), 'gg.pagetitle', 'gg.pagetitle', (['""""""'], {}), "('')\n", (672, 676), False, 'import gg\n'), ((694, 708), 'gg.pagetitle', 'gg.pagetitle', ([], {}), '()\n', (706, 708), False, 'import gg\n'), ((726, 750), 'gg.pagetitle', 'gg.pagetitle', (['""""""', 'config'], {}), "('', config)\n", (738, 750), False, 'import gg\n'), ((7428, 7457), 'gg.html_opening_boilerplate', 'gg.html_opening_boilerplate', ([], {}), '()\n', (7455, 7457), False, 'import gg\n'), ((7717, 7748), 'gg.html_head_body_boilerplate', 'gg.html_head_body_boilerplate', ([], {}), '()\n', (7746, 7748), False, 'import gg\n'), ((7835, 7869), 'gg.html_tag_line', 'gg.html_tag_line', (['"""title"""', '"""Nice!"""'], {}), "('title', 'Nice!')\n", (7851, 7869), False, 'import gg\n'), ((7935, 7983), 'gg.html_tag_block', 'gg.html_tag_block', 
(['"""footer"""', '"""<p>in closing</p>"""'], {}), "('footer', '<p>in closing</p>')\n", (7952, 7983), False, 'import gg\n'), ((8364, 8393), 'gg.html_closing_boilerplate', 'gg.html_closing_boilerplate', ([], {}), '()\n', (8391, 8393), False, 'import gg\n')]
from typing import TYPE_CHECKING, Any, ClassVar, Optional from urllib.parse import quote as _uriquote if TYPE_CHECKING: from dis_snek.models.discord.snowflake import Snowflake_Type __all__ = ["Route"] class Route: BASE: ClassVar[str] = "https://discord.com/api/v9" path: str params: dict[str, str | int] webhook_id: Optional["Snowflake_Type"] webhook_token: Optional[str] def __init__(self, method: str, path: str, **parameters: Any): self.path: str = path self.method: str = method self.params = parameters self.channel_id = parameters.get("channel_id") self.guild_id = parameters.get("guild_id") self.webhook_id = parameters.get("webhook_id") self.webhook_token = parameters.get("webhook_token") self.known_bucket: Optional[str] = None def __eq__(self, other): if isinstance(other, Route): return self.rl_bucket == other.rl_bucket return NotImplemented def __hash__(self): return hash(self.rl_bucket) def __repr__(self): return f"<Route {self.endpoint}>" def __str__(self): return self.endpoint @property def rl_bucket(self) -> str: """This route's full rate limit bucket""" if self.known_bucket: return self.known_bucket if self.webhook_token: return f"{self.webhook_id}{self.webhook_token}:{self.channel_id}:{self.guild_id}:{self.endpoint}" return f"{self.channel_id}:{self.guild_id}:{self.endpoint}" @property def endpoint(self) -> str: """The endpoint for this route""" return f"{self.method} {self.path}" @property def url(self) -> str: """The full url for this route""" return f"{self.BASE}{self.path}".format_map( {k: _uriquote(v) if isinstance(v, str) else v for k, v in self.params.items()} )
[ "urllib.parse.quote" ]
[((1828, 1840), 'urllib.parse.quote', '_uriquote', (['v'], {}), '(v)\n', (1837, 1840), True, 'from urllib.parse import quote as _uriquote\n')]
# udi dataset process module
# modified from nuscenes_dataset.py
import json
import pickle
import time
import random
from copy import deepcopy
from functools import partial
from pathlib import Path
import subprocess

import fire
import numpy as np
import os

from second.core import box_np_ops
from second.core import preprocess as prep
from second.data import kitti_common as kitti
from second.data.dataset import Dataset, register_dataset
from second.utils.eval import get_coco_eval_result, get_official_eval_result
from second.utils.progress_bar import progress_bar_iter as prog_bar
from second.utils.timer import simple_timer


@register_dataset
class UDIDataset(Dataset):
    """SECOND dataset adapter for the UDI point-cloud dataset.

    Reads a pickled info file produced by ``create_udi_infos`` and serves
    (points, annotations) samples in the format the SECOND pipeline expects.
    """
    # Each lidar point is (x, y, z, intensity).
    NumPointFeatures = 4
    NameMapping = {
        'car': 'car',
        'pedestrian': 'pedestrian',
        'cyclist': 'cyclist',
        'truck': 'truck',
        'forklift': 'forklift',
        'golf car': 'golf car',
        'motorcyclist': 'motorcyclist',
        'bicycle': 'bicycle',
        'motorbike': 'motorbike'
    }
    DefaultAttribute = {
        "car": "object_action_parked",
        "pedestrain": "object_action_walking",
        "bicycle": "object_action_driving_straight_forward",
        "motorcycle": "object_action_parked",
        "other_vehicle": "object_action_driving_straight_forward",
        "emergency_vehicle": "object_action_driving_straight_forward",
        "truck": "object_action_parked",
        "animal": "",
        "bus": "object_action_driving_straight_forward",
    }

    def __init__(self,
                 root_path,
                 info_path,
                 class_names=None,
                 prep_func=None,
                 num_point_features=None):
        """Load the pickled info file and keep references to the prep pipeline.

        Args:
            root_path: dataset root directory.
            info_path: path to the pickle written by ``create_udi_infos``.
            class_names: detection class names used for evaluation.
            prep_func: preprocessing callable applied in ``__getitem__``.
            num_point_features: unused, kept for interface compatibility.
        """
        self._root_path = Path(root_path)
        self._info_path = Path(info_path)
        with open(info_path, 'rb') as f:
            data = pickle.load(f)
        self._udi_infos = data["infos"]
        self._metadata = data["metadata"]
        self._class_names = class_names
        self._prep_func = prep_func
        self.version = self._metadata["version"]
        self._with_velocity = False

    def __len__(self):
        return len(self._udi_infos)

    def __getitem__(self, idx):
        """Return one preprocessed training example."""
        input_dict = self.get_sensor_data(idx)
        example = self._prep_func(input_dict=input_dict)
        example["metadata"] = input_dict["metadata"]
        if "anchors_mask" in example:
            example["anchors_mask"] = example["anchors_mask"].astype(np.uint8)
        return example

    def get_sensor_data(self, query):
        """Load raw lidar points (and gt annotations, if present) for a sample.

        ``query`` is either an integer index or a dict with a
        ``["lidar"]["idx"]`` entry, matching SECOND's query convention.
        """
        idx = query
        if isinstance(query, dict):
            assert "lidar" in query
            idx = query["lidar"]["idx"]
        info = self._udi_infos[idx]
        res = {
            "lidar": {
                "type": "lidar",
                "points": None,
            },
            "metadata": {
                "token": info["token"]
            },
        }
        lidar_path = Path(info['lidar_path'])
        points = np.fromfile(str(lidar_path), dtype=np.float32).reshape((-1, 4))
        # Normalize intensity from [0, 255] into [0, 1].
        points[:, 3] /= 255
        res["lidar"]["points"] = points
        if 'gt_boxes' in info:
            res["lidar"]["annotations"] = {
                'boxes': info["gt_boxes"],
                'names': info["gt_names"]
            }
        return res

    def evaluation_udi(self, detections, output_dir):
        """Serialize detections to nuScenes-style JSON and run udi_eval.py.

        Returns a dict with human-readable ``results`` text and per-class
        ``detail`` metrics parsed from the evaluator's metrics_summary.json.
        """
        version = self.version
        eval_set_map = {
            "v1.0-trainval": "val",
        }
        udi_annos = {}
        mapped_class_names = self._class_names
        for det in detections:
            annos = []
            boxes = _second_det_to_udi_box(det)
            # NOTE: the original code first rewrote box.velocity as
            # np.array([vx, vy, 0.0]) in a separate identical loop; that had
            # no effect on the serialized [:2] slice, so it was dropped.
            for i, box in enumerate(boxes):
                name = mapped_class_names[box.label]
                velocity = box.velocity[:2].tolist()
                nusc_anno = {
                    "sample_token": det["metadata"]["token"],
                    "translation": box.center.tolist(),
                    "size": box.wlh.tolist(),
                    "rotation": box.orientation.elements.tolist(),
                    "velocity": velocity,
                    "detection_name": name,
                    "detection_score": box.score,
                    "attribute_name": "",
                }
                annos.append(nusc_anno)
            udi_annos[det["metadata"]["token"]] = annos
        nusc_submissions = {
            "meta": {
                "use_camera": False,
                "use_lidar": False,
                "use_radar": False,
                "use_map": False,
                "use_external": False,
            },
            "results": udi_annos,
        }
        res_path = Path(output_dir) / "results_udi.json"
        with open(res_path, "w") as f:
            json.dump(nusc_submissions, f)
        eval_main_file = Path(__file__).resolve().parent / "udi_eval.py"
        # why add \"{}\"? to support path with spaces.
        cmd = f"python3 {str(eval_main_file)} --root_path=\"{str(self._root_path)}\""
        cmd += f" --info_path=\"{str(self._info_path)}\""
        cmd += f" --version={self.version}"
        cmd += f" --res_path=\"{str(res_path)}\" --eval_set={eval_set_map[self.version]}"
        cmd += f" --output_dir=\"{output_dir}\""
        # use subprocess can release all nusc memory after evaluation
        subprocess.check_output(cmd, shell=True)
        with open(Path(output_dir) / "metrics_summary.json", "r") as f:
            metrics = json.load(f)
        detail = {}
        res_path.unlink()  # delete results_udi.json since it's very large
        result = f"Nusc {version} Evaluation\n"
        for name in mapped_class_names:
            detail[name] = {}
            for k, v in metrics["label_aps"][name].items():
                detail[name][f"dist@{k}"] = v
            tp_errs = []
            tp_names = []
            for k, v in metrics["label_tp_errors"][name].items():
                detail[name][k] = v
                tp_errs.append(f"{v:.4f}")
                tp_names.append(k)
            threshs = ', '.join(list(metrics["label_aps"][name].keys()))
            scores = list(metrics["label_aps"][name].values())
            scores = ', '.join([f"{s * 100:.2f}" for s in scores])
            result += f"{name} Nusc dist AP@{threshs} and TP errors\n"
            result += scores
            result += "\n"
            result += ', '.join(tp_names) + ": " + ', '.join(tp_errs)
            result += "\n"
        return {
            "results": {
                "nusc": result
            },
            "detail": {
                "nusc": detail
            },
        }

    def evaluation(self, detections, output_dir):
        """SECOND evaluation entry point; delegates to ``evaluation_udi``."""
        res_udi = self.evaluation_udi(detections, output_dir)
        res = {
            "results": {
                # BUG FIX: evaluation_udi returns key "results" (plural);
                # the original read res_udi["result"] and raised KeyError.
                "nusc": res_udi["results"]["nusc"],
            },
            "detail": {
                "eval.nusc": res_udi["detail"]["nusc"],
            },
        }
        return res


def _second_det_to_udi_box(detection):
    """Convert one SECOND detection dict (torch tensors) into udi_eval Boxes."""
    from udi_eval import Box
    import pyquaternion
    box3d = detection["box3d_lidar"].detach().cpu().numpy()
    scores = detection["scores"].detach().cpu().numpy()
    labels = detection["label_preds"].detach().cpu().numpy()
    # Convert SECOND's yaw convention back to the evaluator's convention.
    box3d[:, 6] = -box3d[:, 6] - np.pi / 2
    box_list = []
    for i in range(box3d.shape[0]):
        quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box3d[i, 6])
        # Velocity is not predicted by this model.
        velocity = (np.nan, np.nan, np.nan)
        box = Box(
            box3d[i, :3],
            box3d[i, 3:6],
            quat,
            label=labels[i],
            score=scores[i],
            velocity=velocity)
        box_list.append(box)
    return box_list


def _fill_train_infos(root_path):
    """Scan lidar/label/image directories and build one info dict per frame.

    Expects ``<root>/lidar/<idx>.bin``, ``<root>/image/<idx>.jpg`` and
    ``<root>/label/<idx>_bin.json`` for every frame.
    """
    train_udi_infos = []
    lidar_root_path = root_path + "/lidar"
    label_root_path = root_path + "/label"
    img_root_path = root_path + "/image"
    filenames = os.listdir(lidar_root_path)
    for filename in prog_bar(filenames):
        index = filename.split(".")[0]
        lidar_path = lidar_root_path + "/" + index + ".bin"
        cam_path = img_root_path + "/" + index + ".jpg"
        label_path = label_root_path + "/" + index + "_bin.json"
        assert Path(lidar_path).exists()
        assert Path(cam_path).exists()
        assert Path(label_path).exists()
        with open(label_path, encoding='utf-8') as f:
            res = f.read()
            result = json.loads(res)
        boxes = result["elem"]
        info = {
            "lidar_path": lidar_path,
            "cam_front_path": cam_path,
            "filename": filename,
            "token": int(index),
        }
        gt_locs_list = []
        gt_dims_list = []
        print("label file path:", label_path)
        for box in boxes:
            box_loc = box["position"]
            box_size = box["size"]
            # np.float was removed in NumPy 1.24; float64 is the same dtype.
            box_loc_ = np.array([box_loc["x"], box_loc["y"], box_loc["z"]],
                                dtype=np.float64)
            box_size_ = np.array([box_size["width"], box_size["depth"], box_size["height"]],
                                 dtype=np.float64)
            gt_locs_list.append(box_loc_.reshape(-1, 3))
            gt_dims_list.append(box_size_.reshape(-1, 3))
        locs = np.concatenate(gt_locs_list, axis=0)
        dims = np.concatenate(gt_dims_list, axis=0)
        rots = np.array([b["yaw"] for b in boxes]).reshape(-1, 1)
        names = [b["class"] for b in boxes]
        for i in range(len(names)):
            if names[i] in UDIDataset.NameMapping:
                names[i] = UDIDataset.NameMapping[names[i]]
        names = np.array(names)
        # we need to convert rot to SECOND format.
        # change the rot format will break all checkpoint.
        gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1)
        info["gt_boxes"] = gt_boxes
        info["gt_names"] = names
        train_udi_infos.append(info)
    return train_udi_infos


def create_udi_infos(root_path):
    """Build the training info pickle (``infos_udi_train.pkl``) under root_path."""
    root_path = str(root_path)
    train_udi_infos = _fill_train_infos(root_path)
    metadata = {
        "version": "v0.1-train",
    }
    print(f"train sample: {len(train_udi_infos)}")
    data = {
        "infos": train_udi_infos,
        "metadata": metadata,
    }
    with open(root_path + "/infos_udi_train.pkl", 'wb') as f:
        pickle.dump(data, f)


def get_box_mean(info_path, class_name="car"):
    """Return the mean 7-DoF box for one class plus all its raw boxes."""
    with open(info_path, 'rb') as f:
        udi_infos = pickle.load(f)["infos"]
    gt_boxes_list = []
    for info in udi_infos:
        mask = np.array([s == class_name for s in info["gt_names"]], dtype=np.bool_)
        gt_boxes = info["gt_boxes"][mask]
        gt_boxes_list.append(gt_boxes.reshape(-1, 7))
    gt_boxes_list = np.concatenate(gt_boxes_list, axis=0)
    return {
        "box3d": gt_boxes_list.mean(0).tolist(),
        "detail": gt_boxes_list
    }


def get_all_box_mean(info_path):
    """Print and return the per-class mean box sizes over all mapped classes."""
    det_names = set()
    for k, v in UDIDataset.NameMapping.items():
        if v not in det_names:
            det_names.add(v)
    det_names = sorted(list(det_names))
    res = {}
    details = {}
    for k in det_names:
        result = get_box_mean(info_path, k)
        details[k] = result["detail"]
        res[k] = result["box3d"]
    print(json.dumps(res, indent=2))
    return details


if __name__ == "__main__":
    fire.Fire()
[ "subprocess.check_output", "json.loads", "os.listdir", "pickle.dump", "fire.Fire", "pathlib.Path", "json.dumps", "udi_eval.Box", "pickle.load", "numpy.array", "numpy.concatenate", "json.load", "pyquaternion.Quaternion", "json.dump", "second.utils.progress_bar.progress_bar_iter" ]
[((9955, 9982), 'os.listdir', 'os.listdir', (['lidar_root_path'], {}), '(lidar_root_path)\n', (9965, 9982), False, 'import os\n'), ((10004, 10023), 'second.utils.progress_bar.progress_bar_iter', 'prog_bar', (['filenames'], {}), '(filenames)\n', (10012, 10023), True, 'from second.utils.progress_bar import progress_bar_iter as prog_bar\n'), ((12921, 12958), 'numpy.concatenate', 'np.concatenate', (['gt_boxes_list'], {'axis': '(0)'}), '(gt_boxes_list, axis=0)\n', (12935, 12958), True, 'import numpy as np\n'), ((13521, 13532), 'fire.Fire', 'fire.Fire', ([], {}), '()\n', (13530, 13532), False, 'import fire\n'), ((1711, 1726), 'pathlib.Path', 'Path', (['root_path'], {}), '(root_path)\n', (1715, 1726), False, 'from pathlib import Path\n'), ((1753, 1768), 'pathlib.Path', 'Path', (['info_path'], {}), '(info_path)\n', (1757, 1768), False, 'from pathlib import Path\n'), ((2927, 2951), 'pathlib.Path', 'Path', (["info['lidar_path']"], {}), "(info['lidar_path'])\n", (2931, 2951), False, 'from pathlib import Path\n'), ((5764, 5804), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (5787, 5804), False, 'import subprocess\n'), ((7783, 7843), 'pyquaternion.Quaternion', 'pyquaternion.Quaternion', ([], {'axis': '[0, 0, 1]', 'radians': 'box3d[i, 6]'}), '(axis=[0, 0, 1], radians=box3d[i, 6])\n', (7806, 7843), False, 'import pyquaternion\n'), ((7982, 8077), 'udi_eval.Box', 'Box', (['box3d[i, :3]', 'box3d[i, 3:6]', 'quat'], {'label': 'labels[i]', 'score': 'scores[i]', 'velocity': 'velocity'}), '(box3d[i, :3], box3d[i, 3:6], quat, label=labels[i], score=scores[i],\n velocity=velocity)\n', (7985, 8077), False, 'from udi_eval import Box\n'), ((10466, 10481), 'json.loads', 'json.loads', (['res'], {}), '(res)\n', (10476, 10481), False, 'import json\n'), ((11287, 11323), 'numpy.concatenate', 'np.concatenate', (['gt_locs_list'], {'axis': '(0)'}), '(gt_locs_list, axis=0)\n', (11301, 11323), True, 'import numpy as np\n'), ((11339, 11375), 
'numpy.concatenate', 'np.concatenate', (['gt_dims_list'], {'axis': '(0)'}), '(gt_dims_list, axis=0)\n', (11353, 11375), True, 'import numpy as np\n'), ((11650, 11665), 'numpy.array', 'np.array', (['names'], {}), '(names)\n', (11658, 11665), True, 'import numpy as np\n'), ((11795, 11850), 'numpy.concatenate', 'np.concatenate', (['[locs, dims, -rots - np.pi / 2]'], {'axis': '(1)'}), '([locs, dims, -rots - np.pi / 2], axis=1)\n', (11809, 11850), True, 'import numpy as np\n'), ((12420, 12440), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (12431, 12440), False, 'import pickle\n'), ((12709, 12780), 'numpy.array', 'np.array', (["[(s == class_name) for s in info['gt_names']]"], {'dtype': 'np.bool_'}), "([(s == class_name) for s in info['gt_names']], dtype=np.bool_)\n", (12717, 12780), True, 'import numpy as np\n'), ((13442, 13467), 'json.dumps', 'json.dumps', (['res'], {'indent': '(2)'}), '(res, indent=2)\n', (13452, 13467), False, 'import json\n'), ((1829, 1843), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1840, 1843), False, 'import pickle\n'), ((5111, 5127), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (5115, 5127), False, 'from pathlib import Path\n'), ((5200, 5230), 'json.dump', 'json.dump', (['nusc_submissions', 'f'], {}), '(nusc_submissions, f)\n', (5209, 5230), False, 'import json\n'), ((5899, 5911), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5908, 5911), False, 'import json\n'), ((10907, 10975), 'numpy.array', 'np.array', (["[box_loc['x'], box_loc['y'], box_loc['z']]"], {'dtype': 'np.float'}), "([box_loc['x'], box_loc['y'], box_loc['z']], dtype=np.float)\n", (10915, 10975), True, 'import numpy as np\n'), ((10999, 11088), 'numpy.array', 'np.array', (["[box_size['width'], box_size['depth'], box_size['height']]"], {'dtype': 'np.float'}), "([box_size['width'], box_size['depth'], box_size['height']], dtype=\n np.float)\n", (11007, 11088), True, 'import numpy as np\n'), ((12547, 12561), 'pickle.load', 'pickle.load', 
(['f'], {}), '(f)\n', (12558, 12561), False, 'import pickle\n'), ((4070, 4096), 'numpy.array', 'np.array', (['[*velocity, 0.0]'], {}), '([*velocity, 0.0])\n', (4078, 4096), True, 'import numpy as np\n'), ((10261, 10277), 'pathlib.Path', 'Path', (['lidar_path'], {}), '(lidar_path)\n', (10265, 10277), False, 'from pathlib import Path\n'), ((10302, 10316), 'pathlib.Path', 'Path', (['cam_path'], {}), '(cam_path)\n', (10306, 10316), False, 'from pathlib import Path\n'), ((10341, 10357), 'pathlib.Path', 'Path', (['label_path'], {}), '(label_path)\n', (10345, 10357), False, 'from pathlib import Path\n'), ((11391, 11426), 'numpy.array', 'np.array', (["[b['yaw'] for b in boxes]"], {}), "([b['yaw'] for b in boxes])\n", (11399, 11426), True, 'import numpy as np\n'), ((5823, 5839), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (5827, 5839), False, 'from pathlib import Path\n'), ((5256, 5270), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5260, 5270), False, 'from pathlib import Path\n')]
#!/usr/bin/env python
"""
Configure folder for Multicolor testing.

Hazen 01/18
"""
import argparse
import inspect
import numpy
import os
import pickle
import subprocess

import storm_analysis
import storm_analysis.sa_library.parameters as parameters
import storm_analysis.sa_library.sa_h5py as saH5Py
import storm_analysis.simulator.background as background
import storm_analysis.simulator.camera as camera
import storm_analysis.simulator.drift as drift
import storm_analysis.simulator.photophysics as photophysics
import storm_analysis.simulator.psf as psf
import storm_analysis.simulator.simulate as simulate
import storm_analysis.sCMOS.scmos_analysis as scmos

import storm_analysis.diagnostics.multicolor.settings as settings


def testingParametersSCMOS():
    """
    Create a sCMOS parameters object.

    Used for the single-channel analysis of the mapping movies.
    """
    params = parameters.ParametersSCMOS()

    params.setAttr("max_frame", "int", -1)
    params.setAttr("start_frame", "int", -1)

    params.setAttr("background_sigma", "float", 8.0)
    params.setAttr("camera_calibration", "filename", "calib.npy")
    params.setAttr("find_max_radius", "int", 5)
    params.setAttr("foreground_sigma", "float", 1.5)
    params.setAttr("iterations", "int", settings.iterations)
    params.setAttr("model", "string", "2dfixed")
    params.setAttr("pixel_size", "float", settings.pixel_size)
    params.setAttr("sigma", "float", 150.0/settings.pixel_size)
    params.setAttr("threshold", "float", 6.0)

    # Don't do tracking.
    params.setAttr("descriptor", "string", "1")
    params.setAttr("radius", "float", "0.0")

    # Don't do drift-correction.
    params.setAttr("d_scale", "int", 2)
    params.setAttr("drift_correction", "int", 0)
    params.setAttr("frame_step", "int", 500)
    params.setAttr("z_correction", "int", 0)

    return params


def testingParametersMC():
    """
    Create a Multiplane parameters object.

    Configures the four-channel (c1..c4) multicolor analysis.
    """
    params = parameters.ParametersMultiplaneArb()

    params.setAttr("max_frame", "int", -1)
    params.setAttr("start_frame", "int", -1)

    params.setAttr("background_sigma", "float", 8.0)
    params.setAttr("find_max_radius", "int", 2)
    params.setAttr("independent_heights", "int", settings.independent_heights)
    params.setAttr("iterations", "int", settings.iterations)
    params.setAttr("mapping", "filename", "map.map")
    params.setAttr("no_fitting", "int", 0)
    params.setAttr("pixel_size", "float", settings.pixel_size)
    params.setAttr("sigma", "float", 1.5)
    params.setAttr("threshold", "float", 6.0)
    params.setAttr("weights", "filename", "weights.npy")
    params.setAttr("z_value", "float-array", settings.z_value)

    params.setAttr("channel0_cal", "filename", "calib.npy")
    params.setAttr("channel1_cal", "filename", "calib.npy")
    params.setAttr("channel2_cal", "filename", "calib.npy")
    params.setAttr("channel3_cal", "filename", "calib.npy")

    params.setAttr("channel0_ext", "string", "_c1.dax")
    params.setAttr("channel1_ext", "string", "_c2.dax")
    params.setAttr("channel2_ext", "string", "_c3.dax")
    params.setAttr("channel3_ext", "string", "_c4.dax")

    params.setAttr("channel0_offset", "int", 0)
    params.setAttr("channel1_offset", "int", 0)
    params.setAttr("channel2_offset", "int", 0)
    params.setAttr("channel3_offset", "int", 0)

    params.setAttr("spline0", "filename", "c1_psf.spline")
    params.setAttr("spline1", "filename", "c2_psf.spline")
    params.setAttr("spline2", "filename", "c3_psf.spline")
    params.setAttr("spline3", "filename", "c4_psf.spline")

    # Do tracking (localization color analysis depends on the tracks).
    params.setAttr("descriptor", "string", "1")
    params.setAttr("radius", "float", "1.0")
    params.setAttr("max_z", "float", str(0.001 * settings.psf_z_range))
    params.setAttr("min_z", "float", str(-0.001 * settings.psf_z_range))

    # Don't do drift-correction.
    params.setAttr("d_scale", "int", 2)
    params.setAttr("drift_correction", "int", 0)
    params.setAttr("frame_step", "int", 500)
    params.setAttr("z_correction", "int", 0)

    return params


def configure():
    """
    Create all of the files (calibration, simulated movies, mapping,
    PSFs, splines and Cramer-Rao weights) needed for multicolor testing
    in the current working directory.
    """
    # Get relevant paths.
    mm_path = os.path.dirname(inspect.getfile(storm_analysis)) + "/micrometry/"
    mp_path = os.path.dirname(inspect.getfile(storm_analysis)) + "/multi_plane/"
    sp_path = os.path.dirname(inspect.getfile(storm_analysis)) + "/spliner/"
    # Hoisted here; the original computed this twice inside configure().
    sim_path = os.path.dirname(inspect.getfile(storm_analysis)) + "/simulator/"

    # Create analysis XML files.
    #
    print("Creating XML files.")
    params = testingParametersSCMOS()
    params.toXMLFile("scmos.xml")

    params = testingParametersMC()
    params.toXMLFile("multicolor.xml")

    # Useful variables
    aoi_size = int(settings.psf_size/2)+1

    # Create sCMOS data and HDF5 files we'll need for the simulation.
    #
    if True:

        # Create sCMOS camera calibration files.
        #
        numpy.save("calib.npy", [numpy.zeros((settings.y_size, settings.x_size)) + settings.camera_offset,
                                 numpy.ones((settings.y_size, settings.x_size)) * settings.camera_variance,
                                 numpy.ones((settings.y_size, settings.x_size)) * settings.camera_gain,
                                 1])

        # Create localization on a grid file.
        #
        print("Creating gridded localizations.")
        subprocess.call(["python", sim_path + "emitters_on_grid.py",
                         "--bin", "grid_list.hdf5",
                         "--nx", str(settings.nx),
                         "--ny", str(settings.ny),
                         "--spacing", "20",
                         "--zrange", str(settings.test_z_range),
                         "--zoffset", str(settings.test_z_offset)])

        # Create randomly located localizations file (for STORM movies).
        #
        print("Creating random localizations.")
        subprocess.call(["python", sim_path + "emitters_uniform_random.py",
                         "--bin", "random_storm.hdf5",
                         "--density", "1.0",
                         "--margin", str(settings.margin),
                         "--sx", str(settings.x_size),
                         "--sy", str(settings.y_size),
                         "--zrange", str(settings.test_z_range)])

        # Create randomly located localizations file (for mapping measurement).
        #
        print("Creating random localizations.")
        subprocess.call(["python", sim_path + "emitters_uniform_random.py",
                         "--bin", "random_map.hdf5",
                         "--density", "0.0003",
                         "--margin", str(settings.margin),
                         "--sx", str(settings.x_size),
                         "--sy", str(settings.y_size)])

        # Create sparser grid for PSF measurement.
        #
        print("Creating data for PSF measurement.")
        subprocess.call(["python", sim_path + "emitters_on_grid.py",
                         "--bin", "psf_list.hdf5",
                         "--nx", "6",
                         "--ny", "3",
                         "--spacing", "40"])

    ## This part makes / tests measuring the mapping.
    ##
    if True:
        print("Measuring mapping.")

        # Make localization files for simulations, one per channel, each
        # shifted by (dx, dy) and moved to that channel's z plane.
        #
        # FIX: the original contained this block twice verbatim; the
        # redundant second copy (which rewrote the same files with the
        # same contents) has been removed.
        #
        locs = saH5Py.loadLocalizations("random_map.hdf5")
        locs["z"][:] = 1.0e-3 * settings.z_planes[0]
        saH5Py.saveLocalizations("c1_random_map.hdf5", locs)
        for i in range(1,4):
            locs["x"] += settings.dx
            locs["y"] += settings.dy
            # FIX: apply the same 1.0e-3 unit conversion used for plane 0
            # above; the original dropped it here, putting planes 1-3 on a
            # different scale than plane 0.
            locs["z"][:] = 1.0e-3 * settings.z_planes[i]
            saH5Py.saveLocalizations("c" + str(i+1) + "_random_map.hdf5", locs)

        # Make simulated mapping data.
        #
        bg_f = lambda s, x, y, h5 : background.UniformBackground(s, x, y, h5, photons = 10)
        cam_f = lambda s, x, y, h5 : camera.SCMOS(s, x, y, h5, "calib.npy")
        pp_f = lambda s, x, y, h5 : photophysics.AlwaysOn(s, x, y, h5, 20000.0)
        psf_f = lambda s, x, y, i3 : psf.GaussianPSF(s, x, y, i3, settings.pixel_size)

        sim = simulate.Simulate(background_factory = bg_f,
                                camera_factory = cam_f,
                                photophysics_factory = pp_f,
                                psf_factory = psf_f,
                                x_size = settings.x_size,
                                y_size = settings.y_size)

        for i in range(4):
            sim.simulate("c" + str(i+1) + "_map.dax", "c" + str(i+1) + "_random_map.hdf5", 1)

        # Analyze simulated mapping data
        #
        for i in range(4):
            scmos.analyze("c" + str(i+1) + "_map.dax", "c" + str(i+1) + "_map.hdf5", "scmos.xml")

        # Measure mapping.
        #
        for i in range(3):
            subprocess.call(["python", mm_path + "micrometry.py",
                             "--locs1", "c1_map.hdf5",
                             "--locs2", "c" + str(i+2) + "_map.hdf5",
                             "--results", "c1_c" + str(i+2) + "_map.map",
                             "--no_plots"])

        # Merge mapping.
        #
        subprocess.call(["python", mm_path + "merge_maps.py",
                         "--results", "map.map",
                         "--maps", "c1_c2_map.map", "c1_c3_map.map", "c1_c4_map.map"])

        # Print mapping.
        #
        if True:
            print("Mapping is:")
            subprocess.call(["python", mp_path + "print_mapping.py",
                             "--mapping", "map.map"])
            print("")

        # Check that mapping is close to what we expect (within 5%).
        #
        with open("map.map", 'rb') as fp:
            mappings = pickle.load(fp)

        for i in range(3):
            if not numpy.allclose(mappings["0_" + str(i+1) + "_x"], numpy.array([settings.dx*(i+1), 1.0, 0.0]), rtol = 0.05, atol = 0.05):
                print("X mapping difference for channel", i+1)
            if not numpy.allclose(mappings["0_" + str(i+1) + "_y"], numpy.array([settings.dy*(i+1), 0.0, 1.0]), rtol = 0.05, atol = 0.05):
                print("Y mapping difference for channel", i+1)

    ## This part measures / test the PSF measurement.
    ##
    if True:

        # Create drift file, this is used to displace the localizations in the
        # PSF measurement movie.
        #
        dz = numpy.arange(-settings.psf_z_range, settings.psf_z_range + 0.05, 0.01)
        drift_data = numpy.zeros((dz.size, 3))
        drift_data[:,2] = dz
        numpy.savetxt("drift.txt", drift_data)

        # Also create the z-offset file.
        #
        z_offset = numpy.ones((dz.size, 2))
        z_offset[:,1] = dz
        numpy.savetxt("z_offset.txt", z_offset)

        # Create simulated data for PSF measurements.
        #
        bg_f = lambda s, x, y, h5 : background.UniformBackground(s, x, y, h5, photons = 10)
        cam_f = lambda s, x, y, h5 : camera.SCMOS(s, x, y, h5, "calib.npy")
        drift_f = lambda s, x, y, h5 : drift.DriftFromFile(s, x, y, h5, "drift.txt")
        pp_f = lambda s, x, y, h5 : photophysics.AlwaysOn(s, x, y, h5, 20000.0)
        psf_f = lambda s, x, y, h5 : psf.PupilFunction(s, x, y, h5, settings.pixel_size, [])

        sim = simulate.Simulate(background_factory = bg_f,
                                camera_factory = cam_f,
                                drift_factory = drift_f,
                                photophysics_factory = pp_f,
                                psf_factory = psf_f,
                                x_size = settings.x_size,
                                y_size = settings.y_size)

        if True:
            for i in range(4):
                sim.simulate("c" + str(i+1) + "_zcal.dax", "c" + str(i+1) + "_random_map.hdf5", dz.size)

        # Get localizations to use for PSF measurement.
        #
        subprocess.call(["python", mp_path + "psf_localizations.py",
                         "--bin", "c1_map_ref.hdf5",
                         "--map", "map.map",
                         "--aoi_size", str(aoi_size)])

        # Create PSF z stacks.
        #
        for i in range(4):
            subprocess.call(["python", mp_path + "psf_zstack.py",
                             "--movie", "c" + str(i+1) + "_zcal.dax",
                             "--bin", "c1_map_ref_c" + str(i+1) + "_psf.hdf5",
                             "--zstack", "c" + str(i+1) + "_zstack",
                             "--scmos_cal", "calib.npy",
                             "--aoi_size", str(aoi_size)])

        # Measure PSF.
        #
        for i in range(4):
            subprocess.call(["python", mp_path + "measure_psf.py",
                             "--zstack", "c" + str(i+1) + "_zstack.npy",
                             "--zoffsets", "z_offset.txt",
                             "--psf_name", "c" + str(i+1) + "_psf_normed.psf",
                             "--z_range", str(settings.psf_z_range),
                             "--normalize"])

    ## This part creates the splines.
    ##
    if True:
        print("Measuring Splines.")
        for i in range(4):
            subprocess.call(["python", sp_path + "psf_to_spline.py",
                             "--psf", "c" + str(i+1) + "_psf_normed.psf",
                             "--spline", "c" + str(i+1) + "_psf.spline",
                             "--spline_size", str(settings.psf_size)])

    ## This part measures the Cramer-Rao weights.
    ##
    if True:
        print("Calculating weights.")
        subprocess.call(["python", mp_path + "plane_weighting.py",
                         "--background", str(settings.photons[0][0]),
                         "--photons", str(settings.photons[0][1]),
                         "--output", "weights.npy",
                         "--xml", "multicolor.xml",
                         "--no_plots"])


if (__name__ == "__main__"):
    configure()
[ "storm_analysis.simulator.camera.SCMOS", "storm_analysis.simulator.drift.DriftFromFile", "numpy.array", "numpy.arange", "storm_analysis.simulator.simulate.Simulate", "storm_analysis.sa_library.sa_h5py.loadLocalizations", "storm_analysis.simulator.psf.GaussianPSF", "inspect.getfile", "storm_analysis....
[((832, 860), 'storm_analysis.sa_library.parameters.ParametersSCMOS', 'parameters.ParametersSCMOS', ([], {}), '()\n', (858, 860), True, 'import storm_analysis.sa_library.parameters as parameters\n'), ((1922, 1958), 'storm_analysis.sa_library.parameters.ParametersMultiplaneArb', 'parameters.ParametersMultiplaneArb', ([], {}), '()\n', (1956, 1958), True, 'import storm_analysis.sa_library.parameters as parameters\n'), ((7043, 7179), 'subprocess.call', 'subprocess.call', (["['python', sim_path + 'emitters_on_grid.py', '--bin', 'psf_list.hdf5',\n '--nx', '6', '--ny', '3', '--spacing', '40']"], {}), "(['python', sim_path + 'emitters_on_grid.py', '--bin',\n 'psf_list.hdf5', '--nx', '6', '--ny', '3', '--spacing', '40'])\n", (7058, 7179), False, 'import subprocess\n'), ((7469, 7512), 'storm_analysis.sa_library.sa_h5py.loadLocalizations', 'saH5Py.loadLocalizations', (['"""random_map.hdf5"""'], {}), "('random_map.hdf5')\n", (7493, 7512), True, 'import storm_analysis.sa_library.sa_h5py as saH5Py\n'), ((7574, 7626), 'storm_analysis.sa_library.sa_h5py.saveLocalizations', 'saH5Py.saveLocalizations', (['"""c1_random_map.hdf5"""', 'locs'], {}), "('c1_random_map.hdf5', locs)\n", (7598, 7626), True, 'import storm_analysis.sa_library.sa_h5py as saH5Py\n'), ((7935, 7978), 'storm_analysis.sa_library.sa_h5py.loadLocalizations', 'saH5Py.loadLocalizations', (['"""random_map.hdf5"""'], {}), "('random_map.hdf5')\n", (7959, 7978), True, 'import storm_analysis.sa_library.sa_h5py as saH5Py\n'), ((8040, 8092), 'storm_analysis.sa_library.sa_h5py.saveLocalizations', 'saH5Py.saveLocalizations', (['"""c1_random_map.hdf5"""', 'locs'], {}), "('c1_random_map.hdf5', locs)\n", (8064, 8092), True, 'import storm_analysis.sa_library.sa_h5py as saH5Py\n'), ((8733, 8899), 'storm_analysis.simulator.simulate.Simulate', 'simulate.Simulate', ([], {'background_factory': 'bg_f', 'camera_factory': 'cam_f', 'photophysics_factory': 'pp_f', 'psf_factory': 'psf_f', 'x_size': 'settings.x_size', 'y_size': 
'settings.y_size'}), '(background_factory=bg_f, camera_factory=cam_f,\n photophysics_factory=pp_f, psf_factory=psf_f, x_size=settings.x_size,\n y_size=settings.y_size)\n', (8750, 8899), True, 'import storm_analysis.simulator.simulate as simulate\n'), ((9785, 9928), 'subprocess.call', 'subprocess.call', (["['python', mm_path + 'merge_maps.py', '--results', 'map.map', '--maps',\n 'c1_c2_map.map', 'c1_c3_map.map', 'c1_c4_map.map']"], {}), "(['python', mm_path + 'merge_maps.py', '--results',\n 'map.map', '--maps', 'c1_c2_map.map', 'c1_c3_map.map', 'c1_c4_map.map'])\n", (9800, 9928), False, 'import subprocess\n'), ((11023, 11093), 'numpy.arange', 'numpy.arange', (['(-settings.psf_z_range)', '(settings.psf_z_range + 0.05)', '(0.01)'], {}), '(-settings.psf_z_range, settings.psf_z_range + 0.05, 0.01)\n', (11035, 11093), False, 'import numpy\n'), ((11115, 11140), 'numpy.zeros', 'numpy.zeros', (['(dz.size, 3)'], {}), '((dz.size, 3))\n', (11126, 11140), False, 'import numpy\n'), ((11178, 11216), 'numpy.savetxt', 'numpy.savetxt', (['"""drift.txt"""', 'drift_data'], {}), "('drift.txt', drift_data)\n", (11191, 11216), False, 'import numpy\n'), ((11288, 11312), 'numpy.ones', 'numpy.ones', (['(dz.size, 2)'], {}), '((dz.size, 2))\n', (11298, 11312), False, 'import numpy\n'), ((11348, 11387), 'numpy.savetxt', 'numpy.savetxt', (['"""z_offset.txt"""', 'z_offset'], {}), "('z_offset.txt', z_offset)\n", (11361, 11387), False, 'import numpy\n'), ((11894, 12083), 'storm_analysis.simulator.simulate.Simulate', 'simulate.Simulate', ([], {'background_factory': 'bg_f', 'camera_factory': 'cam_f', 'drift_factory': 'drift_f', 'photophysics_factory': 'pp_f', 'psf_factory': 'psf_f', 'x_size': 'settings.x_size', 'y_size': 'settings.y_size'}), '(background_factory=bg_f, camera_factory=cam_f,\n drift_factory=drift_f, photophysics_factory=pp_f, psf_factory=psf_f,\n x_size=settings.x_size, y_size=settings.y_size)\n', (11911, 12083), True, 'import storm_analysis.simulator.simulate as simulate\n'), ((4186, 
4217), 'inspect.getfile', 'inspect.getfile', (['storm_analysis'], {}), '(storm_analysis)\n', (4201, 4217), False, 'import inspect\n'), ((4266, 4297), 'inspect.getfile', 'inspect.getfile', (['storm_analysis'], {}), '(storm_analysis)\n', (4281, 4297), False, 'import inspect\n'), ((4347, 4378), 'inspect.getfile', 'inspect.getfile', (['storm_analysis'], {}), '(storm_analysis)\n', (4362, 4378), False, 'import inspect\n'), ((8419, 8472), 'storm_analysis.simulator.background.UniformBackground', 'background.UniformBackground', (['s', 'x', 'y', 'h5'], {'photons': '(10)'}), '(s, x, y, h5, photons=10)\n', (8447, 8472), True, 'import storm_analysis.simulator.background as background\n'), ((8512, 8550), 'storm_analysis.simulator.camera.SCMOS', 'camera.SCMOS', (['s', 'x', 'y', 'h5', '"""calib.npy"""'], {}), "(s, x, y, h5, 'calib.npy')\n", (8524, 8550), True, 'import storm_analysis.simulator.camera as camera\n'), ((8587, 8630), 'storm_analysis.simulator.photophysics.AlwaysOn', 'photophysics.AlwaysOn', (['s', 'x', 'y', 'h5', '(20000.0)'], {}), '(s, x, y, h5, 20000.0)\n', (8608, 8630), True, 'import storm_analysis.simulator.photophysics as photophysics\n'), ((8668, 8717), 'storm_analysis.simulator.psf.GaussianPSF', 'psf.GaussianPSF', (['s', 'x', 'y', 'i3', 'settings.pixel_size'], {}), '(s, x, y, i3, settings.pixel_size)\n', (8683, 8717), True, 'import storm_analysis.simulator.psf as psf\n'), ((10081, 10166), 'subprocess.call', 'subprocess.call', (["['python', mp_path + 'print_mapping.py', '--mapping', 'map.map']"], {}), "(['python', mp_path + 'print_mapping.py', '--mapping',\n 'map.map'])\n", (10096, 10166), False, 'import subprocess\n'), ((10359, 10374), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (10370, 10374), False, 'import pickle\n'), ((11489, 11542), 'storm_analysis.simulator.background.UniformBackground', 'background.UniformBackground', (['s', 'x', 'y', 'h5'], {'photons': '(10)'}), '(s, x, y, h5, photons=10)\n', (11517, 11542), True, 'import 
storm_analysis.simulator.background as background\n'), ((11582, 11620), 'storm_analysis.simulator.camera.SCMOS', 'camera.SCMOS', (['s', 'x', 'y', 'h5', '"""calib.npy"""'], {}), "(s, x, y, h5, 'calib.npy')\n", (11594, 11620), True, 'import storm_analysis.simulator.camera as camera\n'), ((11660, 11705), 'storm_analysis.simulator.drift.DriftFromFile', 'drift.DriftFromFile', (['s', 'x', 'y', 'h5', '"""drift.txt"""'], {}), "(s, x, y, h5, 'drift.txt')\n", (11679, 11705), True, 'import storm_analysis.simulator.drift as drift\n'), ((11742, 11785), 'storm_analysis.simulator.photophysics.AlwaysOn', 'photophysics.AlwaysOn', (['s', 'x', 'y', 'h5', '(20000.0)'], {}), '(s, x, y, h5, 20000.0)\n', (11763, 11785), True, 'import storm_analysis.simulator.photophysics as photophysics\n'), ((11823, 11878), 'storm_analysis.simulator.psf.PupilFunction', 'psf.PupilFunction', (['s', 'x', 'y', 'h5', 'settings.pixel_size', '[]'], {}), '(s, x, y, h5, settings.pixel_size, [])\n', (11840, 11878), True, 'import storm_analysis.simulator.psf as psf\n'), ((5359, 5390), 'inspect.getfile', 'inspect.getfile', (['storm_analysis'], {}), '(storm_analysis)\n', (5374, 5390), False, 'import inspect\n'), ((6986, 7017), 'inspect.getfile', 'inspect.getfile', (['storm_analysis'], {}), '(storm_analysis)\n', (7001, 7017), False, 'import inspect\n'), ((4891, 4938), 'numpy.zeros', 'numpy.zeros', (['(settings.y_size, settings.x_size)'], {}), '((settings.y_size, settings.x_size))\n', (4902, 4938), False, 'import numpy\n'), ((4998, 5044), 'numpy.ones', 'numpy.ones', (['(settings.y_size, settings.x_size)'], {}), '((settings.y_size, settings.x_size))\n', (5008, 5044), False, 'import numpy\n'), ((5106, 5152), 'numpy.ones', 'numpy.ones', (['(settings.y_size, settings.x_size)'], {}), '((settings.y_size, settings.x_size))\n', (5116, 5152), False, 'import numpy\n'), ((10471, 10517), 'numpy.array', 'numpy.array', (['[settings.dx * (i + 1), 1.0, 0.0]'], {}), '([settings.dx * (i + 1), 1.0, 0.0])\n', (10482, 10517), False, 
'import numpy\n'), ((10673, 10719), 'numpy.array', 'numpy.array', (['[settings.dy * (i + 1), 0.0, 1.0]'], {}), '([settings.dy * (i + 1), 0.0, 1.0])\n', (10684, 10719), False, 'import numpy\n')]
from django.test import TestCase
from django.urls import reverse
from django_netjsongraph.tests import CreateGraphObjectsMixin
from django_netjsongraph.tests.base.test_admin import TestAdminMixin
from openwisp_users.tests.utils import TestOrganizationMixin
from openwisp_utils.tests.utils import TestMultitenantAdminMixin

from . import CreateOrgMixin
from ..apps import OpenwispNetworkTopologyConfig as appconfig
from ..models import Link, Node, Topology


class TestAdmin(CreateGraphObjectsMixin, CreateOrgMixin,
                TestAdminMixin, TestCase):
    """Basic admin tests for the network-topology app."""
    topology_model = Topology
    link_model = Link
    node_model = Node

    @property
    def prefix(self):
        # Admin URL namespace, e.g. "admin:<app_label>".
        return 'admin:{0}'.format(appconfig.label)

    def setUp(self):
        org = self._create_org()
        topology = self._create_topology(organization=org)
        # Two nodes in the same topology/organization.
        for label, address in (("node1", "192.168.0.1;"),
                               ("node2", "192.168.0.2;")):
            self._create_node(label=label, addresses=address,
                              topology=topology, organization=org)
        super().setUp()


class TestMultitenantAdmin(CreateGraphObjectsMixin, TestMultitenantAdminMixin,
                           TestOrganizationMixin, TestCase):
    """Multitenancy tests: operators only see objects of their own orgs."""
    topology_model = Topology
    node_model = Node
    link_model = Link
    operator_permission_filters = [
        {'codename__endswith': 'topology'},
        {'codename__endswith': 'node'},
        {'codename__endswith': 'link'},
    ]

    def _create_multitenancy_test_env(self):
        """
        Build two active orgs plus one inactive org, each with a topology,
        two nodes and a link; the operator belongs to org1 and the
        inactive org. Returns everything keyed in a dict.
        """
        org1 = self._create_org(name='test1org')
        org2 = self._create_org(name='test2org')
        inactive = self._create_org(name='inactive-org', is_active=False)
        operator = self._create_operator(organizations=[org1, inactive])
        t1 = self._create_topology(label='topology1org', organization=org1)
        t2 = self._create_topology(label='topology2org', organization=org2)
        t3 = self._create_topology(label='topology3org', organization=inactive)
        n11 = self._create_node(label='node1org1', topology=t1, organization=org1)
        n12 = self._create_node(label='node2org1', topology=t1, organization=org1)
        n21 = self._create_node(label='node1org2', topology=t2, organization=org2)
        n22 = self._create_node(label='node2org2', topology=t2, organization=org2)
        n31 = self._create_node(label='node1inactive', topology=t3,
                                organization=inactive)
        n32 = self._create_node(label='node2inactive', topology=t3,
                                organization=inactive)
        l1 = self._create_link(topology=t1, organization=org1,
                               source=n11, target=n12)
        l2 = self._create_link(topology=t2, organization=org2,
                               source=n21, target=n22)
        l3 = self._create_link(topology=t3, organization=inactive,
                               source=n31, target=n32)
        return {
            't1': t1, 't2': t2, 't3_inactive': t3,
            'n11': n11, 'n12': n12, 'l1': l1,
            'n21': n21, 'n22': n22, 'l2': l2,
            'n31': n31, 'n32': n32, 'l3_inactive': l3,
            'org1': org1, 'org2': org2, 'inactive': inactive,
            'operator': operator,
        }

    def test_topology_queryset(self):
        env = self._create_multitenancy_test_env()
        self._test_multitenant_admin(
            url=reverse('admin:topology_topology_changelist'),
            visible=[env['t1'].label, env['org1'].name],
            hidden=[env['t2'].label, env['org2'].name,
                    env['t3_inactive'].label],
        )

    def test_topology_organization_fk_queryset(self):
        env = self._create_multitenancy_test_env()
        self._test_multitenant_admin(
            url=reverse('admin:topology_topology_add'),
            visible=[env['org1'].name],
            hidden=[env['org2'].name, env['inactive']],
            select_widget=True,
        )

    def test_node_queryset(self):
        env = self._create_multitenancy_test_env()
        self._test_multitenant_admin(
            url=reverse('admin:topology_node_changelist'),
            visible=[env['n11'].label, env['n12'].label, env['org1'].name],
            hidden=[env['n21'].label, env['n22'].label, env['org2'].name,
                    env['n31'].label, env['n32'].label, env['inactive']],
        )

    def test_node_organization_fk_queryset(self):
        env = self._create_multitenancy_test_env()
        self._test_multitenant_admin(
            url=reverse('admin:topology_node_add'),
            visible=[env['org1'].name],
            hidden=[env['org2'].name, env['inactive']],
            select_widget=True,
        )

    def test_link_queryset(self):
        env = self._create_multitenancy_test_env()
        self._test_multitenant_admin(
            url=reverse('admin:topology_link_changelist'),
            visible=[str(env['l1']), env['org1'].name],
            hidden=[str(env['l2']), env['org2'].name,
                    str(env['l3_inactive'])],
        )

    def test_link_organization_fk_queryset(self):
        env = self._create_multitenancy_test_env()
        self._test_multitenant_admin(
            url=reverse('admin:topology_link_add'),
            visible=[env['org1'].name],
            hidden=[env['org2'].name, env['inactive']],
            select_widget=True,
        )

    def test_node_topology_fk_queryset(self):
        env = self._create_multitenancy_test_env()
        self._test_multitenant_admin(
            url=reverse('admin:topology_node_add'),
            visible=[env['t1'].label],
            hidden=[env['t2'].label, env['t3_inactive'].label],
        )

    def test_link_topology_fk_queryset(self):
        env = self._create_multitenancy_test_env()
        self._test_multitenant_admin(
            url=reverse('admin:topology_link_add'),
            visible=[env['t1'].label],
            hidden=[env['t2'].label, env['t3_inactive'].label],
        )

    def test_node_topology_filter(self):
        env = self._create_multitenancy_test_env()
        t_special = self._create_topology(label='special',
                                          organization=env['org1'])
        self._test_multitenant_admin(
            url=reverse('admin:topology_node_changelist'),
            visible=[env['t1'].label, t_special.label],
            hidden=[env['t2'].label, env['t3_inactive'].label],
        )

    def test_link_topology_filter(self):
        env = self._create_multitenancy_test_env()
        t_special = self._create_topology(label='special',
                                          organization=env['org1'])
        self._test_multitenant_admin(
            url=reverse('admin:topology_link_changelist'),
            visible=[env['t1'].label, t_special.label],
            hidden=[env['t2'].label, env['t3_inactive'].label],
        )
[ "django.urls.reverse" ]
[((3659, 3704), 'django.urls.reverse', 'reverse', (['"""admin:topology_topology_changelist"""'], {}), "('admin:topology_topology_changelist')\n", (3666, 3704), False, 'from django.urls import reverse\n'), ((4040, 4078), 'django.urls.reverse', 'reverse', (['"""admin:topology_topology_add"""'], {}), "('admin:topology_topology_add')\n", (4047, 4078), False, 'from django.urls import reverse\n'), ((4361, 4402), 'django.urls.reverse', 'reverse', (['"""admin:topology_node_changelist"""'], {}), "('admin:topology_node_changelist')\n", (4368, 4402), False, 'from django.urls import reverse\n'), ((4803, 4837), 'django.urls.reverse', 'reverse', (['"""admin:topology_node_add"""'], {}), "('admin:topology_node_add')\n", (4810, 4837), False, 'from django.urls import reverse\n'), ((5120, 5161), 'django.urls.reverse', 'reverse', (['"""admin:topology_link_changelist"""'], {}), "('admin:topology_link_changelist')\n", (5127, 5161), False, 'from django.urls import reverse\n'), ((5490, 5524), 'django.urls.reverse', 'reverse', (['"""admin:topology_link_add"""'], {}), "('admin:topology_link_add')\n", (5497, 5524), False, 'from django.urls import reverse\n'), ((5819, 5853), 'django.urls.reverse', 'reverse', (['"""admin:topology_node_add"""'], {}), "('admin:topology_node_add')\n", (5826, 5853), False, 'from django.urls import reverse\n'), ((6123, 6157), 'django.urls.reverse', 'reverse', (['"""admin:topology_link_add"""'], {}), "('admin:topology_link_add')\n", (6130, 6157), False, 'from django.urls import reverse\n'), ((6508, 6549), 'django.urls.reverse', 'reverse', (['"""admin:topology_node_changelist"""'], {}), "('admin:topology_node_changelist')\n", (6515, 6549), False, 'from django.urls import reverse\n'), ((6917, 6958), 'django.urls.reverse', 'reverse', (['"""admin:topology_link_changelist"""'], {}), "('admin:topology_link_changelist')\n", (6924, 6958), False, 'from django.urls import reverse\n')]
import csv def read_regressor_examples(num_of_features, num_of_decisions, file_path): xs = [] ys = [] with open(file_path, mode='r', encoding='utf-8') as file: reader = csv.reader(file, delimiter=' ') for row in reader: x = [float(value) for value in row[0 : num_of_features]] y = [float(value) for value in row[num_of_features : num_of_features + num_of_decisions]] xs.append(x) ys.append(y) return { 'x': xs, 'y': ys }
[ "csv.reader" ]
[((192, 223), 'csv.reader', 'csv.reader', (['file'], {'delimiter': '""" """'}), "(file, delimiter=' ')\n", (202, 223), False, 'import csv\n')]
''' utils.py General utility functions: unit conversions, great-circle distances, CSV queries, platform-independent web browsing. ''' import csv import math import webbrowser # UNIT CONVERSIONS MPS_TO_KTS = 1.944 class units: def mps_to_kts(mps): return mps*MPS_TO_KTS def enforceTwoDigits(numStr): if len(numStr) == 1: return "0"+numStr return numStr def enforceDigitsLeading(numStr, maxDig): digits = len(numStr) if digits < maxDig: for i in range(maxDig-digits): numStr = "0" + numStr return numStr def enforceDigitsTrailing(numStr, maxDig): digits = len(numStr) if digits < maxDig: for i in range(maxDig-digits): numStr = numStr + "0" return numStr class geo: def nearestSea(lat, lon): # true if a is inside the range [b, c] def within(a, b, c): if b > c: c,b=b,c return a >= b and a <= c def inBbox(e): lat0,lon0 = float(e['lat0']),float(e['lon0']) lat1,lon1 = float(e['lat1']),float(e['lon1']) clat,clon = float(e['clat']),float(e['clon']) dist = geo.dist_coord(lat, lon, clat, clon) return (within(lat, lat0, lat1) and within(lon, lon0, lon1),dist) def saveDist(e, args): e['dist'] = args[0] def sortDist(e): return e['dist'] seas = db.query("./data/worldseas.csv", inBbox, saveDist) seas.sort(key=sortDist) if len(seas) > 0: return seas[0]['name'] return "" def latlon_to_nmea(lat, lon): latDeg = lat latMin = (latDeg - math.floor(latDeg))*60 lonDeg = lon lonMin = (lonDeg - math.floor(lonDeg))*60 if latDeg > 0: latDir = "N" else: latDir = "S" if lonDeg > 0: lonDir = "E" else: lonDir = "W" latMinStr = str(round(latMin,4)) latMinMajorStr = latMinStr[:latMinStr.find(".")] latMinMinorStr = latMinStr[latMinStr.find(".")+1:] latMinMajorStr = units.enforceDigitsLeading(latMinMajorStr, 2) latMinMinorStr = units.enforceDigitsTrailing(latMinMinorStr, 4) latMinStr = latMinMajorStr + "." 
+ latMinMinorStr lonMinStr = str(round(lonMin,4)) lonMinMajorStr = lonMinStr[:lonMinStr.find(".")] lonMinMinorStr = lonMinStr[lonMinStr.find(".")+1:] lonMinMajorStr = units.enforceDigitsLeading(lonMinMajorStr, 2) lonMinMinorStr = units.enforceDigitsTrailing(lonMinMinorStr, 4) lonMinStr = lonMinMajorStr + "." + lonMinMinorStr return str(int(abs(latDeg)))+latMinStr + "," + latDir + "," + str(int(abs(lonDeg)))+lonMinStr + "," + lonDir def deg_to_dms(deg, type='lat'): # source: https://stackoverflow.com/questions/2579535/convert-dd-decimal-degrees-to-dms-degrees-minutes-seconds-in-python decimals, number = math.modf(deg) d = int(number) m = int(decimals * 60) s = (deg - d - m / 60) * 3600.00 compass = { 'lat': ('N','S'), 'lon': ('E','W') } compass_str = compass[type][0 if d >= 0 else 1] return '{}{}ΒΊ{}\'{:.2f}"'.format(compass_str, abs(d), abs(m), abs(s)) def latlon_to_str(lat, lon): return geo.deg_to_dms(lat,'lat'),geo.deg_to_dms(lon,'lon') # distance between two global points in nautical miles def dist_coord(lat1,lon1,lat2,lon2): # source: https://stackoverflow.com/questions/19412462/getting-distance-between-two-points-based-on-latitude-longitude R = 6373.0 # approximate radius of earth in km lat1 = math.radians(lat1) lon1 = math.radians(lon1) lat2 = math.radians(lat2) lon2 = math.radians(lon2) dlon = lon2 - lon1 dlat = lat2 - lat1 a = math.sin(dlat / 2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2)**2 c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a)) return 0.539957*R * c # wraps angle to range [0, 360) def wrap_angle(b): deg = b while deg < 0: deg = 360+deg while deg >= 360: deg = deg-360 return deg class webviz: def loadURL(url): webbrowser.open(url) def openseamap(lat, lon): return "https://map.openseamap.org/?zoom=8&lat=" + lat + "&lon=" + lon + "&mlat=" + lat + "&mlon=" + lon + "&layers=BFTFFFTFFTF0FFFFFFFFFF" def pirosail(boatid): return "http://piro.biz/tracker/?2d&marineid=" + boatid def earthwindmap(lat, lon): return 
"https://earth.nullschool.net/#current/wind/surface/level/orthographic=" + lon + "," + lat + ",3000/loc=" + lon + "," + lat class db: # execute a function on each element of a CSV def execute(csvFile, executeFunc): with open(csvFile, newline='') as csvfile: elements = csv.DictReader(csvfile) for element in elements: executeFunc(element) # return results filtered by a query function, and optionally post-process results def query(csvFile, queryFunc, processFunc=None): results = [] with open(csvFile, newline='') as csvfile: elements = csv.DictReader(csvfile) for element in elements: res = queryFunc(element) if res[0]: if processFunc != None: processFunc(element, res[1:]) results.append(element) return results # return first element matching query function def findFirst(csvFile, queryFunc): with open(csvFile, newline='') as csvfile: elements = csv.DictReader(csvfile) for element in elements: if queryFunc(element): return element return None
[ "csv.DictReader", "math.floor", "math.sqrt", "webbrowser.open", "math.radians", "math.cos", "math.modf", "math.sin" ]
[((3056, 3070), 'math.modf', 'math.modf', (['deg'], {}), '(deg)\n', (3065, 3070), False, 'import math\n'), ((3796, 3814), 'math.radians', 'math.radians', (['lat1'], {}), '(lat1)\n', (3808, 3814), False, 'import math\n'), ((3830, 3848), 'math.radians', 'math.radians', (['lon1'], {}), '(lon1)\n', (3842, 3848), False, 'import math\n'), ((3864, 3882), 'math.radians', 'math.radians', (['lat2'], {}), '(lat2)\n', (3876, 3882), False, 'import math\n'), ((3898, 3916), 'math.radians', 'math.radians', (['lon2'], {}), '(lon2)\n', (3910, 3916), False, 'import math\n'), ((4393, 4413), 'webbrowser.open', 'webbrowser.open', (['url'], {}), '(url)\n', (4408, 4413), False, 'import webbrowser\n'), ((5030, 5053), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {}), '(csvfile)\n', (5044, 5053), False, 'import csv\n'), ((5364, 5387), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {}), '(csvfile)\n', (5378, 5387), False, 'import csv\n'), ((5823, 5846), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {}), '(csvfile)\n', (5837, 5846), False, 'import csv\n'), ((1753, 1771), 'math.floor', 'math.floor', (['latDeg'], {}), '(latDeg)\n', (1763, 1771), False, 'import math\n'), ((1824, 1842), 'math.floor', 'math.floor', (['lonDeg'], {}), '(lonDeg)\n', (1834, 1842), False, 'import math\n'), ((3983, 4001), 'math.sin', 'math.sin', (['(dlat / 2)'], {}), '(dlat / 2)\n', (3991, 4001), False, 'import math\n'), ((4090, 4102), 'math.sqrt', 'math.sqrt', (['a'], {}), '(a)\n', (4099, 4102), False, 'import math\n'), ((4104, 4120), 'math.sqrt', 'math.sqrt', (['(1 - a)'], {}), '(1 - a)\n', (4113, 4120), False, 'import math\n'), ((4007, 4021), 'math.cos', 'math.cos', (['lat1'], {}), '(lat1)\n', (4015, 4021), False, 'import math\n'), ((4024, 4038), 'math.cos', 'math.cos', (['lat2'], {}), '(lat2)\n', (4032, 4038), False, 'import math\n'), ((4041, 4059), 'math.sin', 'math.sin', (['(dlon / 2)'], {}), '(dlon / 2)\n', (4049, 4059), False, 'import math\n')]
from flask import Flask from flask import request, render_template, redirect from datetime import datetime from pymongo import MongoClient import html import random import json import ast from flask.ext.pymongo import PyMongo from flask import make_response, request, current_app from functools import update_wrapper app = Flask(__name__) mongo = PyMongo(app) app.config['MONGO_HOST'] = 'localhost' app.config['MONGO_PORT'] = 27017 app.config['MONGO_DBNAME'] = 'chirrup' mClient = MongoClient('localhost',27017) collection = mClient['chirrup']['tweets'] @app.route('/', methods=['GET','POST']) def home(): if request.method=='POST': var = request.form['query'] return redirect('/'+var, code=302) else: distincthashtags = collection.distinct("hashtags") return render_template("home.html",distincthashtags=distincthashtags) @app.route('/<input>', methods=['GET','POST']) def analyze(input): hashtag = input country_sentiment_query = list(collection.aggregate([{"$match":{"hashtags":hashtag}},{"$group":{'_id':'$country',"avgsentiment": {"$avg":"$sentiment"}}}])) average_sentiment_query = list(collection.aggregate([{"$match":{"hashtags":hashtag}},{"$group":{'_id':'sentiment',"avgsentiment": {"$avg":"$sentiment"}}}])) if len(average_sentiment_query)==0: return render_template('fourohfour.html') country_wise_sentiment = json.dumps(country_sentiment_query) average_sentiment = json.dumps(average_sentiment_query[0]) sorter = [('timestamp', 1)] last_ten_tweets = list(collection.find({"hashtags":hashtag},{'timestamp':0, '_id': 0}).sort(sorter))[:10] return render_template("analysis.html",country_wise_sentiment=country_wise_sentiment, average_sentiment=average_sentiment, hashtag=hashtag, last_ten_tweets=last_ten_tweets) if __name__=="__main__": app.run(debug=True)
[ "flask.render_template", "flask.Flask", "json.dumps", "flask.redirect", "pymongo.MongoClient", "flask.ext.pymongo.PyMongo" ]
[((323, 338), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (328, 338), False, 'from flask import Flask\n'), ((348, 360), 'flask.ext.pymongo.PyMongo', 'PyMongo', (['app'], {}), '(app)\n', (355, 360), False, 'from flask.ext.pymongo import PyMongo\n'), ((484, 515), 'pymongo.MongoClient', 'MongoClient', (['"""localhost"""', '(27017)'], {}), "('localhost', 27017)\n", (495, 515), False, 'from pymongo import MongoClient\n'), ((1395, 1430), 'json.dumps', 'json.dumps', (['country_sentiment_query'], {}), '(country_sentiment_query)\n', (1405, 1430), False, 'import json\n'), ((1455, 1493), 'json.dumps', 'json.dumps', (['average_sentiment_query[0]'], {}), '(average_sentiment_query[0])\n', (1465, 1493), False, 'import json\n'), ((1647, 1823), 'flask.render_template', 'render_template', (['"""analysis.html"""'], {'country_wise_sentiment': 'country_wise_sentiment', 'average_sentiment': 'average_sentiment', 'hashtag': 'hashtag', 'last_ten_tweets': 'last_ten_tweets'}), "('analysis.html', country_wise_sentiment=\n country_wise_sentiment, average_sentiment=average_sentiment, hashtag=\n hashtag, last_ten_tweets=last_ten_tweets)\n", (1662, 1823), False, 'from flask import request, render_template, redirect\n'), ((692, 721), 'flask.redirect', 'redirect', (["('/' + var)"], {'code': '(302)'}), "('/' + var, code=302)\n", (700, 721), False, 'from flask import request, render_template, redirect\n'), ((804, 867), 'flask.render_template', 'render_template', (['"""home.html"""'], {'distincthashtags': 'distincthashtags'}), "('home.html', distincthashtags=distincthashtags)\n", (819, 867), False, 'from flask import request, render_template, redirect\n'), ((1331, 1365), 'flask.render_template', 'render_template', (['"""fourohfour.html"""'], {}), "('fourohfour.html')\n", (1346, 1365), False, 'from flask import request, render_template, redirect\n')]
# -*- coding: utf-8 -*- """ Created on Wed Jul 28 13:31:06 2021 @author: user24 """ ''' Suchi wo nyuryoku only accepts integer end shuryou creates a graph as image ''' import matplotlib.pyplot as plt cnt = 0 Y = [] while True: ans = input("ζ•°ε€€γ‚’ε…₯εŠ›γ—γ¦γγ γ•γ„ \n-->") if ans == "end": break try: ans_int = int(ans) Y.append(ans_int) cnt += 1 except: print("ζ–‡ε­—εˆ—γ‚’θͺ­γ‚γͺい!数倀をε…₯γ‚Œγ¦γγ γ•γ„γ€‚") except Exception as error: print(error) X = range(0, cnt) plt.plot(X, Y, marker="o", color="r", linestyle="--") plt.savefig("test.png") # plt.xlabel("ε…₯εŠ›ι †η•ͺ") # Japanese char return erro plt.show() # ========================================================================== ''' '''
[ "matplotlib.pyplot.plot", "matplotlib.pyplot.savefig", "matplotlib.pyplot.show" ]
[((504, 557), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y'], {'marker': '"""o"""', 'color': '"""r"""', 'linestyle': '"""--"""'}), "(X, Y, marker='o', color='r', linestyle='--')\n", (512, 557), True, 'import matplotlib.pyplot as plt\n'), ((558, 581), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""test.png"""'], {}), "('test.png')\n", (569, 581), True, 'import matplotlib.pyplot as plt\n'), ((631, 641), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (639, 641), True, 'import matplotlib.pyplot as plt\n')]
from flanexport import FlanExport, timeout_after import os import ast try: from boto.sqs import connection from boto.sqs.message import Message except: pass class AWSSQS(FlanExport): def __init__(self, meta, config): name = self.__class__.__name__ super().__init__(name, meta, config) @timeout_after(10) def prepare(self): aws_access_key_id = self._getsetting('aws_access_key_id', checkenv=True) aws_secret_access_key = self._getsetting('aws_secret_access_key', checkenv=True) is_secure = self._getsetting('is_secure', erroronnone=False, defaultvalue=True) port = self._getsetting('port', erroronnone=False) proxy = self._getsetting('proxy', erroronnone=False) proxy_port = self._getsetting('proxy_port', erroronnone=False) proxy_user = self._getsetting('proxy_user', erroronnone=False) proxy_pass = self._getsetting('proxy_pass', erroronnone=False) region = self._getsetting('region', erroronnone=False) path = self._getsetting('region', defaultvalue="/") security_token = self._getsetting('security_token', erroronnone=False) validate_certs = self._getsetting('region', defaultvalue=True) profile_name = self._getsetting('profile_name', erroronnone=False) queue_name = self._getsetting('queue_name', erroronnone=True, defaultvalue="flan") sqs_message_attributes = self._getsetting('sqs_message_attributes', erroronnone=False) if sqs_message_attributes: self.sqs_message_attributes = ast.literal_eval(sqs_message_attributes) else: self.sqs_message_attributes = {} try: self.conn = connection.SQSConnection( aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, is_secure=is_secure, port=port, proxy=proxy, proxy_port=proxy_port, proxy_user=proxy_user, proxy_pass=proxy_pass, region=region, path=path, security_token=security_token, validate_certs=validate_certs, profile_name=profile_name ) self.sender = self.conn.create_queue(queue_name, self._getsetting('timeout')) except Exception as e: self.logerr('Flan->%s connection to %s:%s failed: %s' % (self.name, 
self.config["host"], self.config["port"], str(e))) os._exit(1) @timeout_after(10) def send(self, data): try: m = Message() m.message_attributes = self.sqs_message_attributes m.set_body(data) self.sender.write(m) except Exception as e: self.logerr('Flan->%s delivery failed: %s' % (self.name, str(e))) pass return @property def closed(self): return False @timeout_after(10) def close(self): try: self.conn.close() except: pass return
[ "boto.sqs.connection.SQSConnection", "flanexport.timeout_after", "ast.literal_eval", "boto.sqs.message.Message", "os._exit" ]
[((327, 344), 'flanexport.timeout_after', 'timeout_after', (['(10)'], {}), '(10)\n', (340, 344), False, 'from flanexport import FlanExport, timeout_after\n'), ((2625, 2642), 'flanexport.timeout_after', 'timeout_after', (['(10)'], {}), '(10)\n', (2638, 2642), False, 'from flanexport import FlanExport, timeout_after\n'), ((3038, 3055), 'flanexport.timeout_after', 'timeout_after', (['(10)'], {}), '(10)\n', (3051, 3055), False, 'from flanexport import FlanExport, timeout_after\n'), ((1570, 1610), 'ast.literal_eval', 'ast.literal_eval', (['sqs_message_attributes'], {}), '(sqs_message_attributes)\n', (1586, 1610), False, 'import ast\n'), ((1707, 2060), 'boto.sqs.connection.SQSConnection', 'connection.SQSConnection', ([], {'aws_access_key_id': 'aws_access_key_id', 'aws_secret_access_key': 'aws_secret_access_key', 'is_secure': 'is_secure', 'port': 'port', 'proxy': 'proxy', 'proxy_port': 'proxy_port', 'proxy_user': 'proxy_user', 'proxy_pass': 'proxy_pass', 'region': 'region', 'path': 'path', 'security_token': 'security_token', 'validate_certs': 'validate_certs', 'profile_name': 'profile_name'}), '(aws_access_key_id=aws_access_key_id,\n aws_secret_access_key=aws_secret_access_key, is_secure=is_secure, port=\n port, proxy=proxy, proxy_port=proxy_port, proxy_user=proxy_user,\n proxy_pass=proxy_pass, region=region, path=path, security_token=\n security_token, validate_certs=validate_certs, profile_name=profile_name)\n', (1731, 2060), False, 'from boto.sqs import connection\n'), ((2698, 2707), 'boto.sqs.message.Message', 'Message', ([], {}), '()\n', (2705, 2707), False, 'from boto.sqs.message import Message\n'), ((2607, 2618), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (2615, 2618), False, 'import os\n')]
import numpy as np import scipy.spatial as spatial def bilinear_interpolate(img, coords): """ Interpolates over every image channel http://en.wikipedia.org/wiki/Bilinear_interpolation :param img: max 3 channel image :param coords: 2 x _m_ array. 1st row = xcoords, 2nd row = ycoords :returns: array of interpolated pixels with same shape as coords """ int_coords = np.int32(coords) x0, y0 = int_coords dx, dy = coords - int_coords # 4 Neighour pixels q11 = img[y0, x0] q21 = img[y0, x0+1] q12 = img[y0+1, x0] q22 = img[y0+1, x0+1] btm = q21.T * dx + q11.T * (1 - dx) top = q22.T * dx + q12.T * (1 - dx) inter_pixel = top * dy + btm * (1 - dy) return inter_pixel.T def grid_coordinates(points): """ x,y grid coordinates within the ROI of supplied points :param points: points to generate grid coordinates :returns: array of (x, y) coordinates """ xmin = np.min(points[:, 0]) xmax = np.max(points[:, 0]) + 1 ymin = np.min(points[:, 1]) ymax = np.max(points[:, 1]) + 1 return np.asarray([(x, y) for y in range(ymin, ymax) for x in range(xmin, xmax)], np.uint32) def process_warp(src_img, result_img, tri_affines, dst_points, delaunay): """ Warp each triangle from the src_image only within the ROI of the destination image (points in dst_points). """ roi_coords = grid_coordinates(dst_points) # indices to vertices. 
-1 if pixel is not in any triangle roi_tri_indices = delaunay.find_simplex(roi_coords) for simplex_index in range(len(delaunay.simplices)): coords = roi_coords[roi_tri_indices == simplex_index] num_coords = len(coords) out_coords = np.dot(tri_affines[simplex_index], np.vstack((coords.T, np.ones(num_coords)))) x, y = coords.T result_img[y, x] = bilinear_interpolate(src_img, out_coords) return None def triangular_affine_matrices(vertices, src_points, dest_points): """ Calculate the affine transformation matrix for each triangle (x,y) vertex from dest_points to src_points :param vertices: array of triplet indices to corners of triangle :param src_points: array of [x, y] points to landmarks for source image :param dest_points: array of [x, y] points to landmarks for destination image :returns: 2 x 3 affine matrix transformation for a triangle """ ones = [1, 1, 1] for tri_indices in vertices: src_tri = np.vstack((src_points[tri_indices, :].T, ones)) dst_tri = np.vstack((dest_points[tri_indices, :].T, ones)) mat = np.dot(src_tri, np.linalg.inv(dst_tri))[:2, :] yield mat def warp_image(src_img, src_points, dest_points, dest_shape, dtype=np.uint8): # Resultant image will not have an alpha channel num_chans = 3 src_img = src_img[:, :, :3] rows, cols = dest_shape[:2] result_img = np.zeros((rows, cols, num_chans), dtype) delaunay = spatial.Delaunay(dest_points) tri_affines = np.asarray(list(triangular_affine_matrices( delaunay.simplices, src_points, dest_points))) process_warp(src_img, result_img, tri_affines, dest_points, delaunay) return result_img def test_local(): from functools import partial import cv2 import scipy.misc import locator import aligner from matplotlib import pyplot as plt # Load source image face_points_func = partial(locator.face_points, '../data') base_path = '../females/Screenshot 2015-03-04 17.11.12.png' src_path = '../females/BlDmB5QCYAAY8iw.jpg' src_img = cv2.imread(src_path) # Define control points for warps src_points = face_points_func(src_path) 
base_img = cv2.imread(base_path) base_points = face_points_func(base_path) size = (600, 500) src_img, src_points = aligner.resize_align(src_img, src_points, size) base_img, base_points = aligner.resize_align(base_img, base_points, size) result_points = locator.weighted_average_points(src_points, base_points, 0.2) # Perform transform dst_img1 = warp_image(src_img, src_points, result_points, size) dst_img2 = warp_image(base_img, base_points, result_points, size) import blender ave = blender.weighted_average(dst_img1, dst_img2, 0.6) mask = blender.mask_from_points(size, result_points) blended_img = blender.poisson_blend(dst_img1, dst_img2, mask) plt.subplot(2, 2, 1) plt.imshow(ave) plt.subplot(2, 2, 2) plt.imshow(dst_img1) plt.subplot(2, 2, 3) plt.imshow(dst_img2) plt.subplot(2, 2, 4) plt.imshow(blended_img) plt.show() if __name__ == "__main__": test_local()
[ "matplotlib.pyplot.imshow", "numpy.ones", "numpy.int32", "numpy.min", "numpy.max", "matplotlib.pyplot.subplot", "numpy.zeros", "numpy.linalg.inv", "functools.partial", "locator.weighted_average_points", "blender.weighted_average", "scipy.spatial.Delaunay", "numpy.vstack", "blender.mask_fro...
[((381, 397), 'numpy.int32', 'np.int32', (['coords'], {}), '(coords)\n', (389, 397), True, 'import numpy as np\n'), ((906, 926), 'numpy.min', 'np.min', (['points[:, 0]'], {}), '(points[:, 0])\n', (912, 926), True, 'import numpy as np\n'), ((970, 990), 'numpy.min', 'np.min', (['points[:, 1]'], {}), '(points[:, 1])\n', (976, 990), True, 'import numpy as np\n'), ((2801, 2841), 'numpy.zeros', 'np.zeros', (['(rows, cols, num_chans)', 'dtype'], {}), '((rows, cols, num_chans), dtype)\n', (2809, 2841), True, 'import numpy as np\n'), ((2856, 2885), 'scipy.spatial.Delaunay', 'spatial.Delaunay', (['dest_points'], {}), '(dest_points)\n', (2872, 2885), True, 'import scipy.spatial as spatial\n'), ((3292, 3331), 'functools.partial', 'partial', (['locator.face_points', '"""../data"""'], {}), "(locator.face_points, '../data')\n", (3299, 3331), False, 'from functools import partial\n'), ((3452, 3472), 'cv2.imread', 'cv2.imread', (['src_path'], {}), '(src_path)\n', (3462, 3472), False, 'import cv2\n'), ((3565, 3586), 'cv2.imread', 'cv2.imread', (['base_path'], {}), '(base_path)\n', (3575, 3586), False, 'import cv2\n'), ((3676, 3723), 'aligner.resize_align', 'aligner.resize_align', (['src_img', 'src_points', 'size'], {}), '(src_img, src_points, size)\n', (3696, 3723), False, 'import aligner\n'), ((3750, 3799), 'aligner.resize_align', 'aligner.resize_align', (['base_img', 'base_points', 'size'], {}), '(base_img, base_points, size)\n', (3770, 3799), False, 'import aligner\n'), ((3818, 3879), 'locator.weighted_average_points', 'locator.weighted_average_points', (['src_points', 'base_points', '(0.2)'], {}), '(src_points, base_points, 0.2)\n', (3849, 3879), False, 'import locator\n'), ((4063, 4112), 'blender.weighted_average', 'blender.weighted_average', (['dst_img1', 'dst_img2', '(0.6)'], {}), '(dst_img1, dst_img2, 0.6)\n', (4087, 4112), False, 'import blender\n'), ((4122, 4167), 'blender.mask_from_points', 'blender.mask_from_points', (['size', 'result_points'], {}), '(size, 
result_points)\n', (4146, 4167), False, 'import blender\n'), ((4184, 4231), 'blender.poisson_blend', 'blender.poisson_blend', (['dst_img1', 'dst_img2', 'mask'], {}), '(dst_img1, dst_img2, mask)\n', (4205, 4231), False, 'import blender\n'), ((4235, 4255), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (4246, 4255), True, 'from matplotlib import pyplot as plt\n'), ((4258, 4273), 'matplotlib.pyplot.imshow', 'plt.imshow', (['ave'], {}), '(ave)\n', (4268, 4273), True, 'from matplotlib import pyplot as plt\n'), ((4276, 4296), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (4287, 4296), True, 'from matplotlib import pyplot as plt\n'), ((4299, 4319), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dst_img1'], {}), '(dst_img1)\n', (4309, 4319), True, 'from matplotlib import pyplot as plt\n'), ((4322, 4342), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (4333, 4342), True, 'from matplotlib import pyplot as plt\n'), ((4345, 4365), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dst_img2'], {}), '(dst_img2)\n', (4355, 4365), True, 'from matplotlib import pyplot as plt\n'), ((4368, 4388), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (4379, 4388), True, 'from matplotlib import pyplot as plt\n'), ((4392, 4415), 'matplotlib.pyplot.imshow', 'plt.imshow', (['blended_img'], {}), '(blended_img)\n', (4402, 4415), True, 'from matplotlib import pyplot as plt\n'), ((4418, 4428), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4426, 4428), True, 'from matplotlib import pyplot as plt\n'), ((936, 956), 'numpy.max', 'np.max', (['points[:, 0]'], {}), '(points[:, 0])\n', (942, 956), True, 'import numpy as np\n'), ((1000, 1020), 'numpy.max', 'np.max', (['points[:, 1]'], {}), '(points[:, 1])\n', (1006, 1020), True, 'import numpy as np\n'), ((2397, 2444), 'numpy.vstack', 'np.vstack', (['(src_points[tri_indices, :].T, ones)'], {}), 
'((src_points[tri_indices, :].T, ones))\n', (2406, 2444), True, 'import numpy as np\n'), ((2459, 2507), 'numpy.vstack', 'np.vstack', (['(dest_points[tri_indices, :].T, ones)'], {}), '((dest_points[tri_indices, :].T, ones))\n', (2468, 2507), True, 'import numpy as np\n'), ((2534, 2556), 'numpy.linalg.inv', 'np.linalg.inv', (['dst_tri'], {}), '(dst_tri)\n', (2547, 2556), True, 'import numpy as np\n'), ((1737, 1756), 'numpy.ones', 'np.ones', (['num_coords'], {}), '(num_coords)\n', (1744, 1756), True, 'import numpy as np\n')]
from setuptools import setup, find_packages from distutils.core import setup from Cython.Build import cythonize setup(name="mcmc", ext_modules=cythonize("./src/koleksyon/mcmc.pyx"))
[ "Cython.Build.cythonize" ]
[((144, 181), 'Cython.Build.cythonize', 'cythonize', (['"""./src/koleksyon/mcmc.pyx"""'], {}), "('./src/koleksyon/mcmc.pyx')\n", (153, 181), False, 'from Cython.Build import cythonize\n')]
import face_embedding import argparse import cv2 import numpy as np parser = argparse.ArgumentParser(description='face model test') # general parser.add_argument('--image-size', default='112,112', help='') parser.add_argument('--model', default='../models/model-r34-amf/model,0', help='path to load model.') parser.add_argument('--gpu', default=None, type=int, help='gpu id') parser.add_argument('--det', default=2, type=int, help='mtcnn option, 2 means using R+O, else using O') parser.add_argument('--flip', default=0, type=int, help='whether do lr flip aug') parser.add_argument('--threshold', default=1.24, type=float, help='ver dist threshold') args = parser.parse_args() if __name__ == '__main__': model = face_embedding.FaceModel(args) img = cv2.imread('/Users/aub3/1.jpg') f1 = model.get_feature(img) img = cv2.imread('/Users/aub3/2.jpg') f2 = model.get_feature(img) img = cv2.imread('/Users/aub3/3.jpg') f3 = model.get_feature(img) dist1 = np.sum(np.square(f1-f2)) dist2 = np.sum(np.square(f1-f3)) print(dist1,dist2)
[ "face_embedding.FaceModel", "cv2.imread", "argparse.ArgumentParser", "numpy.square" ]
[((78, 132), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""face model test"""'}), "(description='face model test')\n", (101, 132), False, 'import argparse\n'), ((718, 748), 'face_embedding.FaceModel', 'face_embedding.FaceModel', (['args'], {}), '(args)\n', (742, 748), False, 'import face_embedding\n'), ((759, 790), 'cv2.imread', 'cv2.imread', (['"""/Users/aub3/1.jpg"""'], {}), "('/Users/aub3/1.jpg')\n", (769, 790), False, 'import cv2\n'), ((833, 864), 'cv2.imread', 'cv2.imread', (['"""/Users/aub3/2.jpg"""'], {}), "('/Users/aub3/2.jpg')\n", (843, 864), False, 'import cv2\n'), ((907, 938), 'cv2.imread', 'cv2.imread', (['"""/Users/aub3/3.jpg"""'], {}), "('/Users/aub3/3.jpg')\n", (917, 938), False, 'import cv2\n'), ((990, 1008), 'numpy.square', 'np.square', (['(f1 - f2)'], {}), '(f1 - f2)\n', (999, 1008), True, 'import numpy as np\n'), ((1027, 1045), 'numpy.square', 'np.square', (['(f1 - f3)'], {}), '(f1 - f3)\n', (1036, 1045), True, 'import numpy as np\n')]