blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
616d099190bfe442c95489b5d0e8adabc3d94ba5 | 2bd41f2d0d4ac0ffda3fd83504c410c59360e17f | /api_basic/urls.py | 5e7ddb73631e56b10be9dfa123207215f327f93b | [] | no_license | bsperezb/Django-API-1 | 6e34316abc4447cc90e90e52126497142e0a88b2 | 5149b1b1553244bdc5b2c4542532ab19083d2b7f | refs/heads/master | 2023-06-19T10:55:36.531441 | 2021-07-18T07:01:05 | 2021-07-18T07:01:05 | 384,979,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py |
from django.urls import path, include
from .views import article_list, article_detail, ArticleAPIView, ArticleDetails, GenericAPIView, ArticleViewSet, GenericAPIViewSet,ArticleModleViewSet
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register('article', ArticleViewSet, basename='article')
router2 = DefaultRouter()
router2.register('article', GenericAPIViewSet, basename='article')
ArticleModleViewSet
router3 = DefaultRouter()
router3.register('article', ArticleModleViewSet, basename='article')
urlpatterns = [
path('modelviewset/', include(router3.urls)),
path('modelviewset/<int:pk>', include(router3.urls)),
path('genericviewset/', include(router2.urls)),
path('genericviewset/<int:pk>', include(router2.urls)),
path('viewset/', include(router.urls)),
path('viewset/<int:pk>/', include(router.urls)),
# path('article/', article_list),
path('article/', ArticleAPIView.as_view()),
# path('detail/<int:pk>/', article_detail)
path('detail/<int:id>/', ArticleDetails.as_view()),
path('generic/article/<int:id>/', GenericAPIView.as_view()),
] | [
"brayanperezbuitrago@gmail.com"
] | brayanperezbuitrago@gmail.com |
cc45974788b4c903867236a1b7f7e11984d3e59a | 19cec240505e27546cb9b10104ecb16cc2454702 | /linux/lang/python/flume/flmos.py | 1e2f7237d84356a384547d47f79bf2e46c0c1cdc | [] | no_license | imosts/flume | 1a9b746c5f080c826c1f316a8008d8ea1b145a89 | a17b987c5adaa13befb0fd74ac400c8edbe62ef5 | refs/heads/master | 2021-01-10T09:43:03.931167 | 2016-03-09T12:09:53 | 2016-03-09T12:09:53 | 53,101,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45,843 | py |
"""High level wrappers for wrapping auto-swig-generated code."""
#
# still import the fli._* stuff with this.
#
import flume_internal as fli
import flume
import posix
import errno
import os
import struct
##-----------------------------------------------------------------------
def raise_err (s=None):
    """Raise an error based on what the global Flume Error flag
    reported was the error.

    's' is an optional context string folded into the exception's
    message.  The flume errno (flume.get_errno()) is mapped onto the
    closest python or flume exception class; anything unrecognized
    falls back to OSError when a unix errno is available, else to a
    generic Exception."""
    if s is None:
        s = ""
    if flume.get_errno () == flume.ENOMEM:
        raise MemoryError, "out of memory in flumeclient code"
    elif flume.get_errno () == flume.ENULL:
        raise ValueError, "NULL value encountered"
    elif flume.get_errno () == flume.EPERM:
        # For EPERM, join the caller's context string with the reference
        # monitor's own error string, when either is available.
        ss = []
        if (s):
            ss.append (s)
        s2 = flume.get_errstr ()
        if (s2):
            ss.append (s2)
        if len(ss) > 0:
            s = '; '.join (ss)
        else:
            s = "permission denied"
        raise flume.PermissionError, s
    elif flume.get_errno () == flume.EINVAL:
        raise ValueError, "Unexpected valued encountered: '%s'" %s
    elif flume.get_errno () == flume.ERANGE:
        raise IndexError, "Value out of range"
    elif flume.get_errno () == flume.EHANDLE:
        raise flume.HandleError, "Could not parse handle: '%s'" % s
    elif flume.get_errno () == flume.ECAPABILITY:
        raise flume.CapabilityError, "Expected a capability"
    elif flume.get_errno () == flume.EROFS:
        raise flume.ReadOnlyError, "Attempted write to read-only FS"
    elif flume.get_errno () == flume.EEXPIRED:
        raise flume.ExpirationError, "expiration encountered"
    elif flume.get_errno () == flume.EATTR:
        raise flume.ExtattrError, "Extended attributes error encountered"
    elif flume.get_errno () == flume.EPERSISTENCE:
        raise flume.PersistenceError, "Persistence error encountered: %s" % s
    else:
        # No flume-specific mapping: fall back to the underlying unix
        # errno if the RM recorded one.
        ec = flume.sysErrno ()
        if ec is not None:
            raise OSError (ec, s)
        else:
            raise Exception, ("Unhandled flume error (%s) on %s" % \
                              (str (flume.get_errno ()), s))
##-----------------------------------------------------------------------
class Callable:
    """Wrap a callable as an instance whose __call__ attribute is the
    wrapped function; used below (e.g. Label.unpack) to emulate a
    static method on these old-style classes."""
    def __init__(self, anycallable):
        # Store the callable directly in the instance's __call__ slot;
        # on old-style classes the instance attribute is honored when
        # the wrapper is invoked.
        self.__call__ = anycallable
##-----------------------------------------------------------------------
def orderBits (bitset, order):
    """Given a bitset and a canonical ordering, return the set bits in the
    given order."""
    # Keep only those bits of 'order' that are present in 'bitset',
    # preserving the caller's canonical sequence.
    return [ bit for bit in order if bit & bitset != 0 ]
##-----------------------------------------------------------------------
class Argv (fli._Argv):
    """A wrapper around char *argv[] things that you'll see passed to
    execve and so on."""
    def __init__ (self, l):
        # None means "no argv at all"; the swig layer encodes that as
        # a length of -1.
        if l is None:
            fli._Argv.__init__ (self, -1)
        elif type(l) is list:
            self.init_from_list (l)
        else:
            self.init_from_dict (l)
    def init_from_dict (self, d):
        # Flatten the dict into environment-style KEY=VALUE strings.
        pairs = [ str(k) + "=" + str (d[k]) for k in d.keys () ]
        self.init_from_list (pairs)
    def init_from_list (self, l):
        fli._Argv.__init__ (self, len (l))
        for i, arg in enumerate (l):
            self._set (i, arg)
##-----------------------------------------------------------------------
class RawData (fli._RawData):
    """A wrapper class around raw XDR data."""
    def __init__ (self, dat = None, armored = False):
        fli._RawData.__init__ (self)
        if dat is None:
            # Empty RawData; nothing further to initialize.
            return
        if type (dat) is str:
            if armored:
                self.dearmor (dat)
            elif self._init (dat) < 0:
                raise_err ("error in RawData allocation")
        elif isinstance (dat, fli._RawData):
            if self._copy (dat) < 0:
                raise_err ("failed to copy RawData in constructor")
        else:
            raise TypeError ("wrong type to RawData()")
    def __eq__ (self, x):
        # Only comparable against other raw-data objects.
        if not isinstance (x, fli._RawData):
            return False
        return self._eq (x)
    def dearmor (self, s):
        """Decode the ASCII-armored string 's' into this object and
        return self (for chaining)."""
        if type (s) is not str:
            raise TypeError ("Can only dearmor a string")
        if self._dearmor (s) < 0:
            raise_err ("Cannot dearmor given string")
        return self
##-----------------------------------------------------------------------
class Token (fli._Token):
    """A wrapper class around the autogenerated Swig Tokens."""
    def __init__ (self, s):
        fli._Token.__init__ (self, s)
    def dearmor (self, s):
        # A negative return from the swig layer signals a decode failure.
        rc = fli._Token.dearmor (self, s)
        if rc < 0:
            raise_err ("dearmor Token")
##-----------------------------------------------------------------------
class Handle (fli._Handle):
"""A wrapper class around an autogenerated-Swig handle, to
iterface with some Python-specific features."""
# default value can't be None, since that's an error result
# from some syscalls
def __init__(self, x = 0, nm = None):
fli._Handle.__init__ (self, 0, None)
if x is None:
raise_err ("handle failure")
elif isinstance (x, fli._Handle):
self._init (x.val (), x.name ())
elif type(x) is long or type(x) is int:
self._init (x, nm)
elif isinstance (x, fli._RawData):
self.fromRaw (x)
elif type (x) is str:
h = Handle ()
h.dearmor32(x)
self._init (h.val (), nm)
else:
raise TypeError, "Bad arguments to constructor"
def fromRaw (self, x):
if self._from_raw (x) < 0:
raise_err ("Cannot convert handle from raw repr")
def toRaw (self):
return RawData (self._to_raw ())
def thaw (self):
return Label (fli._thaw_handle (self))
def __str__ (self):
"""Convert a handle to a human-readable string."""
if self.name () is not None:
return self.name () + " (0x%016x)" % self.val ()
else:
return "0x%016x" % self.val ()
def dearmor32(self, s):
"""Dearmor a given handle."""
rc = self._dearmor32 (s)
if rc < 0:
raise_err (flume.get_errstr ())
def toCapability (self, opt):
return Handle (self._to_capability (opt))
def toCapabilities (self):
# Figure out what type of handle this is, and return a list of
# the corresponding capabilities
m = ((flume.HANDLE_OPT_GROUP, [flume.CAPABILITY_GROUP_SELECT]),
(flume.HANDLE_OPT_DEFAULT_ADD, [flume.CAPABILITY_SUBTRACT]),
(flume.HANDLE_OPT_DEFAULT_SUBTRACT, [flume.CAPABILITY_ADD]),
(0, [flume.CAPABILITY_ADD, flume.CAPABILITY_SUBTRACT]))
all_opts = reduce (lambda x,y: x|y, [z[0] for z in m])
for opt, caps in m:
if (self.prefix () & all_opts) == opt:
return [self.toCapability (cap) for cap in caps]
raise ValueError ("%s does not look like a valid capability" % (self,))
def toTag (self):
"""Return a Handle with the Capability bits cleared"""
# If it's a wrap capability, return self.
if ((self.prefix () & (flume.CAPABILITY_GROUP_SELECT | flume.HANDLE_OPT_IDENTIFIER)) ==
flume.CAPABILITY_GROUP_SELECT | flume.HANDLE_OPT_IDENTIFIER):
return self
# otherwise, clear the capability bits
xhandle = Handle.mk ( ~(flume.CAPABILITY_ADD |
flume.CAPABILITY_SUBTRACT |
flume.CAPABILITY_GROUP_SELECT) & self.prefix (),
self.base ())
return Handle (xhandle)
def __hash__ (self):
return hash (self.val ())
def __eq__ (self, other):
return self.val () == other.val ()
def __ne__ (self, other):
return not self.__eq__ (other)
##-----------------------------------------------------------------------
class Int (fli._Int):
    """A wrapper class around an Int, for the purposes of making an
    array of ints..."""
    def __init__ (self, x = 0):
        if x is None:
            raise_err ("int failure")
        # Accept another wrapped Int, a python integer, or a numeric
        # string.
        if isinstance (x, fli._Int):
            fli._Int.__init__ (self, x.val ())
        elif type (x) is long or type (x) is int:
            fli._Int.__init__ (self, x)
        elif type (x) is str:
            fli._Int.__init__ (self, int (x))
        else:
            raise TypeError ("Bad arguments to constructor")
    def __str__ (self):
        return "%d" % self.val ()
    def to_int (self):
        """Return the wrapped value as a plain python integer."""
        return self.val ()
##-----------------------------------------------------------------------
class _PyObjSetWrapperIter:
"""A simple iterator for _PyObjSetWrappers."""
def __init__ (self, osw):
self._osw = osw
self._i = 0
def next (self):
i = self._i
if i >= len (self._osw):
raise StopError
self._i =+ 1
return self._osw.get (i)
##-----------------------------------------------------------------------
class _PyObjSetWrapper:
    """A layer that goes between python objects and C++ Swig objects
    derived from _ObjSetWrapper<>.

    Subclasses supply the concrete python wrapper class ('py_class'),
    the swig base class ('swig_class'), the per-element python class
    ('py_obj_class') and a human-readable 'name' used in error
    messages."""
    def __init__ (self, obj, py_class, swig_class, py_obj_class, name):
        self._py_class = py_class
        self._swig_class = swig_class
        self._py_obj_class = py_obj_class
        self._class_name = name
        self._swig_class.__init__ (self)
        if obj is None:
            raise_err ("%s allocation failure" % name)
        elif type (obj) is self._swig_class:
            self.copy (obj)
        elif isinstance (obj, fli._RawData):
            self.fromRaw (obj)
        elif type (obj) is set:
            self.fromList (list (obj))
        elif type (obj) is list or type (obj) is tuple:
            self.fromList (obj)
        elif type (obj) is int:
            # A non-negative int pre-sizes the set; a negative int is
            # silently ignored, leaving the set empty.
            if obj >= 0:
                self.resize (obj)
        else:
            raise TypeError ("bad argument to constructor")
    def __len__ (self):
        return self._size ()
    def size (self):
        return self._size ()
    def clone (self):
        """Return a deep copy wrapped in the concrete python class."""
        return self._py_class (self._clone ())
    def get (self, i):
        """Fetch element i, wrapped in the per-element python class."""
        return self._py_obj_class (self._get (i))
    def copy (self, i):
        if self._copy (i) < 0:
            raise_err ("%s::copy" % self._class_name)
    def resize (self, i):
        if self._resize (i) < 0:
            raise_err ("%s::resize" % self._class_name)
    def set (self, i, l):
        if self._set (i, l) < 0:
            raise_err ("%s::set" % self._class_name)
    def fromList (self, l):
        """Replace the contents with the elements of python list 'l'."""
        self.resize (len (l))
        for i, o in enumerate (l):
            self.set (i, o)
    def toList (self):
        """Return the contents as a python list of wrapped elements."""
        return [ self.get (i) for i in range (len(self)) ]
    def fromRaw (self, d):
        if self._from_raw (d) < 0:
            raise_err ("Error converting object from raw data")
    def toRaw (self):
        return RawData (self._to_raw ())
    def __str__ (self):
        return str ([ str(x) for x in self.toList() ])
    def __getitem__ (self, key):
        l = self.toList ()
        return l[key]
    def __setitem__ (self, key, value):
        l = self.toList ()
        l[key] = value
        self.fromList (l)
    def __delitem__ (self, key):
        l = self.toList ()
        del (l[key])
        self.fromList (l)
    def __iter__ (self):
        # BUG FIX: __iter__ was defined twice; the first definition
        # (which returned a broken _PyObjSetWrapperIter) was silently
        # shadowed by this one.  Keep only the effective definition.
        return iter (self.toList ())
    def __contains__ (self, item):
        return self.toList().__contains__(item)
    def _add_ready_list (self, l):
        """Normalize the argument to +/- operators into a list of
        per-element python objects, or raise TypeError."""
        if isinstance(l, self._py_obj_class):
            l = [l]
        elif isinstance (l, self._py_class):
            l = l.toList ()
        elif type (l) is list:
            for i in l:
                if not isinstance (i, self._py_obj_class):
                    raise TypeError ("expected a list of type: %s" % \
                                     str (self._py_obj_class))
        else:
            # BUG FIX: the format string used to read "%s + $s", which
            # raised TypeError ("not all arguments converted") instead
            # of producing this message.
            raise TypeError ("cannot add type to list: %s + %s" % \
                             (str (self), str (l)))
        return l
    def __add__ (self, l):
        l = self._add_ready_list (l)
        return (self._py_class (self.toList () + l))
    def __iadd__ (self, l):
        l = self._add_ready_list (l)
        self.fromList (self.toList () + l)
        return self
    def __sub__ (self, l):
        s = set (self._add_ready_list (l))
        return (self._py_class (set(self.toList ()) - s))
    def __isub__ (self, l):
        s = set (self._add_ready_list (l))
        self.fromList (set(self.toList ()) - s)
        return self
##-----------------------------------------------------------------------
class IntArr (_PyObjSetWrapper, fli._IntArr):
    """An array of wrapped Int objects."""
    def __init__ (self, x = 0):
        _PyObjSetWrapper.__init__ (self, x, IntArr, fli._IntArr, Int, "IntArr")
    def set (self, i, x):
        # Promote bare python integers to wrapped Int objects before
        # handing them to the generic set path.
        if type (x) is int or type (x) is long:
            x = Int (x)
        _PyObjSetWrapper.set (self, i, x)
    def toListV (self):
        """Return the contents as plain python integers."""
        return [ e.val() for e in self.toList() ]
##-----------------------------------------------------------------------
class Label (_PyObjSetWrapper, fli._Label):
    """A wrapper class around an autogenerated-Swig label, to
    interface with some Python-specific features."""
    # default value can't be None, since that's an error result from
    # _get_label()
    def __init__(self, x = 0):
        _PyObjSetWrapper.__init__ (self, x, Label, fli._Label,
                                   Handle, "Label")
    def uniquify (self, seq):
        """ Preserves order """
        # set.add() returns None, so 'not seen.add(x)' is always true;
        # its only job is to record x as seen, while 'x not in seen'
        # filters duplicates in one pass.
        seen = set()
        return [x for x in seq if x not in seen and not seen.add(x)]
    # Overload fromList and fromRaw to remove duplicates
    def fromList (self, l):
        l2 = self.uniquify (l)
        _PyObjSetWrapper.fromList (self, l2)
    def fromRaw (self, d):
        _PyObjSetWrapper.fromRaw (self, d)
        l = self.uniquify (self.toList ())
        self.fromList (l)
    # Comparison operators are plain set comparisons over the handle
    # values (subset/superset semantics).
    def __lt__ (self, b):
        return self.toSet().__lt__(b.toSet())
    def __le__ (self, b):
        return self.toSet().__le__(b.toSet())
    def __eq__ (self, b):
        return self.toSet().__eq__(b.toSet())
    def __ne__(self, b):
        return self.toSet().__ne__(b.toSet())
    def __gt__ (self, b):
        return self.toSet().__gt__(b.toSet())
    def __ge__ (self, b):
        return self.toSet().__ge__(b.toSet())
    def subsetOf (self, rhs, typ):
        # Ask the RM whether this label is a subset of the labels in
        # 'rhs' (wrapped into a LabelVec), per label type 'typ'.
        r = fli._subsetOf (self, LabelVec (rhs), typ)
        if r < 0:
            raise_err ()
        if r == 0:
            return False
        return True
    def freeze (self):
        # Freeze this label into a single Handle.
        return Handle (fli._Label_freeze (self))
    def __str__(self):
        return "[" + ", ".join ([str(s) for s in self.toList ()]) + "]"
    def toListV (self):
        # List of raw integer handle values.
        return [ x.val() for x in self.toList() ]
    def toSet (self):
        return set (self.toListV())
    def __contains__(self, h):
        # Accept either a raw integer handle value or a Handle object.
        if type(h) is int or type(h) is long:
            v = h
        else:
            v = h.val()
        return v in self.toSet ()
    def pack (self):
        # Serialize as sorted, network-byte-order, unsigned 64-bit values.
        l = self.toListV ()
        l.sort ()
        format = '!' + 'Q' * len (l)
        l.insert (0, format)
        return struct.pack (*l)
    def unpack (s):
        # Static method (see 'unpack = Callable (unpack)' below):
        # inverse of pack(); 's' must be a whole number of 8-byte words.
        if (len (s) % struct.calcsize ('Q')) > 0:
            raise AssertionError, ('serialized label, incorrect '
                                   'size %d is not a multiple of %d'
                                   % (len (s), struct.calcsize ('Q')))
        format = '!' + 'Q' * (len(s)/struct.calcsize ('Q'))
        v = struct.unpack (format, s)
        return Label ([Handle (x) for x in v])
    unpack = Callable (unpack)
    def hashkey (self):
        """ Returns a canonicalized string that can be used as a hash
        key.  We don't implement __hash__ because a labelset is
        mutable and implementing __hash__ implies that the underlying
        object is immutable."""
        values = self.toList ()
        values.sort (key=lambda h: h.val ())
        return "[" + ", ".join ([str(s) for s in values]) + "]"
##-----------------------------------------------------------------------
class LabelVec (fli._LabelVec):
    """A wrapper class used as an input to subsetOf operations."""
    def __init__ (self, labels):
        # Size the underlying vector first, then install each label.
        fli._LabelVec.__init__ (self, len (labels))
        for i, lab in enumerate (labels):
            self._set (i, lab)
##-----------------------------------------------------------------------
class Filter (fli._Filter):
"""A wrapper class around filters."""
def __init__ (self, f):
fli._Filter.__init__ (self)
if type (f) is fli._Filter:
if self.copy (f) < 0:
raise_err ("copy Filter")
else:
raise TypeError, "bad argument to constructor"
def find (self):
return Label (fli._Filter.find (self))
def replace (self):
return Label (fli._Fitler.replace (self))
##-----------------------------------------------------------------------
class Capset (Label):
    """A type of label specifically designed for storing capabilities."""
    def __init__ (self, l, priv):
        # Convert each handle in 'l' to a capability carrying 'priv'.
        caps = [ h.toCapability (priv) for h in l.toList () ]
        Label.__init__ (self, caps)
##-----------------------------------------------------------------------
class Endpoint (fli._Endpoint):
"""A high-level wrapper to the _Endpoint autogenerated by Swig."""
def __init__ (self, ep = -1, I = None, S = None):
fli._Endpoint.__init__(self)
if ep is None:
raise_err ("Endpoint failure")
elif type(ep) is fli._Endpoint:
if self.copy (ep) < 0:
raise_err ("Endpoint copy failure")
elif type(ep) is int and ep == -1:
if I is not None:
fli._Endpoint._set_I (self, I)
if S is not None:
fli._Endpoint._set_S (self, S)
else:
raise TypeError, "bad argument to constructor"
def get_S (self):
return Label (self._get_S())
def get_I (self):
return Label (self._get_I())
def get_desc (self):
ret = self._get_desc ()
if ret is None:
ret = "<none>"
else:
ret = str (ret)
return ret
def toDict (self):
d = { 'readable' : bool (self.getReadable ()),
'writable' : bool (self.getWritable ()),
'mutable' : bool (self.getMutable ()),
'I' : self.get_I(),
'S' : self.get_S(),
'desc' : self.get_desc ()
}
return d
def attrStr (self):
l = [ ('r', self.getReadable ()),
('w', self.getWritable ()),
('m', self.getMutable ()) ]
r = ""
for t in l:
if t[1]:
r += t[0]
return r
def prettyPrint (self, prfx = ""):
pairs = [ ( 'S: ' , self.get_S () ),
( 'I: ' , self.get_I () ),
( 'attrs:' , self.attrStr () ),
( 'desc: ' , self.get_desc () ) ]
# XXX allow endpoints to be named, too
lines = [ "Endpoint {" ] + \
[ " %s %s" % (t[0], t[1]) for t in pairs ] + \
[ "}" ]
return '\n'.join ( [ "%s%s" % (prfx, l) for l in lines ] )
def __str__(self):
return str (dict ([ (k,str (v)) for (k,v) in self.toDict().items()]))
def getMutable (self):
return bool (self._get_mutable ())
def getWritable (self):
return bool (self._get_writable ())
def getReadable (self):
return bool (self._get_readable ())
def setReadable (self, b):
self._set_reable (int (b))
def setWritable (self, b):
self._set_writable (self, int (b))
def setMutable (self, b):
self._set_mutable (self, int (b))
##-----------------------------------------------------------------------
class EndpointSet (_PyObjSetWrapper, fli._EndpointSet):
    """A high-level wrapper around Endpoint sets autogenerated by Swig."""
    def __init__ (self, eps = 0):
        _PyObjSetWrapper.__init__ (self, eps,
                                   EndpointSet,
                                   fli._EndpointSet,
                                   Endpoint,
                                   "EndpointSet")
    def prettyPrint (self):
        """Render every contained endpoint, indented, inside an
        EndpointSet{} wrapper, followed by a trailing newline."""
        parts = [ "EndpointSet {" ]
        parts += [ ep.prettyPrint (" ") for ep in self.toList () ]
        parts += [ "}", '' ]
        return '\n'.join (parts)
##-----------------------------------------------------------------------
class LabelSet (fli._LabelSet):
    """A high-level wrapper to the LabelSet autogenerated by Swig."""
    # When make label changes from this LabelSet, do it in this
    # order. Alex suggests this order.
    change_order = [flume.LABEL_O, flume.LABEL_I, flume.LABEL_S]
    def __init__(self, ls = -1, I = None, S = None, O = None, armoredraw=False):
        """Construct from a dict of labels, another labelset, raw XDR
        data, a filename-encoded (or, with armoredraw=True, an armored)
        string, or (with ls left at its -1 sentinel) the individual I,
        S and O labels."""
        fli._LabelSet.__init__(self)
        if ls is None:
            raise_err ("LabelSet failure")
        elif type(ls) is dict:
            # Install whichever of the "I"/"S"/"O" keys are present.
            for l in [ ("I", fli._LabelSet._set_I),
                       ("S", fli._LabelSet._set_S),
                       ("O", fli._LabelSet._set_O) ] :
                try:
                    f = l[1]
                    f (self, ls[l[0]])
                except KeyError:
                    pass
        elif isinstance (ls, fli._LabelSet):
            self.copy (ls)
        elif isinstance (ls, fli._RawData):
            self.fromRaw (ls)
        elif type(ls) is int and ls == -1:
            if I is not None:
                fli._LabelSet._set_I (self, I)
            if S is not None:
                fli._LabelSet._set_S (self, S)
            if O is not None:
                fli._LabelSet._set_O (self, O)
        elif type (ls) is type ('a'):
            if armoredraw:
                rd = RawData (ls, True)
                self.fromRaw (rd)
            else:
                ls = fli._filename_to_labelset (ls)
                self.copy (ls)
        else:
            raise TypeError ("bad argument to constructor")
    # Information-flow comparisons: note that S and I compare in
    # opposite directions by design.
    def __le__ (self, b):
        return self.get_S() <= b.get_S() and b.get_I() <= self.get_I()
    def __ge__ (self, b):
        return self.get_S() >= b.get_S() and b.get_I() >= self.get_I()
    def __eq__ (self, b):
        return self.get_S() == b.get_S() and b.get_I() == self.get_I()
    def __ne__ (self, b):
        return self.get_S() != b.get_S() or b.get_I() != self.get_I()
    def __gt__ (self, b):
        return self.get_S() > b.get_S() and b.get_I() > self.get_I()
    def __lt__ (self, b):
        return self.get_S() < b.get_S() and b.get_I() < self.get_I()
    def toDict (self):
        return { "I" : self.get_I (),
                 "S" : self.get_S (),
                 "O" : self.get_O () }
    def toDictEnumKeys (self):
        return { flume.LABEL_I : self.get_I (),
                 flume.LABEL_S : self.get_S (),
                 flume.LABEL_O : self.get_O () }
    def toLabelChangeList (self, which = flume.LABEL_ALL):
        """Turn this labelset into a list of LabelChanges, restricted to
        the label types in 'which', in the canonical change order."""
        ret = []
        d = self.toDictEnumKeys ()
        for i in orderBits (which, self.change_order):
            if d[i] is not None:
                ret += [ LabelChange (lab = d[i], which = i) ]
        return ret
    def apply (self):
        """Apply all of this labelset's labels to the calling process."""
        LabelChangeSet (self.toLabelChangeList ()).make ()
    def apply_ep (self, fd):
        """Apply this labelset's I and S labels to the endpoint 'fd'."""
        LabelChangeSet (self.toLabelChangeList (flume.LABEL_NO_O)).make_ep (fd)
    def set_S (self, l=None):
        if self._set_S (l) < 0:
            raise_err ("label = %s" % l)
    def set_I (self, l=None):
        if self._set_I (l) < 0:
            raise_err ("label = %s" % l)
    def set_O (self, l=None):
        if self._set_O (l) < 0:
            raise_err ("label = %s" % l)
    def set_label (self, typ, l=None):
        # Dispatch on the label-type enum.
        return { flume.LABEL_I : self.set_I,
                 flume.LABEL_S : self.set_S,
                 flume.LABEL_O : self.set_O }[typ] (l)
    def get_S (self):
        return Label (self._get_S ())
    def get_I (self):
        return Label (self._get_I ())
    def get_O (self):
        return Label (self._get_O ())
    def get_label (self, typ):
        return self.toDictEnumKeys ()[typ]
    def __str__(self):
        return str (dict ([ (k,str(v)) for (k,v) in self.toDict().items() ]))
    def to_filename (self):
        """Encode this labelset as a filename; "0" denotes the empty
        labelset.

        BUG FIX: the empty-labelset check tested get_I() twice and never
        get_O(); a labelset with only an O label would wrongly encode as
        "0"."""
        if len(self.get_S ()) == 0 and len(self.get_I()) == 0 and \
           len(self.get_O ()) == 0:
            return "0"
        ret = self._to_filename ()
        if ret is None:
            raise_err ("labelset_to_filename")
        return ret
    def toList (self):
        return [self._get_S (), self._get_I (), self._get_O () ]
    def fromRaw (self, obj):
        if self._from_raw (obj) < 0:
            raise_err ("Error converting object from raw data")
    def toRaw (self):
        return RawData (self._to_raw ())
    def armoredraw (self):
        return self.toRaw ().armor ()
    def hashkey (self):
        """ Returns a canonicalized string that can be used as a hash
        key. We don't implement __hash__ because a labelset is
        mutable and implementing __hash__ implies that the underlying
        object is immutable."""
        ret = [Label(lab).hashkey() for lab in self.toList ()]
        return str(ret)
    def clone (self):
        """Return a deep copy of this labelset."""
        ls = LabelSet ()
        ls.copy (self)
        return ls
##-----------------------------------------------------------------------
class LabelChange (fli._LabelChange):
    """A high-level wrapper around _LabelChange generated by swig."""
    def __init__ (self, lab = None, which = fli.LABEL_NONE):
        """'lab' may be a swig _LabelChange (copy-constructor case) or a
        Label to install; 'which' selects which label type to change."""
        fli._LabelChange.__init__ (self)
        if lab is not None:
            if type (lab) is fli._LabelChange:
                self.copy (lab)
            else:
                self.setLabel (lab)
        if which is not None:
            self.setWhich (which)
    def getWhich (self):
        return fli._LabelChange.get_which (self)
    def setWhich (self, i):
        return fli._LabelChange.set_which (self, i)
    def clone (self):
        """Make a clone of this label change."""
        return LabelChange (fli._LabelChange.clone (self))
    def setLabel (self, l):
        """Install the label 'l' on this LabelChange object."""
        if fli._LabelChange._set_label (self, l) < 0:
            raise_err ("LabelChange::set_label")
    def getLabel (self):
        return Label (self._get_label ())
    def copy (self, l):
        if fli._LabelChange._copy (self, l) < 0:
            raise_err ("LabelChange::copy")
    def __str__ (self):
        pair = (str (self.getLabel ()), self.getWhich ())
        return str (pair)
    def make (self):
        """Apply this label change to the calling process."""
        set_label (self.getWhich (), self.getLabel ())
    def make_ep (self, fd):
        """Apply this label change to the endpoint 'fd'."""
        set_fd_label (self.getWhich (), fd, self.getLabel ())
##-----------------------------------------------------------------------
class LabelChangeSet (_PyObjSetWrapper, fli._LabelChangeSet):
    """A high level wrapper around _LabelChangeSet generated by Swig."""
    def __init__ (self, lcs = []):
        _PyObjSetWrapper.__init__ (self, lcs,
                                   LabelChangeSet,
                                   fli._LabelChangeSet,
                                   LabelChange,
                                   "LabelChangeSet")
    def _add_arg_to_label_change_list (self, l):
        """Normalize the argument of +/+= into a list of LabelChanges."""
        if type (l) is list:
            return l
        if isinstance (l, LabelChangeSet):
            return l.toList ()
        if isinstance (l, LabelSet):
            return l.toLabelChangeList ()
        raise TypeError ("arg to +/+= must be list, LabelChangeSet or LabelSet")
    def __add__ (self, l):
        """Combine with more label changes, non-destructively; returns
        a fresh LabelChangeSet."""
        extra = self._add_arg_to_label_change_list (l)
        return LabelChangeSet (self.toList () + extra)
    def make (self):
        """Apply every label change in this set to the process."""
        for ch in self.toList ():
            ch.make ()
    def make_ep(self, fd):
        """Apply every label change in this set to the endpoint 'fd'."""
        for ch in self.toList ():
            ch.make_ep(fd)
    def __iadd__ (self, l):
        """Append more label changes, mutating this set in place."""
        extra = self._add_arg_to_label_change_list (l)
        base = len(self)
        self.resize (base + len (extra))
        for i, ch in enumerate (extra):
            self.set (base + i, ch)
        return self
##-----------------------------------------------------------------------
class CapabilityOp (fli._CapabilityOp):
    """A high-level wrapper around a capability operation that we have
    in the SQL library."""
    def __init__ (self, h, op=None):
        fli._CapabilityOp.__init__ (self)
        if isinstance(h, fli._CapabilityOp):
            # Copy-constructor case: pull both fields from the source.
            op = h._get_op ()
            h = Handle (h._get_h ())
        self.set_h (h)
        self.set_op (op)
    def __str__ (self):
        return "(%s,%d)" % (str (self.get_h ()), self.get_op ())
    def set_h (self, h):
        self._set_h (h)
    def set_op (self, op):
        self._set_op (op)
    def get_h (self):
        return Handle (self._get_h ())
    def get_op (self):
        return self._get_op ()
    def toPair (self):
        """Return (handle, op) as a python tuple."""
        return (self.get_h (), self.get_op ())
##-----------------------------------------------------------------------
class CapabilityOpSet (_PyObjSetWrapper, fli._CapabilityOpSet):
    """A set of CapabilityOp objects."""
    def __init__ (self, cos = None):
        _PyObjSetWrapper.__init__ (self, cos,
                                   CapabilityOpSet,
                                   fli._CapabilityOpSet,
                                   CapabilityOp,
                                   "CapabilityOpSet")
    def toDict (self):
        """Map each contained handle to its operation."""
        return dict ([ op.toPair () for op in self ])
##-----------------------------------------------------------------------
##-----------------------------------------------------------------------
##-----------------------------------------------------------------------
def set_label (typ, label=None, frc=True):
    """Set this process's 'typ' label (flume.LABEL_I, flume.LABEL_S or
    flume.LABEL_O) to the flmos.Label 'label', or clear it if 'label'
    is None.  Returns nothing on success; raises on error."""
    rc = fli._set_label (typ, label, frc)
    if rc < 0:
        raise_err ("set_label failed")
##-----------------------------------------------------------------------
def set_label2 (O = -1, I = -1, S = -1, frc=True):
    """A slightly more convenient way to set this proc's labels, applied
    in the standard O, I, S order.  A value of -1 means "leave alone";
    None already means "clear the label", hence the -1 sentinel."""
    for typ, lab in ( (flume.LABEL_O, O),
                      (flume.LABEL_I, I),
                      (flume.LABEL_S, S) ):
        if lab is None or isinstance (lab, Label):
            if fli._set_label (typ, lab, frc) < 0:
                raise_err ("set_label failed")
        elif (type(lab) == type(-1)) and lab == -1:
            # -1 sentinel: skip this label entirely.
            pass
        else:
            raise TypeError ('Invalid argument type to set_label2')
##-----------------------------------------------------------------------
def set_fd_label (typ, fd, label=None):
    """Like set_label, but sets the 'typ' label on the given 'fd'."""
    rc = fli._set_fd_label (typ, fd, label)
    if rc < 0:
        raise_err ("set_fd_label failed")
##-----------------------------------------------------------------------
def get_label (typ):
    """Where 'typ' is flume.LABEL_I, flume.LABEL_O or flume.LABEL_S, get
    the given label for this process from the reference monitor."""
    raw = fli._get_label (typ)
    return Label (raw)
##-----------------------------------------------------------------------
def get_endpoint_info ():
    """Get the endpoint information for the calling process."""
    eps = fli._get_endpoint_info ()
    return EndpointSet (eps)
##-----------------------------------------------------------------------
def get_labelset ():
    """Get the labelset for this process."""
    ls = fli._get_labelset ()
    return LabelSet (ls)
##-----------------------------------------------------------------------
def get_fd_label (typ, fd):
    """Same as 'get_label', but get the label for a specific FD.
    Returns None when no label is set on the fd."""
    raw = fli._get_fd_label (typ, fd)
    if raw is not None:
        return Label (raw)
    # A NULL result with errno ENULL just means no label was set on
    # this fd -- that is not an error condition.
    if flume.get_errno () != flume.ENULL:
        raise_err ("get_fd_label failure")
    return None
##-----------------------------------------------------------------------
def stat_group (h):
    """Determine the label on the object corresponding to the group
    given by 'h'."""
    ls = fli._stat_group (h)
    if ls is None:
        # BUG FIX: the format string used to read "(%)", which is an
        # invalid format spec and raised ValueError instead of
        # reporting the underlying failure.
        raise_err ("error in stat_group (%s)" % h)
    return LabelSet (ls)
##-----------------------------------------------------------------------
def stat_file (fn):
    """Ask the RM for the labelset on the file given by the filename 'fn'."""
    raw = fli._stat_file (fn)
    if raw is None:
        raise_err ("error in stat_file(%s)" % fn)
    return LabelSet (raw)
##-----------------------------------------------------------------------
def new_handle (opts, name):
    """Ask the reference monitor for a fresh handle named 'name'.

    'opts' is a bitwise OR of:
        flume.HANDLE_OPT_DEFAULT_ADD
        flume.HANDLE_OPT_DEFAULT_SUBTRACT
        flume.HANDLE_OPT_PERSISTENT
        flume.HANDLE_OPT_IDENTIFIER
    Returns a new flmos.Handle, or raises an exception if there was a
    problem."""
    h = fli._new_handle (opts, name)
    return Handle (h)
##-----------------------------------------------------------------------
def new_group (name, ls):
    """Create a new group of capabilities named 'name', whose labels are
    given by the flmos.LabelSet 'ls'.  Returns the Handle the reference
    monitor assigned to the group."""
    return Handle (fli._new_group (name, ls))
##-----------------------------------------------------------------------
def add_to_group (h, v):
    """Given the handle 'h' that names a group, add the capabilities in
    'v' (a Label, or a plain list of handles) to the group."""
    if type (v) is list:
        # Promote a bare python list to a Label first.
        lst = Label (v)
    else:
        lst = v
    if fli._add_to_group (h, lst) < 0:
        raise_err ("add (%s) to group (%s) failed" % (str (h), str (lst)))
##-----------------------------------------------------------------------
def unixsocket (fn, c):
    """Make a flume unix-domain socket bound at 'fn'; returns the fd."""
    fd = fli._unixsocket (fn, c)
    if fd >= 0:
        return fd
    raise_err ("unixsocket failed: " + fn)
##-----------------------------------------------------------------------
def unixsocket_connect (fn):
    """Connect to the unix-domain socket at 'fn'; returns the fd."""
    fd = fli._unixsocket_connect (fn)
    if fd >= 0:
        return fd
    raise_err ()
##-----------------------------------------------------------------------
def listen (fd, queue_len):
rc = fli._listen (fd, queue_len)
if rc < 0:
raise_err ()
return rc
##-----------------------------------------------------------------------
def accept (fd):
rc = fli._accept (fd)
if rc < 0:
raise_err ()
return rc
##-----------------------------------------------------------------------
def make_login (h, duration=0, fixed=False):
tok = fli._make_login (h, duration, fixed)
if tok is None:
raise_err ("make_login failed")
return tok
##-----------------------------------------------------------------------
def req_privs (h, tok):
    """Request the privileges named by handle 'h', proving the right to
    them with login token 'tok' (as produced by make_login()).

    Raises flume.LoginError if no token matches the pair,
    flume.ExpirationError if the token expired, and a generic error
    (via raise_err) otherwise.
    """
    rc = fli._req_privs (h, tok)
    if rc < 0:
        # Parenthesized raise replaces the Python-2-only
        # "raise Cls, msg" form; it behaves identically on Python 2
        # and also parses on Python 3.
        if flume.get_errno () == flume.ENOENT:
            raise flume.LoginError (
                "No login token found for pair (%s,'%s')" % (str (h), tok))
        elif flume.get_errno () == flume.EEXPIRED:
            raise flume.ExpirationError (
                "Login token expired for pair (%s,'%s')" % (str (h), tok))
        else:
            raise_err ("req_privs (%s,%s)" % (str (h), tok))
##-----------------------------------------------------------------------
def dearmor_token (x):
    """Decode ("dearmor") the ASCII-armored login token 'x'; return the raw token."""
    t = fli._dearmor_token (x)
    if t is None:
        raise_err ("dearmor")
    return t
##-----------------------------------------------------------------------
def setuid_handle ():
    """Return the setuid Handle for this process, as reported by the RM."""
    h = fli._setuid_handle ()
    if h is None:
        raise_err ("setuid_handle")
    return Handle (h)
##-----------------------------------------------------------------------
def make_nickname (h, name):
    """Assign the human-readable nickname 'name' to handle 'h'."""
    rc = fli._make_nickname (h, name)
    if rc != 0:
        raise_err ("make_nickname")
##-----------------------------------------------------------------------
def writefile (name, data, mode=0o600, labelset=None):
    """Create (or truncate) file 'name' and write 'data' to it via the RM.

    'mode' is the permission bits for a newly created file (default
    owner read/write); 'labelset' optionally labels the new file.
    Note: the default was previously spelled with the Python-2-only
    octal literal 0600; 0o600 is the same value and is accepted by
    Python 2.6+ and Python 3.
    """
    flags = posix.O_WRONLY | posix.O_CREAT | posix.O_TRUNC
    rc = fli._writefile (name, flags, mode, labelset, data)
    if rc < 0:
        raise_err ("writefile failed on '%s'" % name)
##-----------------------------------------------------------------------
def open (name, flags='r', mode=0, labelset=None, endpoint=None, bufsize=4096):
    """Flume-aware replacement for the builtin open().

    'flags' is a string of single-character options: 'w' write,
    'r' read, 'a' append (these three are also forwarded to the Python
    file object's mode string), plus 'c' O_CREAT, 'e' O_EXCL and
    't' O_TRUNC, which are RM-only.  'mode' gives permission bits on
    creation; 'labelset' and 'endpoint' are passed to the RM.
    Returns a Python file object wrapping the RM-provided descriptor.
    """
    py_fl = ""
    c_fl = 0
    # Renamed from 'map', which shadowed the builtin.  Each entry maps a
    # flag character to (posix flag, forward-to-fdopen-mode?).
    flag_map = { "w" : (posix.O_WRONLY, True),
                 "r" : (posix.O_RDONLY, True),
                 "a" : (posix.O_APPEND, True),
                 "c" : (posix.O_CREAT, False),
                 "e" : (posix.O_EXCL, False),
                 "t" : (posix.O_TRUNC, False)}
    for c in flags:
        p = flag_map.get (c)
        if p is None:
            # Parenthesized raise replaces the Python-2-only
            # "raise ValueError, msg" form; same behavior on Python 2,
            # valid syntax on Python 3.
            raise ValueError ("Unknown flag to open: '%s'" % c)
        c_fl |= p[0]
        if p[1]:
            py_fl += c
    fd = fli._open (name, c_fl, mode, labelset, endpoint)
    if fd < 0:
        raise_err ("open failed on file '%s'" % name)
    return posix.fdopen (fd, py_fl, bufsize)
##-----------------------------------------------------------------------
def mkdir (path, mode=0o700, labelset=None):
    """Make directory 'path' with permission bits 'mode' (default owner
    rwx) and optional 'labelset', via the RM.  The default was
    previously the Python-2-only octal literal 0700; 0o700 is the same
    value on Python 2.6+ and Python 3."""
    rc = fli._mkdir (path, mode, labelset)
    if rc < 0:
        raise_err ("mkdir failed on file '%s'" % path)
##-----------------------------------------------------------------------
def symlink (contents, newfile, labelset=None):
    """Create symlink 'newfile' pointing at 'contents', with optional labelset."""
    rc = fli._symlink (contents, newfile, labelset)
    if rc < 0:
        raise_err ("symlink failed on file '%s'" % newfile)
##-----------------------------------------------------------------------
def optimal_label_changes (from_ls, to_ls):
    """Build the LabelChangeSet that moves a process from labelset
    'from_ls' to labelset 'to_ls'.

    The O (capability) label is first widened to the union of both O
    labels, then S, I and finally the target O label are applied."""
    changelist = []
    # get all caps (Do this before adding any I tags, since that might
    # prevent us from reading our capability groups).
    all_o = set(from_ls.get_O ()) | set (to_ls.get_O ())
    changelist.append (LabelChange (lab=Label (all_o), which=flume.LABEL_O))
    changelist.append (LabelChange (lab=to_ls.get_S (), which=flume.LABEL_S))
    changelist.append (LabelChange (lab=to_ls.get_I (), which=flume.LABEL_I))
    changelist.append (LabelChange (lab=to_ls.get_O (), which=flume.LABEL_O))
    return LabelChangeSet (changelist)
def spawn (prog, argv, env=None, confined=True, opt=0, claim=None,
           I_min=None, endpoint=None, ch_endpoint=None, labelset=None):
    """The original spawn; accepts a labelset for the child process
    and returns just a handle -- the pid of the child process --
    on success.

    When 'labelset' is given, the required label transitions are
    computed with optimal_label_changes() against the current
    process's labelset and forwarded to spawn2()."""
    lchanges = None
    if labelset is not None:
        lchanges = optimal_label_changes (get_labelset (), labelset)
    tmp = spawn2 (prog=prog,
                  argv=argv,
                  env=env,
                  confined=confined,
                  opt=opt,
                  claim=claim,
                  lchanges=lchanges,
                  I_min=I_min,
                  endpoint=endpoint,
                  ch_endpoint=ch_endpoint)
    if tmp is None:
        raise_err ("Spawn failed on program '%s'" % prog)
    # spawn2 returns (Handle, fd); callers of spawn only get the Handle.
    return tmp[0]
##-----------------------------------------------------------------------
def spawn2 (prog, argv, env=None, confined=True, opt=0, claim=None,
            lchanges=None, I_min=None, endpoint=None, ch_endpoint=None):
    """The new spawn procedure.  Accepts a vector of label changes
    to make, and returns a pair.  The first element in the pair
    is the pid of the child process (in Handle form).  The
    second element is the status, either SPAWN_OK=0 or SPAWN_DISAPPEARED."""
    cl=None
    # Default to the parent's environment when none is given.
    if env is None:
        env = os.environ
    if claim is not None:
        cl = Label (claim)
    if confined:
        opt |= fli.SPAWN_CONFINED
    tmp = fli._spawn (prog, Argv (argv), Argv (env), opt, cl, lchanges,
                      I_min, endpoint, ch_endpoint)
    if tmp is None:
        raise_err ("Spawn failed on program '%s'" % prog)
    return (Handle (tmp.gethandle ()), tmp.getfd ())
##-----------------------------------------------------------------------
def socketpair (duplex = fli.DUPLEX_FULL, desc = None):
    """Create a flume socketpair with the given 'duplex' mode and
    optional description 'desc'.

    Returns (fd, Handle) for the local end / remote handle pair.

    Bug fix: the 'duplex' argument was previously ignored --
    fli.DUPLEX_FULL was always passed -- which silently made the
    half-duplex rpipe() and wpipe() helpers below full-duplex.
    """
    sp = fli._socketpair (duplex, desc)
    if sp is None:
        raise_err ("socketpair")
    return (sp.getfd (), Handle (sp.gethandle ()))
##-----------------------------------------------------------------------
def rpipe (desc = None):
    """Read pipe: request a them-to-me half-duplex socketpair.

    NOTE(review): socketpair() as written ignores its 'duplex'
    argument and always creates a full-duplex pair -- verify."""
    return socketpair (fli.DUPLEX_THEM_TO_ME, desc)
##-----------------------------------------------------------------------
def wpipe (desc = None):
    """Write pipe: request a me-to-them half-duplex socketpair.

    NOTE(review): see rpipe() -- socketpair() currently ignores the
    duplex mode."""
    return socketpair (fli.DUPLEX_ME_TO_THEM, desc)
##-----------------------------------------------------------------------
def claim (s, desc = None):
    """Claim the resource named by handle/value 's'; return its fd."""
    fd = fli._claim (Handle (s), desc)
    if fd < 0:
        raise_err ("claim")
    return fd
##-----------------------------------------------------------------------
def waitpid (h=None, opt = 0):
    """Wait for the child named by handle 'h' (or any child when None).

    Returns a 3-tuple: (exit-pid Handle, exit code, visible flag)."""
    pr = fli._waitpid (h, opt)
    if pr is None:
        raise_err ("waitpid")
    return (Handle (pr._get_pid (), "exitpid"), pr._get_exit_code (),
            pr._get_visible())
##-----------------------------------------------------------------------
def apply_filter (name, typ):
    """Apply the named filter of type 'typ'; return it wrapped in a Filter."""
    f = fli._apply_filter (name, typ)
    if f is None:
        raise_err ("applying filter: %s" % name)
    return Filter (f)
##-----------------------------------------------------------------------
def closed_files ():
    """Tell the RM we have closed our files; raise on failure."""
    if fli._closed_files () < 0:
        raise_err ("closed_files")
##-----------------------------------------------------------------------
def unlink (path):
    """Remove the file at 'path' via the RM."""
    rc = fli._unlink (path)
    if rc < 0:
        raise_err ("unlink failed on file '%s'" % path)
##-----------------------------------------------------------------------
def set_libc_interposing (v):
    """Enable/disable libc interposing according to 'v'."""
    rc = fli._set_libc_interposing (v)
    if (rc <0):
        raise_err ('unable to set libc interposing to %d' % v)
##-----------------------------------------------------------------------
def get_libc_interposing ():
    """Return True iff libc interposing is currently enabled."""
    return bool (fli._libc_interposing ())
##-----------------------------------------------------------------------
def myctlsock ():
    """Return this process's RM control socket."""
    return fli._ctl_sock ()
##-----------------------------------------------------------------------
def close (fd):
    """Close flume descriptor 'fd'; raise on failure."""
    if fli._close (fd) < 0:
        raise_err ('error closing fd %d' % fd)
##-----------------------------------------------------------------------
def flume_null ():
    """No-op RM round trip (useful for liveness/latency checks)."""
    r = fli._flume_null ();
    if r < 0:
        raise_err ('error calling flume_null')
    return r
##-----------------------------------------------------------------------
def flume_debug_msg (s):
    """Send debug message 's' to the RM's log."""
    r = fli._flume_debug_msg (s);
    if r < 0:
        raise_err ('error calling flume_debug_msg')
    return r
##-----------------------------------------------------------------------
def verify_capabilities (fd, ops, caps):
    """Verify on descriptor 'fd' that operations 'ops' are allowed by
    capabilities 'caps'; return the resulting CapabilityOpSet."""
    res = fli._verify_capabilities (fd, ops, caps)
    if res is None:
        raise_err ('error in verify_capabilities')
    return CapabilityOpSet (res)
##-----------------------------------------------------------------------
def send_capabilities (fd, caps):
    """Send the capabilities 'caps' across descriptor 'fd'."""
    r = fli._send_capabilities (fd, caps)
    if r < 0:
        raise_err ('error in send_capabilities')
##-----------------------------------------------------------------------
def fork (fds, confined):
    """flume fork, keeping the descriptors in 'fds' (list or IntArr)
    open in the child; 'confined' selects confined mode.
    Returns the underlying fork result code."""
    # Accept a plain python list for convenience.
    if type (fds) is list:
        fds = IntArr (fds)
    rc = fli._flume_fork (fds, confined)
    if rc < 0:
        raise_err ('error in flume_fork')
    return rc
##-----------------------------------------------------------------------
def setepopt (fd, strict=None, fix=None):
    """Set an endpoint option on descriptor 'fd'.

    Exactly one of the keyword arguments should be used: fix=True sets
    FLUME_EP_OPT_FIX; strict=<bool> sets FLUME_EP_OPT_STRICT to that
    value.  Raises TypeError when neither is given.
    """
    op = val = 0
    if fix is not None and fix is True:
        op = flume.FLUME_EP_OPT_FIX
        val = True
    elif strict is not None:
        op = flume.FLUME_EP_OPT_STRICT
        val = strict
    else:
        # Parenthesized raise replaces the Python-2-only
        # "raise TypeError, msg" form (same behavior, valid on 2 and 3).
        raise TypeError ("unknown option given to setepopt")
    rc = fli._setepopt (fd, op, val)
    if rc < 0:
        raise_err ('error in setepopt')
##-----------------------------------------------------------------------
##-----------------------------------------------------------------------
| [
"imosts"
] | imosts |
abe38816859632c2768996d082bb13e7eae65f3f | 99768ecfa8ef7514636bd0eab5313f186d3eb366 | /documentation/20.04_ParcelsMIT2008/galapagosrun_bwd_4km_2008.py | 49af421effac96197eee7e10390b3b9764260998 | [
"MIT"
] | permissive | SLYpma/GalapagosAnalysis | 7af3f8360b232b6365f466ec1bb5ff1cec667240 | a628cab2290527cbf9e89df886c33f2ba705784e | refs/heads/master | 2022-12-11T09:51:24.096647 | 2020-09-11T13:45:45 | 2020-09-11T13:45:45 | 280,425,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,615 | py | from parcels import FieldSet, Field, ParticleSet, JITParticle, AdvectionRK4, ErrorCode, Variable
from datetime import timedelta as delta
from glob import glob
import numpy as np
import xarray as xr
import os
import warnings
warnings.simplefilter('ignore', category=xr.SerializationWarning)
# Root of the 4km MITgcm model output used to build the velocity fields.
ddir = "/home/sypmauu/GalapagosProject/data/MIT4km/"
# Toggle: add Stokes-drift (WaveWatch3) velocities on top of the MITgcm currents.
wstokes = True
# set field
varfiles = glob(ddir+'RGEMS3_2008_Surf.nc')
meshfile = glob(ddir+'RGEMS3_Surf_grid.nc')
MITgcm_files = {'U': {'lon': meshfile, 'lat': meshfile, 'data': varfiles},
                'V': {'lon': meshfile, 'lat': meshfile, 'data': varfiles}}
MITgcm_variables = {'U': 'UVEL', 'V': 'VVEL'}
MITgcm_dimensions = {'lon': 'XG', 'lat': 'YG', 'time': 'time'}
# C-grid velocities need the dedicated interpolation method.
fieldset_MITgcm = FieldSet.from_c_grid_dataset(MITgcm_files, MITgcm_variables,
                                               MITgcm_dimensions,
                                               # time_periodic=delta(days=366),
                                               tracer_interp_method='cgrid_velocity')
if wstokes:
    stokesfiles = sorted(glob('/projects/0/topios/hydrodynamic_data/WaveWatch3data/CFSR/WW3-GLOB-30M_2008*_uss.nc'))
    stokesdimensions = {'lat': 'latitude', 'lon': 'longitude', 'time': 'time'}
    stokesvariables = {'U': 'uuss', 'V': 'vuss'}
    fieldset_stokes = FieldSet.from_netcdf(stokesfiles, stokesvariables,
                                           stokesdimensions)
    # time_periodic=delta(days=366))
    # Halo so particles can wrap across the zonal boundary of the global Stokes grid.
    fieldset_stokes.add_periodic_halo(zonal=True, meridional=False, halosize=5)
    # Summed fieldset: total advection velocity = currents + Stokes drift.
    fieldset = FieldSet(U=fieldset_MITgcm.U+fieldset_stokes.U,
                        V=fieldset_MITgcm.V+fieldset_stokes.V)
    fU = fieldset.U[0]
    fname = "/home/sypmauu/GalapagosProject/results/data_output/galapagosparticles_bwd_4km_2008_wstokes.nc"
else:
    fieldset = fieldset_MITgcm
    fU = fieldset.U
    fname = "/home/sypmauu/GalapagosProject/results/data_output/galapagosparticles_bwd_4km_2008.nc"
# Load the last time chunk first: the run below integrates backwards in time.
fieldset.computeTimeChunk(fU.grid.time[-1], -1)
#initialize where to start particles
# [lon_min, lon_max, lat_min, lat_max] box around the Galapagos islands.
galapagos_extent = [-91.8, -89, -1.4, 0.7]
startlon, startlat = np.meshgrid(np.arange(galapagos_extent[0],
                                           galapagos_extent[1],0.2),
                                 np.arange(galapagos_extent[2],
                                           galapagos_extent[3],0.2))
#functions to add to the kernel
def Age(fieldset, particle, time):
    # Parcels kernel: restyling is unsafe here because kernels are
    # parsed/transpiled by parcels.  'math' is resolved by the kernel
    # machinery, not by a module-level import -- TODO confirm.
    # Age accumulates |dt| so it also grows during backward integration;
    # particles older than 300 days are removed.
    particle.age = particle.age + math.fabs(particle.dt)
    if particle.age > 300*86400:
        particle.delete()
def DeleteParticle(particle, fieldset, time):
    # Recovery kernel for out-of-bounds errors: drop the particle.
    # NOTE(review): argument order differs from Age() above
    # (fieldset, particle, ...) -- verify against the parcels version in use.
    particle.delete()
#additional features of the particles
class GalapagosParticle(JITParticle):
    # Per-particle age in seconds, advanced by the Age kernel.
    age = Variable('age', initial = 0.)
# set particle conditions
# Release the grid of particles daily (repeatdt) at the final field time,
# since the run advects backwards (negative dt).
pset = ParticleSet(fieldset=fieldset,
                   pclass=GalapagosParticle,
                   lon=startlon,
                   lat=startlat,
                   time=fU.grid.time[-1],
                   repeatdt=delta(days=1))
outfile = pset.ParticleFile(name=fname, outputdt=delta(days=1))
# Phase 1: 30 days backwards while still releasing new particles daily.
pset.execute(AdvectionRK4+pset.Kernel(Age),
             runtime=delta(days=30),
             dt=delta(hours=-1),
             output_file=outfile,
             recovery={ErrorCode.ErrorOutOfBounds: DeleteParticle})
# Stop releasing new particles, then continue for another 270 days.
pset.repeatdt = None
pset.execute(AdvectionRK4+pset.Kernel(Age),
             runtime=delta(days=270),
             dt=delta(hours=-1),
             output_file=outfile,
             recovery={ErrorCode.ErrorOutOfBounds: DeleteParticle})
outfile.export()
outfile.close()
| [
"s.l.ypma@uu.nl"
] | s.l.ypma@uu.nl |
3ed7586381e1664293709ab9dac14351df1831e7 | 6999630ddf8559c9c6bee40a1dfa4a53d2ce4867 | /get_proxy_from_XMX.py | 5b1e4d26017e8ab210b7b69b68e4d87cb4dd843d | [] | no_license | possager/YFZX_new | a6a21cd7a8d6731af5ce87aae9887408472d295a | 057925659a7fcae4179d68cf2e0fca576e1de9f2 | refs/heads/master | 2021-01-02T22:33:00.488949 | 2017-11-27T00:49:47 | 2017-11-27T00:49:47 | 99,334,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,029 | py | #_*_coding:utf-8_*_
# Redis on the IDC host went down, so this module was written as a new proxy source.
import requests
import json
import time
from saveresult import BASIC_FILE
import random
import datetime
class Proxy:
    """Fetches a proxy list from an HTTP endpoint, caches it in a local
    JSON file, and serves ip:port pairs from that cache."""
    # def __init__(self,url_proxy='http://172.16.2.11:8899'):
    def __init__(self,url_proxy='http://172.16.1.4:8899/'):
        # Endpoint that returns the proxy list as JSON.
        self.url_proxy=url_proxy
    def save_proxy(self):
        """Download the proxy list and cache it to BASIC_FILE/proxy.txt.

        NOTE(review): json.dump(..., encoding=...) is Python-2-only;
        on Python 3 this raises TypeError -- verify target runtime."""
        response=requests.get(self.url_proxy)
        jsondata=json.loads(response.text)
        file1=BASIC_FILE+'/proxy.txt'
        with open(file1,'w') as fl:
            json.dump(jsondata,fl,encoding='utf-8')
        # json.dump(jsondata,file1)
    def get_proxy_couple(self,num):
        """Return the cached proxy at index 'num' as an 'ip:port' string.

        Returns None implicitly when the cache is empty/falsy."""
        file1 = BASIC_FILE + '/proxy.txt'
        with open(file1,'r') as fl:
            datajson=json.load(fl,encoding='utf-8')
        if datajson:
            # return (str(datajson[num]['ip']),str(datajson[num]['port']))
            return str(datajson[num]['ip'])+':'+str(datajson[num]['port'])
# url_proxy='http://192.168.8.52:8899/'
url_proxy='http://172.16.1.4:8899/'#yuancheng (remote endpoint)
def save_proxy():
    """Poll the proxy endpoint forever, refreshing the local cache
    (BASIC_FILE/proxy.txt) every 30 seconds.

    NOTE(review): the bare 'except ... pass' silently swallows every
    failure (network errors, bad JSON, disk errors) with no logging;
    this looks like deliberate best-effort retrying, but consider at
    least logging the exception."""
    while True:
        try:
            response = requests.get(url_proxy)
            jsondata = json.loads(response.text)
            file1 = BASIC_FILE + '/proxy.txt'
            with open(file1, 'w') as fl:
                json.dump(jsondata, fl, encoding='utf-8')
            print datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            time.sleep(30)
        except Exception as e:
            pass
def get_proxy_couple(num):
    """Return a random cached proxy as an 'ip:port' string.

    NOTE(review): the 'num' parameter is accepted but then overwritten
    by a random index below (unlike Proxy.get_proxy_couple, which uses
    it) -- presumably intentional random selection; confirm."""
    file1 = BASIC_FILE + '/proxy.txt'
    with open(file1,'r') as fl:
        datajson=json.load(fl,encoding='utf-8')
    if datajson:
        # return (str(datajson[num]['ip']),str(datajson[num]['port']))
        num=random.randint(0,len(datajson)-1)
        return str(datajson[num]['ip'])+':'+str(datajson[num]['port'])
if __name__ == '__main__':
    # thisclass=Proxy()
    # # thisclass.save_proxy()
    # print thisclass.get_proxy_couple(2)
    # print get_proxy_couple(2)
save_proxy() | [
"passager@163.com"
] | passager@163.com |
3f8e7ef8454b67aa1b2e3d30b254b7cba057466d | 88e56b76e0382ef3e2ba3275c0a2d2032ee51563 | /datetime.py | b4496bbd467bd0bafe1e1908b6b5ed23b07db1c4 | [] | no_license | tranvanquyetn/t | 07970b2bc9f15e1a48e84772adc0443a6012adbd | 81e092509493bfe329034d8fcc316774c2028f19 | refs/heads/master | 2020-04-28T11:34:37.881816 | 2019-05-17T00:07:41 | 2019-05-17T00:07:41 | 175,246,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | import datetime as dt
# Format for ISO-like timestamps such as '2008-10-12T14:45:52'.
# Bug fix: the original format was '%Y-%M-%dT%H:%M :%S' -- it used %M
# (minute) where %m (month) was intended and contained a stray space
# before ':%S', so strptime raised ValueError on the sample input.
# Renamed from 'format', which shadowed the builtin.
# NOTE(review): this file is named datetime.py, which shadows the
# stdlib datetime module on Python 2's implicit relative imports.
fmt = '%Y-%m-%dT%H:%M:%S'
t1 = dt.datetime.strptime('2008-10-12T14:45:52', fmt)
print('Day' + str(t1.day))
print('Month' + str(t1.month))
print('Minute' + str(t1.minute))
print('Second' + str(t1.second))
# Define todays date and time
t2 = dt.datetime.now()
diff = t2 - t1
print('How many days difference?' + str(diff.days))
| [
"noreply@github.com"
] | tranvanquyetn.noreply@github.com |
7ea1ee1b4b01f183e33c71fc73557e679fa631e8 | bb99dd0358c0751c41f634b1ed1b881361f01c12 | /fun/request.py | c78372ffcc94415c03dbd037d65859bc5a39463a | [
"Apache-2.0"
] | permissive | bluedian/python_wxblog | ffe1af9ec64652ed78794c4c4792c558d7a45489 | 3dd195335744b041d7c71285899bd921e633a04c | refs/heads/master | 2021-10-02T05:45:42.418445 | 2018-11-29T11:05:49 | 2018-11-29T11:05:49 | 112,463,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,755 | py | # -*- coding:utf-8 -*-
import requests
def getVer():
    """Return the API version of this helper module."""
    version = 1
    return version
def urlToHtml(url, datas=None, getType=True):
    """Fetch 'url' with a browser-like User-Agent and return the body
    decoded as utf-8.

    'getType' selects the HTTP verb (truthy -> GET, falsy -> POST);
    'datas' is forwarded as the request payload.  Returns False when
    the response status is not 200.  Echoes the final URL and status
    code to stdout as the original did."""
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, compress',
        'Accept-Language': 'en-us;q=0.5,en;q=0.3',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
    }
    fetch = requests.get if getType else requests.post
    response = fetch(url, headers=headers, data=datas)
    print(response.url)
    response.encoding = 'utf-8'
    print(response.status_code)
    if response.status_code != 200:
        return False
    return response.text
def getHtml(url):
    """GET 'url' with a browser-like User-Agent and return its text
    decoded with the apparent encoding.

    Returns None for a None url, and the numeric status code (not the
    body) when the response status is not 200."""
    if url is None:
        return None
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, compress',
        'Accept-Language': 'en-us;q=0.5,en;q=0.3',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
    }
    response = requests.get(url, headers=headers)
    response.encoding = response.apparent_encoding
    if response.status_code != 200:
        return response.status_code
    return response.text
def updataBlogServer(url, updata):
    """POST the update payload 'updata' to the blog API.

    Falls back to the local development endpoint when 'url' is None.
    Returns the response text on HTTP 200, otherwise the numeric
    status code.  Echoes the target URL to stdout."""
    target = url if url is not None else 'http://localhost/blog/api/upblog'
    print(target)
    response = requests.post(target, data=updata)
    response.encoding = response.apparent_encoding
    if response.status_code != 200:
        return response.status_code
    return response.text
| [
"6453776@qq.com"
] | 6453776@qq.com |
1d1ccb51f278305324fa6b09edf6d6b69e7c1d48 | 4404da890512f4a366b95c67068a161cfe61a205 | /AAL/data_handling/UCR/DodgerLoopDay/preprocess.py | 047d0dc4b4b148e8bc6f6a9dd9faf552f5c6d848 | [] | no_license | chrStef/fsm-learn | 905aab2e48d3619cba79676e539d9364a2f28557 | df793c004d7aa413c18ce470cd6120c731afa279 | refs/heads/main | 2023-08-14T12:28:54.284608 | 2021-07-14T11:06:03 | 2021-07-14T11:06:03 | 386,683,202 | 0 | 0 | null | 2021-07-16T15:28:50 | 2021-07-16T15:28:49 | null | UTF-8 | Python | false | false | 1,771 | py | train_data_whole = '/home/nkatz/dev/datasets_asp_wayeb_04062021/selected_UCR/DodgerLoopDay/DodgerLoopDay_TRAIN_SAX_20_ASP.csv'
# One output file per class label (0-6) of the DodgerLoopDay training set.
train_feature1_0 = open('/home/nkatz/dev/datasets_asp_wayeb_04062021/selected_UCR/DodgerLoopDay/feature1_0', 'w')
train_feature1_1 = open('/home/nkatz/dev/datasets_asp_wayeb_04062021/selected_UCR/DodgerLoopDay/feature1_1', 'w')
train_feature1_2 = open('/home/nkatz/dev/datasets_asp_wayeb_04062021/selected_UCR/DodgerLoopDay/feature1_2', 'w')
train_feature1_3 = open('/home/nkatz/dev/datasets_asp_wayeb_04062021/selected_UCR/DodgerLoopDay/feature1_3', 'w')
train_feature1_4 = open('/home/nkatz/dev/datasets_asp_wayeb_04062021/selected_UCR/DodgerLoopDay/feature1_4', 'w')
train_feature1_5 = open('/home/nkatz/dev/datasets_asp_wayeb_04062021/selected_UCR/DodgerLoopDay/feature1_5', 'w')
train_feature1_6 = open('/home/nkatz/dev/datasets_asp_wayeb_04062021/selected_UCR/DodgerLoopDay/feature1_6', 'w')

# Dispatch table: label character -> per-class output file.
writers = {
    '0': train_feature1_0,
    '1': train_feature1_1,
    '2': train_feature1_2,
    '3': train_feature1_3,
    '4': train_feature1_4,
    '5': train_feature1_5,
    '6': train_feature1_6,
}

file1 = open(train_data_whole, 'r')
for line in file1.readlines():
    # The class label sits four characters before the end of the line.
    label = line[len(line) - 4]
    if "feature1" in line:
        target = writers.get(label)
        if target is not None:
            target.write(line)

for handle in writers.values():
    handle.close()
| [
"nkatz@iit.demokritos.gr"
] | nkatz@iit.demokritos.gr |
f33a8e9b1f08ce34ab0bb55ede87c52172b0ab05 | 8215ed11c0998c55344b4db0b76199a7f7305192 | /MLP-Classifier/pishing.py | 486600ea44e26ca78bb0e0d3e8af47a96fbb1969 | [] | no_license | camilolaiton/Artificial_Intelligence | 443058549fae9bb76001627d808764d635d3f3f0 | f4c4d04c5eaff155d9a818f62b7b1b444ef16ce3 | refs/heads/master | 2021-07-11T10:48:26.864863 | 2020-06-19T01:37:26 | 2020-06-19T01:37:26 | 151,106,283 | 11 | 5 | null | null | null | null | UTF-8 | Python | false | false | 4,922 | py |
import pandas as pd # Libreria usada para procesamiento de data, y para leer los datos
import seaborn as sns # Libreria para visualizar los datos
import matplotlib.pyplot as plt # Libreria para visualizacion
import numpy as np # Libreria usada para algebra lineal
import warnings
from sklearn.model_selection import train_test_split #Para la data
from sklearn.neural_network import MLPClassifier # Red neuronal
from sklearn.neural_network import MLPRegressor
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error #Error
from sklearn.exceptions import ConvergenceWarning
from sklearn.model_selection import GridSearchCV
# Phishing-website dataset: one row per site, 'Result' is the class label.
pishing = pd.read_csv("dataset.csv")
# Basic information about the dataset
pishing.info()
ax=plt.subplots(1,1,figsize=(10,8))
sns.countplot('Result',data=pishing)
plt.title("Pishing attributes data")
#plt.show()
pishing.hist(edgecolor='black', linewidth=1.2)
fig=plt.gcf()
#fig.set_size_inches(12,12)
#plt.show()
"""
pishing = pishing.drop('id',axis=1)
box_data = pishing #variable representing the data array
box_target = pishing.Result #variable representing the labels array
sns.boxplot(data = box_data,width=0.5,fliersize=5)
#sns.set(rc={'figure.figsize':(2,15)})
plt.show()
"""
# Correlation heatmap over a subset of the feature columns.
X = pishing.iloc[:, 1:10]
f, ax = plt.subplots(figsize=(10, 8))
corr = X.corr()
print(corr)
# NOTE(review): np.bool is deprecated/removed in modern NumPy, and
# GridSearchCV's iid= parameter (used below) was removed in newer
# scikit-learn; this script targets older library versions.
sns.heatmap(corr, mask=np.zeros_like(corr, dtype=np.bool),
            cmap=sns.diverging_palette(220, 10, as_cmap=True),square=True, ax=ax, linewidths=.5)
#plt.show()
X = pishing.iloc[:, 1:-1].values # all rows, every column except the first and the last
y = pishing.iloc[:, 10].values # the class-label column
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20) # common ML practice: 80% train / 20% test
print(" INICIO \n", X_train, " \n\n ", X_test, " \n\n ", " \n\n ", y_train, " \n\n ", y_test)
warnings.filterwarnings("ignore", category=ConvergenceWarning, module="sklearn")
# First, hand-tuned MLP classifier as a baseline.
model = MLPClassifier(activation='relu', alpha=0.01, batch_size='auto', early_stopping=False,
                    hidden_layer_sizes=(20,10), learning_rate='constant',
                    learning_rate_init=0.01, max_iter=2000, momentum=0.5,
                    shuffle=True, verbose=True)
model.fit(X_train, y_train) #Training the model
#Test the model
predictions = model.predict(X_test)
print("accuracy_score: ", accuracy_score(y_test, predictions))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(y_test, predictions))
pishingclass = pishing['Result']
pishingclass_encoded, pishingclass_categories = pishingclass.factorize()
print("Encoded: ", pishingclass_encoded)
print("Categorias: ", pishingclass_categories[:10])
#X_train_R, X_test_R, y_train_R, y_test_R = train_test_split(X, pishingclass_encoded, test_size=0.20)
#model = MLPRegressor(hidden_layer_sizes = (2,2), alpha=0.01, max_iter=1000)
#model.fit(X_train_R, y_train_R) #Training the model
#predictions_R = model.predict(X_test_R)
#print("Error cuadratico medio sobre predicciones: ", mean_squared_error(y_test_R, predictions_R))
v = -np.arange(1, 5)
print("V ", v)
print("alpha: ", (0.01)**v)
# Hyper-parameter search space for the grid search below.
param_grid = [
    {
        'hidden_layer_sizes' : [(15,10), (8,5), (20,10), (5,4), (10, 20)],
        'max_iter':[100, 500, 1000, 2000],
        'momentum':[0.5, 0.9, 0.4, 0.3],
        'alpha': [0.0001, 0.001, 0.01, 0.1],
    }
]
model = MLPClassifier()
grid_search = GridSearchCV(model, param_grid, cv=5, scoring='accuracy', iid=False)
grid_search.fit(X_train, y_train)
print("Mejores parametros encontrados por GRID: ", grid_search.best_params_)
print("Best: %f using %s" % (grid_search.best_score_, grid_search.best_params_))
"""
model2 = MLPRegressor()
grid_search2 = GridSearchCV(model2, param_grid, cv=5, scoring='neg_mean_squared_error', iid=False)
grid_search2.fit(X_train_R, y_train_R)
print("Mejores parametros REGRESSOR: ", grid_search2.best_params_) #Mejores parámetros encontrados para MLPRegressor
"""
# Re-predict with the best estimator found by the grid search.
ind = grid_search.best_estimator_ #
new_predictions = ind.predict(X_test) # use the parameters found by the grid search to predict again
print('accuracy_score obtenido con los parámetros encontrados por GridSearchCV:')
#print(mean_squared_error(y_test_R, new_predictions_R)) For regression
print(accuracy_score(y_test, new_predictions)) # For classification
print("Confusion matrix:\n%s" % metrics.confusion_matrix(y_test, new_predictions))
# Plot original labels vs both sets of predictions over the test samples.
X = np.arange(1, len(y_test)+1)
plt.figure()
plt.plot(X, y_test, 'k', label='Datos Originales')
plt.plot(X, predictions, 'r', label='Primera Aproximación')
plt.plot(X, new_predictions, 'g', label='Segunda Aproximación')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Original data Vs Predictions')
plt.legend()
plt.show() | [
"kmilo9713.cl@gmail.com"
] | kmilo9713.cl@gmail.com |
9727a625dd07ca1ab6c4c27544e2bfbe8aa35fe2 | fb5d1d93c7432912f7f5e1d9ff50309f49bf6b16 | /Dropbox/workspace/pythoncode/practice/1.py | ae137b71d3b614769e34f8f52b14f8eef8dc4b0a | [] | no_license | Eileencaraway/Joyjit-s-Python-Script | 0089635bdd8609d9ae28aa03f0029304d16542b0 | 48c29fa7f1a0060b7fc5b791ce635e1ecdeb2e98 | refs/heads/master | 2021-09-20T15:21:14.717982 | 2018-08-11T06:56:57 | 2018-08-11T06:56:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | from statistics import *
print(mean([1,2,3,4,5,6,7,8]))
| [
"turelong@gmail.com"
] | turelong@gmail.com |
9f653ab13307676c72916817ec6736cef0226239 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/carpet/aaxis/_tickvals.py | d662833232a93d94748922377e31518cbba4b730 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 422 | py | import _plotly_utils.basevalidators
class TickvalsValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the 'tickvals' data-array attribute of carpet.aaxis.

    Auto-generated plotly.py validator: all actual checking is
    delegated to DataArrayValidator; this subclass only pins the
    attribute/parent names and edit metadata."""
    def __init__(
        self, plotly_name='tickvals', parent_name='carpet.aaxis', **kwargs
    ):
        # edit_type='calc' forces a full recalculation on change;
        # role='data' marks the attribute as carrying data values.
        super(TickvalsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type='calc',
            role='data',
            **kwargs
        )
| [
"adam.kulidjian@gmail.com"
] | adam.kulidjian@gmail.com |
d5d45adcd926362988d77e0f0a0877138e175946 | e45fa46b739ea5d7ef08349ccca74ce4775fafd9 | /demo/migrations/0004_auto_20150609_0238.py | 56ee1e4390809bb363980073729a84946146d9a7 | [] | no_license | mek4nr/django-smooth-perms-demo | 261847a39facd04636b5d096879b7ba7f257a7ea | 786feb30e6182630a05cb43df4bb94e0e89b3e6d | refs/heads/master | 2016-09-03T02:59:01.175689 | 2015-08-25T15:59:26 | 2015-08-25T15:59:26 | 41,371,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: renames the permission flag on
    ContinentPermission from 'can_change_continent' to
    'can_change_country' (implemented as a remove + add)."""

    # Must be applied after migration 0003 of the 'demo' app.
    dependencies = [
        ('demo', '0003_auto_20150609_0233'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='continentpermission',
            name='can_change_continent',
        ),
        migrations.AddField(
            model_name='continentpermission',
            name='can_change_country',
            field=models.BooleanField(verbose_name='can change country', default=False),
        ),
    ]
| [
"jbaptiste.munieres@gmail.com"
] | jbaptiste.munieres@gmail.com |
146a1a8baec68459f6c445a26befeea02dcb2323 | d35ec811a7e0c15579211b802d0b4ac415b40d24 | /ibon/cli.py | d7162d1a8dd7fbc66fb0ec419fdb261e6e4647bf | [
"LicenseRef-scancode-proprietary-license",
"MIT"
] | permissive | Superbil/ibonPrinter | dc251462b77555418114331b67a6432136a053ce | be6366bdeb555c21e7e80f00ce961ee761a6c257 | refs/heads/master | 2022-02-11T16:39:29.287510 | 2022-01-08T10:55:03 | 2022-01-08T10:55:03 | 248,232,050 | 0 | 0 | MIT | 2020-03-18T12:57:37 | 2020-03-18T12:57:36 | null | UTF-8 | Python | false | false | 989 | py | #!/usr/bin/env python3
import argparse
import sys
import os
from ibon.printer import Printer
def main():
    """Parse command-line arguments and upload the given file to the
    7-11 iBon print service.

    Returns None on success; on a missing input file it returns an
    error-message string, which ``sys.exit(main())`` prints to stderr
    and turns into exit status 1.
    """
    parser = argparse.ArgumentParser(
        prog='ibonprinter',
        description='7-11 iBon printer uploader.'
    )
    parser.add_argument(
        '--name',
        type=str,
        default=' ',
        help='User name'
    )
    parser.add_argument(
        '--email',
        type=str,
        default=' ',
        help='User email'
    )
    parser.add_argument(
        'file',
        type=str,
        help='Upload file'
    )
    args = parser.parse_args()
    if not os.path.isfile(args.file):
        return f'File is not existed, {args.file}'
    # TODO: check file ext
    # doc、docx、ppt、pptx、xls、xlsx、txt、ini、pdf、jpg、gif、bmp
    # TODO: check file size < 10M
    printer = Printer()
    # The upload result was previously bound to an unused local ('r');
    # call purely for its side effect so the success path returns None.
    printer.upload(
        args.file,
        user=args.name,
        email=args.email,
    )


if __name__ == "__main__":
    sys.exit(main())
| [
"superbil@gmail.com"
] | superbil@gmail.com |
1fba8d1b553eb9efce67df776f1ad5e955255018 | 41bfa89a0bcd0daedb172921e827c384080b8e14 | /python/pyramid_python/env3/bin/pdistreport | ba6c82324e6fcb69ad0a5d8ce17ff915567099fc | [] | no_license | jsdelivrbot/various_learning | e8ea964607da11213955eaae982cebdc44aace56 | aef86bd5269faaf95cf27c4c0d6bc67568e542b3 | refs/heads/master | 2020-04-10T11:53:58.846549 | 2018-12-08T17:57:34 | 2018-12-08T17:57:34 | 161,005,909 | 0 | 0 | null | 2018-12-09T05:00:07 | 2018-12-09T05:00:07 | null | UTF-8 | Python | false | false | 307 | #!/home/echessa/Documents/DEVELOPER/Mine/various_learning/python/pyramid_python/env3/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pyramid.scripts.pdistreport import main
# Console-script stub generated by setuptools/pip for Pyramid's
# 'pdistreport' command; all real work happens in the imported main().
if __name__ == '__main__':
    # Strip a trailing '-script.pyw' / '.exe' suffix (Windows launcher
    # artifacts) so argv[0] shows the clean command name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"jokhessa@yahoo.com"
] | jokhessa@yahoo.com | |
69b934ab721ebda8f49472649ba8192953df0541 | fb6c927558891433fd52eea63d9da99e6f08e9d7 | /master_mind/policy/sampling.py | 596c29371b81086fcf48bec833db23351fdbb06f | [
"MIT"
] | permissive | nariaki3551/master_mind | cc738dff9ff2e3e7c19544aa0247ffe02adf32df | 0067956f3cca87f99f8aa647872533f2ae51f7af | refs/heads/master | 2022-09-24T11:13:00.337017 | 2022-07-01T11:53:12 | 2022-07-01T11:53:12 | 230,287,150 | 1 | 0 | MIT | 2022-07-01T11:47:06 | 2019-12-26T15:35:21 | Python | UTF-8 | Python | false | false | 956 | py | from random import sample
from time import time
from utils import calc_dist, blue_str
from .minmax import get_minmax_code
from params import max_sampling
def get_sampling_code(feasible_codes, guess_iter, config):
    """Choose the next guess by running the minmax policy on a random
    subsample of the feasible codes (capped at max_sampling).

    Returns (guess_code, distribution of feedback over all feasible codes).
    """
    n_sample = min(len(feasible_codes), max_sampling)
    if n_sample < len(feasible_codes):
        info = f'[policy] check_codes {n_sample} <- {len(feasible_codes)}'
    else:
        info = f'[policy] check_codes {n_sample} (full)'
    config.logger.info(blue_str(info))
    # Time the subsampling + minmax search together.
    started = time()
    sub_minmax_code, _ = get_minmax_code(
        sample(feasible_codes, n_sample), guess_iter, config
    )
    search_time = time() - started
    config.logger.debug(
        blue_str(f'[policy] guess code {sub_minmax_code} search time {search_time:.2f}s')
    )
    # The feedback distribution is computed over the *full* feasible set.
    return sub_minmax_code, calc_dist(sub_minmax_code, feasible_codes, config)
| [
"n-tateiwa@math.kyushu-u.ac.jp"
] | n-tateiwa@math.kyushu-u.ac.jp |
68b3a0452617dade9c20330c89ba278f28b2ae3c | b8f9b3be2e65a59bacaa4b3fe77a6b18ea1bad7f | /test/test_file_management.py | d6a3a04bae1c404da6105b888cc2b51a0abfbc0c | [] | no_license | jhajagos/SequenceVisualizer | 5a3c6e1e653b37224e5a30ce96a6e9b5bb328bb7 | 89141d6be859eb9044e76237885cd41f7a80844f | refs/heads/master | 2022-10-22T20:46:52.138638 | 2020-06-14T19:45:52 | 2020-06-14T19:45:52 | 271,257,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | import unittest
from functional.file_management import calculate_md5_sum
class TestFileManagement(unittest.TestCase):
    """Unit tests for functional.file_management helpers."""
    def test_md5_sum(self):
        # The fixture file's MD5 digest was computed once and pinned here;
        # a change in the fixture or in calculate_md5_sum() fails the test.
        calculated_md5_sum = calculate_md5_sum("./test_processed.hdf5")
        self.assertEqual("8c3770cde105d8048ec9cffa46e36dd8", calculated_md5_sum)
if __name__ == '__main__':
    unittest.main()
| [
"risk.limits@gmai.com"
] | risk.limits@gmai.com |
5311b314a07e1ce162dcdaf4d0c2ee28398b1995 | 2f626a69741ee4e1aa0accb2534196a6368fde35 | /Actions.py | 330757e8e7676bd30100b25267173cdefd29ef98 | [] | no_license | BogdanYanov/lightit-py-game | 5bd0cb071850a8cc90dea0445fd6f86b8b65c562 | 352fb2989978dc3feb282a03a3fa77f114b27ffd | refs/heads/master | 2020-08-01T15:04:59.862530 | 2019-09-26T11:39:54 | 2019-09-26T11:39:54 | 211,029,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | from IActions import IActions
import random
class Actions(IActions):
    """Concrete implementation of the actions a game participant can perform."""

    def deals_damage(self, min_range, max_range):
        # Damage dealt to the enemy is drawn uniformly at random from
        # [min_range, max_range], inclusive.
        return random.randint(min_range, max_range)

    def use_first_aid_kit(self, min_low_range, max_low_range):
        # Health recovered is likewise drawn uniformly at random from
        # [min_low_range, max_low_range], inclusive.
        return random.randint(min_low_range, max_low_range)
| [
"b.yanov76@gmail.com"
] | b.yanov76@gmail.com |
654f906f59ef8bb22afe907524e10160829658d8 | 1f006f0c7871fcde10986c4f5cec916f545afc9f | /apps/ice/plugins/required/plugin_info.py | efab21549b3cbef46e32e6e38adbc4c8701eb49f | [] | no_license | ptsefton/integrated-content-environment | 248b8cd29b29e8989ec1a154dd373814742a38c1 | c1d6b5a1bea3df4dde10cb582fb0da361dd747bc | refs/heads/master | 2021-01-10T04:46:09.319989 | 2011-05-05T01:42:52 | 2011-05-05T01:42:52 | 36,273,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,006 | py | #
# Copyright (C) 2007 Distance and e-Learning Centre,
# University of Southern Queensland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import os
import pysvn
import sys
pluginName = "ice.info"
pluginDesc = ""
pluginFunc = None  # either (or both) pluginFunc or pluginClass should
pluginClass = None  # be set by the pluginInit() method
pluginInitialized = False  # set to True by pluginInit() method


def pluginInit(iceContext, **kwargs):
    """Initialize this ICE plugin.

    Exposes VersionInfo as the plugin's class (pluginClass) and marks the
    plugin as initialized.  This plugin has no function entry point, so
    pluginFunc is left as None and that None is returned, matching the
    ICE plugin convention used by this file.
    """
    global pluginFunc, pluginClass, pluginInitialized
    pluginFunc = None
    pluginClass = VersionInfo
    pluginInitialized = True
    return pluginFunc
class VersionInfo:
    """Reports version information for ICE and its dependencies.

    Covers the SVN library version, the pysvn binding version, the Python
    interpreter version, and the working-copy revision of the ICE trunk.
    """

    def __init__(self, iceContext=None, *args):
        self.iceContext = iceContext

    def svn(self):
        """Return the version of the Subversion libraries used by pysvn."""
        return str(pysvn.svn_version)

    def pysvn(self):
        """Return the version of the pysvn bindings."""
        return str(pysvn.version)

    def python(self):
        """Return the version string of the running Python interpreter."""
        return str(os.sys.version)

    def iceTrunkRevision(self):
        """Return the working-copy revision of the ICE trunk checkout."""
        svn = pysvn.Client()
        return str(svn.info('../../../trunk').revision)

    def __getArgs(self):
        # NOTE(review): self.__args is never assigned anywhere in this class,
        # so calling this raises AttributeError.  Kept unchanged for
        # interface compatibility; confirm whether it can be removed.
        return self.__args

    def summary(self):
        """Return a multi-line summary of trunk revision and library versions."""
        # Fixed: previously this created a throwaway VersionInfo() instead of
        # using self.
        summary = "Built from ICE trunk " + self.iceTrunkRevision() + "\n"
        summary = summary + "SVN version " + self.svn() + "\n"
        summary = summary + "pysvn version " + self.pysvn() + "\n"
        summary = summary + "Python: " + self.python()
        return summary

    def getSummary(self):
        """Return the full version report.

        If a version argument was passed on the command line, build the
        report live; otherwise fall back to the contents of
        version_info.txt, and finally to an "unversioned" live report.
        """
        argv = sys.argv
        try:
            result = "ICE version: " + argv[1] + "\n"
            result = result + self.summary()
            return str(result)
        # Narrowed from a bare except: still covers the IndexError from a
        # missing argv[1] and any pysvn failure, without swallowing
        # SystemExit/KeyboardInterrupt.
        except Exception:
            try:
                f = open('version_info.txt', 'r')
                info = f.read()
                f.close()
                return info
            except IOError:
                summary = "ICE version: unversioned \n"
                summary = summary + "SVN version " + self.svn() + "\n"
                summary = summary + "pysvn version " + self.pysvn() + "\n"
                summary = summary + "Python: " + self.python()
                return summary
def main(argv=None):
    """Print the ICE version summary and return None (exit status 0).

    The argv parameter is accepted for call compatibility but is not used
    beyond normalization: VersionInfo.getSummary() reads sys.argv directly.
    """
    if argv is None:
        argv = sys.argv
    info = VersionInfo()
    # Parenthesized form works identically as a Python 2 print statement
    # (single expression) and as the Python 3 print() function.
    print("%s" % info.getSummary())


if __name__ == "__main__":
    sys.exit(main())
| [
"raward@gmail.com@110e3293-9ef9-cb8f-f479-66bdb1942d05"
] | raward@gmail.com@110e3293-9ef9-cb8f-f479-66bdb1942d05 |
38440c52596ae0ccaaf4bb0b957964b863e6e96c | 1a2ada5afcb526e6b9148bc1249107c6364ef7a3 | /feature_enhancement/hed/hed.py | 6dafadcb146fa65f2a20b75473188f0b180b460f | [] | no_license | fcUalberta/tless_edge_based | e7197aeb8535abf738e070dbe9f61395b7a27958 | 5d36f899daf399ad56a747b37320f7efdec819b4 | refs/heads/master | 2022-11-23T12:04:31.456528 | 2020-10-13T01:09:10 | 2020-10-13T01:09:10 | 229,332,996 | 2 | 0 | null | 2022-11-22T04:38:08 | 2019-12-20T20:39:32 | Python | UTF-8 | Python | false | false | 3,856 | py | """
Reference: https://www.pyimagesearch.com/2019/03/04/holistically-nested-edge-detection-with-opencv-and-deep-learning/
"""
# USAGE
# python detect_edges_image.py --edge-detector hed_model --image images/guitar.jpg
# import the necessary packages
#import argparse
import cv2
import matplotlib.pyplot as plt
import PIL.Image as pil
import numpy as np
#import os
#import glob
## construct the argument parser and parse the arguments
#ap = argparse.ArgumentParser()
#ap.add_argument("-d", "--edge-detector", type=str, required=True,
# help="path to OpenCV's deep learning edge detector")
#ap.add_argument("-i", "--Pics", required=True,
# help="path to input image")
#args = vars(ap.parse_args())
class CropLayer(object):
    """Custom layer for OpenCV's DNN module.

    Center-crops its first input blob to the spatial size of its second
    input blob, as required by the HED Caffe architecture.
    """

    def __init__(self, params, blobs):
        # Crop window corners; filled in by getMemoryShapes().
        self.startX = 0
        self.startY = 0
        self.endX = 0
        self.endY = 0

    def getMemoryShapes(self, inputs):
        """Return the output blob shape and cache the centered crop window.

        inputs[0] is the shape of the blob to crop; inputs[1] is the shape
        whose spatial size must be matched.  Batch size and channel count
        come from the first, height/width from the second.
        """
        srcShape, dstShape = inputs[0], inputs[1]
        batchSize, numChannels = srcShape[0], srcShape[1]
        H, W = dstShape[2], dstShape[3]
        # Center the crop window inside the (larger) source blob.
        self.startX = int((srcShape[3] - dstShape[3]) / 2)
        self.startY = int((srcShape[2] - dstShape[2]) / 2)
        self.endX = self.startX + W
        self.endY = self.startY + H
        return [[batchSize, numChannels, H, W]]

    def forward(self, inputs):
        """Crop the first input using the window cached by getMemoryShapes()."""
        cropped = inputs[0][:, :, self.startY:self.endY, self.startX:self.endX]
        return [cropped]
def load_hed_model(
        protoPath=r"C:\PythonCodes\MM803\code\feature_enhancement\hed\hed_model\deploy.prototxt",
        modelPath=r"C:\PythonCodes\MM803\code\feature_enhancement\hed\hed_model\hed_pretrained_bsds.caffemodel"):
    """Load the serialized HED (holistically-nested edge detection) Caffe model.

    The paths were previously hard-coded; they are now parameters with the
    original values as backward-compatible defaults.

    :param protoPath: path to the Caffe deploy .prototxt file.
    :param modelPath: path to the pretrained .caffemodel weights.
    :return: the loaded cv2 DNN network.
    """
    print("[INFO] loading edge detector...")
    net = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
    # The HED network uses a custom crop layer; register it with OpenCV.
    # NOTE(review): registration is process-global, so this function appears
    # intended to be called only once per process — confirm before reuse.
    cv2.dnn_registerLayer("Crop", CropLayer)
    print(net)
    return net
def holistically_nested(net, image, alpha=1, beta=0.6):
    """Run holistically-nested edge detection (HED) on an image.

    :param net: DNN network returned by load_hed_model().
    :param image: input BGR image array of shape (H, W, 3).
    :param alpha: retained for call compatibility with earlier overlay
        code; unused by the current implementation.
    :param beta: retained for call compatibility; unused.
    :return: single-channel uint8 edge map with the same H, W as `image`.
    """
    (H, W) = image.shape[:2]
    # The mean triple is the per-channel BGR mean subtracted at inference;
    # values are the ones published with the pretrained HED model.
    blob = cv2.dnn.blobFromImage(image, scalefactor=1.0, size=(W, H),
                                 mean=(104.00698793, 116.66876762, 122.67891434),
                                 swapRB=False, crop=False)
    net.setInput(blob)
    hed = net.forward()
    # Network output is a (1, 1, h, w) map in [0, 1]; resize back to the
    # input size and rescale to an 8-bit image.
    hed = cv2.resize(hed[0, 0], (W, H))
    hed = (255 * hed).astype("uint8")
    return hed
'''
#I cant save the images using this code. It will save the same image 16 times
num = 0
for imagePath in glob.glob(args["Pics"] + "/*.png"):
cv2.imwrite(str(num)+".png",hed)
num = num+1
''' | [
"noreply@github.com"
] | fcUalberta.noreply@github.com |
2ae65acc073be45cc40bf1d2acf61ccdbda24d22 | 7d6379a620a06e3e09c18b3be568c69d155696c8 | /Algorithms/anamoly_detection.py | 8e93956073a27232ac730dd8438b90bf14d6bd7e | [] | no_license | abhishekbamotra/machinelearning24787 | 50d599a8129bf419f5d152a928f1986e17d23036 | ea7c047c869a325e9b35a015f6cbd74f22e3b80f | refs/heads/master | 2020-10-02T07:32:44.514382 | 2019-12-13T04:08:33 | 2019-12-13T04:08:33 | 227,731,559 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,754 | py | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import os
from utils import *
from distance_functions import*
# FIX THE RNG SEED SO ANY RANDOMIZED STEPS ARE REPRODUCIBLE ACROSS RUNS
np.random.seed(0)

# READING DATA FROM THE DATASET TO PANDAS DATA FRAME
x = pd.read_excel('DSL-StrongPasswordData.xls')

# REMOVING TWO UNWANTED COLUMNS (NOT TIMING FEATURES) FROM THE DATA SET
del x['sessionIndex']
del x['rep']

# EXTRACTING LIST OF USER NAMES FROM THE DATASET (IN THIS CASE 51)
x1 = x.groupby('subject')
user_names = x1.subject.unique()

# user_all_data maps "userN" -> that subject's data frame;
# usernames maps "userN" -> the original subject id (e.g. "user1" -> "s002").
user_all_data = {}
usernames = {}
for i, user in enumerate(user_names):
    name = "user" + str(i + 1)
    user_all_data[name] = x1.get_group(user[0])
    usernames[name] = user[0]

# CREATE DATA STRUCTURES FOR TRAIN, TEST AND ANOMALY (IMPOSTER) DATASETS
train_data, test_data, anamoly_data = create_data_stucture(user_all_data, usernames)

# Distance metric used for scoring; must be one of the keys of
# score_functions below.  (Previously the metric was switched by
# commenting blocks of code in and out.)
metric = 'Z_SCORE'

equal_error_rate_list = []
zero_false_alarm_list = []
for i, user in enumerate(usernames):
    train = train_data[user]
    test = test_data[user]
    # BUILD THE ANOMALY (IMPOSTER) DATASET FOR THIS USER
    anamoly = create_anamoly_data_set_for_a_user(anamoly_data, user)

    # PER-FEATURE STATISTICS OF THE TRAINING DATA
    mean = train.mean(axis=0)
    std = train.std(axis=0)
    # COVARIANCE-LIKE MATRIX USED BY THE MAHALANOBIS METRICS
    cov_mat = train.T @ train
    # MEAN ABSOLUTE DEVIATION OF EACH FEATURE (FOR SCALED MANHATTAN)
    abs_deviation = abs(train - mean)
    mean_abs_dev = np.sum(abs_deviation, axis=0) / train.shape[0]

    # Metric name -> scoring callable; each returns (scores, max, min).
    score_functions = {
        'Z_SCORE': lambda data: calculate_zscore_dist(data, mean, std),
        'MANHATTAN_SCALED': lambda data: calculate_manhattan_scaled_dist(data, mean, mean_abs_dev),
        'MANHATTAN': lambda data: calculate_manhattan_dist(data, mean),
        'MAHALANOBIS_NORMED': lambda data: calculate_mahalanobis_normed_dist(data, mean, cov_mat),
        'MAHALANOBIS': lambda data: calculate_mahalanobis_dist(data, mean, cov_mat),
        'EUCLEDIAN': lambda data: calculate_eucledian_dist(data, mean),
    }
    score_fn = score_functions[metric]

    # CALCULATE GENUINE-USER SCORES
    user_score, user_max, user_min = score_fn(test)
    print(user + " test score - " + str(user_score.shape[0]) + " elemnts")
    # CALCULATE IMPOSTER SCORES
    anamoly_score, anamoly_max, anamoly_min = score_fn(anamoly)
    print(user + " impo score - " + str(anamoly_score.shape[0]) + " elemnts")

    # CREATE SEARCH SPACE FOR OPTIMAL THRESHOLD
    hit_rate_list, miss_rate_list, flase_alarm_rate_list, error, equal_error_index, zero_miss_rate_index = search_optimal_threshold(user_score, anamoly_score)
    # STORING THE EQUAL ERROR RATE AND ZERO-MISS FALSE-ALARM RATE FOR EACH USER
    equal_error_rate_list.append(miss_rate_list[equal_error_index])
    zero_false_alarm_list.append(flase_alarm_rate_list[zero_miss_rate_index])
    # PLOT THE ROC CURVE
    plot_ROC(i, user, hit_rate_list, flase_alarm_rate_list, equal_error_index, zero_miss_rate_index, metric)

# AGGREGATE STATISTICS OVER ALL USERS
equal_error_rate_list = np.stack(equal_error_rate_list)
zero_false_alarm_list = np.stack(zero_false_alarm_list)
error_rate_avg = np.mean(equal_error_rate_list)
error_rate_std = np.std(equal_error_rate_list)
zero_miss_rate_avg = np.mean(zero_false_alarm_list)
zero_miss_rate_std = np.std(zero_false_alarm_list)
print("Equal error rate average = " + str(error_rate_avg))
print("Equal error rate STD = " + str(error_rate_std))
print("Zero miss false alarm rate average = " + str(zero_miss_rate_avg))
print("Zero miss false alarm rate STD = " + str(zero_miss_rate_std))
"noreply@github.com"
] | abhishekbamotra.noreply@github.com |
2aeafe5b3376ac821c1a91dc45efb6c85ac84c32 | 81bb77804b2481a92c0d48ad2f76e5b79d29e9ec | /qa/rpc-tests/keypool.py | 561520db1dda65c0f711f0ef0ef68dcc286cafa4 | [
"MIT"
] | permissive | AndrewJEON/qtum | a5216a67c25e818b11266366f37d0b7bcf5a573f | 5373115c4550a9dbd99f360dd50cc4f67722dc91 | refs/heads/master | 2021-06-11T16:50:00.126511 | 2017-03-14T17:12:40 | 2017-03-14T17:12:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,165 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the wallet keypool, and interaction with wallet encryption/locking
from test_framework.test_framework import QuantumTestFramework
from test_framework.util import *
class KeyPoolTest(QuantumTestFramework):
    """Exercise the wallet keypool and its interaction with encryption/locking.

    Runs a single node.  The flow is: encrypt the wallet (which exhausts the
    keypool), verify key generation fails while locked, refill the keypool
    while unlocked, then drain it again via change addresses and mining.
    RPC error code -12 means "keypool ran out".
    """

    def run_test(self):
        nodes = self.nodes
        # Before encryption, a new address must belong to the wallet's HD chain.
        addr_before_encrypting = nodes[0].getnewaddress()
        addr_before_encrypting_data = nodes[0].validateaddress(addr_before_encrypting)
        wallet_info_old = nodes[0].getwalletinfo()
        assert(addr_before_encrypting_data['hdmasterkeyid'] == wallet_info_old['hdmasterkeyid'])
        # Encrypt wallet and wait to terminate (encryptwallet shuts the node down)
        nodes[0].encryptwallet('test')
        quantumd_processes[0].wait()
        # Restart node 0
        nodes[0] = start_node(0, self.options.tmpdir)
        # Keep creating keys; encryption rotates the HD master key, so the
        # new address must NOT match the pre-encryption master key id.
        addr = nodes[0].getnewaddress()
        addr_data = nodes[0].validateaddress(addr)
        wallet_info = nodes[0].getwalletinfo()
        assert(addr_before_encrypting_data['hdmasterkeyid'] != wallet_info['hdmasterkeyid'])
        assert(addr_data['hdmasterkeyid'] == wallet_info['hdmasterkeyid'])
        # While locked, the keypool cannot top up, so the next request fails.
        try:
            addr = nodes[0].getnewaddress()
            raise AssertionError('Keypool should be exhausted after one address')
        except JSONRPCException as e:
            assert(e.error['code']==-12)  # -12: keypool ran out
        # put three new keys in the keypool (requires the wallet unlocked)
        nodes[0].walletpassphrase('test', 12000)
        nodes[0].keypoolrefill(3)
        nodes[0].walletlock()
        # drain the keys via raw change addresses
        addr = set()
        addr.add(nodes[0].getrawchangeaddress())
        addr.add(nodes[0].getrawchangeaddress())
        addr.add(nodes[0].getrawchangeaddress())
        addr.add(nodes[0].getrawchangeaddress())
        # assert that four unique addresses were returned
        assert(len(addr) == 4)
        # the next one should fail, the keypool being empty again
        try:
            addr = nodes[0].getrawchangeaddress()
            raise AssertionError('Keypool should be exhausted after three addresses')
        except JSONRPCException as e:
            assert(e.error['code']==-12)
        # refill keypool with three new addresses
        nodes[0].walletpassphrase('test', 1)
        nodes[0].keypoolrefill(3)
        # test walletpassphrase timeout: after >1s the wallet auto-locks
        time.sleep(1.1)
        assert_equal(nodes[0].getwalletinfo()["unlocked_until"], 0)
        # drain them by mining (each generated block consumes a keypool key)
        nodes[0].generate(1)
        nodes[0].generate(1)
        nodes[0].generate(1)
        nodes[0].generate(1)
        try:
            nodes[0].generate(1)
            # NOTE(review): message contains a typo ("addesses"); left
            # unchanged because it is a runtime string.
            raise AssertionError('Keypool should be exhausted after three addesses')
        except JSONRPCException as e:
            assert(e.error['code']==-12)

    def __init__(self):
        super().__init__()
        # Reuse the cached chain; a single node is sufficient for this test.
        self.setup_clean_chain = False
        self.num_nodes = 1

    def setup_network(self):
        self.nodes = self.setup_nodes()


if __name__ == '__main__':
    KeyPoolTest().main()
"development@solarius.fi"
] | development@solarius.fi |
23c8648c2c83d3f3cd53ed503bee313509ab3992 | c03ece9d8530ec65ad2a4af51fc84991d4c5c64e | /Project_Folder/boss.py | 81eb6d394fc2eb1a1a18208882daa018730bde91 | [] | no_license | ruen346/2D-Final-Project | 6aa4d9dc3d4cc8424ac52c616cac872c5b7c96c7 | a81cfef7a9b5efc97f4c8a5ea2928e1a30b45062 | refs/heads/master | 2020-04-04T10:00:39.253933 | 2018-12-03T16:34:01 | 2018-12-03T16:34:01 | 155,839,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,910 | py | import game_framework
from pico2d import *
import game_world
import main_state
# World-to-screen scale: 10 px per 0.3 m (~33.3 px per meter).
PIXEL_PER_METER = (10.0/0.3)
# Boss run speed, converted km/h -> m/min -> m/s -> pixels per second.
RUN_SPEED_KMPH = 15.0
RUN_SPEED_MPM = (RUN_SPEED_KMPH * 1000.0 / 60.0)
RUN_SPEED_MPS = (RUN_SPEED_MPM / 60.0)
RUN_SPEED_PPS = (RUN_SPEED_MPS * PIXEL_PER_METER)
# Sprite-animation timing: one action lasts 0.5 s and spans 8 frames.
TIME_PER_ACTION = 0.5
ACTION_PER_TIME = 1.0 / TIME_PER_ACTION
FRAMES_PER_ACTION = 8
class IdleState:
    """Boss behavior state: walk a fixed waypoint path and react to hits.

    `boss.move` (1..6) selects the current leg of the path; `do` advances
    the boss along it, resolves collisions with projectiles/effects, and
    removes the boss when it dies or leaves the screen.
    """

    @staticmethod
    def enter(boss, event):
        # NOTE(review): this sets `boss.timer`, but the hit-throttling below
        # reads `boss.time` (set in Boss.__init__) — possibly meant to be
        # the same attribute; confirm.
        boss.timer = get_time()

    @staticmethod
    def exit(boss, event):
        pass

    @staticmethod
    def do(boss):
        # Advance to the next path leg once the current leg's waypoint
        # (expressed in 128-px tile coordinates, offset by half a tile)
        # has been reached.
        if boss.move == 1 and boss.x >= 128 * 6 - 64:
            boss.move = 2
        elif boss.move == 2 and boss.y <= 720 - (128 * 10 - 64):
            boss.move = 3
        elif boss.move == 3 and boss.x >= 128 * 18 - 64:
            boss.move = 4
        elif boss.move == 4 and boss.y >= 720 - (128 * 7 - 64):
            boss.move = 5
        elif boss.move == 5 and boss.x <= 128 * 12 - 64:
            boss.move = 6
        # Move along the current leg, frame-rate independent.
        if boss.move == 1:
            boss.x += RUN_SPEED_PPS * game_framework.frame_time
        elif boss.move == 2:
            boss.y -= RUN_SPEED_PPS * game_framework.frame_time
        elif boss.move == 3:
            boss.x += RUN_SPEED_PPS * game_framework.frame_time
        elif boss.move == 4:
            boss.y += RUN_SPEED_PPS * game_framework.frame_time
        elif boss.move == 5:
            boss.x -= RUN_SPEED_PPS * game_framework.frame_time
        elif boss.move == 6:
            boss.y += RUN_SPEED_PPS * game_framework.frame_time
        # Collision handling; objects are matched by their repr() string.
        # At most one collision is processed per frame (break on first hit).
        for game_object in game_world.all_objects():
            if str(game_object).find("shot_arrow") != -1:  # collision with a tower arrow
                if game_object.x > boss.x - 64 and game_object.x < boss.x + 64 and game_object.y < boss.y + 64 and game_object.y > boss.y - 64:
                    game_world.remove_object(game_object)
                    boss.hp -= main_state.tower1_d + game_object.damage
                    break
            elif str(game_object).find("elf_arrow") != -1:  # collision with an elf arrow
                if game_object.x > boss.x - 64 and game_object.x < boss.x + 64 and game_object.y < boss.y + 64 and game_object.y > boss.y - 64:
                    game_world.remove_object(game_object)
                    boss.hp -= main_state.elf_d
                    break
            elif str(game_object).find("magic") != -1 and str(game_object).find("tower") == -1:  # magic area effect
                # Radial hit (250 px) throttled to once per 0.1 s via boss.time.
                if math.sqrt((game_object.x - boss.x)**2 + (game_object.y - boss.y)**2) < 250 and get_time() >= boss.time + 0.1:
                    boss.hp -= main_state.tower2_d + game_object.damage
                    break
            elif str(game_object).find("boom") != -1:  # bomb projectile: spawn a fire patch
                if game_object.x > boss.x - 64 and game_object.x < boss.x + 64 and game_object.y < boss.y + 64 and game_object.y > boss.y - 64:
                    from fire import Fire
                    fire = Fire(boss.x, boss.y, game_object.damage)
                    game_world.add_object(fire, 2)
                    game_world.remove_object(game_object)
                    break
            elif str(game_object).find("fire") != -1:  # standing fire patch
                if math.sqrt((game_object.x - boss.x)**2 + (game_object.y - boss.y)**2) < 100 and get_time() >= boss.time + 0.1:
                    game_world.remove_object(game_object)
                    boss.hp -= main_state.tower3_d + game_object.damage
                    break
        if get_time() >= boss.time + 0.1:  # throttle window for multi-hit effects
            boss.time = get_time()
        if boss.hp <= 0:  # died: HP reached zero; reward the player
            boss.sound.play()
            game_world.remove_object(boss)
            main_state.ui.money += 100
        if boss.y > 720 + 64:  # walked off the path: despawn and cost 5 lives
            game_world.remove_object(boss)
            main_state.ui.life -= 5

    @staticmethod
    def draw(boss):
        # All draws are offset by the camera position from main_state.
        boss.image.draw(boss.x + main_state.elf_move_window_x, boss.y + main_state.elf_move_window_y)
        # HP bar background, then the red fill scaled to the HP fraction
        # (3500 is the boss's maximum HP, set in Boss.__init__).
        boss.hp_bar.draw(boss.x + main_state.elf_move_window_x, boss.y + main_state.elf_move_window_y + 70)
        boss.hp_red.clip_draw(2, 2, int(60 * boss.hp / 3500), 12, boss.x + main_state.elf_move_window_x, boss.y + main_state.elf_move_window_y + 70)
class Boss:
    """Boss enemy: follows a fixed path (driven by IdleState) and takes
    damage from the player's projectiles and effects."""

    def __init__(self):
        # Spawn position at the left edge of the patrol path.
        self.x, self.y = 0, 720 - 320
        # Sprites (loaded in the same order as before).
        self.image = load_image('image\\boss.png')
        self.hp_bar = load_image('image\\hp_bar.png')
        self.hp_red = load_image('image\\hp_red.png')
        self.move = 1            # current waypoint leg, advanced by IdleState.do
        self.hp = 3500           # maximum hit points (drawn against in IdleState.draw)
        self.event_que = []
        # Enter the single behavior state immediately.
        self.cur_state = IdleState
        self.cur_state.enter(self, None)
        self.time = get_time()   # throttle timestamp for multi-hit effects
        self.sound = load_wav('sound\\dead.wav')
        self.sound.set_volume(46)

    def update(self):
        # Delegate per-frame logic to the current state.
        self.cur_state.do(self)

    def draw(self):
        # Delegate rendering to the current state.
        self.cur_state.draw(self)
"ruen346@gmail.com"
] | ruen346@gmail.com |
20a5389145ea522daccca65f7fb7d8b787f1b09e | 978248bf0f275ae688f194593aa32c267832b2b6 | /xlsxwriter/test/comparison/test_set_start_page01.py | 11f627dcef39de7dc1ca840d9031d251ff300970 | [
"BSD-2-Clause-Views"
] | permissive | satish1337/XlsxWriter | b0c216b91be1b74d6cac017a152023aa1d581de2 | 0ab9bdded4f750246c41a439f6a6cecaf9179030 | refs/heads/master | 2021-01-22T02:35:13.158752 | 2015-03-31T20:32:28 | 2015-03-31T20:32:28 | 33,300,989 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,189 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'set_start_page01.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {'xl/worksheets/sheet1.xml': ['<pageMargins']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with printer settings."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_start_page(1)
worksheet.set_paper(9)
worksheet.vertical_dpi = 200
worksheet.write('A1', 'Foo')
workbook.close()
self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
6d5ab6b38fbc7423f0f7c32c66fb29cfcc65b652 | a93909cc5935ba9c9d5cf25d12d7c772d8ca2aa0 | /logistic_regression_class.py | 77365ce781f703be6d28974bb8c99a289f834f0b | [] | no_license | weoqpur/torch_study | 6d7b78f4447b3010014aa144e670b945e658fea0 | 1708a8faa1f67142386dc6711fb183283f0555bc | refs/heads/main | 2023-05-30T19:48:25.358464 | 2021-06-23T12:14:59 | 2021-06-23T12:14:59 | 361,636,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,467 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
x_data = [[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]]
y_data = [[0], [0], [0], [1], [1], [1]]
x_train = torch.FloatTensor(x_data)
y_train = torch.FloatTensor(y_data)
class BinaryClassifier(nn.Module):
def __init__(self):
super(BinaryClassifier, self).__init__()
self.linear = nn.Linear(2, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
return self.sigmoid(self.linear(x))
model = BinaryClassifier()
# optimizer 설정
optimizer = optim.SGD(model.parameters(), lr=1)
nb_epochs = 1000
for epoch in range(nb_epochs + 1):
# H(x) 계산
hypothesis = model(x_train)
# cost 계산
cost = F.binary_cross_entropy(hypothesis, y_train)
# cost로 H(x) 개선
optimizer.zero_grad()
cost.backward()
optimizer.step()
# 20번마다 로그 출력
if epoch % 10 == 0:
prediction = hypothesis >= torch.FloatTensor([0.5]) # 예측값이 0.5를 넘으면 True로 간주
correct_prediction = prediction.float() == y_train # 실제값과 일치하는 경우만 True로 간주
accuracy = correct_prediction.sum().item() / len(correct_prediction) # 정확도를 계산
print('Epoch {:4d}/{} Cost: {:.6f} Accuracy {:2.2f}%'.format( # 각 에포크마다 정확도를 출력
epoch, nb_epochs, cost.item(), accuracy * 100,
)) | [
"tlsanruf123@gmail.com"
] | tlsanruf123@gmail.com |
ab1b83f541859e0497ec43adf826cb8f44c0793e | ef6229d281edecbea3faad37830cb1d452d03e5b | /ucsmsdk/mometa/sw/SwFcMon.py | 2bf6bd3af9a6b1ae66a826281d4f11a20c9017d0 | [
"Apache-2.0"
] | permissive | anoop1984/python_sdk | 0809be78de32350acc40701d6207631322851010 | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | refs/heads/master | 2020-12-31T00:18:57.415950 | 2016-04-26T17:39:38 | 2016-04-26T17:39:38 | 57,148,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,479 | py | """This module contains the general information for SwFcMon ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class SwFcMonConsts():
    """Auto-generated string constants for the swFcMon managed object.

    Values mirror the UCS XML API literals for the object's enumerated
    properties (adminState, fsmPrev, fsmRmtInvErrCode, fsmStamp, fsmStatus,
    hasLastDest, lifeCycle, switchId).  Do not edit by hand.
    """
    # adminState values
    ADMIN_STATE_DISABLED = "disabled"
    ADMIN_STATE_ENABLED = "enabled"
    # fsmPrev values (last completed FSM stage)
    FSM_PREV_DEPLOY_BEGIN = "DeployBegin"
    FSM_PREV_DEPLOY_FAIL = "DeployFail"
    FSM_PREV_DEPLOY_SUCCESS = "DeploySuccess"
    FSM_PREV_DEPLOY_UPDATE_FC_MON = "DeployUpdateFcMon"
    FSM_PREV_NOP = "nop"
    # fsmRmtInvErrCode values (remote invocation error codes)
    FSM_RMT_INV_ERR_CODE_ERR_2FA_AUTH_RETRY = "ERR-2fa-auth-retry"
    FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_FAILED = "ERR-ACTIVATE-failed"
    FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_IN_PROGRESS = "ERR-ACTIVATE-in-progress"
    FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_RETRY = "ERR-ACTIVATE-retry"
    FSM_RMT_INV_ERR_CODE_ERR_BIOS_TOKENS_OLD_BIOS = "ERR-BIOS-TOKENS-OLD-BIOS"
    FSM_RMT_INV_ERR_CODE_ERR_BIOS_TOKENS_OLD_CIMC = "ERR-BIOS-TOKENS-OLD-CIMC"
    FSM_RMT_INV_ERR_CODE_ERR_BIOS_NETWORK_BOOT_ORDER_NOT_FOUND = "ERR-BIOS-network-boot-order-not-found"
    FSM_RMT_INV_ERR_CODE_ERR_BOARDCTRLUPDATE_IGNORE = "ERR-BOARDCTRLUPDATE-ignore"
    FSM_RMT_INV_ERR_CODE_ERR_DIAG_CANCELLED = "ERR-DIAG-cancelled"
    FSM_RMT_INV_ERR_CODE_ERR_DIAG_FSM_RESTARTED = "ERR-DIAG-fsm-restarted"
    FSM_RMT_INV_ERR_CODE_ERR_DIAG_TEST_FAILED = "ERR-DIAG-test-failed"
    FSM_RMT_INV_ERR_CODE_ERR_DNLD_AUTHENTICATION_FAILURE = "ERR-DNLD-authentication-failure"
    FSM_RMT_INV_ERR_CODE_ERR_DNLD_HOSTKEY_MISMATCH = "ERR-DNLD-hostkey-mismatch"
    FSM_RMT_INV_ERR_CODE_ERR_DNLD_INVALID_IMAGE = "ERR-DNLD-invalid-image"
    FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_FILE = "ERR-DNLD-no-file"
    FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_SPACE = "ERR-DNLD-no-space"
    FSM_RMT_INV_ERR_CODE_ERR_DNLD_USB_UNMOUNTED = "ERR-DNLD-usb-unmounted"
    FSM_RMT_INV_ERR_CODE_ERR_DNS_DELETE_ERROR = "ERR-DNS-delete-error"
    FSM_RMT_INV_ERR_CODE_ERR_DNS_GET_ERROR = "ERR-DNS-get-error"
    FSM_RMT_INV_ERR_CODE_ERR_DNS_SET_ERROR = "ERR-DNS-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_IN_PROGRESS = "ERR-Diagnostics-in-progress"
    FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_MEMTEST_IN_PROGRESS = "ERR-Diagnostics-memtest-in-progress"
    FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_NETWORK_IN_PROGRESS = "ERR-Diagnostics-network-in-progress"
    FSM_RMT_INV_ERR_CODE_ERR_FILTER_ILLEGAL_FORMAT = "ERR-FILTER-illegal-format"
    FSM_RMT_INV_ERR_CODE_ERR_FSM_NO_SUCH_STATE = "ERR-FSM-no-such-state"
    FSM_RMT_INV_ERR_CODE_ERR_HOST_FRU_IDENTITY_MISMATCH = "ERR-HOST-fru-identity-mismatch"
    FSM_RMT_INV_ERR_CODE_ERR_HTTP_SET_ERROR = "ERR-HTTP-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_HTTPS_SET_ERROR = "ERR-HTTPS-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_IBMC_ANALYZE_RESULTS = "ERR-IBMC-analyze-results"
    FSM_RMT_INV_ERR_CODE_ERR_IBMC_CONNECT_ERROR = "ERR-IBMC-connect-error"
    FSM_RMT_INV_ERR_CODE_ERR_IBMC_CONNECTOR_INFO_RETRIEVAL_ERROR = "ERR-IBMC-connector-info-retrieval-error"
    FSM_RMT_INV_ERR_CODE_ERR_IBMC_FRU_RETRIEVAL_ERROR = "ERR-IBMC-fru-retrieval-error"
    FSM_RMT_INV_ERR_CODE_ERR_IBMC_INVALID_END_POINT_CONFIG = "ERR-IBMC-invalid-end-point-config"
    FSM_RMT_INV_ERR_CODE_ERR_IBMC_RESULTS_NOT_READY = "ERR-IBMC-results-not-ready"
    FSM_RMT_INV_ERR_CODE_ERR_MAX_SUBSCRIPTIONS_ALLOWED_ERROR = "ERR-MAX-subscriptions-allowed-error"
    FSM_RMT_INV_ERR_CODE_ERR_MO_CONFIG_CHILD_OBJECT_CANT_BE_CONFIGURED = "ERR-MO-CONFIG-child-object-cant-be-configured"
    FSM_RMT_INV_ERR_CODE_ERR_MO_META_NO_SUCH_OBJECT_CLASS = "ERR-MO-META-no-such-object-class"
    FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_NO_SUCH_PROPERTY = "ERR-MO-PROPERTY-no-such-property"
    FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_VALUE_OUT_OF_RANGE = "ERR-MO-PROPERTY-value-out-of-range"
    FSM_RMT_INV_ERR_CODE_ERR_MO_ACCESS_DENIED = "ERR-MO-access-denied"
    FSM_RMT_INV_ERR_CODE_ERR_MO_DELETION_RULE_VIOLATION = "ERR-MO-deletion-rule-violation"
    FSM_RMT_INV_ERR_CODE_ERR_MO_DUPLICATE_OBJECT = "ERR-MO-duplicate-object"
    FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CONTAINMENT = "ERR-MO-illegal-containment"
    FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CREATION = "ERR-MO-illegal-creation"
    FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_ITERATOR_STATE = "ERR-MO-illegal-iterator-state"
    FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_OBJECT_LIFECYCLE_TRANSITION = "ERR-MO-illegal-object-lifecycle-transition"
    FSM_RMT_INV_ERR_CODE_ERR_MO_NAMING_RULE_VIOLATION = "ERR-MO-naming-rule-violation"
    FSM_RMT_INV_ERR_CODE_ERR_MO_OBJECT_NOT_FOUND = "ERR-MO-object-not-found"
    FSM_RMT_INV_ERR_CODE_ERR_MO_RESOURCE_ALLOCATION = "ERR-MO-resource-allocation"
    FSM_RMT_INV_ERR_CODE_ERR_NTP_DELETE_ERROR = "ERR-NTP-delete-error"
    FSM_RMT_INV_ERR_CODE_ERR_NTP_GET_ERROR = "ERR-NTP-get-error"
    FSM_RMT_INV_ERR_CODE_ERR_NTP_SET_ERROR = "ERR-NTP-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_POWER_CAP_UNSUPPORTED = "ERR-POWER-CAP-UNSUPPORTED"
    FSM_RMT_INV_ERR_CODE_ERR_POWER_PROFILE_IN_PROGRESS = "ERR-POWER-PROFILE-IN-PROGRESS"
    FSM_RMT_INV_ERR_CODE_ERR_SERVER_MIS_CONNECT = "ERR-SERVER-mis-connect"
    FSM_RMT_INV_ERR_CODE_ERR_SWITCH_INVALID_IF_CONFIG = "ERR-SWITCH-invalid-if-config"
    FSM_RMT_INV_ERR_CODE_ERR_TOKEN_REQUEST_DENIED = "ERR-TOKEN-request-denied"
    FSM_RMT_INV_ERR_CODE_ERR_UNABLE_TO_FETCH_BIOS_SETTINGS = "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS"
    FSM_RMT_INV_ERR_CODE_ERR_UPDATE_FAILED = "ERR-UPDATE-failed"
    FSM_RMT_INV_ERR_CODE_ERR_UPDATE_IN_PROGRESS = "ERR-UPDATE-in-progress"
    FSM_RMT_INV_ERR_CODE_ERR_UPDATE_RETRY = "ERR-UPDATE-retry"
    FSM_RMT_INV_ERR_CODE_ERR_AAA_CONFIG_MODIFY_ERROR = "ERR-aaa-config-modify-error"
    FSM_RMT_INV_ERR_CODE_ERR_ACCT_REALM_SET_ERROR = "ERR-acct-realm-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_ADMIN_PASSWD_SET = "ERR-admin-passwd-set"
    FSM_RMT_INV_ERR_CODE_ERR_AUTH_ISSUE = "ERR-auth-issue"
    FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_GET_ERROR = "ERR-auth-realm-get-error"
    FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_SET_ERROR = "ERR-auth-realm-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_AUTHENTICATION = "ERR-authentication"
    FSM_RMT_INV_ERR_CODE_ERR_AUTHORIZATION_REQUIRED = "ERR-authorization-required"
    FSM_RMT_INV_ERR_CODE_ERR_CLI_SESSION_LIMIT_REACHED = "ERR-cli-session-limit-reached"
    FSM_RMT_INV_ERR_CODE_ERR_CREATE_KEYRING = "ERR-create-keyring"
    FSM_RMT_INV_ERR_CODE_ERR_CREATE_LOCALE = "ERR-create-locale"
    FSM_RMT_INV_ERR_CODE_ERR_CREATE_ROLE = "ERR-create-role"
    FSM_RMT_INV_ERR_CODE_ERR_CREATE_TP = "ERR-create-tp"
    FSM_RMT_INV_ERR_CODE_ERR_CREATE_USER = "ERR-create-user"
    FSM_RMT_INV_ERR_CODE_ERR_DELETE_LOCALE = "ERR-delete-locale"
    FSM_RMT_INV_ERR_CODE_ERR_DELETE_ROLE = "ERR-delete-role"
    FSM_RMT_INV_ERR_CODE_ERR_DELETE_SESSION = "ERR-delete-session"
    FSM_RMT_INV_ERR_CODE_ERR_DELETE_USER = "ERR-delete-user"
    FSM_RMT_INV_ERR_CODE_ERR_DOWNGRADE_FAIL = "ERR-downgrade-fail"
    FSM_RMT_INV_ERR_CODE_ERR_EFI_DIAGNOSTICS_IN_PROGRESS = "ERR-efi-Diagnostics--in-progress"
    FSM_RMT_INV_ERR_CODE_ERR_ENABLE_MGMT_CONN = "ERR-enable-mgmt-conn"
    FSM_RMT_INV_ERR_CODE_ERR_EP_SET_ERROR = "ERR-ep-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_GET_MAX_HTTP_USER_SESSIONS = "ERR-get-max-http-user-sessions"
    FSM_RMT_INV_ERR_CODE_ERR_HTTP_INITIALIZING = "ERR-http-initializing"
    FSM_RMT_INV_ERR_CODE_ERR_INSUFFICIENTLY_EQUIPPED = "ERR-insufficiently-equipped"
    FSM_RMT_INV_ERR_CODE_ERR_INTERNAL_ERROR = "ERR-internal-error"
    FSM_RMT_INV_ERR_CODE_ERR_LDAP_DELETE_ERROR = "ERR-ldap-delete-error"
    FSM_RMT_INV_ERR_CODE_ERR_LDAP_GET_ERROR = "ERR-ldap-get-error"
    FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_MODIFY_ERROR = "ERR-ldap-group-modify-error"
    FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_SET_ERROR = "ERR-ldap-group-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_LDAP_SET_ERROR = "ERR-ldap-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_LOCALE_SET_ERROR = "ERR-locale-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_MAX_USERID_SESSIONS_REACHED = "ERR-max-userid-sessions-reached"
    FSM_RMT_INV_ERR_CODE_ERR_MISSING_METHOD = "ERR-missing-method"
    FSM_RMT_INV_ERR_CODE_ERR_MODIFY_LOCALE = "ERR-modify-locale"
    FSM_RMT_INV_ERR_CODE_ERR_MODIFY_ROLE = "ERR-modify-role"
    FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER = "ERR-modify-user"
    FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_LOCALE = "ERR-modify-user-locale"
    FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_ROLE = "ERR-modify-user-role"
    FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_MODIFY_ERROR = "ERR-provider-group-modify-error"
    FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_SET_ERROR = "ERR-provider-group-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GET_ERROR = "ERR-radius-get-error"
    FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GLOBAL_SET_ERROR = "ERR-radius-global-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GROUP_SET_ERROR = "ERR-radius-group-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_RADIUS_SET_ERROR = "ERR-radius-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_REQUEST_TIMEOUT = "ERR-request-timeout"
    FSM_RMT_INV_ERR_CODE_ERR_RESET_ADAPTER = "ERR-reset-adapter"
    FSM_RMT_INV_ERR_CODE_ERR_ROLE_SET_ERROR = "ERR-role-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_SECONDARY_NODE = "ERR-secondary-node"
    FSM_RMT_INV_ERR_CODE_ERR_SERVICE_NOT_READY = "ERR-service-not-ready"
    FSM_RMT_INV_ERR_CODE_ERR_SESSION_CACHE_FULL = "ERR-session-cache-full"
    FSM_RMT_INV_ERR_CODE_ERR_SESSION_NOT_FOUND = "ERR-session-not-found"
    FSM_RMT_INV_ERR_CODE_ERR_SET_NETWORK = "ERR-set-network"
    FSM_RMT_INV_ERR_CODE_ERR_SET_PASSWORD_STRENGTH_CHECK = "ERR-set-password-strength-check"
    FSM_RMT_INV_ERR_CODE_ERR_SET_PORT_CHANNEL = "ERR-set-port-channel"
    FSM_RMT_INV_ERR_CODE_ERR_STORE_PRE_LOGIN_BANNER_MSG = "ERR-store-pre-login-banner-msg"
    FSM_RMT_INV_ERR_CODE_ERR_TACACS_ENABLE_ERROR = "ERR-tacacs-enable-error"
    FSM_RMT_INV_ERR_CODE_ERR_TACACS_GLOBAL_SET_ERROR = "ERR-tacacs-global-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_TACACS_GROUP_SET_ERROR = "ERR-tacacs-group-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_TACACS_PLUS_GET_ERROR = "ERR-tacacs-plus-get-error"
    FSM_RMT_INV_ERR_CODE_ERR_TACACS_SET_ERROR = "ERR-tacacs-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_TEST_ERROR_1 = "ERR-test-error-1"
    FSM_RMT_INV_ERR_CODE_ERR_TEST_ERROR_2 = "ERR-test-error-2"
    FSM_RMT_INV_ERR_CODE_ERR_TIMEZONE_SET_ERROR = "ERR-timezone-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_USER_ACCOUNT_EXPIRED = "ERR-user-account-expired"
    FSM_RMT_INV_ERR_CODE_ERR_USER_SET_ERROR = "ERR-user-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_XML_PARSE_ERROR = "ERR-xml-parse-error"
    FSM_RMT_INV_ERR_CODE_NONE = "none"
    # fsmStamp value
    FSM_STAMP_NEVER = "never"
    # fsmStatus values (current FSM stage)
    FSM_STATUS_DEPLOY_BEGIN = "DeployBegin"
    FSM_STATUS_DEPLOY_FAIL = "DeployFail"
    FSM_STATUS_DEPLOY_SUCCESS = "DeploySuccess"
    FSM_STATUS_DEPLOY_UPDATE_FC_MON = "DeployUpdateFcMon"
    FSM_STATUS_NOP = "nop"
    # hasLastDest values (boolean-like literals used by the XML API)
    HAS_LAST_DEST_FALSE = "false"
    HAS_LAST_DEST_NO = "no"
    HAS_LAST_DEST_TRUE = "true"
    HAS_LAST_DEST_YES = "yes"
    # lifeCycle values
    LIFE_CYCLE_DELETED = "deleted"
    LIFE_CYCLE_NEW = "new"
    LIFE_CYCLE_NORMAL = "normal"
    # switchId values (fabric interconnect identifier)
    SWITCH_ID_A = "A"
    SWITCH_ID_B = "B"
    SWITCH_ID_NONE = "NONE"
class SwFcMon(ManagedObject):
    """This is SwFcMon class.

    Fibre Channel monitoring-session managed object (XML class id
    ``swFcMon``, relative name ``mon-[name]`` — see ``mo_meta`` below).
    The mo_meta/prop_meta/prop_map tables follow the generated-SDK
    managed-object layout — presumably produced by a code generator;
    TODO confirm before hand-editing.
    """

    # Enumeration constants (FSM error codes, switch ids, ...) declared above this class.
    consts = SwFcMonConsts()
    # Property substituted into the rn pattern "mon-[name]".
    naming_props = set([u'name'])

    # Class metadata: XML ids, rn pattern, first supported version, access flags and
    # (presumably) parent/child managed-object class lists — see MoMeta for the exact
    # positional meaning; not visible from this module.
    mo_meta = MoMeta("SwFcMon", "swFcMon", "mon-[name]", VersionMeta.Version141i, "InputOutput", 0xff, [], ["read-only"], [u'swFcSanMon'], [u'dcxVc', u'eventInst', u'faultInst', u'swEthMonDestEp', u'swFcMonDestEp', u'swFcMonFsm', u'swFcMonFsmTask', u'swFcMonSrcEp', u'swFcSanPc', u'swFcoeSanPc', u'swSubGroup', u'swVsan'], ["Get"])

    # Per-property metadata (XML attribute name, type, version, access, mask bit,
    # length limits, regex, allowed enum values, numeric ranges) — argument order
    # follows the MoPropertyMeta signature, which is defined elsewhere.
    prop_meta = {
        "admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version141i, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["disabled", "enabled"], []),
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, 0x4, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
        "fsm_descr": MoPropertyMeta("fsm_descr", "fsmDescr", "string", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
        "fsm_prev": MoPropertyMeta("fsm_prev", "fsmPrev", "string", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, None, None, None, None, ["DeployBegin", "DeployFail", "DeploySuccess", "DeployUpdateFcMon", "nop"], []),
        "fsm_progr": MoPropertyMeta("fsm_progr", "fsmProgr", "byte", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, None, None, None, None, [], ["0-100"]),
        "fsm_rmt_inv_err_code": MoPropertyMeta("fsm_rmt_inv_err_code", "fsmRmtInvErrCode", "string", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, None, None, None, None, ["ERR-2fa-auth-retry", "ERR-ACTIVATE-failed", "ERR-ACTIVATE-in-progress", "ERR-ACTIVATE-retry", "ERR-BIOS-TOKENS-OLD-BIOS", "ERR-BIOS-TOKENS-OLD-CIMC", "ERR-BIOS-network-boot-order-not-found", "ERR-BOARDCTRLUPDATE-ignore", "ERR-DIAG-cancelled", "ERR-DIAG-fsm-restarted", "ERR-DIAG-test-failed", "ERR-DNLD-authentication-failure", "ERR-DNLD-hostkey-mismatch", "ERR-DNLD-invalid-image", "ERR-DNLD-no-file", "ERR-DNLD-no-space", "ERR-DNLD-usb-unmounted", "ERR-DNS-delete-error", "ERR-DNS-get-error", "ERR-DNS-set-error", "ERR-Diagnostics-in-progress", "ERR-Diagnostics-memtest-in-progress", "ERR-Diagnostics-network-in-progress", "ERR-FILTER-illegal-format", "ERR-FSM-no-such-state", "ERR-HOST-fru-identity-mismatch", "ERR-HTTP-set-error", "ERR-HTTPS-set-error", "ERR-IBMC-analyze-results", "ERR-IBMC-connect-error", "ERR-IBMC-connector-info-retrieval-error", "ERR-IBMC-fru-retrieval-error", "ERR-IBMC-invalid-end-point-config", "ERR-IBMC-results-not-ready", "ERR-MAX-subscriptions-allowed-error", "ERR-MO-CONFIG-child-object-cant-be-configured", "ERR-MO-META-no-such-object-class", "ERR-MO-PROPERTY-no-such-property", "ERR-MO-PROPERTY-value-out-of-range", "ERR-MO-access-denied", "ERR-MO-deletion-rule-violation", "ERR-MO-duplicate-object", "ERR-MO-illegal-containment", "ERR-MO-illegal-creation", "ERR-MO-illegal-iterator-state", "ERR-MO-illegal-object-lifecycle-transition", "ERR-MO-naming-rule-violation", "ERR-MO-object-not-found", "ERR-MO-resource-allocation", "ERR-NTP-delete-error", "ERR-NTP-get-error", "ERR-NTP-set-error", "ERR-POWER-CAP-UNSUPPORTED", "ERR-POWER-PROFILE-IN-PROGRESS", "ERR-SERVER-mis-connect", "ERR-SWITCH-invalid-if-config", "ERR-TOKEN-request-denied", "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS", "ERR-UPDATE-failed", "ERR-UPDATE-in-progress", "ERR-UPDATE-retry", "ERR-aaa-config-modify-error", 
        "ERR-acct-realm-set-error", "ERR-admin-passwd-set", "ERR-auth-issue", "ERR-auth-realm-get-error", "ERR-auth-realm-set-error", "ERR-authentication", "ERR-authorization-required", "ERR-cli-session-limit-reached", "ERR-create-keyring", "ERR-create-locale", "ERR-create-role", "ERR-create-tp", "ERR-create-user", "ERR-delete-locale", "ERR-delete-role", "ERR-delete-session", "ERR-delete-user", "ERR-downgrade-fail", "ERR-efi-Diagnostics--in-progress", "ERR-enable-mgmt-conn", "ERR-ep-set-error", "ERR-get-max-http-user-sessions", "ERR-http-initializing", "ERR-insufficiently-equipped", "ERR-internal-error", "ERR-ldap-delete-error", "ERR-ldap-get-error", "ERR-ldap-group-modify-error", "ERR-ldap-group-set-error", "ERR-ldap-set-error", "ERR-locale-set-error", "ERR-max-userid-sessions-reached", "ERR-missing-method", "ERR-modify-locale", "ERR-modify-role", "ERR-modify-user", "ERR-modify-user-locale", "ERR-modify-user-role", "ERR-provider-group-modify-error", "ERR-provider-group-set-error", "ERR-radius-get-error", "ERR-radius-global-set-error", "ERR-radius-group-set-error", "ERR-radius-set-error", "ERR-request-timeout", "ERR-reset-adapter", "ERR-role-set-error", "ERR-secondary-node", "ERR-service-not-ready", "ERR-session-cache-full", "ERR-session-not-found", "ERR-set-network", "ERR-set-password-strength-check", "ERR-set-port-channel", "ERR-store-pre-login-banner-msg", "ERR-tacacs-enable-error", "ERR-tacacs-global-set-error", "ERR-tacacs-group-set-error", "ERR-tacacs-plus-get-error", "ERR-tacacs-set-error", "ERR-test-error-1", "ERR-test-error-2", "ERR-timezone-set-error", "ERR-user-account-expired", "ERR-user-set-error", "ERR-xml-parse-error", "none"], ["0-4294967295"]),
        "fsm_rmt_inv_err_descr": MoPropertyMeta("fsm_rmt_inv_err_descr", "fsmRmtInvErrDescr", "string", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, None, 0, 510, None, [], []),
        "fsm_rmt_inv_rslt": MoPropertyMeta("fsm_rmt_inv_rslt", "fsmRmtInvRslt", "string", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, None, None, None, r"""((defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout),){0,32}(defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout){0,1}""", [], []),
        "fsm_stage_descr": MoPropertyMeta("fsm_stage_descr", "fsmStageDescr", "string", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
        "fsm_stamp": MoPropertyMeta("fsm_stamp", "fsmStamp", "string", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", ["never"], []),
        "fsm_status": MoPropertyMeta("fsm_status", "fsmStatus", "string", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, None, None, None, None, ["DeployBegin", "DeployFail", "DeploySuccess", "DeployUpdateFcMon", "nop"], []),
        "fsm_try": MoPropertyMeta("fsm_try", "fsmTry", "byte", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
        "has_last_dest": MoPropertyMeta("has_last_dest", "hasLastDest", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
        "life_cycle": MoPropertyMeta("life_cycle", "lifeCycle", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, None, ["deleted", "new", "normal"], []),
        "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version141i, MoPropertyMeta.NAMING, 0x10, None, None, r"""[\-\.:_a-zA-Z0-9]{1,16}""", [], []),
        "peer_dn": MoPropertyMeta("peer_dn", "peerDn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, 0x20, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "session": MoPropertyMeta("session", "session", "uint", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, None, [], ["1-255"]),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version141i, MoPropertyMeta.READ_WRITE, 0x40, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "switch_id": MoPropertyMeta("switch_id", "switchId", "string", VersionMeta.Version141i, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["A", "B", "NONE"], []),
        "transport": MoPropertyMeta("transport", "transport", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|ether|dce|fc),){0,4}(defaultValue|unknown|ether|dce|fc){0,1}""", [], []),
        "type": MoPropertyMeta("type", "type", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|lan|san|ipc),){0,4}(defaultValue|unknown|lan|san|ipc){0,1}""", [], []),
    }

    # XML attribute name -> Python attribute name.
    prop_map = {
        "adminState": "admin_state", 
        "childAction": "child_action", 
        "dn": "dn", 
        "fsmDescr": "fsm_descr", 
        "fsmPrev": "fsm_prev", 
        "fsmProgr": "fsm_progr", 
        "fsmRmtInvErrCode": "fsm_rmt_inv_err_code", 
        "fsmRmtInvErrDescr": "fsm_rmt_inv_err_descr", 
        "fsmRmtInvRslt": "fsm_rmt_inv_rslt", 
        "fsmStageDescr": "fsm_stage_descr", 
        "fsmStamp": "fsm_stamp", 
        "fsmStatus": "fsm_status", 
        "fsmTry": "fsm_try", 
        "hasLastDest": "has_last_dest", 
        "lifeCycle": "life_cycle", 
        "name": "name", 
        "peerDn": "peer_dn", 
        "rn": "rn", 
        "sacl": "sacl", 
        "session": "session", 
        "status": "status", 
        "switchId": "switch_id", 
        "transport": "transport", 
        "type": "type", 
    }

    def __init__(self, parent_mo_or_dn, name, **kwargs):
        """Create a SwFcMon under *parent_mo_or_dn*, named by the naming prop *name*."""
        self._dirty_mask = 0
        self.name = name
        # Remaining properties start unset (None) until assigned or fetched.
        self.admin_state = None
        self.child_action = None
        self.fsm_descr = None
        self.fsm_prev = None
        self.fsm_progr = None
        self.fsm_rmt_inv_err_code = None
        self.fsm_rmt_inv_err_descr = None
        self.fsm_rmt_inv_rslt = None
        self.fsm_stage_descr = None
        self.fsm_stamp = None
        self.fsm_status = None
        self.fsm_try = None
        self.has_last_dest = None
        self.life_cycle = None
        self.peer_dn = None
        self.sacl = None
        self.session = None
        self.status = None
        self.switch_id = None
        self.transport = None
        self.type = None

        ManagedObject.__init__(self, "SwFcMon", parent_mo_or_dn, **kwargs)
| [
"test@cisco.com"
] | test@cisco.com |
034ebb7456f82467d4f6eac71983f9f9b364a2db | 306045a1cd0fb362f46d4db88311f442311bbc16 | /examples/idioms/programs/002.0011-print-hello-10-times.py | 3793da0117c8784ebd602f2ed2ba9a02168556cd | [
"MIT"
] | permissive | laowantong/paroxython | 608c9010a2b57c8f7ed5ea309e24035c2b2e44a3 | a6d45829dd34f046d20e5bae780fbf7af59429cb | refs/heads/master | 2023-09-01T05:18:29.687916 | 2022-11-07T17:40:31 | 2022-11-07T17:40:31 | 220,820,424 | 36 | 5 | MIT | 2023-09-08T04:44:58 | 2019-11-10T16:54:56 | Python | UTF-8 | Python | false | false | 307 | py | """Print Hello 10 times.
Loop to execute some code a constant number of times
Source: programming-idioms.org
"""
# Implementation author:
# Created on 2015-11-30T12:37:23.746597Z
# Last modified on 2019-09-27T02:17:54.987284Z
# Version 2
# Indention is mandatory
# Repeat the greeting a fixed number of times; the body must be indented.
for _ in range(10):
    print("Hello")
| [
"laowantong@users.noreply.github.com"
] | laowantong@users.noreply.github.com |
e3bd3b68698ff7bf980750e374ea63e79a56f8fb | 981a5d228f47ff8816835df64dd157dfd9a4f2f8 | /redison/__init__.py | 9c4ba1c2c5ca3bd0258977dbb37593ae108cdd55 | [
"MIT"
] | permissive | JeanMaximilienCadic/redison | 956cea36e427e3983f1450550fdb44bb7e62d493 | 40ddf8dd6722bfd64903ecd14100d223691e8d49 | refs/heads/master | 2023-04-29T02:41:18.990034 | 2019-12-01T16:31:13 | 2019-12-01T16:31:13 | 200,581,772 | 4 | 0 | MIT | 2023-04-25T03:37:09 | 2019-08-05T04:23:55 | Python | UTF-8 | Python | false | false | 57 | py | from .redis_object import RedisObject
__version__="1.0a5" | [
"j.cadic@9dw-lab.jp"
] | j.cadic@9dw-lab.jp |
8dfe4354130dd664527f1ddd3ce0a81ac5a51536 | 3c9103046db53185cfedc1598933a790718e4d57 | /pygame_assets/tests/test_loaders.py | e8d9dde92a8a2bda26361859538fcfebf686ce40 | [
"MIT"
] | permissive | florimondmanca/pygame-assets | 9aabe7e482e72c37a95f9283f6b67e47acadf941 | 1ad7870800866d2b1b287d8063bd10edd99fd521 | refs/heads/master | 2021-08-19T12:46:04.149161 | 2017-11-25T12:14:06 | 2017-11-25T12:14:06 | 110,216,972 | 3 | 0 | null | 2017-11-11T12:28:38 | 2017-11-10T07:31:56 | Python | UTF-8 | Python | false | false | 5,844 | py | """Tests for the loaders API."""
import unittest
import pygame
from pygame_assets.loaders import image as load_image
from pygame_assets.loaders import image_with_rect as load_image_with_rect
from pygame_assets.loaders import sound as load_sound
from pygame_assets.loaders import music as load_music
from pygame_assets.loaders import font as load_font
from pygame_assets.loaders import freetype as load_freetype
from pygame_assets.configure import get_config
from .utils import TestCase, change_config
class LoaderTestCase(TestCase):
    """Base test case for exercising a single pygame_assets loader.

    Subclasses configure two class attributes:

    loader : function
        The pygame_assets loader under test.
    filename : str
        Optional default asset name; when set, the .asset() shortcut
        loads that asset.
    """

    filename = None
    loader = None

    @classmethod
    def asset(cls, *args, **kwargs):
        """Load the class' default asset, forwarding extra arguments to the loader."""
        if cls.filename is not None:
            return cls.loader(cls.filename, *args, **kwargs)
        raise ValueError('Could not get asset: no filename defined.')
class TestImageLoader(LoaderTestCase):
    """Unit tests for the image loader."""

    loader = load_image
    filename = 'test-image.png'

    @classmethod
    def setUpClass(cls):
        pygame.init()
        # Images can only be loaded/converted once a display mode exists;
        # pygame_assets inherits this pygame constraint.
        cls.screen = pygame.display.set_mode((800, 600))

    def test_load_image_from_path(self):
        loaded = self.asset()
        self.assertIsInstance(loaded, pygame.Surface)

    def test_image_with_alpha_keeps_alpha(self):
        with_alpha = load_image('test-image-with-alpha.png')
        self.assertIsNotNone(with_alpha.get_alpha())

    def test_image_without_alpha_has_no_alpha(self):
        without_alpha = load_image('test-image-without-alpha.jpg')
        self.assertIsNone(without_alpha.get_alpha())

    def test_force_convert_alpha(self):
        # Both explicit values must simply be accepted.
        for force in (True, False):
            self.asset(convert_alpha=force)

    def test_alpha_is_kwarg_only(self):
        with self.assertRaises(TypeError):
            self.asset(True)
class TestImageWithRectLoader(LoaderTestCase):
    """Unit tests for the image_with_rect loader."""

    loader = load_image_with_rect
    filename = 'test-image.png'

    @classmethod
    def setUpClass(cls):
        pygame.init()
        # A display surface must exist before images can be converted;
        # pygame_assets inherits this pygame constraint.
        cls.screen = pygame.display.set_mode((800, 600))

    def test_load_image_with_rect(self):
        surface, bounds = self.asset()
        self.assertIsInstance(surface, pygame.Surface)
        self.assertIsInstance(bounds, pygame.Rect)
class TestSoundLoader(LoaderTestCase):
    """Unit tests for the sound loader."""

    loader = load_sound
    filename = 'test-sound.wav'

    @classmethod
    def setUpClass(cls):
        pygame.mixer.init()

    def test_load_sound_from_path(self):
        clip = self.asset()
        self.assertIsInstance(clip, pygame.mixer.Sound)

    def test_set_volume_when_loading(self):
        clip = self.asset(volume=0.5)
        self.assertEqual(clip.get_volume(), 0.5)

    def test_volume_is_kwarg_only(self):
        with self.assertRaises(TypeError):
            self.asset(0.5)
class TestMusicLoader(LoaderTestCase):
    """Unit tests for the music loader."""

    loader = load_music
    filename = 'test-sound.wav'

    @classmethod
    def setUpClass(cls):
        pygame.mixer.init()

    def test_dir_is_sound(self):
        # Music assets are looked up in the 'sound' directory.
        self.assertListEqual(get_config().dirs['music'], ['sound'])

    def test_load_music_from_path(self):
        self.assertFalse(pygame.mixer.music.get_busy())
        self.assertIsNone(self.asset())
        # Loading must not start playback.
        self.assertFalse(pygame.mixer.music.get_busy())

    def test_set_volume_when_loading(self):
        self.asset(volume=0.5)
        self.assertEqual(pygame.mixer.music.get_volume(), 0.5)

    def test_volume_is_kwarg_only(self):
        with self.assertRaises(TypeError):
            self.asset(0.5)
class TestFontLoader(LoaderTestCase):
    """Unit tests for the font loader."""

    filename = 'bebas-neue.otf'
    loader = load_font

    @classmethod
    def setUpClass(cls):
        pygame.font.init()

    def test_load_font_from_path(self):
        self.assertIsInstance(self.asset(), pygame.font.Font)

    def test_load_with_size(self):
        sized = self.asset(size=40)
        # The rendered height only approximates the requested point size.
        self.assertAlmostEqual(sized.get_height(), 40, delta=10)

    def test_default_size_is_20(self):
        self.assertEqual(get_config().default_font_size, 20)
        self.assertAlmostEqual(self.asset().get_height(), 20, delta=10)

    def test_default_change_default_size(self):
        with change_config('default_font_size') as config:
            config.default_font_size = 60
            self.assertAlmostEqual(self.asset().get_height(), 60, delta=15)
class TestFreetypeFontLoader(LoaderTestCase):
    """Unit tests for the freetype font loader."""

    filename = 'bebas-neue.otf'
    loader = load_freetype

    @classmethod
    def setUpClass(cls):
        pygame.font.init()

    def test_dir_is_font(self):
        # Freetype assets are looked up in the 'font' directory.
        self.assertListEqual(get_config().dirs['freetype'], ['font'])

    def test_load_font_from_path(self):
        self.assertIsInstance(self.asset(), pygame.freetype.Font)

    def test_load_with_size(self):
        self.assertEqual(self.asset(size=40).size, 40)

    def test_default_size_is_20(self):
        self.assertEqual(get_config().default_font_size, 20)
        self.assertEqual(self.asset().size, 20)

    def test_change_default_size(self):
        with change_config('default_font_size') as config:
            config.default_font_size = 60
            self.assertEqual(self.asset().size, 60)
# Allow running this test module directly (python test_loaders.py).
if __name__ == '__main__':
    unittest.main()
| [
"florimond.manca@gmail.com"
] | florimond.manca@gmail.com |
984b6705b695b164abcf50dd6f4dead5248de09d | 8bcf0ef8dfacf5b8cca65b80779b2fbf1a4f7f84 | /coffeeapi/level3/tests/test_api_delete.py | 99d1932e1c5cc0b42d52de5ff231babb4dc02738 | [] | no_license | luiz158/design-api | a953ad1177def119bb690fd89254fbc22a03bbe4 | b8c2a4beaf5d5058cbd988384bd7737be84a3bc6 | refs/heads/master | 2023-06-16T07:46:13.449127 | 2021-07-17T20:29:37 | 2021-07-17T20:29:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | from http import HTTPStatus
import pytest
from coffeeapi.level3.domain import Status
def test_delete_success(apiclient, onecoffee):
    """Cancelling an open order answers 204 and soft-deletes it."""
    response = apiclient.delete('/v3/order/1')
    assert response.status_code == HTTPStatus.NO_CONTENT
    # The order is kept (soft delete) but flagged as cancelled.
    assert len(onecoffee.orders) == 1
    assert onecoffee.read(1).is_cancelled()
def test_delete_not_found(apiclient, onecoffee):
    """Deleting an unknown order id answers 404."""
    missing_url = '/v3/order/404'
    assert apiclient.delete(missing_url).status_code == HTTPStatus.NOT_FOUND
def test_delete_conflict(apiclient, onecoffee):
    """A paid order can no longer be cancelled: the API answers 409."""
    onecoffee.read(1).status = Status.Paid
    response = apiclient.delete('/v3/order/1')
    assert response.status_code == HTTPStatus.CONFLICT
"virb30@gmail.com"
] | virb30@gmail.com |
18c381de7282cb9e143b3c630f47752bc1dca908 | 894b8a99a3e05dda63ff156d9a2f3ce81f25c3ba | /imix/data/reader/textvqa_reader.py | 984acecbce85bfcc240b0181dd9e58d455efa3cc | [
"Apache-2.0"
] | permissive | jjInsper/iMIX | e5e46c580e2925fb94a2571c25777ce504ffab14 | 99898de97ef8b45462ca1d6bf2542e423a73d769 | refs/heads/master | 2023-08-08T01:24:47.161948 | 2021-09-16T09:35:35 | 2021-09-16T09:35:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,379 | py | from ..utils.stream import ItemFeature
from .base_reader import IMIXDataReader
from imix.utils.common_function import update_d1_with_d2
class TextVQAReader(IMIXDataReader):
    """Reader that merges TextVQA annotations with their feature records."""

    def __init__(self, cfg):
        super().__init__(cfg)
        # assert self.default_feature, ('Not support non-default features now.')

    def __len__(self):
        return len(self.mix_annotations)

    def __getitem__(self, idx):
        """Assemble one ItemFeature from the idx-th annotation and its features."""
        annotation = self.mix_annotations[idx]
        region_feature = self.feature_obj[idx]

        item = ItemFeature(annotation)
        item.error = False
        item.tokens = annotation['question_tokens']
        item.img_id = annotation['image_id']
        update_d1_with_d2(d1=item, d2=region_feature)

        global_feature = {}
        if self.global_feature_obj:
            global_feature = self.global_feature_obj[idx]
            # Rename 'features' so it does not clash with the region features.
            global_feature.update({'features_global': global_feature.pop('features')})
            update_d1_with_d2(d1=item, d2=global_feature)

        ocr_feature = {}
        if self.ocr_feature_obj:
            ocr_feature = self.ocr_feature_obj[idx]
            ocr_feature.update({'features_ocr': ocr_feature.pop('features')})
            update_d1_with_d2(d1=item, d2=ocr_feature)

        # Any feature lookup that yielded None marks the sample as faulty.
        item.error = None in (region_feature, global_feature, ocr_feature)
        return item
| [
"hsslab.inspur@gmail.com"
] | hsslab.inspur@gmail.com |
08d782838db68810147ca27d62dcd4ca28c26ec9 | e81d274d6a1bcabbe7771612edd43b42c0d48197 | /Django/day76(中间件)/demo/webapp/user/views.py | 45a2bf2c3f84afbb53964e886aeb9bd72f7aabe7 | [
"MIT"
] | permissive | ChWeiking/PythonTutorial | 1259dc04c843382f2323d69f6678b9431d0b56fd | 1aa4b81cf26fba2fa2570dd8e1228fef4fd6ee61 | refs/heads/master | 2020-05-15T00:50:10.583105 | 2016-07-30T16:03:45 | 2016-07-30T16:03:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,800 | py | from django.shortcuts import render,redirect
from user.models import *
from hashlib import *
from django.http import *
from django.template import loader,RequestContext
from django.core.urlresolvers import reverse
from datetime import timedelta
from django.views.decorators.csrf import csrf_exempt
from PIL import Image, ImageDraw, ImageFont, ImageFilter
from io import StringIO,BytesIO
import random
# Create your views here.
def register(request):
    """Render the empty registration form."""
    return render(request, 'user/register.html')
def register_handler(request):
    """Persist a new user from the POSTed form, then show the success page."""
    # NOTE(review): unsalted SHA-1 is weak for password storage; Django's
    # built-in auth hashers would be preferable.
    new_user = User()
    new_user.uname = request.POST.get('username')
    raw_password = request.POST.get('userpwd')
    new_user.upwd = sha1(raw_password.encode('utf-8')).hexdigest()
    new_user.save()
    return render(request, 'user/success.html')
def login(request):
    """Render the login form.

    If a 'remember me' cookie from a previous login exists, its username is
    put into the template context so the field can be pre-filled.
    """
    print('session:%s' % request.session.get('currentUser'))  # debug trace
    context = {}
    # Read the remembered username. login_handler stores it under the key
    # 'mycookie'; the original read 'mycooki' here, so the remembered name
    # was never found (bug fix).
    username = request.COOKIES.get('mycookie')
    if username:
        context['username'] = username
    return render(request, 'user/login.html', context)
#@csrf_exempt
def login_handler(request):
    """Validate the captcha and credentials; on success render the success page.

    Side effects: stores the user name in the server-side session and, when
    the 'remember me' box was checked, in a cookie.
    """
    # Template context.
    context = {}
    # Captcha typed by the user; compared case-insensitively against the value
    # that verification() stored in the session under 'codes'.
    userverification = request.POST.get('userverification')
    if userverification==None or request.session['codes'].upper() != userverification.upper():
        context = {'userverification_error':'验证码输入错误'}
        return render(request,'user/login.html',context)
    # Username and SHA-1-hashed password (matches how register_handler stores it).
    username = request.POST.get('username')
    userpwd = sha1(request.POST.get('userpwd').encode('utf-8')).hexdigest()
    # Look up a matching user.
    ret = User.objects.filter(uname=username,upwd=userpwd)
    if len(ret)==0:
        return HttpResponseRedirect('/user/login')
    else:
        # Keep the logged-in user in a server-side session entry (10 h expiry).
        request.session['currentUser'] = username
        request.session.set_expiry(36000)
        #request.session.set_expiry(timedelta(days=2))
        # Load the template.
        t1 = loader.get_template('user/success.html')
        # Build the request context.
        requestcontext = RequestContext(request,context)
        # Create the response from template + context.
        response = HttpResponse(t1.render(requestcontext))
        # 'Remember me' checkbox value from the form.
        rememberName = request.POST.get('rememberName')
        # If requested, remember the username in a cookie.
        if rememberName=='1':
            # Write the remember-me cookie (1 hour).
            response.set_cookie('mycookie',username,max_age=3600)
        return response
def verification(request):
    """Generate a 4-letter captcha image (240x60 JPEG) and remember its text.

    The letters are stored in request.session['codes'] so that
    login_handler() can check the user's input against them.
    """
    # 240 x 60:
    width = 60 * 4
    height = 60
    image = Image.new('RGB', (width, height), (255, 255, 255))
    # Create the Font object (assumes the Liberation fonts are installed at
    # this absolute path — TODO confirm on the deployment host):
    font = ImageFont.truetype('/usr/share/fonts/truetype/liberation/LiberationSerif-BoldItalic.ttf', 36)
    # Create the Draw object:
    draw = ImageDraw.Draw(image)
    # Fill every pixel with a random light background color:
    for x in range(width):
        for y in range(height):
            draw.point((x, y), fill=rndColor())
    # Accumulates the captcha characters.
    codes = ''
    # Draw the four random letters in a darker color:
    for t in range(4):
        code = rndChar()
        codes += code
        draw.text((60 * t + 10, 10),code , font=font, fill=rndColor2())
    # Blur:
    image = image.filter(ImageFilter.BLUR)
    # Store the captcha string in the session for the login check.
    request.session['codes'] = codes
    request.session.set_expiry(0)  # expire when the browser closes
    # In-memory byte buffer: serialize the image and return raw JPEG bytes.
    f = BytesIO()
    image.save(f,'jpeg')
    return HttpResponse(f.getvalue(),'image/jpeg')
def test1(request):
    """View that deliberately raises ZeroDivisionError.

    Presumably used to exercise the exception-handling middleware this lesson
    is about (directory name: day76 中间件/middleware) — TODO confirm.
    """
    # Simulate an exception.
    num = 1/0
    return render(request, 'user/test1.html')
# Random letter: one uppercase ASCII character.
def rndChar():
    """Return a random uppercase letter ('A'..'Z') for the captcha."""
    code_point = random.randint(65, 90)  # ord('A')..ord('Z')
    return chr(code_point)
# Random color 1: light RGB tuple used for the captcha background pixels.
def rndColor():
    """Return a random bright RGB tuple (each channel in 64..255)."""
    return tuple(random.randint(64, 255) for _ in range(3))
# Random color 2: dark RGB tuple used for the captcha text.
def rndColor2():
    """Return a random dark RGB tuple (each channel in 32..127)."""
    return tuple(random.randint(32, 127) for _ in range(3))
| [
"1025212779@qq.com"
] | 1025212779@qq.com |
afb62f95eaaa4ce1aada8b5967d560921f144a77 | 6d2e4655ce0a9012aea88c83e2f49572e6d06738 | /day-04/day-04-passport-processing-01.py | e90db8ced1bc732920d3e2c46bd83a708a9de7e0 | [] | no_license | LilySu/Advent_of_Code_2020 | d7664b2e4469e5b0434db94d2452cdf62bc05daa | 521da7b20b3e47d49a6180e2a2aad78b4d923efa | refs/heads/main | 2023-02-05T00:51:56.363196 | 2020-12-26T03:43:30 | 2020-12-26T03:43:30 | 321,393,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,156 | py | import re
from typing import List
from run_for_all_input_and_timer import Manager, timer
# Module-level setup: read the puzzle input once at import time.
# NOTE: `input` shadows the builtin for the rest of this module.
setup = Manager()
input = setup.get_file()
@timer
def solve(input: List[str]) -> int:
    """Count the passports that contain every required field (AoC 2020 day 4, part 1).

    Passports are blank-line separated blocks of ``key:value`` fields, possibly
    several space-separated fields per line.  A passport is valid when its keys
    are exactly the seven required ones, with ``cid`` optionally present
    (duplicate keys therefore invalidate it, as in the original).

    :param input: the raw puzzle input, one string per line.
    :return: the number of valid passports.
    """
    required = sorted(['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid'])
    required_with_cid = sorted(['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid', 'cid'])

    # Group the raw lines into one flat field list per passport.
    passports = []
    fields = []
    for line in input:
        if line:
            # A line may hold several space-separated "key:value" fields;
            # split(' ') on a line without spaces yields the line itself.
            fields.extend(line.split(' '))
        else:
            passports.append(fields)
            fields = []
    if fields:
        # Bug fix: the original only flushed on a blank line, silently
        # dropping the last passport when the input had no trailing blank.
        passports.append(fields)

    # Compare the sorted key list of each passport against the two accepted
    # layouts (key extraction via split instead of a per-field compiled regex).
    counter = 0
    for block in passports:
        keys = sorted(field.split(':', 1)[0] for field in block)
        if keys == required or keys == required_with_cid:
            counter += 1
    return counter
# Entry point: solve the puzzle for the input loaded at import time.
if __name__ == "__main__":
    print(solve(input))
| [
"LilySu@users.noreply.github.com"
] | LilySu@users.noreply.github.com |
0b16ea7eb6eeee60dfe71c9adef9a4d0eb907fc5 | f23a9ff9dc20e34d4e3e2a2281ec06ba97a6ff8d | /FINE/expansionModules/transformationPath.py | ef7ae3ede34440952de5df2675ca9e2bee40162b | [
"MIT"
] | permissive | OfficialCodexplosive/FINE-GL | 8e04078efd332105c07e92e8a6da9b19281bb87b | 18c5a983f194ec4fc4bd168db38ff36aa53d5ebd | refs/heads/main | 2023-06-06T10:54:14.025084 | 2021-06-18T20:45:37 | 2021-06-18T20:45:37 | 377,762,821 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,079 | py | """
Last edited: February 11, 2020
|br| @author: FINE Developer Team (FZJ IEK-3)
"""
from FINE import utils
from FINE.IOManagement import standardIO
import pandas as pd
import copy
def optimizeSimpleMyopic(esM,
                         startYear,
                         endYear=None,
                         nbOfSteps=None,
                         nbOfRepresentedYears=None,
                         timeSeriesAggregation=True,
                         numberOfTypicalPeriods=7,
                         numberOfTimeStepsPerPeriod=24,
                         logFileName='',
                         threads=3,
                         solver='gurobi',
                         timeLimit=None,
                         optimizationSpecs='',
                         warmstart=False,
                         CO2Reference=366,
                         CO2ReductionTargets=None,
                         saveResults=True,
                         trackESMs=True):
    """
    Optimize a transformation pathway with myopic foresight.

    The horizon [startYear, endYear] is split into nbOfSteps+1 milestone
    optimizations; after each run the newly installed capacities are handed
    over to the next run as existing stock (capacityFix) via getStock().

    :param esM: energy system model which is optimized along the pathway
    :type esM: EnergySystemModel instance

    :param startYear: year of the first optimization
    :type startYear: int

    :param endYear: year of the last optimization |br| * default: None
    :type endYear: int or None

    :param nbOfSteps: number of optimization runs in addition to the start year
        |br| * default: None (derived from endYear / nbOfRepresentedYears)
    :type nbOfSteps: int or None

    :param nbOfRepresentedYears: number of years represented by one run
        |br| * default: None (derived)
    :type nbOfRepresentedYears: int or None

    :param timeSeriesAggregation: optimize on clustered time series (True) or
        on the full time series (False) |br| * default: True
    :type timeSeriesAggregation: bool

    :param numberOfTypicalPeriods: number of typical periods for the
        clustering |br| * default: 7
    :type numberOfTypicalPeriods: strictly positive int

    :param numberOfTimeStepsPerPeriod: time steps per typical period
        |br| * default: 24
    :type numberOfTimeStepsPerPeriod: strictly positive int

    :param logFileName: prefix for the per-run solver log files; each run logs
        to '<prefix>_<milestoneYear>' ('log_<milestoneYear>' when empty)
    :type logFileName: str

    :param threads, solver, timeLimit, optimizationSpecs, warmstart: forwarded
        unchanged to EnergySystemModel.optimize for every run

    :param CO2Reference: reference emission value the reduction targets are
        applied to (default refers to 1990 electricity-sector emissions)
        |br| * default: 366
    :type CO2Reference: float

    :param CO2ReductionTargets: one CO2 reduction target per optimization run
        (length must equal the number of runs); requires a sink component that
        counts the CO2 emissions |br| * default: None
    :type CO2ReductionTargets: list or None

    :param saveResults: write every run's results to an Excel file
        |br| * default: True
    :type saveResults: bool

    :param trackESMs: keep a deep copy of every optimized model in the
        returned dictionary (memory-hungry for big models)
        |br| * default: True
    :type trackESMs: bool

    :returns: {'ESM_<milestoneYear>': optimized EnergySystemModel, ...} if
        trackESMs, otherwise None
    :rtype: dict or None
    """
    nbOfSteps, nbOfRepresentedYears = utils.checkAndSetTimeHorizon(
        startYear, endYear, nbOfSteps, nbOfRepresentedYears)
    utils.checkSinkCompCO2toEnvironment(esM, CO2ReductionTargets)
    utils.checkCO2ReductionTargets(CO2ReductionTargets, nbOfSteps)

    print('Number of optimization runs: ', nbOfSteps + 1)
    print('Number of years represented by one optimization: ', nbOfRepresentedYears)

    if trackESMs:
        myopicResults = dict()

    for step in range(nbOfSteps + 1):
        mileStoneYear = startYear + step * nbOfRepresentedYears
        # Per-run log file name; an explicitly passed logFileName is used as
        # prefix (the original overwrote the argument and always used 'log_<year>').
        stepLogFileName = (logFileName or 'log') + '_' + str(mileStoneYear)

        utils.setNewCO2ReductionTarget(esM, CO2Reference, CO2ReductionTargets, step)

        # Optimization (optionally on aggregated time series).
        if timeSeriesAggregation:
            esM.cluster(numberOfTypicalPeriods=numberOfTypicalPeriods,
                        numberOfTimeStepsPerPeriod=numberOfTimeStepsPerPeriod)
        # Bug fix: warmstart is now forwarded; it used to be hard-coded to
        # False, silently ignoring the function argument.
        esM.optimize(declaresOptimizationProblem=True, timeSeriesAggregation=timeSeriesAggregation,
                     logFileName=stepLogFileName, threads=threads, solver=solver, timeLimit=timeLimit,
                     optimizationSpecs=optimizationSpecs, warmstart=warmstart)

        if saveResults:
            standardIO.writeOptimizationOutputToExcel(esM, outputFileName='ESM' + str(mileStoneYear),
                                                      optSumOutputLevel=2, optValOutputLevel=1)
        if trackESMs:
            myopicResults.update({'ESM_' + str(mileStoneYear): copy.deepcopy(esM)})

        # Hand the newly installed capacities to the next run as stock, except
        # after the final run. Bug fix: the original tested step != nbOfSteps+1,
        # a value the loop variable never reaches, so stock was also built once
        # after the last optimization, contrary to the adjacent comment.
        if step != nbOfSteps:
            esM = getStock(esM, mileStoneYear, nbOfRepresentedYears)

    return myopicResults if trackESMs else None
def getStock(esM, mileStoneYear, nbOfRepresentedYears):
    '''
    Function for determining the stock of all considered technologies for the next optimization period.
    If the technical lifetime is expired, the fixed capacities of the concerned components are set to 0.

    :param esM: optimized EnergySystemModel instance of the previous myopic optimization run.
    :type esM: EnergySystemModel instance

    :param mileStoneYear: Last year of the optimization period
    :type mileStoneYear: int

    :param nbOfRepresentedYears: Number of years within one optimization period.
    :type nbOfRepresentedYears: int

    :return esM: EnergySystemModel instance including the installed capacities of the previous optimization runs.
    :rtype: EnergySystemModel instance

    Last edited: February 11, 2020
    |br| @author: FINE Developer Team (FZJ IEK-3)
    '''
    for mdl in esM.componentModelingDict.keys():
        compValues = esM.componentModelingDict[mdl].getOptimalValues('capacityVariablesOptimum')['values']
        if compValues is not None:
            for comp in compValues.index.get_level_values(0).unique():
                if 'stock' not in esM.componentModelingDict[mdl].componentsDict[comp].name:
                    # Freshly built component: clone it as a "stock" component
                    # whose remaining lifetime is its technical lifetime minus
                    # the years represented by the finished optimization run.
                    stockName = comp+'_stock'+'_'+str(mileStoneYear)
                    stockComp = copy.deepcopy(esM.componentModelingDict[mdl].componentsDict[comp])
                    stockComp.name = stockName
                    stockComp.lifetime = esM.componentModelingDict[mdl].componentsDict[comp].technicalLifetime - nbOfRepresentedYears
                    # If lifetime is shorter than number of represented years, skip component
                    if any(getattr(stockComp,'lifetime') <= 0):
                        continue
                    # If capacities are installed, set the values as capacityFix.
                    if getattr(stockComp, 'capacityFix') is None:
                        if isinstance(compValues.loc[comp], pd.DataFrame):
                            # 2-dim components (e.g. transmission): fill missing
                            # entries with -1 and let the preprocessing discard them.
                            stockComp.capacityFix = utils.preprocess2dimData(compValues.loc[comp].fillna(value=-1), discard=False)
                        else:
                            # NOTE: Values of capacityMin and capacityMax are not overwritten.
                            # CapacityFix values set the capacity fix and fulfills the boundary constraints (capacityMin <= capacityFix <= capacityMax)
                            stockComp.capacityFix = compValues.loc[comp]
                        esM.add(stockComp)
                elif 'stock' in esM.componentModelingDict[mdl].componentsDict[comp].name:
                    # Existing stock component: age it by the represented years.
                    esM.componentModelingDict[mdl].componentsDict[comp].lifetime -= nbOfRepresentedYears
                    # If lifetime is exceeded, remove component from the energySystemModel instance
                    if any(getattr(esM.componentModelingDict[mdl].componentsDict[comp],'lifetime') <= 0):
                        esM.removeComponent(comp)
    return esM
| [
"noreply@github.com"
] | OfficialCodexplosive.noreply@github.com |
c96c94c5fc73e6d2124de7dc626da7e4ec4d65ec | 06f6cac49c0ef8ca0a5f441d60fe50a128e31f4b | /personal/admin.py | 4ef6f29ad3dd1ed10e838cb3dfbb03e9aa63ca95 | [] | no_license | kpashko/reddit-like-django | 54b81491f6e53d120ac039bf30530fcbda8a4b4a | f63c5510c4c221aa44c6b898f6e596454c4bc82d | refs/heads/master | 2022-11-28T16:49:23.504575 | 2020-08-07T12:51:49 | 2020-08-07T12:51:49 | 285,237,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
#
# class AccountAdmin(UserAdmin):
# list_display = ('email', 'username', 'date_joined', 'last_login', 'is_admin', 'is_staff')
# search_fields = ('email', 'username')
# readonly_fields = ('date_joined', 'last_login')
#
# filter_horizontal = ()
# list_filter = ()
# fieldsets = ()
| [
"pashkokostya@gmail.com"
] | pashkokostya@gmail.com |
819f256bda020e550dfc263fdbc3f8b3702769a5 | 67c5523319978902772b590c232ce92255edf1cf | /AdventOfCode/2021/day_04/sln.py | bbcb40fe90738738843ae69897b751f4d5bee455 | [
"MIT"
] | permissive | AustinTSchaffer/DailyProgrammer | 7fb4d38de01a5552cf9216233a69e0c8bb122080 | f0a1dcdc9959b67e8dda2e9cbe6c82209c1cd924 | refs/heads/main | 2023-05-13T00:08:35.536723 | 2023-04-27T21:45:19 | 2023-04-27T21:45:19 | 61,325,669 | 2 | 0 | MIT | 2021-04-10T15:13:27 | 2016-06-16T21:02:17 | Python | UTF-8 | Python | false | false | 2,708 | py | #%%
import common
import dataclasses
from typing import List
class BingoBoard:
    """A rectangular bingo board with a parallel grid of per-cell marks.

    Cell values are kept as strings so they can be compared directly
    against drawn numbers read from the input file.
    """

    def __init__(self, data: str):
        # Parse every non-empty line into a row of whitespace-separated tokens.
        self.data = []
        for line in data.split('\n'):
            if not line:
                continue
            self.data.append([token.strip() for token in line.split() if token.strip()])
        # One boolean per cell; True once that cell's number has been drawn.
        self.marks = [[False] * len(row) for row in self.data]

    def reset_marks(self):
        """Clear every mark, returning the board to its initial state."""
        for row in self.marks:
            for col in range(len(row)):
                row[col] = False

    def mark_number(self, number):
        """Mark every cell whose value equals *number* (string comparison)."""
        for r, row in enumerate(self.data):
            for c, cell in enumerate(row):
                if cell == number:
                    self.marks[r][c] = True

    def is_complete(self) -> bool:
        """Return True when any full row or any full column is marked."""
        if any(all(row) for row in self.marks):
            return True
        return any(all(column) for column in zip(*self.marks))

    def sum_unmarked(self) -> int:
        """Sum of the integer values of all cells that are still unmarked."""
        total = 0
        for r, row in enumerate(self.data):
            for c, cell in enumerate(row):
                if not self.marks[r][c]:
                    total += int(cell)
        return total
bingo_numbers = common.get_input(__file__, filename='bingo_numbers.txt')[0].strip().split(',')
bingo_boards = list(map(BingoBoard, ''.join(common.get_input(__file__, filename='bingo_boards.txt')).split('\n\n')))
def run_bingo_simulation():
    """Part 1: draw numbers in order and stop at the FIRST winning board.

    Prints the puzzle answer: the drawn number times the winning board's
    sum of unmarked cells.  Relies on the module-level ``bingo_numbers``
    and ``bingo_boards`` loaded from the input files.
    """
    for number in bingo_numbers:
        for bingo_board in bingo_boards:
            bingo_board.mark_number(number)
            if bingo_board.is_complete():
                print("Part 1:", int(number) * bingo_board.sum_unmarked())
                return
run_bingo_simulation()
for board in bingo_boards:
board.reset_marks()
def run_bingo_simulation_part_2():
    """Part 2: let every board win and report the LAST board to win.

    Completed boards are dropped from the next round's board list; when
    only one board remains and it completes, prints the answer (drawn
    number times that board's unmarked sum).  Uses the module-level
    ``bingo_numbers`` and ``bingo_boards``.
    """
    bingo_boards_current = bingo_boards
    bingo_boards_next = list(bingo_boards)
    for number in bingo_numbers:
        for bingo_board in bingo_boards_current:
            bingo_board.mark_number(number)
            if bingo_board.is_complete():
                if len(bingo_boards_current) == 1:
                    print("Part 2:", int(number) * bingo_board.sum_unmarked())
                    return
                # board finished but isn't the last one -- retire it
                bingo_boards_next.remove(bingo_board)
        # work on a copy so boards can be removed while iterating safely
        bingo_boards_current = bingo_boards_next
        bingo_boards_next = list(bingo_boards_current)
run_bingo_simulation_part_2()
for board in bingo_boards:
board.reset_marks()
# %%
| [
"schaffer.austin.t@gmail.com"
] | schaffer.austin.t@gmail.com |
c6b390deb804bb7dd4d74a625d00c2632fb71ec9 | d39c000858be265fcd38ea54e42630c4ef55cf8b | /preprocessing_dicom.py | ac082b77a8f4df756f20276ebc72af20144404eb | [] | no_license | do-jo/lung-cancer | 8c5a36f4882ac0482ac2b05bc5513ec788a6525f | 00de2a82cdeb45c150df7e12ebb61d00bc59a48b | refs/heads/master | 2021-03-24T11:57:11.677510 | 2017-03-30T01:35:15 | 2017-03-30T01:35:15 | 79,330,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,774 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 25 23:20:23 2017
@author: Johnny
Kaggle Data Science Bowl 2017
Preprocessing code for DICOM prior to applying machine learning
Some functions taken/modified from kaggle kernel from Guido Zuidhof
"""
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import dicom #load dicom images, and import class to work with dicom metadata
import os
import scipy.ndimage
import matplotlib.pyplot as plt
from skimage import measure, morphology
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
#load the raw/dicom data
def load_scan(path):
    """Load all DICOM files in directory *path*, sorted along the z axis.

    A uniform ``SliceThickness`` (distance between the first two slice
    positions) is stamped onto every slice so later resampling can use it.

    :param path: directory containing one DICOM file per slice.
    :return: list of DICOM datasets ordered from lowest to highest z.
    """
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    slices.sort(key = lambda x: float(x.ImagePositionPatient[2]))
    # Fall back to SliceLocation when ImagePositionPatient is unusable.
    try:
        slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
    except:
        slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
    for s in slices:
        s.SliceThickness = slice_thickness
    return slices
#extract 3d np array from dicom loaded file; output values in Hounsfield Units (HU)
def get_pixels_hu(slices):
    """Stack DICOM slices into a 3-D int16 volume in Hounsfield Units.

    Applies each slice's RescaleSlope/RescaleIntercept so that the output
    values are HU (air ~ -1000, water ~ 0).

    :param slices: list of DICOM datasets as returned by load_scan().
    :return: numpy int16 array of shape (n_slices, rows, cols).
    """
    image = np.stack([s.pixel_array for s in slices])
    # Convert to int16 (raw pixel data is sometimes unsigned),
    # should be possible as values should always be low enough (<32k)
    image = image.astype(np.int16)
    # Set outside-of-scan pixels to 0
    # The intercept is usually -1024, so air is approximately 0
    image[image == -2000] = 0
    # Convert to Hounsfield units (HU)
    for slice_number in range(len(slices)):
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope
        if slope != 1:
            # scale in float to avoid integer truncation, then cast back
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)
        image[slice_number] += np.int16(intercept)
    return np.array(image, dtype=np.int16)
#isotropic resampling to normalize spacing
def resample(image, scan, new_spacing=[1,1,1]):
    """Resample *image* to an (approximately) isotropic voxel spacing.

    :param image: 3-D HU volume from get_pixels_hu().
    :param scan: the DICOM slice list the volume came from (spacing source).
    :param new_spacing: desired (z, y, x) spacing in mm.  (Mutable default
        is harmless here: the name is rebound, never mutated.)
    :return: (resampled image, actually achieved spacing) -- due to shape
        rounding the real spacing may differ slightly from the request.
    """
    # Determine current pixel spacing
    # NOTE(review): assumes scan[0].PixelSpacing behaves like a list so the
    # ``+`` concatenates -- confirm for the pydicom version in use.
    spacing = np.array([scan[0].SliceThickness] + scan[0].PixelSpacing, dtype=np.float32)
    resize_factor = spacing / new_spacing
    new_real_shape = image.shape * resize_factor
    new_shape = np.round(new_real_shape)
    real_resize_factor = new_shape / image.shape
    new_spacing = spacing / real_resize_factor
    # scipy.ndimage.interpolation.zoom is the legacy (deprecated) alias of
    # scipy.ndimage.zoom; kept as-is for the pinned environment.
    image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest')
    return image, new_spacing
#optional 3d plotting/ often crashes on my slow laptop
def plot_3d(image, threshold=-300):
    """Render an interactive 3-D surface of *image* at the given HU threshold.

    :param image: 3-D HU volume.
    :param threshold: iso-surface level passed to marching cubes
        (-300 HU roughly separates tissue from air).
    """
    # Position the scan upright,
    # so the head of the patient would be at the top facing the camera
    p = image.transpose(2,1,0)
    verts, faces = measure.marching_cubes(p, threshold) #Marching cubes is an algorithm to extract a 2D surface mesh from a 3D volume
    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111, projection='3d')
    # Fancy indexing: `verts[faces]` to generate a collection of triangles
    mesh = Poly3DCollection(verts[faces], alpha=0.7)
    face_color = [0.45, 0.45, 0.75]
    mesh.set_facecolor(face_color)
    ax.add_collection3d(mesh)
    ax.set_xlim(0, p.shape[0])
    ax.set_ylim(0, p.shape[1])
    ax.set_zlim(0, p.shape[2])
    plt.show()
#segmentation to isolate lung tissue
"""
# * Threshold the image (-320 HU is a good threshold, but it doesn't matter much for this approach)
# * Do connected components, determine label of air around person, fill this with 1s in the binary image
# * Optionally: For every axial slice in the scan, determine the largest solid connected component (the body+air around the person), and set others to 0. This fills the structures in the lungs in the mask.
# * Keep only the largest air pocket (the human body has other pockets of air here and there)."""
def largest_label_volume(im, bg=-1):
    """Return the label occurring most often in *im*, ignoring label *bg*.

    Ties are broken in favour of the smallest label (np.unique yields
    labels in ascending order).  Returns None when only *bg* is present.
    """
    labels, occurrences = np.unique(im, return_counts=True)
    tally = {label: count for label, count in zip(labels, occurrences) if label != bg}
    if not tally:
        return None
    return max(tally, key=tally.get)
def segment_lung_mask(image, fill_lung_structures=True):
    """Return a binary mask (1 = lung air, 0 = everything else) for a HU volume.

    Thresholds the volume at -320 HU, removes the air surrounding the
    patient via connected components, optionally fills structures inside
    the lungs slice-by-slice, and finally keeps only the largest air
    pocket inside the body.

    :param image: 3-D HU volume.
    :param fill_lung_structures: fill vessels/airways inside the lungs.
    :return: numpy int8 array of the same shape, values 0/1.
    """
    # not actually binary, but 1 and 2.
    # 0 is treated as background, which we do not want
    binary_image = np.array(image > -320, dtype=np.int8)+1
    labels = measure.label(binary_image)
    # Pick the pixel in the very corner to determine which label is air.
    # Improvement: Pick multiple background labels from around the patient
    # More resistant to "trays" on which the patient lays cutting the air
    # around the person in half
    background_label = labels[0,0,0]
    #Fill the air around the person
    binary_image[background_label == labels] = 2
    # Method of filling the lung structures (that is superior to something like
    # morphological closing)
    if fill_lung_structures:
        # For every slice we determine the largest solid structure
        for i, axial_slice in enumerate(binary_image):
            axial_slice = axial_slice - 1
            labeling = measure.label(axial_slice)
            l_max = largest_label_volume(labeling, bg=0)
            if l_max is not None: #This slice contains some lung
                binary_image[i][labeling != l_max] = 1
    binary_image -= 1 #Make the image actual binary
    binary_image = 1-binary_image # Invert it, lungs are now 1
    # Remove other air pockets inside the body
    labels = measure.label(binary_image, background=0)
    l_max = largest_label_volume(labels, bg=0)
    if l_max is not None: # There are air pockets
        binary_image[labels != l_max] = 0
    return binary_image
#normalize the data: Our values currently range from -1024 to around 2000; >400 is not interesting to us (bones with different radiodensity)
def normalize(image, min_bound=-1000.0, max_bound=400.0):
    """Linearly rescale HU intensities from [min_bound, max_bound] to [0, 1].

    Values below ``min_bound`` clip to 0.0 and values above ``max_bound``
    clip to 1.0.  The input array is not modified; a new float array is
    returned.

    :param image: numpy array of Hounsfield-unit intensities.
    :param min_bound: HU value mapped to 0.0 (default -1000.0, air).
    :param max_bound: HU value mapped to 1.0 (default 400.0; denser values
        such as bone are not interesting for this pipeline).
    :return: new numpy float array scaled and clipped to [0, 1].

    Note: the bounds used to be read from module-level globals that were
    only defined under ``if __name__ == "__main__"`` (a NameError when this
    module was imported); they are now keyword parameters with the same
    default values, so existing ``normalize(image)`` calls behave the same.
    """
    image = (image - min_bound) / (max_bound - min_bound)
    image[image > 1] = 1.
    image[image < 0] = 0.
    return image
# Zero centering
""" As a final preprocessing step, zero center your data so that your mean value is 0 (subtract the mean pixel value from all pixels).
# To determine this mean you simply average all images in the whole dataset. Do not zero center with the mean per image! mean ~ 0.25 in the LUNA16 competition.
"""
def zero_center(image, pixel_mean=0.25):
    """Shift normalized intensities so the dataset-wide mean becomes ~0.

    :param image: numpy array of intensities already normalized to [0, 1].
    :param pixel_mean: mean pixel value of the whole dataset to subtract
        (default 0.25, the value quoted for the LUNA16 data above).
    :return: new array with ``pixel_mean`` subtracted; input not modified.

    Note: ``pixel_mean`` used to come from a module-level global defined
    only under ``if __name__ == "__main__"`` (a NameError on import); it is
    now a keyword parameter with the same default, so existing
    ``zero_center(image)`` calls behave the same.
    """
    return image - pixel_mean
if __name__ == "__main__":
# Some constants
plot_data = 1
psave = 0
BASE_FOLDER = r'D:\kaggle\lungcancer\preprocessed_sample_images/'
INPUT_FOLDER = r'D:\kaggle\lungcancer\sample_images/' #sample images only
#INPUT_FOLDER = r'H:\kaggle\lung_cancer/' #stage1 images
#parameters for normalization
MIN_BOUND = -1000.0
MAX_BOUND = 400.0
PIXEL_MEAN = 0.25
#load patient data
patients = os.listdir(INPUT_FOLDER)
patients.sort()
#process data
counter = 1
for patient in patients:
print("loading patient {} of {}: {}".format(counter, len(patients), patient))
data = load_scan(INPUT_FOLDER + patient)
stack = get_pixels_hu(data)
# if plot_data == 1:
# plt.hist(stack.flatten(), bins=80, color='c')
# plt.xlabel("Hounsfield Units (HU)")
# plt.ylabel("Frequency")
# plt.show()
# when resampling, save the new spacing! Due to rounding this may be slightly off from the desired spacing
pix_resampled, spacing = resample(stack, data, [1,1,1]) #resample our patient's pixels to an isomorphic resolution of 1 by 1 by 1 mm
print("Shape before and after resampling {} to {}".format(stack.shape, pix_resampled.shape))
#segment just the lungs
#segmented_lungs = segment_lung_mask(pix_resampled, False)
segmented_lungs_fill = segment_lung_mask(pix_resampled, True)
pix_masked = pix_resampled.copy() #or create an empty array
pix_normalized = pix_resampled.copy()
for i in range(pix_resampled.shape[0]): #go through z-stack
mask = segmented_lungs_fill[i,:,:]
pix_normalized[i,:,:] = normalize(pix_resampled[i,:,:])
temp = pix_normalized[i].copy() #consider removing the copy to save computational time
#temp[mask == 0] = -1024
temp[mask == 0] = 0
pix_masked[i,:,:] = temp
#plot an example frame
if plot_data == 1:
i = 75 #frame #
f, axarr = plt.subplots(2, 2)
#axarr[0,0].imshow(pix_normalized[i,:,:], cmap=plt.cm.gray) #original image
axarr[0,1].imshow(pix_resampled[i,:,:], cmap=plt.cm.gray) #resampled image
axarr[1,0].imshow(segmented_lungs_fill[i,:,:], cmap=plt.cm.gray) #mask of resampled image
axarr[1,1].imshow(pix_masked[i,:,:], cmap=plt.cm.gray) #resampled image with mask
#axarr[0, 0].set_title('pix_normalized', fontsize = 10)
axarr[0, 1].set_title('resampled', fontsize = 10)
axarr[1, 0].set_title('mask', fontsize = 10)
axarr[1, 1].set_title('mask on resampled', fontsize = 10)
for j in range(2):
for k in range(2):
axarr[j,k].set_xticks([])
if psave == 1:
outfile = patient + '.npy'
outpath = os.path.join(BASE_FOLDER, outfile)
print("saving preprocessed file: {}".format(outfile))
np.save(outpath, pix_masked)
counter += 1 | [
"doj@berkeley.edu"
] | doj@berkeley.edu |
d5f41e2b479f230c4346baf3e82bcfd8fe826e39 | 60e2b2da49f971e655cf84909e96ba3fa31befa1 | /product_hunt_clone/urls.py | 23ba4024ca62583125230f07e34b029152943a09 | [] | no_license | akjha96/product_hunt_clone | 5c6eef3386df3982cb48398345995e1451b32934 | dced567ff3391355b18f67001c05f097fddfc6ed | refs/heads/master | 2021-03-01T05:21:48.910875 | 2020-03-08T05:21:54 | 2020-03-08T05:21:54 | 230,100,682 | 0 | 0 | null | 2019-12-28T06:03:19 | 2019-12-25T12:35:03 | Python | UTF-8 | Python | false | false | 1,073 | py | """product_hunt_clone URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from products import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.home, name='home'),
path('accounts/', include('accounts.urls')),
path('products/', include('products.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"akjha96@github.com"
] | akjha96@github.com |
7ef4a666e8ed5dea2398b20bacf88dc06c4031f7 | 2ef6663a1274a5fecd68e000d9c2269915ed7b2c | /simple group chat/testclient2.py | 253f0e7db9a1552e23dc58fd1b1d68c5f44addd6 | [] | no_license | hackothaurus/simple_group_chat | 1826d30fa3b3bdd630607d4c5943d51dcbb5f2bc | a74ffbaeddbee39eb2ee7ee82c5184fd366e2459 | refs/heads/main | 2023-01-03T05:04:17.336504 | 2020-10-27T13:55:37 | 2020-10-27T13:55:37 | 307,713,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | # Python program to implement client side of chat room.
import socket
import sys
# Connect to the chat server listening on localhost:1234.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("127.0.0.1", 1234))
while True:
    # NOTE(review): `str` shadows the builtin of the same name here.
    str = input("user2: ");
    s.send(str.encode());
    # Typing Bye/bye closes the session without waiting for a reply.
    if(str == "Bye" or str == "bye"):
        break;
    # Print the peer's reply (up to 1024 bytes).
    print ("N:",s.recv(1024).decode())
s.close();
| [
"noreply@github.com"
] | hackothaurus.noreply@github.com |
e5778fd21fe0e971f31f9f6c2a3857873ee71118 | e50e376abb2910c649846c778807d02d6fff52f7 | /tododler/wsgi.py | 065eab4376751a6e3d17c9bc01cef0bf5be30ec7 | [] | no_license | triterer/tododler | 4f03a30771a4cccf3079c48fac7a46679549685f | c70c37ac50ed2bb33e7c58da8aa580d434c45f59 | refs/heads/master | 2020-09-08T06:00:31.269520 | 2019-11-11T18:00:52 | 2019-11-11T18:00:52 | 221,038,129 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
WSGI config for tododler project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tododler.settings')
application = get_wsgi_application()
| [
"vofka99@rambler.ru"
] | vofka99@rambler.ru |
1d71401127388baa89896b58bf87c9cf172c6b7a | ced985740f22c5d8fa22816aa75dffd2c27442f6 | /Project Code/Populace/manage.py | 5720f059669f125c4e276978696e639f38247162 | [] | no_license | Antusaha2/SU19CSE299S16G03NSU | a5362f52e18587f06dea9836ba322ef2b31c2442 | e9b026eea1bad9f46154a6a607e7325f5c19f9dc | refs/heads/master | 2022-01-23T13:47:22.106906 | 2019-08-21T04:45:46 | 2019-08-21T04:45:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks for the Populace project."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Populace.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"sayeed.k06@gmail.com"
] | sayeed.k06@gmail.com |
e6b363fbf17fd6e22028ac441c93c53264e9a3dc | 4a83e737212f0c5f83e13be33deeeec1a8a0f4f3 | /posts/migrations/0004_auto_20200709_1651.py | e9d0d65adffd08307d0e26319cfd5994851ee0a4 | [] | no_license | 4dragunov/praktikum_social_network | c83e6a9d1eea1d0980f315abdc6df3e68dbdcef3 | c5a521df9bd5641ec2946edaf4fdc0922f32accb | refs/heads/master | 2023-02-13T20:47:08.787317 | 2021-01-12T16:53:14 | 2021-01-12T16:53:14 | 288,222,320 | 3 | 6 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | # Generated by Django 2.2.9 on 2020-07-09 16:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter posts.Group.slug to SlugField(unique=True)."""
    dependencies = [
        ('posts', '0003_auto_20200709_1647'),
    ]
    operations = [
        migrations.AlterField(
            model_name='group',
            name='slug',
            field=models.SlugField(unique=True),
        ),
    ]
| [
"4dragunov@gmail.com"
] | 4dragunov@gmail.com |
986ce149524a8d3f4ff1e85f20ed27a50dabb06e | 9f1b7e6873d95161c45c8cec559077e91a361551 | /flik/common/dateparam.py | 4e155f7100b2e0fddf28a434e83bf17bf56f1ea8 | [
"BSD-2-Clause",
"MIT"
] | permissive | elbkind/flik | 4bf9f5aea2cb5ee5760e58489b9df2a3ec1c755f | bc40381c20cad351c41fe92317ee7d6aab10be9c | refs/heads/master | 2021-01-12T01:43:12.724723 | 2017-01-09T09:36:21 | 2017-01-09T09:36:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,023 | py | import re
from datetime import datetime
from dateutil.relativedelta import relativedelta, MO, TU, WE, TH, FR, SA, SU
def parse(raw_date):
    """Parse a human-friendly date word into a (from, to) pair.

    Accepted inputs: 'today', 'yesterday', an English weekday name (most
    recent past occurrence), a lowercase ISO-week string like '2020-w07'
    (returns the whole week, Monday to the following Monday), or an
    ISO date 'YYYY-MM-DD'.  The second element is always the exclusive
    upper bound (one day -- or one week -- after the first).

    NOTE(review): the weekday/today branches return datetime.date objects
    while the final strptime branch returns a datetime (no .date() call) --
    confirm callers tolerate both.
    NOTE(review): the week regex only matches a lowercase 'w'; '2020-W07'
    falls through to the ISO-date branch and raises ValueError.
    """
    # weekday(-1) means "the most recent past occurrence of that weekday"
    weekday = {
        'monday': MO(-1),
        'tuesday': TU(-1),
        'wednesday': WE(-1),
        'thursday': TH(-1),
        'friday': FR(-1),
        'saturday': SA(-1),
        'sunday': SU(-1),
    }
    if raw_date == 'today':
        date = datetime.now().date()
    elif raw_date == 'yesterday':
        date = datetime.now().date() - relativedelta(days=1)
    elif raw_date in weekday:
        date = datetime.now().date() + relativedelta(weekday=weekday[raw_date])
    elif re.match('\d{4}-w\d{2}', raw_date) is not None:
        # '-1' selects Monday of that ISO week for strptime's %w directive.
        fromDate = datetime.strptime(raw_date + '-1', "%Y-W%W-%w").date()
        toDate = fromDate + relativedelta(days=7)
        return fromDate, toDate
    else:
        date = datetime.strptime(raw_date, '%Y-%m-%d')
    toDate = date + relativedelta(days=1)
    return date, toDate
def format(date):
    """Render *date* as a ``YYYY-MM-DD`` string.

    (Intentionally shadows the builtin ``format`` within this module.)
    """
    return f"{date:%Y-%m-%d}"
| [
"rsteube@users.noreply.github.com"
] | rsteube@users.noreply.github.com |
fd4b9bbad032fd93f0ca1ccbfe850ab51f7e941f | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2/babpls/pancake.py | d28c2b437d135f52472fcf97a0e7317ca7ed9438 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 324 | py | fin = open('pancake.in', 'r')
fout = open('pancake.out', 'w')
# `count` doubles as the 1-based case number; line 0 (the case count) is skipped.
# NOTE(review): fin/fout are never closed, so output may stay unflushed if the
# interpreter exits abnormally.
count = 0
for line in fin:
    if count != 0:
        # Scan the pancake stack from the bottom (line reversed, trailing
        # newline stripped) and count sign changes, seeded with '+': each
        # change of symbol costs one flip.
        out = 0
        cur = '+'
        for x in line[:-1][::-1]:
            if cur != x:
                cur = x
                out += 1
        fout.write('Case #%d: %s\n' % (count, str(out)))
    count += 1
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
4d3f6e13929634e4ad40f569750e3fe92fecc43c | 78bdb461b09f0dbccad474f069fe7dec829c70e3 | /venv/Lib/site-packages/tailor/internal/domain/postgres/sessionfactory.py | 6b69eb983a53d1276369e05bd46d52791eaddfa6 | [] | no_license | OdaNilseng/FLSworkflow | 92d7a9b9d63398517f927fd2a0514eaef787e110 | 780a998741991f17ce14d5b642b228495681e415 | refs/heads/master | 2022-12-03T13:04:36.542191 | 2020-08-21T08:55:53 | 2020-08-21T08:55:53 | 289,224,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | import os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, Session
from tailor.config import config
from tailor.internal.domain.postgres import AlchemyBase
from tailor.internal.utils import Singleton
@Singleton
class SessionFactory:
    """Singleton owning the admin-DB engine and one shared Session.

    On first construction: builds the SQLAlchemy engine from
    config['admin_db_uri'], creates all tables registered on AlchemyBase,
    and prepares a single autocommit Session that create() hands out.
    """
    def __init__(self):
        # an Engine, which the Session will use for connection
        # resources
        db_url = config['admin_db_uri']
        # SQL echo is on only when the DEBUG_SQL env var is exactly "true"
        debug_sql = os.environ.get("DEBUG_SQL") == "true"
        if debug_sql:
            print("Creating admin db at", db_url)
        engine = create_engine(db_url, echo=debug_sql)
        AlchemyBase.metadata.create_all(engine)
        # create a configured "Session" class
        self.session_maker = sessionmaker(bind=engine)
        self.session = self.__create_new()
    def create(self) -> Session:
        # NOTE(review): despite the name, this does not create a new
        # Session -- it always returns the one built in __init__.
        return self.session
    def __create_new(self) -> Session:
        # create a new autocommit Session from the configured factory
        created: Session = self.session_maker()
        created.autocommit = True
        return created
| [
"oed@sevanssp.com"
] | oed@sevanssp.com |
ca694b19b1ddaa3393d91190f4addb316c5fd96e | 8240abd177ece3a1cf2d753cc5694c1fec478709 | /week1/codeingBat/list-2/04.py | def4b4029205be7d5c428af84f9eba1616343dda | [] | no_license | DastanB/BF-Django | 255001185d8a8318bd19b750fe662a7f86b64d92 | adcd1d968b94ea5097fd3d03338f031d5497d463 | refs/heads/master | 2022-10-27T06:44:56.648527 | 2018-11-24T18:33:35 | 2018-11-24T18:33:35 | 147,125,321 | 1 | 1 | null | 2022-10-19T08:22:54 | 2018-09-02T22:07:22 | Python | UTF-8 | Python | false | false | 242 | py | def sum13(nums):
    """Sum the numbers in *nums*, where 13 is unlucky: a 13 does not count
    and neither does the number immediately after a 13 (codingbat sum13).

    NOTE(review): shadows the builtin ``sum`` with a local, and zeroes the
    13 and its successor *in the caller's list* -- the mutation is visible
    to the caller.
    NOTE(review): consecutive 13s lose their unlucky status once the
    second 13 is overwritten with 0: e.g. [13, 13, 1] returns 1 here,
    but the codingbat spec expects 0 -- confirm against the spec.
    """
    sum = 0
    for i in range (len(nums)):
        if nums[i] != 13:
            sum += nums[i]
        # zeroing the follower keeps it from being counted later
        # (0 is the additive identity)
        elif nums[i] == 13 and i < len(nums)-1:
            nums[i]=0;
            nums[i+1] =0
return sum | [
"dastan211298@gmail.com"
] | dastan211298@gmail.com |
96b80fdd8c80d38fff3348a20ed3e1d9e961fbd0 | 7356f77784c9ad3ffb3da4b3b60d844b23bb7b29 | /dt_automator/maker/model/scene.py | 3af1bfa5266ca3679ea24f3ea9652d3b6e46778b | [] | no_license | HsOjo/DTAutomator | 5cc513e41a3eba0a595bb410bcee6ff990140805 | d51c31ea04a79ed767f661ab0f9599b1c0f0bcef | refs/heads/master | 2021-02-13T00:59:06.424434 | 2020-05-03T04:34:12 | 2020-05-03T04:34:12 | 244,647,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | from typing import List
from dt_automator.base.model import BaseModel
from .feature import FeatureModel
from .object import ObjectModel
class SceneModel(BaseModel):
    """A named scene: an image plus its detected features and objects."""
    # Nested payload description: each key deserializes to a list of the
    # given sub-model type (presumably consumed by BaseModel -- confirm).
    _sub_model = dict(
        features=(list, FeatureModel),
        objects=(list, ObjectModel),
    )
    def __init__(self, event: dict):
        # `event` supplies owner callbacks; 'get_path' resolves a relative
        # image name to a full path (see img_path below).
        self._event = event
        self.name = ''
        self.img = ''
        self.features = []  # type: List[FeatureModel]
        self.objects = []  # type: List[ObjectModel]
    @property
    def img_path(self):
        """Full path of this scene's image via the 'get_path' callback."""
        return self._event['get_path'](self.img)
    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.name)
| [
"1134031392@qq.com"
] | 1134031392@qq.com |
fbb829ca8e1fb3c025af62444ebef149db9b56ce | 07996c7f93e7b200146cd314520100cf99d003bd | /raw data/40_tos_with_paragraph/code/crawlParagraph/new-env/bin/conch | 4090c800e7522412274480e3f813286e46384855 | [] | no_license | tjuyanghw/data_policy_analyzer | 31ae683128ca5241fa8f0cb67e2f1132820c2d02 | 010a44ff024bd6d97b21f409f6c62f969e1fdc55 | refs/heads/master | 2022-07-02T19:23:14.141170 | 2020-05-13T16:24:11 | 2020-05-13T16:24:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | #!/Users/huthvincent/Desktop/scrapy/scrapyenv/crawlByOnce/new-env/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'Twisted==19.10.0','console_scripts','conch'
__requires__ = 'Twisted==19.10.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('Twisted==19.10.0', 'console_scripts', 'conch')()
)
| [
"xiaoyue10131748@gmail.com"
] | xiaoyue10131748@gmail.com | |
664761915eb265fca95b1444fe46e40784a50381 | 21e59791db9424fb560b84ff9c9fa9a35380624c | /reducedSkimmer/runOnData.py | be4def8e70f2487b346cddeaac42042af32fe8e9 | [] | no_license | leggat/tW13TeV | 25f9c350d3f3436d7ea1c0577af0ddd524c0d23a | f87b1aeacdec16d2be5bfb821716ba1205e2bac7 | refs/heads/master | 2021-01-21T04:46:53.420856 | 2016-06-09T10:08:52 | 2016-06-09T10:08:52 | 53,487,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | #A script that runs the reduced skimmer in parallel. This is to try and not process a dataset in sensible amounts of time without it breaking.
import subprocess
import sys
# Number of input files to process, from the command line (Python 2 script).
nFiles = int(sys.argv[1])
# Launch one skimmer job per 100-file chunk ([i, i+99]).
# NOTE(review): Popen returns immediately, so ALL chunks run concurrently
# and nothing waits for or reaps the child processes.
for i in range(1,nFiles,100):
    print "/publicfs/cms/user/duncanleg/scripts/tW13TeV/reducedSkimmer/skimmer -d configs/diMuon.cfg -b "+str(i)+" -e "+str(i+99) + " 2>logs/errorData"+str(i)
    subprocess.Popen("/publicfs/cms/user/duncanleg/scripts/tW13TeV/reducedSkimmer/skimmer -d configs/diMuon.cfg -b "+str(i)+" -e "+str(i+99) + " 2>logs/errorData"+str(i),shell=True)
#print "/publicfs/cms/user/duncanleg/scripts/tW13TeV/skimmer/tWAnal -d configs/diMuon.cfg -b "+str(finalIt) +" -e "+str(nFiles) + " 2>logs/errorData"+str(finalIt)
| [
"leggat@cern.ch"
] | leggat@cern.ch |
4892c312b31b265fa4dd52ab6ecc38b6e36b269e | 1a3a985eca5f52d312dc1f19154c6f28f0011b2d | /character/character.py | 086c85f5287e02e1163c251a6b4b0ddeebb85063 | [
"BSD-3-Clause"
] | permissive | chrisbrake/PythonSandbox | f2441ca4866f1cbe1f3b1a6bf3b0e9fa5652a431 | 8cd2ea847676d6a300b55c560f49cd980f760b00 | refs/heads/master | 2021-06-06T06:47:37.738105 | 2020-02-17T04:41:01 | 2020-02-17T04:41:01 | 99,748,910 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | from . import (
abilities, alignments, backgrounds, bonds, classes, flaws, ideals, items,
levels, races
)
class Character(object):
    """Aggregate record of one character sheet.

    Pure data container: every constructor argument is stored unchanged on
    an attribute of the same name (race, class_, ability, ...); no
    validation or copying is performed.
    """
    def __init__(
            self,
            race: races.BaseRace,
            class_: classes.BaseClass,
            ability: abilities.BaseAbility,
            alignment: alignments.BaseAlignment,
            ideal: ideals.BaseIdeal,
            bond: bonds.BaseBond,
            flaw: flaws.BaseFlaw,
            background: backgrounds.BaseBackground,
            inventory: items.BaseItem,
            level: levels.BaseLevel
    ):
        super(Character, self).__init__()
        self.race = race
        self.class_ = class_
        self.ability = ability
        self.alignment = alignment
        self.ideal = ideal
        self.bond = bond
        self.flaw = flaw
        self.background = background
        self.inventory = inventory
        self.level = level
| [
"chris.brake@gmail.com"
] | chris.brake@gmail.com |
9341358b738606563076aff520be1d952e206b05 | 3effddcdf55cadfad20894bc49958b4407003cc2 | /bin/main.py | 3219529cbabe3b239510d03d39424badd1874c4c | [] | no_license | aki21j/ascii-image-converter | 5cb5b613de82cd02c2edba7ba693d253a9e559a2 | 5294c3d3ab9ef8a0be485da3ae5f616963f2f08d | refs/heads/master | 2023-04-08T00:11:43.033383 | 2021-04-08T18:43:29 | 2021-04-08T18:43:29 | 356,013,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,626 | py | from PIL import Image
import math
from colorama import Fore, Style
import sys
# The three supported pixel-brightness formulas (see create_brightness_matrix).
BRIGHTNESS_TYPES = {
    "AVERAGE": "average",
    "LIGHTNESS": "lightness",
    "LUMINOSITY": "luminosity"
}
# ASCII characters ordered from visually lightest to densest.
ASCII_SUBSTRATE_SCALE = "`^\",:;Il!i~+_-?][}{1)(|\\/tfjrxnuvczXYUJCLQ0OZmwqpdbkhao*#MW&8%B@$"
MAX_BRIGHTNESS = 255
# Brightness units per character step on the scale.
bp_scale = MAX_BRIGHTNESS / len(ASCII_SUBSTRATE_SCALE)
def get_pixel_matrix(img, height):
    """Downscale *img* in place and return its pixels as a row-major matrix.

    :param img: PIL image (mutated by thumbnail()).
    :param height: maximum first dimension passed to thumbnail.
        NOTE(review): PIL's thumbnail() takes (width, height); passing
        ``height`` as the first element makes it the max *width* -- confirm
        the intent.
    :return: list of rows, each a list of per-pixel values (e.g. RGB tuples).
    """
    img.thumbnail((height, 200))
    pixels = list(img.getdata())
    pixel_matrix = []
    # getdata() is flat; slice it into rows of the (resized) image width.
    for i in range(0, len(pixels), img.width):
        pixel_matrix.append(pixels[i:i+img.width])
    return pixel_matrix
def create_brightness_matrix(pixel_matrix, brightness_type):
    """Map each RGB pixel to a scalar brightness using the chosen formula.

    :param pixel_matrix: rows of RGB tuples from get_pixel_matrix().
    :param brightness_type: one of BRIGHTNESS_TYPES' values
        ('average', 'lightness', 'luminosity'); an unknown value yields
        empty rows (no branch matches).
    :return: matrix of floats, same shape as the input.
    """
    brightness_matrix = []
    for row in pixel_matrix:
        brightness_row = []
        for pixel in row:
            if brightness_type == BRIGHTNESS_TYPES.get("AVERAGE"):
                # plain mean of the three channels
                brightness_row.append((pixel[0] + pixel[1] + pixel[2]) / 3)
            elif brightness_type == BRIGHTNESS_TYPES.get("LIGHTNESS"):
                # midpoint of the brightest and darkest channel
                brightness_row.append((max(pixel) + min(pixel)) / 2)
            elif brightness_type == BRIGHTNESS_TYPES.get("LUMINOSITY"):
                # perceptual weighting: green dominates, blue least
                brightness_row.append((0.21 * pixel[0]) + (0.72 * pixel[1]) + (0.07 * pixel[2]))
        brightness_matrix.append(brightness_row)
    return brightness_matrix
# invert brightness, white to black and vice versa
def invert_brightness_matrix(brightness_matrix):
    """Return a new matrix with each brightness flipped about MAX_BRIGHTNESS.

    White becomes black and vice versa; the input matrix is left untouched.
    """
    return [[MAX_BRIGHTNESS - value for value in row] for row in brightness_matrix]
def meth_ascii_matrix(brightness_matrix):
    """Translate a brightness matrix into a matrix of ASCII characters.

    Each brightness is divided by ``bp_scale`` to pick an index into
    ASCII_SUBSTRATE_SCALE; rounding may land one past the end, in which
    case the floor of the quotient is used instead.

    NOTE(review): a brightness of exactly MAX_BRIGHTNESS may still index
    one past the end on BOTH paths (round and floor) -- verify.
    """
    ascii_matrix = []
    for row in brightness_matrix:
        ascii_row = []
        for pixel in row:
            try:
                ascii_row.append(ASCII_SUBSTRATE_SCALE[round(pixel/bp_scale)])
            except Exception as e:
                # round() overshot the scale; fall back to flooring
                ascii_row.append(ASCII_SUBSTRATE_SCALE[math.floor(pixel/bp_scale)])
        ascii_matrix.append(ascii_row)
    return ascii_matrix
def main():
    """Convert the image named on the command line to ASCII art on stdout.

    Pipeline: load -> resize/pixel matrix -> luminosity brightness ->
    invert (dark terminal background) -> ASCII characters -> print.
    """
    infile_path = sys.argv[1]
    im = Image.open(infile_path)
    pixel_matrix = get_pixel_matrix(im, 1000)
    brightness_matrix = create_brightness_matrix(pixel_matrix,BRIGHTNESS_TYPES.get("LUMINOSITY"))
    inverted_matrix = invert_brightness_matrix(brightness_matrix)
    ascii_matrix = meth_ascii_matrix(inverted_matrix)
    for row in ascii_matrix:
        # triple each character to roughly compensate for tall terminal cells
        line = [p+p+p for p in row]
        print("".join(line))
    print(Style.RESET_ALL)
if __name__ == "__main__":
    main()
| [
"ankitgupta21j@gmail.com"
] | ankitgupta21j@gmail.com |
eee5afae9bf5d46f992fc9c7635509c508214765 | 535536c2a90faf5c04db1c180e669d98165d4463 | /knn-mnist.py | a9a7bb115d283a54ef75801251fd90e6ac455dcf | [] | no_license | nmauger-edu/pls-jravi-tensorflow | 1e3ae139fc7ee6a3d0060c6ef49666e6834fdb4e | 12b245cb4a419150a532a691959568127500793d | refs/heads/master | 2021-04-27T03:46:44.985829 | 2018-02-24T18:22:56 | 2018-02-24T18:22:56 | 122,720,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,621 | py | import numpy as np
import tensorflow as tf
# see also : https://github.com/aymericdamien/TensorFlow-Examples
# 1-nearest-neighbour classifier for MNIST using the TF1 graph API.
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
# Store the MNIST data in mnist_data/
mnist = input_data.read_data_sets("mnist_data/", one_hot=True)
training_digits, training_labels = mnist.train.next_batch(50000)
test_digits, test_labels = mnist.test.next_batch(200)
# None: the number of training images is not fixed at graph-build time.
training_digits_pl = tf.placeholder("float", [None, 784])
test_digits_pl = tf.placeholder("float", [784])
# Nearest neighbour: L1 (Manhattan) distance from the test digit to every
# training digit.
l1_distance = tf.abs(tf.add(training_digits_pl, tf.negative(test_digits_pl)))
distance = tf.reduce_sum(l1_distance, axis=1)
# Prediction: index of the training digit with the minimum distance.
# NOTE(review): tf.arg_min is a deprecated alias of tf.argmin in TF1 —
# kept as-is for compatibility with the TF version this was written for.
pred = tf.arg_min(distance, 0)
accuracy = 0
# Initialization of variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # Classify each test digit against the whole training set.
    for i in range(len(test_digits)):
        # Get nearest neighbour
        nn_index = sess.run(pred,
                            feed_dict={training_digits_pl: training_digits,
                                       test_digits_pl: test_digits[i, :]})
        # Get nearest neighbor class label and compare it with its true label
        print("Test", i, "Predicition:", np.argmax(training_labels[nn_index]),
              "True Label:", np.argmax(test_labels[i]))
        # Accumulate accuracy.  Use 1.0 (not 1) so this is float division
        # even under Python 2, where 1/len(...) is integer division and
        # would leave accuracy permanently at 0.
        if np.argmax(training_labels[nn_index]) == np.argmax(test_labels[i]):
            accuracy += 1.0/len(test_digits)
print("Done!")
print("Accuracy:", accuracy)
| [
"norbertmauger@gmail.com"
] | norbertmauger@gmail.com |
eaf1784c0d34c71a35a44e1272093d4e512ce762 | e81188e8ff0af121025b52f458ccf4aa9c0461a1 | /watson/framework/views/renderers/jinja2.py | b68a79947b6efa25e8d14438c4b252d3f6d25473 | [
"MIT"
] | permissive | SabatierBoris/watson-framework | f1be30d56a23654d5923ef02e4613786d30f8dfc | cfcdf4d8aedb6f3d49d4261122542354131389b8 | refs/heads/master | 2021-01-17T07:41:58.864120 | 2014-03-11T15:56:15 | 2014-03-11T15:56:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,362 | py | # -*- coding: utf-8 -*-
import importlib
import types
import jinja2
from watson.common import datastructures
from watson.framework.views.renderers import abc
TEMPLATES = {
'base': '''<!DOCTYPE html>
<html>
<head>
{% block head %}
<style type="text/css">
html, body { font-family: Helvetica, Arial, sans-serif }
{% block styles %}{% endblock %}
</style>
{% endblock %}
</head>
<body>
{% block body %}{% endblock %}
</body>
</html>
''',
'exception_styling': '''
body {
-webkit-font-smoothing: antialiased;
margin: 0; padding: 0;
font-size: 12px;
}
h1, h2 {
background: #232e34;
color: #fff;
margin: 0;
padding: 10px;
font-weight: normal;
}
h1:first-of-type {
padding-bottom: 0;
}
h2 {
color: #3e515a;
font-size: 1.1em;
padding-top: 0;
}
h3 {
color: #333;
margin-left: 10px;
}
p {
margin: 0;
padding: 10px;
}
table {
margin: 10px;
width: 98%;
border-collapse: collapse;
}
table th {
text-align: left;
font-size: 1.1em;
padding: 0 6px 6px;
}
table td {
padding: 6px;
vertical-align: top;
color: #333;
}
.watson-stack-frames > tbody > tr:nth-child(3n+1) {
background-color: #fff;
}
.watson-stack-frames > tbody > tr {
background-color: #f5f5f5;
cursor: pointer;
}
.watson-stack-frames > tbody > tr.watson-stack-frames-frame-vars {
cursor: default;
background: #f1ecc2;
}
.hide {
display: none;
}
table td {
font-family: "Lucida Console", Monaco, monospace;
}
dl {
margin: 0;
padding: 10px;
}
dl.watson-info {
background: #d9f2fe;
color: #1c4d72;
border-bottom: 1px solid #9cb3be;
}
dl.watson-error {
box-shadow: -6px 0 6px rgba(0, 0, 0, 0.05);
border-top: 1px solid #00d9ee;
border-bottom: 1px solid #00b7df;
background: #00bfe3;
color: #fff;
text-shadow: 0 1px #009fcc;
padding-top: 12px;
}
dt {
font-weight: bold;
font-size: 1.1em;
float: left;
width: 160px;
clear: both;
}
dd {
color: #6087af;
margin-bottom: 4px;
margin-left: 160px;
}
dd table {
margin: 0;
table-layout: fixed;
}
dd table td {
font-family: inherit;
padding: 2px 0;
color: inherit;
}
dd table tr > td:first-of-type {
width: 200px;
overflow: hidden;
white-space: nowrap;
text-overflow: ellipsis;
}
dl.watson-error dd {
color: #f6f6f6;
padding-top: 2px;
}
''',
'exception_details': '''
{% if debug %}
<h2>{{ message|escape }}</h2>
<dl class="watson-error">
<dt>Exception Type:</dt>
<dd>{{ type }}</dd>
{% if cause_message %}
<dt>Exception Message:</dt>
<dd>{{ cause_message|escape }}</dd>
{% endif %}
</dl>
<dl class="watson-info">
{% if route_match %}
<dt>Watson Version:<dt>
<dd>{{ version }}</dd>
<dt>Route:</dt>
<dd>{{ route_match.name|e }}</dd>
{% endif %}
<dt>Request:</dt>
<dd>{{ request().url }}</dd>
<dt>Method:</dt>
<dd>{{ request().method }}</dd>
<dt>Session Id:</dt>
<dd>{{ request().session.id }}</dd>
<dt>Headers:</dt>
<dd>
<table>
{% for key, value in request().headers|dictsort %}
<tr><td>{{ key }}</td><td>{{ value }}</td></tr>
{% endfor %}
</table>
</dd>
<dt>Get Vars:</dt>
<dd>
<table>
{% for key, value in request().get|dictsort %}
<tr><td>{{ key }}</td><td>{{ value }}</td></tr>
{% else %}
-
{% endfor %}
</table>
</dd>
<dt>Post Vars:</dt>
<dd>
<table>
{% for key, value in request().post|dictsort %}
<tr><td>{{ key }}</td><td>{{ value }}</td></tr>
{% else %}
-
{% endfor %}
</table>
</dd>
<dt>Server:</dt>
<dd>
<table>
{% for key, value in request().server|dictsort %}
<tr><td>{{ key }}</td><td>{{ value }}</td></tr>
{% endfor %}
</table>
</dd>
</dl>
<h1>Stack Trace</h1>
<table class="watson-stack-frames">
<tr>
<th>Line</th><th>File</th><th>Function</th><th>Code</th>
</tr>
{% for frame in frames %}
<tr class="watson-stack-frames-frame">
<td>{{ frame.line }}</td>
<td>{{ frame.file }}</td>
<td>{{ frame.function }}</td>
<td>{{ frame.code }}</td>
</tr>
{% if frame.vars %}
<tr class="watson-stack-frames-frame-vars">
<td colspan="4" class="hide">
<table class="watson-stack-frames-vars">
<tr><th>Name</th><th>Value</th></tr>
{% for k, v in frame.vars|dictsort %}
<tr>
<td>{{ k|e }}</td>
<td>{{ v|e }}</td>
</tr>
{% endfor %}
</table>
</td>
</tr>
{% endif %}
{% endfor %}
</table>
<script>
Element.prototype.toggleClass = function (className) {
this.className = this.className === className ? '' : className;
};
var frames = document.getElementsByClassName('watson-stack-frames-frame');
for (var i = 0; i < frames.length; i++) {
var frame = frames[i];
frame.onclick = function() {
this.nextElementSibling.children[0].toggleClass('hide');
}
}
</script>
{% endif %}
''',
'blank.html': '''{% extends "base" %}
{% block body %}
{{ content }}
{% endblock %}
''',
'errors/404.html': '''{% extends "base" %}
{% block styles %}
{{ super() }}
{% include "exception_styling" %}
{% endblock %}
{% block body %}
<h1>Not Found</h1>
{% include "exception_details" %}
{% if not debug %}
<p>The requested page cannot be found.</p>
{% endif %}
{% endblock %}
''',
'errors/500.html': '''{% extends "base" %}
{% block styles %}
{{ super() }}
{% include "exception_styling" %}
{% endblock %}
{% block body %}
<h1>Internal Server Error</h1>
{% include "exception_details" %}
{% if not debug %}
<p>A non-recoverable error has occurred and an administrator has been notified.</p>
{% endif %}
{% endblock %}
'''
}
class Renderer(abc.Renderer):
    """Jinja2-backed view renderer for the Watson framework.

    Builds a jinja2.Environment whose loaders combine the user-configured
    template paths with the built-in TEMPLATES dict, and whose filters and
    globals are populated from the modules named in the renderer config.
    """
    # Lazily-built jinja2.Environment (created in register_loaders).
    _env = None
    # Mirrors application.config['debug']['enabled'].
    _debug_mode = False
    @property
    def env(self):
        """The underlying jinja2.Environment."""
        return self._env
    def __init__(self, config=None, application=None):
        """Configure the environment and register configured filters/globals.

        :param config: renderer configuration (paths, environment, filters,
            globals, extension)
        :param application: the owning Watson application (supplies debug
            flag and the IoC container)
        """
        super(Renderer, self).__init__(config)
        self._debug_mode = application.config['debug']['enabled']
        self.register_loaders()
        _types = ('filters', 'globals')
        for _type in _types:
            # Import each configured module and register its public names on
            # the environment; plain functions are registered directly, any
            # other definition is resolved through the IoC container.
            for module in config[_type]:
                mod = importlib.import_module(module)
                dic = datastructures.module_to_dict(
                    mod, ignore_starts_with='__')
                for name, definition in dic.items():
                    obj = '{0}.{1}'.format(module, name)
                    env_type = getattr(self.env, _type)
                    if isinstance(definition, types.FunctionType):
                        env_type[name] = definition
                    else:
                        env_type[name] = application.container.get(obj)
    def register_loaders(self):
        """Create the jinja2.Environment with a ChoiceLoader.

        ChoiceLoader searches loaders in order, so in debug mode the built-in
        TEMPLATES take precedence over user templates; in production the user
        templates are searched first.
        """
        user_loaders = [jinja2.FileSystemLoader(path)
                        for path in self.config.get('paths')]
        system_loaders = [jinja2.DictLoader(TEMPLATES)]
        if self._debug_mode:
            loaders = system_loaders + user_loaders
        else:
            loaders = user_loaders + system_loaders
        kwargs = self.config.get('environment', {})
        kwargs['loader'] = jinja2.ChoiceLoader(loaders)
        self._env = jinja2.Environment(**kwargs)
    def __call__(self, view_model, context=None):
        """Render the view model's template (with the configured extension)
        using the view model's data plus an optional request context."""
        template = self._env.get_template(
            '{0}.{1}'.format(view_model.template,
                             self.config['extension']))
        return template.render(context=context or {}, **view_model.data)
| [
"simon.coulton@gmail.com"
] | simon.coulton@gmail.com |
215813b14cf1590bb46d7d0810ba0bcb2b0e2ce2 | 54bcbaa1d3630e78ad883ad91708096f21f1e83a | /my-addon.py | bbed58d16f0663b99545bf98dbd4c32f98325584 | [] | no_license | codeyu/mitm-demo | a747e1de47cbbfe8c27742ca46ee36fd4648d924 | b4d0c3a393bec27a5bc05a41afd05e83edf6de36 | refs/heads/main | 2023-01-10T03:12:40.755387 | 2020-11-13T10:40:51 | 2020-11-13T10:40:51 | 312,541,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | import mitmproxy.http
from mitmproxy import ctx
import requests
class Counter:
    """mitmproxy addon that saves every requested .mp3 to the working directory."""
    def __init__(self):
        # Request counter from the original addon example (not updated yet).
        self.num = 0
    def request(self, flow: mitmproxy.http.HTTPFlow):
        """Intercept each request; if it targets an .mp3, download and save it."""
        if flow.request.pretty_url.endswith('.mp3'):
            ctx.log.info(flow.request.pretty_url)
            r = requests.get(flow.request.pretty_url, allow_redirects=True)
            fileName = flow.request.pretty_url.split('/')[-1]
            # Use a context manager so the file handle is closed
            # deterministically (the original leaked it).
            with open(fileName, 'wb') as mp3_file:
                mp3_file.write(r.content)
addons = [
Counter()
] | [
"noreply@github.com"
] | codeyu.noreply@github.com |
6334901a0660dd455f942b7127b66eeb3ce93f85 | 7f265952dcf6f15f0535106ecac5167ca82607da | /Basic library/requests/advanced usage/Agent.py | 037fca72d2935c04a406851d203dd306e1625504 | [] | no_license | isPoikilotherm/python-crawler | 9ab86dd7ef3e5504f81b837ceacede800a043bc5 | ff3c076b6e58f85f77b91d03ec047a6aedbbfa3f | refs/heads/master | 2020-09-07T12:25:22.202930 | 2020-06-18T04:45:45 | 2020-06-18T04:45:45 | 220,778,055 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | import requests
proxies={
# 'http':'http://47.107.172.6:8000',
'https':'https://47.106.231.64:3128'
}
r=requests.get('https://www.taobao.com',proxies=proxies)
print(r.text) | [
"18832026962@163.com"
] | 18832026962@163.com |
1f2e7b1fdb24d899b19051ed50eaeaf5aeeb8f4e | b3fd61fdfd6ea82695d805c95321619423b836e6 | /Tom_Sawyer.py | 02e8482d4f8b2e35842c340ef85ed059753499c5 | [] | no_license | sjogleka/General_codes | 761967fd1175c97804d49290af9db10828d4900f | 2772ea7b723c4ca680864b40b41fd34cc197726d | refs/heads/master | 2021-07-16T07:41:05.841942 | 2020-10-14T01:49:12 | 2020-10-14T01:49:12 | 218,369,391 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | def countWays(arr, n):
    # pos[j] holds the 1-based index of the j-th cell containing 1;
    # p counts how many such cells were found.
    pos = [0 for i in range(n)]
    p = 0
    for i in range(n):
        if (arr[i] == 1):
            pos[p] = i + 1
            p += 1
    # No 1s at all -> nothing to place, zero ways.
    if (p == 0):
        return 0
    # Total ways = product of the gaps between consecutive 1-positions.
    ways = 1
    for i in range(p - 1):
        ways *= pos[i + 1] - pos[i]
    return ways
print(countWays([0,1],2)) | [
"sjogleka@uncc.edu"
] | sjogleka@uncc.edu |
ad01e6e8fa3f1bd3c233bcd83b90890a9c2a88a9 | b407e3658245167e72ad0029cb588d98c341e9cd | /seq2seq/data/__init__.py | 2d352d0bd083de726fd97263bec965beaf0ad766 | [] | no_license | Wooyong-Choi/seq2seq_pytorch | 6276667774ac8977c20a6ed9512c69380243dabb | d05479aa45bde5df2d03e141c4e9a7b620938769 | refs/heads/master | 2021-05-03T06:14:23.479120 | 2019-01-29T16:54:39 | 2019-01-29T16:54:39 | 120,591,208 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | from seq2seq.data.dataset import Dataset, sorted_collate_fn
from seq2seq.data.vocab import Vocab
from seq2seq.data.vocab import PAD_TOK, SOS_TOK, EOS_TOK, UNK_TOK
from seq2seq.data.vocab import PAD_IDX, SOS_IDX, EOS_IDX, UNK_IDX | [
"whdrmt12@gmail.com"
] | whdrmt12@gmail.com |
1815fe1b607cb16efedf74a577b3f584a39e2dfc | 44589e4dde52caba695b685feb8b6a6fa20860e3 | /app/app.py | ec9dbe926f875034c398c6a579e49f7fc28825c3 | [] | no_license | venkatesan007/flask | 15a5a25d558accd1ee325f71cfdc0e61c34b8a55 | 0d3467eb3f55e33663ef84e4aa704479c81d50c8 | refs/heads/master | 2022-11-07T00:45:02.376955 | 2020-06-28T17:39:29 | 2020-06-28T17:39:29 | 275,636,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | from flask import Flask, request, render_template
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the user-details form page; submissions go to /handle_data."""
    return render_template('details.html')
@app.route('/handle_data', methods=['GET', 'POST'])
def handle_data():
    """Receive the details form.

    On POST, read the submitted fields (echoing them to stdout for
    debugging) and show the result page; on GET just show the result page.
    """
    if request.method == 'POST':
        # Use .get() for every field so a missing key yields None instead of
        # aborting with a 400 (the original mixed request.form['gender'] in
        # with .get() for the other fields).
        projectpath = request.form.get('user_name')
        gender = request.form.get('gender')
        age = request.form.get('age')
        number = request.form.get('number')
        address = request.form.get('address')
        print(projectpath)
        print(gender)
        print(age)
        print(number)
        print(address)
        return render_template("result.html")
    else:
        return render_template('result.html')
if __name__ == '__main__':
app.run() | [
"844609@cognizant.com"
] | 844609@cognizant.com |
c1676f882fff3c9ac735f5aca4ac99eb5f0c70d0 | 37e84c832b4016b7e5b4fa6f99da00977cf010f7 | /python3-tutorial/02 Advanced/1203 CreateCollection.py | 797f6f82ef5cab0f223fb6eb0140ab3b6ed1fd2c | [
"MIT"
] | permissive | CoderDream/python-best-practice | 24e4a09e778cc6d0ce846edf1e7b4fb66c536b0e | 40e6b5315daefb37c59daa1a1990ac1ae10f8cca | refs/heads/master | 2020-08-14T13:57:16.309356 | 2019-10-28T09:20:03 | 2019-10-28T09:20:03 | 215,180,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | #!/usr/bin/python3
import pymongo
myclient = pymongo.MongoClient('mongodb://chenqingwh.uicp.net:37017/')
mydb = myclient["quant_01"]
mycol = mydb["sites"]
| [
"coderdream@gmail.com"
] | coderdream@gmail.com |
3de504b7887f21aeaa2337763be2b30e95dce805 | 6884f7c16419c0cab6a7e1c1909852621a1166f9 | /input_handlers.py | 1d7597ce946701c94ec8cff3f61487d56f4de1c9 | [] | no_license | ayoung1/libtcodroguelike | 8ff8ae0afd675ae3ad87a6ad8786fd49c422a665 | c01f06bc3442481cd8fa6a1ac0e3a76897546a2e | refs/heads/master | 2020-05-05T12:35:39.031815 | 2019-04-08T07:43:25 | 2019-04-08T07:43:25 | 180,035,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,081 | py | import tcod as libtcod
from game_states import GameStates
def handle_keys(key, game_state):
if game_state == GameStates.PLAYERS_TURN:
return handle_player_turn_keys(key)
elif game_state == GameStates.PLAYER_DEAD:
return handle_player_dead_keys(key)
elif game_state in (GameStates.SHOW_INVENTORY, GameStates.DROP_INVENTORY):
return handle_inventory_keys(key)
elif game_state == GameStates.TARGETING:
return handle_targeting_keys(key)
elif game_state == GameStates.LEVEL_UP:
return handle_level_up_menu(key)
elif game_state == GameStates.CHARACTER_SCREEN:
return handle_character_screen(key)
elif game_state == GameStates.EXIT:
return handle_exit_menu(key)
return {}
def handle_character_screen(key):
if key.vk == libtcod.KEY_ESCAPE:
return {'exit': True}
return {}
def handle_main_menu(key):
key_char = chr(key.c)
if key_char == 'a':
return {'new_game': True}
elif key_char == 'b':
return {'load_game': True}
elif key_char == 'c' or key.vk == libtcod.KEY_ESCAPE:
return {'exit': True}
return {}
def handle_exit_menu(key):
key_char = chr(key.c)
if key_char == 'a':
return {'quit_game': True}
elif key_char == 'b' or key.vk == libtcod.KEY_ESCAPE:
return {'exit': True}
return {}
def handle_mouse(mouse):
(x, y) = (mouse.cx, mouse.cy)
if mouse.lbutton_pressed:
return {'left_click': (x, y)}
elif mouse.rbutton_pressed:
return {'right_click': (x, y)}
return {}
def handle_targeting_keys(key):
if key.vk == libtcod.KEY_ESCAPE:
return {'exit': True}
return {}
def handle_inventory_keys(key):
index = key.c - ord('a')
if index >= 0:
return {'inventory_index': index}
if key.vk == libtcod.KEY_ENTER and key.lalt:
# Alt+Enter: toggle full screen
return {'fullscreen': True}
elif key.vk == libtcod.KEY_ESCAPE:
# Exit the menu
return {'exit': True}
return {}
def handle_level_up_menu(key):
if key:
key_char = chr(key.c)
if key_char == 'a':
return {'level_up': 'hp'}
elif key_char == 'b':
return {'level_up': 'str'}
elif key_char == 'c':
return {'level_up': 'def'}
return {}
def handle_player_dead_keys(key):
key_char = chr(key.c)
if key_char == 'i':
return {'show_inventory': True}
if key.vk == libtcod.KEY_ENTER and key.lalt:
# Alt+Enter: toggle full screen
return {'fullscreen': True}
elif key.vk == libtcod.KEY_ESCAPE:
# Exit the menu
return {'exit': True}
return {}
def handle_player_turn_keys(key):
key_char = chr(key.c)
# Movement keys
if key_char == 'k':
return {'move': (0, -1)}
elif key_char == 'j':
return {'move': (0, 1)}
elif key_char == 'h':
return {'move': (-1, 0)}
elif key_char == 'l':
return {'move': (1, 0)}
elif key_char == 'y':
return {'move': (-1, -1)}
elif key_char == 'u':
return {'move': (1, -1)}
elif key_char == 'b':
return {'move': (-1, 1)}
elif key_char == 'n':
return {'move': (1, 1)}
elif key_char == 'g':
return {'pickup': True }
elif key_char == 'i':
return {'show_inventory': True}
elif key_char == 'd':
return {'drop_inventory': True}
elif key.vk == libtcod.KEY_ENTER:
return {'take_stairs': True}
elif key_char == 'c':
return {'show_character_screen': True}
elif key_char == 'z':
return {'wait': True}
if key.vk == libtcod.KEY_ENTER and key.lalt:
# Alt+Enter: toggle full screen
return {'fullscreen': True}
elif key.vk == libtcod.KEY_ESCAPE:
# Exit the game
return {'exit': True}
# No key was pressed
return {}
| [
"aaronjyoung1@gmail.com"
] | aaronjyoung1@gmail.com |
443078e7b5c9ba43b126cdffff5dbe5295c466bb | b54a6c788ca6fd2b734899fe92e9ac7c288fbe01 | /src/form/panel/ArmIKtoFKPanel.py | 4c977840cce3d97515f2a61ced8c54ad896f8087 | [
"MIT"
] | permissive | fehler001/motion_supporter | 6bff2ed08b0dbf1b7457e6cbea3559f0a0f9fb6b | 4a5db3746b80683bdc0f610211ebdb6e60e6941f | refs/heads/master | 2023-08-01T12:32:07.716946 | 2021-09-24T19:59:35 | 2021-09-24T19:59:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,041 | py | # -*- coding: utf-8 -*-
#
import os
import wx
import wx.lib.newevent
import sys
from form.panel.BasePanel import BasePanel
from form.parts.BaseFilePickerCtrl import BaseFilePickerCtrl
from form.parts.HistoryFilePickerCtrl import HistoryFilePickerCtrl
from form.parts.ConsoleCtrl import ConsoleCtrl
from form.worker.ArmIKtoFKWorkerThread import ArmIKtoFKWorkerThread
from utils import MFormUtils, MFileUtils
from utils.MLogger import MLogger # noqa
logger = MLogger(__name__)
TIMER_ID = wx.NewId()
# Event definition: completion event posted back to the UI thread by the worker.
(ArmIKtoFKThreadEvent, EVT_ARM_IK_THREAD) = wx.lib.newevent.NewEvent()
class ArmIKtoFKPanel(BasePanel):
    """Tab panel that converts arm-IK motion data to arm FK (arm/elbow/wrist).

    Lets the user pick the source motion (VMD/VPD), the IK-rigged model PMX,
    the FK target model PMX and the output path, then runs the conversion on
    a background worker thread.
    """
    def __init__(self, frame: wx.Frame, arm_ik2fk: wx.Notebook, tab_idx: int):
        super().__init__(frame, arm_ik2fk, tab_idx)
        # Background worker; None while no conversion is running.
        self.convert_arm_ik2fk_worker = None
        self.header_sizer = wx.BoxSizer(wx.VERTICAL)
        self.description_txt = wx.StaticText(self, wx.ID_ANY, u"腕IKを腕FK(腕・ひじ・手首)に変換します。元モデルには腕IKが入ったモデルを指定してください。" \
                                             + "\n捩りは統合しちゃいますので、必要に応じてサイジングで捩り分散をかけてください。"
                                             + "\n不要キー削除を行うと、キーが間引きされます。キー間がオリジナルから多少ずれ、またそれなりに時間がかかります。", wx.DefaultPosition, wx.DefaultSize, 0)
        self.header_sizer.Add(self.description_txt, 0, wx.ALL, 5)
        self.static_line01 = wx.StaticLine(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
        self.header_sizer.Add(self.static_line01, 0, wx.EXPAND | wx.ALL, 5)
        # Source motion VMD/VPD file picker
        self.arm_ik2fk_vmd_file_ctrl = HistoryFilePickerCtrl(self.frame, self, u"対象モーションVMD/VPD", u"対象モーションVMDファイルを開く", ("vmd", "vpd"), wx.FLP_DEFAULT_STYLE, \
                                                             u"調整したい対象モーションのVMDパスを指定してください。\nD&Dでの指定、開くボタンからの指定、履歴からの選択ができます。", \
                                                             file_model_spacer=46, title_parts_ctrl=None, title_parts2_ctrl=None, file_histories_key="arm_ik2fk_vmd", is_change_output=True, \
                                                             is_aster=False, is_save=False, set_no=1)
        self.header_sizer.Add(self.arm_ik2fk_vmd_file_ctrl.sizer, 1, wx.EXPAND, 0)
        # Model PMX file picker (model WITH arm IK)
        self.arm_ik2fk_ik_model_file_ctrl = HistoryFilePickerCtrl(self.frame, self, u"腕IKありモデルPMX", u"腕IKありモデルPMXファイルを開く", ("pmx"), wx.FLP_DEFAULT_STYLE, \
                                                                  u"腕IKモーションを適用したいモデルのPMXパスを指定してください。\nD&Dでの指定、開くボタンからの指定、履歴からの選択ができます。", \
                                                                  file_model_spacer=60, title_parts_ctrl=None, title_parts2_ctrl=None, file_histories_key="arm_ik2fk_pmx", \
                                                                  is_change_output=True, is_aster=False, is_save=False, set_no=1)
        self.header_sizer.Add(self.arm_ik2fk_ik_model_file_ctrl.sizer, 1, wx.EXPAND, 0)
        # Model PMX file picker (model WITHOUT arm IK, i.e. the FK target)
        self.arm_ik2fk_fk_model_file_ctrl = HistoryFilePickerCtrl(self.frame, self, u"腕IKなしモデルPMX", u"腕IKなしモデルPMXファイルを開く", ("pmx"), wx.FLP_DEFAULT_STYLE, \
                                                                  u"変換後の腕FKモーションを適用したいモデルのPMXパスを指定してください。\nD&Dでの指定、開くボタンからの指定、履歴からの選択ができます。", \
                                                                  file_model_spacer=60, title_parts_ctrl=None, title_parts2_ctrl=None, file_histories_key="arm_ik2fk_pmx_fk", \
                                                                  is_change_output=True, is_aster=False, is_save=False, set_no=1)
        self.header_sizer.Add(self.arm_ik2fk_fk_model_file_ctrl.sizer, 1, wx.EXPAND, 0)
        # Output VMD file picker
        self.output_arm_ik2fk_vmd_file_ctrl = BaseFilePickerCtrl(frame, self, u"出力対象VMD", u"出力対象VMDファイルを開く", ("vmd"), wx.FLP_OVERWRITE_PROMPT | wx.FLP_SAVE | wx.FLP_USE_TEXTCTRL, \
                                                                 u"調整結果の対象VMD出力パスを指定してください。\n対象VMDファイル名に基づいて自動生成されますが、任意のパスに変更することも可能です。", \
                                                                 is_aster=False, is_save=True, set_no=1)
        self.header_sizer.Add(self.output_arm_ik2fk_vmd_file_ctrl.sizer, 1, wx.EXPAND, 0)
        # Optional "remove unnecessary keys" post-processing checkbox
        self.remove_unnecessary_flg_ctrl = wx.CheckBox(self, wx.ID_ANY, u"不要キー削除処理を追加実行する", wx.DefaultPosition, wx.DefaultSize, 0)
        self.remove_unnecessary_flg_ctrl.SetToolTip(u"チェックを入れると、不要キー削除処理を追加で実行します。キーが減る分、キー間が少しズレる事があります。")
        self.header_sizer.Add(self.remove_unnecessary_flg_ctrl, 0, wx.ALL, 5)
        self.sizer.Add(self.header_sizer, 0, wx.EXPAND | wx.ALL, 5)
        btn_sizer = wx.BoxSizer(wx.HORIZONTAL)
        # Conversion execute button
        self.arm_ik2fk_btn_ctrl = wx.Button(self, wx.ID_ANY, u"腕IK変換", wx.DefaultPosition, wx.Size(200, 50), 0)
        self.arm_ik2fk_btn_ctrl.SetToolTip(u"足FKを足IKに変換したモーションを再生成します。")
        self.arm_ik2fk_btn_ctrl.Bind(wx.EVT_LEFT_DOWN, self.on_convert_arm_ik2fk)
        self.arm_ik2fk_btn_ctrl.Bind(wx.EVT_LEFT_DCLICK, self.on_doubleclick)
        btn_sizer.Add(self.arm_ik2fk_btn_ctrl, 0, wx.ALL, 5)
        self.sizer.Add(btn_sizer, 0, wx.ALIGN_CENTER | wx.SHAPED, 5)
        # Console output area
        self.console_ctrl = ConsoleCtrl(self, self.frame.logging_level, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size(-1, 420), \
                                        wx.TE_MULTILINE | wx.TE_READONLY | wx.BORDER_NONE | wx.HSCROLL | wx.VSCROLL | wx.WANTS_CHARS)
        self.console_ctrl.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DLIGHT))
        self.console_ctrl.Bind(wx.EVT_CHAR, lambda event: MFormUtils.on_select_all(event, self.console_ctrl))
        self.sizer.Add(self.console_ctrl, 1, wx.ALL | wx.EXPAND, 5)
        # Progress gauge
        self.gauge_ctrl = wx.Gauge(self, wx.ID_ANY, 100, wx.DefaultPosition, wx.DefaultSize, wx.GA_HORIZONTAL)
        self.gauge_ctrl.SetValue(0)
        self.sizer.Add(self.gauge_ctrl, 0, wx.ALL | wx.EXPAND, 5)
        self.Layout()
        self.fit()
        # Bind the conversion-finished handler to the frame
        self.frame.Bind(EVT_ARM_IK_THREAD, self.on_convert_arm_ik2fk_result)
    def on_wheel_spin_ctrl(self, event: wx.Event, inc=1):
        # Delegate to the frame's spin handler, then refresh the output path.
        self.frame.on_wheel_spin_ctrl(event, inc)
        self.set_output_vmd_path(event)
    # Handler invoked when any of the file pickers changes
    def on_change_file(self, event: wx.Event):
        self.set_output_vmd_path(event, is_force=True)
    def set_output_vmd_path(self, event, is_force=False):
        # Regenerate the output path from the source motion + FK model paths.
        output_arm_ik2fk_vmd_path = MFileUtils.get_output_arm_ik2fk_vmd_path(
            self.arm_ik2fk_vmd_file_ctrl.file_ctrl.GetPath(),
            self.arm_ik2fk_fk_model_file_ctrl.file_ctrl.GetPath(),
            self.output_arm_ik2fk_vmd_file_ctrl.file_ctrl.GetPath(), is_force)
        self.output_arm_ik2fk_vmd_file_ctrl.file_ctrl.SetPath(output_arm_ik2fk_vmd_path)
        # Warn when the generated path exceeds the classic Windows MAX_PATH limit.
        if len(output_arm_ik2fk_vmd_path) >= 255 and os.name == "nt":
            logger.error("生成予定のファイルパスがWindowsの制限を超えています。\n生成予定パス: {0}".format(output_arm_ik2fk_vmd_path), decoration=MLogger.DECORATION_BOX)
    # Disable the whole form (while a conversion is running)
    def disable(self):
        self.arm_ik2fk_vmd_file_ctrl.disable()
        self.arm_ik2fk_ik_model_file_ctrl.disable()
        self.arm_ik2fk_fk_model_file_ctrl.disable()
        self.output_arm_ik2fk_vmd_file_ctrl.disable()
        self.arm_ik2fk_btn_ctrl.Disable()
    # Re-enable the whole form (original comment said "disable" — copy/paste)
    def enable(self):
        self.arm_ik2fk_vmd_file_ctrl.enable()
        self.arm_ik2fk_ik_model_file_ctrl.enable()
        self.arm_ik2fk_fk_model_file_ctrl.enable()
        self.output_arm_ik2fk_vmd_file_ctrl.enable()
        self.arm_ik2fk_btn_ctrl.Enable()
    def on_doubleclick(self, event: wx.Event):
        # Swallow double clicks so the conversion is not started twice.
        self.timer.Stop()
        logger.warning("ダブルクリックされました。", decoration=MLogger.DECORATION_BOX)
        event.Skip(False)
        return False
    # Button handler: start a short timer so a double click can cancel it
    def on_convert_arm_ik2fk(self, event: wx.Event):
        self.timer = wx.Timer(self, TIMER_ID)
        self.timer.Start(200)
        self.Bind(wx.EVT_TIMER, self.on_convert, id=TIMER_ID)
    # Actual conversion start/stop logic (runs after the debounce timer)
    def on_convert(self, event: wx.Event):
        self.timer.Stop()
        self.Unbind(wx.EVT_TIMER, id=TIMER_ID)
        # Disable the form
        self.disable()
        # Pin the current tab
        self.fix_tab()
        # Clear the console
        self.console_ctrl.Clear()
        # Redirect stdout to this panel's console
        sys.stdout = self.console_ctrl
        self.arm_ik2fk_vmd_file_ctrl.save()
        self.arm_ik2fk_ik_model_file_ctrl.save()
        self.arm_ik2fk_fk_model_file_ctrl.save()
        # Persist file history as JSON
        MFileUtils.save_history(self.frame.mydir_path, self.frame.file_hitories)
        self.elapsed_time = 0
        result = True
        result = self.arm_ik2fk_vmd_file_ctrl.is_valid() and self.arm_ik2fk_ik_model_file_ctrl.is_valid() and self.arm_ik2fk_fk_model_file_ctrl.is_valid() and result
        if not result:
            # Play the finish sound
            self.frame.sound_finish()
            # Allow tab switching again
            self.release_tab()
            # Re-enable the form
            self.enable()
            return result
        # Start (or stop) the arm IK conversion
        if self.arm_ik2fk_btn_ctrl.GetLabel() == "腕IK変換停止" and self.convert_arm_ik2fk_worker:
            # Disable the form
            self.disable()
            # Button pressed while running: stop the worker
            self.convert_arm_ik2fk_worker.stop()
            # Allow tab switching again
            self.frame.release_tab()
            # Re-enable the form
            self.frame.enable()
            # Drop the worker
            self.convert_arm_ik2fk_worker = None
            # Hide progress
            self.gauge_ctrl.SetValue(0)
            logger.warning("腕IK変換を中断します。", decoration=MLogger.DECORATION_BOX)
            self.arm_ik2fk_btn_ctrl.SetLabel("腕IK変換")
            event.Skip(False)
        elif not self.convert_arm_ik2fk_worker:
            # Disable the form
            self.disable()
            # Pin the current tab
            self.fix_tab()
            # Clear the console
            self.console_ctrl.Clear()
            # Switch the button label to "stop"
            self.arm_ik2fk_btn_ctrl.SetLabel("腕IK変換停止")
            self.arm_ik2fk_btn_ctrl.Enable()
            self.convert_arm_ik2fk_worker = ArmIKtoFKWorkerThread(self.frame, ArmIKtoFKThreadEvent, self.frame.is_saving, self.frame.is_out_log)
            self.convert_arm_ik2fk_worker.start()
            event.Skip()
        else:
            logger.error("まだ処理が実行中です。終了してから再度実行してください。", decoration=MLogger.DECORATION_BOX)
            event.Skip(False)
        return result
    # Conversion-finished handler (posted by the worker thread)
    def on_convert_arm_ik2fk_result(self, event: wx.Event):
        self.elapsed_time = event.elapsed_time
        logger.info("\n処理時間: %s", self.show_worked_time())
        self.arm_ik2fk_btn_ctrl.SetLabel("腕IK変換")
        # Play the finish sound
        self.frame.sound_finish()
        # Allow tab switching again
        self.release_tab()
        # Re-enable the form
        self.enable()
        # Drop the worker
        self.convert_arm_ik2fk_worker = None
        # Hide progress
        self.gauge_ctrl.SetValue(0)
    def show_worked_time(self):
        # Convert elapsed seconds into a minutes/seconds display string
        td_m, td_s = divmod(self.elapsed_time, 60)
        if td_m == 0:
            worked_time = "{0:02d}秒".format(int(td_s))
        else:
            worked_time = "{0:02d}分{1:02d}秒".format(int(td_m), int(td_s))
        return worked_time
| [
"garnet200521358@gmail.com"
] | garnet200521358@gmail.com |
1c0c4e677f72c5be81c71e0b6677c161e0564d51 | 45be055a5802c5623654aec62e6c0dbf2d8c8e4d | /generateBMITable.py | b0bf30938a4fa1829ff51bf0737451aafa39fc3c | [] | no_license | xiangormirko/python_work | 4deffd8febf74f732132166d9dbe2432adb8f8a3 | 660ba1d442f021095a443d7e78701c88fb583a8f | refs/heads/master | 2021-01-23T13:18:16.440449 | 2015-10-17T04:00:12 | 2015-10-17T04:00:12 | 35,889,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | # File: generateBMITable.py
# Author: Mirko (xiang@bu.edu)
# Description:
# Assingment: A09
# Date: 10/01/13
def calculateBMI(height, weight):
    """Compute the body-mass index for a height in inches and weight in pounds.

    Converts to metric (kg, m) and returns weight_kg / height_m^2.
    """
    weight_kg = weight * 0.4536
    height_m = height * 0.0254
    return weight_kg / (height_m * height_m)
def printBMITable(starth,endh,startw,endw):
    # Print a BMI lookup table: one column per weight (pounds) from startw
    # to endw in steps of 10, one row per height (inches) from starth to endh.
    # NOTE: Python 2 print statements; trailing commas suppress the newline
    # so the row entries stay on one line.
    print "HEIGHT WEIGHT--->"
    print "      ",
    for numbers in range (startw,endw+1,10):
        # %6d keeps the header aligned with the %6.2f body cells below.
        print "%6d" %numbers,
    print
    print
    for numb in range(starth,endh+1):
        print numb,
        for num in range(startw,endw+1,10):
            print "%6.2f" %round(calculateBMI(numb,num),1),
        print
def main():
    # Prompt for the table bounds.  NOTE: Python 2 input() evaluates the
    # typed expression, so numeric entry is expected here.
    begh=input("Please enter your beginning height:")
    endh=input("Please enter your ending height:")
    begw=input("Please enter your beginning weight:")
    endw=input("Please enter your ending weight:")
    printBMITable(begh,endh,begw,endw)
main()
| [
"xiangormirko@gmail.com"
] | xiangormirko@gmail.com |
529870208da3d0084f92244aa87d6cdc866f3929 | b045643b9d6929be35b5ecbb9cdc80892ae2abf3 | /app/nomdb/common.py | 7af82aa4b773e1c3b4292797584f1684113dadfc | [
"BSD-3-Clause"
] | permissive | MapofLife/vernacular-names | bce4e36d78e43d4e8200d048c4dea737dfc89afe | 87729df72284656715fb4eac2aea2387c005da42 | refs/heads/master | 2016-09-06T04:34:22.866407 | 2015-12-16T15:22:29 | 2015-12-16T15:22:29 | 17,350,719 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,965 | py | # vim: set fileencoding=utf-8 :
"""common.py: Common functions and constants used by NomDB."""
import base64
import logging
import re
import urllib
from nomdb.config import DEADLINE_FETCH
from titlecase import titlecase
import urlfetch
__author__ = 'Gaurav Vaidya'
# Helper functions.
# In order to allow code to be used locally, we set up url_get and url_post methods.
# These use google.appengine.api.urlfetch when running on the Google App Engine,
# and the local urlfetch library when running locally.
import importlib
gae_urlfetch = None
try:
gae_urlfetch = importlib.import_module('google.appengine.api.urlfetch')
gae_urlfetch.set_default_fetch_deadline(DEADLINE_FETCH)
except ImportError:
pass
def url_get(url):
    """Retrieve a URL using HTTP GET.

    Dispatches to google.appengine.api.urlfetch when the module-level
    gae_urlfetch import succeeded (i.e. running on App Engine), otherwise
    to the local urlfetch library with the configured deadline.
    """
    if gae_urlfetch:
        logging.debug("url_get(" + url + ") with GAE")
        # GAE's default fetch deadline was set at import time.
        return gae_urlfetch.fetch(url)
    else:
        logging.debug("url_get(" + url + ") with urlfetch")
        return urlfetch.fetch(url, deadline =DEADLINE_FETCH)
def url_post(url, data):
    """Retrieve a URL using HTTP POST, submitting 'data' as a dict.
    'data' is URL-encoded before transmission.

    Like url_get, dispatches to the App Engine urlfetch when available,
    otherwise to the local urlfetch library.
    """
    if gae_urlfetch:
        logging.info("url_post(" + url + ") with GAE")
        # Form-encode the payload ourselves and mark it as such so GAE
        # transmits it like a regular HTML form submission.
        return gae_urlfetch.fetch(url,
                                  payload=urllib.urlencode(data),
                                  method=gae_urlfetch.POST,
                                  headers={'Content-type': 'application/x-www-form-urlencoded'},
                                  deadline=DEADLINE_FETCH
                                  )
    else:
        logging.info("url_post(" + url + ") with urlfetch")
        return urlfetch.post(url, data = data)
def decode_b64_on_psql(text):
    """Build a PostgreSQL expression that decodes *text* (base64) to utf-8
    on the server side.

    You probably don't need to use this directly; see encode_b64_for_psql.
    """
    # Refuse anything that is not pure base64, since the text is spliced
    # into a SQL expression verbatim.
    if not re.match(r"^[a-zA-Z0-9+/=]*$", text):
        raise RuntimeError("Error: '" + text + "' sent to decode_b64_on_psql is not base64!")
    return "convert_from(decode('" + text + "', 'base64'), 'utf-8')"
def encode_b64_for_psql(text):
    """Encode a Unicode string as base64, then set it up to be decoded on the server.
    You probably need to use this.
    """
    # b64encode returns bytes on Python 3; decode to str so the string
    # concatenation inside decode_b64_on_psql does not raise TypeError.
    # (On Python 2 this yields unicode, which concatenates fine.)
    return decode_b64_on_psql(base64.b64encode(text.encode('UTF-8')).decode('ascii'))
# TODO: PostgreSQL's GROUP BY could do this server-side; refactor this out.
def group_by(rows, colname):
    """Partition *rows* into a dict keyed by each row's value in column
    *colname*; each value is the list of rows sharing that key."""
    grouped = {}
    for row in rows:
        grouped.setdefault(row[colname], []).append(row)
    return grouped
def get_genus_name(name):
    """Return the lower-cased genus for a binomial (or longer) name,
    or None when the name has fewer than two words.

    :param name: scientific name, e.g. "Puma concolor"
    :return: lower-cased genus string or None
    """
    parts = name.split()
    return parts[0].lower() if len(parts) > 1 else None
| [
"gaurav@ggvaidya.com"
] | gaurav@ggvaidya.com |
3c4c50a8004e1bf40592643c7d7087653aca7c41 | 36c2b7ff4638363debffa83d860c3646847cb2c3 | /Day 4/day4.py | 98a26d866d651536c2d3b5e349247adf67fc4543 | [] | no_license | gormaar/AdventOfCode2020 | 3fbb73f65cf9c40a4fe6694b271c16f557b568c5 | d065c5c2f26d5afdd3cf79fab23bf0b65942fbe2 | refs/heads/main | 2023-01-23T18:03:31.240710 | 2020-12-08T20:56:32 | 2020-12-08T20:56:32 | 317,629,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,105 | py | import re
# Count number of valid passports
# Advent of Code 2020, day 4: the input is a batch of key:value passport
# fields, records separated by blank lines.
with open("input.txt", "r") as input_file:
    # Context manager closes the handle (the original leaked it).
    lines = input_file.readlines()
part1 = 0
part2 = 0
passports = []
s = ""
# Sentinel blank line so the final record is flushed by the loop below.
lines.append("\n")
for line in lines:
    if line != '\n':
        strippedLine = line.replace('\n', ' ')
        s += strippedLine
    else:
        fields = s.split(' ')
        passports.append(fields)
        s = ""
# Drop empty fields left by the trailing space on each record.  Rebuild each
# list instead of calling remove() while iterating it (which skips elements).
passports = [[field for field in passport if field != ''] for passport in passports]
# Part 1: a passport is valid with all 8 fields, or with 7 fields as long as
# the missing one is 'cid'.
validated_passports = []
for pas in passports:
    if len(pas) == 8:
        part1 += 1
        validated_passports.append(pas)
    if len(pas) == 7:
        valid = True
        for n in pas:
            if 'cid' in n:
                valid = False
        if valid:
            part1 += 1
            validated_passports.append(pas)
print(len(validated_passports))
# Part 2: additionally validate each field's value.
for i in validated_passports:
    valid = True
    for j in i:
        key = re.search(r'(.*):', j).group(1)
        value = re.search(r'^.+:(.+)$', j).group(1)
        if key == 'byr' and not 1920 <= int(value) <= 2002:
            valid = False
        if key == 'iyr' and not 2010 <= int(value) <= 2020:
            valid = False
        if key == 'eyr' and not 2020 <= int(value) <= 2030:
            valid = False
        if key == 'hgt':
            # Height must be in-range for its unit suffix (cm or in).
            height = re.search(r':(\d*)', j).group(1)
            if j.endswith('cm') and not 150 <= int(height) <= 193:
                valid = False
            elif j.endswith('in') and not 59 <= int(height) <= 76:
                valid = False
        if key == 'hcl':
            # '#' followed by exactly six lowercase hex digits.
            haircolor = re.search(r':(.*)', j).group(1)
            if haircolor[0] != '#' or any([c not in '0123456789abcdef' for c in haircolor[1:]]) or len(haircolor.strip("#")) != 6:
                valid = False
        if key == 'ecl':
            colors = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']
            if value not in colors:
                valid = False
        if key == 'pid':
            # Nine-digit number, leading zeroes allowed.
            if not value.isnumeric() or not len(value) == 9:
                valid = False
    if valid:
        part2 += 1
print("Part 1:", part1)
print("Part 2:", part2)
| [
"gormerik96@gmail.com"
] | gormerik96@gmail.com |
1543c0772723db0dceda90abf8cd110116de0506 | d38dc04dfe635be567bff6fbc6a5ee260dfdc25e | /portal/urls.py | c8edf0f2dd042af2b211613dc1a3c40e3c9d57e8 | [] | no_license | wuyad/ha | 4618cc977e5ccd45bc9be0150f3cd5ec8abcd4fb | 96b907ea7903ae6e47219128ad723a55fa352c69 | refs/heads/master | 2021-01-21T00:52:59.490021 | 2016-07-20T06:14:28 | 2016-07-20T06:14:28 | 31,521,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,286 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao'
import ConfigParser
from utils.web import get, ctx, view, post
from utils import web
from portal.entity import *
config_file = 'portal.conf'
@view('hello.html')
@get('/hello')
def hello():
    """Render hello.html with a fixed demo user name."""
    return dict(uname='wuya')
@view('noauth.html')
@get('/noauth')
def noauth():
    """Render the not-authorized page; the template needs no context."""
    return {}
@get('/hello/')
def hello2():
    """Trailing-slash alias for /hello."""
    return hello()
@view('help.html')
@get('/help')
def ha_help():
    """Render the static help page; the template needs no context."""
    return {}
@get('/help/')
def ha_help2():
    """Trailing-slash alias for /help."""
    return ha_help()
@view('index.html')
@get('/')
def index():
    """Render index.html with the request headers (plus the query string,
    when one was supplied)."""
    headers = dict(ctx.request.headers)
    qs = ctx.request.query_string
    if qs:
        headers['query_string'] = qs
    return dict(heads=headers)
def load_request_param(param_list):
    """Collect the listed request parameters into a dict, silently
    skipping any that are absent from the current request."""
    params = {}
    for name in param_list:
        try:
            params[name] = ctx.request[name]
        except KeyError:
            continue
    return params
def get_gw_mac(comm_code):
    """Look up the AP MAC address for *comm_code* via the HAOSS SOAP
    service; returns '' when the code is empty or the call fails."""
    if not comm_code:
        return ''
    from pysimplesoap.client import SoapClient
    conf = ConfigParser.ConfigParser()
    conf.read(config_file)
    # The SOAP endpoint location is configured in portal.conf under [haoss].
    loc = conf.get('haoss', 'location')
    soap = SoapClient(location=loc,
                      action=loc,
                      soap_ns='soap',
                      trace=False,
                      ns=False,
                      exceptions=True)
    try:
        res = soap.GetAPInfo(comm_code=comm_code.encode('utf-8'))
    except Exception as e:
        # Best-effort lookup: any SOAP failure is reported as "no MAC".
        return ''
    return str(res.ap_mac_addr)
@view('login.html')
@get('/login/')
def login():
    """Render the captive-portal login page, e.g.
    /login/?gw_id=808100949391&gw_address=192.168.81.1&gw_port=80&\
    mac=aa:bb:cc:dd:cc:ee&url=http://baidu.com&comm_code=xxxxx

    Records/updates the calling gateway and renders the page with that
    gateway's advertisement URL (or '#' when none is configured).
    """
    opt_para = ['gw_id', 'gw_address', 'gw_port', 'url', 'comm_code']
    param = load_request_param(opt_para)
    # if not param.get('comm_code', ''):
    #     raise web.redirect('/noauth')
    # if not get_gw_mac(param['comm_code']):
    #     raise web.redirect('/noauth')
    gw_id = ''
    try:
        gw_id = param['gw_id']
        gw = GWInfo.get(gw_id)
        if gw:
            # Known gateway: refresh its address/port.
            gw.gw_address = param['gw_address']
            gw.gw_port = param['gw_port']
            gw.update()
        else:
            gw = GWInfo(gw_id=param['gw_id'], gw_address=param['gw_address'],
                        gw_port=param['gw_port'])
            gw.insert()
    except Exception as e:
        log.error('gw info update/insert error {%s}' % e)
    # BUG FIX: initialize url before the try block -- in the original code,
    # if APAdv.get() raised, `url` was never bound and the return statement
    # below crashed with NameError.
    url = '#'
    try:
        gw_adv = APAdv.get(gw_id)
        if gw_adv:
            url = gw_adv.url
    except Exception as e:
        log.error('ap adv read error')
    return dict(param=param, gw_id=gw_id, url=url)
def authenticate(uname, pwd):
    """Return True when a user row matches the given name/password pair."""
    match = User.find_first('where name=? and password=?', uname, pwd)
    return bool(match)
def make_token(uname, pwd):
    """Derive an uppercase hex auth token from the credentials (HMAC-MD5
    keyed with the literal 'token'). Python 2 str semantics."""
    import hashlib
    import hmac
    digest = hmac.new('token', uname + pwd, hashlib.md5)
    return digest.hexdigest().upper()
@post('/redirect/')
def redirect():
    """Exchange posted credentials for a wifidog auth token and bounce the
    client to http://GatewayIP:GatewayPort/wifidog/auth?token=[auth token].
    """
    opt_para = ['uname', 'password', 'noname', 'gw_id']
    param = load_request_param(opt_para)
    # NOTE(review): these lookups raise KeyError when the form omits the
    # fields -- confirm the login form always posts them (even when empty).
    uname = param['uname']
    pwd = param['password']
    token = ''
    if param.get('noname') is not None and param['noname'].lower() == 'on':
        # Anonymous access: hand out the fixed always-accepted token.
        # token = make_token('noname', 'noname')
        token = 'B9383A848D283D983699A2A5BC12EC3F'  # always OK
    elif authenticate(uname, pwd):
        token = make_token(uname, pwd)
        try:
            # Best-effort persistence; insert failures are ignored.
            Token(token=token, uname=uname, password=pwd).insert()
        except Exception as e:
            pass
    else:
        token = 'xxxx'  # auth-fail sentinel, rejected later by auth()
    gw_id = param['gw_id']
    if not gw_id:
        raise web.redirect('/noauth')
    gw = GWInfo.get(gw_id)
    if not gw:
        raise web.redirect('/noauth')
    gw_url = 'http://{}:{}/wifidog/auth?token={}'.format(gw.gw_address, gw.gw_port, token)
    raise web.redirect(gw_url)
@get('/auth/')
def auth():
    """Wifidog token-validation endpoint, e.g.
    /auth/?stage=counters&ip=7.0.0.107&mac=00:40:05:5F:44:43&token=4f473ae3ddc5c1c2165f7a0973c57a98&
    incoming=6031353&outgoing=827770

    Answers ['Auth: <code>'] using the wifidog status codes in _auth.
    """
    # NOTE(review): 'token' appears twice in opt_para; redundant but harmless.
    opt_para = ['stage', 'ip', 'mac', 'token', 'incoming', 'outgoing', 'token']
    param = load_request_param(opt_para)
    _auth = {
        'AUTH_DENIED': 0,  # - User firewall users are deleted and the user removed.
        'AUTH_VALIDATION_FAILED': 6,  # - User email validation timeout has occured and user/firewall is deleted
        'AUTH_ALLOWED': 1,  # - User was valid, add firewall rules if not present
        'AUTH_VALIDATION': 5,  # - Permit user access to email to get validation email under default rules
        'AUTH_ERROR': -1,  # - An error occurred during the validation process
    }
    res_allowed = ['Auth: {}'.format(_auth['AUTH_ALLOWED'])]
    res_denied = ['Auth: {}'.format(_auth['AUTH_DENIED'])]
    token = ''
    try:
        token = param['token']
    except KeyError as e:
        # No token supplied at all.
        return res_denied
    if token == 'xxxx':
        # Sentinel issued by redirect() on failed authentication.
        return res_denied
    elif token == 'B9383A848D283D983699A2A5BC12EC3F':
        # Fixed anonymous-access token: always allowed.
        return res_allowed
    token_info = Token.get(token)
    if not token_info:
        return res_denied
    # status == 2 appears to mark a revoked token -- confirm against the
    # Token model definition.
    if token_info.status == 2:
        return res_denied
    return res_allowed
@get('/auth')
def auth2():
    """No-trailing-slash alias for /auth/."""
    return auth()
@view('portal.html')
@get('/portal/')
def portal():
    """Render the advertisement page for one gateway: portal/?gw_id=%s"""
    opt_para = ['gw_id']
    gw_id = ''
    try:
        gw_id = ctx.request['gw_id']
    except KeyError:
        raise web.redirect('/noauth')
    gw = GWInfo.get(gw_id)
    if not gw:
        raise web.redirect('/noauth')
    gw_adv = APAdv.get(gw.gw_id)
    # Fall back to placeholder texts when no advertisement is configured.
    if gw_adv:
        note = gw_adv.remark
        url = gw_adv.url
    else:
        note = u'此AP未配置广告信息'
        url = u'无url配置'
    info = (gw_id, url, note)
    log.info('portal send')
    return dict(info=info)
@get('/portal')
def portal2():
    """No-trailing-slash alias for /portal/."""
    return portal()
@get('/ping')
def ping():
    """Wifidog heartbeat endpoint, e.g.
    /ping/?gw_id=001217DA42D2&sys_uptime=742725&sys_memfree=2604&sys_load=0.03&wifidog_uptime=3861

    The reported counters are currently ignored; always answers 'Pong'.
    """
    # param = load_request_param(['gw_id', 'sys_uptime', 'sys_memfree', 'sys_load', 'wifidog_uptime'])
    return 'Pong'
@get('/ping/')
def ping2():
    """Trailing-slash alias for /ping."""
    return ping()
| [
"alex_wuya@163.com"
] | alex_wuya@163.com |
ddf009e059f4defdfbe3decfe1529ea22b0f9bf3 | b91e087556dd43faeb506f9985288a927f357d63 | /main_window.py | e0da1a6a313588078c1e4e5d2ec2abc8586e355b | [] | no_license | levidurham/Still | 08257dbf9952b7efe4b28e6ec40230869cc187b9 | 0f7432f0e93a932bd3011cfc188afcd3f67d36b6 | refs/heads/master | 2021-02-12T06:36:20.804854 | 2020-03-03T08:04:09 | 2020-03-03T08:04:09 | 244,569,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,103 | py | """
main_window.py
The main window for the automated still.
"""
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import QTimer
import serial
from ui_still import Ui_Still
class MainWindow(QMainWindow, Ui_Still):
    """Main window for the automated still.

    Polls an Arduino-style controller over a serial line, displaying the
    current temperature and burner/pump state and letting the user set the
    low/high temperature band with a dual slider.
    """
    # NOTE(review): this opens /dev/ttyACM0 at class-definition (import)
    # time and shares one port across all instances -- confirm intended.
    _SER = serial.Serial(
        port='/dev/ttyACM0',
        baudrate=9600,
        bytesize=serial.EIGHTBITS,
        parity=serial.PARITY_NONE,
        stopbits=serial.STOPBITS_ONE
    )
    def serial_connect(self):
        """
        Ensure serial connection is up and ready
        """
        # Close first so open() starts from a clean state.
        if self._SER.isOpen():
            self._SER.close()
        self._SER.open()
        self._SER.isOpen()
    def check_serial_connection(self):
        """ Maintain the serial connection: update the status bar, reconnect
        if needed, and re-arm itself to run again in 30 seconds."""
        try:
            if not self._SER.isOpen():
                self.statusbar.showMessage('Disconnected')
                self.serial_connect()
            else:
                self.statusbar.showMessage('Connected')
        finally:
            # Re-schedule regardless of whether the reconnect succeeded.
            self.serial_check_timer = QTimer()
            self.serial_check_timer.setInterval(30000)
            self.serial_check_timer.timeout.connect(self.check_serial_connection)
            self.serial_check_timer.start()
        return self._SER.isOpen()
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        self.setupUi(self)
        # Set Min/Max on Slider
        self.slider.setMinimum(150)
        self.slider.setMaximum(250)
        self.slider.lowSliderMoved.connect(self.on_low_slider_moved)
        self.slider.highSliderMoved.connect(self.on_high_slider_moved)
        #Set Temp label to 0
        self.temp_label.setText(str(0) + u'\N{DEGREE SIGN}')
        #Set Sliders to Min/Max as default
        self.slider.set_low(178)
        self.slider.set_high(190)
        # Data we will request later and an iterator over it
        self.data = ['Temp', 'High', 'Low', 'Burner', 'Pump']
        # Set up timers
        self.iterator = 0
        self.serial_check_timer = None
        self.read_timer = None
        self.serial_connect()
        self.check_serial_connection()
        self.read_serial()
        # Cached controller state used to avoid redundant pixmap swaps.
        self.burner = 'Off'
        self.pump = 'On'
    def read_serial(self):
        """ Check serial bus for new data every 0.25 seconds """
        try:
            serial_in = ''
            # Drain everything currently buffered, one byte at a time.
            while self._SER.inWaiting() > 0:
                serial_in += self._SER.read(1).decode()
            if serial_in != '':
                # Messages look like '<Keyword> <value>' (Temp uses 'Temp:').
                command = serial_in.split()
                print(command[0])
                if command[0] == 'Temp:':
                    self.on_temp_change(command[1])
                if command[0] == 'High':
                    self.slider.set_high(int(command[1]))
                if command[0] == 'Low':
                    self.slider.set_low(int(command[1]))
                if command[0] == 'Burner':
                    self.burner_state(command[1])
                if command[0] == 'Pump':
                    self.pump_state(command[1])
                self._SER.flushInput()
                self._SER.flushOutput()
            self.request_serial_data()
        finally:
            # Always re-arm the poll timer, even after a serial error.
            self.read_timer = QTimer()
            self.read_timer.setInterval(250)
            self.read_timer.timeout.connect(self.read_serial)
            self.read_timer.start()
    def burner_state(self, state):
        """Swap the flame icons when the burner state ('On'/'Off') changes."""
        if not state == self.burner:
            if state == 'On':
                self.fire0.setPixmap(QPixmap(":/data/data/redfire.png"))
                self.fire1.setPixmap(QPixmap(":/data/data/redfire.png"))
                self.fire2.setPixmap(QPixmap(":/data/data/redfire.png"))
                self.burner = state
            elif state == 'Off':
                self.fire0.setPixmap(QPixmap(":/data/data/fire.png"))
                self.fire1.setPixmap(QPixmap(":/data/data/fire.png"))
                self.fire2.setPixmap(QPixmap(":/data/data/fire.png"))
                self.burner = state
    def pump_state(self, state):
        """Swap the pump icon when the pump state ('On'/'Off') changes."""
        if not state == self.pump:
            if state == 'On':
                self.label.setPixmap(QPixmap(":/data/data/pump_on.png"))
                self.pump = state
            elif state == 'Off':
                self.label.setPixmap(QPixmap(":/data/data/pump.png"))
                self.pump = state
    def request_serial_data(self):
        """ Request the sending of data over serial """
        # Round-robin over self.data: one 'Get <name>' request per call.
        if self.iterator == len(self.data):
            self.iterator = 0
        self._SER.write(('Get ' + self.data[self.iterator] + '\n').encode())
        self.iterator += 1
    def on_low_slider_moved(self, value):
        """Set low_label on slider change"""
        self.low_label.setText(str(value) + u'\N{DEGREE SIGN}')
    def on_high_slider_moved(self, value):
        """Set high_label on slider change"""
        self.high_label.setText(str(value) + u'\N{DEGREE SIGN}')
    def on_temp_change(self, value):
        """Set temperature label on temperature change"""
        self.temp_label.setText(str(value) + u'\N{DEGREE SIGN}')
| [
"levi@levidurham.com"
] | levi@levidurham.com |
9e6e89a22d2678d31373d33e7f817a66b671619b | dcc491dd2fa4ece68728255d236fa6e784eef92d | /modules/2.78/bpy/ops/outliner.py | 0825988f0ade8aa85899d877be01bb396b376431 | [
"MIT"
] | permissive | cmbasnett/fake-bpy-module | a8e87d5a95d075e51133307dfb55418b94342f4f | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | refs/heads/master | 2020-03-14T16:06:29.132956 | 2018-05-13T01:29:55 | 2018-05-13T01:29:55 | 131,691,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,671 | py | def action_set(action=''):
pass
def animdata_operation(type='CLEAR_ANIMDATA'):
pass
def constraint_operation(type='ENABLE'):
pass
def data_operation(type='SELECT'):
pass
def drivers_add_selected():
pass
def drivers_delete_selected():
pass
def expanded_toggle():
pass
def group_link(object="Object"):
pass
def group_operation(type='UNLINK'):
pass
def id_delete():
pass
def id_operation(type='UNLINK'):
pass
def id_remap(id_type='OBJECT', old_id='', new_id=''):
pass
def item_activate(extend=True, recursive=False):
pass
def item_openclose(all=True):
pass
def item_rename():
pass
def keyingset_add_selected():
pass
def keyingset_remove_selected():
pass
def lib_operation(type='RENAME'):
pass
def lib_relocate():
pass
def material_drop(object="Object", material="Material"):
pass
def modifier_operation(type='TOGVIS'):
pass
def object_operation(type='SELECT'):
pass
def operation():
pass
def orphans_purge():
pass
def parent_clear(dragged_obj="Object", type='CLEAR'):
pass
def parent_drop(child="Object", parent="Object", type='OBJECT'):
pass
def renderability_toggle():
pass
def scene_drop(object="Object", scene="Scene"):
pass
def scene_operation(type='DELETE'):
pass
def scroll_page(up=False):
pass
def select_border(gesture_mode=0, xmin=0, xmax=0, ymin=0, ymax=0):
pass
def selectability_toggle():
pass
def selected_toggle():
pass
def show_active():
pass
def show_hierarchy():
pass
def show_one_level(open=True):
pass
def visibility_toggle():
pass
| [
"nutti.metro@gmail.com"
] | nutti.metro@gmail.com |
cbce24815f5dc6637faaac78f99eb8fe110dbacd | e2013ccfe50b9d21a9d82eea30126d7e31eb74dc | /04_cmle/trainer/featurizer.py | 4b79d53bb18b4df22f4425be42b1ae01cb855f0f | [] | no_license | lfloretta/tf_ecosystem | 5320b8f907aefd3ff0b668f147d6c1490620868f | 6490b165c1af43a99484091c0b8c3d77a66f82b8 | refs/heads/master | 2023-04-10T04:51:41.036024 | 2019-08-30T13:48:28 | 2019-08-30T13:48:28 | 205,386,622 | 0 | 0 | null | 2023-03-24T23:19:31 | 2019-08-30T13:21:31 | Python | UTF-8 | Python | false | false | 4,791 | py | # Copyright 2016 Google Inc. All Rights Reserved. Licensed under the Apache
# License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Defines a Wide + Deep model for classification on structured data.
"""
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow_transform import TFTransformOutput
try:
from utils import my_metadata
except ImportError:
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'utils', 'src'))
from utils import my_metadata
def get_deep_and_wide_columns(tft_transform_dir, embedding_size=8):
    """Creates deep and wide feature_column lists.

    Args:
      tft_transform_dir: (str), directory in which the tf-transform model was
        written during the preprocessing step.
      embedding_size: (int), the number of dimensions used to represent
        categorical features when providing them as inputs to the DNN.

    Returns:
      [tf.feature_column], [tf.feature_column]: deep and wide feature_column
      lists.

    Raises:
      LookupError: when a transformed feature cannot be matched against the
        feature groups declared in utils.my_metadata.
    """
    tft_output = TFTransformOutput(tft_transform_dir)
    transformed_feature_spec = tft_output.transformed_feature_spec()
    # The label is a training target, never a model input.
    transformed_feature_spec.pop(my_metadata.transformed_name(my_metadata.LABEL_KEY))
    deep_columns = {}
    wide_columns = {}
    for transformed_key, tensor in transformed_feature_spec.items():
        # Separate features by deep and wide
        if transformed_key in my_metadata.transformed_names(my_metadata.VOCAB_FEATURE_KEYS):
            if transformed_key not in my_metadata.transformed_names(my_metadata.CATEGORICAL_FEATURE_KEYS_TO_BE_REMOVED):
                wide_columns[transformed_key] = tf.feature_column.categorical_column_with_identity(
                    key=transformed_key,
                    num_buckets=tft_output.vocabulary_size_by_name(transformed_key) + my_metadata.OOV_SIZE
                )
        elif transformed_key in my_metadata.transformed_names(my_metadata.HASH_STRING_FEATURE_KEYS):
            if transformed_key not in my_metadata.transformed_names(my_metadata.CATEGORICAL_FEATURE_KEYS_TO_BE_REMOVED):
                wide_columns[transformed_key] = tf.feature_column.categorical_column_with_identity(
                    key=transformed_key,
                    num_buckets=my_metadata.HASH_STRING_FEATURE_KEYS[my_metadata.original_name(transformed_key)]
                )
        elif transformed_key in my_metadata.transformed_names(my_metadata.NUMERIC_FEATURE_KEYS):
            if transformed_key not in my_metadata.transformed_names(my_metadata.NUMERIC_FEATURE_KEYS_TO_BE_REMOVED):
                deep_columns[transformed_key] = tf.feature_column.numeric_column(transformed_key)
        elif (
                (transformed_key.endswith(my_metadata.transformed_name('_bucketized'))
                 and transformed_key.replace(
                        my_metadata.transformed_name('_bucketized'), '') in my_metadata.TO_BE_BUCKETIZED_FEATURE)):
            wide_columns[transformed_key] = tf.feature_column.categorical_column_with_identity(
                key=transformed_key,
                num_buckets=tft_output.num_buckets_for_transformed_feature(transformed_key)
            )
        else:
            # BUG FIX: the original interpolated an undefined name `key`, so
            # this path raised NameError instead of the intended LookupError.
            raise LookupError('The couple (%s, %s) is not consistent with utils.my_metadata' % (transformed_key, tensor))
    # Creating new categorical features by crossing the pickup-location buckets.
    wide_columns.update(
        {
            'pickup_latitude_bucketized_xf_x_pickup_longitude_bucketized_xf' : tf.feature_column.crossed_column(
                ['pickup_latitude_bucketized_xf', 'pickup_longitude_bucketized_xf'],
                hash_bucket_size=int(1e3)),
        }
    )
    # Creating new numeric features from categorical features.
    deep_columns.update(
        {
            # Use indicator columns for low dimensional vocabularies
            'trip_start_day_xf_indicator': tf.feature_column.indicator_column(wide_columns['trip_start_day_xf']),
            # Use embedding columns for high dimensional vocabularies
            'company_xf_embedding': tf.feature_column.embedding_column(
                wide_columns['company_xf'], dimension=embedding_size)
        }
    )
    return deep_columns.values(), wide_columns.values()
| [
"lfloretta@google.com"
] | lfloretta@google.com |
bc4b7257717acb68613ab655fb80d9e9cf8d3c00 | 4e746bf8741302b8a80fa8661f99d72a669c95ed | /test_tf.py | 184125eb1bbbe6a3e7ad04aedb9ea02bad77bf91 | [] | no_license | casszhao/healthinequality | 15ee2d9ef10e0ec693f7519ef1c0613037e48662 | 773c1686736d026b89cb8d6c7c14d1e40c3850fd | refs/heads/main | 2023-06-24T08:13:53.222113 | 2021-07-25T23:36:25 | 2021-07-25T23:36:25 | 387,254,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,566 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 14 13:11:32 2019
@author: areej
"""
import os
import sys
import support_methods
import pickle
import numpy as np
import tensorflow as tf
from argparse import ArgumentParser
import pandas as pd
import model_archi_tf as model_archi
import support_methods as sup_methods
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns; sns.set()
def remove_tokens(sent, flag):
    """Strip the 'sostok'/'eostok' markers from *sent*.

    When *flag* is truthy, an adjacent 'eostok sostok' pair (a sentence
    boundary) is first collapsed into a comma; remaining markers are then
    deleted in both cases.
    """
    if flag:
        sent = sent.replace('eostok sostok', ',')
    return sent.replace('sostok', '').replace('eostok', '')
def evaluate(sentence):
    """Greedy-decode a summary for *sentence* with the restored model.

    Relies on the module-level globals max_summary_len, max_text_len,
    latent_dim, x_tokenizer, y_tokenizer, encoder and decoder.

    Returns (result, sentence, attention_plot): the decoded token string,
    the (possibly preprocessed) input sentence, and a
    (max_summary_len, max_text_len) matrix of attention weights.
    """
    attention_plot = np.zeros((max_summary_len, max_text_len))
    # Raw sentences (no sostok marker yet) still need preprocessing.
    if 'sostok' not in sentence:
        sentence = support_methods.preprocess_sentence(sentence)
    inputs = [x_tokenizer.word_index[i] for i in sentence.split()]
    inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs],
                                                           maxlen=max_text_len,
                                                           padding='post')
    inputs = tf.convert_to_tensor(inputs)
    result = ''
    hidden = [tf.zeros((1, latent_dim))]
    #print("initial hidden shape", len(hidden), len(hidden[0]), len(hidden[0][0]))
    enc_out, enc_hidden = encoder(inputs, hidden)
    #print('enc_hidden shape', enc_hidden.shape)
    dec_hidden = enc_hidden
    # Decoding starts from the start-of-summary token.
    dec_input = tf.expand_dims([y_tokenizer.word_index['sostok']], 0)
    for t in range(max_summary_len):
        predictions, dec_hidden, attention_weights = decoder(dec_input,
                                                             dec_hidden,
                                                             enc_out)
        #print('attention weights:', attention_weights)
        # storing the attention weights to plot later on
        # NOTE(review): `np.all(...) != None` is always truthy; this was
        # probably meant to be `attention_weights is not None` -- confirm.
        if np.all(attention_weights) != None:
            attention_weights = tf.reshape(attention_weights, (-1, ))
            attention_plot[t] = attention_weights.numpy()
        # Greedy decoding: take the single most probable next token.
        predicted_id = tf.argmax(predictions[0]).numpy()
        result += y_tokenizer.index_word[predicted_id] + ' '
        if y_tokenizer.index_word[predicted_id] == 'eostok':
            return result, sentence, attention_plot
        # the predicted ID is fed back into the model
        dec_input = tf.expand_dims([predicted_id], 0)
    return result, sentence, attention_plot
# function for plotting the attention weights
def plot_attention(attention, predicted_sentence, sentence , num):
    """Draw *attention* as a heatmap (input tokens on x, predicted tokens
    on y) and save it for the first ten examples (num <= 10)."""
    plt.figure(figsize=(10, 4))
    ax=sns.heatmap(attention, xticklabels=sentence, yticklabels=predicted_sentence, cmap="OrRd")
    ax.xaxis.set_ticks_position('top')
    ax.set_xticklabels(ax.get_xticklabels(),rotation=30)
    #plt.show()
    plt.draw()
    # Only persist plots for the first few examples to limit disk usage.
    if num <= 10:
        plt.savefig('results/attention_'+str(num)+'.png', bbox_inches = 'tight')
def predict(sentence, num):
    """Run inference on one sentence, save an attention heatmap when
    weights are available, and return (prediction, preprocessed sentence)."""
    result, sentence, attention_plot = evaluate(sentence)
    if not np.all(attention_plot == 0):
        pred_tokens = result.strip().split(' ')
        src_tokens = sentence.strip().split(' ')
        attention_plot = attention_plot[:len(pred_tokens), :len(src_tokens)]
        plot_attention(attention_plot, pred_tokens, src_tokens, num)
    return result, sentence
''' End of Functions '''
parser = ArgumentParser()
parser.add_argument("-d", "--data", required=True, help="data_folder: wiki_tfidf or wiki_sent")
parser.add_argument("-m", "--model_name", required=True, help="model name (bigru_bahdanau_attention)")
parser.add_argument("-ld", "--latent_dim", default=200, type=int, help="Latented dim")
parser.add_argument("-ed", "--embed_dim", default=300,type=int, help="Embedding dim")
parser.add_argument("-opt", "--optimizer", default='adam', help="optimizer algorithm (default adam)")
parser.add_argument("-lr", "--learning_rate",default=0.001, type=float, help="learning rate (default 0.001)")
parser.add_argument("-s", "--sub_set",type=int, help="data subset")
parser.add_argument("-bs", "--batch_size",default=128, type=int, help="batch size (default 128)")
parser.add_argument("-p", "--print",default='False')
parser.add_argument( "--load", required=True, help="load specific model name")
parser.add_argument("-te", "--topic_evaluate", default=False , help="test on different data (deafult False) "\
" - write bhatia_topics to test on topic terms on it own"\
" - write bhatia_topics_tfidf to test on topics+additional terms ")
args = parser.parse_args()
# info
data_name= args.data
model_name=args.model_name
topic_data= args.topic_evaluate
print_flag = args.print
################
# model parameters
latent_dim=args.latent_dim
embedding_dim=args.embed_dim
BATCH_SIZE = args.batch_size
'''
Predicting
Load needed files
'''
# loading X tokenizer
with open('./data/'+data_name+'/x_tokenizer.pickle', 'rb') as handle:
x_tokenizer= pickle.load(handle)
print('number of x_tokenizer words --->', x_tokenizer.num_words)
# loading Y tokenizer
with open('./data/'+data_name+'/y_tokenizer.pickle', 'rb') as handle:
y_tokenizer= pickle.load(handle)
print('number of y_tokenizer words --->', y_tokenizer.num_words)
#size of vocabulary ( +1 for padding token)
x_voc = x_tokenizer.num_words + 1 #use num_words instead of word_index since we have a large vocab words and would like to keep the top frequent only
vocab_inp_size = x_voc
#size of vocabulary
y_voc = y_tokenizer.num_words +1
vocab_tar_size = y_voc
print('number of vocab in x', x_voc)
print('number of vocab in y', y_voc)
'set sequence length (+2 to accommodate for eostok and sostok)'
max_text_len=30 + 2
max_summary_len=8 + 2
if topic_data == False: # test on wiki data
print("load test data from", data_name)
x_test=np.load('./data/'+data_name+'/x_test.npy')
y_test=np.load('./data/'+data_name+'/y_test.npy')
elif topic_data != False: # for inference on different data, load raw data and preprocess using the tokenizer of the data the model was trained on
print("load test data from", topic_data)
data_test= pd.read_csv('./data/'+topic_data+'/'+args.topic_evaluate+'.csv', names=['labels', 'terms'], header=None)
print("first row, labels:", data_test.iloc[0], '\nterms:',data_test.iloc[0]['terms'])
# process topic using specific tokenizer
x_test, y_test = sup_methods.preprocess_using_tokenizer(data_test, x_tokenizer, y_tokenizer, max_text_len)
x_test= tf.keras.preprocessing.sequence.pad_sequences(x_test, maxlen=max_text_len, padding='post')
# prep y by removing ',' and adding [eostok and sostok]
y_test = sup_methods.preprocess_y(y_test)
try:
x_test= x_test[:args.sub_set]
y_test= y_test[:args.sub_set]
print("test on parts of the data, subset=", args.sub_set)
except:
pass
print("testing on", len(x_test))
if 'bigru_bahdanau_attention' == model_name:
print("BiGRU + attention")
encoder = model_archi.Encoder_bigru_attention(vocab_inp_size, embedding_dim, latent_dim, BATCH_SIZE, x_tokenizer )
decoder = model_archi.Decoder_bigru_attention(vocab_tar_size, embedding_dim, latent_dim, BATCH_SIZE, y_tokenizer)
else:
print("model name not found")
sys.exit(0)
'''
Optimizer
'''
optimizer = sup_methods.get_optimizer(args.optimizer, args.learning_rate, args.latent_dim)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction='none')
# restoring from specific checkpoint
checkpoint_dir = './training_checkpoints/'+ data_name
print("Load specific model:", args.load)
best_model_name = args.load
checkpoint = tf.train.Checkpoint(optimizer=optimizer,
encoder=encoder,
decoder=decoder)
print("restore from", checkpoint_dir+'/'+best_model_name)
checkpoint.restore((checkpoint_dir+'/'+best_model_name))
''' start testing '''
golds, preds, topics=[],[],[]
import time
start= time.time()
for i in range(0,len(x_test)):
topic=support_methods.seq2text(x_tokenizer, x_test[i])
if args.topic_evaluate == False:
gold = support_methods.seq2text(y_tokenizer, y_test[i])
else:
gold = y_test[i]
pred, topic =predict(topic, i)
# remove end and start tokens
if args.topic_evaluate != False and 'tfidf' in topic_data:
topic = list(data_test['terms'])[i]
topic= ' '.join(topic.split()[:10])
topic =remove_tokens(topic, False)
gold= remove_tokens(gold, True)
pred = remove_tokens(pred, False)
preds.append(pred)
golds.append(gold)
topics.append(topic)
if i%500==0:
end= time.time()
print("reached %i/%i took %.2f minutes "%(i, len(x_test),(end-start)/60))
start = time.time()
################# write predictions to file to use BERTScore
if args.topic_evaluate == False:
pred_path= 'results/'+ data_name+'/'+model_name+'_pred.out'
gold_path= 'results/'+ data_name+'/'+model_name+'_gold.out'
top_path= 'results/'+ data_name+'/'+model_name+'_topics.out'
else:
pred_path= 'results/'+ data_name+'/'+model_name+'_'+topic_data+'_pred.out'
gold_path= 'results/'+ data_name+'/'+model_name+'_'+topic_data+'_gold.out'
top_path= 'results/'+ data_name+'/'+model_name+'_'+topic_data+'_topics.out'
# create folder if not exist
if not os.path.exists('results/'+ data_name):
os.makedirs('results/'+ data_name)
'''
in bhatia_topics_tfidf the same topic has many labels we have to combine labels of the same topic to be ready for evaluation
'''
if topic_data != False and 'tfidf' in topic_data:
new_preds={}
new_golds={}
for topic, gold, pred in zip(topics, golds, preds):
if topic not in new_preds:
new_preds[topic]= [pred]
new_golds[topic] =[gold]
else:
new_preds[topic].append(pred)
new_golds[topic].append(gold)
topics= list(new_preds.keys())
print("number of topics", len(topics))
golds= list(new_golds.values())
print("number of golds:", len(golds))
preds= list(new_preds.values())
print("number of preds:", len(preds))
with open(pred_path, 'w') as p:
with open(gold_path, 'w') as g:
with open(top_path, 'w') as t:
for i in range (len(golds)):
try:
p.write(preds[i]+'\n')
except:
p.write(','.join(preds[i])+'\n')
try:
g.write(golds[i]+'\n')
except:
g.write(','.join(golds[i])+'\n')
t.write(topics[i]+'\n') | [
"noreply@github.com"
] | casszhao.noreply@github.com |
80f4a4a9ec66bd7807446a94141921e8f9318466 | e1ec9a4d4db3cae2d2414017d998469846ae3459 | /commits/get_file_commits.py | ac22f868e246245d0aea900b749c20fe6e5f02c8 | [] | no_license | mhassany/code-readability | 8927aef3a55119907a021705b9b93d32544fd810 | 8ecaef29a32dfac403794299c634d4bbed489ee9 | refs/heads/master | 2020-09-15T05:33:18.022228 | 2019-12-17T21:37:06 | 2019-12-17T21:37:06 | 223,357,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | from commits_helper import commits
from collections import defaultdict
def get_file_line_changes():
    """Yield (file name, lines added, lines removed) tuples, ordered by
    total churn (added + removed) descending across all commits."""
    added = defaultdict(int)
    removed = defaultdict(int)
    churn = defaultdict(int)
    for commit in commits():
        for changed in commit.files:
            added[changed.name] += changed.line_added
            removed[changed.name] += changed.line_removed
            churn[changed.name] += changed.line_added + changed.line_removed
    ranked = sorted(churn.items(), key=lambda item: item[1], reverse=True)
    for name, _total in ranked:
        yield name, added[name], removed[name]
# Dump every (file, added, removed) record in churn order.
for record in get_file_line_changes():
    print(record)
| [
"mhassany@stevens.edu"
] | mhassany@stevens.edu |
6daa0f6ca0ec15a3661dc69769cc530be5110fb4 | ecbc312f6c5733a4c8ebcc9c3fccdba8bc35fd2f | /text_normalizer/collection/eng_basic.py | 0432800ebf5e9c3e77fbc5fa67ff71ed84217fbd | [
"MIT"
] | permissive | Yoctol/text-normalizer | d200a4e020618e70162cbc52a3099d9a9203aab9 | 3609c10cd229c08b4623531e82d2292fc370734c | refs/heads/master | 2020-03-11T00:56:25.337539 | 2018-11-06T04:08:37 | 2018-11-06T04:08:37 | 129,676,388 | 17 | 3 | MIT | 2018-11-06T04:08:38 | 2018-04-16T02:57:34 | Python | UTF-8 | Python | false | false | 439 | py | from .base_collection import BaseCollection
from ..library import (
whitespace_char_text_normalizer,
pure_strip_text_normalizer,
eng_lowercase_text_normalizer,
)
# Basic English normalizer pipeline. The list order is the application
# order: lowercase first, then the whitespace-character normalizer, then
# the strip normalizer (exact behaviour of each step lives in the library
# definitions imported above -- confirm there before relying on details).
eng_basic_text_normalizer_collection = BaseCollection()
eng_basic_text_normalizer_collection.add_text_normalizers(
    text_normalizers=[
        eng_lowercase_text_normalizer,
        whitespace_char_text_normalizer,
        pure_strip_text_normalizer,
    ],
)
| [
"s916526000@gmail.com"
] | s916526000@gmail.com |
211dc4152498ce7967b1fc4828f9e7be31a98caf | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/LeetCode_with_solution/096_Unique_Binary_Search_Trees.py | 6b846c2a2a8141cdb599d93d5826000dc142e497 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 419 | py | c_ Solution o..
___ numTrees n
"""
:type n: int
:rtype: int
"""
# https://leetcode.com/discuss/86650/fantastic-clean-java-dp-solution-with-detail-explaination
dp = [0] * (n + 1)
dp[0] = 1
dp[1] = 1
___ level __ r.. 2, n + 1
___ root __ r.. 1, level + 1
dp[level] += dp[level - root] * dp[root - 1]
r_ dp[n] | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
403157814d970ad27e47a62924b88c665308ac35 | a1b375c3e98fe059dafc4d74cbcbcb99a0571e44 | /accounts/migrations/0001_initial.py | ffea45cf723bf87cb80e5a6a39898021cf2970d0 | [
"MIT"
] | permissive | mohsenamoon1160417237/Social_app | 478a73552ceed8001c167be6caaf550cd58626bd | 79fa0871f7b83648894941f9010f1d99f1b27ab3 | refs/heads/master | 2022-12-09T16:03:53.623506 | 2020-09-21T05:59:22 | 2020-09-21T06:02:03 | 297,242,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | # Generated by Django 2.2 on 2020-09-10 17:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Profile model, a one-to-one
    extension of the project's AUTH_USER_MODEL with first/last name
    fields.

    Auto-generated by Django (makemigrations); avoid hand-editing the
    operations once this migration has been applied anywhere.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=50)),
                ('last_name', models.CharField(max_length=50)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"dramatic225@gmail.com"
] | dramatic225@gmail.com |
bc7ef643b4364467d1b9c34df38bbe8bab4183cd | a60c7913f74d07c0688adcd2b3bee7c590032bb2 | /nobody/magnet/crawler/spiders/zhihu.py | 01b5463e121bbcaac9b2478e22f02a47b0ddd6c8 | [] | no_license | z-fork/nobody | 98506be00126b6bf725d982087d7f0c833d19f82 | ed82318dbac56191d0d8aeda02ef216a26f3acf8 | refs/heads/master | 2021-01-09T21:49:08.815867 | 2015-12-31T07:21:19 | 2015-12-31T07:21:19 | 43,546,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,540 | py | # -*- coding: utf-8 -*-
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from ..mixin.login import LoginMixin
class ZhihuSipder(LoginMixin, CrawlSpider):
    """Crawl zhihu.com question pages (after an email/password login via
    LoginMixin) and print each question's url, title and description.
    Python 2 / legacy Scrapy (SgmlLinkExtractor) code."""
    name = "zhihu"
    allowed_domains = ["www.zhihu.com"]
    start_urls = [
        "http://www.zhihu.com/topic"
    ]
    # Follow only question pages; every match is handed to parse_page().
    rules = (
        Rule(SgmlLinkExtractor(
            allow=r'http://www\.zhihu\.com/question/\d+'),
            callback='parse_page'),
    )
    # Consumed by LoginMixin to perform the form login.
    # SECURITY NOTE(review): account credentials and a session cookie are
    # hard-coded here -- move them to configuration before sharing this code.
    login_data = {
        'prepare_url': 'http://www.zhihu.com/#signin',
        'url': 'http://www.zhihu.com/login/email',
        'cookie': {
            'cap_id': '"MDI1ODkxZjBlMTAzNDYxOTliMjk1MGE0ZjNjY2Y2NGE=|1440748116|3d8e8a1b61f1d21b53bf29f218c7ab8c4f19d8e2"',  # noqa
        },
        'data': {
            'email': '253673883@163.com',
            'password': 'qweqwe',
            'rememberme': 'y',
        }
    }
    # XPath selectors for the login form token and the question page fields.
    _XSRF_XPATH = '//input[@name="_xsrf"]/@value'
    _TITLE_XPATH = '//h2[@class="zm-item-title zm-editable-content"]/text()'
    _DESCRIPTION_XPATH = '//div[@class="zm-editable-content"]/text()'
    def prepare_login(self, response):
        """Extract the anti-CSRF _xsrf value from the sign-in page so the
        login POST can include it."""
        xsrf = response.xpath(self._XSRF_XPATH).extract()[0]
        return {
            '_xsrf': xsrf,
        }
    def parse_page(self, response):
        """Scrape one question page into a plain dict and print it."""
        item = {
            'url': response.url,
            'title': response.xpath(self._TITLE_XPATH).extract(),
            'description': response.xpath(self._DESCRIPTION_XPATH).extract(),
        }
        print item
| [
"mongoo@sina.cn"
] | mongoo@sina.cn |
84bd2cc154649f00dfb292a4cce30268f53332b9 | 01aa16542f9f3b3b088bc3870e0fa70c2ff596a7 | /Proyecto1/vectorInvaders.py | 23a604ab7287ceddc2cccfd78dc95e10f5ca71aa | [] | no_license | AndresFidel/Graficaci-n-openGL-2019 | a6ea6beaa7621e378900318741351946898ad02d | 22b070e503da09949afb9f78ac077b23b8929edf | refs/heads/master | 2020-08-03T13:50:11.451015 | 2019-11-02T03:07:25 | 2019-11-02T03:07:25 | 211,773,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,061 | py | #Proyecto No. 1 Vector Invaders
#Materia: Introducción a la graficación por compuntadora
#Integrantes:
#-Andrés Fidel García González
#-Cruz Alejandro Cordova Alanis
from OpenGL.GLUT import *
from OpenGL.GL import *
from OpenGL.GLU import *
import math
import random
import sys
import subprocess
import time
#Musica de fondo
global bgmusica
global posicionNave
global vidas
global Puntuacion
global banderas, banderasB, banderasC
global disparoYA
global disparoYB
global estado
global puntoCentralX, puntoCentralY
global disparoPosicion
#Variables para la interaccion con la interfaz
global visible
global menu
global terminar
#Variable para la dificultad del juego
global dificultad
global limite
global apuntar
#Variables para los enemigos
global enemigoX1, enemigoX2
global enemigoY1, enemigoY2
global direccion
global enemigos
#Variabes para los proyectiles enemigos
global laserEAX, laserEBX
global laserEAY, laserEBY
global estadoLaserE
global direccionE
#Variables para la nave extra
global rango, posicionNaveExtra, estadoExtra
global disminuir
#Metodo para la creación de texto. A
def escritura(font, string):
for char in string:
glutBitmapCharacter(font,ord(char))
#Metodo para la creacion de texto B
def escritura2(font, string):
for char in string:
glutStrokeCharacter(fond,ord(char))
#Metodo para mostrar los mensajes en la terminal
def mostrarMensajes():
print ("Proyecto 1: Vector Invaders")
print ("Integrantes:")
print ("Andrés Fidel García González")
print ("Cruz Alejandro Cordova Alanis")
#Metodo para la inicializacion del escenario
def init():
global posicionNave
global vidas
global Puntuacion
global banderas, banderasB, banderasC
global disparoYA
global disparoYB
global estado
global puntoCentralX, puntoCentralY
global disparoPosicion
#Para la animacion de destruccion
global visible
global contador
#Dificultad del juego
global dificultad, limite
global apuntar
#Proyectiles del enemigo IMPORTANTE
global ProyectilAY, ProyectilBY
global ProyectilCentroX, ProyectilCentroY
global EstadoEnemigo
#Para el menu y final del juego
global menu
global finalizar
global terminar
#Variables para los enemigo
global enemigoX1, enemigoX2
global enemigoY1, enemigoY2
global direccion #Direccion de los enemigos
global enemigos
#Variables para los proyectiles enemigos
global laserEAX, laserEBX
global laserEAY, laserEBY
global estadoLaserE
global direccionE
#Nave extra
global rango, posicionNaveExtra, estadoExtra
global disminuir
glClearColor(0.0, 0.0, 0.0, 0.0)
posicionNave=0
vidas=3
Puntuacion=0
disparoYA=87.5
disparoYB=92.5
estado=False
puntoCentralX=0.0
puntoCentralY=0.0
disparoPosicion=0.0
visible=True
contador=0.0
menu=True
finalizar=False
terminar=False
enemigoX1=0.0
enemigoX2=0.0
enemigoY1=550
enemigoY2=580
direccion=True
direccionE=True
enemigos=True
#Banderas para los bloques
banderas=[[True for col in range(0,12)] for fila in range(0,5)]
banderasB=[[True for col in range(0,12)] for fila in range(0,5)]
banderasC=[[True for col in range(0,12)] for fila in range(0,5)]
estadoExtra=True
enemigos = [[True for col in range(0,7)] for fila in range(0,5)]
estadoLaserE = [[True for col in range(0,7)] for fila in range(0,5)]
apuntar = [[False for col in range(0,7)] for fila in range(0,5)]
disminuir = [[0 for col in range(0,7)] for fila in range(0,5)]
laserEAX=0.0
laserEAY=0.0
laserEBX=0.0
laserEBY=0.0
rango=True
posicionNaveExtra=-200
dificultad=0
limite=4
#Metodo para dibujar la vida del jugador
def mostrarVida():
glColor3f(0.9, 0.9, 0.9)
glRectf(10.0, 15.0, 40, 30.0);
glColor3f(0.9, 0.2, 0.0)
glRectf(20.0, 30.0, 30.0, 40.0);
#Metodo para mostrar el menu principal
def mostrarTitulo():
glColor3f(0.0, 0.0, 0.0)
glRectf(0 , 0, 400, 650)
glColor3f(1.0, 1.0, 1.0)
glRasterPos3f(85.0, 400.0, 0.0)
escritura(GLUT_BITMAP_TIMES_ROMAN_24,"VECTOR INVADERS")
glRasterPos3f(130.0, 300.0, 0.0)
escritura(GLUT_BITMAP_HELVETICA_18,"PRESS SPACE")
glColor3f(1.0, 1.0, 0.0)
glRasterPos3f(30.0, 200.0, 0.0)
escritura(GLUT_BITMAP_HELVETICA_18,"Proyecto No. 1")
glRasterPos3f(30.0, 175.0, 0.0)
escritura(GLUT_BITMAP_HELVETICA_18,"Integrantes: ")
glColor3f(1.0, 1.0, 1.0)
glRasterPos3f(30.0, 145.0, 0.0)
escritura(GLUT_BITMAP_HELVETICA_18,"-> Andrés Fidel García González")
glRasterPos3f(30.0, 115.0, 0.0)
escritura(GLUT_BITMAP_HELVETICA_18,"-> Cruz Alejandro Cordova Alanis")
def finDelJuego():
global Puntuacion
glColor3f(0.0, 0.0, 0.0)
glRectf(0 , 0, 400, 650)
glColor3f(1.0, 1.0, 1.0)
glRasterPos3f(115.0, 400.0, 0.0)
escritura(GLUT_BITMAP_TIMES_ROMAN_24,"GAME OVER")
glRasterPos3f(130.0, 280.0, 0.0)
escritura(GLUT_BITMAP_HELVETICA_18,"HI-SCORE: "+str(Puntuacion))
#Metodo para mostrar los mensajes en el escenario
def mensajesInterfaz():
global Puntuacion
global vidas
maxPuntuacion=99999999
glColor3f(1.0, 1.0, 0.0)
glLineWidth(2.0);
glBegin(GL_LINES);
glVertex3f(0.0, 60.0, 0.0);
glVertex3f(400.0, 60.0, 0.0);
glEnd();
if vidas>1:
mostrarVida()
if vidas>2:
glPushMatrix()
glTranslatef(40, 0.0, 0.0)
mostrarVida()
glPopMatrix()
glColor3f(0.9, 0.3, 0.0)
glRasterPos3f(140.0, 40.0, 0.0)
escritura(GLUT_BITMAP_8_BY_13,"JUGADOR: ")
glColor3f(1.0, 1.0, 0.0)
glRasterPos3f(140.0, 20.0, 0.0)
escritura(GLUT_BITMAP_9_BY_15,"Player 1")
if Puntuacion>maxPuntuacion:
Puntuacion=maxPuntuacion
strPuntuacion = str(Puntuacion)
lenPuntuacion = len(strPuntuacion)
printPuntuacion = (8-lenPuntuacion)*'0'+strPuntuacion
glColor3f(1.0, 1.0, 0.0)
glRasterPos3f(280.0, 40.0, 0.0)
escritura(GLUT_BITMAP_8_BY_13,"HI-SCORE")
glRasterPos3f(280.0, 20.0, 0.0)
escritura(GLUT_BITMAP_9_BY_15,printPuntuacion)
#Metodo para el efecto de destrucción del jugador
def destruirJugador():
global contador
global visible
contador+=0.6
#Diagonales
glColor3f(1.0, 0.0+contador/20, 0.0)
glPushMatrix()
glTranslatef(contador, contador, 0.0)
glRectf(posicionNave+15, 72, posicionNave+20, 77)
glPopMatrix()
glPushMatrix()
glTranslatef(contador*-1, contador, 0.0)
glRectf(posicionNave+15, 72, posicionNave+20, 77)
glPopMatrix()
glPushMatrix()
glTranslatef(contador*-1, contador*-1, 0.0)
glRectf(posicionNave+15, 72, posicionNave+20, 77)
glPopMatrix()
glPushMatrix()
glTranslatef(contador, contador*-1, 0.0)
glRectf(posicionNave+15, 72, posicionNave+20, 77)
glPopMatrix()
#Horizontales
glColor3f(0.0+contador/20, 1.0-contador/20, 0.0)
glPushMatrix()
glTranslatef(contador, 0.0, 0.0)
glRectf(posicionNave+10, 72, posicionNave+20, 83)
glPopMatrix()
glPushMatrix()
glTranslatef(contador*-1, 0.0, 0.0)
glRectf(posicionNave+10, 72, posicionNave+20, 83)
glPopMatrix()
glPushMatrix()
glTranslatef(0.0, contador, 0.0)
glRectf(posicionNave+10, 72, posicionNave+20, 83)
glPopMatrix()
glPushMatrix()
glTranslatef(0.0, contador*-1, 0.0)
glRectf(posicionNave+10, 72, posicionNave+20, 83)
glPopMatrix()
if contador >= 40:
visible=True
contador=0
#Metodo para generar nave extra
def dibujarNaveExtra():
global posicionNaveExtra
global estadoExtra, estado
global puntoCentralX, puntoCentralY
global Puntuacion
if puntoCentralX >= posicionNaveExtra and puntoCentralX <= posicionNaveExtra+20 and puntoCentralY >= 620 and estadoExtra==True:
estadoExtra=False
estado=False
disparoYA=87.5
disparoYB=92.5
Puntuacion+=100
if estadoExtra == True:
glBegin(GL_TRIANGLES)
glVertex3f(posicionNaveExtra, 5*math.sin(posicionNaveExtra*0.5)+635, 0)
glVertex3f(posicionNaveExtra+20, 5*math.sin(posicionNaveExtra*0.5)+635, 0)
glVertex3f(posicionNaveExtra+10, 5*math.sin(posicionNaveExtra*0.5)+615, 0)
glEnd()
#Metodo para actualizar el escenario
def idle():
global posicionNaveExtra
global rango
if rango:
if posicionNaveExtra<600:
posicionNaveExtra+=50/100
else:
rango=False
glutPostRedisplay()
if rango == False:
if posicionNaveExtra>-200:
posicionNaveExtra-=50/100
else:
rango=True
glutPostRedisplay()
#Dibujado de naves
def DibujarNavePrincipal():
global posicionNave
global disparoYA, disparoYB
global puntoCentralX, puntoCentralY
global visible
glColor3f(0.9, 0.2, 0.0)
glRectf(posicionNave+10.0, 85.0, posicionNave+20.0, 95.0);
glColor3f(0.9, 0.9, 0.9)
glRectf(posicionNave, 70.0, posicionNave+30, 85.0);
#Guardando las coordenadas del punto central del disparo.
puntoCentralX=posicionNave+15
puntoCentralY=disparoYA+2.5
if visible == False:
destruirJugador()
glutPostRedisplay()
#Metodo para hacer que el jugador dispare
def disparar():
global disparoYA, disparoYB
global disparoPosicion
posicionNave=0.0
posicionNave=disparoPosicion
glColor3f(1.0, 1.0, 0.0)
glRectf(posicionNave+12.5, disparoYA, posicionNave+17.5, disparoYB);
disparoYA += 5
disparoYB += 5
#Metodo para generar los disparos de los enemigos
def ataqueEnemigo():
global laserEnemigoAX, laserEnemigoBY
global laserEAX, laserEAY, laserEBX, laserEBY
global estadoLaserE
global direccionE
global posicionNave
global limite, dificultad, apuntar, disminuir
global puntoCentralX, puntoCentralY
espacioX=0.0
espacioY=0.0
total=0
for i in range(5):
for j in range(7):
glColor3f(0.0, 0.0, 1.0)
total+=1
azari=random.randint(1, 35)
if total == azari and dificultad <= limite and apuntar[i][j] == False and estadoLaserE[i][j] == True:
dificultad+=1
apuntar[i][j]=True
auxX=laserEAX+espacioX
auxY=laserEBY+espacioY
if estadoLaserE[i][j] == True and apuntar[i][j] == False:
#Posicionando disparo en el centro del enemigo
glRectf(72.5+laserEAX+espacioX, 562.5+laserEAY+espacioY, 77.5+laserEBX+espacioX, 567.5+laserEBY+espacioY)
else:
if apuntar[i][j] == True:
disminuir[i][j]-=2
glRectf(72.5+auxX, 562.5+auxY+disminuir[i][j], 77.5+auxX, 567.5+auxY+disminuir[i][j])
#Limitando disparo
if 562.5+auxY+disminuir[i][j] <= 0 or puntoCentralX >= 72.5+auxX and puntoCentralX <= 77.5+auxX and puntoCentralY >= 562.5+auxY+disminuir[i][j] or estadoLaserE==False:
apuntar[i][j]=False
dificultad-=1
espacioX+=40
if (60+laserEAX+espacioX) >= 400 and direccionE == True and estadoLaserE[i][j] == True:
laserEAY-=10
laserEBY-=10
direccionE=False
if (laserEAX+espacioX) <= 0 and direccionE == False and estadoLaserE[i][j] == True:
laserEAY-=10
laserEBY-=10
direccionE=True
if j == 6:
espacioY-=50
espacioX=0
if direccion == True:
laserEAX+=50/100
laserEBX+=50/100
if direccion == False:
laserEAX-=50/100
laserEBX-=50/100
#Metodo para dibujar las naves enemigas normales
def EnemigoNormal():
glColor3f(2.0, 1.0, 0.0)
espacioX=0.0
espacioY=0.0
global enemigoX1, enemigoX2
global enemigoY1, enemigoY2
global direccion
global enemigos
global disparoYA, disparoYB
global estado
global estadoLaserE
global dificultad
global Puntuacion
for i in range(5):
for j in range(7):
if puntoCentralX >= 60+espacioX+enemigoX1 and puntoCentralX <= 90+espacioX+enemigoX2 and puntoCentralY >= enemigoY1+espacioY and puntoCentralY <= espacioY+enemigoY2 and enemigos[i][j]==True:
enemigos[i][j]=False
estado=False
estadoLaserE[i][j]=False
disparoYA=87.5
disparoYB=92.5
if i == 4:
Puntuacion += 1
if i == 3:
Puntuacion += 5
if i == 2:
Puntuacion += 10
if i == 1:
Puntuacion +=20
if i == 0:
Puntuacion +=30
#Verificando existencia del enemigo
if enemigos[i][j] == True:
print ("Enemigo fila : ", i, "col: ",j, "valor: ",enemigos[i][j])
glRectf(60+espacioX+enemigoX1, espacioY+enemigoY1, 90+espacioX+enemigoX2, espacioY+enemigoY2)
espacioX+=40
if (60+enemigoX2+espacioX) >= 400 and direccion == True and enemigos[i][j] == True:
enemigoY1-=10
enemigoY2-=10
direccion=False
if (enemigoX1+espacioX) <= 0 and direccion == False and enemigos[i][j] == True :
enemigoY1-=10
enemigoY2-=10
direccion=True
if j == 6:
espacioY-=50
espacioX=0
if direccion == True:
enemigoX1+=50/100
enemigoX2+=50/100
if direccion == False:
enemigoX1-=50/100
enemigoX2-=50/100
#Metodo para dibujar las defensas en el escenario.
def dibujarDefesa():
global banderas, banderasB, banderasC, estado
global puntoCentralX, puntoCentralY
global disparoYA, disparoYB
x1=40
y1=110
x2=45
y2=115
#Dibujando defensa numero 1
glColor3f(1.0, 0.0, 0.0);
for i in range(5):
for j in range(12):
print ("puntoCentralX: ",puntoCentralX, " puntoCentralY: ",puntoCentralY, "punto Y= ",y1)
if puntoCentralX >= x1 and puntoCentralX <= x2 and puntoCentralY >= y1 and banderas[i][j]==True and puntoCentralY<=140:
estado=False
banderas[i][j]=False
disparoYA=87.5
disparoYB=92.5
if puntoCentralY >= 650:
estado=False
disparoYA=87.5
disparoYB=92.5
if banderas[i][j] == True:
glRectf(x1, y1, x2, y2);
x1=x1+5
x2=x2+5
print ("i: ",i," j: ",j)
print ("Bandera: ",banderas[i][j])
if j == 11:
y1=y1+5
y2=y2+5
x1=40
x2=45
x1=170
y1=110
x2=175
y2=115
#Dibujando defensa numero 2
glColor3f(1.0, 0.0, 0.0);
for i in range(5):
for j in range(12):
print ("puntoCentralX: ",puntoCentralX, " puntoCentralY: ",puntoCentralY, "punto Y= ",y1)
if puntoCentralX >= x1 and puntoCentralX <= x2 and puntoCentralY >= y1 and banderasB[i][j]==True and puntoCentralY<=140:
estado=False
banderasB[i][j]=False
disparoYA=87.5
disparoYB=92.5
if puntoCentralY >= 650:
estado=False
disparoYA=87.5
disparoYB=92.5
if banderasB[i][j] == True:
glRectf(x1, y1, x2, y2);
x1=x1+5
x2=x2+5
print ("i: ",i," j: ",j)
print ("Bandera: ",banderasB[i][j])
if j == 11:
y1=y1+5
y2=y2+5
x1=170
x2=175
x1=300
y1=110
x2=305
y2=115
#Dibujando defensa numero 3
glColor3f(1.0, 0.0, 0.0);
for i in range(5):
for j in range(12):
print ("puntoCentralX: ",puntoCentralX, " puntoCentralY: ",puntoCentralY, "punto Y= ",y1)
if puntoCentralX >= x1 and puntoCentralX <= x2 and puntoCentralY >= y1 and banderasC[i][j]==True and puntoCentralY<=140:
estado=False
banderasC[i][j]=False
disparoYA=87.5
disparoYB=92.5
glutPostRedisplay()
#Limitando el disparo
if puntoCentralY >= 650:
estado=False
disparoYA=87.5
disparoYB=92.5
if banderasC[i][j] == True:
glRectf(x1, y1, x2, y2);
x1=x1+5
x2=x2+5
print ("i: ",i," j: ",j)
print ("Bandera: ",banderasC[i][j])
if j == 11:
y1=y1+5
y2=y2+5
x1=300
x2=305
#Ajustes del escenario.
def redimensionar(w, h):
glViewport(0, 0, w, h);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, 400.0, 0.0, 650.0, -1.0, 1.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity()
#Mostrando el escenario del juego
def mostrarEscenario():
glClear(GL_COLOR_BUFFER_BIT)
#Mostrando titulo del juego
if menu == True:
mostrarTitulo()
else:
mensajesInterfaz()
glColor3f(0.0, 0.0, 0.0)
DibujarNavePrincipal()
dibujarDefesa()
EnemigoNormal()
if estado == True:
disparar()
glutPostRedisplay()
ataqueEnemigo()
dibujarNaveExtra()
if terminar == True:
finDelJuego()
glFlush()
#Finalizando la musica al terminar el programa.
def finalizar():
global bgmusica
print (" Termino Ventana")
bgmusica.terminate()
sys.exit(0)
#Metodo para las funciones del teclado.
def teclado(key, x, y):
global Puntuacion
global vidas
global posicionNave
global bgmusica # Tambien con
global estado
global posicionNave
global disparoPosicion
if key == b'a': # izquierda
if posicionNave>0:
posicionNave-=2
glutPostRedisplay();
if key == b'\033':
#bgmusica.kill()
sys.exit(0)
if key == b'd':
if posicionNave<400-30:
posicionNave+=2
glutPostRedisplay()
#Base para cuando el jugador pierde una vida
if key == b't':
vidas=vidas-1
posicionNave=0
glutPostRedisplay()
if key == b' ':
global menu
menu=False
glutPostRedisplay()
if key == b'k':
estado=True
disparoPosicion=posicionNave
#fuego = subprocess.Popen(['mplayer', './rifleHaz.mp3'])
glutPostRedisplay()
if key == b'l':
print ("Pruebas para proyectiles enemigos")
glutPostRedisplay()
#Prueba para destruir nave
if key == b'u':
global visible
visible=False
glutPostRedisplay()
if key == b'f':
global terminar
terminar=True
glutPostRedisplay()
#Metodo principal main
def main():
global bgmusica
#bgmusica = subprocess.Popen(['mplayer', './bgbattletoad.mp3', '-loop','0','&'])
mostrarMensajes()
glutInit(sys.argv)
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
glutInitWindowPosition(450, 100)
glutInitWindowSize(400, 560)
glutCreateWindow("Proyecto No.1 , Vector Ivaders")
glutReshapeFunc(redimensionar)
glutDisplayFunc(mostrarEscenario)
glutKeyboardFunc(teclado)
glutIdleFunc(idle)
glutWMCloseFunc(finalizar)
init()
glutMainLoop()
main() | [
"noreply@github.com"
] | AndresFidel.noreply@github.com |
9eee3e806f5023bbfc47169fdcfb5b2f76fc9c00 | 3c69fc10b9376cdfd5e5fab79d3965f8fa294802 | /blog_admin/blog/migrations/0004_comment_comment.py | 3ce659c2750cab9b44886b3207bcd1859973cc1b | [] | no_license | kaushal-89/pythonproject | 9ac404b834c955dafa02f0be9294f4884b4d1307 | e1def9b284c21e2258e67435320d18a867d65e98 | refs/heads/master | 2023-08-25T15:42:21.574064 | 2021-10-12T10:02:45 | 2021-10-12T10:02:45 | 413,140,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # Generated by Django 3.1.2 on 2020-10-19 12:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_auto_20201018_1841'),
]
operations = [
migrations.AddField(
model_name='comment',
name='comment',
field=models.TextField(blank=True),
),
]
| [
"roykaushal89@gmail.com"
] | roykaushal89@gmail.com |
54a14fd7fe3b34f16e2aeb1941edc8508fb04f2d | 3f0f9bf65634a3a926362c132d5e8595df4bc675 | /single_linked.py | 78216ef50a4386de9039bb775fea720ee804b9ca | [] | no_license | mubasheerusain/Python-programs | c29225b1a8fab1621c59b6fd8f986db463e2ea99 | aa1c62931fc9bc3fc8c522e472db13c544d884e2 | refs/heads/master | 2023-04-02T04:56:40.737719 | 2021-04-12T02:11:27 | 2021-04-12T02:11:27 | 357,029,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,241 | py | class Node:
def __init__(self,data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def insert(self,newNode):
if self.head is None:
self.head = newNode
else:
lastnode = self.head
while True:
if lastnode.next is None:
break
lastnode = lastnode.next
lastnode.next = newNode
def insert_head(self,newnode):
if self.head is None:
self.head = newnode
temp = self.head
self.head = newnode
self.head.next = temp
def insert_At(self,position,newnode):
currentNode = self.head
currentIndex = 0
while True:
if currentIndex == position:
previousNode.next = newnode
newnode.next = currentNode
break
else:
previousNode = currentNode
currentNode = currentNode.next
currentIndex += 1
def delete_head(self):
temp = self.head
self.head = self.head.next
del temp
def deleteAt(self,position):
currentNode = self.head
pos = 0
while True:
if position == pos:
temp = currentNode
previousNode.next = currentNode.next
del temp
break
else:
previousNode = currentNode
currentNode = currentNode.next
pos += 1
def printlist(self):
currentNode = self.head
while True:
if currentNode is None:
break
print(currentNode.data)
currentNode = currentNode.next
#n = int(input("Enter the number of inputs: "))
#i = 0
#data = []
#while i<n:
# s = input("Enter a String: ")
# data.append(s)
# i += 1
#linkedlist = LinkedList()
#for j in data:
# linked = Node(j)
# linkedlist.insert(linked)
#ans = input("Enter a string: ")
#pos = int(input("Enter the index: "))
#result = Node(ans)
#linkedlist.insert_At(pos,result)
#linkedlist.deleteAt(pos)
#linkedlist.printlist()
| [
"mohamed.hussain@atmecs.com"
] | mohamed.hussain@atmecs.com |
e6a83c518c0ad5a0e277d860ea3388efff7b2f63 | 44a7330dfa4fe321eb432ee57a32328578dec109 | /milk/unsupervised/som.py | abe3f6dbe5b47773ecfa3cb5f58852d2d220e17f | [
"MIT"
] | permissive | tzuryby/milk | 7cb6760fad600e9e0d0c9216dc749db289b596fb | a7159b748414d4d095741978fb994c4affcf6b9b | refs/heads/master | 2020-12-29T02:45:33.044864 | 2011-03-15T20:23:29 | 2011-03-15T20:25:11 | 1,485,748 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,261 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2010, Luis Pedro Coelho <lpc@cmu.edu>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
# License: MIT. See COPYING.MIT file in the milk distribution
from __future__ import division
import numpy as np
from ..utils import get_pyrandom
from . import _som
def putpoints(grid, points, L=.2, radius=4, iterations=1, shuffle=True, R=None):
'''
putpoints(grid, points, L=.2, radius=4, iterations=1, shuffle=True, R=None)
Feeds elements of `points` into the SOM `grid`
Parameters
----------
grid : ndarray
Self organising map
points : ndarray
data to feed to array
L : float, optional
How much to influence neighbouring points (default: .2)
radius : integer, optional
Maximum radius of influence (in L_1 distance, default: 4)
iterations : integer, optional
Number of iterations
shuffle : boolean, optional
Whether to shuffle the points before each iterations
R : source of randomness
'''
if radius is None:
radius = 4
if type(L) != float:
raise TypeError("milk.unsupervised.som: L should be floating point")
if type(radius) != int:
raise TypeError("milk.unsupervised.som: radius should be an integer")
if grid.dtype != np.float32:
raise TypeError('milk.unsupervised.som: only float32 arrays are accepted')
if points.dtype != np.float32:
raise TypeError('milk.unsupervised.som: only float32 arrays are accepted')
if len(grid.shape) == 2:
grid = grid.reshape(grid.shape+(1,))
if shuffle:
random = get_pyrandom(R)
for i in xrange(iterations):
if shuffle:
random.shuffle(points)
_som.putpoints(grid, points, L, radius)
def closest(grid, f):
'''
y,x = closest(grid, f)
Finds the coordinates of the closest point in the `grid` to `f`
::
y,x = \\argmin_{y,x} { || grid[y,x] - f ||^2 }
Parameters
----------
grid : ndarray of shape Y,X,J
self-organised map
f : ndarray of shape J
point
Returns
-------
y,x : integers
coordinates into `grid`
'''
delta = grid - f
delta **= 2
delta = delta.sum(2)
return np.unravel_index(delta.argmin(), delta.shape)
def som(data, shape, iterations=1000, L=.2, radius=4, R=None):
'''
grid = som(data, shape, iterations=1000, L=.2, radius=4, R=None):
Self-organising maps
Parameters
----------
points : ndarray
data to feed to array
shape : tuple
Desired shape of output. Must be 2-dimensional.
L : float, optional
How much to influence neighbouring points (default: .2)
radius : integer, optional
Maximum radius of influence (in L_1 distance, default: 4)
iterations : integer, optional
Number of iterations
R : source of randomness
Returns
-------
grid : ndarray
Map
'''
R = get_pyrandom(R)
d = data.shape[1]
if data.dtype != np.float32:
data = data.astype(np.float32)
grid = np.array(R.sample(data, np.product(shape))).reshape(shape + (d,))
putpoints(grid, data, L=L, radius=radius, iterations=iterations, shuffle=True, R=R)
return grid
| [
"lpc@cmu.edu"
] | lpc@cmu.edu |
76a04411310d413052c770de758b42430bb0bd34 | 01fc9343d2e79a394f55c1fa45a7ba37543ea3eb | /MTA 98-381 Demo/8.字典和映射/08-02 计算字母出现频率.py | cc03723757d7d2ff241fb67911c41bc50606c8da | [] | no_license | xccu/PythonDemo | 35e8e61cac0653a894d6e9d224c4f5213c14e451 | 784b24d3ae66a914db22215d0aeaab1259fe7ecb | refs/heads/master | 2023-06-07T14:11:38.328193 | 2021-07-03T14:42:08 | 2021-07-03T14:42:08 | 302,647,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | #打印字母在单词中的出现频率
def histotgram(word):
d=dict()
for c in word:
if c not in d:
d[c]=1
else:
d[c]+=1
return d
d = histotgram('pneumonoultramicroscopicsilicovolcanoconiosis')
# {'p': 2, 'n': 4, 'e': 1, 'u': 2, 'm': 2, 'o': 9, 'l': 3, 't': 1, 'r': 2, 'a': 2, 'i': 6, 'c': 6, 's': 4, 'v': 1}
print(d) | [
"264672790@qq.com"
] | 264672790@qq.com |
5880bf69ecbf7bf3b345232120daff2fe9ddd768 | b39ecc49e49ff41f860f84791bde4d3601f74c04 | /Ch02/07_decimal.py | c0b238ef08c27bff238f0ee2809b3def51459d15 | [] | no_license | gaebalJJang/Algorithm_Study_YL | fd0435305cd44cf5aed293611099ce5162ff78c0 | ef173a5cea7bf23de14e4b9628d24fcb004155a3 | refs/heads/master | 2023-05-05T04:30:47.267903 | 2021-05-27T15:55:58 | 2021-05-27T15:55:58 | 362,849,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | """
problem 7 : 소수 (에라토스테네스 체)
작성자 : 안예린 (girlwcode)
작성일 : 21-05-08
"""
import sys
sys.stdin=open("input.txt","rt")
num = int(input())
cnt = 0
for i in range (2, num+1) :
c = 0
for j in range (1, i+1) :
if(i % j == 0) :
c += 1
if (c >= 3) :
break
if (c == 2) :
cnt += 1
print(cnt) | [
"yelynahn@gmail.com"
] | yelynahn@gmail.com |
c7646ecb7025dceca9d5309f9af3cf7330fb767d | 6307583489b4e4c311653920072f958df5aefaf5 | /apps/labs/module03/SenseHatLed.py | 562654823ed1c7732bb448757826c42226db762f | [] | no_license | user-amanshah/iot-device | fffc99899b83018cf5a32a6efda694e542b778e3 | f84a160534b7500e5c2d2dce2e62225e7c0df42b | refs/heads/master | 2021-05-25T19:36:24.319013 | 2020-03-25T02:19:26 | 2020-03-25T02:19:26 | 253,893,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | '''
Created on Feb 10, 2020
@author: amanshah
'''
from sense_hat import SenseHat
class SenseHatLed(object):
'''
classdocs
'''
sense=SenseHat()
def __init__(self):
'''
Constructor
'''
"""
show + on led
"""
def increasePattern(self):
#self.sense.show_letter("+",text_colour=[255,0,0])
print("+ sign print")
"""
show - on led
"""
def decreasePattern(self):
#self.sense.show_letter("-",text_colour=[0,0,255])
print("- sign print") | [
"shah.ama@husky.neu.edu"
] | shah.ama@husky.neu.edu |
6359d227072d54f981d2eab11b0d3c4bb4efbc90 | 9187db411e709b4472e5a934ca123045d65d0136 | /env/Scripts/django-admin.py | 9f18f034e392510c26292c38f78723cb9fa86e4c | [] | no_license | Higgins-Sam/ReactAndDjangoAPI | f65af1770ab7abb4402b2909eb9a7c921bb8979d | f5fd347950421b2e1310999d2bcb1806c4d8b30b | refs/heads/main | 2023-02-03T22:03:18.592744 | 2020-12-23T11:15:34 | 2020-12-23T11:15:34 | 323,404,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | #!c:\projects\python\django\reactondjangoapi\env\scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"sam.higgins@bjss.com"
] | sam.higgins@bjss.com |
8ee621ecc5d502aa91e74f949d4018fe7a136214 | 39be2b4a9ddc9b3191873d3a63eb04f64a8d28b7 | /mypage/urls.py | 69d96488298deef0e1621ac4709bea87249ffc2d | [] | no_license | juyeunkim/dna_railro | 4c77af900978422bb97eb39ff98c49815b1ab27f | 38f0b6902d69eafa2b0035e270fd0aaf4bc95df2 | refs/heads/master | 2020-03-19T16:42:47.325763 | 2018-06-07T05:06:30 | 2018-06-07T05:06:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.user_leave, name='user_leave'),
url(r'^user_leave', views.user_leave, name='user_leave'),
url(r'^past_list', views.user_travellist, name='user_travellist'),
url(r'^selectList?', views.selectList, name='selectList'),
]
| [
"ahlim721@gmail.com"
] | ahlim721@gmail.com |
9c447f788141ad946b70c44d3eb4d35529a48880 | 7194781966c0d930f7c6dbe5e8d639d339f9f1eb | /src/com/leetcode/stack/Problem_0302_minStack.py | 1d15f4df3d298fb101b44b1749a21c767730970b | [] | no_license | StringsLi/py-target-offer | fd0c232cffe947308a1646480f8da8ac72b53860 | f474fbe0d006043cebf1b3cf3056bf209e118407 | refs/heads/master | 2023-03-25T03:11:07.611396 | 2021-03-25T09:57:11 | 2021-03-25T09:57:11 | 265,808,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | """
每当入栈的数小于等于栈顶元素时,随主栈都进行一次 push 操作;
当出栈的数与最小栈的栈顶元素相等时,最小栈也随主栈进行一次 pop 操作。
"""
class MinStack:
def __init__(self):
"""
initialize your data structure here.
"""
self.stack = []
self.minStack = []
def push(self, x: int) -> None:
self.stack.append(x)
if not self.minStack or x <= self.minStack[-1]:
self.minStack.append(x)
def pop(self) -> None:
x = self.stack.pop()
if x == self.minStack[-1]:
self.minStack.pop()
def top(self) -> int:
return self.stack[-1]
def getMin(self) -> int:
return self.minStack[-1]
| [
"lixin4009@163.com"
] | lixin4009@163.com |
54f59acba3e28e9e73601f99667ca553cc1f9529 | 738b6d6ec4572f5848940b6adc58907a03bda6fb | /tests/nutmeg4_pymcell4/0625_prob_changed_notification_disabled/model.py | bb49dac21464576f72e6b5d1f13578c087a464db | [
"Unlicense",
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | mcellteam/mcell_tests | 09cd1010a356e0e07c88d7e044a73c5606c6e51a | 34d2d967b75d56edbae999bf0090641850f4f4fe | refs/heads/master | 2021-12-24T02:36:24.987085 | 2021-09-24T14:19:41 | 2021-09-24T14:19:41 | 174,733,926 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | #!/usr/bin/env python3
import sys
import os
MCELL_PATH = os.environ.get('MCELL_PATH', '')
if MCELL_PATH:
sys.path.append(os.path.join(MCELL_PATH, 'lib'))
else:
print("Error: variable MCELL_PATH that is used to find the mcell library was not set.")
sys.exit(1)
import mcell as m
params = m.bngl_utils.load_bngl_parameters('test.bngl')
ITERATIONS = int(params['ITERATIONS'])
DUMP = True
EXPORT_DATA_MODEL = True
# ---- load bngl file ----
model = m.Model()
model.load_bngl('test.bngl')
rxn = model.find_reaction_rule('rxn')
assert(rxn)
var_rate_react_a_plus_b = [
[0, 0],
[1e-05, 9.98334e+06],
[2e-05, 1.98669e+07],
[3e-05, 2.9552e+07],
[4e-05, 3.89418e+07],
[5e-05, 4.79426e+07],
[6e-05, 5.64642e+07]
]
rxn.variable_rate = var_rate_react_a_plus_b
# ---- configuration ----
model.config.total_iterations = ITERATIONS
model.notifications.rxn_probability_changed = False
model.initialize()
#model.dump_internal_state()
model.run_iterations(ITERATIONS)
model.end_simulation()
| [
"ahusar@salk.edu"
] | ahusar@salk.edu |
8cccd7ea62ffc890e4fc1ca40567533548dff83f | 25177624f1b9c246e04df1e91c3796e29630ea11 | /conics7f.py | 7a0c0ec2b21e1bbab943f25ead51eb263c0deedf | [] | no_license | Aish13-12/Conics-final | b5fa5bee91a351eb05f8f390491ba81d4bcc3290 | e8359642241093d100bd202ee1206edd03b593da | refs/heads/master | 2020-08-30T23:00:30.250158 | 2019-10-30T11:44:34 | 2019-10-30T11:44:34 | 218,514,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | import numpy as np
import matplotlib.pyplot as plt
import math
from numpy import linalg as LA
A_1=np.array([4,3])
K_1=12
A_2=np.array([3,4])
K_2=12
A=np.vstack((A_1,A_2))
K=np.array((K_1,K_2))
K=K.T
C=K@LA.inv(A)
print(C)
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
#Circle parameters
a_1=np.array([0,1])
a_2=np.array([1,0])
V=np.vstack((a_1,a_2))
u = -(C.T@V)/2
u=u.T
c=-u
F =0
#defining centre and radius of Circle C1
c=-u
r=np.sqrt(c.T@c-F)
#Generating points on the circle C1
len = 100
theta = np.linspace(0,2*np.pi,len)
x_circ = np.zeros((2,len))
x_circ[0,:] = r*np.cos(theta)
x_circ[1,:] = r*np.sin(theta)
x_circ = (x_circ.T + c).T
plt.plot(x_circ[0,:],x_circ[1,:],label='$Locus$')
plt.plot(C[0], C[1], 'o')
plt.text(C[0] * (1 + 0.1), C[1] * (1 - 0.1) , 'C')
plt.plot(c[0], c[1], 'o')
plt.text(c[0] * (1 + 0.1), c[1] * (1 - 0.1) , 'O')
ax.plot()
plt.xlabel('$x$');plt.ylabel('$y$')
plt.legend(loc='best')
plt.grid()
plt.show()
| [
"noreply@github.com"
] | Aish13-12.noreply@github.com |
8bde1e7c8d3f15fa84f32773e315e26557bde33f | 6c816f19d7f4a3d89abbb00eeaf43dd818ecc34f | /apps/detailQuestion/migrations/0001_initial.py | f55e7aa24232e3c288aacd4cef66e2d65e699b32 | [] | no_license | reo-dev/bolt | 29ee6aa7cfc96bd50fa7a7dae07fbaafc2125e54 | d1a7859dd1ebe2f5b0e6e295047b620f5afdb92e | refs/heads/master | 2023-07-13T04:05:57.856278 | 2021-08-27T09:07:03 | 2021-08-27T09:07:03 | 382,195,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,297 | py | # Generated by Django 3.0.8 on 2021-01-12 02:20
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('project', '0013_proposaltype_name'),
]
operations = [
migrations.CreateModel(
name='DetailQuestionTitle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', models.TextField(verbose_name='질문제목')),
('createdAt', models.DateTimeField(default=django.utils.timezone.now, verbose_name='작성일')),
],
options={
'verbose_name': ' 질문제목',
'verbose_name_plural': ' 질문제목',
},
),
migrations.CreateModel(
name='DetailQuestionSelect',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('select', models.TextField(verbose_name='질문 선택지')),
('createdAt', models.DateTimeField(default=django.utils.timezone.now, verbose_name='작성일')),
('title', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='detailQuestion.DetailQuestionTitle', verbose_name='질문제목')),
],
),
migrations.CreateModel(
name='DetailQuestionSave',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('createdAt', models.DateTimeField(default=django.utils.timezone.now, verbose_name='작성일')),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='detailQuestion.DetailQuestionTitle', verbose_name='질문제목')),
('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project.Request', verbose_name='의뢰서')),
('select', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='detailQuestion.DetailQuestionSelect', verbose_name='질문 선택지')),
],
),
]
| [
"75593016+reo-dev@users.noreply.github.com"
] | 75593016+reo-dev@users.noreply.github.com |
b1108a7b98041abfbed588f01044c2dbd17cd34a | 30c806dc9553f485781c9816ec6e6fd42536bf07 | /post.py | 1f36c0adb923a995aa4163eab6b75c694ec214c5 | [] | no_license | singhcpt/bloomwabot | bf1e80fae1337fdfba0991944a37cee622e0395b | dbaf0242c8a706fdc6dcec24739b7ee9995af126 | refs/heads/master | 2022-11-24T05:19:42.809527 | 2020-07-30T21:57:47 | 2020-07-30T21:57:47 | 272,800,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | import uuid
import time
class Post:
def __init__(self, user_id, type, quantity, location, price):
self.user_id = user_id
self.crop = type
self.quantity = quantity
self.location = location
self.price = price
def setCrop(self, crop):
self.crop = crop
return
def setQuantity(self, quantity):
self.quantity = quantity
return
def setLocation(self, location):
self.location = location
return
def setPrice(self, price):
self.price = price
return
def __str__(self):
return "Crop: " + self.crop + " | Quantity: " + str(self.quantity) + " kgs | Location: " + self.location + " | Price: " + str(self.price) + " sh "
__repr__=__str__
| [
"noreply@github.com"
] | singhcpt.noreply@github.com |
b4956847e31f20079841e278a95180928ce3010d | 833424ff161a178b783e726ae4eea52302db2154 | /test/command_line/test_sequence_to_stills.py | ca3b30fca2baf23b7ae432b6c28560c91da4bd3b | [
"BSD-3-Clause"
] | permissive | JBlaschke/dials | 16e4f6d7503754b546d23bbf346b24b344c65af9 | 83fbc79864411b67933bd9708bc2d5f7d8f0859b | refs/heads/master | 2022-12-17T01:18:28.036153 | 2020-09-11T11:35:08 | 2020-09-11T11:35:08 | 294,831,151 | 0 | 0 | BSD-3-Clause | 2020-09-11T23:22:57 | 2020-09-11T23:22:56 | null | UTF-8 | Python | false | false | 1,089 | py | from __future__ import absolute_import, division, print_function
import os
import procrunner
from dxtbx.model.experiment_list import ExperimentListFactory
def test_sequence_to_stills(dials_regression, tmpdir):
path = os.path.join(
dials_regression, "refinement_test_data", "radiation_damaged_thaumatin"
)
input_experiments = os.path.join(path, "refined_experiments_P42.json")
input_reflections = os.path.join(path, "indexed.pickle")
result = procrunner.run(
[
"dials.sequence_to_stills",
input_experiments,
input_reflections,
"domain_size_ang=500",
"half_mosaicity_deg=0.1",
"max_scan_points=10",
],
working_directory=tmpdir.strpath,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("stills.expt").check(file=1)
assert tmpdir.join("stills.refl").check(file=1)
experiments = ExperimentListFactory.from_json_file(
tmpdir.join("stills.expt").strpath, check_format=False
)
assert len(experiments) == 10
| [
"noreply@github.com"
] | JBlaschke.noreply@github.com |
82d6571768226c18f89118be61d91d458b3efd09 | 056a9130976b427345f36557d853ca55723734d7 | /config.py | a741aa2695964c642c0c3e1be06b1e53980fa6c2 | [] | no_license | allyroo/DroneInventory | 3e719e1704279ea41853d48d993c9b3c32dc6e62 | 4949a931abe56b811a0f5ce59a3d71541b0d4029 | refs/heads/master | 2023-06-15T21:08:34.819459 | 2021-07-07T21:34:10 | 2021-07-07T21:34:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 878 | py | import os
from dotenv import load_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
# be able to make sure path works;
#Give access to the project in ANY OS that we find ourselves in
# ALlow outside files/folders to be added to the project from the base directory
class Config():
"""
Set Config variables for the flask app.
Using Enviornment variables where available
Otherwise create the config variable if not done already.
"""
FLASK_APP = os.getenv('FLASK_APP')
FLASK_ENV = os.getenv('FLASK_ENV')
SECRET_KEY = os.environ.get('SECRET_KEY') or 'You will never guess'
SQLALCHEMY_DATABASE_URI = os.environ.get('DEPLOY_DATABASE_URL') or 'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False # Turn off update messaes from sqlalchemy | [
"allyse.arucan@gmail.com"
] | allyse.arucan@gmail.com |
422c59d8736d23a0933e5a59a3eba61ee434fe8f | 13dd4a972d0d8d3ca23b9a510e30266ec6e048f8 | /agoda.py | c94c9e52322ec59bc5821285aaf485a8bbbfb53c | [] | no_license | wodend/hotel-scraper | 5ccd9f7ad751d7e90c84065a051fba55484fe667 | bb5020b04d32fc148ad4d3aab33b645253b3a1ed | refs/heads/master | 2022-07-03T16:03:08.467555 | 2019-05-31T17:25:08 | 2019-05-31T17:25:08 | 188,231,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,200 | py | import queue
import requests
import socket
import sys
import time
import config
import util
BASE_URL = "https://{0}.agoda.com/"
DEFAULT_TIME = 'T00:00:00'
LOCATION = {
'ny': 318,
'sf': 13801,
'lon': 233,
'frank': 15847,
'am': 13868,
'ban': 4923,
'sin': 4064,
'tor': 17052,
}
def search(location):
stay_length = config.CHECK_OUT.day - config.CHECK_IN.day
query = {
'SearchType': 1,
'PageSize': config.RESULT_COUNT,
'CityId': location,
'CheckIn': str(config.CHECK_IN) + DEFAULT_TIME,
'CheckOut': str(config.CHECK_OUT) + DEFAULT_TIME,
'LengthOfStay': stay_length,
'Adults': config.ADULT_COUNT,
'Children': config.CHILD_COUNT,
'Rooms': config.ROOM_COUNT,
}
session = requests.Session()
session.headers = config.HEADERS
session.get(BASE_URL.format('www'))
response = session.post(
BASE_URL.format('ash') + 'api/en-us/Main/GetSearchResultList',
json=query,
)
try:
json = response.json()
except ValueError:
json = {}
return json.get('ResultList')
def parse_result(result):
hotel = -1
price = -1
hotel_attr = result.get('HotelID')
price_string = result.get('FormattedDisplayPrice')
if hotel_attr:
hotel = hotel_attr
if price_string:
price = int(price_string.replace(',', ''))
return hotel, price
def scrape(queue, location_code, rate):
utc = time.gmtime()
location_curr = socket.gethostname()
results = search(LOCATION[location_code])
for index, result in enumerate(results):
hotel, price_local = parse_result(result)
price = util.usd(price_local, config.CURRENCY[location_curr], rate)
queue.put(['ag', location_curr, location_code, index, hotel,
price_local, price, utc.tm_yday, utc.tm_hour])
def main(argv):
rate = {
'EUR': 1,
'USD': 1.116931,
'CAD': 1.506685,
'GBP': 0.882129,
'INR': 78.05954,
'SGD': 1.540751,
}
que = queue.Queue()
scrape(que, argv[1], rate)
while not que.empty():
print(que.get())
if __name__ == '__main__':
main(sys.argv)
| [
"wodend@tuta.io"
] | wodend@tuta.io |
3a864c7b963ab09cc0cce9f326c1fa6ca80f1a7a | b784afb57d55f9a694d39696fd6d2d44e94f1890 | /pybrexer0005.py | 3e6d1d5c1e8abff29ec1f047e0ae694c6b90047d | [] | no_license | lucianoww/Python | 796af4e1525a70cbde2bf7a63a6ea58945b552c7 | 6da7a003ad3f131abb49a6f0992419e03a1b7f50 | refs/heads/main | 2023-04-09T12:26:32.687875 | 2021-04-24T14:03:09 | 2021-04-24T14:03:09 | 361,176,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | '''
Solução para exercicios https://wiki.python.org.br/EstruturaSequencial
#05)Faça um Programa que converta metros para centímetros.
'''
metro = 100
valorcm = float(input('Digite valor em centimetros:'))
print('{} centimetros equivale a {} metros'.format(valorcm, valorcm/metro))
| [
"noreply@github.com"
] | lucianoww.noreply@github.com |
f61bdae50d86ce241bed7acf42c6de5feba31dde | 24b3346b95e08575646e7bb976084d01bd80210e | /Shopping_List-master/app/__init__.py | b106ba0b7beca6a3a807de950b8b41f1fdaedf07 | [] | no_license | Jameswafy/VirtualMart | bf914e84c2a75ce51401bfbb95f25a0c5965aebe | 5574b1e72611a2e778be6f936879581c4adb84b9 | refs/heads/master | 2023-03-04T01:45:34.441871 | 2023-02-22T06:01:33 | 2023-02-22T06:01:33 | 235,806,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | from flask import Flask
app= Flask(__name__)
from app import views
| [
"noreply@github.com"
] | Jameswafy.noreply@github.com |
9e037e5e43c94a023c833c65ede67c184dbaa8a3 | ad21a90663ab11167da3f7de58bd9c869b6cce88 | /ntt_tool/apps/cloud/models.py | 5835eb2183012dfb334b0e8e9aaf1f4cffc1b9df | [] | no_license | nicholasi/ntt_tool_python | 7863d1b767694b4dd7a69996f72b4f607c700383 | a7098e493e7682020a9e61d09be25d68f52d9d91 | refs/heads/master | 2020-12-30T19:11:36.256853 | 2016-02-29T11:40:57 | 2016-02-29T11:40:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,232 | py | from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
class Cloud(models.Model):
name = models.CharField(max_length=256)
keystone_auth_url = models.CharField(max_length=2083)
keystone_user = models.CharField(max_length=100)
keystone_password = models.CharField(max_length=250)
keystone_tenant_name = models.CharField(max_length=100)
# creator = models.ForeignKey(User) #ToDo: Facing some issues in serializer validation
created_on = models.DateTimeField(auto_now=True)
updated_on = models.DateTimeField(auto_now_add=True)
class Meta:
db_table = "cloud_details"
class CloudTraffic(models.Model):
TYPE_CHOICES = (
('all', 'All'),
('intra-tenant', 'Intra Tenant'),
('inter-tenant', 'Inter Tenant'),
('south-north', 'South to North'),
('north-south', 'North to South'),
)
name = models.CharField(max_length=256)
cloud = models.ForeignKey(Cloud)
allowed_delta_percentage = models.FloatField()
test_result_path = models.CharField(max_length=250)
number_of_workers = models.IntegerField()
remote_user = models.CharField(max_length=100)
remote_pass = models.CharField(max_length=100)
test_method = models.CharField(max_length=100)
iperf_duration = models.IntegerField()
tenant_type = models.CharField(max_length=20, choices=TYPE_CHOICES, default='all')
external_host = models.CharField(max_length=100, blank=True, null=True)
# creator = models.ForeignKey(User)
created_on = models.DateTimeField(auto_now=True)
updated_on = models.DateTimeField(auto_now_add=True)
class Meta:
db_table = "cloud_traffic"
class CloudTrafficTenants(models.Model):
cloud_traffic = models.ForeignKey(CloudTraffic, related_name="cloud_traffic_tenants")
tenant_name = models.CharField(max_length=256)
ssh_gateway = models.CharField(max_length=256, blank=True, null=True)
creator = models.ForeignKey(User, related_name="cloud_traffic_tenants_creator")
created_on = models.DateTimeField(auto_now=True)
updated_on = models.DateTimeField(auto_now_add=True)
class Meta:
db_table = "cloud_traffic_tenants"
| [
"gfr786@gmail.com"
] | gfr786@gmail.com |
a88524be820b8141ba2700ef02283fe69fe301c4 | 39bc55c2a4457bbe7ff4136ea660a29ff88ee66d | /skued/simulation/tests/test_structure_factors.py | 7513665650da42ed29e663bb4456ea09438f61dd | [
"MIT"
] | permissive | KOLANICH-physics/scikit-ued | c72b3219e547e33ae067c5d36a93439d2f9045e2 | c13472129df33105312b57427ce588e66d20391f | refs/heads/master | 2022-01-22T05:47:04.286449 | 2018-09-24T15:06:00 | 2018-09-24T15:06:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,918 | py | # -*- coding: utf-8 -*-
import unittest
import numpy as np
from random import randint
from .. import structure_factor, bounded_reflections, affe
from ... import Crystal, Atom
class TestElectronFormFactor(unittest.TestCase):
def test_side_effects(self):
nG = np.random.random(size = (16, 32))
nG.setflags(write = False) # if nG is written to, Exception is raised
affe(Atom('He', coords = [0,0,0]), nG)
def test_out_shape(self):
nG = np.random.random(size = (16, 32))
eff = affe(Atom('He', coords = [0,0,0]), nG)
self.assertSequenceEqual(eff.shape, nG.shape)
def test_int(self):
""" Test that affe(int, ...) also works """
atomic_number = randint(1, 103)
nG = np.random.random(size = (16, 32))
from_int = affe(atomic_number, nG)
from_atom = affe(Atom(atomic_number, [0,0,0]), nG)
self.assertTrue(np.allclose(from_int, from_atom))
def test_str(self):
""" Test that affe(str, ...) also works """
# Try with Chlorine (Z = 17)
atomic_number = 17
nG = np.random.random(size = (16, 32))
from_int = affe(atomic_number, nG)
from_str = affe('Cl', nG)
self.assertTrue(np.allclose(from_int, from_str))
class TestStructureFactor(unittest.TestCase):
def setUp(self):
self.crystal = Crystal.from_database(next(iter(Crystal.builtins)))
def test_shape_and_dtype(self):
""" Test that output of structure_factor is same shape as input,
and that the dtype is complex """
h, k, l = np.meshgrid([1, 2, 3], [1, 2, 3], [1, 2, 3])
sf = structure_factor(self.crystal, h, k, l)
self.assertSequenceEqual(sf.shape, h.shape)
self.assertEqual(sf.dtype, np.complex)
class TestBoundedReflections(unittest.TestCase):
def setUp(self):
self.crystal = Crystal.from_database(next(iter(Crystal.builtins)))
def test_bounded_reflections_negative(self):
""" Test that negative reflection bounds raise an Exception.
Otherwise, an infinite number of reflections will be generated """
with self.assertRaises(ValueError):
hkl = list(bounded_reflections(self.crystal, -1))
def test_bounded_reflections_zero(self):
""" Check that bounded_reflections returns (000) for a zero bound """
h, k, l = bounded_reflections(self.crystal,0)
[self.assertEqual(len(i), 1) for i in (h, k, l)]
[self.assertEqual(i[0], 0) for i in (h, k, l)]
def test_bounded_reflections_all_within_bounds(self):
""" Check that every reflection is within the bound """
bound = 10
Gx, Gy, Gz = self.crystal.scattering_vector(*bounded_reflections(self.crystal,nG = bound))
norm_G = np.sqrt(Gx**2 + Gy**2 + Gz**2)
self.assertTrue(np.all(norm_G <= bound))
if __name__ == '__main__':
unittest.main() | [
"laurent.decotret@outlook.com"
] | laurent.decotret@outlook.com |
d7d85139a37f7f292f1ba76e3179ea86e9cf7735 | e1158d265dd9a9f316762ba80e56d3848e3483fb | /DsdTools/AverageDsds/DsdAverage-matrix.py | 94e61dab83c5022691b812069efb68f446fb40ca | [] | no_license | kdoroschak/tufts-dsd-confidence | ee1baa69523cbfcc77e295425ccbcbb333d8b963 | 691fb1f519dfc52d59f24d8777e2c1db30a50b2d | refs/heads/master | 2021-01-23T19:46:03.395786 | 2013-08-08T13:55:36 | 2013-08-08T13:55:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,975 | py | #!/usr/bin/env python2.7
# Huge memory requirement, must be run on a server like pulsar or meteor.
import os, sys, argparse, numpy, time
def main(argv):
time_entireprogram = time.clock()
useMasterMatrix = False
methodOptions = ['geometricmean', 'upperthreshold', 'arithmeticmean']
# Build command line argument parser
parser = argparse.ArgumentParser(description='Take the average of multiple DSD files using one of several averaging methods.')
parser.add_argument('--dsdFiles', '-i', required=True, type=str, help='directory containing only the DSD files to be averaged')
parser.add_argument('-o', required=True, type=str, help='name of the output (averaged) file')
parser.add_argument('--originalDsd', '-d', required=True, type=str, help='master matrix file, typically the original DSD file. header contains all possible proteins that show up in the input files')
parser.add_argument('--threshold', '-t', required=False, type=float, help='threshold value from 0 to 1 of top t%% of values to use in output')
parser.add_argument('--method', '-m', required=True, choices=methodOptions, help='select the averaging method from the above choices')
parser.add_argument('-a', '--xtraarg', required=False, type=str, help='additional argument if the specific method requires it. TODO add more info about which args are required')
args = parser.parse_args()
try:
dsdPath = args.dsdFiles
dsdFiles = os.listdir(dsdPath)
numDsdFiles = len(dsdFiles)
except:
print 'Error opening path to DSD files.'
outputFile = args.o
masterMatrixFile = args.originalDsd
threshold = args.threshold
method = args.method
arg=args.xtraarg
# ======= IMPORT MODULE AND SET VARIABLES BASED ON METHOD =======
# Method: Geometric mean
if method == "geometricmean":
print "Using geometric mean module to average DSDs."
if threshold is not None:
print " Warning: Threshold is not used and will be ignored."
if arg is not None:
print " Warning: Adt'l argument is not used and will be ignored."
useMasterMatrix = False
import module_geometricmean as averager
# Method: Upper threshold
elif method == "upperthreshold":
print "Using upper threshold module to average DSDs."
if threshold is None:
print " Error: Please specify threshold."
exit()
if arg is not None:
print " Warning: Adt'l argument is not used and will be ignored."
useMasterMatrix = False
import module_upperthreshold as averager
# Method: Arithmetic mean - all possible scores
if method == "arithmeticmean":
print "Using arithmetic mean module to average DSDs."
if threshold is not None:
print " Warning: Threshold is not used and will be ignored."
if arg is not None:
print " Warning: Adt'l argument is not used and will be ignored."
useMasterMatrix = False
import module_arithmeticmean as averager
# Template method
elif method == "":
print "Using ____ module to average DSDs."
# is additional arg required?
# is threshold required/not being used?
# is master matrix needed?
# import module_X as averager
if (threshold > 1 or threshold < 0) and (threshold is not None):
print "Error: Value of threshold should be from 0 to 1."
exit()
# ========== CREATE MATRIX FRAMEWORK ==========
print "Creating the matrix framework...",
sys.stdout.flush()
time_framework = time.clock()
# Read in header of master file
with open(masterMatrixFile) as masterMatrix:
labels = masterMatrix.readline()
labels = labels.split('\t')[1:]
labels = [label.strip() for label in labels]
numLabels = len(labels)
# Create map from label -> master index
mapLabelToIdx = {}
for label_idx in xrange(numLabels):
mapLabelToIdx[labels[label_idx].strip()] = label_idx
# Create giant matrix using all edges in header, initialized to 0.0
allDsds = numpy.zeros(shape=(numLabels, numLabels, numDsdFiles), dtype=float)
print " took " + str(time.clock() - time_framework) + "s."
# ========== READ ORIGINAL DSD ==========
if useMasterMatrix:
print "Reading in the original DSD...",
sys.stdout.flush()
time_loadfile = time.clock()
masterMatrix = numpy.loadtxt(masterMatrixFile, delimiter='\t', usecols=xrange(1,numLabels+1), skiprows=1)
print " took " + str(time.clock() - time_loadfile) + "s."
else:
masterMatrix = ''
# ========== POPULATE GIANT MATRIX ==========
# Add each element from individual files to giant matrix
print "Processing all files at " + dsdPath + "."
dsdFiles.sort()
currentCount = 0
for currentDsdFile in dsdFiles:
time_processfile = time.clock()
dsdFileWithPath = dsdPath + "/" + currentDsdFile
print "Working on file " + str(currentCount + 1) + "/" + str(numDsdFiles) + ": " + dsdFileWithPath
# Read file to be averaged into numpy array
print " Reading file into numpy array...",
sys.stdout.flush()
time_loadfile = time.clock()
dsdFile = open(dsdFileWithPath)
localLabels = dsdFile.readline().split('\t')[1:]
localLabels = [label.strip() for label in localLabels]
numLocalLabels = len(localLabels)
dsdMatrix = numpy.loadtxt(dsdFileWithPath, delimiter='\t', usecols=xrange(1,numLocalLabels+1), skiprows=1, dtype=float)
print " took " + str(time.clock() - time_loadfile) + "s."
print " Matching indices and populating big matrix...",
sys.stdout.flush()
time_lineupfile = time.clock()
# Line up the labels and populate the big array
for i in xrange(numLocalLabels):
for j in xrange(numLocalLabels):
if i < j:
xMasterIdx = mapLabelToIdx[localLabels[i]]
yMasterIdx = mapLabelToIdx[localLabels[j]]
score = dsdMatrix[i][j]
allDsds[xMasterIdx, yMasterIdx, currentCount] = score
print " took " + str(time.clock() - time_lineupfile) + "s."
print " Entire file took " + str(time.clock() - time_processfile) + "s."
currentCount += 1
dsdMatrix = '' # clean up memory
# ========== CALCULATE AVERAGE SCORES ==========
print "Calculating average scores..."
time_averagescores = time.clock()
matrixAverageScores = averager.calculateAverage(allDsds, masterMatrix, threshold, arg)
allDsds = '' # clean up memory
masterMatrix = '' # clean up memory
print " Averaging took " + str(time.clock() - time_averagescores) + "s."
# ========== WRITE RESULTS TO FILE ==========
print "Writing results to file...",
sys.stdout.flush()
time_writetofile = time.clock()
labels_row = numpy.array((labels), dtype='|S12')[numpy.newaxis]
matrixAverageScores = numpy.concatenate((labels_row, matrixAverageScores), 0)
labels_col = numpy.insert(labels_row, 0, " ")[numpy.newaxis].T
matrixAverageScores = numpy.concatenate((labels_col, matrixAverageScores), 1)
with open(outputFile, 'w') as outFile:
for row in matrixAverageScores:
outFile.write('\t'.join(row))
outFile.write('\n')
print " took " + str(time.clock() - time_writetofile) + "s."
print "Done."
print " Total execution time: " + str(time.clock() - time_entireprogram)
if __name__ == "__main__":
main(sys.argv[1:])
| [
"doro0072@umn.edu"
] | doro0072@umn.edu |
de4798e70d7c9c101c756128701b3dde305bd873 | 006ff11fd8cfd5406c6f4318f1bafa1542095f2a | /Validation/CheckOverlap/test/python/runFP420_cfg.py | bb8cd03847118c55541afbb0a89d58fb4eb5fa73 | [] | permissive | amkalsi/cmssw | 8ac5f481c7d7263741b5015381473811c59ac3b1 | ad0f69098dfbe449ca0570fbcf6fcebd6acc1154 | refs/heads/CMSSW_7_4_X | 2021-01-19T16:18:22.857382 | 2016-08-09T16:40:50 | 2016-08-09T16:40:50 | 262,608,661 | 0 | 0 | Apache-2.0 | 2020-05-09T16:10:07 | 2020-05-09T16:10:07 | null | UTF-8 | Python | false | false | 2,204 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("PROD")
process.load("SimGeneral.HepPDTESSource.pdt_cfi")
process.load("Geometry.CMSCommonData.cmsAllGeometryXML_cfi")
process.load("Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi")
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.load("SimG4Core.Application.g4SimHits_cfi")
process.MessageLogger = cms.Service("MessageLogger",
destinations = cms.untracked.vstring('cout'),
categories = cms.untracked.vstring('G4cout', 'G4cerr'),
cout = cms.untracked.PSet(
default = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
G4cout = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
G4cerr = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
)
),
)
process.RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService",
moduleSeeds = cms.PSet(
generator = cms.untracked.uint32(456789),
g4SimHits = cms.untracked.uint32(9876),
VtxSmeared = cms.untracked.uint32(12345)
),
sourceSeed = cms.untracked.uint32(98765)
)
process.source = cms.Source("EmptySource")
process.generator = cms.EDProducer("FlatRandomEGunProducer",
PGunParameters = cms.PSet(
PartID = cms.vint32(14),
MinEta = cms.double(-3.5),
MaxEta = cms.double(3.5),
MinPhi = cms.double(-3.14159265359),
MaxPhi = cms.double(3.14159265359),
MinE = cms.double(9.99),
MaxE = cms.double(10.01)
),
AddAntiParticle = cms.bool(False),
Verbosity = cms.untracked.int32(0),
firstRun = cms.untracked.uint32(1)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.p1 = cms.Path(process.generator*process.g4SimHits)
process.g4SimHits.UseMagneticField = False
process.g4SimHits.Physics.type = 'SimG4Core/Physics/DummyPhysics'
process.g4SimHits.Physics.DummyEMPhysics = True
process.g4SimHits.Watchers = cms.VPSet(cms.PSet(
type = cms.string('CheckOverlap'),
Resolution = cms.untracked.int32(1000),
NodeNames = cms.untracked.vstring('FP420E')
))
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
442bf86ccac1d097b67f928e10a2d28a7d1a246a | 390a9771799a8264b3c0c8c61cc7e1bf97ef2d79 | /day23.py | ee7b0c961b026c7a9198fb1b34e91d632c061fa0 | [] | no_license | Goldenlion5648/AdventOfCode2017 | 2bbf96d03017eceaac1279413dc3387359d03a6f | 482f2c0d5eba49a29c4631ea131753945cfe3baa | refs/heads/master | 2022-12-12T06:20:41.812048 | 2020-09-19T05:08:35 | 2020-09-19T05:08:35 | 289,359,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,833 | py | from collections import Counter
a='''set b 99
set c b
jnz a 2
jnz 1 5
mul b 100
sub b -100000
set c b
sub c -17000
set f 1
set d 2
set e 2
set g d
mul g e
sub g b
jnz g 2
set f 0
sub e -1
set g e
sub g b
jnz g -8
sub d -1
set g d
sub g b
jnz g -13
jnz f 2
sub h -1
set g b
sub g c
jnz g 2
jnz 1 3
sub b -17
jnz 1 -23'''
positions = Counter([chr(i) for i in range(97, 97 + 8)])
for i in positions:
positions[i] -= 1
print(positions)
instructs = a.split("\n")
curInstruct = 0
# for i in instructs:
count = 0
while curInstruct < len(instructs):
i = instructs[curInstruct]
inst, b, c = i.split(" ")
jumped = False
try:
b = int(b)
except:
pass
try:
c = int(c)
except:
pass
if inst == "set":
if type(b) == type(2):
positions[chr(b)+97] = c if type(c) == type(3) else positions[c]
else:
positions[b] = c if type(c) == type(3) else positions[c]
elif inst == "sub":
if type(b) == type(2):
positions[chr(b)+97] -= c if type(c) == type(3) else positions[c]
else:
positions[b] -= c if type(c) == type(3) else positions[c]
elif inst == "mul":
if type(b) == type(2):
positions[chr(b)+97] *= c if type(c) == type(3) else positions[c]
else:
positions[b] *= c if type(c) == type(3) else positions[c]
count += 1
elif inst == "jnz":
if type(b) == type(2):
if b != 0:
curInstruct += c if type(c) == type(3) else positions[c]
jumped = True
else:
if positions[b] != 0:
curInstruct += c if type(c) == type(3) else positions[c]
jumped = True
if jumped == False:
curInstruct += 1
print(count)
#part 1 done in 16:57, worked first try | [
"coboudinot@gmail.com"
] | coboudinot@gmail.com |
e287f62af9691dc1b39de1311b347e412c320ff9 | 20a9b7ff545620513b5607e9364ade8594b112c5 | /losantrest/flows.py | 0941b0083a765a3ddb11fab23ed514fb663f2a19 | [
"MIT"
] | permissive | jaredjlobo/losant-rest-python | fdf01c2dd3cf2df113969bc644b3a4fb2ce963cb | fe1ddd921a0cc6f3cf4b2396ad9592a5512d0501 | refs/heads/master | 2022-04-24T08:05:18.430505 | 2020-04-30T17:46:06 | 2020-04-30T17:46:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,616 | py | """
The MIT License (MIT)
Copyright (c) 2020 Losant IoT, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
""" Module for Losant API Flows wrapper class """
# pylint: disable=C0301
class Flows(object):
""" Class containing all the actions for the Flows Resource """
def __init__(self, client):
self.client = client
def get(self, **kwargs):
"""
Returns the flows for an application
Authentication:
The client must be configured with a valid api
access token to call this action. The token
must include at least one of the following scopes:
all.Application, all.Application.read, all.Organization, all.Organization.read, all.User, all.User.read, flows.*, or flows.get.
Parameters:
* {string} applicationId - ID associated with the application
* {string} sortField - Field to sort the results by. Accepted values are: name, id, creationDate, lastUpdated
* {string} sortDirection - Direction to sort the results by. Accepted values are: asc, desc
* {string} page - Which page of results to return
* {string} perPage - How many items to return per page
* {string} filterField - Field to filter the results by. Blank or not provided means no filtering. Accepted values are: name
* {string} filter - Filter to apply against the filtered field. Supports globbing. Blank or not provided means no filtering.
* {string} flowClass - Filter the workflows by the given flow class. Accepted values are: edge, cloud, customNode, experience
* {hash} triggerFilter - Array of triggers to filter by - always filters against default flow version. (https://api.losant.com/#/definitions/flowTriggerFilter)
* {string} includeCustomNodes - If the result of the request should also include the details of any custom nodes referenced by the returned workflows
* {string} losantdomain - Domain scope of request (rarely needed)
* {boolean} _actions - Return resource actions in response
* {boolean} _links - Return resource link in response
* {boolean} _embedded - Return embedded resources in response
Responses:
* 200 - Collection of flows (https://api.losant.com/#/definitions/flows)
Errors:
* 400 - Error if malformed request (https://api.losant.com/#/definitions/error)
* 404 - Error if application was not found (https://api.losant.com/#/definitions/error)
"""
query_params = {"_actions": "false", "_links": "true", "_embedded": "true"}
path_params = {}
headers = {}
body = None
if "applicationId" in kwargs:
path_params["applicationId"] = kwargs["applicationId"]
if "sortField" in kwargs:
query_params["sortField"] = kwargs["sortField"]
if "sortDirection" in kwargs:
query_params["sortDirection"] = kwargs["sortDirection"]
if "page" in kwargs:
query_params["page"] = kwargs["page"]
if "perPage" in kwargs:
query_params["perPage"] = kwargs["perPage"]
if "filterField" in kwargs:
query_params["filterField"] = kwargs["filterField"]
if "filter" in kwargs:
query_params["filter"] = kwargs["filter"]
if "flowClass" in kwargs:
query_params["flowClass"] = kwargs["flowClass"]
if "triggerFilter" in kwargs:
query_params["triggerFilter"] = kwargs["triggerFilter"]
if "includeCustomNodes" in kwargs:
query_params["includeCustomNodes"] = kwargs["includeCustomNodes"]
if "losantdomain" in kwargs:
headers["losantdomain"] = kwargs["losantdomain"]
if "_actions" in kwargs:
query_params["_actions"] = kwargs["_actions"]
if "_links" in kwargs:
query_params["_links"] = kwargs["_links"]
if "_embedded" in kwargs:
query_params["_embedded"] = kwargs["_embedded"]
path = "/applications/{applicationId}/flows".format(**path_params)
return self.client.request("GET", path, params=query_params, headers=headers, body=body)
def get_by_version(self, **kwargs):
    """
    Returns the flows by version for an application.

    Authentication:
    The client must be configured with a valid api access token that
    includes at least one of the following scopes:
    all.Application, all.Application.read, all.Organization, all.Organization.read, all.User, all.User.read, flows.*, or flows.getByVersion.

    Parameters:
    * {string} applicationId - ID associated with the application
    * {string} sortField - Field to sort the results by (name, id, creationDate, lastUpdated)
    * {string} sortDirection - Sort direction (asc, desc)
    * {string} page - Which page of results to return
    * {string} perPage - How many items to return per page
    * {string} filterField - Field to filter the results by (name); blank means no filtering
    * {string} filter - Filter value for the filtered field; supports globbing
    * {string} flowClass - Filter by flow class (edge, cloud, customNode, experience)
    * {string} version - Return the workflow versions for the given version
    * {hash} triggerFilter - Array of triggers to filter by (always against the default flow version)
    * {string} includeCustomNodes - Whether to include details of referenced custom nodes
    * {string} losantdomain - Domain scope of request (rarely needed)
    * {boolean} _actions - Return resource actions in response
    * {boolean} _links - Return resource link in response
    * {boolean} _embedded - Return embedded resources in response

    Responses:
    * 200 - Collection of flow versions (https://api.losant.com/#/definitions/flowVersions)

    Errors:
    * 400 - Error if malformed request (https://api.losant.com/#/definitions/error)
    * 404 - Error if application was not found (https://api.losant.com/#/definitions/error)
    """
    query_params = {"_actions": "false", "_links": "true", "_embedded": "true"}
    path_params = {}
    headers = {}
    body = None

    if "applicationId" in kwargs:
        path_params["applicationId"] = kwargs["applicationId"]

    # Copy every recognized query-string option straight through from kwargs.
    for name in ("sortField", "sortDirection", "page", "perPage", "filterField",
                 "filter", "flowClass", "version", "triggerFilter",
                 "includeCustomNodes", "_actions", "_links", "_embedded"):
        if name in kwargs:
            query_params[name] = kwargs[name]

    if "losantdomain" in kwargs:
        headers["losantdomain"] = kwargs["losantdomain"]

    path = "/applications/{applicationId}/flows/version".format(**path_params)
    return self.client.request("GET", path, params=query_params, headers=headers, body=body)
def api_import(self, **kwargs):
    """
    Import a set of flows and flow versions.

    Authentication:
    The client must be configured with a valid api access token that
    includes at least one of the following scopes:
    all.Application, all.Organization, all.User, flows.*, or flows.import.

    Parameters:
    * {string} applicationId - ID associated with the application
    * {hash} importData - New flow and flow version information (https://api.losant.com/#/definitions/flowsImportPost)
    * {string} losantdomain - Domain scope of request (rarely needed)
    * {boolean} _actions - Return resource actions in response
    * {boolean} _links - Return resource link in response
    * {boolean} _embedded - Return embedded resources in response

    Responses:
    * 201 - Successfully imported workflows (https://api.losant.com/#/definitions/flowsImportResult)

    Errors:
    * 400 - Error if malformed request (https://api.losant.com/#/definitions/error)
    * 404 - Error if application was not found (https://api.losant.com/#/definitions/error)
    """
    query_params = {"_actions": "false", "_links": "true", "_embedded": "true"}
    path_params = {}
    headers = {}

    if "applicationId" in kwargs:
        path_params["applicationId"] = kwargs["applicationId"]
    # The import payload becomes the request body; None when absent.
    body = kwargs.get("importData")
    if "losantdomain" in kwargs:
        headers["losantdomain"] = kwargs["losantdomain"]
    for name in ("_actions", "_links", "_embedded"):
        if name in kwargs:
            query_params[name] = kwargs[name]

    path = "/applications/{applicationId}/flows/import".format(**path_params)
    return self.client.request("POST", path, params=query_params, headers=headers, body=body)
def post(self, **kwargs):
    """
    Create a new flow for an application.

    Authentication:
    The client must be configured with a valid api access token that
    includes at least one of the following scopes:
    all.Application, all.Organization, all.User, flows.*, or flows.post.

    Parameters:
    * {string} applicationId - ID associated with the application
    * {hash} flow - New flow information (https://api.losant.com/#/definitions/flowPost)
    * {string} includeCustomNodes - Whether to include details of referenced custom nodes
    * {string} losantdomain - Domain scope of request (rarely needed)
    * {boolean} _actions - Return resource actions in response
    * {boolean} _links - Return resource link in response
    * {boolean} _embedded - Return embedded resources in response

    Responses:
    * 201 - Successfully created flow (https://api.losant.com/#/definitions/flow)

    Errors:
    * 400 - Error if malformed request (https://api.losant.com/#/definitions/error)
    * 404 - Error if application was not found (https://api.losant.com/#/definitions/error)
    """
    query_params = {"_actions": "false", "_links": "true", "_embedded": "true"}
    path_params = {}
    headers = {}

    if "applicationId" in kwargs:
        path_params["applicationId"] = kwargs["applicationId"]
    # The new flow definition becomes the request body; None when absent.
    body = kwargs.get("flow")
    if "includeCustomNodes" in kwargs:
        query_params["includeCustomNodes"] = kwargs["includeCustomNodes"]
    if "losantdomain" in kwargs:
        headers["losantdomain"] = kwargs["losantdomain"]
    for name in ("_actions", "_links", "_embedded"):
        if name in kwargs:
            query_params[name] = kwargs[name]

    path = "/applications/{applicationId}/flows".format(**path_params)
    return self.client.request("POST", path, params=query_params, headers=headers, body=body)
| [
"michael@losant.com"
] | michael@losant.com |
94746edf9a932d875147668aab6a36f4409afd21 | a0a04e207f4ba8d48f449f246c23f1db4f06f3a3 | /028-字符串的排列/test_permutation.py | 62162b8c7f5e06632feb4af9257a1f18de73135a | [
"Apache-2.0"
] | permissive | Jay54520/Learn-Algorithms-With-Python | e9a0fb70630368dea52d2b2307766a3190b0551d | 5fdd3a607ee3828e9b229cac8104fcccf1a2770d | refs/heads/master | 2021-06-06T15:55:34.297862 | 2020-02-03T13:06:48 | 2020-02-03T13:06:48 | 142,644,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | # -*- coding: utf-8 -*-
import unittest
from permutation import Solution
s = Solution()
class Test(unittest.TestCase):
    """Unit tests for Solution.Permutation (module-level instance ``s``)."""

    def test_empty(self):
        # An empty input string yields an empty permutation list.
        self.assertEqual(s.Permutation(''), [])

    def test_not_empty(self):
        # All distinct permutations of 'abcc' in lexicographic order;
        # each permutation appears once despite the repeated 'c'.
        expected = ('abcc acbc accb bacc bcac bcca '
                    'cabc cacb cbac cbca ccab ccba').split()
        self.assertEqual(s.Permutation('abcc'), expected)
| [
"jsm0834@175game.com"
] | jsm0834@175game.com |
88917a546cf6b78403ff35ece587c512e0f076ee | 622a338ee1f856e542e14757b761546aa4267604 | /confu/isa.py | ddfa5a58dfcb9268fd62c9b758090d785344f16b | [
"MIT"
] | permissive | Maratyszcza/confu | ad8f30998d6d6ed4b37b72b6d63b7fd8ba549f1d | 4f3d0e73d20dbae54c154817d70f74b6a63940e1 | refs/heads/master | 2023-06-05T15:49:05.476642 | 2020-04-12T20:00:19 | 2020-04-12T20:14:52 | 79,974,006 | 14 | 14 | MIT | 2020-01-06T22:34:03 | 2017-01-25T01:55:44 | Python | UTF-8 | Python | false | false | 1,086 | py | from copy import copy
class InstructionSet:
def __init__(self, tags=None, generate_flags_fn=None):
if tags is None:
self.tags = set()
elif isinstance(tags, str):
self.tags = set((tags,))
else:
self.tags = set(tags)
self.generate_flags = generate_flags_fn
def get_flags(self, compiler):
if self.generate_flags is not None:
return self.generate_flags(self.tags, compiler)
else:
return list()
def __str__(self):
return self.name
def __add__(self, instruction_set):
if not isinstance(instruction_set, InstructionSet):
raise TypeError("Invalid instruction set type; InstructionSet expected")
if self.generate_flags is not None and self.generate_flags is not instruction_set.generate_flags:
raise ValueError("Instruction sets %s and %s are mutually incompatible" %
(self.tags[-1], instruction_set.tags[0]))
return InstructionSet(self.tags.union(instruction_set.tags), self.generate_flags)
| [
"maratek@gmail.com"
] | maratek@gmail.com |
8de1a3271ecd12fe08217ce896537bf1fe3ca5e0 | 9797814d5334273f538279c3c015957e8a1f06c0 | /search/views_api.py | 9a2d0182784d4525a2b0481b51d452137d9e74c0 | [] | no_license | gurkarangulati/yapster_api | a7b4584ef10985d9d9071fc8ef5a4945c6b344fb | 2ef7523a9a4bbb6f49498cca71b4e22ea2886025 | refs/heads/master | 2020-12-11T09:03:18.622804 | 2015-04-20T22:44:42 | 2015-04-20T22:44:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,666 | py | from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework import generics
from rest_framework.views import APIView
from django.contrib.auth.models import User
from django.utils import timezone
from yapster_utils import check_session
from users.models import *
from users.serializers import *
from search.models import *
from search.serializers import *
from yap.serializers import *
import datetime
from datetime import timedelta
from django.db.models import Count
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
class DefaultSearch(APIView):
    """
    Run (or resume) a Search and return serialized, paginated results.

    POST payload keys (all optional unless noted):
      * user_id / session_id - authenticate the caller; anonymous when absent
      * search_id            - resume an existing Search instead of creating one
      * screen, text, longitude, latitude - used when creating a new Search
      * search_type          - "all" (default), "users", "libraries" or "yaps"
      * page / amount        - pagination controls (defaults: 1 / 5)
    """

    def post(self, request, **kwargs):
        # Copy the DRF payload into a plain dict (old-style request.DATA API).
        request = {k: v for k, v in request.DATA.iteritems()}

        if 'user_id' in request:
            user = User.objects.get(pk=request['user_id'])
            check = check_session(user, request['session_id'])
            if not check[1]:
                # BUG FIX: this Response was previously built but never
                # returned, so invalid sessions fell through and were
                # handled as if the user were authenticated.
                return Response({"valid": False, "message": check[0]})
        else:
            user = None

        if "search_id" in request:
            search = Search.objects.get(pk=request['search_id'])
        else:
            screen = request.get("screen")
            text = request.get("text")
            longitude = request.get("longitude", None)
            latitude = request.get("latitude", None)
            # Map the originating screen onto the matching Search flag field.
            screen_flags = {
                "dashboard_subscribed": "origin_dashboard_subscribed_screen_flag",
                "dashboard_subscribed_view_all_users": "origin_dashboard_subscribed_view_all_users_screen_flag",
                "dashboard_subscribed_view_all_libraries": "origin_dashboard_subscribed_view_all_libraries_screen_flag",
                "dashboard_explore": "origin_dashboard_explore_screen_flag",
                "dashboard_explore_view_all_users": "origin_dashboard_explore_view_all_users_screen_flag",
                "dashboard_explore_view_all_libraries": "origin_dashboard_explore_view_all_libraries_screen_flag",
                "dashboard_explore_view_all": "origin_dashboard_explore_view_all_screen_flag",
                "profile": "origin_profile_screen_flag",
                # NOTE(review): "library_details" set origin_profile_screen_flag in
                # the original code (likely copy/paste) - preserved; confirm intent.
                "library_details": "origin_profile_screen_flag",
                "web": "origin_web_screen_flag",
            }
            if screen not in screen_flags:
                # BUG FIX: an unrecognized screen previously left `search`
                # unassigned and crashed with a NameError further down.
                return Response({"valid": False, "message": "Unknown screen."})
            create_kwargs = {screen_flags[screen]: True}
            search = Search.objects.create(user=user, text=text, latitude=latitude,
                                           longitude=longitude, **create_kwargs)

        search_type = request.get("search_type", "all")
        page = request.get('page', 1)
        amount = request.get('amount', 5)

        if search_type == "all":
            serialized = SearchResultsSerializer(search, context={'user': user})
            return Response({"valid": True, "search_id": search.pk, "data": serialized.data})
        elif search_type == "users":
            search_results = search.default_user_search()
        elif search_type == "libraries":
            search_results = search.default_library_search()
        elif search_type == "yaps":
            search_results = search.default_yap_search()
        else:
            # BUG FIX: an unrecognized type previously raised a NameError at
            # pagination; reject it explicitly instead.
            return Response({"valid": False, "message": "Unknown search_type."})

        paginator = Paginator(object_list=search_results, per_page=amount,
                              allow_empty_first_page=False)
        try:
            results = paginator.page(page)
        except PageNotAnInteger:
            # If page is not an integer, reject the request.
            return Response({"valid": False, "message": "Page is not an integer."})
        except EmptyPage:
            # If page is out of range (e.g. 9999), deliver an empty payload.
            return Response({"valid": True, "data": None})

        if search_type == "users":
            serialized = UserSerializer(results, many=True, context={'user': user})
        elif search_type == "libraries":
            serialized = LibraryPreviewSerializer(results, many=True, context={'user': user})
        elif search_type == "yaps":
            serialized = AbstractYapSerializer(results, many=True, context={'user': user})
        return Response({"valid": True, "search_id": search.pk, "data": serialized.data})
class ExploreScreenStatistics(APIView):
    """Return explore-screen statistics for a session-validated user."""

    def post(self, request, **kwargs):
        # Copy the DRF payload into a plain dict (old-style request.DATA API).
        payload = {k: v for k, v in request.DATA.iteritems()}
        user = User.objects.get(pk=payload['user_id'])
        check = check_session(user, payload['session_id'])
        if not check[1]:
            # Session check failed; echo the check message back.
            return Response(check[0])
        return Response(ExploreScreenStatisticsSerializer(user).data)
class Top12PopularHashtags(APIView):
    """Return up to 12 trending hashtags from recent public, active yaps."""

    def post(self, request, format=None):
        # Copy the DRF payload into a plain dict (old-style request.DATA API).
        payload = {k: v for k, v in request.DATA.iteritems()}
        user = User.objects.get(pk=payload['user_id'])
        check = check_session(user, payload['session_id'])
        if not check[1]:
            # Session check failed; echo the check message back.
            return Response(check[0])

        window_minutes = 2880  # 48-hour lookback window
        top_n = 12
        cutoff = datetime.datetime.now() - datetime.timedelta(minutes=window_minutes)
        recent_yaps = Yap.objects.filter(hashtags_flag=True, is_active=True,
                                         is_private=False, date_created__gte=cutoff)
        candidates = Hashtag.objects.filter(yaps__in=recent_yaps, is_blocked=False)
        ranked = sorted(set(candidates), key=hashtag_trending_score, reverse=True)[:top_n]
        if isinstance(ranked, str):
            # Defensive check preserved from the original implementation.
            return Response(None)
        serialized = HashtagSerializer(ranked, data=self.request.DATA, many=True)
        return Response(serialized.data)
"gsgulati14@gmail.com"
] | gsgulati14@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.