index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
985,000 | 5c846b3f0ce0d4e019f29302d6ed1f5f4d320c74 | #!/usr/bin/python
# -*- coding: utf8 -*-
# ################################################################################
# ## Redistributions of source code must retain the above copyright notice
# ## DATE: 2011-11-27
# ## AUTHOR: SecPoint ApS
# ## MAIL: info@secpoint.com
# ## SITE: http://www.secpoint.com
# ##
# ## LICENSE: BSD http://www.opensource.org/licenses/bsd-license.php
# ################################################################################
# ## 1.0 initial release
# ## 1.1 google query generating option (-q)
# ## 1.2 generating HTML output
# ## 1.3 added support for multiple sites generation (-m option)
# ## 1.5 friendly output and examples, database update
from optparse import OptionParser
import os.path
from urllib import quote_plus
# Tool version string, reported via optparse's --version flag and in help text.
VERSION = '1.5'
# Usage examples shown when the script is run without a source-file argument.
SAMPLES="""
Command line examples:
1-generate list of search strings for finding login pages
./googleDB-tool.py "login_pages.txt"
2-generate list of Google queries for finding login pages
./googleDB-tool.py "login_pages.txt" -q
3-same as 2, but in HTML format
./googleDB-tool.py "login_pages.txt" -q -t
4-same as 3, but save to "OUT.html"
./googleDB-tool.py "login_pages.txt" -q -t -o "OUT.html"
5-generate queries as in 4, but only for site.com
./googleDB-tool.py "login_pages.txt" -q -t -o "OUT.html" -s site.com
6-all of the above, for multiple sites from "sites.txt" list
./googleDB-tool.py "login_pages.txt" -q -t -o OUT.html -s site.com -m sites.txt
"""
def get_strings(src_file):
    """Read search strings from src_file, one stripped line per list entry.

    Returns an empty list when the file cannot be read; callers treat an
    empty result as "no data".
    """
    try:
        # 'with' guarantees the handle is closed even if reading fails
        # (the original left the file object open and used a bare except).
        with open(src_file, 'r') as handle:
            return [line.strip() for line in handle]
    except (IOError, OSError):
        # Preserve the original best-effort contract: any read failure
        # yields an empty list rather than an exception.
        return []
def append_sitename(strs, site):
    """Append a Google 'site:' restriction for *site* to every string."""
    suffix = ' site:' + site
    return [entry + suffix for entry in strs]
def gen_google_query(strs):
    """Build a Google search URL for every search string.

    Returns [strs, urls] where urls[i] is the URL-encoded query for strs[i].
    """
    base_url = "http://www.google.com/search?q="
    urls = [base_url + quote_plus(entry) for entry in strs]
    return [strs, urls]
def gen_html_output(strs, q):
    """Render the strings and their query URLs as an HTML bullet list.

    strs[i] is shown as the link text for URL q[i].  Returns the page as a
    list of lines (newline-terminated except the final closing tags).
    """
    page = [
        '<html>\n',
        '<head><title>SecPoint.com GoogleDB queries strings</title></head>\n',
        '<body>\n',
        '<p>Generated by: <a href="http://www.secpoint.com/">SecPoint.com</a> GoogleDB tool</p>\n',
        '\t<ul>\n',
    ]
    for text, url in zip(strs, q):
        page.append('\t\t<li><a href="%s">%s</a></li>\n' % (url, text))
    page.append('\t</ul>\n')
    page.append('</body>\n</html>')
    return page
def save_output(strs,out_f):
    """saving/printing results

    Joins the result lines with newlines and either writes them to out_f
    (when given) or prints them to stdout.  A write failure aborts the
    whole script via exit().
    """
    res = "\n".join(strs)
    if out_f:
        try:
            # NOTE(review): the file handle is never closed explicitly;
            # CPython closes it on garbage collection
            open(out_f,'w').write(res)
        except:
            print "Error! Couldn't save output file!"
            exit()
    else:
        print res
def main():
    """Parsing options and starting engine

    Reads a source file from the local 'db' directory, optionally appends
    site restrictions (-s single site, -m site list), optionally converts
    each line to a Google query URL (-q) or an HTML page (-t), then prints
    the result or saves it to the -o file.
    """
    parser = OptionParser(usage="%prog <sourcefile> [-s site] [-q] [-t] [-f outfile]",
                          version="SecPoint.com %prog "+VERSION,
                          epilog="SecPoint.com Google Penetration Testing Hack Database v. "+VERSION)
    parser.add_option("-o", "--output", dest="filename",
                      help="save output to file", metavar="FILE")
    parser.add_option("-s", "--site", dest="sitename",
                      help="generate queries for the SITE", metavar="SITE")
    parser.add_option("-m", "--multiple", dest="listfilename",
                      help="generate queries for multiple sites listed in LISTFILE", metavar="LISTFILE")
    parser.add_option("-q", "--query",
                      action="store_true", dest="gen_query", default=False,
                      help="generate google query urls for each line")
    parser.add_option("-t", "--html",
                      action="store_true", dest="gen_html", default=False,
                      help="generate output in HTML format (implies -q)")
    (options, args) = parser.parse_args()
    if len(args) != 1:
        # no source file given: show banner, usage and examples, then quit
        print """SecPoint.com Google Penetration Testing Hack Database
The Portable Penetrator - Wifi Recovery - Vulnerability Scanner
http://www.secpoint.com/portable-penetrator.html
"""
        parser.print_help()
        print SAMPLES
        exit()
    #parser.error("please set source file (could be found in 'db' dir)")
    #all options
    site_name = options.sitename
    gen_html = options.gen_html
    gen_query = options.gen_query
    out_file = options.filename
    multlist_file = options.listfilename
    # source files are looked up relative to the script's own 'db' directory
    db_dir = os.path.join(os.path.dirname(__file__),'db')
    source_file = os.path.join(db_dir,args[0])
    if not os.path.isfile(source_file):
        parser.error("could not find source file! Please check if it exists in 'db' dir")
    #starting!
    strs = get_strings(source_file)
    if not strs:
        print "Can't get data from your source file!"
        exit()
    queries = []
    if site_name and multlist_file:
        print "Please use -s OR -m switches alone!"
        exit()
    if site_name:
        strs = append_sitename(strs,site_name)
    if multlist_file:
        if not os.path.isfile(multlist_file):
            print "Could not find file from -m switch!"
            exit()
        # NOTE(review): blank lines in the -m file produce bare ' site:'
        # suffixes; the file is assumed to hold one site per line
        mlst = open(multlist_file).read().split('\n')
        strsnew = [] #using multiple sites to create queries
        for i in mlst:
            strsnew.extend(append_sitename(strs,i))
        strs = strsnew
    if gen_query:
        [strs,queries] = gen_google_query(strs)
    if gen_html:
        if not gen_query: #if not previously generated (-t implies -q)
            [strs,queries] = gen_google_query(strs)
        strs = gen_html_output(strs,queries)
    else:
        if queries:
            strs = queries
    save_output(strs,out_file)
if __name__ == "__main__":
    main()
|
985,001 | 37a3f098a300346b80592cdca43d0cca0b80faf7 | #
# CS1010FC --- Programming Methodology
#
import random
import copy
from numpy.ma import absolute
import constants
class Logic:
"""
Has the "logic" of the grid
It can move/merge the grid (when user/AI wants to go in a sertain direction this class will perform
the movement of the data/matrix/grid)
It reads the values on the grid = get: score, game state
Extra: (NOT USED in this implementation) can reverse previous movements
to do so you need to use history matrix in your parent class...
"""
@staticmethod
def new_game(n):
"""
Initiates new game = matrix set up
:param n:
:return matrix:
"""
matrix = []
for i in range(n):
matrix.append([0] * n)
return matrix
@staticmethod
def add_tile(matrix):
"""
Official way -> Adds new tile, value 2 (80% chance) or 4 (20% chance), to the given matrix
Game rules: after a move in the game you have to add a new tile
:param matrix:
:return matrix:
"""
a = random.randint(0, len(matrix)-1)
b = random.randint(0, len(matrix)-1)
while matrix[a][b] != 0:
a = random.randint(0, len(matrix)-1)
b = random.randint(0, len(matrix)-1)
# setting chance of getting tile : value 2 (80% chance) or 4 (20% chance), to the given matrix
population = [2, 4]
weights = [0.8, 0.2]
matrix[a][b] = random.choices(population, weights)[0]
return matrix
@staticmethod
def add_two(matrix):
"""
Adds new tile, value 2
Game rules: after a move in the game you have to add a new tile
:param matrix:
:return matrix:
"""
a = random.randint(0, len(matrix) - 1)
b = random.randint(0, len(matrix) - 1)
while matrix[a][b] != 0:
a = random.randint(0, len(matrix) - 1)
b = random.randint(0, len(matrix) - 1)
matrix[a][b] = 2
return matrix
@staticmethod
def game_score_count_tile_values(matrix):
"""
Counts the total score of a matrix/game: sum of the tile values
:param matrix:
:return total_score:
"""
total_score = 0
for i in range(len(matrix)):
for j in range(len(matrix[0])):
total_score += matrix[i][j]
return total_score
@staticmethod
def game_score(matrix):
"""
Official way to score points -> higher tiles get more points than just their shown value
Counts the total score of a matrix/game: please see implementation, example: tile value 8 = (2+2) + (2+2) + (4+4),
Rule: tile of value 2 = score 0
:param matrix:
:return total_score:
"""
total_score = 0
for i in range(len(matrix)):
for j in range(len(matrix[0])):
value_to_multiply_tile = 0
tile_copy = copy.deepcopy(matrix[i][j])
# a tile of worth 0 = an empty tile in game
if tile_copy != 0 or tile_copy != 2:
while tile_copy > 2:
value_to_multiply_tile += 1
tile_copy /= 2
total_score += matrix[i][j] * value_to_multiply_tile
return total_score
# using weight matrix
@staticmethod
def game_score_with_weight_matrix(matrix):
"""
Counts the total score of a matrix/game: using formula of game_score() + multiplying it by the positional weight
of the tile (WEIGHT_MATRIX in constants.py)
:param matrix:
:return total_score:
"""
total_score = 0
# formula of game_score()
for i in range(len(matrix)):
for j in range(len(matrix[0])):
value_to_multiply_tile = 0
tile_copy = copy.deepcopy(matrix[i][j])
# a tile of worth 0 = an empty tile in game
if tile_copy != 0 or tile_copy != 2:
while tile_copy > 2:
value_to_multiply_tile += 1
tile_copy /= 2
# multiplying by positional weight
total_score += (matrix[i][j] * value_to_multiply_tile) * constants.WEIGHT_MATRIX[i][j]
# multiplying the positional weight to a tile with value 2
if matrix[i][j] == 2:
total_score += matrix[i][j] * constants.WEIGHT_MATRIX[i][j]
return total_score
# using weight matrix zigzag
@staticmethod
def game_score_with_weight_matrix_zig_zag(matrix):
"""
Counts the total score of a matrix/game: using formula of game_score() + multiplying it by the positional weight
of the tile (WEIGHT_MATRIX2 in constants.py)
:param matrix:
:return total_score:
"""
total_score = 0
# formula of game_score()
for i in range(len(matrix)):
for j in range(len(matrix[0])):
value_to_multiply_tile = 0
tile_copy = copy.deepcopy(matrix[i][j])
# a tile of worth 0 = an empty tile in game
if tile_copy != 0 or tile_copy != 2:
while tile_copy > 2:
value_to_multiply_tile += 1
tile_copy /= 2
# multiplying by positional weight
total_score += (matrix[i][j] * value_to_multiply_tile) * constants.WEIGHT_MATRIX2[i][j]
# multiplying the positional weight to a tile with value 2
if matrix[i][j] == 2:
total_score += matrix[i][j] * constants.WEIGHT_MATRIX2[i][j]
return total_score
    # using weight matrix and penalty score
    @staticmethod
    def game_score_with_weight_matrix_and_penalty(matrix):
        """
        Counts the total score of a matrix/game: using formula of game_score_with_weight_matrix() + penalty score
        penalty score = accumulated |tile - neighbourScore| per tile (see NOTES below)
        :param matrix:
        :return total_score:
        """
        total_score = 0
        penalty = 0
        neighbourScore = 0
        # NOTE(review): neighbourScore is never reset between tiles, so each
        # penalty term includes the neighbour sums of all previously visited
        # tiles — confirm whether a per-tile reset was intended.
        # formula of game_score()
        for i in range(len(matrix)):
            for j in range(len(matrix[0])):
                value_to_multiply_tile = 0
                tile_copy = copy.deepcopy(matrix[i][j])
                # a tile of worth 0 = an empty tile in game
                # NOTE(review): `!= 0 or != 2` is always true; harmless here
                # because the while condition already skips 0 and 2
                if tile_copy != 0 or tile_copy != 2:
                    while tile_copy > 2:
                        value_to_multiply_tile += 1
                        tile_copy /= 2
                # multiplying by positional weight
                total_score += (matrix[i][j] * value_to_multiply_tile) * constants.WEIGHT_MATRIX[i][j]
                # multiplying the positional weight to a tile with value 2
                if matrix[i][j] == 2:
                    total_score += matrix[i][j] * constants.WEIGHT_MATRIX[i][j]
                if matrix[i][j] == 0:
                    # freeSpaceScore += 50
                    pass
                # Getting the neighbours tile values of current tile
                # If tile at position [i][j] doesn't exist... just skip to the next
                # NOTE(review): the j-1 / i-1 lookups never raise IndexError at
                # 0 — Python wraps index -1 to the last element, so the
                # opposite edge is counted as a neighbour; confirm intent.
                try:
                    neighbourScore += matrix[i][j+1]
                except IndexError:
                    pass
                try:
                    neighbourScore += matrix[i+1][j]
                except IndexError:
                    pass
                try:
                    neighbourScore += matrix[i][j - 1]
                except IndexError:
                    pass
                try:
                    neighbourScore += matrix[i-1][j]
                except IndexError:
                    pass
                penalty += absolute(matrix[i][j] - neighbourScore)
        # NOTE(review): despite its name, penalty is ADDED to the score,
        # not subtracted — confirm intent.
        return total_score + penalty
@staticmethod
def game_state(matrix):
"""
Made to play even beyond getting a 2048 tile
Gives the current game state:
'lose' = no empty tiles + no merges possible (no moves)
'not over" = no 'lose' or 'win', keep on playing
:param matrix:
:return game_state: 'lose' | 'not over'
"""
"""
# To set winning tile
for i in range(len(matrix)):
for j in range(len(matrix[0])):
if matrix[i][j] == 2048:
# return 'win'
# return 'not over'
"""
for i in range(len(matrix)-1):
# intentionally reduced to check the row on the right and below
# more elegant to use exceptions but most likely this will be their solution
for j in range(len(matrix[0])-1):
if matrix[i][j] == matrix[i+1][j] or matrix[i][j+1] == matrix[i][j]:
return 'not over'
for i in range(len(matrix)): # check for any zero entries
for j in range(len(matrix[0])):
if matrix[i][j] == 0:
return 'not over'
for k in range(len(matrix)-1): # to check the left/right entries on the last row
if matrix[len(matrix)-1][k] == matrix[len(matrix)-1][k+1]:
return 'not over'
for j in range(len(matrix)-1): # check up/down entries on last column
if matrix[j][len(matrix)-1] == matrix[j+1][len(matrix)-1]:
return 'not over'
return 'lose'
@staticmethod
def game_state_win_text(matrix):
"""
Gives the current game state:
'win' = gotten a tile of 2048 (NOT USED)
'lose' = no empty tiles + no merges possible (no moves)
'not over" = no 'lose' or 'win', keep on playing
:param matrix:
:return game_state: 'win' | 'lose' | 'not over'
"""
# To set winning tile
for i in range(len(matrix)):
for j in range(len(matrix[0])):
if matrix[i][j] == 2048:
return 'win'
# return 'not over'
for i in range(len(matrix) - 1):
# intentionally reduced to check the row on the right and below
# more elegant to use exceptions but most likely this will be their solution
for j in range(len(matrix[0]) - 1):
if matrix[i][j] == matrix[i + 1][j] or matrix[i][j + 1] == matrix[i][j]:
return 'not over'
for i in range(len(matrix)): # check for any zero entries
for j in range(len(matrix[0])):
if matrix[i][j] == 0:
return 'not over'
for k in range(len(matrix) - 1): # to check the left/right entries on the last row
if matrix[len(matrix) - 1][k] == matrix[len(matrix) - 1][k + 1]:
return 'not over'
for j in range(len(matrix) - 1): # check up/down entries on last column
if matrix[j][len(matrix) - 1] == matrix[j + 1][len(matrix) - 1]:
return 'not over'
return 'lose'
@staticmethod
def highest_tile(matrix):
tile = 0
for i in range(len(matrix)):
for j in range(len(matrix[0])):
if matrix[i][j] > tile:
tile = matrix[i][j]
return tile
@staticmethod
def reverse(matrix):
"""
(NOT USED in this implementation) can reverse previous movements
to do so you need to use history matrix in your parent class...
:param matrix:
:return new:
"""
new = []
for i in range(len(matrix)):
new.append([])
for j in range(len(matrix[0])):
new[i].append(matrix[i][len(matrix[0])-j-1])
return new
# NEXT METHODS ARE USED FOR MATRIX CHANGES... NOT IN SCOPE OF ASSIGNMENT
@staticmethod
def transpose(matrix):
new = []
for i in range(len(matrix[0])):
new.append([])
for j in range(len(matrix)):
new[i].append(matrix[j][i])
return new
@staticmethod
def cover_up(matrix):
new = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
done = False
for i in range(4):
count = 0
for j in range(4):
if matrix[i][j] != 0:
new[i][count] = matrix[i][j]
if j != count:
done = True
count += 1
return new, done
@staticmethod
def merge(matrix):
done = False
for i in range(4):
for j in range(3):
if matrix[i][j] == matrix[i][j+1] and matrix[i][j] != 0:
matrix[i][j] *= 2
score = matrix[i][j] *2
matrix[i][j+1] = 0
done = True
return matrix, done
@staticmethod
def up(game):
# print("up")
# return matrix after shifting up
game = Logic.transpose(game)
game, done = Logic.cover_up(game)
temp = Logic.merge(game)
game = temp[0]
done = done or temp[1]
game = Logic.cover_up(game)[0]
game = Logic.transpose(game)
return game, done
@staticmethod
def down(game):
# print("down")
game = Logic.reverse(Logic.transpose(game))
game, done = Logic.cover_up(game)
temp = Logic.merge(game)
game = temp[0]
done = done or temp[1]
game = Logic.cover_up(game)[0]
game = Logic.transpose( Logic.reverse(game))
return game, done
@staticmethod
def left(game):
# print("left")
# return matrix after shifting left
game, done = Logic.cover_up(game)
temp = Logic.merge(game)
game = temp[0]
done = done or temp[1]
game = Logic.cover_up(game)[0]
return game, done
@staticmethod
def right(game):
# print("right")
# return matrix after shifting right
game = Logic.reverse(game)
game, done = Logic.cover_up(game)
temp = Logic.merge(game)
game = temp[0]
done = done or temp[1]
game = Logic.cover_up(game)[0]
game = Logic.reverse(game)
return game, done
|
985,002 | 50cbfdc495fb477dc4ae7ea8610eba7ca4caa1b6 | import time
import torch
import torch.multiprocessing as mp
import numpy as np
import os
from A3C_Cnt.model import A3C_MLP, A3C_CONV
from A3C_Cnt.env import create_env
from A3C_Cnt.test import test
from A3C_Cnt.train import train
from A3C_Cnt.shared_optim import SharedRMSprop, SharedAdam
os.environ["OMP_NUM_THREADS"] = "1"
class A3C_Cnt_Trainer():
    def __init__(self, env_name, random_seed=1, lr=0.0001,
                 gamma=0.99, tau=0.99, workers=32, num_steps=10,
                 max_episode_length=100000, shared_optimizer=True, save_max=True,
                 optimizer=False, model='MLP', stack_frames=1, gpu_ids=-1, amsgrad=True, threshold=None):
        """Configure an A3C trainer: create the environment, build the shared
        model, optionally restore saved weights, and set up a shared optimizer.

        :param env_name: gym-style environment id passed to create_env
        :param optimizer: 'RMSprop' | 'Adam' to build a shared optimizer,
            or False (default) — see NOTE below
        :param gpu_ids: -1 for CPU, otherwise a list of GPU ids
        :param threshold: reward threshold for stopping; defaults to the
            environment's own spec.reward_threshold when None
        """
        self.algorithm_name = 'a3c_cnt'
        self.env_name = env_name
        self.stack_frames = stack_frames
        # keep both a local alias and an attribute for the created environment
        self.env = env = create_env(self.env_name, self.stack_frames)
        self.threshold = threshold
        self.random_seed = random_seed
        self.shared_optimizer = shared_optimizer
        self.optimizer = optimizer
        self.lr = lr
        self.amsgrad = amsgrad
        self.workers = workers
        self.gamma = gamma
        self.tau = tau
        self.num_steps = num_steps
        self.model = model
        self.save_max = save_max
        self.max_episode_length = max_episode_length
        self.log_dir = './log/'
        self.save_dir = './saved_models/'
        self.load = True
        # fall back to the environment's reward threshold when none is given
        # (NOTE(review): self.threshold was already assigned above, so only
        # the else branch changes anything)
        if not threshold == None:
            self.threshold = threshold
        else:
            self.threshold = self.env.spec.reward_threshold
        if gpu_ids == -1:
            self.gpu_ids = [-1]
        else:
            self.gpu_ids = gpu_ids
        # NOTE(review): called even in the CPU-only case (gpu_ids == -1);
        # confirm this is harmless on machines without CUDA
        torch.cuda.manual_seed(self.random_seed)
        if self.random_seed:
            print("Random Seed: {}".format(self.random_seed))
            self.env.seed(self.random_seed)
            torch.manual_seed(self.random_seed)
            np.random.seed(self.random_seed)
        if self.model == 'MLP':
            self.shared_model = A3C_MLP(env.observation_space.shape[0], env.action_space, self.stack_frames)
        if self.model == 'CONV':
            self.shared_model = A3C_CONV(self.stack_frames, env.action_space)
        if self.load:
            try:
                # best-effort restore of previously saved weights
                saved_state = torch.load('{0}{1}.dat'.format(
                    self.save_dir, self.env_name), map_location=lambda storage, loc: storage)
                self.shared_model.load_state_dict(saved_state)
            except:
                # NOTE(review): bare except silently swallows load failures
                pass
            # NOTE(review): printed even when loading failed above
            print ("Model loaded")
        self.shared_model.share_memory()
        if self.shared_optimizer:
            # NOTE(review): self.optimizer starts as the constructor flag and
            # is replaced by the optimizer object; if the flag is neither
            # 'RMSprop' nor 'Adam' (default False), share_memory() below
            # raises AttributeError — confirm callers always pass a name here
            if self.optimizer == 'RMSprop':
                self.optimizer = SharedRMSprop(self.shared_model.parameters(), lr=self.lr)
            if self.optimizer == 'Adam':
                self.optimizer = SharedAdam(self.shared_model.parameters(), lr=self.lr, amsgrad=self.amsgrad)
            self.optimizer.share_memory()
        else:
            self.optimizer = None
    def train(self):
        """Launch one monitoring/test process plus self.workers training
        processes, all sharing self.shared_model, and block until they exit.
        """
        print ("Training started ... ")
        # plain-dict configuration handed to the train/test worker functions
        args = {
            'env': self.env_name,
            'gpu_ids': self.gpu_ids,
            'log_dir': self.log_dir,
            'seed': self.random_seed,
            'stack_frames': self.stack_frames,
            'model': self.model,
            'save_max': self.save_max,
            'save_model_dir': self.save_dir,
            'lr': self.lr,
            'num_steps': self.num_steps,
            'gamma': self.gamma,
            'tau': self.tau,
            'max_episode_length': self.max_episode_length,
            'render': False,
            'save_gif': False,
            'threshold': self.threshold
        }
        self.processes = []
        # the test process periodically evaluates (and saves) the shared model
        p = mp.Process(target=test, args=(args, self.shared_model))
        p.start()
        self.processes.append(p)
        # short stagger between process launches
        time.sleep(0.1)
        for rank in range(0, self.workers):
            p = mp.Process(target=train, args=(
                rank, args, self.shared_model, self.optimizer))
            p.start()
            self.processes.append(p)
            time.sleep(0.1)
        # wait for every worker to finish
        for p in self.processes:
            time.sleep(0.1)
            p.join()
    def test(self):
        """Best-effort load of the latest saved weights, then run a single
        in-process evaluation pass with rendering and gif capture enabled.
        """
        try:
            saved_state = torch.load('{0}{1}.dat'.format(self.save_dir, self.env_name), map_location=lambda storage, loc: storage)
            self.shared_model.load_state_dict(saved_state)
        except:
            # NOTE(review): bare except hides the actual load error
            print ("Cant load test")
            pass
        args = {
            'env': self.env_name,
            'gpu_ids': self.gpu_ids,
            'log_dir': self.log_dir,
            'seed': self.random_seed,
            'stack_frames': self.stack_frames,
            'model': self.model,
            'save_max': self.save_max,
            'save_model_dir': self.save_dir,
            'lr': self.lr,
            'num_steps': self.num_steps,
            'gamma': self.gamma,
            'tau': self.tau,
            'max_episode_length': self.max_episode_length,
            'render': True,
            'save_gif': True,
            'threshold': self.threshold
        }
        # calls the module-level test() imported from A3C_Cnt.test,
        # not this method (local name resolution finds the global)
        test(args, self.shared_model)
985,003 | c5e4ded89f63031ad130eaa53e2977745f0f3d08 | from Tkinter import *
import tkFileDialog
import tkMessageBox
import os
import string
import time
import datetime
import csv
import numpy
import calendar
from scipy import stats
from scipy import optimize
from scipy import linspace
import math
import pylab as P
from matplotlib.backends.backend_pdf import PdfPages
import fnmatch
#confidence interval for linear regression analysis
confidence_interval=90.0
# output directory for merged/analyzed results
# NOTE(review): backslashes in this Windows path happen to be literal here,
# but a raw string (r'...') would be safer
resultsfoldername = 'C:\DATA_TOOLS_UOFS'
def MergeResults():
    """Merge analyzer .dat files overlapping the sampling window into one file.

    Reads sample start times from sampletimefilename, scans inputfoldername
    for .dat files whose filename date falls within one day of the sampling
    window, and writes all their rows (tab-separated) to a timestamped
    merged-results file in resultsfoldername.  Sets the module-global
    mergedresultsfilename for the caller.
    """
    global sampletimefilename
    global mergedresultsfilename
    global tempfilename
    inputfoldername = 'C:\UserDataiN2O18'
    opensampletimefile=open(sampletimefilename, 'rb')
    sampletimes = numpy.genfromtxt(opensampletimefile, delimiter=',', dtype=None, names=True)
    sampleepochtimes=[];
    c1=0
    # run through sampletimefile and find first and last date
    for row in sampletimes['SampleName']:
        samplestartstr=str(sampletimes['Year'][c1])+" "+str(sampletimes['Month'][c1])+" "+str(sampletimes['Day'][c1])+" "+str(sampletimes['Hour'][c1])+" "+str(sampletimes['Minute'][c1]) +" "+str(sampletimes['Second'][c1])
        samplestructtime=time.strptime(samplestartstr, "%Y %m %d %H %M %S")
        sampleepochtime=calendar.timegm(samplestructtime)
        sampleepochtimes.append(sampleepochtime)
        c1=c1+1
    sampleepochtimes=sorted(sampleepochtimes)
    firstsampletime=min(sampleepochtimes)
    lastsampletime=max(sampleepochtimes)
    print sampleepochtimes
    os.chdir(resultsfoldername)
    mergedresultsfilename=datetime.datetime.now().strftime('%Y%m%d_%H%M%S') + '_merged_results.txt'
    openresultsfile=open(mergedresultsfilename, 'wb')
    resultswriter = csv.writer(openresultsfile, delimiter='\t')
    resultswriter.writerow(['Date','Local_Time','Epoch_Time','N2O_ppmv','d15N', 'd15Nalpha', 'd15Nbeta','d18O', 'CO2','OutletValve'])
    os.chdir(inputfoldername)
    for dirpath, dirs, files in os.walk(inputfoldername):
        for filename in fnmatch.filter(files, '*.dat'):
            openinputfile=open(os.path.join(dirpath, filename),'rb')
            #print filename
            # filename pattern: <prefix>-<YYYYMMDD>-<rest>; take the date part
            junk,datestr,junk2=filename.split("-",2)
            #print datestr
            YMD=int(datestr)
            # integer-split YYYYMMDD into year / month / day
            yearoffile=YMD/10000
            monthoffile=(YMD-yearoffile*10000)/100
            dayoffile=YMD-yearoffile*10000-monthoffile*100
            filetimestr=str(yearoffile)+" "+str(monthoffile)+" "+str(dayoffile)+" 00 00 00"
            filestructtime=time.strptime(filetimestr, "%Y %m %d %H %M %S")
            fileepochtime=calendar.timegm(filestructtime)
            # only merge files dated within one day (86400 s) of the window
            if fileepochtime > firstsampletime-86400 and fileepochtime < lastsampletime+86400:
                fd = numpy.genfromtxt(openinputfile, dtype=None, names=True)
                c1=0
                for row in fd['TIME']:
                    # drop fractional seconds from the TIME column
                    temptime,junk=row.split(".",1)
                    datetimestr=str(fd['DATE'][c1])+" "+str(temptime)
                    tempstructtime=time.strptime(datetimestr, "%Y-%m-%d %H:%M:%S")
                    tempepochtime=time.mktime(tempstructtime)
                    tempepochtime=tempepochtime
                    tempstructtime=time.localtime(tempepochtime)
                    gooddate=time.strftime("%Y-%m-%d", tempstructtime)
                    goodtime=(time.strftime("%H:%M:%S", tempstructtime))
                    # CO2 column is written as a constant '0' placeholder
                    resultswriter.writerow([gooddate,goodtime,fd['EPOCH_TIME'][c1],fd['N2O'][c1],fd['d15N'][c1],fd['d15Nalpha'][c1],fd['d15Nbeta'][c1],fd['d18O'][c1],'0',fd['OutletValve'][c1]])
                    c1=c1+1
            openinputfile.close()
    openresultsfile.close()
def askopenresultsfilename():
    """Ask for the sample-times csv, merge analyzer data, and compute results.

    For every sample listed in the chosen csv this: slices the merged data
    stream into a stabilization window, a sampling window and a 2-minute
    tail; computes means and linear-regression slopes/intercepts over the
    sampling window; writes one csv row per sample; and plots the sampling
    window (pdffile1) and the whole run (pdffile2) to PDF files in
    resultsfoldername.
    """
    global sampletimefilename # file with the sample names and times (switcherlog)
    global mergedresultsfilename
    global tempfilename
    # get filename
    fileopen_opt = options = {}
    options['defaultextension'] = '.csv'
    options['filetypes'] = [('csv files', '.csv'),('all files', '.*')]
    options['initialdir'] = 'C:\UserDataiN2O18'
    options['initialfile'] = 'sample_times_names.csv'
    options['parent'] = root
    options['title'] = 'Choose a csv file with samplenames and times to open'
    sampletimefilename = tkFileDialog.askopenfilename(**fileopen_opt)
    # open file
    if sampletimefilename:
        tempfilename=datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
        # build the merged results file first (sets mergedresultsfilename)
        MergeResults()
        opensampletimefile=open(sampletimefilename, 'rb')
        os.chdir(resultsfoldername)
        openinputfile=open(mergedresultsfilename, 'rb')
        resultsfileName=tempfilename + '_results.csv'
        openresultsfile=open(resultsfileName, 'wb')
        pdffile1 = PdfPages(tempfilename +'_charts_sample_only.pdf')
        pdffile2 = PdfPages(tempfilename +'_charts_whole_run.pdf')
        sampletimes = numpy.genfromtxt(opensampletimefile, delimiter=',', dtype=None, names=True)
        print "amount samples"
        print len(sampletimes['SampleName'])
        iN2Odata = numpy.genfromtxt(openinputfile, delimiter='\t', dtype=None, names=True)
        print "amount datalines"
        print len(iN2Odata['Epoch_Time'])
        amountrows=len(iN2Odata['Epoch_Time'])
        resultswriter = csv.writer(openresultsfile, dialect='excel')
        resultswriter.writerow(['SampleName', 'Rundate','Runtime', 'Position', 'N2Omean',
            'N2Oslope', 'N2Ointercept','d15Nmean','d15Nslope','d15Nintercept', 'd15Amean',
            'd15Aslope', 'd15Aintercept','d15Bmean', 'd15Bslope', 'd15Bintercept',
            'd18Omean','d18Ointercept','d18Oslope','CO2mean','OutletValvemean'])
        # analysis windows come from the GUI entries
        stabilizesec=float(pretimeentry.get())
        sampletimesec=float(sampletimeentry.get())*60
        # just a counter c1 for keeping track of where we are in the samplelist file
        c1=0
        # just a counter c2 for keeping track of where we are in the results file
        c2=0
        for row in sampletimes['SampleName']:
            # unsuffixed lists = context (stabilization + tail); 's' suffix = sampling window
            xsec=[]; yN2O=[]; yd15N=[]; yd15A=[]; yd15B=[]; yd18O=[]; yCO2=[]; yOutletValve=[];
            xsecs=[]; yN2Os=[]; yd15Ns=[]; yd15As=[]; yd15Bs=[]; yd18Os=[]; yCO2s=[]; yOutletValves=[];
            samplestartstr=str(sampletimes['Year'][c1])+" "+str(sampletimes['Month'][c1])+" "+str(sampletimes['Day'][c1])+" "+str(sampletimes['Hour'][c1])+" "+str(sampletimes['Minute'][c1]) +" "+str(sampletimes['Second'][c1])
            samplestructtime=time.strptime(samplestartstr, "%Y %m %d %H %M %S")
            #sampleepochtime=time.mktime(samplestructtime)
            sampleepochtime=calendar.timegm(samplestructtime)
            # shift from local sample time to the data stream's UTC epoch
            sampleepochtime=sampleepochtime-(int(UTCoffsetentry.get())*3600)
            print sampletimes['SampleName'][c1]
            print time.strftime("%d %b %Y %H:%M:%S ", samplestructtime)
            print sampleepochtime
            # discard data before sample is started and stabilized
            while sampleepochtime > iN2Odata['Epoch_Time'][c2]:
                c2=c2+1
            # stabilization window: collected only for the whole-run charts
            while sampleepochtime+stabilizesec > iN2Odata['Epoch_Time'][c2]:
                xsec.append(iN2Odata['Epoch_Time'][c2]-sampleepochtime)
                yN2O.append(iN2Odata['N2O_ppmv'][c2])
                yd15N.append(iN2Odata['d15N'][c2])
                yd15A.append(iN2Odata['d15Nalpha'][c2])
                yd15B.append(iN2Odata['d15Nbeta'][c2])
                yd18O.append(iN2Odata['d18O'][c2])
                yCO2.append(iN2Odata['CO2'][c2])
                yOutletValve.append(iN2Odata['OutletValve'][c2])
                c2=c2+1
            # sampling window: used for the statistics and sample-only charts
            while sampleepochtime+stabilizesec+sampletimesec > iN2Odata['Epoch_Time'][c2]:
                xsecs.append(iN2Odata['Epoch_Time'][c2]-sampleepochtime)
                yN2Os.append(iN2Odata['N2O_ppmv'][c2])
                yd15Ns.append(iN2Odata['d15N'][c2])
                yd15As.append(iN2Odata['d15Nalpha'][c2])
                yd15Bs.append(iN2Odata['d15Nbeta'][c2])
                yd18Os.append(iN2Odata['d18O'][c2])
                yCO2s.append(iN2Odata['CO2'][c2])
                yOutletValves.append(iN2Odata['OutletValve'][c2])
                c2=c2+1
            # 2-minute tail after the sampling window, charts only
            while sampleepochtime+stabilizesec+sampletimesec+120 > iN2Odata['Epoch_Time'][c2]:
                xsec.append(iN2Odata['Epoch_Time'][c2]-sampleepochtime)
                yN2O.append(iN2Odata['N2O_ppmv'][c2])
                yd15N.append(iN2Odata['d15N'][c2])
                yd15A.append(iN2Odata['d15Nalpha'][c2])
                yd15B.append(iN2Odata['d15Nbeta'][c2])
                yd18O.append(iN2Odata['d18O'][c2])
                yCO2.append(iN2Odata['CO2'][c2])
                yOutletValve.append(iN2Odata['OutletValve'][c2])
                c2=c2+1
            # rescan the data stream from the start for the next sample
            c2=0
            print 'amount readings for this sample:' + str(len(yN2Os))
            rundate=time.strftime("%Y%m%d", samplestructtime)
            runtime=time.strftime("%H%M%S", samplestructtime)
            if len(yN2Os)>2:
                N2Omean=numpy.mean(yN2Os)
                d15Nmean=numpy.mean(yd15Ns)
                d15Amean=numpy.mean(yd15As)
                d15Bmean=numpy.mean(yd15Bs)
                d18Omean=numpy.mean(yd18Os)
                CO2mean=numpy.mean(yCO2s)
                OutletValvemean=numpy.mean(yOutletValves)
                N2Oslope, N2Ointercept, N2Olinr, N2Olinp, N2Ostd_err = stats.linregress(xsecs,yN2Os)
                d15Nslope, d15Nintercept, d15Nlinr, d15Nlinp, d15Nstd_err = stats.linregress(xsecs,yd15Ns)
                d15Aslope, d15Aintercept, d15Alinr, d15Alinp, d15Astd_err = stats.linregress(xsecs,yd15As)
                d15Bslope, d15Bintercept, d15Blinr, d15Blinp, d15Bstd_err = stats.linregress(xsecs,yd15Bs)
                d18Oslope, d18Ointercept, d18Olinr, d18Olinp, d18Ostd_err = stats.linregress(xsecs,yd18Os)
                # NOTE(review): d18O columns are written intercept-before-slope,
                # matching the header order above — confirm that is intended
                resultswriter.writerow([sampletimes['SampleName'][c1],rundate,runtime, sampletimes['Port'][c1],
                    N2Omean, N2Oslope, N2Ointercept, d15Nmean, d15Nslope, d15Nintercept, d15Amean, d15Aslope, d15Aintercept, d15Bmean, d15Bslope, d15Bintercept,
                    d18Omean, d18Ointercept, d18Oslope, CO2mean, OutletValvemean])
                #______________ SAMPLE ONLY PDF_______________________________
                fig = P.figure(figsize=(16, 16))
                xs = numpy.array(xsecs)
                y1s = numpy.array(yN2Os)
                y2s = numpy.array(yd15As)
                y3s = numpy.array(yd15Bs)
                y4s = numpy.array(yd18Os)
                y5s = numpy.array(yOutletValves)
                x = numpy.array(xsec)
                y1 = numpy.array(yN2O)
                y2 = numpy.array(yd15A)
                y3 = numpy.array(yd15B)
                y4 = numpy.array(yd18O)
                y5 = numpy.array(yOutletValve)
                # (m,b)=P.polyfit(xs,y1s,1)
                # y12 = P.polyval([m,b],x)
                # (m,b)=P.polyfit(xs,y2s,1)
                # y22 = P.polyval([m,b],x)
                # (m,b)=P.polyfit(xs,y3s,1)
                # y32 = P.polyval([m,b],x)
                # (m,b)=P.polyfit(xs,y4s,1)
                # y42 = P.polyval([m,b],x)
                # (m,b)=P.polyfit(xs,y5s,1)
                # y52 = P.polyval([m,b],x)
                line1=fig.add_subplot(511)
                line1.scatter(xs, y1s)
                line1.set_xlim(left=0)
                line1.grid()
                line1.set_title('Sample Name: '+str(sampletimes['SampleName'][c1])+' time: '+time.strftime("%d %b %Y %H:%M:%S ", samplestructtime))
                line1.set_ylabel('N2O concentration (ppmv)', color='b')
                line2=fig.add_subplot(512)
                line2.scatter(xs, y2s)
                line2.set_xlim(left=0)
                line2.grid()
                line2.set_ylabel('d15N alpha', color='b')
                line3=fig.add_subplot(513)
                line3.scatter(xs, y3s)
                line3.set_xlim(left=0)
                line3.grid()
                line3.set_ylabel('d15N beta', color='b')
                line4=fig.add_subplot(514)
                line4.scatter(xs, y4s)
                line4.set_xlim(left=0)
                line4.grid()
                line4.set_ylabel('d18O', color='b')
                line5=fig.add_subplot(515)
                line5.scatter(xs, y5s)
                line5.set_xlim(left=0)
                line5.grid()
                line5.set_ylabel('OutletValve', color='b')
                line5.set_xlabel('time (seconds)', color='b')
                pdffile1.savefig(dpi=150)
                P.close()
                #________________________ WHOLE RUN PDF_______________________________
                # same five panels, with the stabilization/tail points added as '+'
                fig = P.figure(figsize=(16, 16))
                line1=fig.add_subplot(511)
                line1.scatter(xs, y1s)
                line1.scatter(x, y1, marker='+')
                line1.set_xlim(left=0)
                line1.grid()
                line1.set_title('Sample Name: '+str(sampletimes['SampleName'][c1])+' time: '+time.strftime("%d %b %Y %H:%M:%S ", samplestructtime))
                line1.set_ylabel('N2O concentration (ppmv)', color='b')
                line2=fig.add_subplot(512)
                line2.scatter(xs, y2s)
                line2.scatter(x, y2, marker='+')
                line2.set_xlim(left=0)
                line2.grid()
                line2.set_ylabel('d15N alpha', color='b')
                line3=fig.add_subplot(513)
                line3.scatter(xs, y3s)
                line3.scatter(x, y3, marker='+')
                line3.set_xlim(left=0)
                line3.grid()
                line3.set_ylabel('d15N beta', color='b')
                line4=fig.add_subplot(514)
                line4.scatter(xs, y4s)
                line4.scatter(x, y4, marker='+')
                line4.set_xlim(left=0)
                line4.grid()
                line4.set_ylabel('d18O', color='b')
                line5=fig.add_subplot(515)
                line5.scatter(xs, y5s)
                line5.scatter(x, y5, marker='+')
                line5.set_xlim(left=0)
                line5.grid()
                line5.set_ylabel('OutletValve', color='b')
                line5.set_xlabel('time (seconds)', color='b')
                pdffile2.savefig(dpi=150)
                P.close()
            else:
                # too few readings: emit an 'na' placeholder row
                resultswriter.writerow([sampletimes['SampleName'][c1],rundate,runtime, sampletimes['Port'][c1],
                    'na', 'na', 'na', 'na','na','na','na', 'na', 'na', 'na','na', 'na', 'na','na'])
                print 'NO DATA FOUND FOR THIS SAMPLE'
            print '----------------------------------------------'
            c1=c1+1
        openinputfile.close()
        openresultsfile.close()
        pdffile1.close()
        pdffile2.close()
#____________________________________________________________________________________________________________
#--------------------GUI-----------------------------------------------------------------------------------
#_____________________________________________________________________________________________________________
# Build the Tk root window and lay out the whole GUI with grid().
root = Tk()
root.title('iN2O results calculator 20150708')
#__________________________________LOGO&TITLE________________________________________
# Banner label across the top of the window.
bigtitle = Label(root, anchor=W, font=('times', 20, 'bold'), fg='white',bg='darkgreen', text="iN2O 15N 18O calculator ")
bigtitle.grid(row=0,column=0,columnspan=10,sticky=[N,S,E,W])
#____________________________OPTIONS______________________________________________________
# User-tunable processing options; defaults are pre-filled into the entries
# and read back by the processing code when the button below is pressed.
optionstitle = Label(root, anchor=W, font=('times', 12, 'bold'), text="options:")
optionstitle.grid(row=1,column=0, columnspan=3, sticky=[N,S,E,W])
pretimeentrytitle = Label(root, anchor=W, text="stabilizing time to ignore at start (s):")
pretimeentrytitle.grid(row=3,column=0, columnspan=1, sticky=[E])
pretimeentry= Entry(root,width=4)
pretimeentry.insert(0,"270")  # default: skip the first 270 s of each sample
pretimeentry.grid(row=3,column=1, columnspan=1, sticky=[W])
sampletimeentrytitle = Label(root, anchor=W, text="sampling time to include (min):")
sampletimeentrytitle.grid(row=4,column=0, columnspan=1, sticky=[E])
sampletimeentry= Entry(root,width=4)
sampletimeentry.insert(0,"5")  # default: include 5 minutes of sampling data
sampletimeentry.grid(row=4,column=1, columnspan=1, sticky=[W])
UTCoffsettitle = Label(root, anchor=W, text="Offset local time UTC (SK: -6):")
UTCoffsettitle.grid(row=13,column=0, columnspan=1, sticky=[E])
UTCoffsetentry= Entry(root,width=4)
UTCoffsetentry.insert(0,"-6")  # default: Saskatchewan local time is UTC-6
UTCoffsetentry.grid(row=13,column=1, columnspan=1, sticky=[W])
# _______________________CALC INDIVIDUAL FLUXES_____________________________________________
# Thin grey frame used purely as a horizontal separator line.
f0=Frame(root,height=1, width=450, bg="grey")
f0.grid(row=24,column=0, columnspan=4, pady=5,sticky=S)
calcfluxtitle = Label(root, anchor=W, font=('times', 12, 'bold'), text="Calculate results")
calcfluxtitle.grid(row=25,column=0, columnspan=4, sticky=[N,S,E,W])
calcfluxhelp = Label(root, anchor=W, text="Open a merged results file")
calcfluxhelp.grid(row=26,column=0, columnspan=4, sticky=[N,S,E,W])
calcfluxhelp2 = Label(root, anchor=W, text="input concentrations in ppmv (=ul/l)")
calcfluxhelp2.grid(row=27,column=0, columnspan=4, sticky=[N,S,E,W])
# This button launches the file dialog and the processing pipeline
# (askopenresultsfilename is defined earlier in this file).
buttonopenconcfile=Button(root, text='open sampletime file', command=askopenresultsfilename)
buttonopenconcfile.grid(row=28,column=1,columnspan=1,sticky=[W])
calcfluxhelp3 = Label(root, anchor=W, text="results are saved in data_tools_uofs")
calcfluxhelp3.grid(row=29,column=0, columnspan=4, sticky=[N,S,E,W])
# #_____________________________________________________________________________________________________________
# Enter the Tk event loop; blocks here until the window is closed.
root.mainloop( )
|
985,004 | d38bb9dc2d06e9153e0c08d14c394a5ac0f37b73 | import sys
import io
from argument_engine.nd_lookups import *
class NaturalDeduction:
    """Fitch-style natural-deduction prover.

    Thin driver over the lookup engine from ``argument_engine.nd_lookups``:
    premises and the conclusion are normalized to ``Formula`` objects, then
    the proof search runs while its line-by-line output is captured from
    stdout so the finished proof can be returned as a string.
    """

    # checked-x:2020-Nov-10
    def __init__(self, premise_in_a_list, conclusion_is_a_formula):
        """Store premises and conclusion, parsing plain symbol strings.

        NOTE(review): if *any* premise is already a Formula the whole list is
        taken as-is, so a mixed list of strings and Formulas keeps its raw
        strings (read_premises later wraps those in Atom).  Preserved for
        backward compatibility -- confirm this is intentional before changing.
        """
        self.premise = premise_in_a_list \
            if any(isinstance(item, Formula) for item in premise_in_a_list) \
            else [SymbolToFunction(pre).transform() for pre in premise_in_a_list]
        self.conclusion = conclusion_is_a_formula \
            if isinstance(conclusion_is_a_formula, Formula) \
            else SymbolToFunction(conclusion_is_a_formula).transform()

    def prove(self):
        """Run the proof search.

        Returns:
            The captured Fitch-style proof text on success; otherwise the
            falsy result produced by the lookup engine.
        """
        # The lookup engine prints its proof lines directly, so temporarily
        # redirect stdout into a StringIO buffer to capture them.
        real_stdout = sys.stdout
        captured = io.StringIO()
        sys.stdout = captured
        try:
            # `count` is the running proof-line number (premises + derivations).
            premise_list, count = self.read_premises()
            premise_list, count = pre_lookup(premise_list, count)
            # listlize builds a candidate list; "CON" tags it as the conclusion.
            conclusion_list = listlize([self.conclusion], "CON")
            result = conclusion_lookup(premise_list, conclusion_list, count)
        finally:
            # Restore stdout even if a lookup raises -- the previous version
            # leaked the redirect on error, silencing all subsequent prints.
            sys.stdout = real_stdout
        if result:
            output = captured.getvalue()
            how_many_lines = output.count('\n')
            if how_many_lines < 10:
                # Line numbers below 10 are one digit wide, so tighten the
                # "N. " spacing to keep the proof columns aligned.
                output = output.replace('. ', '.')
            print(output)
            self.print_result()
            result = output
        else:
            print("Can't prove.")
        return result

    def read_premises(self):
        """Echo premises as numbered proof lines.

        Returns:
            tuple: (list of premises as Formula/Atom objects, next line number).
        """
        premises = []
        counter = 1
        for item in self.premise:
            if isinstance(item, str):
                premises.append(Atom(item))
            else:
                premises.append(item)
            print(f"{counter}.", end="")
            # One-digit line numbers get an extra space for column alignment.
            print(" " if counter < 10 else "", end="")
            print(f"| Premise {item}")
            counter += 1
        return premises, counter

    def print_result(self):
        """Print the sequent summary: premises |- conclusion."""
        print(", ".join(map(str, self.premise)), end=" ")
        print(f"|- {self.conclusion}")
if __name__ == "__main__":
    # Scratchpad of past experiments kept for reference; nothing runs by default.
    # test_str1 = "(A>B)>((B>C)>(A>C))"
    # test_str2 = "~(~A|(~A|B))"
    # test_str3 = "~(A|B)&~(B|C)"
    # test_str4 = "~(~(~A))"
    # str_premise = ["p>((q|~q)>(r|s))", "s>~(t|~t)"]
    # str_conclusion = "p>r"
    # premise = ['h', 'r>~h']
    # conclusion = 'r>~a'
    # anlp = NaturalDeduction(premise, conclusion).prove()
    # premise = [If("p", If(Or("q", Not("q")), Or("r", "s"))), If("s", Not(Or("t", Not("t"))))]
    # conclusion = If("p", "r")
    #
    # premise = ['i', 'h>a', 'e>a', 'i>a', 'h', 'l>a', 'e', 'l']
    # conclusion = '~a>~d'
    #
    # premise = ['r', 'h']
    # conclusion = '~(r>~h)'
    #
    # NaturalDeduction(premise, conclusion).prove()
    #
    # premise = [Not(Not(Not(Not("p"))))]
    # conclusion = "p"
    # NaturalDeduction(premise, conclusion).prove()
    #
    # premise = [Or("p", Or("q", "r"))]
    # conclusion = Or(Or("p", "q"), "r")
    # premise = ['p>r', 'p>q']
    # conclusion = 'p>(q&r)'
    # NaturalDeduction(premise,conclusion).prove()
    pass
|
985,005 | b31bd82c9301386b2d4a2045736363e0e42c3996 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from non_ai.generic_functions import *
import math
class Markowitz:
    """Monte-Carlo Markowitz portfolio optimiser.

    Simulates random long-only weight vectors, locates four benchmark
    portfolios on the simulated frontier (maximum Sharpe ratio, minimum
    volatility, maximum expected return, maximum diversity ratio), and
    tracks their weighted / summed / cumulative returns on train and
    test data.

    NOTE(review): `codes_df` is expected to map each asset code to a
    per-asset frame (or dict) containing 'Log Return' and 'Daily Return'
    series -- inferred from usage in Train(); confirm against the
    data-loading code.
    """

    def __init__(self):
        self.codes_df = pd.DataFrame()  # Train DataFrame
        self.codes = []  # Portfolio Asset Codes
        self.sim_df = pd.DataFrame()  # DataFrame of Simulations
        # One-row frames holding the weights of each located portfolio.
        self.df_sharpe = pd.DataFrame()  # Sharpe Ratio
        self.df_min_vol = pd.DataFrame()  # Minimum Volatility
        self.df_max_ret = pd.DataFrame()  # Maximum Return
        self.df_max_div = pd.DataFrame()  # Maximum Diversity
        # Per-asset weighted daily returns (TRAIN).
        self.weighted_returns_sharpe = pd.DataFrame()
        self.weighted_returns_min_vol = pd.DataFrame()
        self.weighted_returns_max_ret = pd.DataFrame()
        self.weighted_returns_max_div = pd.DataFrame()
        # Portfolio (summed) returns (TRAIN).
        self.port_ret_sharpe = pd.DataFrame()
        self.port_ret_min_vol = pd.DataFrame()
        self.port_ret_max_ret = pd.DataFrame()
        self.port_ret_max_div = pd.DataFrame()
        # Portfolio returns (TEST).
        self.port_ret_test_sharpe = pd.DataFrame()
        self.port_ret_test_min_vol = pd.DataFrame()
        self.port_ret_test_max_ret = pd.DataFrame()
        self.port_ret_test_max_div = pd.DataFrame()
        # Cumulative returns (TRAIN).
        self.cumulative_ret_sharpe = pd.DataFrame()
        self.cumulative_ret_min_vol = pd.DataFrame()
        self.cumulative_ret_max_ret = pd.DataFrame()
        self.cumulative_ret_max_div = pd.DataFrame()
        # Cumulative returns (TEST).
        self.cumulative_ret_test_sharpe = pd.DataFrame()
        self.cumulative_ret_test_min_vol = pd.DataFrame()
        self.cumulative_ret_test_max_ret = pd.DataFrame()
        self.cumulative_ret_test_max_div = pd.DataFrame()
        self.returns_results_table = pd.DataFrame()  # Table of Results

    def Train(self, codes_df, codes, simulation_num=100000, balance=1):
        """Simulate `simulation_num` random portfolios over `codes`, locate
        the four benchmark portfolios and populate the train-side series.

        Args:
            codes_df: per-asset data keyed by asset code (see class note).
            codes: list of asset codes making up the portfolio.
            simulation_num: number of random weight vectors to draw.
            balance: starting balance for the cumulative-return series.
        """
        # Get mean and standard-deviation moments for each stock item.
        for asset in codes:
            codes_df[str(asset)]["Mean"] = mean(codes_df[str(asset)]["Log Return"])
            codes_df[str(asset)]["Std"] = stddev(codes_df[str(asset)]["Log Return"])
        self.codes_df = codes_df
        self.codes = codes
        # Simulation phase =====================================
        print("Creating simulations...")
        # Fixed seed for reproducibility.
        np.random.seed(0)
        rand_nos = np.random.rand(simulation_num, len(codes))
        # Normalise each row so one portfolio's weights sum to 1
        # (shape stays simulation_num x n_assets after the round trip).
        simulated_portfolio_weights = rand_nos.transpose() / rand_nos.sum(axis=1)
        simulated_portfolio_weights = simulated_portfolio_weights.transpose()
        df = pd.DataFrame(simulated_portfolio_weights, columns=codes)
        # Attach the three objective columns used to rank portfolios.
        df['Expected Return'] = PortfolioExpectedReturn(codes_df, simulated_portfolio_weights, codes)
        df['Volatility'] = PortfolioVolatility(codes_df, simulated_portfolio_weights, codes)
        df['Diversity Ratio'] = PortfolioDiversityRatio(codes_df, simulated_portfolio_weights, codes)
        self.sim_df = df
        # Locate positions =========================================
        print("Locating portfolio positions...")
        # Portfolio with lowest volatility.
        min_vol_port = df.iloc[df['Volatility'].idxmin()]
        # Portfolio with highest Sharpe ratio (return / volatility).
        max_sharpe = df.iloc[(df['Expected Return'] / df['Volatility']).idxmax()]
        # Portfolio with highest expected return.
        max_ret_port = df.iloc[df['Expected Return'].idxmax()]
        # Portfolio with greatest diversification.
        max_div_port = df.iloc[df['Diversity Ratio'].idxmax()]
        # Get weights for positions found ==========================
        print("Getting weights for portfolios located...")
        # .copy() detaches each one-row selection from `df`, avoiding
        # SettingWithCopy warnings from the in-place reset_index below.
        mask = (df['Expected Return'].values == max_sharpe['Expected Return']) & (
                df['Volatility'].values == max_sharpe['Volatility'])
        self.df_sharpe = df.loc[mask].copy()
        self.df_sharpe.reset_index(inplace=True)
        mask1 = (df['Expected Return'].values == min_vol_port['Expected Return']) & (
                df['Volatility'].values == min_vol_port['Volatility'])
        self.df_min_vol = df.loc[mask1].copy()
        self.df_min_vol.reset_index(inplace=True)
        mask2 = (df['Expected Return'].values == max_ret_port['Expected Return']) & (
                df['Volatility'].values == max_ret_port['Volatility'])
        self.df_max_ret = df.loc[mask2].copy()
        self.df_max_ret.reset_index(inplace=True)
        mask3 = (df['Expected Return'].values == max_div_port['Expected Return']) & (
                df['Volatility'].values == max_div_port['Volatility'])
        self.df_max_div = df.loc[mask3].copy()
        self.df_max_div.reset_index(inplace=True)
        # Weighted returns =========================================
        print("Calculating weighted returns...")
        self.weighted_returns_sharpe = pd.DataFrame()
        self.weighted_returns_min_vol = pd.DataFrame()
        self.weighted_returns_max_ret = pd.DataFrame()
        self.weighted_returns_max_div = pd.DataFrame()
        for asset in self.codes:
            self.weighted_returns_sharpe[str(asset)] = (
                    self.codes_df[str(asset)]['Daily Return'] * self.df_sharpe[str(asset)][0])
            self.weighted_returns_min_vol[str(asset)] = (
                    self.codes_df[str(asset)]['Daily Return'] * self.df_min_vol[str(asset)][0])
            self.weighted_returns_max_ret[str(asset)] = (
                    self.codes_df[str(asset)]['Daily Return'] * self.df_max_ret[str(asset)][0])
            self.weighted_returns_max_div[str(asset)] = (
                    self.codes_df[str(asset)]['Daily Return'] * self.df_max_div[str(asset)][0])
        # Sum of weighted returns ==================================
        print("Calculating sum of weighted returns...")
        self.port_ret_sharpe = self.weighted_returns_sharpe.sum(axis=1)  # axis=1: sum across assets
        self.port_ret_min_vol = self.weighted_returns_min_vol.sum(axis=1)
        self.port_ret_max_ret = self.weighted_returns_max_ret.sum(axis=1)
        self.port_ret_max_div = self.weighted_returns_max_div.sum(axis=1)
        # Cumulative returns, starting with a balance ===============
        print("Calculating cumulative returns...")
        self.cumulative_ret_sharpe = (balance - 1) + (self.port_ret_sharpe + 1).cumprod()
        self.cumulative_ret_min_vol = (balance - 1) + (self.port_ret_min_vol + 1).cumprod()
        self.cumulative_ret_max_ret = (balance - 1) + (self.port_ret_max_ret + 1).cumprod()
        self.cumulative_ret_max_div = (balance - 1) + (self.port_ret_max_div + 1).cumprod()
        print("Done!")

    def PlotSimulations(self):
        """Scatter-plot the simulated portfolios along the three objective axes."""
        self.sim_df.plot(x='Volatility', y='Expected Return', style='o', title='Volatility vs. Expected Return')
        self.sim_df.plot(x='Volatility', y='Diversity Ratio', style='o', title='Volatility vs. Diversity Ratio')
        self.sim_df.plot(x='Diversity Ratio', y='Expected Return', style='o', title='Diversity Ratio vs. Expected Return')

    def PlotPortfolioPositions(self):
        """Plot the simulated efficient frontier and star the four benchmark portfolios."""
        df = self.sim_df
        # Re-locate the four benchmark portfolios (same criteria as Train).
        min_vol_port = df.iloc[df['Volatility'].idxmin()]
        max_sharpe = df.iloc[(df['Expected Return'] / df['Volatility']).idxmax()]
        max_ret_port = df.iloc[df['Expected Return'].idxmax()]
        max_div_port = df.iloc[df['Diversity Ratio'].idxmax()]
        # Scatter plot coloured by volatility.
        plt.subplots(figsize=(15, 10))
        plt.scatter(df['Volatility'], df['Expected Return'], c=df['Volatility'], cmap='RdYlBu')
        plt.xlabel('Volatility')
        plt.ylabel('Expected Return')
        plt.title('Monte-carlo Simulation - Showing Markowitz Efficient Frontier')
        plt.colorbar()
        plt.margins(x=-0.45, y=-0.35)
        plt.xlim(0, 0.00015)
        # Stars: yellow = min volatility, blue = max Sharpe,
        # green = max return, red = max diversity ratio.
        plt.scatter(min_vol_port['Volatility'], min_vol_port['Expected Return'], marker=(5, 1, 0), color='y', s=500)
        plt.scatter(max_sharpe['Volatility'], max_sharpe['Expected Return'], marker=(5, 1, 0), color='b', s=500)
        plt.scatter(max_ret_port['Volatility'], max_ret_port['Expected Return'], marker=(5, 1, 0), color='g', s=500)
        plt.scatter(max_div_port['Volatility'], max_div_port['Expected Return'], marker=(5, 1, 0), color='r', s=500)
        plt.show()

    def Test(self, test_df, balance=1):
        """Apply the trained weights to `test_df` and build test-side return series."""
        print("Calculating returns...")
        self.port_ret_test_sharpe = GeneratePortfolioReturns(test_df, self.df_sharpe, self.codes)
        self.port_ret_test_min_vol = GeneratePortfolioReturns(test_df, self.df_min_vol, self.codes)
        self.port_ret_test_max_ret = GeneratePortfolioReturns(test_df, self.df_max_ret, self.codes)
        self.port_ret_test_max_div = GeneratePortfolioReturns(test_df, self.df_max_div, self.codes)
        print("Calculating cumulative returns...")
        self.cumulative_ret_test_sharpe = (balance - 1) + (self.port_ret_test_sharpe + 1).cumprod()
        self.cumulative_ret_test_min_vol = (balance - 1) + (self.port_ret_test_min_vol + 1).cumprod()
        self.cumulative_ret_test_max_ret = (balance - 1) + (self.port_ret_test_max_ret + 1).cumprod()
        self.cumulative_ret_test_max_div = (balance - 1) + (self.port_ret_test_max_div + 1).cumprod()
        print("Done!")

    def PlotCumulativeReturns(self, plot="Train"):
        """Plot cumulative returns of the four portfolios ("Train" or anything else for test)."""
        if plot == "Train":
            plt.figure(figsize=(8, 6), dpi=100)
            plt.plot(self.cumulative_ret_sharpe, label="Maximum Sharpe Ratio")
            plt.plot(self.cumulative_ret_min_vol, label="Minimum Volatility")
            plt.plot(self.cumulative_ret_max_ret, label="Maximum Expected Return")
            plt.plot(self.cumulative_ret_max_div, label="Maximum Diversity Ratio")
            plt.title('Portfolio Cumulative Returns (TRAIN)')
            plt.xlabel('Date')
            plt.ylabel('Cumulative Returns')
        else:
            plt.figure(figsize=(8, 6), dpi=100)
            plt.plot(self.cumulative_ret_test_sharpe, label="Maximum Sharpe Ratio")
            plt.plot(self.cumulative_ret_test_min_vol, label="Minimum Volatility")
            plt.plot(self.cumulative_ret_test_max_ret, label="Maximum Expected Return")
            plt.plot(self.cumulative_ret_test_max_div, label="Maximum Diversity Ratio")
            plt.title('Portfolio Cumulative Returns (TEST)')
            plt.xlabel('Date')
            plt.ylabel('Cumulative Returns')
        plt.legend()
        plt.show()

    def ReturnsResultsTable(self, plot="Train"):
        """Build the summary table for the four portfolios and store it in
        `self.returns_results_table` ("Train" or anything else for test)."""
        df = pd.DataFrame(columns=['Portfolio', 'Average Daily Yield', 'Sharpe Ratio', 'Maximum Drawdown'])
        if plot == "Train":
            df = self.ReturnsResultsTableRow(df, 'Maximum Sharpe Ratio', self.port_ret_sharpe)
            df = self.ReturnsResultsTableRow(df, 'Minimum Volatility', self.port_ret_min_vol)
            df = self.ReturnsResultsTableRow(df, 'Maximum Expected Return', self.port_ret_max_ret)
            df = self.ReturnsResultsTableRow(df, 'Maximum Diversity Ratio', self.port_ret_max_div)
        else:
            df = self.ReturnsResultsTableRow(df, 'Maximum Sharpe Ratio', self.port_ret_test_sharpe)
            df = self.ReturnsResultsTableRow(df, 'Minimum Volatility', self.port_ret_test_min_vol)
            df = self.ReturnsResultsTableRow(df, 'Maximum Expected Return', self.port_ret_test_max_ret)
            df = self.ReturnsResultsTableRow(df, 'Maximum Diversity Ratio', self.port_ret_test_max_div)
        self.returns_results_table = df

    def ReturnsResultsTableRow(self, df, name, returns):
        """Append one summary row for `returns` to `df` and return the new frame.

        Uses pd.concat because DataFrame.append was deprecated in pandas 1.4
        and removed in pandas 2.0.
        """
        row = {
            'Portfolio': name,
            'Average Daily Yield': round(float(np.mean(returns) * 100), 3),
            # Annualised Sharpe ratio assuming 252 trading days.
            'Sharpe Ratio': round(float(
                np.mean(returns) / np.std(returns) * np.sqrt(252)), 3),
            'Maximum Drawdown': round(float(max(
                1 - min(returns) / np.maximum.accumulate(returns))), 3),
        }
        return pd.concat([df, pd.DataFrame([row])], ignore_index=True)
|
985,006 | 4273c00a4980cb0e0e99fde86bcad4fdf3b725f1 | # -*- coding: utf-8 -*-
# @Time : 2019/5/21,021 9:31
# @Author : 徐缘
# @FileName: v1.7ceshi.py
# @Software: PyCharm
"""
大会战 采集部分
从dahuizhan.xlsx上次记录至昨日。
三部分:
1、SQM
2、ELK
3、普天拨测
打包命令: pyinstaller -F -i img\dahuizhan.ico web\llz_indicators\dahuizhan\dahuizhan.py
"""
import web.webCrawler.webcrawler as ww
import web.webCrawler.login as wl
import urllib.request
import urllib.parse
import json
import xlrd
from bs4 import BeautifulSoup
import datetime
from xlutils.copy import copy
import time
import web.webCrawler.login as wl
import sys
import ssl
# Module-level constants for the collection run.
filename = 'dahuizhan.xls'  # output workbook file name
companies = ['huawei', 'hy', 'fonsview', 'zte']  # vendor platforms to query
query_curl = {  # query bodies used in the ELK _count requests below
    "5xx": {"wildcard": {"httpstatus": "5??"}},
    "all": {"wildcard": {"httpstatus": "*"}}
    # "all": {"wildcard": {"httpstatus": "???"}}
}
def sqm_nei(cookie, day_sqm):
    """Pull one day's SQM service KPIs from the EVQ master portal.

    Args:
        cookie: authenticated session cookie for the SQM portal.
        day_sqm: date (datetime/date) to report on.

    Returns:
        list: [first-buffer latency, EPG success rate, playback success rate,
        stall-time ratio, stalls/hour, stall-count ratio, EPG latency,
        EPG requests, EPG responses, laggy-device ratio, max active STBs,
        max streaming STBs, total devices].
        NOTE(review): ordering inferred from the code below -- confirm it
        matches the spreadsheet columns this feeds.
    """
    startTime = day_sqm.strftime('%Y-%m-%d')
    # Key service KPIs: first-buffer latency (ms), EPG response success rate (%),
    # online playback success rate (%), stall-time ratio (%), wifi access ratio (%),
    # stall-count ratio (%), EPG response latency (ms), EPG total/successful responses.
    url = 'http://117.144.107.165:8088/evqmaster/report/reportaction!returnMiguData.action'
    form = {
        'paramData': '{\"secFrom\": \"' + startTime + ' 00:00:00\", \"secTo\": \"' + startTime + ' 00:00:00\", \"location\"'
                     ': 4, \"dimension\": \"platform\", \"platform\": \"\", \"tType\": 2, \"isMigu\": false, \"isMiguShanxi'
                     '\": false, \"bIncludeShanxi\": false}'
    }
    f = ww.post_web_page(url, form, cookie)
    tmp_dict = json.loads(f)
    # The payload nests JSON inside JSON: resultData is itself a JSON string.
    sqm_dict = json.loads(tmp_dict['resultData'])
    # print(sqm_dict.keys())
    fault = {0, 1, 2, 3, 4, 5, 6, 7, 8, 14, 15}  # fault IDs to aggregate; ID 14 maps to list slot 9, ID 15 to slot 10
    list_count = list()
    list_total = list()
    # Aggregate Count/Total per fault ID over all VoD records.
    for i in fault:
        count_tmp = 0
        total_tmp = 0
        for item in sqm_dict['vod']:
            if item['FaultID'] == i:
                count_tmp += item['Count']
                total_tmp += item['Total']
        list_count.append(count_tmp)
        list_total.append(total_tmp)
    # print(list_total)
    # print(list_count)
    # Aggregate everything first, then apply the KPI formulas below.
    # Vendor field glossary (names come from the upstream system):
    # Latency        first-buffer latency
    # DownSpeed      download speed
    # EPGLoadSuc     EPG load success rate
    # OnlineSucPlay  playback success rate
    # timeProprot    stall-duration ratio
    # wifiAcount     wifi access ratio
    # UnitTCaton     stalls per hour
    # CatonAcount    stall-count ratio (%)
    # EPGLoadDelay   EPG response latency (ms)
    # EPGRequests    EPG total requests
    # EPGReponses    EPG successful responses
    # LoginSuc       login success rate (%)
    res = list()
    Latency = round(list_total[4]/list_count[4]/1000, 2)
    # DownSpeed = round(list_total[9]*8/list_total[6]/1024/1024, 2)
    EPGLoadSuc = round(sqm_dict['epg'][0]['Responses'] / sqm_dict['epg'][0]['Requests'] * 100, 2)
    OnlineSucPlay = round(list_total[8] / list_total[7] * 100, 2)
    timeProprot = round(list_total[2] / 1000000 / list_total[6] * 100, 2)
    access_wifi = 0
    access_total = 0
    # DistStreamDevice is '@'-separated "type#count" pairs; type '2' is wifi.
    for item in sqm_dict['net'][0]['DistStreamDevice'].split('@'):
        tmp = item.split('#')
        if tmp[0] == '2':
            access_wifi = int(tmp[1])
        access_total += int(tmp[1])
    # wifiAcount = round(access_wifi / access_total * 100, 2)
    UnitTCaton = round(list_total[3]*3600/list_total[6], 2)
    CatonAcount = round(list_total[10]*100/list_total[7], 2)
    EPGLoadDelay = round(sqm_dict['epg'][0]['TotEpgRspTime'] / sqm_dict['epg'][0]['CntEpgRspTime'] / 1000, 2)
    EPGRequests = sqm_dict['epg'][0]['Requests']
    EPGReponses = sqm_dict['epg'][0]['Responses']
    # LoginSuc = round(sqm_dict['epgSuc'][0]['Responses'] / sqm_dict['epgSuc'][0]['Requests'] * 100, 2)
    # res.extend([Latency, DownSpeed, EPGLoadSuc, OnlineSucPlay, timeProprot, wifiAcount, UnitTCaton, CatonAcount,
    #             EPGLoadDelay, EPGRequests, EPGReponses, LoginSuc])
    res.extend([Latency, EPGLoadSuc, OnlineSucPlay, timeProprot, UnitTCaton, CatonAcount,
                EPGLoadDelay, EPGRequests, EPGReponses])
    # Per-user stall distribution.
    url = 'http://117.144.107.165:8088/evqmaster/report/reportaction!returnKpiData.action'
    form = {
        'paramData': '{\"location\": 4, \"secFrom\": \"' + startTime + ' 00:00:00\", \"secTo\": \"' + startTime + ' 00:00:00\", \"dimension\": \"1\", \"idfilter\": \"4\", \"type\": \"usercard\", \"dataType\": \"1\"}'
    }
    f = ww.post_web_page(url, form, cookie)
    # The reply is not clean JSON, so the device counters are scraped by
    # scanning for each field name and slicing out the value that follows.
    tmp_index = f.find('GrnDevices')
    f = f[tmp_index:]
    tmp_normal_device = f[f.find(':') + 1:f.find(',')]
    tmp_index = f.find('RedDevices')
    f = f[tmp_index:]
    tmp_red_device = f[f.find(':') + 1:f.find(',')]
    tmp_index = f.find('YlwDevices')
    f = f[tmp_index:]
    tmp_ylw_device = f[f.find(':') + 1:f.find(',')]
    tmp_index = f.find('BlueDevices')
    f = f[tmp_index:]
    f = f[f.find(':') + 1:]
    tmp_blue_device = ''
    # BlueDevices is the last field, so collect its digits to the end.
    for i in f:
        if i.isdigit():
            tmp_blue_device = tmp_blue_device + i
        else:
            continue
    # Share of devices that are not "green" (healthy): red/yellow/blue buckets.
    laggy_device_ratio = 100 - (float(tmp_normal_device) / (
            float(tmp_normal_device) + float(tmp_blue_device) + float(tmp_ylw_device) + float(
        tmp_red_device)) * 100)
    res.append(round(laggy_device_ratio, 2))
    # SQM peak streaming-user counts.
    # System quirk: start and end dates must match to fetch a single day's value.
    form = {
        'paramData': '{\"location\": 4, \"secFrom\": \"' + startTime + ' 00:00:00\", \"secTo\": \"' + startTime + ' 00:00:00\", \"dimension\": \"1\",\"idfilter\": \"4\", \"type\": \"activeuser\", \"dataType\": \"1\"}'
    }
    url = 'http://117.144.107.165:8088/evqmaster/report/reportaction!returnKpiData.action'
    f = ww.post_web_page(url, form, cookie)
    tmp_dict = json.loads(f)
    sqm_dict = json.loads(tmp_dict['resultData'])
    res.extend([sqm_dict[0]['maxActiveSTBs'], sqm_dict[0]['maxStreamSTBs'], sqm_dict[0]['TotalDevices']])
    return res
    # Legacy KPI computation kept for reference (unreachable, commented out):
    # # stall-time ratio = stall duration / download duration
    # print('卡顿时间占比:', round(list_total[2] / 1000000 / list_total[6] * 100, 2))
    #
    # # stall-count ratio = slot 10 / slot 7 * 100
    # lag_times = list_total[10]
    # total_times = list_total[7]  # number of programmes played
    # print('卡顿次数占比:', round(lag_times / total_times * 100, 2))
    #
    # # first-frame buffering time
    # print('首帧缓冲时长(S):', round(list_total[4] / list_count[4] / 1000000, 2))  # seconds
    #
    # # TV playback success rate
    # print('电视播放成功率', round(list_total[8] / list_total[7] * 100, 2))
    #
    # # EPG load success rate and login success rate
    # # print(sqm_dict['epg'])
    # # print(sqm_dict['epgSuc'])
    # print('EPG加载成功率', round(sqm_dict['epg'][0]['Responses'] / sqm_dict['epg'][0]['Requests'] * 100, 2))
    #
    # # stall-time ratio, first-frame buffering, TV playback success, EPG load success
    # return (round(lag_duration / total_duration * 100, 2),
    #         round(list_total[4] / list_count[4] / 1000000, 2), round(list_total[8] / list_total[7] * 100, 2),
    #         round(sqm_dict['epg'][0]['Responses'] / sqm_dict['epg'][0]['Requests'] * 100, 2))
def elk_query(day_elk):
    """Query the ELK cluster for per-vendor HTTP status counts and traffic.

    Returns, for each vendor in `companies`: the counts for each entry in
    `query_curl` (5xx and all), then -- again per vendor -- the peak and
    mean 5-minute throughput derived from the summed `filesize` field
    (bytes per 300 s converted to Gbit/s).
    """
    def requ_post(u, form):
        # POST `form` as JSON to the Kibana console proxy and decode the reply.
        json_info = bytes(json.dumps(form), 'utf8')
        header = {
            'Content-Type': 'application/json',
            # NOTE(review): hard-coded session cookie -- it expires and should
            # be obtained at runtime (cf. wl.elk()) instead of committed here.
            'Cookie': 'searchguard_authentication=Fe26.2**58b9b06dca6c80d397da9f2a8de3d0e8c443a22f743e68c5d6e19ab6f83722e0*ImAxfRbmIEfZRFUIXslNxw*pn8F9R3Vhjz5x9wqBEQZjGGHQTmIuX9dqLRtpRn2xF6ViIezM6rplIEvy7LhmACuNDc6j7Wc2lkd3tZUMEWD7Sp1eYTi_XDAH1kkdc-vdK0Aa_R8tdHBJzx4OLeCntAY-HvbIfTCE8GnkwIP_GSR8HRtDnUkLGRL0pak4uVN-VTz4-6Q3v8NHYwRcaPkm2bILc9hy8adTbcwceXAD0gqdk5U2eCsl4ZkxMRr0IgmHbA**79a781f9685e2c7d5be49c91586db4b783c442b59a1e9cca85f27fa28bd33f9c*4kAyVrI8pN99pW1mR-QwamTa-LhCu2qtWQHhqWKZYhs; searchguard_storage=Fe26.2**4885c3cdb8c5a24bd52729c23dd7983529a89eea2b837f47c1e54472d9817b3a*WOymcqiVRHOMSzZS4PpXTw*weosPbNsIa2SOFOlUioSCll1O4oowQq-pxSW2ukeUtVllcOSqnN2_sSoaCmFaGyrUbxbMbZ4iO2EU6bOU-dCO_QfCzlCfnSoygLh4edUjvBPzVlmsKMm7APwuy93bdfi2FjFWf5kEym7G9GXAB0RT1IsGH5gNGXy5FtgXQmeTjJrZO3ldHtM1gQzPMJaPJlsGAEXKdsOWlyrqntbsNFWnuv8NY8I990EGqYxD6v4x9hwmjWSMCAp7jR63W8B5ff0**16b78b18e1c284718efa4ccfa7d7ae30783c43fc18c8afd273a8b5aefe42b858*lg0qzB3LqQVtj8F8cQ9taajzNdYwxUjtl8gMdR-NPW8',
            'kbn-version': '6.6.1',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
        }
        request = urllib.request.Request(u, headers=header, data=json_info)
        response = urllib.request.urlopen(request)
        f = response.read().decode("utf8")
        return json.loads(f)
    yesterday = day_elk.strftime('%Y.%m.%d')
    tmp_content = list()
    for cj in companies:  # vendor
        # _count endpoint of the per-vendor daily index, e.g. huawei_sh2019.05.20.
        url = 'http://117.144.106.34:5601/api/console/proxy?path=%2F{}_sh{}%2F_count&method=POST'.format(cj, yesterday)
        for status in query_curl.keys():
            my_form = {
                "query": query_curl[status]
            }
            print(url, my_form)
            dict_tmp = requ_post(url, my_form)
            tmp_content.append(dict_tmp['count'])
    '''流量查询'''
    # Traffic query: sum `filesize` per 5-minute bucket via a date_histogram.
    for cj in companies:  # vendor
        url = 'http://117.144.106.34:5601/api/console/proxy?path=%2F{}_sh{}%2F_search%3Fsize%3D0&method=POST' \
            .format(cj, yesterday)
        my_form = {
            "aggs": {
                "group_by_time": {
                    "date_histogram": {
                        "field": "@timestamp",
                        "interval": "5m",
                        "format": "yyyy-MM-dd HH:mm"
                    },
                    "aggs": {
                        "data_rate": {
                            "sum": {
                                "field": "filesize"
                            }
                        }
                    }
                }
            }
        }
        print(url, my_form)
        dict_tmp = requ_post(url, my_form)
        # print(dict_tmp)
        elk_rate_dict = dict_tmp['aggregations']['group_by_time']['buckets']
        a = lambda x: x['data_rate']['value']
        # Peak and average bucket throughput: bytes per 300 s -> Gbit/s.
        tmp_content.append(round(max(list(map(a, [x for x in elk_rate_dict]))) / 1024 / 1024 / 1024 / 300 * 8, 2))
        tmp_content.append(round(sum(list(map(a, [x for x in elk_rate_dict])))
                                 / len(list(map(a, [x for x in elk_rate_dict]))) / 1024 / 1024 / 1024 / 300 * 8, 2))
    return tmp_content
def putian_query(day_putian):
    """Scrape the Putian dial-test report for one day.

    Fetches the `jiakuandahuizhan` report page for `day_putian` and returns
    the text of the third cell of every table row.
    NOTE(review): column meaning is inferred; confirm against the page.
    NOTE(review): `endTime` is computed but unused -- the current URL uses
    startTime for both ends of the range.
    """
    startTime = day_putian.strftime('%Y-%m-%d')
    endTime = (day_putian + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
    # Earlier URL variants kept for reference:
    # http://10.221.17.131:9091/report/bizman/common/result.jsp?timename=jiakuandahuizhan
    # http://10.221.17.131:9091/report/bizman/common/report.jsp?timename=jiakuandahuizhan&reportType=&cac=5614146&iam=15614135&timename=jiakuandahuizhan&change=true&sid=null&reportFileName=1552455614217&iam=15614135&page=null&pageSizeCus=null&timetype=day&datefromto=2019-04-03~2019-04-04&bar=true
    # url = 'http://10.221.17.131:9091/report/bizman/common/report.jsp?timename=jiakuandahuizhan&reportType=&cac=56141' \
    #       '46&iam=15614135&timename=jiakuandahuizhan&change=true&sid=null&reportFileName=1552455614217&iam=15614135&' \
    #       'page=null&pageSizeCus=null&timetype=day&datefromto={}~{}&bar=true'.format(startTime, endTime)
    # url = 'http://10.221.17.131:9091/report/bizman/common/report.jsp?timename=jiakuandahuizhan&reportType=&cac=2762197&iam=12675442&timename=jiakuandahuizhan&change=true&sid=null&reportFileName=1554792716199&u=r&page=null&pageSizeCus=null&timetype=customday&datefromto=2019-04-02~2019-04-02'
    url = 'http://10.221.17.131:9091/report/bizman/common/report.jsp?timename=jiakuandahuizhan&reportType=&cac=2762197&iam=12675442&timename=jiakuandahuizhan&change=true&sid=null&reportFileName=1554792716199&u=r&page=null&pageSizeCus=null&timetype=customday&datefromto={}~{}'.format(startTime, startTime)
    print(url)
    f = ww.get_web_page(url)
    soup = BeautifulSoup(f, "html.parser")
    # print(soup.prettify())  # readable dump of the whole page, handy when debugging
    # print(soup.find(attrs={'id': 'jiakuandahuizhan'}).prettify())
    res = list()
    # The report body is the table with id "jiakuandahuizhan"; take the
    # <span> text of the third cell of each row.
    for table_rows in soup.find(attrs={'id': 'jiakuandahuizhan'}).find_all('tr'):
        res.append(table_rows.find_all('td')[2].find('span').get_text())
    return res
if __name__ == '__main__':
    # Ad-hoc connectivity smoke test: log in to ELK, then issue a match_all
    # search through the Kibana console proxy and dump the raw response.
    elk_cookie = wl.elk()
    # NOTE(review): this disables TLS certificate verification process-wide;
    # acceptable only for the self-signed endpoint below.
    ssl._create_default_https_context = ssl._create_unverified_context
    form = {
        "query": {'match_all': {}}
    }
    json_info = bytes(json.dumps(form), 'utf8')
    header = {
        'Content-Type': 'application/json',
        'Cookie': elk_cookie,
        'kbn-version': '6.6.1',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
    }
    request = urllib.request.Request('https://117.144.106.34:5601/api/console/proxy?path=_search&method=POST', headers=header, data=json_info)
    response = urllib.request.urlopen(request)
    f = response.read().decode("utf8")
    print(f)
|
985,007 | 5d90faf6105bdc39eb0eb5d7633c7380467a3024 |
import random
import numpy as np
from FactoredStruct import FactoredStruct
from FactoredMDP import FactoredMDP
class SmallTaxiDomain( FactoredMDP ):
def __init__( self ):
super().__init__( 4, 2, [3, 3, 5, 3, 6] )
# create reward structure
scope = np.array( [0, 1, 2, 4] )
params = np.zeros( self.nelements( scope ) )
for sa in range( np.size( params, 0 ) ):
state = self.decode( sa, scope )
if ( state[3] == 4 and
not state[2] == 4 and
not np.array_equal( state, [0, 2, 0, 4] ) and
not np.array_equal( state, [2, 0, 1, 4] ) and
not np.array_equal( state, [0, 0, 2, 4] ) ):
params[sa] = -9
if ( state[3] == 5 and
not state[2] == 4 and
not np.array_equal( state, [0, 2, 3, 5] ) and
not np.array_equal( state, [2, 0, 3, 5] ) and
not np.array_equal( state, [0, 0, 3, 5] ) ):
params[sa] = -9
self.rewardstruct[0] = FactoredStruct( scope, params, np.zeros( ( self.nstates * self.nactions, np.size( params, 0 ) ) ) )
# normalize in [0,2]
self.rewardstruct[0].params = ( self.rewardstruct[0].params + 9 ) / 15
self.rewardstruct[1] = FactoredStruct( np.array( [2] ), -np.ones( 5 ), np.zeros( ( self.nstates * self.nactions, 5 ) ) )
self.rewardstruct[1].params[4] = 20
# normalize in [0,2]
self.rewardstruct[1].params = ( self.rewardstruct[1].params + 1 ) / 15
# create transition structure
# x-coord
scope = np.array( [0, 1, 2, 4] )
params = np.zeros( ( self.nelements( scope ), 3 ) )
for sa in range( np.size( params, 0 ) ):
state = self.decode( sa, scope )
if state[2] == 4:
params[sa, 0] = 0.3333
params[sa, 1] = 0.3333
params[sa, 2] = 0.3334
elif ( state[3] == 2 and
not state[0] == 2 and
not np.array_equal( state[[0,1]], [0, 0] ) and
not np.array_equal( state[[0,1]], [0, 2] ) and
not np.array_equal( state[[0,1]], [1, 0] ) ):
params[sa, state[0]] = 0.2
params[sa, state[0] + 1] = 0.8
elif ( state[3] == 3 and
not state[0] == 0 and
not np.array_equal( state[[0,1]], [1, 0] ) and
not np.array_equal( state[[0,1]], [1, 2] ) and
not np.array_equal( state[[0,1]], [2, 0] ) ):
params[sa, state[0]] = 0.2
params[sa, state[0] - 1] = 0.8
else:
params[sa, state[0]] = 1;
self.transitionstruct[0] = FactoredStruct( scope, params, np.zeros( ( self.nstates * self.nactions, np.size( params, 0 ) ) ) )
# y-coord
scope = np.array( [1, 2, 4] )
params = np.zeros( ( self.nelements( scope ), 3 ) )
for sa in range( np.size( params, 0 ) ):
state = self.decode( sa, scope )
if state[1] == 4:
params[sa, 0] = 0.3333
params[sa, 1] = 0.3333
params[sa, 2] = 0.3334
elif state[2] == 0 and state[0] < 2:
params[sa, state[0]] = 0.2
params[sa, state[0] + 1] = 0.8
elif state[2] == 1 and state[0] > 0:
params[sa, state[0]] = 0.2
params[sa, state[0] - 1] = 0.8
else:
params[sa, state[0]] = 1
self.transitionstruct[1] = FactoredStruct( scope, params, np.zeros( ( self.nstates * self.nactions, np.size( params, 0 ) ) ) )
# passenger
scope = np.array( [0, 1, 2, 3, 4] )
params = np.zeros( ( self.nelements( scope ), 5 ) )
for sa in range( np.size( params, 0 ) ):
state = self.decode( sa, scope )
if state[2] == 4:
params[sa, 0] = 0.3333
params[sa, 1] = 0.3333
params[sa, 2] = 0.3334
elif ( np.array_equal( state[[0, 1, 2, 4]], [0, 2, 0, 4] ) or
np.array_equal( state[[0, 1, 2, 4]], [2, 0, 1, 4] ) or
np.array_equal( state[[0, 1, 2, 4]], [0, 0, 2, 4] ) ):
params[sa, 3] = 1
elif np.array_equal( state[[0, 1, 2, 4]], [0, 2, 3, 5] ):
if state[3] == 0:
params[sa, 4] = 1
else:
params[sa, 0] = 1
elif np.array_equal( state[[0, 1, 2, 4]], [2, 0, 3, 5] ):
if state[3] == 1:
params[sa, 4] = 1
else:
params[sa, 1] = 1
elif np.array_equal( state[[0, 1, 2, 4]], [0, 0, 3, 5] ):
if state[3] == 2:
params[sa, 4] = 1
else:
params[sa, 2] = 1
else:
params[sa, state[2]] = 1
self.transitionstruct[2] = FactoredStruct( scope, params, np.zeros( ( self.nstates * self.nactions, np.size( params, 0 ) ) ) )
# destination
scope = np.array( [2, 3] )
params = np.zeros( ( self.nelements( scope ), 3 ) )
for sa in range( np.size( params, 0 ) ):
state = self.decode( sa, scope )
if state[0] == 4:
params[sa, 0] = 0.3333
params[sa, 1] = 0.3333
params[sa, 2] = 0.3334
else:
params[sa, state[1]] = 1;
self.transitionstruct[3] = FactoredStruct( scope, params, np.zeros( ( self.nstates * self.nactions, np.size( params, 0 ) ) ) )
self.assignmappings()
def resetstate( self ):
    # Draw each of the four state factors uniformly from {0, 1, 2}.
    # Same sequence of four randint(0, 2) calls as before, so behaviour is
    # identical for a given random seed.
    factors = np.array( [random.randint( 0, 2 ) for _ in range( 4 )] )
    return self.encode( factors, range( self.nstatefactors ) )
|
985,008 | af7cf741e2570641dc7d6affda7d164609d0b883 | """ Majority Element: An element that occurs in the array more than n/2 times. For size 5 -> 3, 6 -> 4 """
import math
"""Solution: """
def majority_element(a) -> int:
    """Return the index of the first occurrence of the majority element of `a`.

    A majority element occurs strictly more than n//2 times. Returns -1 when
    no such element exists. O(n^2) brute force; see majority_element_eff for
    the linear-time version.
    """
    n = len(a)
    for i in range(n):
        # Count occurrences of a[i] in a[i:]; earlier duplicates were already
        # checked at their own first occurrence.
        # (Fix: removed the debug print that ran on every inner match.)
        curr_count = sum(1 for j in range(i, n) if a[j] == a[i])
        if curr_count > n // 2:
            return i
    return -1
def majority_element_eff(a) -> int:
    """Return an index of the majority element of `a` (Boyer-Moore voting).

    Phase 1 selects a candidate index in O(n); phase 2 verifies that the
    candidate really occurs more than n//2 times. Returns -1 otherwise.
    """
    n = len(a)
    candidate, votes = 0, 1
    for i in range(1, n):
        votes += 1 if a[i] == a[candidate] else -1
        if votes == 0:
            candidate, votes = i, 1
    # Verification pass: the surviving candidate is only a *possible* majority.
    occurrences = sum(1 for x in a if x == a[candidate])
    return candidate if occurrences > n // 2 else -1
def main():
    """Demo: run both implementations on a sample array and print the results."""
    sample = [6, 8, 4, 8, 8]
    print(majority_element_eff(sample))
    print(majority_element(sample))


# Guarded entry point: run the demo only when executed as a script.
if __name__ == "__main__":
    main()
|
985,009 | 09dec98baeb079f36955e8c1904981d3c4e2aa69 | """
An integer interval [a, b] (for integers a < b) is a set of all consecutive
integers from a to b, including a and b.
Find the minimum size of a set S such that for every integer interval A in
intervals, the intersection of S with A has size at least 2.
Example 1:
Input: intervals = [[1, 3], [1, 4], [2, 5], [3, 5]]
Output: 3
Explanation:
Consider the set S = {2, 3, 4}. For each interval, there are at least 2
elements from S in the interval.
Also, there isn't a smaller size set that fulfills the above condition.
Thus, we output the size of this set, which is 3.
Example 2:
Input: intervals = [[1, 2], [2, 3], [2, 4], [4, 5]]
Output: 5
Explanation:
An example of a minimum sized set is {1, 2, 3, 4, 5}.
Note:
intervals will have length in range [1, 3000].
intervals[i] will have length 2, representing some integer interval.
intervals[i][j] will be an integer in [0, 10^8].
"""
class Solution:
    def intersectionSizeTwo(self, intervals):
        """
        :type intervals: List[List[int]]
        :rtype: int

        Greedy: process intervals sorted by right endpoint, maintaining the
        two largest values already placed in S. An interval that contains
        neither needs two new points (placed at its right edge); one that
        contains exactly one needs a single extra point.
        """
        second = largest = float('-inf')
        count = 0
        for start, end in sorted(intervals, key=lambda iv: iv[1]):
            if start > largest:
                # Interval misses both chosen points: add end-1 and end.
                count += 2
                second, largest = end - 1, end
            elif start > second:
                # Interval contains `largest` only: add one point at its end.
                count += 1
                second, largest = largest, end
            # else: both chosen points already lie inside this interval.
        return count
if __name__ == '__main__':
    # Exercise the solver on the two documented examples plus a few extras.
    solver = Solution()
    cases = [
        [[1, 3], [1, 4], [2, 5], [3, 5]],
        [[1, 2], [2, 3], [2, 4], [4, 5]],
        [[1, 5]],
        [[4, 14], [6, 17], [7, 14], [14, 21], [4, 7]],
        [[33, 44], [42, 43], [13, 37], [24, 33], [24, 33], [25, 48], [10, 47], [18, 24], [29, 37], [7, 34]],
    ]
    for case in cases:
        print(solver.intersectionSizeTwo(case))
|
985,010 | d9d08ddfe894124de1d93062f65fc890504b5dda | cantidad=float(input("Introduce la cantidad a invertir: "))
interes=float(input("Introduzca el interes anual: "))
anos=int(input("Introduzca la cantidad de años: "))
for i in range(anos):
cantidad *= 1 + interes / 100
print("Capital tras " + str(i+1) + " años: " + str(round(cantidad, 2))) |
985,011 | e30035c6deb06684917f282c00a0004a1c39c1f5 | coins = [1, 2, 5, 10, 20, 50, 100, 200]
def solution(n):
table = [[]] * (n + 1)
for i in range(n + 1):
table[i] = [[]] * (len(coins))
table[i][0] = 1
for i in range(n + 1):
for j in range(1, len(coins)):
table[i][j] = table[i][j - 1]
if coins[j] <= i:
table[i][j] += table[i - coins[j]][j]
return table[n][len(coins) - 1]
if __name__ == "__main__":
    # Time the full Project-Euler-sized run (amount = 200).
    import time

    started = time.time()
    print(solution(200))
    print("Solution took {} seconds".format(time.time() - started))
985,012 | 5a29c9fde4b8b4069c072a386cf839c41288b51e | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 27 22:08:41 2019
@author: YuxuanLong
"""
import numpy as np
"""
# feature engineering
# data filtering and selection
"""
def cal_mask(mask_999, index):
    """
    Build a boolean row mask selecting samples whose -999 entries are exactly
    the columns listed in `index`: all of those columns are -999 and no others.

    Returns the mask and the number of selected rows.
    """
    rows_with_exact_count = np.sum(mask_999, axis=1) == len(index)
    rows_covering_index = np.all(mask_999[:, index], axis=1)
    mask = rows_covering_index & rows_with_exact_count
    return mask, np.sum(mask)
def save_data(features, labels, mask, file_name):
    """
    Select the rows of `features` picked by the boolean `mask`, append the
    matching labels as a final column, and store the result as an .npy file.
    """
    selected_labels = labels[mask].reshape(-1, 1)
    combined = np.hstack((features[mask, :], selected_labels))
    np.save(file_name, combined)
def processing():
    """Load the raw training CSV, split the samples into 6 missing-value
    patterns (which feature columns equal the -999 sentinel), and save each
    subset as a separate .npy file of [features, label-column].
    """
    print('Loading the training data...')
    train_file = './data/train.csv'
    # Read everything as unicode strings first; the label column is textual.
    train_data = np.genfromtxt(train_file, delimiter = ',', dtype = 'U')
    print('Separate the data into 6 types and save them...')
    data = train_data[1:]  # drop the CSV header row
    labels = data[:, 1]
    # Map the textual labels onto +/-1 ('b' -> -1, 's' -> +1).
    labels[labels == 'b'] = -1.0
    labels[labels == 's'] = 1.0
    labels = np.array(list(labels), dtype = float)
    features = np.array(list(data[:, 2:]), dtype = float)
    # -999.0 is this dataset's sentinel for "value missing".
    mask_999 = (features == -999.0)
    # Fraction of missing entries per feature column (diagnostic, unused below).
    num_999 = np.sum(mask_999, axis = 0) / mask_999.shape[0]
    # we roughly divide data into 8 types
    # A, B, C all refer to feature ids having some 999s
    # note id starts from 0
    A = [0]
    B = [4, 5, 6, 12, 26, 27, 28]
    C = [23, 24, 25]
    AB = A + B
    AC = A + C
    BC = B + C
    ABC = A + B + C
    # D type: no any 999s
    # Counts in the trailing comments are background ~ signal sample counts
    # observed for this particular dataset.
    mask_A, A_num = cal_mask(mask_999, A) # 4429 ~ 571
    mask_B, B_num = cal_mask(mask_999, B) # 69982 ~ 27005
    mask_C, C_num = cal_mask(mask_999, C) # 0
    mask_AB, AB_num = cal_mask(mask_999, AB) # 7562 ~ 705
    mask_AC, AC_num = cal_mask(mask_999, AC) # 0
    mask_BC, BC_num = cal_mask(mask_999, BC) # 73790 ~ 23933
    mask_ABC, ABC_num = cal_mask(mask_999, ABC) # 26123 ~ 1559
    mask_D = np.all(~mask_999, axis = 1) # 68114 ~ 31894
    D_num = np.sum(mask_D)
    # print(A_num + B_num + C_num + AB_num + AC_num + BC_num + ABC_num + D_num)
    # There are two types that do not exist
    # In summary, 6 types of feature
    # we separate data and record it into 6 files
    # the numpy file is save as [features, labels]
    save_data(features, labels, mask_A, './train_data/data_A')
    save_data(features, labels, mask_B, './train_data/data_B')
    save_data(features, labels, mask_AB, './train_data/data_AB')
    save_data(features, labels, mask_BC, './train_data/data_BC')
    save_data(features, labels, mask_ABC, './train_data/data_ABC')
    save_data(features, labels, mask_D, './train_data/data_D')


if __name__ == '__main__':
    processing()
|
985,013 | 4832b385e64223c548dd6b97513d128e74bbba6d | #!/usr/bin/env python
# -*-encoding:UTF-8-*-
# Generic class-based views
# A minimal generic view class
from django.http import HttpResponse
from django.views.generic import View
class MyView(View):
    """Minimal class-based view: answers any GET request with a plain greeting."""

    def get(self, request, *args, **kwargs):
        return HttpResponse('Hello World!')
# A generic view class that can pass variables into the template context
from django.views.generic.base import TemplateView
class HomePageView(TemplateView):
    """Template view that injects two phone product names into the context."""
    template_name = "product1.html"

    def get_context_data(self, **kwargs):
        context = super(HomePageView, self).get_context_data(**kwargs)
        context.update({
            'phone_product1': 'iPhone',
            'phone_product2': 'Android Mobile',
        })
        return context
# Generic views for redirects
# List view (ListView): needs ListView and the Movie model
from django.views.generic import ListView,DeleteView
from testdb.models import Movie
# Careful: it is `context_object_name`, not `comtext_object_name` — there is no
# autocompletion for these attributes, so a typo fails silently!
class MovieView(ListView):
    """List every Movie via movie_view.html; the template sees them as `movies`."""
    model = Movie
    template_name = 'movie_view.html'
    context_object_name = 'movies'
class QueryMovieView(ListView):
    """Like MovieView, but restricted to movies whose type is '喜剧' (comedy)."""
    model = Movie
    template_name = 'movie_view.html'
    context_object_name = 'movies'

    def get_queryset(self):
        # Narrow the default queryset; the filter value must match the DB exactly.
        return super(QueryMovieView, self).get_queryset().filter(type='喜剧')
# Filter via a query-string parameter, e.g.:
# http://127.0.0.1:8000/param_query_movie_view/?name=流浪地球
class ParamQueryMovieView(ListView):
    """List movies whose name equals the `name` GET parameter."""
    model = Movie
    template_name = 'movie_view.html'
    context_object_name = 'movies'

    def get_queryset(self):
        requested_name = self.request.GET.get('name')
        base_queryset = super(ParamQueryMovieView, self).get_queryset()
        return base_queryset.filter(name=requested_name)
# Detail view (DetailView)
# Accessed as e.g.: http://127.0.0.1:8000/movie_detail/13/
from django.views.generic import DetailView


class MovieDetailView(DetailView):
    """Show a single Movie, looked up by pk from the URL.

    Fix: the class previously inherited DeleteView (imported alongside
    ListView above) although the surrounding comments and the
    queryset/template/context setup all describe a detail page. DetailView
    provides the intended read-only behaviour; DeleteView would render a
    delete-confirmation flow and delete the object on POST.
    """
    queryset = Movie.objects.all()
    template_name = 'movie_detail.html'
    context_object_name = 'movie'

    def get_object(self, queryset=None):
        # Delegates to DetailView's pk/slug lookup; kept as an explicit hook
        # for adding per-object logic later.
        obj = super(MovieDetailView, self).get_object(queryset=queryset)
        return obj
985,014 | d5fa986f1edf2a9d316d9f2fd2e83afa0b5a39aa | import re
"""Find a string where there is space character \b around the word, beginning with 7,8 or 9 followed by 9 digits"""
test_str = 'call me at 7234597890 or 5780948489 '
regex_pattern = re.compile(r'\b[^789]\d+\b')
reg_result = regex_pattern.search(test_str)
print(reg_result.group() if reg_result else "No match found") # this returns the first occurrence of the string
reg_result = re.findall(r'\b[789]\d{9}\b', test_str) # returns a list of all occurrences
print(reg_result)
test_str = "This is generated on 01/5-11. Executed on 05-05-2018. My phone no :963 222.4597"
reg_result = re.findall(r'\d{1,2}-\d{1,2}-\d{1,2}', test_str)
print(reg_result)
reg_result = re.findall(r'\d{1,2}[-,/]\d{1,2}[-,/]\d{2,4}', test_str)
print(reg_result)
""" Find a pattern matching 3 digits followed by -,/or. then again 3 digits and last 4 digits and substitute
it with a -"""
reg_result = re.findall(r'\d{3}[-/. ]\d{3}[-/. ]\d{4}', test_str)
for result in reg_result:
phone = re.sub(r'[., ]', '-', result)
print(phone)
|
985,015 | b5b40f395bdacc640088008402bd8c1a3cc1cb26 | from functools import lru_cache
def permutations(xs):
    """Return all permutations of the sequence `xs` as a list of lists.

    Recursive definition: fix each element in turn as the head and permute
    the remainder. Fix: the empty sequence now yields one empty permutation
    ([[]]) instead of no permutations at all, matching the mathematical
    convention (and itertools.permutations).
    """
    if len(xs) <= 1:
        return [list(xs)]
    return [
        [xs[i]] + rest
        for i in range(len(xs))
        for rest in permutations(xs[:i] + xs[i + 1:])
    ]
|
985,016 | 2eee2569c8f6eabe767e84807d3b76a0dda97051 | import os
import random
from termcolor import colored, cprint
from colors import *
score = 0
# Board dimensions; odd values so the interior pillar pattern lines up.
lines = 29
columns = 29

# Build the board: 'X' border all around, 'X' pillars where both row and
# column are even, free space (' ') everywhere else.
grid = []
for row in range(lines):
    grid.append([])
    for col in range(columns):
        if ((row == 0) or (col == 0) or (row == lines - 1) or
                (col == columns - 1)):
            grid[row].append('X')
        elif row % 2:
            grid[row].append(' ')
        elif not(col % 2):
            grid[row].append('X')
        else:
            grid[row].append(' ')

# Scatter 40 '/' markers on random free cells (rows/cols 3 and up only);
# re-roll until an empty cell is found. Their game meaning is defined by
# whatever consumes this grid.
for i in range(40):
    x = random.randrange(3, lines)
    y = random.randrange(3, columns)
    while grid[x][y] != ' ':
        x = random.randrange(3, lines)
        y = random.randrange(3, columns)
    grid[x][y] = '/'

# def print_board(grid):
#     os.system('clear')
#     for row in grid:
#         # for ch in row:
def convert(grid):
    """Expand the logical grid into a character buffer: every cell becomes a
    4-wide x 2-tall tile. Digit cells ('0'-'3') are boxed as "[nn]"; any other
    cell character is simply repeated across the tile.

    Uses the module-level `lines`/`columns` for the output size.
    NOTE(review): tiles are written at output[2*j][4*i] for grid[i][j], i.e.
    the output is transposed relative to the grid; the square board used here
    makes the dimensions match either way — confirm the transpose is intended.
    """
    output = [[" " for i in range(4*columns)] for j in range(2*lines)]
    for i in range(len(grid)):
        for j in range(len(grid)):
            if (grid[i][j] == '3' or grid[i][j] == '2' or
                    grid[i][j] == '1' or grid[i][j] == '0'):
                # Digit cell: render as a bracketed double-character box.
                output[2 * j][4 * i] = '['
                output[2 * j][4 * i + 1] = grid[i][j]
                output[2 * j][4 * i + 2] = grid[i][j]
                output[2 * j][4 * i + 3] = ']'
                output[2 * j + 1][4 * i] = '['
                output[2 * j + 1][4 * i + 1] = grid[i][j]
                output[2 * j + 1][4 * i + 2] = grid[i][j]
                output[2 * j + 1][4 * i + 3] = ']'
            else:
                # Non-digit cell: repeat the character over the whole tile.
                output[2 * j][4 * i] = grid[i][j]
                output[2 * j][4 * i + 1] = grid[i][j]
                output[2 * j][4 * i + 2] = grid[i][j]
                output[2 * j][4 * i + 3] = grid[i][j]
                output[2 * j + 1][4 * i] = grid[i][j]
                output[2 * j + 1][4 * i + 1] = grid[i][j]
                output[2 * j + 1][4 * i + 2] = grid[i][j]
                output[2 * j + 1][4 * i + 3] = grid[i][j]
    return output
def print_board(grid):
    """Clear the terminal and draw the expanded board, colouring special cells.

    Colour scheme (bcolors from the local `colors` module): 'B' blue,
    'E' red, '/' green, digits and 'e' yellow; everything else uncoloured.
    """
    output = convert(grid)
    os.system('clear')
    for i in range(len(output)):
        # Fix: the inner bound previously read len(output[row]), silently
        # depending on the module-level loop variable `row` left over from the
        # grid-building code; it must iterate over the current row `i`.
        for j in range(len(output[i])):
            cell = output[i][j]
            if cell == "B":
                print(bcolors.OKBLUE + cell + bcolors.ENDC, end="")
            elif cell == "E":
                print(bcolors.FAIL + cell + bcolors.ENDC, end="")
            elif cell == "/":
                print(bcolors.OKGREEN + cell + bcolors.ENDC, end="")
            elif cell in ("1", "2", "3", "e", "0"):
                print(bcolors.WARNING + cell + bcolors.ENDC, end="")
            else:
                print(cell, end="")
        # End of one display row (source indentation was lost; this newline is
        # placed per-row, the only layout that renders a 2-D board).
        print('\r')
985,017 | 5d904d3bc4b1c97c5d5b6333df76488d7f8dec6d | import parameters
from ortools.constraint_solver import pywrapcp
import sys
class Solver:
    """Constraint-programming staff scheduler built on or-tools' classic CP
    solver (pywrapcp). Python 2 source (print statements).

    Builds 0/1 assignment variables x[employee][shift] plus per-week weekend
    indicators, then posts coverage, workload, adjacency and weekend
    constraints in __init__.

    NOTE(review): all the dict()/list() attributes below are *class-level*
    mutable objects, shared by every Solver instance; only `solver` and
    `data` are rebound per instance in __init__. Creating more than one
    Solver would mix their variables — confirm single-instance usage.
    """
    solver = None
    decision_builder = None
    data = None
    # -- VARS
    x_var = dict() # employee -> dict(): shift -> variable
    we_var = dict() # employee -> dict(): int representing week -> variable
    all_vars = list()
    # -- CONSTRS
    cover_shift = dict() # shift -> constr
    shift_per_week = dict() # employee, int (week) -> constr
    adjacent_shifts = dict() # employee, shift -> constr
    shifts_max_week = dict() # employee, shift name, int (week) -> constr
    we_var_definition = dict() # employee, int(week) -> constr
    we_consecutive = dict() # employee, int(week) -> constr

    def __init__(self, data):
        # Build the full model eagerly: variables first, then all constraints.
        self.data = data
        self.solver = pywrapcp.Solver("simple_example")
        self.create_x_vars()
        self.create_we_vars()
        self.create_cover_shift_constraints()
        self.create_shift_per_week_constraints()
        self.create_adjacent_shifts_constraints()
        self.create_shifts_max_week_constraints()
        self.create_we_var_definition_constraints()
        self.create_we_consecutive_constraints()

    def create_x_vars(self):
        # One 0/1 variable per (employee, shift) the employee is available for.
        for employee in self.data.employees:
            self.x_var[employee] = dict()
            for shift in self.data.shifts:
                if not employee.unavailable[shift]:
                    self.x_var[employee][shift] = self.solver.IntVar(0,1,"x-E{0}-S{1}-D{2}".format(employee.id, shift.name, shift.day))
                    self.all_vars.append(self.x_var[employee][shift])

    def create_we_vars(self):
        # One 0/1 "works this weekend" indicator per (employee, week).
        for employee in self.data.employees:
            self.we_var[employee] = dict()
            for we in range(self.data.weeks):
                self.we_var[employee][we] = self.solver.IntVar(0,1,"w-{0}-{1}".format(employee.name, we))
                self.all_vars.append(self.we_var[employee][we])

    def create_cover_shift_constraints(self):
        # -- mandatory shifts must be taken
        for shift in self.data.shifts:
            if shift.mandatory:
                self.solver.Add(sum(self.x_var[e][shift] for e in self.data.employees if not e.unavailable[shift]) > 0)

    def create_shift_per_week_constraints(self):
        # -- employee must take correct number of shifts per week
        for e in self.data.employees:
            for w in range(self.data.weeks):
                self.solver.Add(sum(self.x_var[e][s] for s in self.data.shifts.select(name='*', mandatory='*', day='*', week=w) if not e.unavailable[s]) == e.shifts_per_week[w])

    def create_adjacent_shifts_constraints(self):
        # -- for each set of adjacent shifts, select at most 1, for each employee
        # (earlier adjacency-based formulation kept for reference)
        # counter = 0
        # for shift in self.data.shifts:
        #     adjacents = self.data.shifts.get_adjacent(shift)
        #     print "shift", shift
        #     print "adjacents", [(s.name, s.day) for s in adjacents]
        #     for e in self.data.employees:
        #         if not e.unavailable[shift]:
        #             print e.name, [(s.name, s.day) for s in adjacents if not e.unavailable[s]]
        #             self.solver.Add(sum(self.x_var[e][s] for s in adjacents if not e.unavailable[s]) <= 1)
        # Current formulation: at most one shift per non-holiday day.
        # NOTE(review): `e.id in [0]` limits this constraint (and the debug
        # print) to employee 0 only — looks like leftover debugging; confirm.
        for d in range(self.data.days):
            if not self.data.holidays[d]:
                shifts = self.data.shifts.select(name='*', mandatory='*', day=d,week='*')
                for e in self.data.employees:
                    shifts_e = [s for s in shifts if not e.unavailable[s]]
                    if shifts_e and e.id in [0]:
                        self.solver.Add(sum(self.x_var[e][s] for s in shifts_e) < 2)
                        print d, e.name, ", ".join(map(lambda x: getattr(x, 'name'), shifts_e))

    def create_shifts_max_week_constraints(self):
        # -- for each shift type, employee can take a maximum per week
        for shift_name, max_shifts in parameters.shifts_max_week.items():
            for e in self.data.employees:
                for w in range(self.data.weeks):
                    self.shifts_max_week[e, shift_name, w] = self.solver.Add(
                        sum(self.x_var[e][s] for s in self.data.shifts.select(name=shift_name, mandatory='*', day='*', week=w)) <= max_shifts)

    def create_we_var_definition_constraints(self):
        # -- get number of shifts in a weekend
        # Link we_var to the Saturday (day w*7+5) / Sunday (day w*7+6) shifts:
        # taking any weekend shift forces we_var[e][w] to 1.
        for w in range(self.data.weeks):
            we_shifts = self.data.shifts.select(name='*', mandatory='*', day= w*7 +5, week=w) + self.data.shifts.select(
                name='*', mandatory='*', day=w*7+ 6, week=w)
            for e in self.data.employees:
                self.we_var_definition[e,w] = self.solver.Add(sum(self.x_var[e][s] for s in we_shifts if not e.unavailable[s]) < len(we_shifts)*self.we_var[e][w])

    def create_we_consecutive_constraints(self):
        # Bound worked weekends over each sliding window of max_we_consecutive
        # weeks. NOTE(review): "sum <= max_we_consecutive" over a window of
        # that same size is always satisfied — likely should be "<= max - 1"
        # or a strict bound; confirm the intended rule.
        for w in range(self.data.weeks - parameters.max_we_consecutive):
            for e in self.data.employees:
                self.we_consecutive[e,w] = self.solver.Add(sum(self.we_var[e][w + i] for i in range(parameters.max_we_consecutive)) <= parameters.max_we_consecutive)

    def solve(self):
        """Search with a random variable order, write the first solution found
        to solutions/sol-0.txt and exit the process."""
        self.decision_builder = self.solver.Phase(self.all_vars, self.solver.CHOOSE_RANDOM, self.solver.ASSIGN_MIN_VALUE)
        solutions_limit = self.solver.SolutionsLimit(1)
        # NOTE(review): time_limit is created but never passed to Solve below,
        # so it currently has no effect.
        time_limit = self.solver.TimeLimit(3 * 1000)
        collector = self.solver.AllSolutionCollector()
        collector.Add(self.all_vars)
        if self.solver.Solve(self.decision_builder, [solutions_limit, collector]):
            print "Some solutions found"
            # self.print_solution()
            sol = 0
            while self.solver.NextSolution():
                print "writing ..."
                with open("solutions/sol-%s.txt"%sol, 'w') as target:
                    for day in range(self.data.days):
                        target.write("DAY %s\n" %day)
                        shifts = self.data.shifts.select(name='*', mandatory='*', day=day, week='*')
                        for s in shifts:
                            for e in self.data.employees:
                                if not e.unavailable[s] and self.x_var[e][s].Value() > 0:
                                    target.write("{0}: {1}\n".format(s.name, e.name))
                # NOTE(review): sys.exit() terminates after the first written
                # solution, making the sol += 1 below unreachable.
                sys.exit()
                sol += 1
        else:
            print "No solutions found"

    def print_solution(self):
        """Debug dump: print every collected solution day by day."""
        sol = 0
        while self.solver.NextSolution():
            print "Solution", sol
            sol += 1
            for day in range(self.data.days):
                print "DAY",day
                shifts = self.data.shifts.select(name='*',mandatory='*',day=day,week='*')
                for s in shifts:
                    for e in self.data.employees:
                        if not e.unavailable[s] and self.x_var[e][s].Value() > 0:
                            print "{0}: {1}".format(s, e.name)
        # sys.exit()
        print("\nNumber of solutions found:", sol)

    def populate_solution(self):
        # Placeholder: not implemented yet.
        pass

    def to_string(self):
        # Placeholder: not implemented yet.
        pass
|
985,018 | 13a0cb67134be6c04d8f4e612e5ecaa6ec8d7680 | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 1 19:28:42 2018
@author: Administrator
"""
from selenium import webdriver
import time
import csv
# Simulated browser session: scrape crude-oil news headlines from investing.com
# into a CSV of (time, headline) rows.
driver = webdriver.Chrome(executable_path='chromedriver.exe')
driver.get("https://www.investing.com/search/?q=crude%20oil&tab=news")
# Long pause so the page (and any manual login/captcha step) can complete.
time.sleep(100)
# NOTE(review): csv.writer on a file opened without newline='' produces blank
# rows on Windows; also consider a `with` block so the file closes on error.
csv_file = open("C:/Users/nihao/Desktop/headlines.csv","w")
writer = csv.writer(csv_file)
writer.writerow(['time','headlines'])
#2213 is the number of news
for i in range(1,2213):
    # The i-th result row: its <time> element and its headline anchor.
    newstime = driver.find_element_by_xpath('//*[@id="fullColumn"]/div/div[4]/div[3]/div/div['+str(i)+']/div/div/time').text
    headlines = driver.find_element_by_xpath('//*[@id="fullColumn"]/div/div[4]/div[3]/div/div['+str(i)+']/div/a').text
    writer.writerow([newstime,headlines])
    if i%20 ==0:
        # Every 20 items, scroll to the bottom to trigger lazy loading, then wait.
        js="var q=document.documentElement.scrollTop=100000"
        driver.execute_script(js)
        time.sleep(4)
csv_file.close()
driver.close()
|
985,019 | 5b42e61d05ea03a0f5902091bf8db31b710e85b8 | # Generated by Django 3.0.2 on 2020-02-13 05:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: link StudentAttendance to its ClassRoom and Student.

    DO_NOTHING means Django takes no action on related-object deletion; the
    database is left to enforce (or ignore) referential integrity.
    """

    initial = True

    dependencies = [
        ('classes', '0001_initial'),
        ('management', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='studentattendance',
            name='classroom',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='management.ClassRoom'),
        ),
        migrations.AddField(
            model_name='studentattendance',
            name='student',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='management.Student'),
        ),
    ]
|
985,020 | 1ecab2532b0ef0acd8fb69fb192b0557b35050f5 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'TelaBuscarOs.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QDesktopWidget
class Ui_TelaBuscarOs(object):
    """Qt Designer-generated UI for the "Buscar Ordem de Serviço" (search
    service order) window. Generated code — regeneration from the .ui file
    will overwrite any edits; put behaviour in a separate controller class.
    """

    def setupUi(self, TelaBuscarOs):
        """Build all widgets on the given QMainWindow: search field, read-only
        device/defect fields, and the action buttons along the bottom."""
        # --- window: fixed size, centered on the available screen ---
        TelaBuscarOs.setObjectName("TelaBuscarOs")
        TelaBuscarOs.setWindowModality(QtCore.Qt.NonModal)
        TelaBuscarOs.resize(718, 693)
        # Fix the window size so it cannot be maximized/resized.
        TelaBuscarOs.setFixedSize(718,693)
        # Center the window on the screen.
        c = TelaBuscarOs.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        c.moveCenter(cp)
        TelaBuscarOs.move(c.topLeft())
        #
        font = QtGui.QFont()
        font.setFamily("Hack")
        font.setPointSize(13)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(9)
        TelaBuscarOs.setFont(font)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/Icone/logo-icone.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        TelaBuscarOs.setWindowIcon(icon)
        TelaBuscarOs.setLayoutDirection(QtCore.Qt.LeftToRight)
        TelaBuscarOs.setStyleSheet("background-color: \'dodgerblue\';\n"
"font: 75 13pt \"Hack\";\n"
"color: rgb(238, 238, 236);")
        TelaBuscarOs.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
        self.centralwidget = QtWidgets.QWidget(TelaBuscarOs)
        self.centralwidget.setObjectName("centralwidget")
        # --- title banner ---
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(0, 0, 721, 71))
        font = QtGui.QFont()
        font.setFamily("FreeSans")
        font.setPointSize(18)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(9)
        self.label.setFont(font)
        self.label.setStyleSheet("font: 75 18pt \"FreeSans\";\n"
"background-color: rgb(85, 87, 83);")
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        # --- device fields (disabled until a search succeeds) ---
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(60, 180, 111, 21))
        self.label_2.setObjectName("label_2")
        self.txtMarca = QtWidgets.QLineEdit(self.centralwidget)
        self.txtMarca.setEnabled(False)
        self.txtMarca.setGeometry(QtCore.QRect(200, 280, 141, 25))
        self.txtMarca.setStyleSheet("color: rgb(0, 0, 0);\n"
"background-color: rgb(238, 238, 236);")
        self.txtMarca.setObjectName("txtMarca")
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setGeometry(QtCore.QRect(60, 250, 81, 21))
        self.label_3.setObjectName("label_3")
        self.cmbTipo = QtWidgets.QComboBox(self.centralwidget)
        self.cmbTipo.setEnabled(False)
        self.cmbTipo.setGeometry(QtCore.QRect(60, 280, 131, 25))
        self.cmbTipo.setStyleSheet("color: rgb(0, 0, 0);\n"
"selection-background-color: rgb(136, 138, 133);\n"
"background-color: rgb(238, 238, 236);")
        self.cmbTipo.setObjectName("cmbTipo")
        self.cmbTipo.addItem("")
        self.cmbTipo.addItem("")
        self.cmbTipo.addItem("")
        self.cmbTipo.addItem("")
        self.label_4 = QtWidgets.QLabel(self.centralwidget)
        self.label_4.setGeometry(QtCore.QRect(200, 250, 81, 21))
        self.label_4.setObjectName("label_4")
        self.label_5 = QtWidgets.QLabel(self.centralwidget)
        self.label_5.setGeometry(QtCore.QRect(360, 250, 81, 21))
        self.label_5.setObjectName("label_5")
        self.txtModelo = QtWidgets.QLineEdit(self.centralwidget)
        self.txtModelo.setEnabled(False)
        self.txtModelo.setGeometry(QtCore.QRect(360, 280, 141, 25))
        self.txtModelo.setStyleSheet("color: rgb(0, 0, 0);\n"
"background-color: rgb(238, 238, 236);")
        self.txtModelo.setObjectName("txtModelo")
        self.label_6 = QtWidgets.QLabel(self.centralwidget)
        self.label_6.setGeometry(QtCore.QRect(520, 250, 101, 21))
        self.label_6.setObjectName("label_6")
        self.txtNumSerie = QtWidgets.QLineEdit(self.centralwidget)
        self.txtNumSerie.setEnabled(False)
        self.txtNumSerie.setGeometry(QtCore.QRect(520, 280, 121, 25))
        self.txtNumSerie.setStyleSheet("color: rgb(0, 0, 0);\n"
"background-color: rgb(238, 238, 236);")
        self.txtNumSerie.setObjectName("txtNumSerie")
        self.label_7 = QtWidgets.QLabel(self.centralwidget)
        self.label_7.setGeometry(QtCore.QRect(60, 320, 101, 21))
        self.label_7.setObjectName("label_7")
        self.label_8 = QtWidgets.QLabel(self.centralwidget)
        self.label_8.setGeometry(QtCore.QRect(360, 320, 121, 21))
        self.label_8.setObjectName("label_8")
        self.label_9 = QtWidgets.QLabel(self.centralwidget)
        self.label_9.setGeometry(QtCore.QRect(260, 500, 81, 24))
        self.label_9.setStyleSheet("font: 75 15pt \"Hack\";")
        self.label_9.setObjectName("label_9")
        self.txtValor = QtWidgets.QLineEdit(self.centralwidget)
        self.txtValor.setEnabled(False)
        self.txtValor.setGeometry(QtCore.QRect(360, 500, 101, 25))
        self.txtValor.setStyleSheet("color: rgb(0, 0, 0);\n"
"background-color: rgb(238, 238, 236);")
        self.txtValor.setObjectName("txtValor")
        # --- action buttons (bottom row) ---
        self.btnBuscar = QtWidgets.QPushButton(self.centralwidget)
        self.btnBuscar.setGeometry(QtCore.QRect(40, 620, 121, 41))
        font = QtGui.QFont()
        font.setFamily("Hack")
        font.setPointSize(14)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(9)
        self.btnBuscar.setFont(font)
        self.btnBuscar.setStyleSheet("background-color: rgb(50, 150, 255);\n"
"font: 75 14pt \"Hack\";\n"
"color: rgb(0,0,0);")
        self.btnBuscar.setObjectName("btnBuscar")
        self.txtDefeito = QtWidgets.QTextEdit(self.centralwidget)
        self.txtDefeito.setEnabled(False)
        self.txtDefeito.setGeometry(QtCore.QRect(60, 350, 281, 121))
        self.txtDefeito.setStyleSheet("color: rgb(0, 0, 0);\n"
"background-color: rgb(238, 238, 236);")
        self.txtDefeito.setTabChangesFocus(True)
        self.txtDefeito.setObjectName("txtDefeito")
        self.txtSolucao = QtWidgets.QTextEdit(self.centralwidget)
        self.txtSolucao.setEnabled(False)
        self.txtSolucao.setGeometry(QtCore.QRect(360, 350, 281, 121))
        self.txtSolucao.setStyleSheet("color: rgb(0, 0, 0);\n"
"background-color: rgb(238, 238, 236);")
        self.txtSolucao.setTabChangesFocus(True)
        self.txtSolucao.setObjectName("txtSolucao")
        self.label_10 = QtWidgets.QLabel(self.centralwidget)
        self.label_10.setGeometry(QtCore.QRect(60, 110, 291, 21))
        self.label_10.setObjectName("label_10")
        self.txtNumOrdem = QtWidgets.QLineEdit(self.centralwidget)
        self.txtNumOrdem.setGeometry(QtCore.QRect(60, 140, 121, 25))
        self.txtNumOrdem.setStyleSheet("color: rgb(0, 0, 0);\n"
"background-color: rgb(238, 238, 236);")
        self.txtNumOrdem.setObjectName("txtNumOrdem")
        self.btnAlterar = QtWidgets.QPushButton(self.centralwidget)
        self.btnAlterar.setEnabled(False)
        self.btnAlterar.setGeometry(QtCore.QRect(300, 620, 121, 41))
        font = QtGui.QFont()
        font.setFamily("Hack")
        font.setPointSize(14)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(9)
        self.btnAlterar.setFont(font)
        self.btnAlterar.setStyleSheet("background-color: rgb(227, 227, 0);\n"
"font: 75 14pt \"Hack\";\n"
"color: rgb(0,0,0);")
        self.btnAlterar.setObjectName("btnAlterar")
        self.btnDeletar = QtWidgets.QPushButton(self.centralwidget)
        self.btnDeletar.setEnabled(False)
        self.btnDeletar.setGeometry(QtCore.QRect(430, 620, 121, 41))
        font = QtGui.QFont()
        font.setFamily("Hack")
        font.setPointSize(14)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(9)
        self.btnDeletar.setFont(font)
        self.btnDeletar.setStyleSheet("background-color: rgb(78, 154, 6);\n"
"background-color: rgb(227, 0, 0);\n"
"font: 75 14pt \"Hack\";\n"
"color: rgb(0,0,0);")
        self.btnDeletar.setObjectName("btnDeletar")
        self.btnLimpar = QtWidgets.QPushButton(self.centralwidget)
        self.btnLimpar.setEnabled(False)
        self.btnLimpar.setGeometry(QtCore.QRect(560, 620, 121, 41))
        font = QtGui.QFont()
        font.setFamily("Hack")
        font.setPointSize(14)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(9)
        self.btnLimpar.setFont(font)
        self.btnLimpar.setStyleSheet("background-color: rgb(170, 170, 170);\n"
"font: 75 14pt \"Hack\";\n"
"color: rgb(0,0,0);")
        self.btnLimpar.setObjectName("btnLimpar")
        self.txtCliente = QtWidgets.QLineEdit(self.centralwidget)
        self.txtCliente.setGeometry(QtCore.QRect(60, 210, 441, 25))
        self.txtCliente.setStyleSheet("color: rgb(0, 0, 0);\n"
"background-color: rgb(238, 238, 236);")
        self.txtCliente.setObjectName("txtCliente")
        self.label_11 = QtWidgets.QLabel(self.centralwidget)
        self.label_11.setGeometry(QtCore.QRect(236, 544, 291, 51))
        self.label_11.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.label_11.setStyleSheet("font: 36pt \"Sans Serif\";")
        self.label_11.setText("")
        self.label_11.setAlignment(QtCore.Qt.AlignCenter)
        self.label_11.setObjectName("label_11")
        self.btnFinalizar = QtWidgets.QPushButton(self.centralwidget)
        self.btnFinalizar.setGeometry(QtCore.QRect(170, 620, 121, 41))
        font = QtGui.QFont()
        font.setFamily("Hack")
        font.setPointSize(14)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(9)
        self.btnFinalizar.setFont(font)
        self.btnFinalizar.setStyleSheet("background-color: rgb(0, 255, 70);\n"
"font: 75 14pt \"Hack\";\n"
"color: rgb(0,0,0);")
        self.btnFinalizar.setObjectName("btnFinalizar")
        TelaBuscarOs.setCentralWidget(self.centralwidget)

        self.retranslateUi(TelaBuscarOs)
        QtCore.QMetaObject.connectSlotsByName(TelaBuscarOs)
        # --- keyboard tab order across the form ---
        TelaBuscarOs.setTabOrder(self.txtNumOrdem, self.cmbTipo)
        TelaBuscarOs.setTabOrder(self.cmbTipo, self.txtMarca)
        TelaBuscarOs.setTabOrder(self.txtMarca, self.txtModelo)
        TelaBuscarOs.setTabOrder(self.txtModelo, self.txtNumSerie)
        TelaBuscarOs.setTabOrder(self.txtNumSerie, self.txtDefeito)
        TelaBuscarOs.setTabOrder(self.txtDefeito, self.txtSolucao)
        TelaBuscarOs.setTabOrder(self.txtSolucao, self.txtValor)
        TelaBuscarOs.setTabOrder(self.txtValor, self.btnBuscar)
        TelaBuscarOs.setTabOrder(self.btnBuscar, self.btnAlterar)
        TelaBuscarOs.setTabOrder(self.btnAlterar, self.btnDeletar)

    def retranslateUi(self, TelaBuscarOs):
        """Apply all translatable (Portuguese) strings to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        TelaBuscarOs.setWindowTitle(_translate("TelaBuscarOs", "Buscar Ordem de Serviço"))
        self.label.setText(_translate("TelaBuscarOs", "Buscar Ordem de Serviço"))
        self.label_2.setText(_translate("TelaBuscarOs", "Cliente"))
        self.label_3.setText(_translate("TelaBuscarOs", "Tipo"))
        self.cmbTipo.setItemText(0, _translate("TelaBuscarOs", "Computador"))
        self.cmbTipo.setItemText(1, _translate("TelaBuscarOs", "Notebook"))
        self.cmbTipo.setItemText(2, _translate("TelaBuscarOs", "Celular"))
        self.cmbTipo.setItemText(3, _translate("TelaBuscarOs", "Tablet"))
        self.label_4.setText(_translate("TelaBuscarOs", "Marca"))
        self.label_5.setText(_translate("TelaBuscarOs", "Modelo"))
        self.label_6.setText(_translate("TelaBuscarOs", "N°Serie"))
        self.label_7.setText(_translate("TelaBuscarOs", "Defeito"))
        self.label_8.setText(_translate("TelaBuscarOs", "Solução"))
        self.label_9.setText(_translate("TelaBuscarOs", "Valor"))
        self.btnBuscar.setText(_translate("TelaBuscarOs", "Buscar"))
        self.label_10.setText(_translate("TelaBuscarOs", "N° Ordem de Serviço"))
        self.btnAlterar.setText(_translate("TelaBuscarOs", "Alterar"))
        self.btnDeletar.setText(_translate("TelaBuscarOs", "Deletar"))
        self.btnLimpar.setText(_translate("TelaBuscarOs", "Limpar"))
        self.btnFinalizar.setText(_translate("TelaBuscarOs", "Finalizar"))
import resource_rc
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
TelaBuscarOs = QtWidgets.QMainWindow()
ui = Ui_TelaBuscarOs()
ui.setupUi(TelaBuscarOs)
TelaBuscarOs.show()
sys.exit(app.exec_())
|
985,021 | d047160b60e7827d27dc981cfd8f0f4fc15b7396 | """
Helper script for checking status of interfaces
This script contains re-usable functions for checking status of interfaces on SONiC.
"""
import logging
from transceiver_utils import all_transceivers_detected
def parse_intf_status(lines):
    """
    @summary: Parse the output of command "show interface description".
    @param lines: The output lines of command "show interface description".
    @return: Return a dictionary like:
        {
            "Ethernet0": {
                "oper": "up",
                "admin": "up",
                "alias": "etp1",
                "desc": "ARISTA01T2:Ethernet1"
            },
            ...
        }
    """
    status = {}
    for line in lines:
        parts = line.split()
        if len(parts) < 5:
            # Malformed / truncated row: skip it.
            continue
        name = parts[0]
        # Everything after the alias column belongs to the free-text description.
        status[name] = {
            "oper": parts[1],
            "admin": parts[2],
            "alias": parts[3],
            "desc": " ".join(parts[4:]),
        }
    return status
def check_interface_status(dut, interfaces):
    """
    @summary: Check the admin and oper status of the specified interfaces on DUT.
    @param dut: The AnsibleHost object of DUT. For interacting with DUT.
    @param interfaces: List of interfaces that need to be checked.
    @return: True when every interface matches its expected state, else False.
    """
    logging.info("Check interface status using cmd 'show interface'")
    # Ports present in the minigraph are expected up; all others down.
    mg_ports = dut.minigraph_facts(host=dut.hostname)["ansible_facts"]["minigraph_ports"]
    output = dut.command("show interface description")
    # Skip the two header lines of the CLI table before parsing.
    intf_status = parse_intf_status(output["stdout_lines"][2:])
    check_intf_presence_command = 'show interface transceiver presence {}'
    for intf in interfaces:
        expected_oper = "up" if intf in mg_ports else "down"
        expected_admin = "up" if intf in mg_ports else "down"
        if intf not in intf_status:
            logging.info("Missing status for interface %s" % intf)
            return False
        if intf_status[intf]["oper"] != expected_oper:
            logging.info("Oper status of interface %s is %s, expected '%s'" % (intf, intf_status[intf]["oper"],
                                                                               expected_oper))
            return False
        if intf_status[intf]["admin"] != expected_admin:
            logging.info("Admin status of interface %s is %s, expected '%s'" % (intf, intf_status[intf]["admin"],
                                                                                expected_admin))
            return False
        # Cross check the interface SFP presence status
        check_presence_output = dut.command(check_intf_presence_command.format(intf))
        # Third line of the CLI output holds the data row for this interface.
        presence_list = check_presence_output["stdout_lines"][2].split()
        assert intf in presence_list, "Wrong interface name in the output: %s" % str(presence_list)
        assert 'Present' in presence_list, "Status is not expected, presence status: %s" % str(presence_list)
    # Second opinion via the ansible interface_facts module.
    logging.info("Check interface status using the interface_facts module")
    intf_facts = dut.interface_facts(up_ports=mg_ports)["ansible_facts"]
    down_ports = intf_facts["ansible_interface_link_down_ports"]
    if len(down_ports) != 0:
        logging.info("Some interfaces are down: %s" % str(down_ports))
        return False
    return True
def check_interface_information(dut, interfaces):
    """Return True only when every transceiver is detected and every interface is healthy."""
    checks = (
        (all_transceivers_detected, "Not all transceivers are detected"),
        (check_interface_status, "Not all interfaces are up"),
    )
    for check, failure_message in checks:
        if not check(dut, interfaces):
            logging.info(failure_message)
            return False
    return True
|
985,022 | df06c88b06bbad7b24c3a910ebb11f2a32b7247b | """
@Author : jiaojie
@Time : 2020/4/5 22:40
@Desc :
"""
"""
元组
元组的定义:用()表示
元组是不可变类型的数据,定义了之后不能对里面的元素进行修改
元组的方法:
count():用来计算指定元素的个数
index():查找指定元素的下标
元组可以下标取值和切片
注意点:
1.空元组定义:()
2.元组中只要一个元素:(xxx,)或者(xxx),
元组内部元素修改:特殊情况
元组内部有可变类型的元素的时候,内部的可变类型是可以修改
"""
tu = ()
print(type(tu))
tu1 = (11, 22, 33, 'a', 'bb')
print(tu1[2])
print(tu1[0:3])
tu2 = (111,)
print(tu2)
tu3 = ([111, 222, 333], 'a', 'b')
print(tu3)
print(id(tu3))
print(id(tu3[0]))
print(id(tu3[1]))
print(id(tu3[2]))
tu3[0].append(999)
print(tu3)
print(id(tu3))
print(id(tu3[0]))
print(id(tu3[1]))
print(id(tu3[2])) |
985,023 | 5d7e94b531308adb7477bbda0da19e29735f4233 | from django.contrib import admin
from .models import learn
# Register your models here.
admin.site.register(learn) |
985,024 | f4eb806c6c6e7e9ab624eed0963319a5ff33372c | """
https://www.hackerrank.com/challenges/time-conversion
"""
import sys
def convert_to_24h(timestamp):
    """Convert a 12-hour 'hh:mm:ssAM'/'hh:mm:ssPM' string to 24-hour 'hh:mm:ss'.

    Midnight ('12:..AM') maps to hour '00'; PM hours other than 12 get +12.
    The AM/PM suffix riding on the seconds field is dropped.
    """
    hour_str, min_str, sec_str = timestamp.split(":")
    if "AM" in timestamp and hour_str == "12":
        hour_str = "00"  # 12 AM is midnight
    elif "PM" in timestamp and hour_str != "12":
        hour_str = str(int(hour_str) + 12)
    return hour_str + ":" + min_str + ":" + sec_str[0:2]  # strip AM/PM


# Guard the stdin read so the module can be imported (and tested) without
# blocking; previously input() ran at import time and the result was also
# bound to a name shadowing the stdlib 'time' module.
if __name__ == "__main__":
    print(convert_to_24h(input().strip()))
|
985,025 | 5c5fb172382cae82d6dac0966f8dda0eb6124c29 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-11-16 18:40
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: remove the 'active' field from the 'ad' model."""

    dependencies = [
        # Must be applied after the previous sosokan migration.
        ('sosokan', '0004_auto_20161116_1339'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='ad',
            name='active',
        ),
    ]
|
985,026 | 6832eecc87150fa873d05c14e4165cb627eee9a7 | from zmqlocker import LockerClient
import time
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--request", dest="request", action="store_true")
parser.add_argument("--release", dest="release", action="store_true")
parser.add_argument("--jobid", help="the id of the job requesting the GPU")
args = parser.parse_args()
if args.request and args.release:
raise(ValueError, "both request and release can't be asked at the same time")
# Ada passes something like '5580.ada.cs.ubc.ca' or '5580[1].ada.cs.ubc.ca'
jid = args.jobid.split('.')[0]
locker_cli = LockerClient( jid )
if args.request:
print "export CUDA_VISIBLE_DEVICES={0}".format( locker_cli.request_gpu() )
elif args.release:
print "echo {0}".format( locker_cli.release_gpu() )
else:
raise(ValueError, "must specify request or release")
|
985,027 | 4866cfd363278edb803f6c665198aaf7807d8822 | # tests
test_img = [
[225, 000, 000], [000, 225, 000], [000, 000, 225],
[225, 225, 000], [225, 000, 000], [225, 000, 000],
[225, 225, 225], [000, 225, 000], [000, 225, 000]
]
fade_1 = [[45, 0, 0], [0, 45, 0], [0, 0, 45],
[45, 45, 0], [225, 0, 0], [45, 0, 0],
[45, 45, 45], [0, 45, 0], [0, 45, 0]]
fade_2 = [[13, 0, 0], [0, 22, 0], [0, 0, 13],
[22, 22, 0], [225, 0, 0], [22, 0, 0],
[13, 13, 13], [0, 22, 0], [0, 13, 0]]
fade_3 = [[6, 0, 0], [0, 14, 0], [0, 0, 6],
[14, 14, 0], [225, 0, 0], [14, 0, 0],
[6, 6, 6], [0, 14, 0], [0, 6, 0]]
blur_1 = [[61, 7, 0], [43, 4, 1], [59, 3, 1],
[41, 8, 1], [29, 6, 1], [39, 5, 1],
[61, 8, 1], [43, 6, 1], [59, 5, 0]]
blur_2 = [[29, 6, 1], [29, 6, 1], [29, 6, 1],
[29, 6, 1], [29, 6, 1], [29, 6, 1],
[29, 6, 1], [29, 6, 1], [29, 6, 1]]
blur_3 = [[29, 6, 1], [29, 6, 1], [29, 6, 1],
[29, 6, 1], [29, 6, 1], [29, 6, 1],
[29, 6, 1], [29, 6, 1], [29, 6, 1]]
assert fade_image(test_img, 3, 1, 1, 1) == fade_1
assert fade_image(test_img, 3, 1, 1, 2) == fade_2
assert fade_image(test_img, 3, 1, 1, 3) == fade_3
assert blur_image(test_img, 3, 1) == blur_1
assert blur_image(test_img, 3, 2) == blur_2
assert blur_image(test_img, 3, 3) == blur_3
usage1 = "Usage: python3 pixelmagic.py <mode> <image>"
usage2 = "Usage: python3 pixelmagic.py fade <image> <row> <col> <radius>"
usage3 = "Error: Invalid mode"
usage4 = "Could not open file INVALID_FILE"
assert check_args(['pixelmagic.py']) == usage1
assert check_args(['pixelmagic.py','fade','INVALID_FILE']) == usage2
assert check_args(['pixelmagic.py','NO_MODE','INVALID_FILE']) == usage3
assert check_args(['pixelmagic.py','blur','INVALID_FILE']) == usage4
assert check_args(['pixelmagic.py','blur','pixelmagic.py']) == None
|
985,028 | 06e2c380f4a670e2c1e8c382dfbaa7f7d0b0e314 | import numpy as np
from keras.models import load_model
from keras.preprocessing import image
model = load_model('model.h5')
# summarize model
model.summary()
imagename = "56_100.jpg"
test_image = image.load_img(imagename, target_size=(64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
result = model.predict(test_image)
print(result)
# if result[0][0] == 1:
# prediction = 'dog'
# return [{"image": prediction}]
# elif result[0][0] == 1:
# prediction = 'dog'
# return [{"image": prediction}]
# else:
# prediction = 'cat'
# return [{"image": prediction}] |
985,029 | 093e99bdf3ffc7267169181b66ded973181e11bd | import pytest
import mock
import connexion
from src import launch
@pytest.fixture(scope='module')
def client():
    """Module-scoped Flask test client over a bare connexion app (no API spec registered)."""
    flask_app = connexion.FlaskApp(__name__)
    with flask_app.app.test_client() as c:
        yield c
@pytest.fixture()
def user_role():
    """Fixture returning the 'collaborator' role string (not consumed in this view)."""
    return 'collaborator'
def test_get_health(client):
    """/health is not registered on the bare app, so GET must return 404."""
    # GIVEN no query parameters or payload
    # WHEN I access to the url GET /health
    # THEN the HTTP response is 404 not found
    response = client.get('/health')
    assert response.status_code == 404
|
985,030 | 2b9497eac7018a44c650c45dafe15871a77a08a8 |
from rest_framework import serializers
from .models import Urlspage
class PageSerializer(serializers.ModelSerializer):
class Meta:
model = Urlspage
fields = ('id', 'name', 'is_valid', 'created_at') |
985,031 | f3df0e48db12986528d15bdea5819930a7a59df0 | version https://git-lfs.github.com/spec/v1
oid sha256:cbded24addb8cbc2b69e4ea3c7e7f9b4eecfcb21e6fc06008d8feb7f12e605a2
size 795
|
985,032 | bd11e4163c3f429803566ada44576646f617531a | from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.translation import gettext as _
from easy_thumbnails.fields import ThumbnailerImageField
class User(AbstractUser):
    """Inheriting User model with unique EmailField."""
    # Override the stock field to enforce uniqueness at the database level,
    # with a friendly duplicate-email error message.
    email = models.EmailField(unique=True, error_messages={
        'unique': _('A user with that email already exists.')
    })
class Food(models.Model):
    """Recipe Food model: a single named foodstuff."""
    name = models.CharField(max_length=20)

    def __str__(self):
        return self.name
class Ingredient(models.Model):
    """Food Ingredient model: a Food plus a measurement unit and an amount."""
    # Closed set of units; stored value equals the display label.
    UNIT_CHOICES = (
        ('PIECE', 'PIECE'),
        ('GRAM', 'GRAM'),
        ('LITER', 'LITER'),
        ('SPOON', 'SPOON'),
        ('TEASPOON', 'TEASPOON'),
        ('MILLILITER', 'MILLILITER'),
        ('BUNCH', 'BUNCH'),
        ('PINCH', 'PINCH')
    )
    # PROTECT keeps a Food row from being deleted while ingredients use it.
    food = models.ForeignKey(Food, on_delete=models.PROTECT)
    unit = models.CharField(max_length=10, choices=UNIT_CHOICES)
    amount = models.PositiveIntegerField(blank=True, default=0)

    def __str__(self):
        return f'{self.food.name}, {self.amount}'
class Step(models.Model):
    """Recipe Step model: one instruction, kept sorted by its 'order'."""
    instruction = models.TextField(max_length=500)
    order = models.IntegerField(blank=True, default=0)  # position within the recipe

    class Meta:
        ordering = ['order']

    def __str__(self):
        return f'{self.instruction}'
class Recipe(models.Model):
    """Recipe model: image, description, ingredients, steps and metadata."""
    CROP_SETTINGS = {'size': (300, 300), 'crop': 'smart'}  # image settings
    DIFFICULTY_CHOICES = (
        ('EASY', 'EASY'),
        ('MEDIUM', 'MEDIUM'),
        ('HARD', 'HARD')
    )
    RATING_CHOICES = (
        ('1', '1'),
        ('2', '2'),
        ('3', '3'),
        ('4', '4'),
        ('5', '5')
    )
    name = models.CharField(max_length=50)
    # Source image is smart-cropped to 300x300 on upload.
    image = ThumbnailerImageField(default='default.png', upload_to='recipe_images', resize_source=CROP_SETTINGS)
    description = models.TextField(max_length=500)
    ingredients = models.ManyToManyField(Ingredient)
    portions = models.PositiveIntegerField()
    preparation_time = models.PositiveIntegerField(help_text='Time in minutes.')
    difficulty = models.CharField(max_length=6, choices=DIFFICULTY_CHOICES)
    # NOTE(review): default=0 is an int and '0' is not among RATING_CHOICES
    # ('1'..'5'); consider default='' -- confirm against existing migrations.
    rating = models.CharField(max_length=1, choices=RATING_CHOICES, blank=True, default=0)
    date_posted = models.DateTimeField(auto_now_add=True)
    steps = models.ManyToManyField(Step)
    author = models.ForeignKey(User, on_delete=models.CASCADE)

    def __str__(self):
        return self.name
|
985,033 | 6d1aab734995bd692e531a9f21eab70f30a04c81 |
import requests
import io
import zipfile
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def getAndExtractZip(url):
    """Download the zip archive at *url* and yield (member-name, open file object) pairs."""
    payload = requests.get(url).content
    with zipfile.ZipFile(io.BytesIO(payload)) as archive:
        for member in archive.infolist():
            with archive.open(member) as handle:
                yield member.filename, handle
# Download the Lahman baseball archive and load the two CSVs of interest.
url = "http://seanlahman.com/files/database/baseballdatabank-master_2018-03-28.zip"
df_salaries = pd.DataFrame()
df_teams = pd.DataFrame()
for name, content in getAndExtractZip(url):
    if name == 'baseballdatabank-master/core/Salaries.csv':
        df_salaries = pd.read_csv(content)
    if name == 'baseballdatabank-master/core/Teams.csv':
        df_teams = pd.read_csv(content)
print(df_salaries.head())
print(df_salaries.max())
# BUG FIX: pct_change is a method; without the call parentheses this printed
# the bound-method object instead of the period-over-period change.
print(df_salaries.pct_change())
#print(df_teams.head())
# Merge on the columns the two tables share.
print(pd.merge(df_salaries, df_teams))
# BUG FIX: the subplot was created but never used, so df.plot() opened a
# second figure and left this one empty; draw onto the created axes instead.
fig, axis = plt.subplots(1, 1)
df_salaries.plot(ax=axis)
plt.show()
|
985,034 | 19b37f932ff3670edbe331386f944b155a151b77 |
print "MAIL MODULE INIT"
from folderTree import *
from mailList import *
from mailMessage import *
from imapSettings import *
from smtpSettings import * |
985,035 | 52aa3f03fc4537114b7fcb82da443964bb3d0f6e | import time
#Ex.1
# def action_decorator(func):
# def inner(value):
# print(func.__name__, 'is canceled!')
# return inner
#Ex.2
# def action_decorator(func):
# def inner(value):
# start = time.time()
# func(value)
# print(time.time() - start, ' sec - exec time')
# return inner
#Ex.3
# counter=[]
# def action_decorator(func):
# def inner(value):
# func(value)
# counter.append(1)
# print(len(counter), 'time exec func')
# return inner
#Ex.4
# def action_decorator(func):
# print('Create decorator')
# def inner(value):
# print('Begin of exec func')
# func(value)
# print('End of exec func')
# return inner
#Ex.5
# def action_decorator(func):
# def inner(value):
# try:
# func(value)
# except Exception as e:
# print(e)
# return inner
#@action_decorator
def long(value):
    """Slow demo target for the decorator exercises above: block 5 s, then echo the value."""
    time.sleep(5)  # delays for 5 seconds
    return 'long ' + str(value)
def short(string_param):
    """Fast demo function: log the parameter and return the constant tag 'short'."""
    banner = 'Speed!'
    print(banner, string_param)
    return 'short'
def medium(value, *modificators):
    """Return *value* multiplied by every modifier in turn."""
    product = value
    for factor in modificators:
        product = product * factor
    return product
def change_sign(num, check_sign=True):
    """Negate *num*; by default refuse (ValueError) when num is positive."""
    if not check_sign or num <= 0:
        return -num
    raise ValueError('num > 0!')
#map/filter/reduce
# Remainders mod 5 of each element -> [1, 4, 0, 0, 4]
arr=[1,4,5,30,99]
print(list(map(lambda x: x%5, arr)))
# Stringify every element
arr = [3, 4, 90, -2]
print(list(map(lambda x: str(x),arr)))
# Keep only the non-string elements
arr = ['some', 1, 'v', 40, '3a', str]
print(list(filter(lambda x: type(x) != type(''),arr)))
# reduce concatenates the strings; the final print reports the total length
from functools import reduce
arr = ['some', 'other', 'value']
print(len(reduce(lambda a,x: a + x,arr))) |
985,036 | 17a530854d9cf2f05d8222f9661e6156f1361f05 | #!/usr/bin/env python
# DickServ IRC Bot - Developed by acidvegas in Python (https://acid.vegas/dickserv)
# wolfram.py
from xml.etree import ElementTree as etree
import config
import httplib
def ask(query):
params = httplib.data_encode({'input':query, 'appid':config.api.wolfram_api_key})
data = httplib.get_source('http://api.wolframalpha.com/v2/query?' + params)
results = {}
tree = etree.fromstring(data)
for e in tree.findall('pod'):
for item in [ef for ef in list(e) if ef.tag=='subpod']:
for it in [i for i in list(item) if i.tag=='plaintext']:
if it.tag=='plaintext':
results[e.get('title')] = it.text
if 'Result' in results:
return results['Result']
else:
return False |
985,037 | 42604182a263d62f4781c1e1368bb931cdf097ce | import pytest
from scripts.generate_pipeline import get_diff
#
# function get_diff tests
#
@pytest.mark.parametrize(
"file_contents,expected_result",
[
(
"""test.py
folder_a/test.tf
folder_a/folder_b/test.txt""",
["test.py", "folder_a/test.tf", "folder_a/folder_b/test.txt"],
), # Diff present
("\n", []), # No Diff
],
)
def test_get_diff(mocker, file_contents, expected_result):
open_mock = mocker.patch(
"scripts.generate_pipeline.open", mocker.mock_open(read_data=file_contents)
)
result = get_diff()
# Tests
assert result == expected_result
open_mock.assert_called_once_with(".git_diff_conditional/git_diff", "r")
def test_get_diff_no_file(mocker, log_and_exit_mock):
    """get_diff must log-and-exit (yielding None) when the diff file is missing."""
    open_mock = mocker.patch(
        "scripts.generate_pipeline.open", side_effect=FileNotFoundError
    )
    result = get_diff()
    # Tests
    assert result is None
    open_mock.assert_called_once_with(".git_diff_conditional/git_diff", "r")
    log_and_exit_mock.assert_called_once_with(
        "error", "Error getting diff from file", 1
    )
|
985,038 | 194d5df9f64c3dc402a5dae7a78599d284182f29 | import sys
from graphviz import Digraph
def plot(genotype, filename):
    """Render an architecture genotype as a left-to-right graphviz PDF.

    *genotype* is a sequence of (op, input_index) pairs, two per intermediate
    node; index 0/1 refer to the input cells c_{k-2}/c_{k-1}, and j >= 2
    refers to intermediate node j - 2. The rendered file is opened (view=True).
    """
    g = Digraph(
        format="pdf",
        edge_attr=dict(fontsize="20", fontname="times"),
        node_attr=dict(
            style="filled",
            shape="rect",
            align="center",
            fontsize="20",
            height="0.5",
            width="0.5",
            penwidth="2",
            fontname="times",
        ),
        engine="dot",
    )
    g.body.extend(["rankdir=LR"])
    # The two input cells.
    g.node("c_{k-2}", fillcolor="darkseagreen2")
    g.node("c_{k-1}", fillcolor="darkseagreen2")
    assert len(genotype) % 2 == 0
    steps = len(genotype) // 2
    for i in range(steps):
        g.node(str(i), fillcolor="lightblue")
    # Two incoming edges per intermediate node, labelled with the operation.
    for i in range(steps):
        for k in [2 * i, 2 * i + 1]:
            op, j = genotype[k]
            if j == 0:
                u = "c_{k-2}"
            elif j == 1:
                u = "c_{k-1}"
            else:
                u = str(j - 2)
            v = str(i)
            g.edge(u, v, label=op, fillcolor="gray")
    # Output node collects every intermediate node.
    g.node("c_{k}", fillcolor="palegoldenrod")
    for i in range(steps):
        g.edge(str(i), "c_{k}", fillcolor="gray")
    g.render(filename, view=True)
if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("usage:\n python {} ARCH_NAME".format(sys.argv[0]))
        sys.exit(1)
    genotype_name = sys.argv[1]
    try:
        print(genotype_name)
        # BUG FIX: 'genotypes' was never imported in this file, so the old
        # eval("genotypes.<name>") raised NameError, which the AttributeError
        # handler below did not catch. getattr() also avoids eval() on argv.
        import genotypes
        genotype = getattr(genotypes, genotype_name)
    except (ImportError, AttributeError):
        print("{} is not specified in genotypes.py".format(genotype_name))
        sys.exit(1)
    plot(genotype.normal, "normal")
    plot(genotype.reduce, "reduction")
|
985,039 | fdafa9a79f72fbea0ab2722d1b2c6b3e52ffa397 | # -*- coding: utf-8 -*-
print("Hello World!")
result = 1+ 2
result = 1+3
print(result)
for i in range(2,20,5):
print(i,end="-")
list1 = [1,2,3,4]
for item in list1:
print(item)
tuple1 = (12,"String",13.6,"String2")
tuple2 = (30,50,9)
print(tuple1)
print(tuple1[0])
tuple3 = tuple2+tuple1
len(tuple3)
dict1={}
dict1["car"] = "Car is a mechanical wheeler"
print(dict1.get("car","not found"))
input1 = input("enter a something:")
some = input1;
print (some)
inp = input ("Enter something for the file write : ")
with open("text.txt","w") as f:
f.write(inp)
inp = input ("Enter something for add to the file : ")
with open("text.txt","a") as f:
f.write(inp)
with open("text.txt","r") as f:
print(f.read())
def function(arg1,arg2):
return (arg1+arg2)
print(function(1,3))
class Dog:
    """Toy class demonstrating instance methods."""

    def bark(self):
        """Print a barking message."""
        message = "Dog is barking"
        print(message)

    def bite(self):
        """Print a biting message."""
        message = "Dog is biting you"
        print(message)
dog = Dog()
dog.bark()
class Game:
    """Toy class demonstrating constructor state plus methods."""

    def __init__(self, name):
        # Remember the game's title for the status messages.
        self.name = name

    def start(self):
        """Announce that the game has started."""
        print(self.name, " has been started")

    def stop(self):
        """Announce that the game has stopped."""
        print(self.name, " has been stopped")
game = Game("WoW")
print(game.name)
game.start()
game.stop()
str = ["string","str","str2"]
newStr= [string for string in str]
words = ["you","think","you","do","but","you","don't"]
sentence = "-".join(words);
|
985,040 | c68897ffeb7c6e13968c4f654e20b88845892b15 | from starcluster.clustersetup import ClusterSetup
from starcluster.logger import log
class ModuleInstaller(ClusterSetup):
    """StarCluster plugin: build and install Environment Modules 3.2.10 on each node."""

    def run(self, nodes, master, user, user_shell, volumes):
        """Download, build, install and shell-integrate Modules on every node.

        NOTE(review): configure points at the extracted tree but 'make' /
        'make install' run in the ssh session's default directory -- confirm
        the remote shell keeps the needed cwd between execute() calls.
        """
        for node in nodes:
            log.info("Installing and setting up modules support on %s " % (node.alias))
            # Fetch and unpack the source tarball.
            node.ssh.execute('wget -c -P Downloads http://downloads.sourceforge.net/project/modules/Modules/modules-3.2.10/modules-3.2.10.tar.gz')
            node.ssh.execute('tar xvzf Downloads/modules-3.2.10.tar.gz -C Downloads')
            node.ssh.execute('./Downloads/modules-3.2.10/configure')
            node.ssh.execute('make')
            node.ssh.execute('make install')
            # Expose the 'module' command to login shells.
            node.ssh.execute('cp /usr/local/Modules/3.2.10/init/sh /etc/profile.d/modules.sh')
            node.ssh.execute('chmod 755 /etc/profile.d/modules.sh')
            node.ssh.execute('. /etc/profile.d/modules.sh')
            node.ssh.execute('echo "/usr/local/Modules/applications" >> ${MODULESHOME}/init/.modulespath')
            node.ssh.execute('echo ". /etc/profile.d/modules.sh" >> /etc/bash.bashrc')
            node.ssh.execute('mkdir -p /usr/local/Modules/applications')
|
985,041 | f66ebec494e6da5515ab563f6cd51b5c7007b9c4 | """
Unpacking a Sequence into Separate Variables
"""
p = (4, 5)
x, y = p
print(x)
print(y)
print()
data = ['acme', 50, 91.1, (2012, 12, 21)]
name, shares, price, date = data
print(name)
print(date)
name, shares, price, (year, mon, day) = data
print(name)
print(year)
print(mon)
print(day)
print()
# Unpacking works with any object that happens to be iterable.
s = 'Momo'
a, b, c, d = s
print(a)
print(b)
print(d)
print()
# Throw away some variables
_, shares, price, _ = data
print(shares)
print(price)
|
985,042 | 23c4c6893ecb4709821a3aab7d21da0d0ba0be37 | # -*- coding: utf-8 -*-
from django import forms
class DocumentForm(forms.Form):
fileLink = forms.FileField(
label='Select a file'
)
company = forms.CharField(widget=forms.HiddenInput())
class EditDocumentForm(forms.Form):
op_file = forms.FileField(
label='Select a file',
required=False
)
op_id = forms.CharField(widget=forms.HiddenInput())
op_company = forms.CharField(widget=forms.HiddenInput())
|
985,043 | d6b7d7b1e72d7e58f52259bb1464173e41250fb9 | import os
import shutil
try:
import subprocess32 as subprocess
except Exception:
import subprocess
import requests
import requests.exceptions
from pandaharvester.harvestercore.plugin_base import PluginBase
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvestermover import mover_utils
from pandaharvester.harvesterconfig import harvester_config
# logger
baseLogger = core_utils.setup_logger('analysis_aux_preparator')
# preparator plugin for analysis auxiliary inputs
class AnalysisAuxPreparator(PluginBase):
    # constructor
    def __init__(self, **kwarg):
        """Set plugin defaults, then let PluginBase consume **kwarg."""
        self.gulOpts = None
        self.maxAttempts = 3  # per-file retry cap checked in trigger_preparation
        PluginBase.__init__(self, **kwarg)
    # trigger preparation
    def trigger_preparation(self, jobspec):
        """Fetch every input file of the job to its local access path.

        Supported URL schemes: http(s) download, docker image save, and plain
        filesystem copy. Returns (True, '') when all files are in place,
        (None, msg) for a retryable failure, or (False, msg) once any file
        has reached self.maxAttempts.
        """
        # make logger
        tmpLog = self.make_logger(baseLogger, 'PandaID={0}'.format(jobspec.PandaID),
                                  method_name='trigger_preparation')
        tmpLog.debug('start')
        # loop over all inputs
        allDone = True
        for tmpFileSpec in jobspec.inFiles:
            # local access path
            url = tmpFileSpec.url
            accPath = self.make_local_access_path(tmpFileSpec.scope, tmpFileSpec.lfn)
            # skip files that already exist locally
            if os.path.exists(accPath):
                continue
            # make directories if needed
            if not os.path.isdir(os.path.dirname(accPath)):
                os.makedirs(os.path.dirname(accPath))
            # get the file; return_code 0 marks success for this file
            return_code = 1
            if url.startswith('http'):
                try:
                    tmpLog.debug('getting via http from {0} to {1}'.format(url, accPath))
                    res = requests.get(url, timeout=180, verify=False)
                    if res.status_code == 200:
                        # NOTE(review): res.content is bytes but the file is
                        # opened in text mode ('w') -- on Python 3 this raises
                        # TypeError; confirm whether this still targets Python 2.
                        with open(accPath, 'w') as f:
                            f.write(res.content)
                        return_code = 0
                    else:
                        errMsg = 'failed to get {0} with StatusCode={1} {2}'.format(url, res.status_code, res.text)
                        tmpLog.error(errMsg)
                except requests.exceptions.ReadTimeout:
                    tmpLog.error('read timeout when getting data from {0}'.format(url))
                except Exception:
                    core_utils.dump_error_message(tmpLog)
            elif url.startswith('docker'):
                # export the docker image as a tarball at accPath
                args = ['docker', 'save', '-o', accPath, url.split('://')[-1]]
                try:
                    tmpLog.debug('executing ' + ' '.join(args))
                    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    stdout, stderr = p.communicate()
                    return_code = p.returncode
                    if stdout is not None:
                        stdout = stdout.replace('\n', ' ')
                    if stderr is not None:
                        stderr = stderr.replace('\n', ' ')
                    tmpLog.debug("stdout: %s" % stdout)
                    tmpLog.debug("stderr: %s" % stderr)
                except Exception:
                    core_utils.dump_error_message(tmpLog)
            elif url.startswith('/'):
                # absolute local filesystem path: plain copy
                try:
                    shutil.copyfile(url, accPath)
                    return_code = 0
                except Exception:
                    core_utils.dump_error_message(tmpLog)
            else:
                tmpLog.error('unsupported protocol in {0}'.format(url))
            if return_code != 0:
                allDone = False
        if allDone:
            tmpLog.debug('succeeded')
            return True, ''
        else:
            errMsg = 'failed'
            tmpLog.error(errMsg)
            # check attemptNr: give up permanently once any file hit the cap
            for tmpFileSpec in jobspec.inFiles:
                if tmpFileSpec.attemptNr >= self.maxAttempts:
                    errMsg = 'gave up due to max attempts'
                    tmpLog.error(errMsg)
                    return (False, errMsg)
            # None signals a transient failure to be retried
            return None, errMsg
    # check status
    def check_stage_in_status(self, jobspec):
        """Always done: trigger_preparation stages files synchronously, so there is nothing to poll."""
        return True, ''
# resolve input file paths
def resolve_input_paths(self, jobspec):
pathInfo = dict()
for tmpFileSpec in jobspec.inFiles:
url = tmpFileSpec.lfn
accPath = self.make_local_access_path(tmpFileSpec.scope, tmpFileSpec.lfn)
pathInfo[tmpFileSpec.lfn] = {'path': accPath}
jobspec.set_input_file_paths(pathInfo)
return True, ''
    # make local access path
    def make_local_access_path(self, scope, lfn):
        """Build the local file path for (scope, lfn) under self.localBasePath."""
        return mover_utils.construct_file_path(self.localBasePath, scope, lfn)
|
985,044 | 97b0b66d52631c0e13a41934bb725bc4de873db7 | import random, time
from pyglet.window.key import MOTION_UP, MOTION_DOWN, MOTION_LEFT, MOTION_RIGHT
from env.snake_env import SnakeEnv
def interact():
"""
Human interaction with the environment
"""
env = SnakeEnv()
done = False
r = 0
action = random.randrange(4)
delay_time = 0.2
# After the first run of the method env.render()
# env.renderer.viewer obtains an attribute 'window'
# which is a pyglet.window.Window object
env.render(mode='human')
# Use the arrows to control the snake's movement direction
@env.renderer.viewer.window.event
def on_text_motion(motion):
"""
Events to actions mapping
"""
nonlocal action
if motion == MOTION_UP:
action = 0
elif motion == MOTION_DOWN:
action = 2
elif motion == MOTION_LEFT:
action = 3
elif motion == MOTION_RIGHT:
action = 1
while not done:
time.sleep(delay_time)
obs, reward, done, info = env.step(action)
env.render(mode='human')
if reward:
r += reward
# Speeding up snake after eating food
delay_time -= 1/6 * delay_time
return r
if __name__ == '__main__':
interact() |
985,045 | 1ad2b5e7333655a418c4fe91104aa57a3a1064e3 | import numpy as np
class Team:
def __init__(self, name='', elo_home=1500, elo_away=1500, gp=0, gc=0, points=0, matches=0, wins=0, losses=0):
self.name = name
#home elo
self.elo_home = np.array([], dtype=int)
self.elo_home= np.hstack((self.elo_home,elo_home))
#away elo
self.elo_away = np.array([], dtype=int)
self.elo_away = np.hstack((self.elo_home,elo_away))
self.gp = gp
self.gc = gc
self.pts = points
self.matches = matches
self.wins = wins
self.losses = losses
self.draws = matches - wins - losses
def __eq__(self, other):
sg = self.gp - self.gc
sgo = other.gp - other.gc
return self.pts == other.pts and self.wins == other.wins and sg == sgo and self.gp == other.gp
def __gt__(self, other):
sg = self.gp - self.gc
sgo = other.gp - other.gc
if self.pts > other.pts:
return True
elif self.pts == other.pts and self.wins > other.wins:
return True
elif self.pts == other.pts and self.wins == other.wins and sg > sgo:
return True
elif self.pts == other.pts and self.wins == other.wins and sg == sgo and self.gp > other.gp:
return True
else:
return False
def play(self, opponent, goal_avg=2.3, update='y'):
# obtain match score
w = self.get_weights(opponent)
score = np.zeros(2,dtype=int)
for i in range(0, 2):
score[i] = int(w[i] * np.random.poisson(goal_avg, 1))
if update == 'y':
# update points, gp, gc
self.update_stats(score, 'home')
opponent.update_stats(score, 'away')
# update elos
self.update_elo(opponent.elo_away[-1], score, 'home')
opponent.update_elo(self.elo_home[-2], score, 'away')
return score
def get_weights(self, opponent):
"""
:rtype : w, a vector with 2 floats
"""
w = np.zeros(2)
y = (self.elo_home[-1] - opponent.elo_away[-1])/400.
w[0] = 1/(1 + pow(10, -y))
w[1] = 1 - w[0]
return w
def update_stats(self, score, home_or_away):
if home_or_away == 'home':
gp = score[0]
gc = score[1]
else:
gp = score[1]
gc = score[0]
if gp < gc:
pts = 0
self.losses += 1
elif gp > gc:
pts = 3
self.wins += 1
else:
pts = 1
self.draws += 1
self.gp += gp
self.gc += gc
self.pts += pts
self.matches +=1
def update_elo(self, opponent_elo, score, home_or_away='home', k=30):
"""
:rtype : self.elo is updated
"""
if home_or_away == 'home':
goal_diff = score[0] - score[1]
else:
goal_diff = score[1] - score[0]
# determine outcome
if goal_diff > 0:
# win
w = 1.0
elif goal_diff < 0:
#defeat
w = 0.0
goal_diff *= -1
else:
#draw
w = 0.5
if goal_diff <= 1:
G = 1.0
elif goal_diff == 2:
G = 1.5
else:
G = 1.75 + (goal_diff - 3)/8.0
if (home_or_away == 'home'):
elo_diff = self.elo_home[-1] - opponent_elo
else:
elo_diff = self.elo_away[-1] - opponent_elo
# expected outcome
wex = 1/(1 + pow(10, - elo_diff/400.0))
# elo change is proportional to the difference between the result and its expectation
if (home_or_away == 'home'):
self.elo_home = np.hstack((self.elo_home, self.elo_home[-1] + round(k*G*(w-wex))))
else:
self.elo_away = np.hstack((self.elo_away, self.elo_away[-1] + round(k*G*(w-wex))))
def team_printout(self):
'''
Function that prints out all info necessary to print out a table
:return: a string
'''
print self.name, self.elo_home[-1], self.elo_away[-1], self.matches, self.pts, self.wins, self.draws, self.losses, \
self.gp, self.gc, self.gp - self.gc
|
985,046 | a6c374edebbce625e66e427584ea73cb03a7ccdd | n=input("請輸入正整數:")
# Trial-division sieve: a candidate only needs testing against primes <= sqrt(candidate).
prime = [2]
for candidate in range(3, int(n) + 1):
    is_prime = True
    for p in prime:
        if candidate % p == 0:
            is_prime = False
            break
        if p * p > candidate:
            break
    if is_prime:
        prime.append(candidate)
# O(1) membership instead of scanning the prime list for every substring.
prime_set = set(prime)
# Collect the value of every digit-substring of n that is prime.
list1 = []
for i in range(len(n)):
    for j in range(i, len(n)):
        value = int(n[i:j + 1])
        if value in prime_set:
            list1.append(value)
# (removed dead variable 'q = len(list1)')
if len(list1) == 0:
    print("No prime found")
else:
    print("子字串中最大的質數值為:", max(list1))
|
985,047 | ee803fab7a7fffff4b690f78b73f558b3e65dff4 | badIngredients = ["Octinoxate", "Oxybenzone", "Benzophenone-3", "Avobenzone", "benzophenone",
"Cylcopentasiloxane", "Cyclomethicone", "Formaldehyde", "Diazolidinyl", "urea",
"DMDM", "Hydantoin", "Hydroxymethylglycinate", "Methylisothiazolinone", "Octyl",
"methoxycinnamate", "Parabens", "Sodium", "lauryl", "laureth", "sulfate", "SLS", "SLES",
"Butylparaben", "4-methylbenzylidene", "camphor", "Octocrylene", "Para-aminobenzoic", "acid",
"PABA", "Methylparaben", "Ethylparaben", "Propylparaben", "Butylparaben", "Benzylparaben",
"Triclosan"]
numBad = 0
ingredients = input ("Enter ingredients: ")
ingredients = ingredients.replace(",", " ")
ingredientList = ingredients.split(" ")
for i in (range (len (ingredientList))):
for j in (range (len (badIngredients))):
if ingredientList[i].lower() == badIngredients[j].lower():
numBad += 1
print (badIngredients[j])
print (numBad)
|
985,048 | 01eafef3d84c2bd6ed2ebe7dba86846ab55d3a4f | import sys, os, platform
from src.pnc import Environment
from src.imaging_derivs import DataVector
from src.plotting import roi_to_vtx
from src.utils import get_states_from_brain_map
import numpy as np
# %% plotting
import seaborn as sns
import matplotlib.pyplot as plt
from nilearn import plotting
from src.plotting import set_plotting_params
set_plotting_params(format='svg')
figsize = 1.5
# %% Setup project environment
computer = 'macbook'
parc = 'schaefer'
n_parcels = 200
sc_edge_weight = 'streamlineCount'
environment = Environment(computer=computer, parc=parc, n_parcels=n_parcels, sc_edge_weight=sc_edge_weight)
environment.make_output_dirs()
environment.load_parc_data()
# %% get states
bbw_dir = os.path.join(environment.research_data, 'BigBrainWarp', 'spaces', 'fsaverage')
if parc == 'schaefer':
state_brain_map = np.loadtxt(os.path.join(bbw_dir, 'Hist_G2_Schaefer2018_{0}Parcels_17Networks.txt' \
.format(n_parcels)))
elif parc == 'glasser':
state_brain_map = np.loadtxt(os.path.join(bbw_dir, 'Hist_G2_HCP-MMP1.txt'))
state_brain_map = state_brain_map * -1
bin_size = 10
n_states = int(n_parcels / bin_size)
states = get_states_from_brain_map(brain_map=state_brain_map, n_bins=n_states)
# %%
state_brain_map = DataVector(data=state_brain_map, name='state_brain_map')
state_brain_map.rankdata()
state_brain_map.rescale_unit_interval()
# %% plot
cmap = 'viridis'
figwidth = 1
figratio = 0.60
figheight = figwidth * figratio
for hemi in ['left', 'right']:
if hemi == 'left':
vtx_data, plot_min, plot_max = roi_to_vtx(state_brain_map.data + 1e-5, environment.parcel_names, environment.lh_annot_file)
vtx_data = vtx_data.astype(float)
elif hemi == 'right':
vtx_data, plot_min, plot_max = roi_to_vtx(state_brain_map.data + 1e-5, environment.parcel_names, environment.rh_annot_file)
vtx_data = vtx_data.astype(float)
for view in ['lateral', 'medial']:
f = plotting.plot_surf_roi(environment.fsaverage['infl_{0}'.format(hemi)], roi_map=vtx_data,
hemi=hemi, view=view, vmin=0, vmax=1,
bg_map=environment.fsaverage['sulc_{0}'.format(hemi)], bg_on_data=True,
darkness=.5, cmap=cmap, colorbar=False)
f.set_figwidth(figwidth)
f.set_figheight(figheight)
plt.subplots_adjust(0, 0, 1, 1, 0, 0)
for ax in f.axes:
ax.axis('off')
ax.margins(x=0, y=0)
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
f.savefig(os.path.join(environment.figdir, '{0}_{1}_{2}.png'.format(state_brain_map.name, hemi, view)),
dpi=1000, bbox_inches='tight',
pad_inches=0)
plt.close()
# %% colorbar
f, ax = plt.subplots(1, 1, figsize=(.5, .5))
h = sns.heatmap(np.zeros((5, 5)), vmin=0, vmax=1, cmap=cmap, cbar_kws={"orientation": "vertical"})
ax.set_xticklabels('')
ax.set_yticklabels('')
ax.remove()
cbar = ax.collections[0].colorbar
cbar.set_ticks([])
f.savefig(os.path.join(environment.figdir, 'colorbar_viridis.png'), dpi=300, bbox_inches='tight',
pad_inches=0)
plt.close()
# %% plot states separately
cmap = plt.get_cmap('Pastel1')
figwidth = .5
figratio = 0.60
figheight = figwidth * figratio
x = ([2, 1], [15, 2])
for i in np.arange(len(x)):
states_ij = np.zeros(states.shape)
states_ij[states == x[i][0]] = x[i][1]
for hemi in ['left', 'right']:
if hemi == 'left':
vtx_data, plot_min, plot_max = roi_to_vtx(states_ij, environment.parcel_names, environment.lh_annot_file)
vtx_data = vtx_data.astype(float)
elif hemi == 'right':
vtx_data, plot_min, plot_max = roi_to_vtx(states_ij, environment.parcel_names, environment.rh_annot_file)
vtx_data = vtx_data.astype(float)
for view in ['lateral', 'medial']:
f = plotting.plot_surf_roi(environment.fsaverage['infl_{0}'.format(hemi)], roi_map=vtx_data,
hemi=hemi, view=view, vmin=1, vmax=9,
bg_map=environment.fsaverage['sulc_{0}'.format(hemi)], bg_on_data=True,
darkness=.5, cmap=cmap, colorbar=False)
f.set_figwidth(figwidth)
f.set_figheight(figheight)
f.savefig(os.path.join(environment.figdir, '{0}_{1}_{2}_{3}.png'.format('state', i, hemi, view)), dpi=1000,
bbox_inches='tight',
pad_inches=0)
plt.close()
# %% dummy energy matrix
n_states = 5
indices_lower = np.tril_indices(n_states, k=-1)
figsize = 1.25
f, ax = plt.subplots(1, 1, figsize=(figsize, figsize))
np.random.seed(0)
x = np.random.rand(n_states, n_states)
x[np.eye(n_states) == 1] = np.nan
h = sns.heatmap(x, vmin=0, vmax=1, cmap='Purples', square=True,
cbar_kws={'shrink': 0.8, 'label': 'energy (a.u.)'})
ax.set_title('e')
ax.set_ylabel("Initial states")
ax.set_xlabel("Target states")
cbar = ax.collections[0].colorbar
cbar.set_ticks([])
ax.tick_params(pad=-2.5)
f.savefig(os.path.join(environment.figdir, 'schematic_energy'), dpi=300, bbox_inches='tight',
pad_inches=0.01)
plt.close()
f, ax = plt.subplots(1, 1, figsize=(figsize, figsize))
x = x - x.transpose()
plot_mask = np.zeros((n_states, n_states))
plot_mask[indices_lower] = 1
plot_mask = plot_mask.astype(bool)
h = sns.heatmap(x, mask=plot_mask, vmin=-1, vmax=1, cmap='coolwarm', square=True,
cbar_kws={'shrink': 0.8, 'label': 'energy asymmetry'})
ax.set_title('ed')
ax.set_ylabel("Initial states")
ax.set_xlabel("Target states")
cbar = ax.collections[0].colorbar
cbar.set_ticks([])
ax.tick_params(pad=-2.5)
f.savefig(os.path.join(environment.figdir, 'schematic_energy_asym'), dpi=300, bbox_inches='tight',
pad_inches=0.01)
plt.close()
|
985,049 | 58b2d9a2467bc40b3f7738a1ea9829fb65b350e4 | import csv
import random
from collections import defaultdict
def load_data(filename):
    """Load quotes from a ';'-delimited CSV file (Python 2: binary mode).

    Each row is expected to be ``quote text;author``.  Returns a list of
    ``[list_of_words, author]`` pairs, one per row.
    """
    quotes = []
    with open(filename, 'rb') as f:
        reader = csv.reader(f, delimiter= ';')
        for line in reader:
            # Split the quote into words so one can later be blanked out.
            parsedQuote = line[0].split(' ')
            author = line[1]
            quotes.append([parsedQuote, author])
    return quotes
class Game:
    """Two-player "guess the missing word in the quote" game (Python 2)."""

    def __init__(self):
        self.player_1 = None
        self.player_1_score = 0
        self.player_2 = None
        self.player_2_score = 0
        self.current_player = None

    def switch_player(self):
        # Hand the turn to the other player.
        if self.current_player == self.player_1:
            self.current_player = self.player_2
        else:
            self.current_player = self.player_1

    def add_point(self):
        # Credit the current player with one point.
        if self.current_player == self.player_1:
            self.player_1_score += 1
        else:
            self.player_2_score += 1

    def play_game(self, quotes):
        """Run the game loop until quotes run out or a player types "exit".

        A blank replaces one random word per quote; an exact guess, or any
        guess contained in the word, scores a point for the current player.
        """
        # get player information
        self.player_1 = raw_input("Enter player 1's name: ")
        self.player_2 = raw_input("Enter player 2's name: ")
        self.current_player = self.player_1
        # game loop
        while quotes:
            # get the quote
            quote_group = quotes.pop()
            quote = quote_group[0]
            author = quote_group[1]
            # choose a random index
            random_index = random.randint(0, (len(quote)-1))
            missing_word = quote[random_index].lower()
            quote[random_index] = "_____"
            # guess the word
            print "It's your turn, {0}".format(self.current_player)
            print "This quote is by {0}: \n {1}".format(author, ' '.join(quote))
            guess = raw_input('Guess the missing word: ').lower()
            if guess == missing_word:
                print "correct! You get a point! You guessed {0}, and the missing word was {1}".format(guess, missing_word)
                self.add_point()
            elif guess in missing_word:
                # Substring matches (even one letter) also score --
                # presumably intentional leniency; confirm.
                print "close, but you still get a point! You guessed {0}, and the missing word was {1}".format(guess, missing_word)
                self.add_point()
            else:
                print "Wrong guess. The missing word was: {0}".format(missing_word)
            self.switch_player()
            exit = raw_input("Type exit to quit: ")
            if exit.lower() == "exit":
                break
        print "All out of quotes!"
        print "The final score is: {0}:{1}, {2}:{3}".format(self.player_1, self.player_1_score, self.player_2, self.player_2_score)
def main():
    """Entry point: load the quote file and run one game session."""
    quotes = []
    quotes = load_data('randomquotes.csv')
    print "Welcome to Guess the word in the quote!"
    game = Game()
    game.play_game(quotes)


if __name__ == "__main__":
    main()
|
985,050 | e4a198d0482bbc029aa293e83885f7cf752e0928 | #Heres that one we looked at in Python Tutor.
houses = ["Eric's house", "Kenny's house", "Kyle's house", "Stan's house"]
# Each function call represents an elf doing his work
def deliver_presents_recursively(houses):
    """Deliver presents to every house via recursive divide and conquer.

    Each call represents an elf: a single house is handled directly
    (worker elf); a longer list is split in half and delegated to two
    recursive calls (manager elf).
    """
    # Guard: an empty list would otherwise split into two empty halves
    # and recurse until the stack overflows.
    if not houses:
        return
    # Worker elf doing his work
    if len(houses) == 1:
        house = houses[0]
        print("Delivering presents to", house)
    # Manager elf doing his work
    else:
        mid = len(houses) // 2
        first_half = houses[:mid]
        second_half = houses[mid:]
        # Divides his work among two elves
        deliver_presents_recursively(first_half)
        deliver_presents_recursively(second_half)
deliver_presents_recursively(houses)
# Here are some simple iterative problems that have a simple recursive solution. I'll have the solution in the other file. DON'T PEEK.
#REVERSE A STRING
def reverse(s):
    """Return the characters of *s* in reverse order (iterative version)."""
    chars = list(s)
    chars.reverse()
    return "".join(chars)
s = "Geeksforgeeks"
print ("The original string is : ",end="")
print (s)
print ("The reversed string(using loops) is : ",end="")
print (reverse(s))
#Now do the same, but recursion.
def reverse_recursion(s):
    """Return *s* reversed, computed recursively.

    Base case: a string of length <= 1 is its own reverse.
    Recursive case: reverse of the tail, followed by the head character.
    (The original body was only a placeholder comment, which is a syntax
    error -- a def must contain at least one statement.)
    """
    #your code here.
    if len(s) <= 1:
        return s
    return reverse_recursion(s[1:]) + s[0]
s = "Geeksforgeeks"
print ("The original string is : ",end="")
print (s)
print ("The reversed string(using recursion) is : ",end="")
# NOTE(review): this calls the iterative reverse(), not reverse_recursion(),
# despite the label above -- presumably a placeholder until the exercise is
# filled in; confirm.
print (reverse(s))
#TOTAL CONSONANTS
# Iterative Python3 program to count
# total number of consonants
# Function to check for consonant
# Function to check for consonant
def isConsonant(ch):
    """Return True when single character *ch* is an ASCII consonant
    (case-insensitive): an uppercase letter after upper() that is not a vowel."""
    c = ch.upper()
    not_vowel = (c != 'A' and c != 'E' and c != 'I' and c != 'O' and c != 'U')
    return not_vowel and 65 <= ord(c) <= 90


def totalConsonants(string):
    """Count the consonants in *string* with one linear pass."""
    return sum(1 for c in string if isConsonant(c))
# Driver code
string = "abc de"
print(totalConsonants(string))
def recursiveTotalConsonants(string):
    """Count the consonants in *string* recursively.

    Base case: the empty string has zero consonants.  Recursive case:
    check the first character, recurse on the remainder.  (The original
    body was only a placeholder comment, which is a syntax error.)
    """
    #Your code here.
    if not string:
        return 0
    return (1 if isConsonant(string[0]) else 0) + recursiveTotalConsonants(string[1:])
|
985,051 | 1c440e92369afff3bf0bf45df7bb9918322b3ab0 | # -*- encoding: utf-8 -*-
import functools

import falcon
import redis

from .exception import NotRedisException
class MiddleWare(object):
    """Falcon middleware that serves and stores responses in Redis.

    The cache key is ``"<path>:<request body>"``.  The request stream can
    only be consumed once, so the key is computed in ``process_resource``
    and stashed on ``req.context`` for reuse in ``process_response`` --
    re-reading the stream there yielded an empty body and therefore a
    different key than the one used for lookups.
    """

    def __init__(self, conn):
        # conn is expected to be a redis.StrictRedis client (checked below).
        self.conn = conn

    def process_resource(self, req, resp, resource, params):
        # Build the cache key once; the body stream is not re-readable.
        key = '{}:{}'.format(req.path, req.stream.read())
        req.context['cache_key'] = key
        if isinstance(self.conn, redis.StrictRedis):
            data = self.conn.get(key)
            if data:
                # Cache hit: short-circuit with the stored payload.
                resp.body = data
                resp.status = falcon.HTTP_200
                req.context['has_cached'] = True

    def process_response(self, req, resp, resource, req_succeeded):
        if req.context.get('cache'):
            # Reuse the key computed in process_resource; fall back to
            # recomputing only if that phase never ran.
            key = req.context.get('cache_key')
            if key is None:
                key = '{}:{}'.format(req.path, req.stream.read())
            value = resp.body
            ttl = req.context.get('cache_ttl', 600)
            if isinstance(self.conn, redis.StrictRedis):
                self.conn.set(key, value, ex=ttl)
            else:
                raise NotRedisException()
class Cache(object):
    """Factory wiring a Redis connection pool to the caching middleware."""

    def __init__(self, host, port=6379, db=0):
        self.pool = redis.ConnectionPool(host=host, port=port, db=db)
        self.conn = redis.StrictRedis(connection_pool=self.pool)

    @staticmethod
    def cache(ttl=600):
        """Decorator marking a responder's output as cacheable for *ttl* seconds.

        The wrapped responder is skipped entirely when the middleware has
        already served the response from cache (``has_cached``); either way
        the context flags tell the middleware to (re)store the response.
        """
        def wrap1(func):
            @functools.wraps(func)  # preserve the responder's name/docstring
            def wrap2(cls, req, resp, *args, **kwargs):
                if not req.context.get('has_cached'):
                    func(cls, req, resp, *args, **kwargs)
                req.context['cache'] = True
                req.context['cache_ttl'] = ttl
            return wrap2
        return wrap1

    @property
    def middleware(self):
        """A MiddleWare instance bound to this cache's Redis connection."""
        return MiddleWare(self.conn)
|
985,052 | 144081281bd53e7bb5d9655a02a174fa68fddb3b | # coding: utf-8
import csv
fichier1 = "concordia1.csv"
f1 = open(fichier1)
# csv.reader returns a one-shot iterator and is NOT subscriptable;
# materialize it so rows can be indexed and iterated (devoir[2] raised
# TypeError in the original).
devoir = list(csv.reader(f1))
f1.close()

# Nombre de caractères dans les titres
longTitre = len(devoir[2])
print(longTitre)

# Maîtrise ou doctorat: inspect each row's 7th column for the "M." marker.
# (The original tested the constant devoir[6] on every iteration and
# clobbered the loop variable.)
for ligne in devoir:
    if "M." in ligne[6]:
        diplome = "maîtrise"
    else:
        diplome = "doctorat"
    print(diplome)
|
985,053 | bfd7ae4cdb24b37daab99bf9f56d9b5e9c2b13d3 | #! /usr/bin/env python
from MDAnalysis import *
from math import *
import argparse
import numpy as np
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
from scipy import interpolate
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description="Make triangle")
parser.add_argument("-t",
action="store", nargs='?',
required=True, dest="traj",
help="specifies an .dcd file created using the '-pbc mol' option")
parser.add_argument("-p",
action="store", nargs='?',
required=True, dest="psf",
help="path of a sample psf file")
parser.add_argument("-l",
action="store", nargs='?', default=float(0),
required=False, dest="lowerBond", type=float,
help="the lower bond of your scale bar")
parser.add_argument("-u",
action="store", nargs='?', default=float(1),
required=False, dest="upperBond", type=float,
help="the upper bond of your scale bar")
parser.add_argument("-r",
action="store", nargs='?', default="head",
required=False, dest="head",
help="the upper bond of your scale bar")
parser.add_argument("-o",
action="store", nargs='?', default="../Analysis/out.pov",
required=False, dest="output",
help="output filename")
parser.add_argument("-i",
action="store", nargs='?', default="../Analysis/ScalerZ.dat",
required=False, dest="input",
help="input` filename")
args = parser.parse_args()
traj_filename = args.traj
psf_filename = args.psf
colorLow = args.lowerBond
colorHigh = args.upperBond
headerFile = args.head
output_filename = args.output
input_filename = args.input
max_z = 350
shift = 10
# Read the trj and gro file
u = Universe(psf_filename, traj_filename)
# Obtain initial information form gro and trj files
print "Total number of atoms = " + str(len(u.atoms))
num_frames = u.trajectory.numframes
print "number of frames " + str(num_frames)
frame = num_frames / 2
# frame = 1
f1 = open('/home/ramezani/midway/lc/' + headerFile, 'r')
f2 = open(output_filename, 'w+')
head = f1.read()
f1.close()
f2.write(head)
f1 = open(input_filename, 'r')
z = []
S = []
for line in f1:
line = line.strip()
if line != "":
columns = line.split()
if '#' in columns[0]:
check = 'yes'
else:
zz = float(columns[0])
SS = float(columns[1])
S.append(SS)
z.append(zz)
y = np.zeros((len(z)), dtype='float')
for ii in range(0, len(z)):
y[ii] = S[ii]
zmin = z[0]
zmax = z[len(z) - 1]
delta = 2
f = interpolate.interp1d(z, y, kind='linear')
znew = np.arange(zmin, zmax, delta)
ynew = f(znew)
# plt.plot(z, y, 'o', znew, ynew, '-')
# plt.show()
R = 0.0
G = 0.0
B = 0.0
T = 1.0
Ra = 1.0
# Advance the trajectory to the middle frame, then render only that frame.
for curr_frame in xrange(0, frame + 1):
    if curr_frame != 0:
        trj = u.trajectory.next()
    else:
        trj = u.trajectory[0]
    if curr_frame < frame:
        continue
    else:
        box = u.dimensions[0:3]
        # One POV-Ray sphere per NY1 nitrogen, coloured by the interpolated
        # order-parameter profile along z.
        coor = u.selectAtoms("name NY1").residues.coordinates()
        for i in xrange(0, len(coor)):
            pos1 = coor[i]
            # Bin the z coordinate into the interpolated profile (clamped).
            nz = int((pos1[2] - zmin - delta / 2) / delta)
            if nz >= len(ynew):
                nz = len(ynew) - 1
            if nz < 0:
                nz = 0
            yy = ynew[nz]
            # Normalize against the user-supplied colour-bar bounds.
            yy = (yy - colorLow) / (colorHigh - colorLow)
            # Gaussian ramps: blue near 0, green near 0.5, red near 1;
            # then normalize the RGB vector and brighten by +0.5.
            B = exp(-(yy * yy) / 0.1)
            G = exp(-(yy - 0.5) * (yy - 0.5) / 0.1)
            R = exp(-(yy - 1.0) * (yy - 1.0) / 0.1)
            VV = sqrt(R * R + G * G + B * B)
            R /= VV
            G /= VV
            B /= VV
            R += 0.5
            G += 0.5
            B += 0.5
            f2.write(
                'sphere { < %5.3f, %5.3f, %5.3f>, %5.3f pigment {color rgb<%5.3f, %5.3f, %5.3f>} finish {phong .8 }} \n' % (
                    pos1[0], pos1[1], pos1[2], Ra, R, G, B))
        # Write the pov-ray file for F9
        coor_f_c = u.selectAtoms("type CCF3 or type CCF2").coordinates()
        coor_f_f = u.selectAtoms("type FCF3 or type FCF2").coordinates()
        coor_f = u.selectAtoms("resname F9").coordinates()
        f2.write('// F \n')
        R = 1.0
        G = 0.6
        B = 0.6
        r = 1.0
        # Shift whole 29-atom F9 groups down by one box length in z when any
        # of their atoms sits above max_z -- presumably unwrapping across the
        # periodic boundary; confirm the 29-atom stride against the topology.
        for i in xrange(0, len(coor_f) - 29, 29):
            flag = False
            for j in range(0, 29):
                pos1 = coor_f[i + j]
                if pos1[2] > max_z:
                    flag = True
                    break
            if flag:
                for j in range(0, 29):
                    coor_f[i + j][2] = coor_f[i+j][2] - box[2]
                flag = False
        for i in xrange(0, len(coor_f)):
            pos1 = coor_f[i]
            f2.write(
                'sphere { < %5.3f, %5.3f, %5.3f>, %5.3f pigment {color rgb<%5.3f, %5.3f, %5.3f>} finish {specular 0.7 roughness 0.03}} \n' % (
                    pos1[0], pos1[1], pos1[2], r, R, G, B))
f2.close()
|
985,054 | 4b1f7de3ef80cbedc2bfa249e1e708ceba6d39de | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 9 12:26:49 2020
@author: Mr.Vakili
"""
import numpy as np
import cv2
# Edge-detection demo: Canny, Sobel and Prewitt filters on each video frame.
# Renamed `file` -> `cap` (shadowed a builtin); the original also called
# cam.release() on exit, which raised NameError since `cam` never existed.
cap = cv2.VideoCapture('hw.avi')
while True:
    # start getting frames
    ret, frame = cap.read()
    if not ret:
        # End of clip or read failure: stop instead of crashing in cvtColor.
        break
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    frame_blur = cv2.GaussianBlur(frame_gray, (5, 5), 0)
    # canny
    edge_canny = cv2.Canny(frame_gray, 50, 200)
    # sobel on the blurred frame
    edge_x_blur = cv2.Sobel(frame_blur, cv2.CV_64F, 1, 0, ksize=5)
    edge_y_blur = cv2.Sobel(frame_blur, cv2.CV_64F, 0, 1, ksize=5)
    edge_sobel_blur = cv2.Sobel(frame_blur, cv2.CV_8U, 1, 1, ksize=5)
    # sobel on the original gray frame
    edge_x = cv2.Sobel(frame_gray, cv2.CV_64F, 1, 0, ksize=5)
    edge_y = cv2.Sobel(frame_gray, cv2.CV_64F, 0, 1, ksize=5)
    edge_sobel = cv2.Sobel(frame_gray, cv2.CV_8U, 1, 1, ksize=5)
    # prewitt kernels (horizontal / vertical edge response)
    xkernel = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])
    ykernel = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
    xedge_prewitt_blur = cv2.filter2D(frame_blur, -1, xkernel)
    yedge_prewitt_blur = cv2.filter2D(frame_blur, -1, ykernel)
    # prewitt on the original gray frame
    xedge_prewitt = cv2.filter2D(frame_gray, -1, xkernel)
    # NOTE(review): uses frame_blur although labelled "org" -- confirm intent.
    yedge_prewitt = cv2.filter2D(frame_blur, -1, ykernel)
    # show frames to produce video
    cv2.imshow("CANNY", edge_canny)
    cv2.imshow('Sobel_ORG', edge_sobel)
    cv2.imshow('Sobel_BLUR', edge_sobel_blur)
    cv2.imshow('PrewittX_BLUR', xedge_prewitt_blur)
    cv2.imshow('PrewittY_BLUR', yedge_prewitt_blur)
    cv2.imshow('PrewittX_ORG', xedge_prewitt)
    cv2.imshow('PrewittY_ORG', yedge_prewitt)
    key = cv2.waitKey(20)
    if key == ord('e'):
        break
cap.release()
cv2.destroyAllWindows()
|
985,055 | d0b211f544e5c79f4d96045189e50af86e1bb27b | from django.db import models
from django.conf import settings
from django_extensions.db.models import TimeStampedModel
from .constants import ContactType
class PhoneBook(TimeStampedModel):
    """A named collection of contacts owned by one user."""

    # Phonebooks are deleted together with their owning user.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    title = models.CharField(max_length=255, blank=True, null=True)

    def __str__(self):
        # title is nullable: returning None from __str__ raises TypeError,
        # so fall back to an empty string for untitled phonebooks.
        return self.title or ""
class Contact(TimeStampedModel):
    """A person in a phonebook, with any number of emails and numbers."""

    first_name = models.CharField(max_length=255, blank=True, null=True)
    last_name = models.CharField(max_length=255, blank=True, null=True)
    emails = models.ManyToManyField("ContactEmail", blank=True)
    contact_numbers = models.ManyToManyField("ContactNumber", blank=True)
    favourite = models.BooleanField(default=False)
    # Contacts are deleted together with their phonebook.
    phonebook = models.ForeignKey(PhoneBook, on_delete=models.CASCADE)

    def __str__(self):
        # NOTE(review): both names are nullable, so this can render "None None".
        return "{} {}".format(self.first_name, self.last_name)
class ContactEmail(TimeStampedModel):
    """A single email address attachable to many contacts (M2M)."""

    email = models.CharField(max_length=255, blank=True, null=True)

    def __str__(self):
        # NOTE(review): email is nullable; str() raises for a null value.
        return self.email
class ContactNumber(TimeStampedModel):
    """A single phone number with a type from ContactType.CHOICES."""

    # max_length=11 -- presumably a local national format; confirm before
    # storing international numbers.
    number = models.CharField(max_length=11, blank=True, null=True)
    contact_type = models.CharField(max_length=10, choices=ContactType.CHOICES)

    def __str__(self):
        # NOTE(review): number is nullable; str() raises for a null value.
        return self.number
class ContactGroup(TimeStampedModel):
    """A named group of contacts (many-to-many)."""

    name = models.CharField(max_length=255)
    contacts = models.ManyToManyField(Contact, blank=True)

    def __str__(self):
        return self.name
class CallLog(TimeStampedModel):
    """One call record per contact; duration unit is not specified here --
    presumably seconds, confirm against callers."""

    contact = models.ForeignKey("Contact", on_delete=models.CASCADE)
    duration = models.IntegerField(default=0)

    def __str__(self):
        return "Call Log for {}".format(self.contact)
|
985,056 | da8629b5d1de7b4297b1417924cd1015c5d2da2b | import os
import numpy as np
import torch
from torch import Tensor
from torch.autograd import Variable
def logit(x):
    """Inverse sigmoid of tensor *x*, clamped away from 0 and 1 for stability."""
    eps = 1e-6
    clamped = x.clamp(eps, 1. - eps)
    # logit(p) = -log(1/p - 1)
    return -torch.log(torch.reciprocal(clamped) - 1.)
def logit_logit_gate(s, g):
    """
    :param s: pre-sigmoid logits
    :param g: pre-sigmoid gate logits
    :return: logit(sigmoid(s) * sigmoid(g))
    """
    gated = torch.sigmoid(s) * torch.sigmoid(g)
    return logit(gated)
def count_parameters(model):
    """Return ``(total, trainable)`` parameter-element counts for *model*."""
    total = 0
    trainable = 0
    for p in model.parameters():
        n = p.numel()
        total += n
        if p.requires_grad:
            trainable += n
    return total, trainable
def find_in_dir(dirname):
    """Return sorted full paths of the entries directly inside *dirname*."""
    paths = []
    for fname in sorted(os.listdir(dirname)):
        paths.append(os.path.join(dirname, fname))
    return paths
def to_numpy(x) -> np.ndarray:
    """Convert *x* (numpy array or torch tensor) to a numpy array.

    :raises ValueError: for any other input type.
    """
    if isinstance(x, torch.Tensor):
        return x.detach().cpu().numpy()
    if isinstance(x, np.ndarray):
        return x
    raise ValueError('Unsupported type')
def weights_init_normal(m):
    """Init hook for ``Module.apply``: N(0, 0.02) weights for Conv* layers;
    N(1, 0.02) weights and zero bias for BatchNorm2d layers."""
    name = m.__class__.__name__
    if name.startswith('Conv'):
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm2d' in name:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
def compute_mask_class(y_true: Tensor):
    """
    Computes index [0;4] for masks. 0 - <20%, 1 - 20-40%, 2 - 40-60%, 3 - 60-80%, 4 - 80-100%
    :param y_true: batch of masks, shape (batch, 1, ...) or (batch, 2, ...)
    :return: uint8 tensor of per-sample class indices
    """
    masks = y_true.detach().cpu()
    batch_size = masks.size(0)
    num_classes = masks.size(1)
    if num_classes == 1:
        flat = masks.view(batch_size, -1)
    elif num_classes == 2:
        flat = masks[:, 1, ...].contiguous().view(batch_size, -1)  # Take salt class
    else:
        raise ValueError('Unknown num_classes')
    # Fraction of positive pixels per sample, quantized into 5 bins.
    coverage = flat.sum(dim=1) / float(flat.size(1))
    return (coverage * 4).round().byte()
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
|
985,057 | 6f4f61dceea274151e64fc511b935abe11226b70 | import functions
# Input Data
Person1 = [['9:00', '10:30'], ['12:00', '13:00'], ['16:00', '18:00']]
Person1_Avail = [['9:00', '20:00']]
Person2 = [['10:00', '11:30'], ['12:30', '14:30'], ['14:30', '15:00'], ['16:00', '17:00']]
Person2_Avail = [['10:00', '18:30']]
Duration = 30
def Arrange_Meeting(Person1, Person1_Avail, Person2, Person2_Avail):
    """Find meeting slots of length ``Duration`` open for both people.

    NOTE(review): the boundary-augmented busy lists below are computed but
    never used -- Find_free_slots is called with the raw busy lists instead
    of Person1_busy/Person2_busy.  Presumably the augmented lists were
    intended; confirm against the functions module before changing.
    """
    # includes the start and end time boundaries for each person
    Person1_busy = functions.Include_boundaries(Person1, Person1_Avail)
    Person2_busy = functions.Include_boundaries(Person2, Person2_Avail)
    Person1_free = functions.Find_free_slots(Person1, Duration)
    Person2_free = functions.Find_free_slots(Person2, Duration)
    # Union of the common slots found from both directions.
    result1 = functions.Find_common_slots(Person1_free, Person2_free, Duration)
    result2 = functions.Find_common_slots(Person2_free, Person1_free, Duration)
    return result1 + result2
print(Arrange_Meeting(Person1, Person1_Avail, Person2, Person2_Avail))
|
985,058 | c78de733bce2dd3aed949e5396b68f0a0fd75601 | ''' PROBLEM STATEMENT as taken from the following Hacker Rank link
https://www.hackerrank.com/challenges/birthday-cake-candles/problem
You are in-charge of the cake for your niece's birthday and have decided the cake will have one
candle for each year of her total age. When she blows out the candles, she’ll only be able to blow
out the tallest ones. Your task is to find out how many candles she can successfully blow out.
For example, if your niece is turning years old, and the cake will have candles of height 3,2,1,3,
she will be able to blow out candles successfully, since the tallest candle is of height and there
are such candles.
Input Format
The first line contains a single integer, , denoting the number of candles on the cake.
The second line contains space-separated integers, where each integer describes the height of candle .
Output Format
Print the number of candles that can be blown out on a new line.
'''
#MY ALGORITHM
n = input('Age of your neice: ')
candles = input('height of candles: ').strip().split()
# print(candles)
def birthdayCakeCandles(candles):
    """Bubble-sort *candles* in place by numeric height and return the list.

    The heights arrive as strings from input().split(); the original
    compared them lexicographically, which mis-orders multi-digit values
    ('10' < '9'), so the comparison converts to int.  Handles empty and
    single-element lists (the ranges are simply empty).
    """
    for i in range(0, len(candles) - 1):
        for j in range(0, len(candles) - 1 - i):
            if int(candles[j]) > int(candles[j + 1]):
                candles[j], candles[j + 1] = candles[j + 1], candles[j]
    return candles
sort_candles= birthdayCakeCandles(candles)
print(sort_candles.count(max(sort_candles)))
''' A BETTER APPROACH
I saw some people on the discussion board using the function map to do this task. So after, I had
written my own algorithm to make sure that I understood it, I tried my had at map function. It is a lot of funself.
'''
n = input()
candles = list(map(int, input().strip().split()))
print(candles.count(max(candles)))
|
985,059 | 606611a0448348e41cb5a7689d3246e268550e6e |
from django.contrib.auth.decorators import user_passes_test
def check_user(user):
    # Gate predicate: passes only for anonymous (not logged-in) users.
    return not user.is_authenticated

# Redirects already-authenticated users to '/' instead of the wrapped view.
user_logout_required=user_passes_test(check_user, '/', None)

def auth_user_should_not_access(viewfunc):
    """Decorator for views that only logged-out users may access
    (e.g. login/registration pages)."""
    return user_logout_required(viewfunc)
|
985,060 | d85e198580ce9d97e08de86a8422cf1918c5c360 | import random
import score
#define deck
#define deck
class Deck():
    """A shuffled deck of cards (14 ranks x 4 suits, as in the original)."""

    def __init__(self):
        self.card_list = list()

    def deck_generator(self):
        """Append a full set of [suit, rank] cards and shuffle the deck."""
        suits = ['Heart', 'Diamond', 'Club', 'Spade']
        cards = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'K', 'Q', 'J', 'Ace']
        # extend (not replace) so repeated calls keep the original behavior
        self.card_list.extend([suit, card] for suit in suits for card in cards)
        random.shuffle(self.card_list)

    #get one card from deck
    def get_card(self):
        """Remove and return the top card of the deck."""
        return self.card_list.pop()
#define player
#define player
class player():
    """A blackjack player: a hand of cards, a bankroll and a running score."""

    def __init__(self, player_deck: list, money=None, score=None):
        # Default bankroll is 100.  BUG FIX: an explicitly passed score was
        # previously dropped (self.score was only assigned when score was
        # None, leaving the attribute unset otherwise).
        self.money = 100 if money is None else money
        self.score = 0 if score is None else score
        self.player_deck = player_deck

    def hit(self, new_card: list):
        """Add *new_card* to the hand."""
        self.player_deck.append(new_card)

    def stand(self):
        """Return the hand's blackjack value (delegates to the score module)."""
        return score.get_score(self.player_deck)

    def display_player_deck(self):
        """Print the hand on one tab-separated line."""
        print ("You have: ", end='')
        for c in self.player_deck:
            print (c, end='\t')

    def check_lose(self):
        """True when the hand is bust (value over 21)."""
        return score.get_score(self.player_deck) > 21
def start(player, dealer_value, deck):
    """Play one betting round; returns the player's money afterwards."""
    print ("You have: £%s " %(player.money))
    print ("1.bet 2.quit")
    choice1 = str()
    # BUG FIX: the original looped `while choice1 in ['1', '2']`, which is
    # False for the initial empty string, so input() was never called and
    # the round proceeded with no bet.
    while choice1 not in ['1', '2']:
        choice1 = input()
    bet = 0
    if choice1 == '1':
        while bet not in [10, 20, 30]:
            bet = int(input("Choose bet money(10, 20, 30): "))
            if bet > player.money:
                print ("You can't bet more money than you have")
                bet = 0
        print ("Choose bet money: ", bet)
    if choice1 == '2':
        print ("Quit game")
        quit()
    # round loop: one iteration per hit/stand decision
    while True:
        player.display_player_deck()
        player.score = player.stand()
        print ("Your score is: %s" %(player.score))
        print ("1. hit 2.stand")
        choice2 = input()
        if choice2 == '1':  # choice=hit
            player.hit(deck.get_card())
            if player.check_lose():  # player bust
                player.display_player_deck()
                print ("Your score is : %s" %(player.stand()))
                print ("You lose! You lose your bet money.")
                player.money -= bet
                return player.money
        if choice2 == '2':  # choice=stand
            if player.stand() < dealer_value and dealer_value <= 21:  # player lose
                print ("dealer value: %s" %(dealer_value))
                print ("Your total value: %s" %(player.stand()))
                print ("You lose!")
                print ("You lose your bet money.")
                player.money -= bet
                return player.money
            elif player.stand() == dealer_value:
                # BUG FIX: the original compared the bound method itself
                # (`player.stand == dealer_value`), which is always False.
                print("dealer value: %s" % (dealer_value))
                print("Your total value: %s" % (player.stand()))
                print("You draw")
                print("You do not lose and earn money.")
                # BUG FIX: without a return, a push looped the round forever.
                return player.money
            elif dealer_value > 21:  # dealer bust
                print("dealer value: %s" % (dealer_value))
                print("Your total value: %s" % (player.stand()))
                print ("You win! you get bet money.")
                player.money += bet
                return player.money
            else:  # player win
                print ("dealer value: %s" % (dealer_value))
                print ("Your total value: %s" % (player.stand()))
                print ("You win!")
                print ("You get bet money")
                player.money += bet
                return player.money
|
985,061 | f80f994f50b51f896f998f56fdc05a5db98fbe74 | """Create Database for IMF-data. Update Database with IMF-data
====================================================================
create_db - creates sqlite3 database file (param name - path to new db-file);
it reads needed indicators list from csv-file (param 'indi_file' - path to file, file must have right format)
and needed country list from txt-file (param 'country_file' - path to file, file must have right format)
function create two service table: INDICATORS with indicators list for next update db,
and COUNTRIES with list of needed countries.
then create INDICATORS_FULL view for reading all data in database at once
then run 'update' function for read all needed indicators for all needed countries from IMF web source
started from 1970 year
====================================================================
update_db - read list of needed indicators and countries from existing IMF-database (param 'db_name' - path to sqlite3-file),
then read IMF-database from Internet started from year in 'start' param,
and save it in given sqlite3 database, using UPSERT (append new data and update existing)
return all readed data in one pandas DataFrame
====================================================================
get_countryes - read countries list from IMF-database on Internet,
list of datasets read from existing sqlite3 database (param 'db_name' - path)
list of needed ountries read from txt-file (param 'country_txt_file' - path)
make and return pandas DataFrame with intersection countries lists from Internet and local file,
not save results in sqlite3 file
"""
import os.path
import pandas as pd
import sqlalchemy as sa
import datetime as dt
from COMMON import pandas_sql as pds
from COMMON import readers as cmm
_db_indicators=cmm.work_db_IMF
def update_db(db_name=_db_indicators, start=1950, end=dt.datetime.now().year, write_db=True):
    """update existing sqlite3 local database with data readed from IMF Internet database"""
    # NOTE: the default for ``end`` is evaluated once at import time.
    def read_indicators(pdfI=None, coutries=[], ctry_chunksize=50, write_db=True):
        # Fetch every indicator in pdfI for all countries, requesting the
        # IMF API in chunks of ctry_chunksize countries at a time.
        print('UPDATE IMF: Start reading {0} indicators'.format(pdfI.shape[0]))
        #dct_not_data=dict()
        lst_ret=[]
        for k, v in pdfI.iterrows():
            lst_pdf=list()
            lst_not_country=list()
            tbl_name=k #'{0}_{1}'.format(k, freq)
            print('UPDATE IMF ({2}-{3}): reading {0}, tDS={1}\t'.format(k, v['Dataset'], start, end), end='... ')
            for cs in cmm.iterate_group(coutries, ctry_chunksize):
                try:
                    pdf = pds.read_imf(strDataSetID=v['Dataset'], indiID=k, countryCode=cs,
                                       frequency=v['Freq'], startDate=start, endDate=end)
                    lst_pdf.append(pdf)
                    lst_not_country+=pdf.not_country
                    #print(pdf.name, pdf.shape, len(pdf.not_country))
                except ValueError as e:
                    # No data for this chunk: remember its countries, move on.
                    lst_not_country += cs
                    #print(e, k, 0, 50)
            try:
                pdfC=pds.DataFrameDATA(pd.concat([ppdf for ppdf in lst_pdf if not ppdf.empty]))
                pdfC.name=tbl_name
                #dct_not_data.update({'IND_NOT':tbl_name, 'NOT_DATA':lst_not_country})
                print('read {name},\tlen {len_df},\tnot data countries - {nc}'.format(name=pdfC.name,
                                                                                      len_df=pdfC.shape[0],
                                                                                      nc=len(lst_not_country)), end='... ')
                if write_db:
                    # UPSERT into a per-indicator table, then record status.
                    print('write to DB...', end='')
                    lstWrite=[c for c in pdfC.columns.tolist() if c !='mult']
                    pdfC[lstWrite].to_sql(pdfC.name, coni, if_exists='upsert')
                    cmm.write_status(db_name, k, pdfC.shape[0], mult=pdfC['mult'].unique()[0])
                    print('done', end='\n')
                pdfC['INDI']=k
                lst_ret.append(pdfC)
                #print(dct_not_data)
            except ValueError as e:
                # pd.concat raises ValueError when every chunk came back empty.
                print(e, 'not data for ', k, v['Dataset'], len(cs))
        return pd.concat(lst_ret)

    coni = sa.create_engine('sqlite+pysqlite:///{db_name}'.format(db_name=db_name))
    # pdfIndi=pd.read_sql('select * from INDICATORS where LastUpdateDateA is NULL', coni, index_col='Code')
    # The indicator and country lists to refresh come from the local DB's
    # service tables, created by create_db().
    pdfIndi = pd.read_sql('select * from {INDI_NAME}'.format(INDI_NAME=cmm.strINDI_db_name), coni, index_col='Code')#.iloc[:40]
    pdfCountry = pd.read_sql('select * from {COUNTRY_NAME}'.format(COUNTRY_NAME=cmm.strCOUNTRY_db_name), coni, index_col='id')
    country_list = pdfCountry.index.tolist()
    print('UPDATE IMF: reading {0} countries'.format(len(country_list)))
    pdfQ=read_indicators(pdfI=pdfIndi.sort_index(), coutries=country_list, write_db=write_db)
    print('=' * 50)
    print('UPDATE IMF: all done')
    return pdfQ
def create_db(name=_db_indicators,
              indi_file=os.path.join('Source', 'codes_need.csv'),
              country_file=os.path.join('Source', 'work_countries.txt')):
    """ Create local sqlite3 database file with data readed from IMF Internet database """
    def create_indi_country(pdfI, con, mess, db_name, freq):
        # Build one database: INDICATORS + COUNTRIES service tables, two
        # update passes over the IMF API, then the INDICATORS_FULL view.
        if pdfI.shape[0]==0:
            return
        print('+' * 50, '{} WORKS'.format(mess), '+' * 50)
        pdfI.to_sql(cmm.strINDI_db_name, con, if_exists='replace')
        print('CREATE IMF.INDICATORS table for {} indicators'.format(pdfI.shape[0]))
        pdfC = get_countryes(db_name=db_name, country_txt_file=country_file)
        pdfC.to_sql(cmm.strCOUNTRY_db_name, con=con, if_exists='replace')
        print('CREATE IMF.COUNTRIES for {0} countries.'.format(pdfC.shape[0]))
        # Two passes with overlapping ranges (1970-2000, then 1999-now).
        update_db(db_name=db_name, start=1970, end=2000)
        update_db(db_name=db_name, start=1999)
        cmm.create_views(db_name, freq=freq)

    pdf = cmm.read_indicators_from_csv(indi_file)
    print(indi_file)
    # Split the requested indicators by frequency; each frequency gets its
    # own database file (quarterly in `name`, annual/monthly in variants).
    pdfQ = pdf[pdf['Freq']=='Q']
    pdfA = pdf[pdf['Freq'] == 'Y']
    pdfM = pdf[pdf['Freq'] == 'M']
    #pdfC = cmm.read_countries(file_name=country_file)
    nameA=cmm.db_name2annu(name)
    nameM = cmm.db_name2annu(name, suff='_M')
    coni = sa.create_engine('sqlite+pysqlite:///{name}'.format(name=name))
    coniA = sa.create_engine('sqlite+pysqlite:///{name}'.format(name=nameA))
    coniM = sa.create_engine('sqlite+pysqlite:///{name}'.format(name=nameM))
    create_indi_country(pdfQ, coni, 'QUARTERLY', name, freq='Q')
    create_indi_country(pdfA, coniA, 'ANNUAL', nameA, freq='A')
    create_indi_country(pdfM, coniM, 'MONTHLY', nameM, freq='M')
def get_countryes(db_name=_db_indicators, country_txt_file=os.path.join('Source', 'work_countries.txt')):
    """ Read and return counties list as two-chars code <-> Country's name from IMF Internet database"""
    imf = cmm.READ_DB(db_name=None)
    country_list = cmm.read_countries(file_name=country_txt_file)
    print('CREATE IMF: reading countries from all neede datasets...', end=' ')
    coni = sa.create_engine('sqlite+pysqlite:///{db_name}'.format(db_name=db_name))
    # Collect the geographic-area dimension of every dataset referenced in
    # the local INDICATORS table.
    dbSETS=pd.read_sql('SELECT DISTINCT Dataset from {INDI_NAME}'.format(INDI_NAME=cmm.strINDI_db_name), con=coni)
    cntrl=list()
    for k, d in dbSETS.iterrows():
        try:
            cntrl.append(pd.DataFrame(imf.get_datastructure_list(d['Dataset'])['Geographical Areas']).set_index('CL_AREA_{}'.format(d['Dataset'])))
        except KeyError:
            # Dataset without a 'Geographical Areas' dimension: skip it.
            pass
    # pdfC = pd.concat([pd.DataFrame(imf.get_datastructure_list(d['Dataset'])['Geographical Areas']).set_index('CL_AREA_{}'.format(d['Dataset'])) for k, d in dbSETS.iterrows() ])
    pdfC = pd.concat(cntrl)
    # Keep only the wanted countries and drop duplicate codes across datasets.
    pdfC=pdfC[pdfC.index.isin(country_list)]
    pdfC = pdfC[~pdfC.index.duplicated()]
    pdfC.index.name='id'
    pdfC=pdfC.rename(columns={'Geographical Areas':'Country'})
    print('done reading countries', end='\n')
    return pdfC
#print(dbSETS)
import sqlite3
if __name__ == "__main__":
create_db(name=_db_indicators)
#con=sqlite3.connect(_db_indicators)
#pdfT=pd.read_sql('select * from INDICATORS_FULL', index_col='id', con=con)
#update_db(db_name=_db_indicators, start=2010)
#update_db(db_name='IMF1_M.sqlite3', start=1999, frequency='M')
#pdfTU = pd.read_sql('select * from INDICATORS_FULL', index_col='id', con=con)
#print('before {0}, after {1}'.format(pdfT.shape[0], pdfTU.shape[0]))
#print(pdfTU.loc[pdfTU.index.duplicated(), :])
#print(update_db(db_name=_db_indicators))
#cmm.create_views(db_name=db_indicators)
#print(get_countryes())
#create_views(db_name=db_indicators, freq='Q')
|
985,062 | c5fc4c91096c124aa3d9efe70a7e2e0e168e4632 | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import imp
import os
import unittest
__version__ = '1.2.0'
__version_info__ = (1, 2, 0)
def _import_from(mod, path, mod_dir=None):
    """
    Imports a module from a specific path

    :param mod:
        A unicode string of the module name

    :param path:
        A unicode string to the directory containing the module

    :param mod_dir:
        If the sub directory of "path" is different than the "mod" name,
        pass the sub directory as a unicode string

    :return:
        None if not loaded, otherwise the module
    """
    if mod_dir is None:
        mod_dir = mod

    # A missing source checkout means the caller should fall back to the
    # installed package (see test_classes()).
    if not os.path.exists(path):
        return None

    if not os.path.exists(os.path.join(path, mod_dir)):
        return None

    try:
        # NOTE(review): the imp module is deprecated (removed in Python
        # 3.12) -- kept here for the py2/py3 span this file targets.
        mod_info = imp.find_module(mod_dir, [path])
        return imp.load_module(mod, *mod_info)
    except ImportError:
        return None
def make_suite():
    """
    Constructs a unittest.TestSuite() of all tests for the package. For use
    with setuptools.

    :return:
        A unittest.TestSuite() object
    """
    loader = unittest.TestLoader()
    per_class = [loader.loadTestsFromTestCase(cls) for cls in test_classes()]
    suite = unittest.TestSuite()
    for tests in per_class:
        suite.addTests(tests)
    return suite
def test_classes():
    """
    Returns a list of unittest.TestCase classes for the package

    :return:
        A list of unittest.TestCase classes
    """
    # If we are in a source folder and these tests aren't installed as a
    # package, we want to load asn1crypto from this source folder
    tests_dir = os.path.dirname(os.path.abspath(__file__))

    asn1crypto = None
    if os.path.basename(tests_dir) == 'tests':
        asn1crypto = _import_from(
            'asn1crypto',
            os.path.join(tests_dir, '..')
        )
    if asn1crypto is None:
        import asn1crypto

    # Refuse to run the suite against a mismatched library version.
    if asn1crypto.__version__ != __version__:
        raise AssertionError(
            ('asn1crypto_tests version %s can not be run with ' % __version__) +
            ('asn1crypto version %s' % asn1crypto.__version__)
        )

    # Test-module imports are deferred until after the version check above.
    from .test_algos import AlgoTests
    from .test_cms import CMSTests
    from .test_crl import CRLTests
    from .test_csr import CSRTests
    from .test_init import InitTests
    from .test_keys import KeysTests
    from .test_ocsp import OCSPTests
    from .test_pem import PEMTests
    from .test_pkcs12 import PKCS12Tests
    from .test_tsp import TSPTests
    from .test_x509 import X509Tests
    from .test_util import UtilTests
    from .test_parser import ParserTests
    from .test_core import CoreTests

    return [
        AlgoTests,
        CMSTests,
        CRLTests,
        CSRTests,
        InitTests,
        KeysTests,
        OCSPTests,
        PEMTests,
        PKCS12Tests,
        TSPTests,
        UtilTests,
        ParserTests,
        X509Tests,
        CoreTests
    ]
|
985,063 | 8d52ec3a77ac63b6ef6bd2d857105227b7d05f22 | #
#
#
def pic_speed(v):
    """Serial speed the dsPIC produces for divisor register value *v*."""
    half_clock = 77385000.0 / 2
    return half_clock / (16 * (v + 1))


def ax12_speed(v):
    """Serial speed the AX-12 servo produces for register value *v*."""
    return 2000000.0 / (v + 1)
# Python 2 script: exhaustively match dsPIC and AX-12 baud-rate divisors.
if __name__ == "__main__":
    pairs = []
    # generate all pairs of (dsPIC divisor, AX-12 divisor) with their
    # resulting speeds and the absolute speed difference
    for pv in range(0,256):
        for av in range(0,256):
            p_speed = pic_speed(pv)
            a_speed = ax12_speed(av)
            pairs.append( (pv, av, p_speed, a_speed, abs(p_speed - a_speed)) )
    # now compute min
    min_val = pairs[0][4]
    min_val_index = 0
    for i in range(1,len(pairs)):
        if pairs[i][4] < min_val:
            min_val = pairs[i][4]
            min_val_index = i
    print "Minimum Error At:"
    #print pairs[min_val_index]
    print "AX12 Constant", pairs[min_val_index][1]
    print "dsPIC Constant", pairs[min_val_index][0]
    print
    print
    print "Other possible values with error less than 1%"
    print "----------------+-----------------+-----------"
    print " AX12 | dsPIC |"
    print " Speed Value | Speed Value | Error (%)"
    print "----------------+-----------------+-----------"
    for i in range(0,len(pairs)):
        (pv, av, p_speed, a_speed, diff) = pairs[i]
        # Express the mismatch as a ratio >= 1 so 1% works in both directions.
        if p_speed > a_speed:
            ratio = p_speed / a_speed
        else:
            ratio = a_speed / p_speed
        if (ratio < 1.01)and(ratio > 0.99):
            print " %8d %4d | %8d %4d | %5.2f" % (int(a_speed), av, int(p_speed), pv, abs(1-ratio)*100)
            #print pairs[i], ratio
|
985,064 | 01be98d443879a307589b0d0f931c08ed1ae63c0 | #! /usr/bin/env python
# coding:utf-8
# Python 2 exercise: prompt for the user's age and echo it back.
# %r prints the repr of the raw_input() string, quotes included.
print "How old are you ?"
age = raw_input()
print " You are %r years old." %(age)
985,065 | 8a3c4b34098c75fc8176e9b8672d1b89eaf29dfa | import os
import configuration
class remove_user():
    """Removes a system user, their home directory and their group (Python 2).

    NOTE(review): the username is interpolated straight into shell commands
    run via os.system -- a hostile value allows shell injection.  Prefer
    subprocess.call(['sudo', 'userdel', self.user]) with a list argument.
    """

    def __init__(self, user):
        self.user = user

    def remove(self):
        os.system("sudo userdel %s" % self.user)
        print "deleted user"
        os.system("sudo rm -r /home/%s" % self.user)
        print "Deleted home dir"
        os.system("sudo groupdel %s-group" % self.user)
        print "Deleted group"

    # removes old users from sshd_config, needs amount of users left in db
    # OBS THIS IS NOT WORKING, WAS JUST AN IDEA
    def fix_ssh_config(self, amount):
        # NOTE(review): known-broken by the author's own comment above --
        # it writes /etc/sshd/sshd_config but appends to /etc/ssh/sshd_config,
        # `owner.name` is a no-op expression, and `self.owner` is never
        # defined (the loop variable `owner` was presumably intended).
        # Create blank(defaulted) ssh config
        with open("/etc/sshd/sshd_config", 'w') as f:
            f.writelines(configuration.ssh_config)
            f.close()
        #create new Match Group for every user still left in database
        for owner in amount:
            owner.name
            with open("/etc/ssh/sshd_config", 'a') as f:
                f.writelines(configuration.sshd % (self.owner, self.owner,))
                f.close()
# Example usage of remove users function
#for x in b:
# remove_user(x).remove()
|
985,066 | d28fb28a41a9bc8292c60e513b50de465f28e009 | #!/usr/bin/env python
# Standard library
import datetime
import errno
import glob
import logging
import os
import pwd
import random
import re
import signal
import subprocess
import sys
import time
from time import sleep

# Third-party
from scapy.all import sr1, TCP, IP, Raw, hexdump, sr, send, conf, L3RawSocket, rdpcap, Scapy_Exception
from scapy_http.http import HTTPResponse

# Project-local (star import; glob/re above are now explicit regardless
# of what s2e_utils re-exports)
from s2e_utils import *
# a hotfix to scapy TCP answer function
def myanswers(self, other):
    """Relaxed replacement for scapy's TCP.answers.

    Pairs `self` as an answer to `other` using flags and port matching
    only; the stock SEQ/ACK proximity checks are commented out so that
    out-of-window probe replies are still treated as answers.
    Returns 1 for "is an answer", 0 otherwise.
    """
    if not isinstance(other, TCP):
        return 0
    # RST packets don't get answers
    if other.flags.R:
        return 0
    # We do not support the four-way handshakes with the SYN+ACK
    # answer split in two packets (one ACK and one SYN): in that
    # case the ACK will be seen as an answer, but not the SYN.
    if self.flags.S:
        # SYN packets without ACK are not answers
        if not self.flags.A:
            return 0
        # SYN+ACK packets answer SYN packets
        if not other.flags.S:
            return 0
    if conf.checkIPsrc:
        # Ports must be mirrored (our sport == their dport and vice versa).
        if not ((self.sport == other.dport) and
                (self.dport == other.sport)):
            return 0
    # Do not check ack value for SYN packets without ACK
    #if not (other.flags.S and not other.flags.A) \
    #        and abs(other.ack - self.seq) > 2:
    #    return 0
    # Do not check ack value for RST packets without ACK
    if self.flags.R and not self.flags.A:
        return 1
    #if abs(other.seq - self.ack) > 2 + len(other.payload):
    #    return 0
    return 1
# Install the relaxed answer-matching hotfix into scapy's TCP layer.
TCP.answers = myanswers
from z3 import *
# Keep z3 models uncompressed so array interpretations stay enumerable.
set_param('model_compress', False)
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
                    datefmt='%H:%M:%S')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
#consoleHandler = logging.StreamHandler()
#logger.addHandler(consoleHandler)
# Everything is also mirrored into probe_dpi.log (truncated on startup).
fileHandler = logging.FileHandler("probe_dpi.log", mode='w')
logger.addHandler(fileHandler)
MAX_INT = 9999999
START_FROM = 1
END_BY = MAX_INT
# time wait for server's reply
# for localhost use a small value such as 0.5s
# for remote host use a larger value such as 3s
TIME_WAIT_SERVER_ACK = 2
# wait additional 0.5s between packets
INTERVAL_BETWEEN_PACKET = 0.5
# Because the GFW blocks a 3-tuple (client IP, server IP, server Port) for 90 seconds
# We shouldn't use the same server again within 90 seconds
GFW_TIME_INTERVAL = 100
# Wait 3s after sending sensitive keyword for any RST packets
GFW_TIME_WAIT_RST = 3
LOCAL_TIME_INTERVAL = 0
PROJECT_DIR = "/home/alan/Work/s2e/s2e/projects/tcp"
PCAP_DIR = "./pcaps"
APACHE_LOG_PATH = "/var/log/apache2/"
SNORT_LOG_PATH = "/var/log/snort/"
BRO_LOG_PATH = "."
NETFILTER_LOG_PATH = "../logs"
# Resolved lazily by check_log_files().
apache_log_file_name = None
snort_log_file_name = None
bro_log_file_name = None
netfilter_log_file_name = None
# Raw TCP flag bit values.
SYN = 0x02
RST = 0x04
ACK = 0x10
FIN = 0x01
# Linux kernel TCP socket states (subset used by the test cases).
TCP_NO_SOCK = 0
TCP_ESTABLISHED = 1
TCP_CLOSE_WAIT = 8
TCP_LISTEN = 10
TCP_NEW_SYN_RECV = 12
TCP_SYN_RECV = 3
TCP_FLAGS_LST = {
    'SYN': SYN,
    'RST': RST,
    'ACK': ACK,
    'FIN': FIN,
    'RSTACK': RST | ACK,
    'FINACK': FIN | ACK
}
# SERVER_IP = '183.131.178.75'
# #SERVER_IP = '127.0.0.1'
# SERVER_PORT = 80
# #SERVER_PORT = 5555
SERVER_IP = '127.0.0.1'
#SERVER_IP = '172.20.0.2'
SERVER_PORT = 80
#SERVER_PORT = 5555
# L3RawSocket is required for scapy to talk to localhost.
conf.L3socket=L3RawSocket
#HTTP_REQ = 'GET /search.php?keyword=ultrasurf HTTP/1.1\r\nHost: www.whatever.com\r\n\r\n'
HTTP_REQ_PREFIX = 'GET /'
HTTP_REQ_SUFFIX = '# HTTP/1.1\r\nHost: local_test_host\r\n\r\n'
BAD_KEYWORD = 'ultrasurf'
# error code
ERR_NO_SYNACK = 1
ERR_UNSOLVABLE = 2
ERR_NO_PCAP = 3
ERR_BAD_PCAP = 4
ERR_UNSOLVABLE2 = 5
# [ip, last_used_timestamp] entries, rotated by pick_a_server().
server_list = []
dump_pcaps = False
tcpdump_interface = 'any'
# Populated by the (unseen) CLI argument parser.
args = None
# All sequence-number arithmetic wraps at 32 bits.
MOD32 = 0x100000000
params = {}
def add(a, b):
    """Return a + b wrapped into the 32-bit TCP sequence space."""
    return (a + b) % MOD32
def sub(a, b):
    """Return a - b wrapped into the 32-bit TCP sequence space."""
    return (a - b) % MOD32
def before(a, b):
    """True if 32-bit sequence number a precedes b under wraparound.

    When the two numbers are more than half the sequence space apart
    they straddle the wrap point, so the numerically larger one is
    actually the earlier one.
    """
    if abs(a - b) > 2**31:
        return a > b
    return a < b
def after(a, b):
    """True if 32-bit sequence number a follows b under wraparound."""
    return before(b, a)
def check_log_files():
    """Locate the current apache/snort/bro/netfilter log files and cache
    their paths in the module-level *_log_file_name globals.

    Raises AssertionError when a mandatory log cannot be found.
    NOTE(review): assert is stripped under python -O; explicit raises
    would be safer for validation.
    """
    global apache_log_file_name, snort_log_file_name, bro_log_file_name, netfilter_log_file_name
    apache_log_file_name = os.path.join(APACHE_LOG_PATH, 'access.log')
    logger.info("Apache log file: %s" % apache_log_file_name)
    assert os.path.isfile(apache_log_file_name), "Cannot find apache log."
    files = glob.glob(SNORT_LOG_PATH + '/snort.*')
    assert files, "Cannot find snort log."
    # Snort rotates logs; pick the most recently created one.
    snort_log_file_name = max(files, key=os.path.getctime)
    logger.info("Snort log file: %s" % snort_log_file_name)
    bro_log_file_name = os.path.join(BRO_LOG_PATH, 'notice.log')
    logger.info("Bro log file: %s" % bro_log_file_name)
    # bro log is generated after detection of a bad keyword
    #assert os.path.isfile(bro_log_file_name), "Cannot find bro log."
    files = glob.glob(NETFILTER_LOG_PATH + '/netfilter.pcap.*')
    assert files, "Cannot find netfilter log."
    # Same rotation handling as snort: newest capture wins.
    netfilter_log_file_name = max(files, key=os.path.getctime)
    logger.info("Netfilter log file: %s" % netfilter_log_file_name)
def int2hexarr(num):
    """Split a non-negative integer into its four little-endian bytes.

    Returns [b0, b1, b2, b3] with b0 the least-significant byte.
    Fix: use floor division (//=) instead of /=; true division on ints
    yields a float under Python 3, corrupting every byte after the first.
    The two operators are identical for ints under Python 2.
    """
    v = []
    for i in range(4):
        v.append(num % 256)
        num //= 256
    return v
def dump(pkt):
    """Log the addressing and every TCP header field of pkt at INFO level.

    Silently ignores packets that lack an IP or TCP layer.
    """
    if IP not in pkt or TCP not in pkt:
        return
    ip_layer = pkt[IP]
    tcp_layer = pkt[TCP]
    logger.info("%s:%d -> %s:%d" % (ip_layer.src, tcp_layer.sport, ip_layer.dst, tcp_layer.dport))
    logger.info("SEQ: %08x" % tcp_layer.seq)
    logger.info("ACK: %08x" % tcp_layer.ack)
    logger.info("Data offset: %d" % (tcp_layer.dataofs * 4))
    logger.info("TCP flags: %s" % (tcp_layer.flags or 'None'))
    logger.info("Window: %d" % (tcp_layer.window))
    logger.info("Checksum: %04x" % (tcp_layer.chksum or 0))
    logger.info("Urgent pointer: %04x" % (tcp_layer.urgptr or 0))
def signal_handler(sig, frame):
    """Ctrl+C handler: restore the iptables OUTPUT rule before exiting."""
    print('You pressed Ctrl+C!')
    enable_other_packets()
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def exception_handler(exc_type, exc_value, exc_traceback):
    """Last-chance hook: log the crash, then restore the iptables rule."""
    logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
    enable_other_packets()
# Ensure the firewall rule is removed even on unexpected crashes.
sys.excepthook = exception_handler
def demote(user_uid, user_gid):
    """Return a preexec_fn-style callable that drops privileges to the
    given uid/gid (group is set first, while we still have the right)."""
    def _drop_privileges():
        os.setgid(user_gid)
        os.setuid(user_uid)
    return _drop_privileges
def disable_other_packets():
    # Drop every outgoing packet to the server port that we did not craft
    # ourselves (our probes carry the magic TTL 163), so the kernel's own
    # RSTs/retransmissions cannot disturb the experiment.
    os.system("iptables -t raw -A OUTPUT -p tcp --dport %d -m ttl ! --ttl-eq 163 -j DROP" % SERVER_PORT)
def enable_other_packets():
    # Remove the rule installed by disable_other_packets().
    os.system("iptables -t raw -D OUTPUT -p tcp --dport %d -m ttl ! --ttl-eq 163 -j DROP" % SERVER_PORT)
def load_server_list(fname):
    """Append one [ip, last_used_timestamp] entry to the global
    server_list for every non-blank line of fname.

    Fix: the file handle was opened and closed manually; a `with` block
    now guarantees it is closed even if reading raises.
    """
    with open(fname, 'r') as f:
        for line in f:
            server_ip = line.strip()
            if server_ip:
                # timestamp 0 == "never used": immediately eligible
                server_list.append([server_ip, 0])
def pick_a_server():
    """Rotate server_list and return the next server IP, sleeping if that
    server was used too recently (GFW blocks a 3-tuple for ~90s).

    Returns None when not in GFW mode and the list is empty.
    """
    if args.gfw:
        assert server_list
        time_interval = GFW_TIME_INTERVAL
    else:
        if not server_list:
            return None
        time_interval = LOCAL_TIME_INTERVAL
    # Oldest entry first; it is re-appended below with a fresh timestamp.
    server_ip, ts = server_list.pop(0)
    now = time.time()
    if now - ts < time_interval:
        logger.info("Server is not ready yet. Sleeping for %d seconds..." % (time_interval + ts - now))
        time.sleep(time_interval + ts - now)
    ts = time.time()
    server_list.append([server_ip, ts])
    return server_ip
def check_gfw_rst(packets):
    """Heuristically decide whether the capture contains GFW-injected RSTs.

    Type-1: a bare RST with IP flags clear and a non-zero window.
    Type-2: more than one RST/ACK (we only send one data packet, so
    extras must have been injected).

    Bug fix: the two result flags were never initialized and the function
    had no return statement (NameError / implicit None).  It now returns
    a boolean, matching the later redefinition of check_gfw_rst in this
    file (which shadows this one at runtime).
    """
    rst_ack_cnt = 0
    recved_gfw_type1_rst = False
    recved_gfw_type2_rst = False
    for packet in packets:
        if packet['TCP'].flags == RST:
            if packet['IP'].flags == 0 and packet['TCP'].window != 0:
                recved_gfw_type1_rst = True
        elif packet['TCP'].flags == RST | ACK:
            rst_ack_cnt += 1
            # since we only send one data packet, if we received more than 1 RST/ACK, then they should be from the GFW
            if rst_ack_cnt > 1:
                recved_gfw_type2_rst = True
    return recved_gfw_type1_rst or recved_gfw_type2_rst
# Rotating client source port; randomized start so reruns do not collide.
client_port = random.randint(10000, 40000)
def get_next_client_port():
    """Return a fresh client port, wrapping back to 10000 past 60000."""
    global client_port
    client_port += 1
    if client_port > 60000:
        client_port = 10000
    return client_port
def generate_constraint_str(varname, val, size):
    """Build one SMT-LIB assertion pinning the first `size` bytes of the
    byte-array variable `varname` to the values in `val`."""
    clauses = [
        " (= (select {0} (_ bv{1} 32) ) #x{2:02x})".format(varname, i, val[i])
        for i in range(size)
    ]
    return "(assert (and" + "".join(clauses) + "))"
def get_value_from_model(m, d, size):
    """Extract `size` bytes of declaration `d` from z3 model `m`.

    Returns a list of ints, zero-padded/defaulted where the model gives
    no explicit entry.  Handles both representations z3 uses for array
    values: constant arrays (is_K) and explicit function interpretations
    (FuncInterp) mapping index -> byte.
    """
    val = [0] * size
    if is_K(m[d]):
        # Constant-array form: positional args are the byte values.
        for i in range(size):
            if i >= m[d].num_args():
                break
            val[i] = m[d].arg(i).as_long()
    elif isinstance(m[d], FuncInterp):
        # Interpretation form: each entry is an (index, value) pair.
        for i in range(size):
            if i >= m[d].num_entries():
                break
            e = m[d].entry(i)
            assert e.num_args() == 1
            val[e.arg_value(0).as_long()] = e.value().as_long()
    return val
def extract_example_from_model(m):
    """Pull the concrete TCP header field bytes out of a z3 model.

    Returns {full_variable_name: [byte, ...]} for every recognized
    tcp_* field; the byte count per field follows the wire format.
    """
    # (substring, byte width); checked in order, so the longer
    # 'tcp_doff_reserved_flags' wins over a bare flags match.
    field_widths = (
        ('tcp_seq_num', 4),
        ('tcp_ack_num', 4),
        ('tcp_doff_reserved_flags', 1),
        ('tcp_flags', 1),
        ('tcp_win', 2),
        ('tcp_urg_ptr', 2),
    )
    example = {}
    for d in m:
        k = str(d)
        width = None
        for tag, nbytes in field_widths:
            if tag in k:
                width = nbytes
                break
        if width is None and 'tcp_options' in k:
            # options length is configured per run
            width = args.payload_len
        if width is not None:
            example[k] = get_value_from_model(m, d, width)
    return example
def save_example_to_params(example, params, cur_packet_idx):
    """Persist solved field values for packets 1..cur_packet_idx into
    params so later solver runs are forced to reuse them.

    Keys in params are the short names (declaration name with the
    leading version prefix and trailing counter stripped); values are
    big-endian integers.  get_packet_idx/bytes2int_be presumably come
    from s2e_utils -- confirm against that module.
    """
    for k, v in example.iteritems():
        packet_idx, _ = get_packet_idx(k)
        if 0 < packet_idx <= cur_packet_idx:
            # v123_tcp_seq_num1_0 -> tcp_seq_num1
            varname_short = k.split('_', 1)[1].rsplit('_', 1)[0]
            params[varname_short] = bytes2int_be(v)
def solve_constraints(constraints, params, cur_packet_idx):
    """Solve the S2E-produced SMT-LIB constraints, pinned by `params`.

    Re-declares any param variable missing from the constraint text,
    asserts the concrete values from `params`, and returns the solved
    example dict (see extract_example_from_model), or None if UNSAT.
    Solved values for packets up to cur_packet_idx are written back
    into `params`.

    Bug fix: the (currently disabled, seq2_gt_seq1=False) branch that
    tries to force tcp_seq_num2 > tcp_seq_num1 looked up the misspelled
    keys 'tcp_seq_numm1'/'tcp_seq_numm2' in varnames, which would raise
    KeyError the moment the branch is enabled.
    """
    logger.info("Solving constraints...")
    logger.debug("params: %s" % params)
    # Toggle for the experimental "seq2 > seq1" extra constraint below.
    seq2_gt_seq1 = False
    # Highest ?B let-binding index used by the constraint text, so the
    # extra constraint can allocate a fresh one.
    bnums = re.findall('\?B(\d+)', constraints)
    bmax = max([int(num) for num in bnums])
    constraints = constraints.split('\n')
    constraints_new = []
    varnames = {}
    #set_param('smt.phase_selection', 5)
    for line in constraints:
        if line and line != '(check-sat)' and line != '(exit)':
            constraints_new.append(line)
            if line.startswith("(declare-"):
                varname = line.split()[1]
                if re.match('v\d+_.*_\d+', varname):
                    # v123_tcp_seq_num1_0 -> tcp_seq_num1
                    varname_short = varname.split('_', 1)[1].rsplit('_', 1)[0]
                    varnames[varname_short] = varname
    # declare variables if not present
    for p in params:
        if p not in varnames:
            logger.debug("Declaring %s..." % p)
            # all variable declaration are the same
            constraint = "(declare-fun %s () (Array (_ BitVec 32) (_ BitVec 8) ) )" % p
            #logger.debug("New declaration: %s" % constraint)
            constraints_new.append(constraint)
            varnames[p] = p
    # add constraints pinning each param to its concrete value
    for p in params:
        if p.startswith(('tcp_seq_num', 'tcp_ack_num')):
            # tcp_seq_num/tcp_ack_num is network-order (big-endian) because we symbolized a packet field
            v = int2bytes_be(params[p], 4)
            constraint = generate_constraint_str(varnames[p], v, 4)
            constraints_new.append(constraint)
        elif p == 'tcp_svr_isn':
            # server ISN is host-order (little-endian) because we symbolized a local variable
            v = int2bytes_le(params[p], 4)
            constraint = generate_constraint_str(varnames[p], v, 4)
            constraints_new.append(constraint)
        elif p.startswith('tcp_doff_reserved_flags'):
            v = int2bytes_be(params[p], 1)
            constraint = generate_constraint_str(varnames[p], v, 1)
            constraints_new.append(constraint)
        elif p.startswith('tcp_flags'):
            v = int2bytes_be(params[p], 1)
            constraint = generate_constraint_str(varnames[p], v, 1)
            constraints_new.append(constraint)
        elif p.startswith('tcp_win'):
            v = int2bytes_be(params[p], 2)
            constraint = generate_constraint_str(varnames[p], v, 2)
            constraints_new.append(constraint)
        elif p.startswith('tcp_urg_ptr'):
            v = int2bytes_be(params[p], 2)
            constraint = generate_constraint_str(varnames[p], v, 2)
            constraints_new.append(constraint)
        elif p.startswith('tcp_options'):
            v = int2bytes_be(params[p], args.payload_len)
            constraint = generate_constraint_str(varnames[p], v, args.payload_len)
            constraints_new.append(constraint)
    # try to make tcp_seq_num2 larger than tcp_seq_num1
    if seq2_gt_seq1:
        constraints_new2 = constraints_new[:]
        logger.debug("Trying to add constraint: tcp_seq_num2 > tcp_seq_num1")
        if 'tcp_seq_num1' not in varnames:
            logger.debug("Declaring tcp_seq_num1...")
            constraint = "(declare-fun tcp_seq_num1 () (Array (_ BitVec 32) (_ BitVec 8) ) )"
            #logger.debug("New declaration: %s" % constraint)
            constraints_new2.append(constraint)
            varnames['tcp_seq_num1'] = 'tcp_seq_num1'
        if 'tcp_seq_num2' not in varnames:
            logger.debug("Declaring tcp_seq_num2...")
            constraint = "(declare-fun tcp_seq_num2 () (Array (_ BitVec 32) (_ BitVec 8) ) )"
            constraints_new2.append(constraint)
            #logger.debug("New declaration: %s" % constraint)
            varnames['tcp_seq_num2'] = 'tcp_seq_num2'
        # (seq1 - seq2) must have its sign bit set, i.e. seq2 > seq1 mod 2^32.
        constraint = "(assert (let ( (?B{0:d} ((_ zero_extend 32) ((_ extract 31 0) (bvlshr ((_ zero_extend 32) ((_ extract 31 0) (bvsub ((_ zero_extend 32) (concat (select {1} (_ bv0 32) ) (concat (select {1} (_ bv1 32) ) (concat (select {1} (_ bv2 32) ) (select {1} (_ bv3 32) ) ) ) ) ) ((_ zero_extend 32) (concat (select {2} (_ bv0 32) ) (concat (select {2} (_ bv1 32) ) (concat (select {2} (_ bv2 32) ) (select {2} (_ bv3 32) ) ) ) ) ) ) ) ) (_ bv31 64) ) ) ) ) ) (= false (= (_ bv0 64) (bvand (bvand ?B{0:d} ?B{0:d} ) (_ bv255 64) ) ) ) ) )".format(bmax, varnames['tcp_seq_num1'], varnames['tcp_seq_num2'])
        #logger.debug("New constraint: %s" % constraint)
        constraints_new2.append(constraint)
        constraints = '\n'.join(constraints_new2)
        # solve with the extra ordering constraint first
        s = Solver()
        F = parse_smt2_string(constraints)
        #logger.debug(F.sexpr())
        s.add(F)
        res = s.check()
        #logger.debug(res)
        if res == sat:
            m = s.model()
            #logger.debug(m)
            example = extract_example_from_model(m)
            logger.info("---------Solved Example---------")
            logger.info(example)
            logger.info("---------Example End---------")
            # Fixed: keys were misspelled 'tcp_seq_numm1'/'tcp_seq_numm2'.
            seq_num1 = bytes2int_be(example[varnames['tcp_seq_num1']])
            seq_num2 = bytes2int_be(example[varnames['tcp_seq_num2']])
            #logger.debug("seq_num1: 0x%08x" % seq_num1)
            #logger.debug("seq_num2: 0x%08x" % seq_num2)
            assert(before(seq_num1, seq_num2))
            save_example_to_params(example, params, cur_packet_idx)
            return example
        logger.debug("Cannot make seq_num1 > seq_num2.")
    # fall back to the unordered constraint set
    constraints = '\n'.join(constraints_new)
    #logger.debug(constraints)
    s = Solver()
    F = parse_smt2_string(constraints)
    #logger.debug(F.sexpr())
    s.add(F)
    res = s.check()
    #logger.debug(res)
    if res == sat:
        m = s.model()
        #logger.debug(m)
        example = extract_example_from_model(m)
        logger.info("---------Solved Example---------")
        logger.info(example)
        logger.info("---------Example End---------")
        save_example_to_params(example, params, cur_packet_idx)
        return example
    else:
        logger.debug("####### Cannot solve constraint! #######")
        return None
# relax the constraints in params in the order given in priority list
def relax_constraints(params, priority_list):
    """Drop the first key of priority_list found in params.

    Returns True when a constraint was removed, False when nothing in
    priority_list was present (i.e. nothing left to relax).
    """
    for key in priority_list:
        if key not in params:
            continue
        del params[key]
        logger.debug("Removed constraint on %s." % key)
        return True
    return False
# update the packet with concrete example of packet with index `idx`
def update_tcp_header_with_example(pkt, example, idx):
    """Overwrite pkt's TCP header fields in place with the solver's
    concrete bytes for packet number `idx`.

    tcp_options must be applied last, after dataofs is final, because the
    option length is derived from the data offset.
    """
    tcp_options_var = None
    for k, v in example.iteritems():
        if 'tcp_header' + str(idx) in k:
            # Whole-header variable: 16 header bytes laid out per RFC 793.
            octets = example[k]
            logger.info('tcp_header: ' + ' '.join([ ('%02X' % o) for o in octets ]))
            pkt[TCP].seq = (octets[0] << 24) + (octets[1] << 16) + (octets[2] << 8) + octets[3]
            pkt[TCP].ack = (octets[4] << 24) + (octets[5] << 16) + (octets[6] << 8) + octets[7]
            pkt[TCP].dataofs = ((octets[8] & 0xF0) >> 4)
            pkt[TCP].reserved = ((octets[8] & 0x0E) >> 1)
            # 9-bit flags: NS bit lives in the low bit of octet 8.
            pkt[TCP].flags = ((octets[8] & 0x01) << 8) + octets[9]
            #pkt[TCP].flags = octets[9]
            #pkt[TCP].flags = 'A'
            pkt[TCP].window = (octets[10] << 8) + octets[11]
            #pkt[TCP].chksum = (octets[12] << 8) + octets[13]
            pkt[TCP].urgptr = (octets[14] << 8) + octets[15]
            #pkt[TCP].payload = [ chr(o) for o in octets[16:] ]
        elif 'tcp_seq_num' + str(idx) in k:
            octets = example[k]
            logger.info('tcp_seq_num: ' + ' '.join([ ('%02X' % o) for o in octets ]))
            pkt[TCP].seq = (octets[0] << 24) + (octets[1] << 16) + (octets[2] << 8) + octets[3]
        elif 'tcp_ack_num' + str(idx) in k:
            octets = example[k]
            logger.info('tcp_ack_num: ' + ' '.join([ ('%02X' % o) for o in octets ]))
            pkt[TCP].ack = (octets[0] << 24) + (octets[1] << 16) + (octets[2] << 8) + octets[3]
        elif 'tcp_doff_reserved_flags' + str(idx) in k:
            octets = example[k]
            logger.info('tcp_doff_reserved_flags: ' + ' '.join([ ('%02X' % o) for o in octets ]))
            pkt[TCP].dataofs = octets[0] >> 4
            pkt[TCP].reserved = octets[0] & 0xf
        elif 'tcp_flags' + str(idx) in k:
            octets = example[k]
            logger.info('tcp_flags: ' + ' '.join([ ('%02X' % o) for o in octets ]))
            pkt[TCP].flags = octets[0]
        elif 'tcp_win' + str(idx) in k:
            octets = example[k]
            logger.info('tcp_win: ' + ' '.join([ ('%02X' % o) for o in octets ]))
            pkt[TCP].window = (octets[0] << 8) + octets[1]
        elif 'tcp_urg_ptr' + str(idx) in k:
            octets = example[k]
            logger.info('tcp_urg_ptr: ' + ' '.join([ ('%02X' % o) for o in octets ]))
            pkt[TCP].urgptr = (octets[0] << 8) + octets[1]
        elif 'tcp_options' + str(idx) in k:
            # Defer; needs the final dataofs value (see below).
            tcp_options_var = k
    # tcp options has to be updated after data offset, since we need to use it to calculate the payload
    if tcp_options_var:
        octets = example[tcp_options_var]
        logger.info('tcp_options: ' + ' '.join([ ('%02X' % o) for o in octets ]))
        # prepend it to the payload
        data_offset = pkt[TCP].dataofs * 4
        opt_len = data_offset - 20
        pkt[TCP].payload.load = ''.join([ chr(o) for o in octets[:opt_len] ]) + pkt[TCP].payload.load[opt_len:]
    #ls(pkt)
    #pkt.show()
    #pkt.show2()
    #wireshark(pkt)
    #hexdump(pkt)
    #send(pkt)
def send_3way_handshake_and_data(server_ip, client_port, example_id, packet_num):
    """Perform a fresh 3-way handshake, then send one HTTP GET carrying
    the sensitive keyword tagged with example_id/packet_num.

    Returns early (None) if no SYN/ACK arrives within 3s.
    """
    # client initial sequence number
    client_isn = random.getrandbits(32)
    # server initial sequence number
    server_isn = 0
    syn_pkt = IP(dst=server_ip)/TCP(sport=client_port, dport=SERVER_PORT, flags='S', seq=client_isn)
    syn_pkt['IP'].ttl = 163 # to bypass the iptables rule
    #hexdump(syn_pkt)
    reply_pkt = sr1(syn_pkt, timeout=3)
    logger.info("Sent SYN packet...")
    client_seq = client_isn + 1
    if reply_pkt:
        #hexdump(reply_pkt)
        if TCP in reply_pkt and reply_pkt['TCP'].flags & (SYN | ACK) == SYN | ACK:
            logger.info("Received SYN/ACK packet...")
            # update isn_server with received reply_pkt
            server_isn = reply_pkt['TCP'].seq
            server_seq = server_isn + 1
        else:
            logger.warn("Received non SYN/ACK packet.")
            return
    else:
        logger.warn("No SYN/ACK packet received.")
        return
    ack_pkt = IP(dst=server_ip)/TCP(sport=client_port, dport=SERVER_PORT, flags='A', seq=client_seq, ack=server_seq)
    ack_pkt['IP'].ttl = 163 # to bypass the iptables rule
    #hexdump(ack_pkt)
    send(ack_pkt)
    logger.info("Sent ACK packet...")
    # Request line carries the keyword plus a tag identifying this test case.
    payload = "GET /" + BAD_KEYWORD + '#' + str(example_id) + '#' + str(packet_num) + HTTP_REQ_SUFFIX
    data_pkt = IP(dst=server_ip)/TCP(sport=client_port, dport=SERVER_PORT, flags='A', seq=client_seq, ack=server_seq)/Raw(load=payload)
    data_pkt['IP'].ttl = 163 # to bypass the iptables rule
    #hexdump(data_pkt)
    sleep(0.2)
    send(data_pkt)
    logger.info("Sent Data packet...")
    if args.gfw:
        logger.info("Waiting %ds for server and GFW response..." % GFW_TIME_WAIT_RST)
        sleep(GFW_TIME_WAIT_RST)
def send_ack_and_data(server_ip, client_port, client_seq, server_seq, client_isn, example_id, packet_num):
    """Complete the handshake with an ACK, then send the keyword request
    resuming at the byte offset implied by client_seq.

    The offset logic accounts for 32-bit SEQ wraparound; when the probe
    already consumed >= 5 bytes of sequence space the "GET /" prefix is
    assumed sent and is omitted.
    """
    ack_pkt = IP(dst=server_ip)/TCP(sport=client_port, dport=SERVER_PORT, flags='A', seq=client_seq, ack=server_seq)
    ack_pkt['IP'].ttl = 163 # to bypass the iptables rule
    #hexdump(ack_pkt)
    send(ack_pkt)
    logger.info("Sent ACK packet...")
    if client_seq < client_isn:
        # SEQ number wraparound
        offset = client_seq + 2**32 - client_isn - 1
    else:
        offset = client_seq - client_isn - 1
    if offset < 0:
        offset = 0
    if offset >= 5:
        payload = BAD_KEYWORD + '#' + str(example_id) + '#' + str(packet_num) + HTTP_REQ_SUFFIX
    else:
        payload = "GET /" + BAD_KEYWORD + '#' + str(example_id) + '#' + str(packet_num) + HTTP_REQ_SUFFIX
        payload = payload[offset:]
    data_pkt = IP(dst=server_ip)/TCP(sport=client_port, dport=SERVER_PORT, flags='A', seq=client_seq, ack=server_seq)/Raw(load=payload)
    data_pkt['IP'].ttl = 163 # to bypass the iptables rule
    #hexdump(data_pkt)
    sleep(0.2)
    send(data_pkt)
    logger.info("Sent Data packet...")
    if args.gfw:
        logger.info("Waiting %ds for server and GFW response..." % GFW_TIME_WAIT_RST)
        sleep(GFW_TIME_WAIT_RST)
def send_data(server_ip, client_port, client_seq, server_seq, client_isn, example_id, packet_num):
    """Send only the keyword data packet (no ACK first); same offset /
    wraparound handling as send_ack_and_data."""
    if client_seq < client_isn:
        # SEQ number wraparound
        offset = client_seq + 2**32 - client_isn - 1
    else:
        offset = client_seq - client_isn - 1
    if offset < 0:
        offset = 0
    if offset >= 5:
        # "GET /" prefix (5 bytes) already covered by earlier sequence space.
        payload = BAD_KEYWORD + '#' + str(example_id) + '#' + str(packet_num) + HTTP_REQ_SUFFIX
    else:
        payload = "GET /" + BAD_KEYWORD + '#' + str(example_id) + '#' + str(packet_num) + HTTP_REQ_SUFFIX
        payload = payload[offset:]
    data_pkt = IP(dst=server_ip)/TCP(sport=client_port, dport=SERVER_PORT, flags='A', seq=client_seq, ack=server_seq)/Raw(load=payload)
    data_pkt['IP'].ttl = 163 # to bypass the iptables rule
    #hexdump(data_pkt)
    sleep(0.2)
    send(data_pkt)
    logger.info("Sent Data packet...")
    if args.gfw:
        logger.info("Waiting %ds for server and GFW response..." % GFW_TIME_WAIT_RST)
        sleep(GFW_TIME_WAIT_RST)
def kill_process(p):
    """Terminate subprocess `p` after letting it flush for a second, then
    make sure no stray tcpdump instances survive."""
    sleep(1)
    p.terminate()
    for _ in range(3):
        os.system("pkill tcpdump")
def calculate_payload(example, pkt_idx, payload_len, client_isn):
    """Build the payload bytes for probe packet `pkt_idx`.

    Layout: 0xFF filler standing in for TCP options (data_offset - 20
    bytes), optional 'A' padding when the solved SEQ points before the
    data start, then the tail of the "GET /" prefix, padded/truncated to
    exactly payload_len bytes.
    """
    client_seq = client_isn
    data_offset = 0
    # get the client seq number and data offset from the example if there's any
    client_seq_var_name = 'tcp_seq_num' + str(pkt_idx)
    data_off_var_name = 'tcp_doff_reserved_flags' + str(pkt_idx)
    for k, v in example.iteritems():
        if client_seq_var_name in k:
            client_seq = (v[0] << 24) + (v[1] << 16) + (v[2] << 8) + v[3]
        if data_off_var_name in k:
            data_offset = (v[0] >> 4) * 4
    payload = ''
    if payload_len:
        if pkt_idx == 1:
            # First packet always starts at the beginning of the request.
            payload = '\xff' * (data_offset - 20) + HTTP_REQ_PREFIX
        else:
            # calculate the payload offset using client seq number
            padding = ''
            if after(client_seq, client_isn):
                offset = client_seq - client_isn - 1
                if offset < 0:
                    offset += 2**32
            else:
                offset = client_seq - client_isn - 1
                if offset > 0:
                    offset -= 2**32
                padding = 'A' * min(-offset, 100) # we pad maximum 100 bytes
                offset = 0
            payload = '\xff' * (data_offset - 20) + padding + HTTP_REQ_PREFIX[offset:]
        payload = payload[:payload_len]
        if len(payload) < payload_len:
            payload += 'A' * (payload_len - len(payload))
    return payload
def send_probing_packets(test_case, server_ip, packet_num, is_evasion_pkt, example_id, tcp_flags, payload_len, bad_checksum_case):
    """Send `packet_num` solver-derived probe packets for one test case,
    then complete the exchange appropriate to the final socket state.

    Returns 0 on success or a negative -ERR_* code on failure.
    Fixes: removed the stray tuple expression `params, logger.info(...)`
    on the "Sending packet..." branch (dead `params` reference) and the
    unused `client_ack` local.
    """
    #logger.debug(test_case)
    client_port = get_next_client_port()
    # client initial sequence number
    client_isn = random.getrandbits(32)
    # server initial sequence number
    server_isn = 0
    params = {}
    #params['tcp_seq_num1'] = client_isn
    # initialize SEQ and ACK
    client_seq = client_isn
    server_seq_recved = False
    server_ack_recved = False
    server_seq = 0
    server_ack = 0
    example = solve_constraints(test_case['constraints'], params, 1)
    if not example:
        logger.warn("Failed to solve constraints.")
        return -ERR_UNSOLVABLE
    for k, v in example.iteritems():
        if 'tcp_seq_num1' in k:
            client_seq = bytes2int_be(v)
            break
    # if constraint solver has generated a client ISN, then use it, otherwise use our own
    if client_seq != client_isn:
        # constraint solve has generated a new client ISN
        client_isn = client_seq
    for i in range(1, packet_num + 1):
        logger.info("---------Packet #%d---------" % i)
        if i == packet_num:
            # try to constrain the TCP flags and SEQ and ACK number with valid values for the insertion/evasion packet,
            # to make it more/less likely to be accepted by the DPI
            logger.debug("Trying to constrain the %s packet (SEQ, ACK, flags)..." % ("evasion" if is_evasion_pkt else "insertion"))
            if packet_num > 1 and server_ack_recved:
                if is_evasion_pkt:
                    params['tcp_seq_num' + str(i)] = server_ack + 10
                else:
                    params['tcp_seq_num' + str(i)] = server_ack
            if packet_num > 1 and server_seq_recved:
                if is_evasion_pkt:
                    params['tcp_ack_num' + str(i)] = server_seq + 10
                else:
                    params['tcp_ack_num' + str(i)] = server_seq
            if tcp_flags:
                params['tcp_flags' + str(i)] = tcp_flags
        example = solve_constraints(test_case['constraints'], params, i)
        if not example:
            logger.debug("Seems we are overconstraining, we need to relax the constraint...")
            ok = False
            while relax_constraints(params, ['tcp_ack_num' + str(packet_num), 'tcp_seq_num' + str(packet_num)]):
                example = solve_constraints(test_case['constraints'], params, i)
                if example:
                    ok = True
                    break
                else:
                    logger.debug("Still not working, relaxing more constriant...")
            if not ok:
                logger.debug("No more constraints to relax, giving up...")
                logger.warn("Failed to solve constraints.")
                return -ERR_UNSOLVABLE2
        payload = ''
        if payload_len:
            payload = calculate_payload(example, i, payload_len, client_isn)
        if i == packet_num and bad_checksum_case:
            # Deliberately corrupt the checksum on the final probe packet.
            pkt = IP(dst=server_ip)/TCP(sport=client_port, dport=SERVER_PORT, flags='S', seq=client_seq, chksum=0xffff)/Raw(load=payload)
        else:
            pkt = IP(dst=server_ip)/TCP(sport=client_port, dport=SERVER_PORT, flags='S', seq=client_seq)/Raw(load=payload)
        # Overwrite header fields with the solver's concrete values.
        update_tcp_header_with_example(pkt, example, i)
        logger.info("payload: %s" % ' '.join([ "%02x" % ord(c) for c in pkt[TCP].payload.load]))
        pkt[IP].ttl = 163 # to bypass the iptables rule
        #hexdump(pkt)
        #dump(pkt)
        if i == packet_num:
            logger.info('Flags: %s' % (pkt[TCP].flags or 'None'))
            if is_evasion_pkt:
                logger.info("Sending evasion packet...")
            else:
                logger.info("Sending insertion packet...")
        else:
            logger.info("Sending packet...")
        reply_pkt = sr1(pkt, timeout=TIME_WAIT_SERVER_ACK)
        if reply_pkt:
            #hexdump(reply_pkt)
            if TCP in reply_pkt:
                server_seq_recved = True
                server_seq = reply_pkt[TCP].seq
                if reply_pkt[TCP].flags & ACK == ACK:
                    server_ack_recved = True
                    server_ack = reply_pkt[TCP].ack
                if reply_pkt[TCP].flags & (SYN | ACK) == SYN | ACK:
                    logger.info("Received SYN/ACK packet from server.")
                    # received a SYN/ACK packet
                    # update server_isn with sequence number in SYN/ACK
                    # assuming the firewall doesn't send deceptive SYN/ACK
                    server_isn = reply_pkt[TCP].seq
                    params['tcp_svr_isn'] = server_isn
                    server_seq = server_isn + 1
                    example = solve_constraints(test_case['constraints'], params, 2)
                    if not example:
                        logger.warn("Failed to solve constraints.")
                        return -ERR_UNSOLVABLE
            else:
                logger.warn("Received non TCP packet.")
        if i == 1 and 'tcp_svr_isn' not in params:
            # didn't receive SYN/ACK?
            if is_evasion_pkt:
                # for evasion packet, it should always be accepted
                logger.warn("Didn't receive SYN/ACK packet after sending the first packet.")
                return -ERR_NO_SYNACK
            else:
                # for insertion packet, if it has more than 1 packet, then the first one should be accepted.
                if packet_num > 1:
                    logger.warn("Didn't receive SYN/ACK packet after sending the first packet.")
                    return -ERR_NO_SYNACK
        #sleep(INTERVAL_BETWEEN_PACKET)
    # send follow-up packets according to the expected socket state
    sk_state = test_case['sk_state'][packet_num]
    if sk_state == 0 and packet_num > 1:
        sk_state = test_case['sk_state'][packet_num-1]
    if sk_state == TCP_NO_SOCK:
        logger.info("*******************************")
        logger.info("* Current stat is TCP_NO_SOCK *")
        logger.info("*******************************")
        send_3way_handshake_and_data(server_ip, client_port, example_id, packet_num)
    elif sk_state == TCP_LISTEN:
        logger.info("******************************")
        logger.info("* Current stat is TCP_LISTEN *")
        logger.info("******************************")
        send_3way_handshake_and_data(server_ip, client_port, example_id, packet_num)
    elif sk_state == TCP_NEW_SYN_RECV:
        logger.info("************************************")
        logger.info("* Current stat is TCP_NEW_SYN_RECV *")
        logger.info("************************************")
        send_ack_and_data(server_ip, client_port, server_ack, server_seq, client_isn, example_id, packet_num)
    elif sk_state == TCP_SYN_RECV:
        logger.info("************************************")
        logger.info("*   Current stat is TCP_SYN_RECV   *")
        logger.info("************************************")
        send_ack_and_data(server_ip, client_port, server_ack, server_seq, client_isn, example_id, packet_num)
    elif sk_state == TCP_ESTABLISHED:
        logger.info("***********************************")
        logger.info("* Current stat is TCP_ESTABLISHED *")
        logger.info("***********************************")
        send_data(server_ip, client_port, server_ack, server_seq, client_isn, example_id, packet_num)
    elif sk_state == TCP_CLOSE_WAIT:
        # server may still be able to receive data in TCP_CLOSE_WAIT state
        logger.info("***********************************")
        logger.info("* Current stat is TCP_CLOSE_WAIT  *")
        logger.info("***********************************")
        send_data(server_ip, client_port, server_ack, server_seq, client_isn, example_id, packet_num)
    else:
        logger.warn("Unexpected sk_state: %d" % sk_state)
    sleep(1)
    return 0
def probe_dpi(test_case, server_ip, packet_num, is_evasion_pkt, example_id, tcp_flags=None, tcp_flags_name=None, payload_len=0, bad_checksum_case=False):
    """Run one probing round: install the iptables filter, optionally
    capture with tcpdump, send the probe packets (retrying up to 3 times
    on recoverable errors), then clean everything up.

    Returns the result code of the last send_probing_packets attempt.
    """
    # Keep the kernel from injecting its own packets during the probe.
    disable_other_packets()
    if dump_pcaps:
        if not os.path.exists(PCAP_DIR):
            os.mkdir(PCAP_DIR)
        if tcp_flags and tcp_flags_name:
            pcap_file = '%s/packet_dump_%s_%s_%s_%s_%d.pcap' % (PCAP_DIR, test_case['state_id'], server_ip, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"), tcp_flags_name, packet_num)
        else:
            pcap_file = '%s/packet_dump_%s_%s_%s_%d.pcap' % (PCAP_DIR, test_case['state_id'], server_ip, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"), packet_num)
        logger.info("Recoding by pcap filename: " + pcap_file)
        tcpdump_p = subprocess.Popen(['tcpdump', '-i', tcpdump_interface, '-w', pcap_file, 'host %s and tcp port %d' % (server_ip, SERVER_PORT)])
        # Give tcpdump a moment to attach before we send anything.
        sleep(1)
    ret = send_probing_packets(test_case, server_ip, packet_num, is_evasion_pkt, example_id, tcp_flags, payload_len, bad_checksum_case)
    retry = 0
    # Retry only on transient failures (no SYN/ACK or a first-pass UNSAT).
    while ret == -ERR_NO_SYNACK or ret == -ERR_UNSOLVABLE:
        logger.debug("Retrying...")
        retry += 1
        ret = send_probing_packets(test_case, server_ip, packet_num, is_evasion_pkt, example_id, tcp_flags, payload_len, bad_checksum_case)
        if retry >= 3:
            break
    if dump_pcaps:
        kill_process(tcpdump_p)
        #sleep(1)
        #tcpdump_p.terminate()
        #os.kill(os.getpgid(tcpdump_p.pid), signal.SIGTERM)
        #os.system("pkill tcpdump")
        #os.system("pkill tcpdump")
        #os.system("pkill tcpdump")
    enable_other_packets()
    #raw_input("Press ENTER to continue...")
    return ret
# find client port using 3-way handshake
def find_client_port(packets):
    """Replay the capture and return the client port of the first
    completed 3-way handshake with server port 80, or 0 if none.

    Tracks a per-port state machine: 1 after SYN, 2 after SYN/ACK,
    3 (done) after the final ACK.
    """
    handshake_stage = {}
    for pkt in packets:
        if TCP not in pkt:
            print('Non-TCP packet?!')
            print(pkt.summary())
            continue
        tcp = pkt[TCP]
        if tcp.dport == 80:
            # client -> server direction
            if tcp.flags & SYN == SYN and tcp.sport not in handshake_stage:
                handshake_stage[tcp.sport] = 1
            if tcp.flags & ACK == ACK and handshake_stage.get(tcp.sport) == 2:
                handshake_stage[tcp.sport] = 3
                # handshake complete
                return tcp.sport
        elif tcp.sport == 80:
            # server -> client direction: SYN/ACK advances stage 1 -> 2
            if tcp.flags & (SYN | ACK) == SYN | ACK and handshake_stage.get(tcp.dport) == 1:
                handshake_stage[tcp.dport] = 2
    return 0
def check_gfw_rst(packets):
    """Return True if the capture shows GFW-injected RST traffic.

    NOTE: this re-definition shadows the earlier check_gfw_rst above.
    Type-1: bare RST with IP flags clear and non-zero window.
    Type-2: more than one RST/ACK (only one data packet was sent, so
    extras must be injected).
    """
    rst_ack_cnt = 0
    recved_gfw_type1_rst, recved_gfw_type2_rst = False, False
    for packet in packets:
        if not packet.haslayer(TCP):
            if args.debug:
                logger.warn("No TCP layer detected.")
            continue
        if packet['TCP'].flags == RST:
            if packet['IP'].flags == 0 and packet['TCP'].window != 0:
                recved_gfw_type1_rst = True
        elif packet['TCP'].flags == RST | ACK:
            rst_ack_cnt += 1
            # since we only send one data packet, if we received more than 1 RST/ACK, then they should be from the GFW
            if rst_ack_cnt > 1:
                recved_gfw_type2_rst = True
    return recved_gfw_type1_rst or recved_gfw_type2_rst
def check_server_response(packets):
    """True if any packet is an HTTP response addressed back to the
    client port recovered from the handshake in this capture."""
    client_port = find_client_port(packets)
    return any(
        pkt.haslayer(HTTPResponse) and pkt[TCP].dport == client_port
        for pkt in packets
    )
def verify_gfw_result(example_id):
    """Inspect the pcap captured for *example_id* ("<state_id>[_FLAGS]#<pkt>")
    and report probe outcome against the GFW.

    Returns {'server': bool, 'gfw': bool} on success, or a negative error
    code (-ERR_NO_PCAP / -ERR_BAD_PCAP) when the capture is missing/corrupt.
    """
    logger.debug("===Verifying results of '%s'===" % example_id)
    result = { 'server': False, 'gfw': False }
    state_id_with_flags, packet_num = example_id.split('#')
    if state_id_with_flags.endswith(('SYN', 'RST', 'ACK', 'FIN', 'RSTACK', 'FINACK')):
        # TCP-flags-fuzzing case: the flags name is appended to the state id
        state_id, tcp_flags = state_id_with_flags.rsplit('_', 1)
        pcap_files = glob.glob(PCAP_DIR + "/packet_dump_%s_*_%s_%s.pcap" % (state_id, tcp_flags, packet_num))
        if len(pcap_files) != 1:
            # NOTE(review): also reached when len == 0, in which case the
            # next line raises IndexError — confirm that is acceptable.
            logger.error("Found more than 1 pacp files: %s" % pcap_files)
        pcap_file = pcap_files[0]
    else:
        state_id = state_id_with_flags
        tcp_flags = ''
        pcap_files = glob.glob(PCAP_DIR + "/packet_dump_%s_*_%s.pcap" % (state_id, packet_num))
        pcap_file = ''
        for pf in pcap_files:
            # disambiguate by the number of '_'-separated filename fields
            parts = pf.split('/')[-1].split('_')
            if 'c' in state_id:
                # bad checksum case
                if len(parts) == 10:
                    pcap_file = pf
                    break
            else:
                if len(parts) == 7:
                    pcap_file = pf
                    break
    if not pcap_file:
        logger.error("Cannot find pcap file. %s" % pcap_files)
        return -ERR_NO_PCAP
    try:
        packets = rdpcap(pcap_file)
    except Scapy_Exception:
        logger.error("Bad pcap...")
        return -ERR_BAD_PCAP
    if check_server_response(packets):
        result['server'] = True
    logger.debug("Server received: %s" % result['server'])
    if check_gfw_rst(packets):
        result['gfw'] = True
    logger.debug("GFW detected: %s" % result['gfw'])
    return result
def verify_local_result(example_id):
    """Check the pcap and local IDS logs for *example_id* and report which
    components observed the probe.

    Returns {'apache','snort','bro','netfilter'} -> bool, or a negative
    error code when the pcap is missing/corrupt.  Apache reception is judged
    from the pcap; the three DPIs from the tails of their log files.
    """
    logger.debug("===Verifying results of '%s'===" % example_id)
    result = { 'apache': False, 'snort': False, 'bro': False, 'netfilter': False }
    state_id_with_flags, packet_num = example_id.split('#')
    if state_id_with_flags.endswith(('SYN', 'RST', 'ACK', 'FIN', 'RSTACK', 'FINACK')):
        # TCP-flags-fuzzing case: "<state>_<FLAGS>"
        state_id, tcp_flags = state_id_with_flags.rsplit('_', 1)
        pcap_files = glob.glob(PCAP_DIR + "/packet_dump_%s_*_%s_%s.pcap" % (state_id, tcp_flags, packet_num))
        if len(pcap_files) != 1:
            # NOTE(review): also reached when len == 0 (next line IndexErrors)
            logger.error("Found more than 1 pacp files: %s" % pcap_files)
        pcap_file = pcap_files[0]
    else:
        state_id = state_id_with_flags
        tcp_flags = ''
        pcap_files = glob.glob(PCAP_DIR + "/packet_dump_%s_*_%s.pcap" % (state_id, packet_num))
        pcap_file = ''
        for pf in pcap_files:
            # disambiguate by the number of '_'-separated filename fields
            parts = pf.split('/')[-1].split('_')
            if 'c' in state_id:
                # bad checksum case
                if len(parts) == 10:
                    pcap_file = pf
                    break
            else:
                if len(parts) == 7:
                    pcap_file = pf
                    break
    if not pcap_file:
        logger.error("Cannot find pcap file. %s" % pcap_files)
        return -ERR_NO_PCAP
    try:
        packets = rdpcap(pcap_file)
    except Scapy_Exception:
        logger.error("Bad pcap...")
        return -ERR_BAD_PCAP
    if check_server_response(packets):
        result['apache'] = True
    logger.debug("Apache received: %s" % result['apache'])
    # bro log is generated after detection of a bad keyword
    if os.path.isfile(bro_log_file_name):
        f = open(bro_log_file_name, 'r')
        for line in f.readlines()[-10:]:
            if example_id in line:
                result['bro'] = True
                break
        f.close()
    logger.debug("Bro detected: %s" % result['bro'])
    # snort: scan only the last 1 KiB of the log for the example id
    f = open(snort_log_file_name, 'rb')
    f.seek(0, os.SEEK_END)
    size = f.tell()
    if size > 1024:
        size = 1024
    f.seek(-size, os.SEEK_END)
    content = f.read()
    if example_id in content:
        result['snort'] = True
    f.close()
    logger.debug("Snort detected: %s" % result['snort'])
    # netfilter: same last-1-KiB scan
    f = open(netfilter_log_file_name, 'rb')
    f.seek(0, os.SEEK_END)
    size = f.tell()
    if size > 1024:
        size = 1024
    f.seek(-size, os.SEEK_END)
    content = f.read()
    if example_id in content:
        result['netfilter'] = True
    f.close()
    logger.debug("Netfilter detected: %s" % result['netfilter'])
    return result
def verify_local_result2(example_id):
    """Like verify_local_result, but judges Apache reception from the tail
    of the Apache access log instead of a pcap (no capture required).

    Returns {'apache','snort','bro','netfilter'} -> bool.
    """
    logger.debug("===Verifying results of '%s'===" % example_id)
    result = { 'apache': False, 'snort': False, 'bro': False, 'netfilter': False }
    # apache: look for the example id in the last 10 access-log lines
    f = open(apache_log_file_name, 'r')
    for line in f.readlines()[-10:]:
        if example_id in line:
            result['apache'] = True
            break
    f.close()
    logger.debug("Apache received: %s" % result['apache'])
    # bro log is generated after detection of a bad keyword
    if os.path.isfile(bro_log_file_name):
        f = open(bro_log_file_name, 'r')
        for line in f.readlines()[-10:]:
            if example_id in line:
                result['bro'] = True
                break
        f.close()
    logger.debug("Bro detected: %s" % result['bro'])
    # snort: scan only the last 1 KiB of the log
    f = open(snort_log_file_name, 'rb')
    f.seek(0, os.SEEK_END)
    size = f.tell()
    if size > 1024:
        size = 1024
    f.seek(-size, os.SEEK_END)
    content = f.read()
    if example_id in content:
        result['snort'] = True
    f.close()
    logger.debug("Snort detected: %s" % result['snort'])
    # netfilter: same last-1-KiB scan
    f = open(netfilter_log_file_name, 'rb')
    f.seek(0, os.SEEK_END)
    size = f.tell()
    if size > 1024:
        size = 1024
    f.seek(-size, os.SEEK_END)
    content = f.read()
    if example_id in content:
        result['netfilter'] = True
    f.close()
    logger.debug("Netfilter detected: %s" % result['netfilter'])
    return result
def run_test_case(entry, packet_idx=None, tcp_flags=None):
    """Send the probe packets of one test case *entry* and record outcomes.

    packet_idx -- when given, only evaluate that packet of the test case
    tcp_flags  -- when given, use that single named TCP flag combination;
                  otherwise run the original flags (plus every combination
                  in TCP_FLAGS_LST when --tcp-flags-fuzzing is on)
    Results are written into entry['results'][packet][flags_name]: either a
    negative error code or the dict produced by verify_gfw_result /
    verify_local_result.  Stops early once an evasion succeeds.
    """
    state_id = entry['state_id']
    # (state_id_with_flags, flags_hex, flags_name) triples to evaluate
    tcp_flags_to_run = []
    if tcp_flags:
        # specified TCP flags
        tcp_flags_name = tcp_flags
        tcp_flags_hex = TCP_FLAGS_LST[tcp_flags_name]
        tcp_flags_to_run.append((str(state_id) + '_' + tcp_flags_name, tcp_flags_hex, tcp_flags_name))
    else:
        tcp_flags_to_run.append((str(state_id), None, None))
        if args.tcp_flags_fuzzing:
            for tcp_flags_name, tcp_flags_hex in TCP_FLAGS_LST.iteritems():
                tcp_flags_to_run.append((str(state_id) + '_' + tcp_flags_name, tcp_flags_hex, tcp_flags_name))
    packet_num = entry['packet_num']
    for j in range(1, packet_num + 1):
        if packet_idx and j != packet_idx:
            continue
        logger.info(">> Evaluating packet (%d/%d) <<" % (j, packet_num))
        succeeded = False
        if j not in entry['results']:
            entry['results'][j] = {}
        for state_id_with_flags, tcp_flags_hex, tcp_flags_name in tcp_flags_to_run:
            #logger.info("==================================================")
            #logger.info("== Evaluating test case %i with specific flags..." % i)
            #logger.info("==================================================")
            if tcp_flags_name is None:
                logger.info(">> Use original TCP flag <<")
            else:
                logger.info(">> Picked TCP flag %s <<" % tcp_flags_name)
            server_ip = pick_a_server()
            if not server_ip:
                server_ip = SERVER_IP
            # the last packet is the real request unless there are no drop points
            is_evasion_pkt = j < packet_num or not entry['drop_points']
            if 'c' in entry['state_id'] and not is_evasion_pkt:
                bad_checksum_case = True
            else:
                bad_checksum_case = False
            ret = probe_dpi(entry, server_ip, j, is_evasion_pkt, state_id_with_flags, tcp_flags_hex, tcp_flags_name, args.payload_len, bad_checksum_case)
            if ret < 0:
                # probing itself failed; store the error code
                entry['results'][j][tcp_flags_name] = ret
            else:
                if args.gfw:
                    result = verify_gfw_result("%s#%d" % (state_id_with_flags, j))
                    if isinstance(result, dict):
                        if result['server'] and not result['gfw']:
                            # server received but GFW not detected
                            succeeded = True
                else:
                    result = verify_local_result("%s#%d" % (state_id_with_flags, j))
                    if isinstance(result, dict):
                        if result['apache'] and not result['snort'] and not result['bro'] and not result['netfilter']:
                            # apache received but all DPIs not detected
                            succeeded = True
                entry['results'][j][tcp_flags_name] = result
            if args.debug:
                raw_input("Press ENTER to continue...")
        if succeeded:
            logger.info("Already succeeded. No need to send later packets.")
            break
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='Probe DPI with test cases generated from symbolic execution.')
    parser.add_argument('test_case_file', type=argparse.FileType('r'), help='test case file')
    parser.add_argument('-P', '--dump-pcaps', default=False, action='store_true', help='dump pcap files for each test case')
    parser.add_argument('-G', '--gfw', default=False, action='store_true', help='probing the gfw')
    parser.add_argument('-I', '--int', help='interface to listen on')
    parser.add_argument('-F', '--tcp-flags-fuzzing', action='store_true', help='switch of tcp flags fuzzing')
    parser.add_argument('--tcp-flags', type=str, help='Use specific TCP flags for testing')
    parser.add_argument('-D', '--debug', action='store_true', help='turn on debug mode')
    parser.add_argument('-p', dest='payload_len', type=int, default=0, help='TCP payload length (because header len is symbolic, payload may be counted as optional header')
    parser.add_argument('-N', dest='num_insts', default=1, type=int)
    parser.add_argument('-S', dest='split_id', default=0, type=int)
    parser.add_argument('-t', dest='test_case_idx', type=int, help='test case index')
    parser.add_argument('--packet-idx', type=int, help='packet index in the test case')
    parser.add_argument('--replay', type=argparse.FileType('r'), help='replay a list of successful cases')
    args = parser.parse_args()
    # choose server pool: remote GFW probing vs the local DPI testbed
    if args.gfw:
        load_server_list('server_list')
    else:
        load_server_list('server_list.local')
        check_log_files()  # local mode verifies against apache/snort/bro/netfilter logs
    if args.int:
        tcpdump_interface = args.int
    dump_pcaps = args.dump_pcaps
    if args.payload_len:
        # !!!assuming we only send 3 packets at most for now!!!
        HTTP_REQ_PREFIX = 'GET /' + 'A' * args.payload_len * 3 + '#'
    # unbuffered so partial results survive a crash
    fo = open("probe_dpi_result", 'w', buffering=0)
    if args.replay:
        # replay mode: only rerun the (state_id, packet_idx, tcp_flags)
        # triples listed in the replay file
        cases_to_run = {}
        for line in args.replay:
            line = line.rstrip()
            parts = line.split(',')
            state_id = parts[0]
            packet_idx = int(parts[1])
            tcp_flags = parts[2]
            if tcp_flags == 'None':
                tcp_flags = None
            if state_id not in cases_to_run:
                cases_to_run[state_id] = []
            cases_to_run[state_id].append((packet_idx, tcp_flags))
        i = 0
        for line in args.test_case_file:
            i += 1
            # NOTE(review): eval() on the test-case file fully trusts its
            # contents — acceptable only for locally generated files.
            entry = eval(line)
            if entry['state_id'] not in cases_to_run:
                continue
            logger.info("==============================")
            logger.info("== Evaluating test case %i..." % i)
            logger.info("==============================")
            entry['results'] = {}
            for packet_idx, tcp_flags in cases_to_run[entry['state_id']]:
                run_test_case(entry, packet_idx, tcp_flags)
            # results is stored in entry['results']
            fo.write("%s\n" % entry)
    else:
        i = 0
        for line in args.test_case_file:
            i += 1
            # honor the global window and the per-instance sharding (-N/-S)
            if i < START_FROM:
                continue
            if i > END_BY:
                break
            if args.test_case_idx and i < args.test_case_idx:
                continue
            if (i - START_FROM) % args.num_insts != args.split_id:
                continue
            # NOTE(review): eval() on the test-case file — see above.
            entry = eval(line)
            logger.info("==============================")
            logger.info("== Evaluating test case %i..." % i)
            logger.info("==============================")
            entry['results'] = {}
            run_test_case(entry, args.packet_idx, args.tcp_flags)
            # results is stored in entry['results']
            fo.write("%s\n" % entry)
            if args.test_case_idx:
                break
    fo.close()
|
985,067 | 2cf6048c242014cb4c6c2c8fbc2aa124b398f29f | from imports import *
class FF(torch.nn.Module):
    """Fully-connected classifier head.

    Bilinearly upsamples the input by `up1`, flattens it, and pushes it
    through three linear layers with ReLU in between.  Returns raw logits;
    apply softmax outside (e.g. via CrossEntropyLoss).
    """

    def __init__(self, in_flat_dim, up1, out_dim, h1, h2):
        super(FF, self).__init__()
        # flattened size after upsampling; assumes in_flat_dim == C*H*W of
        # the input — TODO confirm against callers
        flat = in_flat_dim * up1 ** 2
        self.flat_dim = flat
        self.up0 = torch.nn.UpsamplingBilinear2d(scale_factor=up1)
        self.linear1 = torch.nn.Linear(flat, h1)
        self.linear2 = torch.nn.Linear(h1, h2)
        self.linear3 = torch.nn.Linear(h2, out_dim)

    def forward(self, x):
        upsampled = self.up0(x)
        flattened = upsampled.reshape(upsampled.shape[0], self.flat_dim)
        hidden = F.relu(self.linear1(flattened))
        hidden = F.relu(self.linear2(hidden))
        # raw logits — softmax happens outside (CE loss)
        return self.linear3(hidden)
class CNN(torch.nn.Module):
    """Convolutional classifier: bilinear upsample, three conv layers (with
    a max-pool after the first), a spatial max-pool down to 1x1, then one
    linear layer.  Output is meant for a cross-entropy loss."""
    def __init__(self, up1, out_dim, chan1, chan2, chan3, k1, k2, k3, p12, color):
        super(CNN, self).__init__()
        self.up0 = torch.nn.UpsamplingBilinear2d(scale_factor=up1)
        # `color` is the number of input channels (1 grayscale, 3 RGB)
        self.conv1 = torch.nn.Conv2d(color, chan1, kernel_size=k1)
        self.conv2 = torch.nn.Conv2d(chan1, chan2, kernel_size=k2)
        self.conv3 = torch.nn.Conv2d(chan2, chan3, k3)
        self.linear4 = torch.nn.Linear(chan3, out_dim)
        self.pool12 = nn.MaxPool2d(p12, p12)
    def forward(self, x):
        x = self.up0(x)
        x = F.relu(self.conv1(x))
        x = self.pool12(x)
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        # collapse the spatial dims (kernel = H, stride = W) -> (B, chan3, 1, 1)
        pool_lin = nn.MaxPool2d(x.shape[2], x.shape[3])
        x = pool_lin(x)
        x = torch.flatten(x, start_dim=1, end_dim=-1)
        # NOTE(review): this ReLU zeroes negative logits before the CE loss;
        # FF/composite return raw logits here — confirm it is intentional.
        x = F.relu(self.linear4(x)) # Need softmax outside (CE loss)
        return x
class composite(torch.nn.Module):
def __init__(self, up1, out_dim, chan1, chan2, chan3, k1, k2, k3, h4, h5, p12, p23, color):
super(composite, self).__init__()
self.up0 = torch.nn.UpsamplingBilinear2d(scale_factor=up1)
self.conv1 = torch.nn.Conv2d(color, chan1, kernel_size=k1)
self.conv2 = torch.nn.Conv2d(chan1, chan2, kernel_size=k2)
self.conv3 = torch.nn.Conv2d(chan2, chan3, kernel_size=k3)
self.linear4 = torch.nn.Linear(chan3, h4)
self.linear5 = torch.nn.Linear(h4, h5)
self.linear6 = torch.nn.Linear(h5, out_dim)
self.pool12 = nn.MaxPool2d(p12, p12)
self.pool23 = nn.MaxPool2d(p23, p23)
def forward(self, x):
x = self.up0(x)
x = F.relu(self.conv1(x))
x = self.pool12(x)
x = F.relu(self.conv2(x))
x = self.pool23(x)
x = F.relu(self.conv3(x))
pool_lin = nn.MaxPool2d(x.shape[2], x.shape[3])
x = pool_lin(x)
x = torch.flatten(x, start_dim=1, end_dim=-1)
x = F.relu(self.linear4(x))
x = F.relu(self.linear5(x))
return self.linear6(x) # Need softmax outside (CE loss)
def train(model, optimizer, trainloader, name, epoch=2,
          loss_fcn=torch.nn.CrossEntropyLoss(), clip=False,
          plot=False, devloader=None, denoiser=None):
    """Train *model* on *trainloader* and save its state_dict as <name>.pth.

    epoch    -- number of passes over the training data
    loss_fcn -- criterion applied to (output, labels)
    clip     -- if True, clip gradient values to +/- 1e-5
    plot     -- if True, evaluate on *devloader* every `plot_batch`
                mini-batches and show accuracy/loss curves at the end
    denoiser -- optional callable applied to the inputs before the model

    NOTE(review): inputs/labels are moved to the CUDA device when available,
    but *model* is not — callers presumably move it beforehand; confirm.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.train()
    plot_batch = 50 # save plotting data every 50 mini-batches
    print_batch = 1 # print every 4*50 = 200 mini-batches
    iteration_track = []
    devloss_track = []
    devacc_track = []
    batch_count = 0
    running_loss = 0
    for ep in range(epoch):
        print_idx = 0
        for i_batch, data in enumerate(trainloader, 0):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            inputs = inputs.float()
            # promote 3-D tensors to the 4-D NCHW layout expected downstream
            if (len(labels.shape) == 3):
                labels = torch.unsqueeze(labels, 1)
            if (len(inputs.shape) == 3):
                inputs = torch.unsqueeze(inputs, 1)
            if (denoiser):
                inputs = denoiser(inputs)
            output = model(inputs)
            loss = loss_fcn(output, labels)
            loss.backward()
            if (clip == True):
                torch.nn.utils.clip_grad_value_(model.parameters(), 0.00001)
            optimizer.step()
            running_loss += loss.item()
            batch_count += 1
            if i_batch % plot_batch == plot_batch - 1:
                if (print_idx % print_batch == print_batch - 1):
                    print('[%d, %5d] loss: %.3f' %
                          (ep + 1, i_batch + 1, running_loss / plot_batch))
                print_idx += 1
                running_loss = 0.0
                if plot:
                    # track dev metrics; test() switches to eval mode, so
                    # flip back to train mode afterwards
                    iteration_track = np.append(iteration_track, batch_count)
                    dev_acc, dev_loss = test(model, devloader, loss_fcn=loss_fcn)
                    devacc_track = np.append(devacc_track, dev_acc)
                    devloss_track = np.append(devloss_track, dev_loss.cpu())
                    model.train()
    if plot:
        fig, a = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))
        a[0].plot(iteration_track, devacc_track)
        a[0].set_title('Dev. Accuracy')
        a[0].set_xlabel("Total Iterations")
        # a[0].set_ylim([0, 1])
        a[1].plot(iteration_track, devloss_track)
        a[1].set_title('Dev. Loss')
        a[1].set_xlabel("Total Iterations")
        plt.show()
    # persist the trained weights (Colab / Google Drive path)
    PATH = "/content/drive/My Drive/ML Final Project Files/"
    SAVE_PATH = PATH + name + '.pth'
    torch.save(model.state_dict(), SAVE_PATH)
def test(model, testloader, loss_fcn=None, denoiser=None): # Use a loss fcn for dev_set
    """Evaluate *model* on *testloader* without gradients.

    Returns (accuracy, avg_loss) where avg_loss is total_loss / sample
    count; when no *loss_fcn* is given the second element is just 0.0
    divided by the count.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.eval()
    correct = 0
    total = 0
    total_loss = 0.0
    with torch.no_grad():
        for data in testloader:
            inputs, labels = data
            inputs = inputs.float()
            try:
                inputs, labels = inputs.to(device), labels.to(device)
            except:
                # NOTE(review): bare except silently skips any malformed batch
                print("Found String in Data")
                continue
            # promote 3-D tensors to the 4-D NCHW layout
            if (len(labels.shape) == 3):
                labels = torch.unsqueeze(labels, 1)
            if (len(inputs.shape) == 3):
                inputs = torch.unsqueeze(inputs, 1)
            if (denoiser):
                inputs = denoiser(inputs)
            output = model(inputs.float())
            if (len(output.shape) < 2):
                output = torch.unsqueeze(output, 0)
            if (loss_fcn):
                total_loss += loss_fcn(output, labels)
            # predicted class = argmax over the score dimension
            _, pred = torch.max(output.data, 1)
            total += labels.size(0)
            correct += (pred == labels).sum().item()
    return (correct / total), (total_loss / total)
|
985,068 | bc0a9e3fb61c8524af76cf0d2d68ca6a584b8699 | n,k=map(int,input().split())
a=list(map(int,input().split()))
p=0
for i in range(len(a)):
if(a[i]%k==0 and a[i]//k > p):
p=a[i]//kint
else:
print()
print(p)
|
985,069 | dbf86a19457331afd8ae2ee8333fac03bd2d5607 | # -*- coding: utf-8 -*-
# Copyright 2018-2019 Mateusz Klos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Base class for resources using Google AppEngine ndb as storage.
"""
from __future__ import absolute_import, unicode_literals
# stdlib imports
from logging import getLogger
from typing import Optional
# 3rd party imports
import restible.util
import webapp2 # pylint: disable=wrong-import-order
from google.appengine.ext import ndb # pylint: disable=wrong-import-order
from restible.model import ModelResource
from serafin import Fieldspec
# local imports
from . import util
L = getLogger(__name__)
class NdbResource(ModelResource):
    """ Base class for ndb based resources.
    This provides a basic implementation that can be used out of the box. It
    provides no authentication/authorization.
    """
    # Subclasses must set `name` (resource name) and `model` (ndb model class).
    name = None
    model = None
    # Serialization fieldspec (serafin): expose all fields by default.
    spec = Fieldspec('*')
    # JSON schema used for payload validation; empty means no validation.
    schema = {}
    def create_item(self, request, params, payload):
        """Create and persist a new model instance from *payload*."""
        payload.pop('id', None)  # ignore ID (force create).
        item = self.model(**payload)
        item.put()
        return item
    def update_item(self, request, params, payload):
        """Update the item addressed by the request; return None if missing."""
        item = self.item_for_request(request)
        if item is None:
            return None
        # The primary key is immutable — drop it before applying values.
        payload.pop('id', None)
        restible.util.update_from_values(item, payload)
        item.put()
        return item
    def delete_item(self, request, params, payload):
        """Delete the addressed item; raise NotFound when it does not exist.

        NOTE(review): only `restible.util` is imported above — confirm that
        `restible.NotFound` resolves via the package.
        """
        item = self.item_for_request(request)
        if item and item.key:
            item.key.delete()
        else:
            raise restible.NotFound()
    def query_items(self, request, params, payload):
        """ Return a model query with the given filters.
        The query can be further customised like any ndb query.
        :return google.appengine.ext.ndb.Query:
            The query with the given filters already applied.
        """
        return util.ndb_query_from_values(self.model, params).fetch()
    def get_item(self, request, params, payload):
        """Return the item addressed by the request, or None."""
        return self.item_for_request(request)
    def item_for_request(self, request):
        # type: (webapp2.Request) -> Optional[ndb.Model]
        # Look the item up by its integer primary key from the URL.
        pk = self.get_pk(request)
        return self.model.get_by_id(int(pk))
# Used only in type hint comments
del Optional, webapp2, ndb
|
985,070 | 0c4336f2d93b5ead9a30e34aa4e57e0bb942c073 | #!/usr/bin/python
# encoding: UTF-8
import json
import string
import time
import urllib2
import MySQLdb
from codepkg import mod_config
BASE_URL = "http://lvyou.baidu.com/destination/ajax/jingdian?format=ajax&surl="
def getSpotsCount(url):
    """Return the total number of scenic spots ("scene_total") reported by
    the Baidu Lvyou ajax endpoint at *url*."""
    req = urllib2.Request(url)
    #print req
    res_data = urllib2.urlopen(req)
    res = res_data.read()
    #print res
    respJson = json.loads(res)
    count = respJson["data"]["scene_total"]
    return count
def getSpots(url,city):
    """Fetch one page of scenic spots from *url* and insert each spot into
    the `spot` table tagged with *city*.  Rolls back on any error."""
    try:
        mysql = "insert into spot(ename,cname,city,lat,lng,formataddress,status,finish,createtime,updatetime) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
        db = MySQLdb.connect(mod_config.dbhost, mod_config.dbuser, mod_config.dbpassword, mod_config.dbname, mod_config.dbcharset)
        cursor = db.cursor()
        req = urllib2.Request(url)
        # print req
        res_data = urllib2.urlopen(req)
        res = res_data.read()
        # print res
        respJson = json.loads(res)
        datas = respJson['data']["scene_list"]
        for spot in datas:
            #spotJson = json.loads(spot)
            now = int(time.time())
            timeArray = time.localtime(now)
            otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
            print otherStyleTime
            ename = spot['surl']
            sname = spot['sname']
            mapinfo = string.strip(spot['ext']['map_info'])
            address = string.strip(spot['ext']['address'])
            # keep at most 200 characters of the address
            address = string.strip(address[0:200])
            lng = 0.0
            lat = 0.0
            if ',' in mapinfo:
                coords = string.split(mapinfo,',')
                if len(coords) == 2:
                    lng = float(coords[0])
                    lat = float(coords[1])
                    # heuristic: a "latitude" above 100 means the pair was
                    # actually (lat, lng) — swap them
                    if lat>100:
                        temp = lat
                        lat = lng
                        lng = temp
            print address,mapinfo
            #ename = pinyin.get_pinyin(sname)
            cursor.execute(mysql,(ename, sname, city, lat,lng,address,1,0,otherStyleTime,otherStyleTime))
            db.commit()
    except Exception,msg:
        db.rollback()
        print msg
def crawlSpot():
    """Iterate all unfinished cities, page through their spot listings 10 at
    a time, store every spot, then mark the city finished."""
    try:
        mysql = 'select * from city where finish = 0'
        db = MySQLdb.connect(mod_config.dbhost,
                             mod_config.dbuser,
                             mod_config.dbpassword,
                             mod_config.dbname,
                             mod_config.dbcharset)
        cursor = db.cursor()
        cursor.execute(mysql)
        results = cursor.fetchall()
        for row in results:
            # row[0] = id, row[1] = city surl, row[2] = city name
            params = '&pn=1&rn=10'
            url = BASE_URL+row[1]+params
            print row[2],'\t',url
            count = getSpotsCount(url)
            perpage = 10
            # ceiling division: number of result pages
            pages = (count + perpage - 1) / perpage
            i = 1
            while i<=pages:
                params = "&pn="+str(i)+"&rn="+str(perpage)
                url = BASE_URL+row[1]+params
                getSpots(url,row[2])
                i+=1
            mysql2='update city set finish =1 where id = %s' % row[0]
            cursor.execute(mysql2)
            db.commit()
    except Exception,msg:
        print msg
    finally:
        pass
def getNote(city,cname,ename,maxcount):
    """Crawl up to *maxcount* travel notes for spot *cname*/*ename* in
    *city* from the Baidu Lvyou search ajax API (10 per page) and insert
    them into the `note` table.  Stops when a page has <= 1 note."""
    try:
        mysql = "insert into note(nid,title,url,postime,status,finish,source,spot,city,abs,departure,destinations,path,places,viewcount,favoritecount,recommendcount,createtime,updatetime) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s,%s, %s, %s, %s, %s, %s, %s, %s, %s)"
        db = MySQLdb.connect(mod_config.dbhost,
                             mod_config.dbuser,
                             mod_config.dbpassword,
                             mod_config.dbname,
                             mod_config.dbcharset)
        cursor = db.cursor()
        # i is the pagination offset (pn); index counts inserted notes
        i = 0
        index = 0
        while i<maxcount:
            currentURL = "https://lvyou.baidu.com/search/ajax/search?format=ajax&word=" + cname + "&surl=" + ename + "&pn=" + str(i)+ "&rn=10&t=" +str(time.time())
            currentURL = currentURL.encode("utf8")
            req = urllib2.Request(currentURL)
            res_data = urllib2.urlopen(req)
            res = res_data.read()
            # print res
            respJson = json.loads(res)
            if respJson['errno'] == 0:
                notes_list = respJson['data']['search_res']['notes_list']
                print respJson['data']['search_res']
                if notes_list is None or len(notes_list) <= 1:
                    break
                for note in notes_list:
                    # defaults in case a field is missing from the response
                    nid = ''
                    recommend_count=0
                    favorite_count=0
                    view_count=0
                    publish_time =0
                    title=''
                    departure=''
                    places=''
                    destinations=''
                    path=''
                    loc=''
                    abstracts=''
                    nid = note['nid']
                    recommend_count = note['recommend_count']
                    view_count = note['view_count']
                    favorite_count = note['favorite_count']
                    publish_time = note['publish_time']
                    # epoch seconds -> "YYYY-MM-DD HH:MM:SS"
                    ltime = time.localtime(float(publish_time))
                    postime = time.strftime("%Y-%m-%d %H:%M:%S", ltime)
                    title = note['title']
                    departure = note['departure']
                    places = note['places']
                    destinations = note['destinations']
                    path = note['path']
                    loc = note['loc']
                    abstracts = note['content']
                    now = int(time.time())
                    timeArray = time.localtime(now)
                    otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
                    # join list fields into '-'-separated strings for storage
                    destinationStrs = string.join(destinations, '-')
                    pathStrs = string.join(path, '-')
                    placeStrs = string.join(places, '-')
                    print 'index=%d' % index, nid, recommend_count, view_count, favorite_count, postime, title, departure, placeStrs, destinationStrs, pathStrs, loc, abstracts
                    cursor.execute(mysql,(nid,title,loc,postime,1,0,'百度',cname,city,abstracts,departure,destinationStrs,pathStrs,placeStrs,view_count,favorite_count,recommend_count,otherStyleTime,otherStyleTime))
                    db.commit()
                    index+=1
            i+=10
    except Exception,msg:
        db.rollback()
        print msg
def crawlNotes():
    """For every unfinished spot, crawl up to 5000 notes into `note`
    and mark the spot finished."""
    try:
        mysql = 'select * from spot where finish = 0'
        db = MySQLdb.connect(mod_config.dbhost,
                             mod_config.dbuser,
                             mod_config.dbpassword,
                             mod_config.dbname,
                             mod_config.dbcharset)
        cursor = db.cursor()
        cursor.execute(mysql)
        results = cursor.fetchall()
        index = 1
        for row in results:
            # row[1] = ename, row[2] = cname, row[3] = city
            print str(index),row[2],row[1],row[3]
            getNote(row[3],row[2],row[1],5000)
            mysql2 = 'update spot set finish = 1 where id = %s' % row[0]
            cursor.execute(mysql2)
            db.commit()
            index+=1
    except Exception,msg:
        #db.rollback()
        print msg
def getCNote(city,cname,ename,maxcount):
    """Variant of getNote that writes into `citynote` and deduplicates on
    the (departure + city + loc) string via the module-level `myset`."""
    try:
        mysql = "insert into citynote(nid,title,url,postime,status,finish,source,spot,city,abs,departure,destinations,path,places,viewcount,favoritecount,recommendcount,createtime,updatetime) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s,%s, %s, %s, %s, %s, %s, %s, %s, %s)"
        db = MySQLdb.connect('localhost', 'root', 'admin', 'pythondb', charset='utf8')
        cursor = db.cursor()
        # i is the pagination offset (pn); index counts inserted notes
        i = 0
        index = 0
        while i<maxcount:
            currentURL = "https://lvyou.baidu.com/search/ajax/search?format=ajax&word=" + cname + "&surl=" + ename + "&pn=" + str(i)+ "&rn=10&t=" +str(time.time())
            currentURL = currentURL.encode("utf8")
            req = urllib2.Request(currentURL)
            res_data = urllib2.urlopen(req)
            res = res_data.read()
            # print res
            respJson = json.loads(res)
            if respJson['errno'] == 0:
                notes_list = respJson['data']['search_res']['notes_list']
                print respJson['data']['search_res']
                if notes_list is None or len(notes_list) <= 1:
                    break
                for note in notes_list:
                    # defaults in case a field is missing from the response
                    nid = ''
                    recommend_count=0
                    favorite_count=0
                    view_count=0
                    publish_time =0
                    title=''
                    departure=''
                    places=''
                    destinations=''
                    path=''
                    loc=''
                    abstracts=''
                    nid = note['nid']
                    recommend_count = note['recommend_count']
                    view_count = note['view_count']
                    favorite_count = note['favorite_count']
                    publish_time = note['publish_time']
                    ltime = time.localtime(float(publish_time))
                    postime = time.strftime("%Y-%m-%d %H:%M:%S", ltime)
                    title = note['title']
                    departure = note['departure']
                    places = note['places']
                    destinations = note['destinations']
                    path = note['path']
                    loc = note['loc']
                    abstracts = note['content']
                    now = int(time.time())
                    timeArray = time.localtime(now)
                    otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
                    destinationStrs = string.join(destinations, '-')
                    pathStrs = string.join(path, '-')
                    placeStrs = string.join(places, '-')
                    print 'index=%d' % index, nid, departure, city, cname, loc
                    # dedup key: same departure + city + note url
                    startend = departure+city+loc
                    if startend not in myset:
                        cursor.execute(mysql,(nid,title,loc,postime,1,0,'百度',cname,city,abstracts,departure,destinationStrs,pathStrs,placeStrs,view_count,favorite_count,recommend_count,otherStyleTime,otherStyleTime))
                        db.commit()
                        index+=1
                        myset.add(startend)
            i+=10
    except Exception,msg:
        db.rollback()
        print msg
def crawlCNotes():
    """For every unfinished spot, crawl deduplicated notes into `citynote`
    and mark the spot finished."""
    try:
        mysql = 'select * from spot where finish = 0'
        db = MySQLdb.connect('localhost', 'root', 'admin', 'pythondb', charset='utf8')
        cursor = db.cursor()
        cursor.execute(mysql)
        results = cursor.fetchall()
        index = 1
        for row in results:
            # row[1] = ename, row[2] = cname, row[3] = city
            print str(index),row[2],row[1],row[3]
            getCNote(row[3],row[2],row[1],5000)
            mysql2 = 'update spot set finish = 1 where id = %s' % row[0]
            cursor.execute(mysql2)
            db.commit()
            index+=1
    except Exception,msg:
        #db.rollback()
        print msg
def getUNote(city,cname,ename,maxcount):
    """Variant of getNote that writes into `unote` and deduplicates on the
    note URL (`loc`) via the module-level `urlset`."""
    try:
        mysql = "insert into unote(nid,title,url,postime,status,finish,source,spot,city,abs,departure,destinations,path,places,viewcount,favoritecount,recommendcount,createtime,updatetime) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s,%s, %s, %s, %s, %s, %s, %s, %s, %s)"
        db = MySQLdb.connect('localhost', 'root', 'admin', 'pythondb', charset='utf8')
        cursor = db.cursor()
        # i is the pagination offset (pn); index counts processed notes
        i = 0
        index = 0
        while i<maxcount:
            currentURL = "https://lvyou.baidu.com/search/ajax/search?format=ajax&word=" + cname + "&surl=" + ename + "&pn=" + str(i)+ "&rn=10&t=" +str(time.time())
            currentURL = currentURL.encode("utf8")
            req = urllib2.Request(currentURL)
            res_data = urllib2.urlopen(req)
            res = res_data.read()
            # print res
            respJson = json.loads(res)
            if respJson['errno'] == 0:
                notes_list = respJson['data']['search_res']['notes_list']
                print respJson['data']['search_res']
                if notes_list is None or len(notes_list) <= 1:
                    break
                for note in notes_list:
                    # defaults in case a field is missing from the response
                    nid = ''
                    recommend_count=0
                    favorite_count=0
                    view_count=0
                    publish_time =0
                    title=''
                    departure=''
                    places=''
                    destinations=''
                    path=''
                    loc=''
                    abstracts=''
                    nid = note['nid']
                    recommend_count = note['recommend_count']
                    view_count = note['view_count']
                    favorite_count = note['favorite_count']
                    publish_time = note['publish_time']
                    ltime = time.localtime(float(publish_time))
                    postime = time.strftime("%Y-%m-%d %H:%M:%S", ltime)
                    title = note['title']
                    departure = note['departure']
                    places = note['places']
                    destinations = note['destinations']
                    path = note['path']
                    loc = note['loc']
                    abstracts = note['content']
                    now = int(time.time())
                    timeArray = time.localtime(now)
                    otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
                    destinationStrs = string.join(destinations, '-')
                    pathStrs = string.join(path, '-')
                    placeStrs = string.join(places, '-')
                    print 'index=%d' % index, nid, departure, city, cname, loc
                    if loc not in urlset:
                        cursor.execute(mysql,(nid,title,loc,postime,1,0,'百度',cname,city,abstracts,departure,destinationStrs,pathStrs,placeStrs,view_count,favorite_count,recommend_count,otherStyleTime,otherStyleTime))
                        db.commit()
                        index+=1
                        urlset.add(loc)
                    # NOTE(review): index is incremented here for every note
                    # AND above for inserted ones, so it double-counts — it is
                    # only used for the progress print; confirm intent.
                    index += 1
            i+=10
    except Exception,msg:
        db.rollback()
        print msg
def crawlUNotes():
    """For every unfinished spot, crawl URL-deduplicated notes into `unote`
    and mark the spot finished."""
    try:
        mysql = 'select * from spot where finish = 0'
        db = MySQLdb.connect('localhost', 'root', 'admin', 'pythondb', charset='utf8')
        cursor = db.cursor()
        cursor.execute(mysql)
        results = cursor.fetchall()
        index = 1
        for row in results:
            # row[1] = ename, row[2] = cname, row[3] = city
            print str(index),row[2],row[1],row[3]
            getUNote(row[3],row[2],row[1],5000)
            mysql2 = 'update spot set finish = 1 where id = %s' % row[0]
            cursor.execute(mysql2)
            db.commit()
            index+=1
    except Exception,msg:
        #db.rollback()
        print msg
# Global dedup sets shared by the note crawlers:
# myset  -- departure+city+loc keys already inserted (getCNote)
# urlset -- note URLs already inserted (getUNote)
myset = set()
urlset = set()
if __name__ == '__main__':
    # Earlier crawl stages, kept for reference; only the unote stage runs.
    # crawlSpot()
    # crawlNotes()
    # crawlCNotes()
    crawlUNotes()
985,071 | 1c0d417138b0302b21254a9531f812534d15524e | from setuptools import setup
'''
# local install
python setup.py install
# local developer install
python setup.py develop
#registering on PyPI
python setup.py register
#create a source distribution
python setup.py sdist
# upload the source distribution tp PyPI
python setup.py sdist upload
'''
# Package metadata for the healthcare agent-based simulator.
setup(name='simulator',
      version='0.1',
      description='healthcare agent-based simulator',
      url='http://github.com/ttrikalin/simulator',
      author='TA Trikalinos',
      author_email='thomas_trikalinos@brown.edu',
      license='MIT',
      packages=['simulator'],
      # runtime dependencies: unit handling and numerics
      install_requires=[
          'pint',
          'numpy'
      ],
      zip_safe=False)
|
985,072 | d7be7b9bab25cdd59f55017a5913662cb544ed33 | class Dog():
#Class object attributes
species = 'Mammal'
def __init__(self, breed, name, spots):
self.breed = breed
self.name = name
self.spots = spots
# Demo: build a Dog and print its instance attributes plus the shared
# class attribute `species`.
my_dog = Dog(breed="poodle", name = "FEFE", spots = True)
print(f"My dog is named {my_dog.name}. He is a {my_dog.breed}. Does he have spots, you ask? {my_dog.spots}. Oh by the way he is a {my_dog.species}")
985,073 | 340123ad88a39dc75e63a6f35b2cba44229ad000 | # -*- coding: utf-8 -*-
# This file is part of ScapeList.
#
# ScapeList is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ScapeList is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ScapeList. If not, see <http://www.gnu.org/licenses/>.
# Written by Mohamed Sordo (@neomoha)
# Email: mohamed ^dot^ sordo ^at^ gmail ^dot^ com
# Website: http://msordo.weebly.com
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
from django.db import models
class Image(models.Model):
    """An Instagram image, keyed by its Instagram id, with links to three
    resolutions of the media."""
    instagram_id = models.CharField(max_length=50, primary_key=True)
    instagram_link = models.URLField()
    #instagram_tags = models.CharField(max_length=400)
    standard_resolution = models.URLField()
    low_resolution = models.URLField()
    thumbnail = models.URLField()
    # On Python 3: def __str__(self):
    def __unicode__(self):
        return self.instagram_id
    class Meta:
        ordering = ('instagram_id',)
class Keyword(models.Model):
    """A unique tag associated with many images."""
    name = models.CharField(max_length=255, unique=True)
    images = models.ManyToManyField(Image)
    # On Python 3: def __str__(self):
    def __unicode__(self):
        return self.name
    class Meta:
        ordering = ('name',)
class Song(models.Model):
    """A track identified by its Echonest id, with its Spotify id and
    basic display metadata."""
    echonest_id = models.CharField(max_length=50, primary_key=True)
    spotify_trackid = models.CharField(max_length=50, unique=True)
    title = models.CharField(max_length=255)
    artist = models.CharField(max_length=255)
    url = models.URLField()
    # On Python 3: def __str__(self):
    def __unicode__(self):
        return self.artist+" "+self.title
    class Meta:
        ordering = ('artist','title')
class Annotation(models.Model):
    """A user's attachment of a song to an image under a given keyword.
    Each user may annotate a keyword at most once (see Meta).

    NOTE(review): positional ForeignKey without on_delete is pre-Django-2.0
    syntax — confirm the target Django version before upgrading.
    """
    keyword = models.ForeignKey(Keyword)
    image = models.ForeignKey(Image)
    song = models.ForeignKey(Song)
    user = models.ForeignKey(User)
    def __unicode__(self):
        return "User %s has attached song %s-%s to image %s (keyword=%s)" % (self.user.username, self.song.artist, self.song.title,
                                                                             self.image.instagram_link, self.keyword.name)
    class Meta:
        unique_together = ("user", "keyword")
985,074 | c33fa33fe9cc2a86578dc1b5445ab5b9eb2e6ed2 | # Solution to https://leetcode.com/problems/reverse-integer/
class Solution:
    def reverse(self, x: int) -> int:
        """Reverse the decimal digits of x, preserving the sign.

        Returns 0 when the reversed value falls outside the signed
        32-bit range [-2**31, 2**31 - 1], as the problem requires.
        """
        sign = -1 if x < 0 else 1
        flipped = sign * int(str(abs(x))[::-1])
        if -2 ** 31 <= flipped <= 2 ** 31 - 1:
            return flipped
        return 0
|
985,075 | 5e8a7e0d00f39389610ee065f431dac5ff76cd2c | from typing import List, Tuple
def knight_tours(board: List[List[int]], curr: Tuple[int, int], count: int) -> bool:
    """Depth-first backtracking search for a knight's tour.

    Currently this only solves a knight tour for a specific starting point:
    the caller pre-marks the start cell with 0 and passes count=1.

    board -- square grid; -1 marks an unvisited cell, any other value is
             the step index at which the cell was visited.
    curr  -- (row, col) of the knight's current cell.
    count -- number of cells visited so far.

    Returns True once every cell has been visited (the board then holds a
    complete tour), False if no tour extends the current position (the
    board is restored to its entry state in that case).

    Fixes over the previous revision: unvisited-cell check (the old code
    happily revisited squares, making the search unsound and effectively
    non-terminating), success is now propagated instead of being erased
    by backtracking, and the nonsensical `-> -1` annotation is corrected.
    """
    size = len(board)
    if count == size ** 2:
        return True
    # The eight L-shaped knight moves.
    deltas = [
        (2, 1),
        (1, 2),
        (-2, 1),
        (-1, 2),
        (2, -1),
        (1, -2),
        (-2, -1),
        (-1, -2),
    ]
    for dx, dy in deltas:
        next_x, next_y = curr[0] + dx, curr[1] + dy
        if not (0 <= next_x < size and 0 <= next_y < size):
            continue
        if board[next_x][next_y] != -1:
            # Already visited -- this check was missing before.
            continue
        board[next_x][next_y] = count
        if knight_tours(board, (next_x, next_y), count + 1):
            return True
        board[next_x][next_y] = -1
    return False
def is_valid_coordinate(coordinate: Tuple[int, int], board_size: int) -> bool:
    """Return True when *coordinate* lies on a board_size x board_size board."""
    row, col = coordinate
    return min(row, col) >= 0 and max(row, col) < board_size
# 8x8 board; -1 marks an unvisited square. The knight starts at (0, 4),
# which is pre-marked with step index 0 before the search begins.
test_board = [[-1] * 8 for _ in range(8)]
test_board[0][4] = 0
knight_tours(test_board, (0, 4), 1)
|
985,076 | 35ba9114068783d2b2d17c74abbf712eee042729 | import time
import datetime
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter, date2num
import numpy as np
import subprocess as sp
import time
import sys
import os
from itertools import groupby
# Daily T2-north monitoring job (Python 2: check_output returns str here).
# Computes yesterday's mean/std T2 rate from .tsp event files, appends the
# result to a record file and regenerates the "recent" plot for the web UI.
# Yesterday's date via GNU date; strip the trailing newline.
fdate = sp.check_output(['date','--date','-1 days','+%Y%m%d'])
fdate = fdate.replace('\n','')
yr = int(fdate[:4])
mo = int(fdate[4:6])
dy = int(fdate[6:])
date = "%i_%02d_%02d" %(yr,mo,dy)
# Collect yesterday's .tsp event files.
dirlist = os.listdir('/home/augta/data/north/Events/%i%02d%02d/' %(yr,mo,dy))
dirlist = [k for k in dirlist if '.tsp' in k]
t2_counts = np.empty(0)
for tspfile in dirlist:
    # testtsp prints one timestamp per line; count entries per integer
    # second (groupby run lengths) to get a per-second T2 rate.
    tmp = sp.check_output(['/home/augta/web_monitor/testtsp',
        '/home/augta/data/north/Events/%i%02d%02d/%s' %(yr,mo,dy,tspfile)])
    tmp = tmp.split('\n')
    tmp = [int(k.split('.')[0]) for k in tmp if len(k) > 0]
    tmp = [len(list(group)) for key, group in groupby(tmp)]
    t2_counts = np.concatenate((t2_counts,np.array(tmp)),axis=0)
# Append today's summary to the running record.
with open('/home/augta/web_monitor/T2north_record.txt','a') as f:
    # DATE <T2> STD(T2)
    f.write('%s %.4f %.4f\n' %(date,t2_counts.mean(),t2_counts.std()))
# Keep only the last 14 days for the "recent" plot.
fpath = '/home/augta/web_monitor/recent_t2north.txt'
with open('/home/augta/web_monitor/recent_t2north.txt','w') as f:
    sp.call(['tail','-n','14','/home/augta/web_monitor/T2north_record.txt'],stdout=f)
t2 = np.loadtxt(fpath,usecols=(1,))
t2err = np.loadtxt(fpath,usecols=(2,))
rawdates = np.loadtxt(fpath,usecols=(0,),dtype=np.str)
x=np.arange(len(t2))
# Shaded +/- 1 sigma band around the mean rate.
plt.fill_between(x,t2-t2err,t2+t2err,color='red',alpha=0.1,linewidth=0.)
plt.plot(x,t2,'o--',color='red',fillstyle='none',ms=14)
plt.ylim(ymin=0.)
plt.xticks(x[::2],rawdates[::2],rotation=90,fontsize='large')
plt.yticks(fontsize='large')
plt.ylabel('T2 Rate [Hz]',fontsize='large')
plt.tight_layout()
plt.savefig('/var/www/html/monitor/img/T2north_recent.png',frameon=1)
plt.clf()
|
985,077 | 1bfb66081bed58b309e3a581b34f403d9f68548c |
from jpath4.query import data as d
def jpath_to_json(value, strict=False):
    """
    Converts the specified JPath data into Python objects/lists/numbers/etc
    representing the data. The resulting data is suitable for passing into
    Python's json.dumps.

    If strict is true, object keys are required to be strings, as JSON
    mandates; a non-string key will result in an exception. If strict is
    false, such keys will be allowed, but the resulting object might not be
    passable to json.dumps without causing an exception during the call.
    """
    if isinstance(value, d.Boolean):
        return value.get_value()
    if isinstance(value, d.Null):
        return None
    if isinstance(value, d.Number):
        return value.get_as_float()
    if isinstance(value, d.String):
        return value.get_value()
    if isinstance(value, d.Object):
        result = {}
        for k, v in value:
            # BUG FIX: the strict check must inspect the JPath key *before*
            # conversion. The old code converted first, so the converted key
            # was a native Python value and `isinstance(k, d.String)` could
            # never be true -- strict mode silently did nothing.
            if strict and not isinstance(k, d.String):
                raise Exception("Key " + str(k) + " of an object was not a "
                        "string, but strict formatting was requested. JSON "
                        "object keys must always be strings when strict "
                        "formatting is on.")
            result[jpath_to_json(k)] = jpath_to_json(v)
        return result
    if isinstance(value, d.List):
        return [jpath_to_json(v) for v in value]
    raise TypeError("Can't convert values of type " + str(type(value))
            + " to json")
def json_to_jpath(value):
    """Convert native JSON-style Python data (None, bool, numbers, strings,
    lists/tuples, dicts) into the equivalent JPath data objects.

    Raises TypeError for any other type. (Python 2: accepts long/unicode.)
    """
    if value is None:
        return d.StandardNull()
    # bool must be tested before the numeric types: bool is an int subclass.
    if isinstance(value, bool):
        return d.StandardBoolean(value)
    if isinstance(value, (int, float, long)):
        return d.StandardNumber(value)
    if isinstance(value, (str, unicode)):
        return d.StandardString(value)
    if isinstance(value, (list, tuple)):
        converted = [json_to_jpath(item) for item in value]
        return d.StandardList(converted)
    if isinstance(value, dict):
        pairs = [d.StandardPair(json_to_jpath(key), json_to_jpath(val))
                 for key, val in value.items()]
        return d.StandardObject(pairs)
    raise TypeError("Can't convert values of type " + str(type(value))
            + " to jpath data")
|
985,078 | 3672e0e2ceaf06262312259b7e314f3ad0c352b1 | from sampleapp.worker import name as webjob_name
import os
import sys
import shutil
if __name__ == "__main__":
    try:
        # WEBROOT_PATH is supplied by the hosting environment (Azure App
        # Service). Copy the worker script into the continuous-WebJobs
        # directory so the platform picks it up.
        webroot = os.environ['WEBROOT_PATH']
        worker_script = os.path.join(webroot, 'sampleapp', 'worker', 'run.py')
        webjob_dir = os.path.join(webroot, 'App_Data', 'jobs', 'continuous', webjob_name)
        shutil.copy(worker_script, webjob_dir)
    except Exception as e:
        print("failed to deploy webjob with reason: " + str(e))
        sys.stdout.flush()
|
985,079 | 6f5e5e001f6c214647fb5fe5ac948b6d3b461716 | #!/usr/bin/python
import random
import sys
import getopt
from math import sqrt
import math
nauticalMilePerLat = 60.00721
nauticalMilePerLongitude = 60.10793
rad = math.pi / 180.0
milesPerNauticalMile = 1.15078
def read_coords(coord_file):
    '''
    Read coordinates from an open file (or any iterable of lines) and
    return them as a list of (x, y) float tuples.
    Coords should be stored as comma separated floats, one x,y pair per line.
    '''
    pairs = []
    for raw_line in coord_file:
        first, second = raw_line.strip().split(',')
        pairs.append((float(first), float(second)))
    return pairs
def order(best):
    """Print the city names from the local 'cities' file in tour order.

    best -- sequence of indices into the city list.
    Returns None, so `print order(best)` in main() also emits 'None'.
    """
    file = open('cities','r')
    cities = []
    for city in file:
        cities.append(city)
    file.close()
    for i in best:
        print cities[i]
def calcDistance(lat1, lon1, lat2, lon2):
    """
    Distance between two lat/lon points, returned in statute miles.

    Flat-earth approximation: north-south distance scales with latitude
    degrees; east-west distance uses the average of the two latitudes'
    cosines to account for meridian convergence. Computed in nautical
    miles, then converted.
    """
    north_south = (lat2 - lat1) * nauticalMilePerLat
    east_west = (math.cos(lat1 * rad) + math.cos(lat2 * rad)) * (lon2 - lon1) * (nauticalMilePerLongitude / 2)
    nautical = math.sqrt( north_south**2 + east_west**2 )
    return nautical * milesPerNauticalMile
def calculatePathLen(path, arr):
    """Total length in miles of visiting *path* (indices into arr) in order.

    arr holds (lat, lon) pairs; an empty or single-point path has length 0.
    """
    total = 0
    for prev, cur in zip(path, path[1:]):
        total += calcDistance(arr[prev][0], arr[prev][1], arr[cur][0], arr[cur][1])
    return total
def usage():
    """Print command-line usage to stdout."""
    print "usage: python %s [-v] -n <max iterations> <city file>" % sys.argv[0]
def main():
    """Parse CLI options, load the city list and report tour results.

    NOTE(review): this function references several names that are never
    defined anywhere in this file (out_file_name, best, iterations,
    score). The optimisation step that should produce best/iterations/
    score appears to have been removed, so running this currently raises
    NameError.
    """
    try:
        options, args = getopt.getopt(sys.argv[1:], "ho:vm:n:a:", ["cooling="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    max_iterations=None
    verbose=None
    for option,arg in options:
        if option == '-v':
            verbose=True
        elif option == '-h':
            usage()
            sys.exit()
        elif option == '-n':
            max_iterations=int(arg)
    if max_iterations is None:
        usage();
        sys.exit(2)
    # NOTE(review): out_file_name is never assigned -- the '-o' option is
    # declared in the getopt spec but not handled above, so this line
    # raises NameError.
    if out_file_name and not out_file_name.endswith(".png"):
        usage()
        print "output image file name must end in .png"
        sys.exit(1)
    if len(args) != 1:
        usage()
        print "no city file specified"
        sys.exit(1)
    city_file=args[0]
    # enable more verbose logging (if required) so we can see workings
    # of the algorithms
    import logging
    format='%(asctime)s %(levelname)s %(message)s'
    if verbose:
        logging.basicConfig(level=logging.INFO,format=format)
    else:
        logging.basicConfig(format=format)
    # setup the things tsp specific parts hillclimb needs
    coords=read_coords(file(city_file))
    # output results
    # NOTE(review): best, iterations and score are never computed -- the
    # hillclimb/annealing call is missing between loading coords and here.
    print order(best)
    print iterations,score,best
    print str(calculatePathLen(best,coords)) + " mi"
# Script entry point.
if __name__ == "__main__":
    main()
|
985,080 | bf812b6f75924afeb32e3c7bf0889b56c511e980 | # İnheritance (KALITIM) : Miras alma
# Person => name,lastname,age ,eat(),run(),drink()
#Student(Person),Teacher(Person)
# Animal=> Dog(Animal),Cat(Animal)
class Person():
    """Base class holding a person's first and last name."""

    def __init__(self, fname, lname):
        # Attribute names are read directly by subclasses and the demo
        # script below, so they must stay as-is.
        self.firstname = fname
        self.lastname = lname
        print("Person created")

    def whoAmİ(self):
        """Identify this object; subclasses override this."""
        print("I am a Person")

    def eat(self):
        """Behaviour shared by every Person (and inherited unchanged)."""
        print("I am eating")
class Student(Person):
    """A Person with a student number; overrides whoAmİ."""

    def __init__(self, fname, lname, number):
        # Delegate name handling to the base class (explicit call,
        # as in the original source).
        Person.__init__(self, fname, lname)
        self.number = number
        print("student created")

    # OVERRIDE: a Student identifies itself differently.
    def whoAmİ(self):
        print("I am a student")

    def sayhello(self):
        print("hello ı am student")
class Teacher(Person):
    """A Person with a subject branch; overrides whoAmİ."""

    def __init__(self, fname, lname, branch):
        # super() form of delegation, as in the original.
        super().__init__(fname, lname)
        self.branch = branch

    def whoAmİ(self):
        print("ı am a teacher")
# Demo: construct one of each type and exercise inherited and overridden
# methods. The printed output order is the observable behaviour here.
p1=Person("oğulcan","kırtay")
s1=Student("ali","vural",1234)
t1=Teacher("sadık","yılmaz","math")
print(p1.firstname+" "+p1.lastname)
print(s1.firstname+" "+s1.lastname+" "+str(s1.number))
p1.whoAmİ()
s1.whoAmİ()  # resolves to the Student override
p1.eat()
s1.eat()  # inherited unchanged from Person
s1.sayhello()
t1.whoAmİ()
|
985,081 | 86190a87fd7f4a3e8e0ed1ccf7624071a7a67b58 | from compound.o.oH import *
# Formulas/skeletons that are known gases at standard conditions.
db_gases = ["H2", "He", "N2", "O2", "O3", "F2", "Cl2", "Ne", "Ar", "Kr", "Xe", "Og", "NH3", "SiH4", "CO", "CO2", "N2O", "NO", "NO2", "SO2", "H2S", "CH2=C=CH2", "CH2=CH-CH=CH2"]

def is_gase(comp: 'Compound'):
    """Return True when *comp* is considered a gas.

    A compound is a gas when its formula (or, for organic 'o' types, its
    skeleton) appears in db_gases, or when its parsed chain length is at
    most 4 for the recognised organic subtypes.
    """
    if comp.formula.value in db_gases:
        return True
    if comp.comp_type[0] != 'o':
        return False
    if comp.skeleton.value in db_gases:
        return True
    # Subtype-specific parsers: some return a bare length, others a tuple
    # whose first element is the length (mirrors the original indexing).
    if comp.comp_type == "oHAa":
        return oHAa_parse(comp.skeleton) <= 4
    if comp.comp_type == "oHAe":
        return oHAe_parse(comp.skeleton)[0] <= 4
    if comp.comp_type == "oHAy":
        return oHAy_parse(comp.skeleton)[0] <= 4
    if comp.comp_type == "oHCa":
        return oHCa_parse(comp.skeleton) <= 4
    return False
985,082 | 3c755049117467bf4b5343a98eb5b9da14678081 | class Solution(object):
def restoreIpAddresses(self, s):
"""
:type s: str
:rtype: List[str]
"""
if len(s) < 4 or len(s) > 12:
return []
ans = []
temp = []
temp.append(s[:1])
recur_IP(ans, temp, s[1:], 0)
temp.pop()
if s[0] != '0':
temp.append(s[:2])
recur_IP(ans, temp, s[2:], 0)
temp.pop()
else:
return ans
if int (s[:3]) <= 255:
temp.append(s[:3])
recur_IP(ans, temp, s[3:], 0)
temp.pop()
return ans
def recur_IP(ans, base, s, idx):
    """Recursively place the remaining IP segments after the first.

    ans  -- accumulator of completed dotted-quad strings.
    base -- segments chosen so far.
    s    -- remaining digit string.
    idx  -- how many segments this helper has already placed (0-2); when
            idx == 2 the current segment must consume all of s to
            complete the address.
    """
    if idx == 3:
        return
    if len(s) < 1:
        return
    temp = base[:]
    # 1-digit segment (always legal, including a lone '0').
    temp.append(s[:1])
    if idx == 2 and len(s) == 1:
        ans.append(".".join(temp))
        return
    else:
        recur_IP(ans, temp, s[1:], idx+1)
        temp.pop()
    if len(s) < 2:
        return
    # 2-digit segment: leading zero disallowed; a leading '0' also rules
    # out the 3-digit option, hence the early return in the else branch.
    if s[0] != '0':
        temp.append(s[:2])
        if idx == 2 and len(s) == 2:
            ans.append(".".join(temp))
            return
        else:
            recur_IP(ans, temp, s[2:], idx+1)
            temp.pop()
    else:
        return
    if len(s) <3:
        return
    # 3-digit segment must be <= 255 (no leading zero, guaranteed above).
    if int (s[:3]) <= 255:
        temp.append(s[:3])
        if idx == 2 and len(s) == 3:
            ans.append(".".join(temp))
            return
        else:
            recur_IP(ans, temp, s[3:], idx+1)
            temp.pop()
|
985,083 | 3e6396c39e72afa92778a17ad12edc4079583116 | import collections
import systems
import scales
import sformat
SECTIONS_ORDER = ["front", "center", "rear"]
SYSTEMS_ORDER = ["1","2","3","4","5","6","core"]
SECTION_SCOPE_ALIASES = {
"front": { "dr": "front_dr" },
"center": { "dr": "center_dr" },
"rear": { "dr": "rear_dr" }}
def add_values_dict(d1, d2, alias):
    """Accumulate d2's values into d1 in place.

    Each key of d2 is first mapped through *alias* (falling back to the
    key itself); existing entries in d1 are summed, new ones inserted.
    """
    for key, amount in d2.items():
        target = alias.get(key, key)
        if target in d1:
            d1[target] += amount
        else:
            d1[target] = amount
def mult_values_dict_scalar(d, n, alias):
    """Scale every value of *d* by *n* in place.

    *alias* is accepted for signature symmetry with add_values_dict but
    is unused.
    """
    for key in list(d):
        d[key] *= n
def system_key_to_indx(key):
    """Map a system-location key ('1'..'6' or 'core') to a sortable int.

    'core' sorts last as 7; numeric strings map to their integer value.
    """
    return 7 if key == "core" else int(key)
def indx_to_system_key(idx):
    """Inverse of system_key_to_indx: 7 -> 'core', else a decimal string."""
    if idx == 7:
        return "core"
    return "%.0f" % idx
class Ship:
    """A spaceship assembled from a JSON description.

    Aggregates per-system value deltas into overall ship stats, maps
    mounted weapons onto the weapon-mount slots provided by the ship's
    systems, and renders a markdown stat sheet.
    """
    name = ""
    role = ""
    # sm: ship size modifier; drives all scale-based stats.
    sm = 1
    # all values:
    # ht hnd sr lwt length front_dr center_dr rear_dr
    # thrust pp_draw pp_gen cost
    values = None
    # {"size": <number>, "section": <front|center|rear>, "location":
    # <1-7|core>, "count": <number>, "weapon": <weapon_id>}
    weapons = []
    # section -> location -> [weapon_idx, ...]
    # weapon_idx is index into weapons array
    weapon_mount_mapping = None
    # section -> location (1-6,core) -> part
    systems = {}

    def __init__(self, ship_json, all_systems, all_weapons):
        """Build the ship from its JSON dict plus system/weapon catalogs."""
        self.sm = ship_json["sm"]
        self.name = ship_json.get("name","")
        self.role = ship_json.get("role","")
        self.systems = ship_json["systems"]
        # Weapon sizes in the JSON are relative to the ship's SM.
        self.weapons = [
            {
                "size": self.sm + w["rel_size"],
                "section": w["section"],
                "location": w["location"],
                "count": w["count"],
                "is_turret": w["is_turret"],
                "weapon": w["weapon"]} for w in ship_json["weapons"]]
        self.reset_values(all_systems)

    # iterate all systems as ((section_key, system_key, system_sm), system)
    def iter_systems(self, all_systems):
        """Yield every installed system in section/location order.

        A list at a location denotes sub-sized systems (SM reduced by 1).
        """
        for section_key in SECTIONS_ORDER:
            section = self.systems[section_key]
            for system_key in SYSTEMS_ORDER:
                if system_key not in section: continue
                system_id_json = section[system_key]
                system_ids = []
                system_sm = self.sm
                if isinstance(system_id_json, list):
                    system_ids = system_id_json
                    system_sm -= 1
                else:
                    system_ids = [system_id_json]
                for system_id in system_ids:
                    system = all_systems[system_id]
                    yield ((section_key,system_key,system_sm),system)

    def iter_grouped_systems(self, all_systems):
        """Yield runs of identical adjacent systems as
        ((section, (first_key, last_key), sm, count), system)."""
        group_first_system_key = None
        group_last_system_key = None
        group_section_key = None
        group_system = None
        group_sm = None
        group_count = -1
        for ((section_key, system_key, system_sm),
                system) in self.iter_systems(all_systems):
            group_count += 1
            # New group whenever section, system id or size changes.
            if (group_section_key != section_key or
                    group_system.s_id != system.s_id or
                    group_sm != system_sm):
                if group_section_key != None:
                    yield ((group_section_key,
                            (group_first_system_key, group_last_system_key),
                            group_sm, group_count), group_system)
                group_count = 0
                group_first_system_key = system_key
                group_section_key = section_key
                group_sm = system_sm
                group_system = system
            group_last_system_key = system_key
        # Flush the final group.
        yield ((group_section_key,
                (group_first_system_key, group_last_system_key),
                group_sm, group_count+1), group_system)

    # Iterate all weapon mounts on ((section, size, count, is_turret), weapon)
    # where weapon is an object that inherits weapon.BaseWeapon. Empty mount
    # slots have weapon of None
    def iter_mounted_weapons(self, all_weapons):
        for section, section_info in self.weapon_mount_mapping.items():
            for location, weapon_idxs in section_info.items():
                for weapon_idx in weapon_idxs:
                    weapon_info = self.weapons[weapon_idx]
                    yield ((section,
                            weapon_info["size"],
                            weapon_info["count"],
                            weapon_info["is_turret"]),
                            all_weapons[weapon_info["weapon"]])

    def iter_grouped_mounted_weapons(self, all_weapons):
        """Yield mounted weapons aggregated by section/size/weapon/turret."""
        # section -> size -> weapon_id -> is_turret -> count
        weapon_groupings = collections.defaultdict(
            lambda: collections.defaultdict(
                lambda: collections.defaultdict(
                    lambda: collections.defaultdict(int))))
        for ((section, size, count, is_turret),
                weapon) in self.iter_mounted_weapons(all_weapons):
            weapon_groupings[section][size][weapon.id][is_turret] += count
        for section, section_info in weapon_groupings.items():
            for size, size_info in section_info.items():
                for weapon_idx, weapon_idx_info in size_info.items():
                    for is_turret, count in weapon_idx_info.items():
                        yield ((section,
                                size,
                                count,
                                is_turret),
                                all_weapons[weapon_idx])

    def reset_values(self, all_systems):
        """Recompute all derived stats and the weapon-mount mapping."""
        self.values = {}
        self.weapon_mounts = collections.defaultdict(
            lambda: collections.defaultdict(int))
        self.calc_self_values()
        self.calc_systems_values(all_systems)
        self.calc_weapon_mountings(all_systems)

    def calc_self_values(self):
        """Set base stats that depend only on the ship's SM."""
        sthp = scales.DRScale(20,5).get_scale_value(self.sm)
        self.values["st"] = self.values["hp"] = sthp
        ht = 13
        # TODO: also -1 ht if TL 7-9 and automation
        if self.sm <= 9:
            # TODO: technically only -1 if no engine room
            ht -= 1
        self.values["ht"] = ht
        self.values["hnd"] = -((self.sm - 4) // 3)
        self.values["sr"] = 4 if self.sm <= 6 else 5
        self.values["lwt"] = scales.Geometric10HalfScale(10,4).get_scale_value(self.sm)
        self.values["length"] = scales.DRScale(15,5).get_scale_value(self.sm)
        self.values["mass"] = scales.CycleScale(30,5,[1,3]).get_scale_value(self.sm)

    def calc_systems_values(self, all_systems):
        """Fold every system's value deltas and mount slots into the ship."""
        for ((section_key, system_key, system_sm),
                system) in self.iter_systems(all_systems):
            # Map generic keys (e.g. "dr") to section-specific ones.
            section_scope_alias = SECTION_SCOPE_ALIASES[section_key]
            add_values_dict(self.values,
                system.get_values_delta(system_sm),
                section_scope_alias)
            add_values_dict(self.weapon_mounts[section_key],
                system.get_weapon_mounts(self.sm),
                {})

    def calc_weapon_mountings(self, all_systems):
        """Assign each configured weapon to a mount slot, validating capacity."""
        self.weapon_mount_mapping = collections.defaultdict(
            lambda: collections.defaultdict(list))
        # section -> location -> size -> count
        weapon_mount_slots = collections.defaultdict(
            lambda: collections.defaultdict(
                lambda: collections.defaultdict(int)))
        for ((section_key, system_key, system_sm),
                system) in self.iter_systems(all_systems):
            if not isinstance(system, systems.WeaponsBatterySystem):
                # NOTE(review): bare `next` is a no-op expression here --
                # this was almost certainly meant to be `continue`, so
                # non-battery systems also get asked for weapon mounts.
                next
            weapon_mount_slots[section_key][system_key] = system.get_weapon_mounts(
                system_sm)
        for weapon_idx,weapon in enumerate(self.weapons):
            section = weapon["section"]
            location = weapon["location"]
            size = weapon["size"]
            count = weapon["count"]
            if weapon_mount_slots[section][location][size] >= count:
                weapon_mount_slots[section][location][size] -= count
            else:
                raise Exception(
                    "Too many weapons of size %s mounted at %s %s" %
                    (size, section, location))
            self.weapon_mount_mapping[section][location].append(weapon_idx)

    def to_markdown(self, all_systems, all_weapons):
        """Render the ship as a markdown stat sheet (header table, weapons
        tables per section, then a per-section systems listing)."""
        lines = []
        lines.append("%s-class " % self.name)
        lines.append("SM%s %s " % (self.sm, self.role))
        lines.append("%sm / %sT" % (
            self.values["length"],
            sformat.si_number(self.values["mass"])))
        lines.append("\n")
        st_hp_str = sformat.neq_slashed(self.values["st"],self.values["hp"])
        if self.values.get("shield_hp",0) > 0:
            st_hp_str += " + %.0f" % self.values["shield_hp"]
        dr_str = sformat.neq_slashed(self.values["front_dr"],
            self.values["center_dr"], self.values["rear_dr"])
        if self.values.get("shield_dr",0) > 0:
            dr_str += " + %.0f" % self.values["shield_dr"]
        lines.append("| dST/HP | Hnd/SR | HT | Move | LWt | Load | SM | Occ | dDR | Range | Cost |")
        lines.append("|--------|--------|----|------|-----|------|----|-----|-----|-------|------|")
        lines.append("|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|" % (
            st_hp_str, # hp/str
            "%s/%s" % (self.values["hnd"],self.values["sr"]), # hnd/sr
            self.values["ht"], # ht
            "%sG" % self.values["thrust"], #thrust
            sformat.si_number(self.values["lwt"]), # load weight
            " ",# load , sformat.si_number(self.values["load"]),
            self.sm, #sm
            " ",# occ
            dr_str, # dr
            " ", #range
            sformat.money_number(self.values["cost"]) #cost
            ))
        lines.append("\n")
        lines.append("Weapons")
        lines.append("===")
        # Collect grouped weapons per section, largest first.
        weapons_lines = collections.defaultdict(list)
        for ((weap_section, weap_size, weap_count, weap_is_turret), weap
                ) in self.iter_grouped_mounted_weapons(all_weapons):
            weapons_lines[weap_section].append(
                ((weap_size, weap_count, weap_is_turret), weap))
        for section_lines in weapons_lines.values():
            section_lines.sort(key=lambda x: -x[0][0])
        for section_key in SECTIONS_ORDER:
            lines.append("\n")
            lines.append(section_key)
            lines.append("---")
            lines.append("| # | SM | Weapon | sAcc | Damage | Range | RoF | Rcl |")
            lines.append("|---|----|--------|------|--------|-------|-----|-----|")
            for ((weap_size, weap_count, weap_is_turret), weap) in weapons_lines[section_key]:
                size_str = "%s (%s)" % (weap_size, weap_size - self.sm)
                if weap == None:
                    lines.append("|%s|%s|%s|%s|%s|%s|%s|%s|" % (
                        weap_count, size_str, "No Weapon", "N/A", "N/A", "N/A", "N/A", "N/A"))
                else:
                    weap_data = weap.get_weap_data(weap_size, weap_is_turret)
                    range_str = None
                    # Range formatting depends on the weapon family.
                    if weap_data["type"] == "beam":
                        range_str = sformat.neq_slashed(weap_data["half_damage_range"], weap_data["range"])
                    elif weap_data["type"] == "launcher":
                        range_str = "%.0f (%.0f move for %.0f turns)" % (
                            weap_data["range"],
                            weap_data["speed"],
                            weap_data["turns"])
                    else:
                        range_str = "%.0f" % weap_data["range"]
                    lines.append("|%s|%s|%s|%s|%s|%s|%s|%s|" % (
                        weap_count, # count
                        size_str, # size
                        weap_data["size_label"], # label
                        weap_data.get("sacc", "N/A"), # sAcc
                        weap_data["dmg_label"] + (" (%s)" % # dmg
                            (weap.armor_div if weap.armor_div != 1 else "")),
                        range_str, # range
                        weap.rof, # rof
                        "" # rcl
                        ))
        lines.append("\n")
        lines.append("Systems")
        lines.append("===")
        systems_lines = collections.defaultdict(list)
        for ((section_key, (system_start_key, system_end_key), system_sm,
                num_systems), system) in self.iter_grouped_systems(all_systems):
            systems_lines[section_key].append(
                (((system_start_key, system_end_key), system_sm,
                    num_systems), system))
        for section_key in SECTIONS_ORDER:
            lines.append("\n")
            lines.append(section_key)
            lines.append("---")
            for (((system_start_key, system_end_key), system_sm, num_systems),
                    system) in sorted(systems_lines[section_key],
                        key=lambda x: x[0][0]):
                system_key_label = None
                if system_start_key == system_end_key:
                    system_key_label = system_start_key
                else:
                    system_key_label = "%s-%s" % (
                        system_start_key,system_end_key)
                pp_draw = num_systems * system.pp_draw
                pp_gen = num_systems * system.get_values_delta(system_sm
                    ).get("pp_gen",0)
                system_detail_lines = system.details_to_markdown_lines(
                    system_sm, num_systems)
                if isinstance(system, systems.WeaponsBatterySystem):
                    # size -> weapon_id -> is_turret -> count
                    mounted_weapons = collections.defaultdict(
                        lambda: collections.defaultdict(
                            lambda: collections.defaultdict(int)))
                    system_key_range = range(
                        system_key_to_indx(system_start_key),
                        system_key_to_indx(system_end_key)+1)
                    weapon_strs = []
                    for system_key_indx in system_key_range:
                        pp_draw_add = 0
                        location = indx_to_system_key(system_key_indx)
                        for weapon_idx in self.weapon_mount_mapping[
                                section_key][location]:
                            weapon_info = self.weapons[weapon_idx]
                            weapon = all_weapons[weapon_info["weapon"]]
                            # A battery draws 1 pp when any mounted weapon
                            # needs power.
                            if weapon.draws_power:
                                pp_draw_add = 1
                            weapon_strs.append(
                                "%sx%s" % (
                                    weapon_info["count"],
                                    weapon.get_weap_data(
                                        weapon_info["size"],
                                        weapon_info["is_turret"])["size_label"]))
                        pp_draw += pp_draw_add
                    system_detail_lines.append("Mounted: %s" % ", ".join(weapon_strs))
                system_detail_lines = ("\t - %s" % s for s in
                    system_detail_lines)
                # '!' per power point drawn, '+' per power point generated.
                system_key_label += "!" * pp_draw
                system_key_label += "+" * pp_gen
                range_size = system_key_to_indx(system_end_key) - \
                    system_key_to_indx(system_start_key) + 1
                name_str = system.name
                if range_size != num_systems:
                    name_str += " x%s" % num_systems
                lines.append("* [%s] %s" % (system_key_label, name_str))
                lines += system_detail_lines
        return "\n".join(lines)
|
985,084 | e43142cffad32a73234044a8aabb2de09e34492d | from ConfigParser import SafeConfigParser
class abstract_parser:
    """Base class for config-file parsers.

    Subclasses implement parse(); retrieve_parser() supplies a loaded
    SafeConfigParser for them to work with.
    """

    def retrieve_parser(self, filename):
        """Return a SafeConfigParser populated from *filename*."""
        config = SafeConfigParser()
        config.read(filename)
        return config

    def parse(self, filename):
        """Subclasses must override this to extract their data."""
        raise NotImplementedError
985,085 | 7b52f7025b0c82a73f6b53d81115e74edfcb024d | import pyfirmata, os, time, threading, os.path, sys
from roasts.models import Roast, RoastSnapshot
THERMO_ENV_DATA = 0x0A
THERMO_BEAN_DATA = 0x0B
def debug(str):
    """Print a debug message to stdout.

    NOTE(review): the parameter name shadows the builtin `str`.
    """
    print str
class Roaster:
    """
    RoasterBoard is an Arduino firmata client communicating with the hardware roaster itself.

    A background thread polls the active Roast row in the database,
    software-PWMs the roaster components accordingly, and records a
    RoastSnapshot once per 10-second cycle.
    """
    def __init__(self):
        # Smoothed and raw thermocouple readings (env and bean probes).
        self.envTemp = 0
        self.beanTemp = 0
        self.lastEnvTemp = 0
        self.lastBeanTemp = 0
        # all roaster tuples are pin/spec (digital pin number, duty 0-10)
        self.components = {
            "heater": (13, 0),
            "drawfan": (12, 0),
            "scrollfan": (11, 0),
            "light": (8, 10),
            "drum_low": (9, 0),
            "drum_high": (10, 0)
        }
        self.thread = None
        self.roast = None
        self.isRoasting = False
        self.board = None
    def start(self):
        """Spawn the control loop thread.

        NOTE(review): isRoasting is set True *after* the thread starts;
        run() may observe False first and exit immediately (race).
        """
        self.thread = threading.Thread(name="roaster", target=self.run)
        self.thread.start()
        self.isRoasting = True
    def stop(self):
        """Ask the control loop to finish its current cycle and exit."""
        self.isRoasting = False
    def run(self):
        """Control loop: poll the active roast and drive the hardware."""
        debug("loading Arduino")
        self.loadBoard()
        while self.isRoasting:
            # loop in 10s chunks of time
            for i in range(10):
                roast = None
                results = Roast.objects.filter(is_active_roast=1)
                if results:
                    roast = results[0]
                # starting a new roast
                if self.roast == None and roast:
                    debug("New roast!")
                    self.roast = roast
                # the previously running roast was stopped
                if self.roast and not roast:
                    debug("Previously running roast was stopped")
                    self.roast = None
                    break
                # get out of this loop if no active roast
                if not roast:
                    debug("No roast. All off.")
                    time.sleep(1)
                    # NOTE(review): "heater" is set twice here and
                    # "light" is never touched.
                    self.setWhenDifferent("heater", 0)
                    self.setWhenDifferent("drawfan", 0)
                    self.setWhenDifferent("scrollfan", 0)
                    self.setWhenDifferent("heater", 0)
                    self.setWhenDifferent("drum_low", 0)
                    self.setWhenDifferent("drum_high", 0)
                    self.reconcile(0)
                    break
                # Null DB fields are treated as "off".
                heater = roast.heater if roast.heater else 0
                drawfan = roast.drawfan if roast.drawfan else 0
                scrollfan = roast.scrollfan if roast.scrollfan else 0
                drum = roast.drum if roast.drum else 0
                env_temp = self.envTemp
                bean_temp = self.beanTemp
                print "heater = %s drawfan = %s scrollfan = %s drum = %s, env_temp = %s bean_temp = %s" % (heater, drawfan, scrollfan, drum, env_temp, bean_temp)
                # NOTE(review): "heater" is set twice in this group too.
                self.setWhenDifferent("heater", heater)
                self.setWhenDifferent("drawfan", drawfan)
                self.setWhenDifferent("scrollfan", scrollfan)
                self.setWhenDifferent("heater", heater)
                # stop drum
                if drum == 0:
                    self.setWhenDifferent("drum_low", 0)
                    self.setWhenDifferent("drum_high", 0)
                # low
                if drum == 1:
                    self.setWhenDifferent("drum_low", 10)
                    self.setWhenDifferent("drum_high", 0)
                # high
                if drum == 2:
                    self.setWhenDifferent("drum_low", 0)
                    self.setWhenDifferent("drum_high", 10)
                self.reconcile(i)
                # Persist one telemetry sample per second of the cycle.
                snapshot = RoastSnapshot(roast=roast, heater=heater, drawfan=drawfan, scrollfan=scrollfan, drum=drum, env_temp=env_temp, bean_temp=bean_temp)
                snapshot.save()
                time.sleep(1)
        self.board.exit()
    def reconcile(self, tick=0):
        """Apply each component's duty (0-10) for this 1-second tick.

        Software PWM with a 10-second period: row = duty level, column =
        tick index, cell = digital on/off for that second.
        """
        pwm_profile = [
            [0,0,0,0,0,0,0,0,0,0], # 0 -- all off
            [1,0,0,0,0,0,0,0,0,0], # 1 -- on 10%
            [1,0,0,0,0,1,0,0,0,0], # 2 -- on 20%
            [1,0,0,1,0,0,1,0,0,0], # 3 -- on 30%
            [1,0,0,1,0,0,1,0,0,1], # 4 -- on 40%
            [1,0,1,0,1,0,1,0,1,0], # 5 -- on 50%
            [1,1,1,0,1,0,1,0,1,0], # 6 -- on 60%
            [1,1,1,0,1,0,1,1,1,0], # 7 -- on 70%
            [1,1,1,1,1,0,1,1,1,0], # 8 -- on 80%
            [1,1,1,1,1,1,1,1,1,0], # 9 -- on 90%
            [1,1,1,1,1,1,1,1,1,1], # 10 -- on 100%
        ]
        for key in self.components:
            pin, value = self.components[key]
            onOff = pwm_profile[value][tick]
            self.board.digital[pin].write(onOff)
    def setWhenDifferent(self, key, desiredValue):
        """Update a component's duty spec only when it actually changed."""
        if self.components.has_key(key):
            pin, currentValue = self.components[key]
            if desiredValue != currentValue:
                print "%s spec change from %s to %s" % (key, currentValue, desiredValue)
                self.components[key] = (pin, desiredValue)
        else:
            # NOTE(review): print with a comma prints a tuple here.
            print "roaster has no component: %s", key
    def addEnvTemp(self, temp):
        """Record an env-probe reading, smoothing over the previous one."""
        if self.lastEnvTemp == 0:
            self.lastEnvTemp = temp
        avg = (self.lastEnvTemp + temp) / 2
        self.envTemp = avg
        self.lastEnvTemp = temp
    def addBeanTemp(self, temp):
        """Record a bean-probe reading, smoothing over the previous one."""
        if self.lastBeanTemp == 0:
            self.lastBeanTemp = temp
        avg = (self.lastBeanTemp + temp) / 2
        self.beanTemp = avg
        self.lastBeanTemp = temp
    def loadBoard(self):
        """Locate the Arduino serial device and set up Firmata handlers."""
        serialPath = ""
        for i in range(5):
            # osx paths for arduino
            path = "/dev/cu.wchusbserial142%s" % i
            if os.path.exists(path):
                serialPath = path
                break
            # raspberry pi paths for arduino
            path = "/dev/ttyUSB%s" % i
            if os.path.exists(path):
                serialPath = path
                break
        if serialPath == "":
            raise Exception("no serial path found for roaster")
        def getTemp(args):
            # Reassemble a temperature from four 7-bit Firmata data bytes.
            temp = args[0]
            temp = temp + args[1] * 128
            temp = temp + args[2] * 256
            temp = temp + args[3] * 512
            return temp
        def printEnv(*args, **kwargs):
            self.addEnvTemp(getTemp(args))
        def printBean(*args, **kwargs):
            self.addBeanTemp(getTemp(args))
        print "using %s" % path
        self.board = pyfirmata.Arduino(path)
        # custom firmata events sent from arduino contains temperature data
        self.board.add_cmd_handler(THERMO_ENV_DATA, printEnv)
        self.board.add_cmd_handler(THERMO_BEAN_DATA, printBean)
        for c in self.components:
            self.board.digital[self.components[c][0]].mode = pyfirmata.OUTPUT
        self.board.digital[13].mode = pyfirmata.OUTPUT
        # Iterator thread keeps the serial link drained so handlers fire.
        it = pyfirmata.util.Iterator(self.board)
        it.start()
|
985,086 | a72fec2c015d4054780e66e38184f6b304f36a98 | import numpy as np
import DtoF as DF
import JONSWAP as J
import OmegaTheta as OT
import rieneckerfenton as RF
def create_gauss(kx,ky,**kwargs):
    """Placeholder for a Gaussian wave-field generator.

    NOTE(review): not implemented -- returns 0 rather than an
    (eta, phi) pair like the other create_* functions.
    """
    return 0
def create_jonswap(kx,ky,**kwargs):
    """Generate a random linear wave field from a JONSWAP spectrum.

    kx, ky -- 1-D wavenumber axes defining the spectral grid.
    kwargs['Param'] -- dict with keys g, h, alpha_p, omega_p, gamma,
        dir_spr, theta_p and hs (significant wave height).

    Returns (etar, phir): real-valued surface elevation and surface
    potential on the physical grid, rescaled so std(etar) == hs/4.
    """
    #-- The Jonswap Parameters
    Param = kwargs.get('Param',None)
    Nx = len(kx)
    Ny = len(ky)
    omega, theta = OT.OmegaTheta(kx, ky, g=Param['g'], h=Param['h'])
    Pk, Sj, D = J.JONSWAP(omega, theta, g=Param['g'], alpha_p=Param['alpha_p'], omega_p=Param['omega_p'], \
        gamma=Param['gamma'],ThetaPar= Param['dir_spr'], theta_shift=Param['theta_p']);
    #Introduce random phase
    hetaD = np.multiply(np.sqrt(Pk),np.exp(np.multiply(1j*2*np.pi,np.random.rand(Nx,Ny))))
    #Convert to wavenumber space
    hetar, hphir = DF.DtoF(hetaD, kx, ky, g=Param['g'], h=Param['h'])
    hetar = hetar.T
    hphir = hphir.T
    # Compute wave field in the physical space
    #NOTE need to use fourier shift as hetar was not generated using the fft
    etar = np.fft.ifft2(np.fft.ifftshift(hetar))
    etar = etar.real
    phir = np.fft.ifft2(np.fft.ifftshift(hphir))
    phir = phir.real
    # Rescale the wave field to the desired eta variance
    rescale = np.std(etar)/(Param['hs']/4)
    etar = etar/rescale
    phir = phir/rescale
    return etar, phir
def create_rieneckerfenton(kx,ky,**kwargs):
    """Delegate to the Rienecker & Fenton (1981) wave generator.

    Returns the (eta, phi) pair produced by RF.RieneckerFenton; kwargs
    are read for 'Param' but currently unused beyond that.
    """
    #-- Use the parameters as in Rienecker & Fenton (1981)
    Param = kwargs.get('Param', None)
    eta, phi = RF.RieneckerFenton(kx,ky)
    return eta, phi
def create_stokes(kx,ky,**kwargs):
    """Placeholder for a Stokes wave generator.

    NOTE(review): not implemented -- returns 0 rather than an
    (eta, phi) pair like the other create_* functions.
    """
    return 0
|
985,087 | d08eec0cbf25f868fa04841bf489d28f27af42d4 | #!/usr/bin/env python3
def fibs(maxnumber):
    """Yield Fibonacci numbers (1, 2, 3, 5, ...) strictly below maxnumber."""
    a, b = 1, 2
    while a < maxnumber:
        yield a
        a, b = b, a + b

# Project Euler 2: sum of the even-valued Fibonacci terms below four million.
print(sum(term for term in fibs(4000000) if term % 2 == 0))
|
985,088 | d512aba7e621a30f8ba7eb5e46f0952ab67c3c61 | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
np.random.seed(0)
n = 15
x = np.linspace(0,10,n) + np.random.randn(n)/5
y = np.sin(x)+x/6 + np.random.randn(n)/10
X_train, X_test, y_train, y_test = train_test_split(x, y, random_state=0)
# You can use this function to help you visualize the dataset by
# plotting a scatterplot of the data points
# in the training and test sets.
#def part1_scatter():
import matplotlib.pyplot as plt
plt.figure(figsize=(5,4))
plt.scatter(X_train, y_train, label='training data',marker= 'o')
#plt.scatter(X_test, y_test, label='test data')
plt.legend(loc=4);
plt.xlabel('Feature value (x)')
plt.ylabel('Target value (y)')
plt.show()
# NOTE: Uncomment the function below to visualize the data, but be sure
# to **re-comment it before submitting this assignment to the autograder**.
#part1_scatter()
#def answer_one():
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
result_output = np.empty((0,100), float)
# Your code here
for degree in [0, 1, 3, 6, 9]:
poly = PolynomialFeatures(degree=degree)
X_poly = poly.fit_transform(x.reshape(-1,1)) #reshare x with 1D to 2D & transform into Poly function
X_train, X_test, y_train, y_test = train_test_split(X_poly, y,
random_state = 0)
linreg = LinearRegression().fit(X_train, y_train)
print('(poly deg {}) linear model coeff (w):\n{}'
.format(degree,linreg.coef_))
print('(poly deg {}) linear model intercept (b): {:.3f}'
.format(degree,linreg.intercept_))
print('(poly deg {}) R-squared score (training): {:.3f}'
.format(degree, linreg.score(X_train, y_train)))
print('(poly deg {}) R-squared score (test): {:.3f}\n'
.format(degree, linreg.score(X_test, y_test)))
#prediction for 100 new inputs
X_to_predict = np.linspace(0,10,100)
X_to_predict_poly = poly.fit_transform(X_to_predict.reshape(-1,1)) #reshare x with 1D to 2D & transform into Poly function
y_predict_output_poly = linreg.predict(X_to_predict_poly).reshape(1,-1) #reshare to transpose array as per assignment requirement
result_output = np.append(result_output, np.array(y_predict_output_poly), axis=0)
#plot graph
import matplotlib.pyplot as plt
plt.figure(figsize=(10,5))
plt.plot(X_train[:,[1]], y_train, 'o', label='training data', markersize=10)
plt.plot(X_test[:,[1]], y_test, 'o', label='test data', markersize=10)
for i,degree in enumerate([0,1,3,6,9]):
plt.plot(np.linspace(0,10,100), result_output[i], alpha=0.8, lw=2, label='degree={}'.format(degree))
plt.ylim(-1,2.5)
plt.legend(loc=4)
#Question2: training/test R^2 for polynomial degrees 0..9.
# FIX: `sklearn.metrics.regression` was a private module removed in
# scikit-learn 0.24; `r2_score` is (and always was) importable from the
# public `sklearn.metrics` namespace.
from sklearn.metrics import r2_score
# Pre-size the score arrays; zeros (not np.empty garbage) and every slot is
# overwritten in the loop below.
r2_train = np.zeros(10)
r2_test = np.zeros(10)
for i in range(10):
    # Expand the scalar feature into polynomial terms of degree i.
    poly = PolynomialFeatures(degree=i)
    X_poly = poly.fit_transform(x.reshape(-1,1)) #reshape x from 1D to 2D & transform into poly features
    X_train, X_test, y_train, y_test = train_test_split(X_poly, y,
                                                   random_state = 0)
    linreg = LinearRegression().fit(X_train, y_train)
    # R^2 on the data the model was fit on, and on the held-out split.
    r2_train[i] = r2_score(y_train, linreg.predict(X_train))
    r2_test[i] = r2_score(y_test, linreg.predict(X_test))
    print('(poly deg {}) R-squared score (training): {:.3f}'
         .format(i, r2_train[i]))
    print('(poly deg {}) R-squared score (test): {:.3f}\n'
         .format(i, r2_test[i]))
# 2 x 10 array: row 0 = training scores, row 1 = test scores.
result_output_2 = np.array((r2_train,r2_test))
#Question3
# Plot R^2 vs polynomial degree to eyeball under/over-fitting regions.
import matplotlib.pyplot as plt
plt.figure(figsize=(10,5))
plt.plot(np.linspace(0,9,10), r2_train, label='training data', markersize=10)
plt.plot(np.linspace(0,9,10), r2_test, label='test data', markersize=10)
plt.xlabel('Degree')
plt.ylabel('R-square score')
plt.legend(loc='best')
# NOTE(review): presumably the degrees grouped by fit quality read off the
# plot above (underfitting / overfitting / good generalization) — confirm
# against the assignment's expected answer format.
fit_assessment = [0,1,2,3,4,8,9,5,6,7]
#Question4: degree-12 OLS vs Lasso on the same train/test split.
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import Lasso, LinearRegression
# FIX: `sklearn.metrics.regression` was removed in scikit-learn 0.24;
# import r2_score from the public `sklearn.metrics` namespace instead.
from sklearn.metrics import r2_score
poly = PolynomialFeatures(degree=12)
X_poly_12 = poly.fit_transform(x.reshape(-1,1)) #reshape x from 1D to 2D & build degree-12 design matrix
X_train, X_test, y_train, y_test = train_test_split(X_poly_12, y,
                                                   random_state = 0)
# Unregularized least squares: expected to overfit badly at degree 12.
linreg = LinearRegression().fit(X_train, y_train)
estimated_y_test_12 = linreg.predict(X_test)
r2_test_score_12 = r2_score(y_test, estimated_y_test_12)
print('(poly deg 12) R-squared score (test): {:.3f}\n'
     .format(r2_test_score_12))
# L1-regularized fit: drives most coefficients to exactly zero.
linlasso = Lasso(alpha=0.01, max_iter = 10000).fit(X_train, y_train)
print('lasso regression linear model intercept: {}'
     .format(linlasso.intercept_))
print('lasso regression linear model coeff:\n{}'
     .format(linlasso.coef_))
print('Non-zero features: {}'
     .format(np.sum(linlasso.coef_ != 0)))
print('R-squared score (training): {:.3f}'
     .format(linlasso.score(X_train, y_train)))
print('R-squared score (test): {:.3f}\n'
     .format(linlasso.score(X_test, y_test)))
estimated_lasso_y_test_12 = linlasso.predict(X_test)
lasso_r2_test_score_12 = r2_score(y_test, estimated_lasso_y_test_12)
print('(poly deg 12) R-squared score (test): {:.3f}\n'
     .format(lasso_r2_test_score_12))
# [OLS test R^2, Lasso test R^2]
result_output_4 = [r2_test_score_12, lasso_r2_test_score_12]
|
985,089 | 54725795ec472d357ccaf1372592f6de339b100d | #!/usr/bin/env python3
import xmlrpc.client
import ssl
import os
import sys
import gettext
DEBUG=True
class UMount:
	"""Unmount the user's n4d-managed network shares at session end.

	Talks to the local n4d XML-RPC server (https://localhost:9779) and asks
	its MountManager plugin to unmount each well-known share directory
	under the user's home.
	"""
	def __init__(self):
		# n4d serves over HTTPS with a self-signed certificate, so
		# certificate verification is deliberately disabled.
		context=ssl._create_unverified_context()
		self.client=xmlrpc.client.ServerProxy("https://localhost:9779",allow_none=True,context=context)
		self.user=os.environ["USER"]
		self.home="/home/%s"%self.user
		self.dirs={}  # name -> absolute path, filled in by get_paths()
	#def init
	def dprint(self,msg):
		"""Print a tagged debug message when the module DEBUG flag is set."""
		if DEBUG:
			print("[UMOUNT] %s"%msg)
	def get_ticket(self):
		"""Ask n4d to create an auth ticket and read it from /run/n4d/tickets.

		Returns the ticket string, or None if the ticket file was not created.
		"""
		self.client.create_ticket(self.user)
		ticket="/run/n4d/tickets/%s"%self.user
		if not os.path.exists(ticket):
			return None
		f=open(ticket)
		t=f.readline().strip("\n")
		f.close()
		return t
	#def get_ticket
	def get_paths(self):
		"""Resolve localized share directory paths under the user's home.

		NOTE(review): exec()-ing ~/.config/user-dirs.dirs works only because
		its shell-style lines (NAME="$HOME/Dir") happen to be valid Python
		string assignments; executing a user-writable file is a security
		hazard and any parse failure lands in the bare except below.
		Returns True on success, False otherwise.
		"""
		try:
			exec(open(self.home+"/.config/user-dirs.dirs").read())
			# split("/")[1] picks the directory name after the "$HOME" prefix.
			documents=locals()["XDG_DOCUMENTS_DIR"].split("/")[1]
			desktop=locals()["XDG_DESKTOP_DIR"].split("/")[1]
			gettext.textdomain("xdg-user-dirs")
			share=gettext.gettext("Share")
			gettext.textdomain("homelinker")
			teachers_share=gettext.gettext("Teachers_Share")
			groups_share=gettext.gettext("Groups_Share")
			data_alum=gettext.gettext("Data_Alum")
			self.dirs["desktop"]=self.home+"/"+desktop
			self.dirs["documents"]=self.home+"/"+documents
			self.dirs["share"]=self.home+"/"+share
			self.dirs["teachers_share"]=self.home+"/"+teachers_share
			self.dirs["groups_share"]=self.home+"/"+groups_share
			self.dirs["data_alum"]=self.home+"/"+data_alum
			return True
		except Exception as e:
			return False
	#def get_paths
	def umount(self,ticket):
		"""Ask the MountManager plugin to unmount every resolved directory."""
		user=(self.user,ticket)
		for item in self.dirs:
			path=self.dirs[item]
			self.dprint("Umounting %s ..."%path)
			ret=self.client.restricted_umount(user,"MountManager",self.user,path,True)
			self.dprint(ret)
	#umount
# class UMount
if __name__=="__main__":
	# Only run for regular user accounts.
	# NOTE(review): the 1042 uid threshold is site-specific — confirm it
	# matches this deployment's first non-system uid.
	if os.getuid() < 1042:
		sys.exit(0)
	um=UMount()
	ticket=um.get_ticket()
	rpaths=um.get_paths()
	# Exit quietly (status 0) on either failure: unmounting is best-effort.
	if not rpaths:
		um.dprint("[!] Failed to get paths")
		sys.exit(0)
	if not ticket:
		um.dprint("[!] Failed to get ticket")
		sys.exit(0)
	um.umount(ticket)
|
985,090 | 28cdfe398d791ea3ff28cf70ff7fd4f815457e8c | cube=[value**3 for value in range(1,11)]
# Show the whole list of cubes first, then each value on its own line.
print(cube)
for cubed in cube:
    print(cubed)
|
985,091 | 8efa6e07e52d7743d05b83be79a6bde554a884e8 | import fastapi
import pytest
import pytest_check as check
import requests
# FIX: removed a stray top-level call `requests.Response.json()` — it invoked
# an unbound method on the Response *class* (no instance), so importing this
# module raised a TypeError before any test could run.

# Router mirroring the application's /nodes endpoints for these tests.
router = fastapi.APIRouter(
    prefix="/nodes",
    tags=["nodes"]
)
def test_query_nodes(client):
    """Listing /nodes/ succeeds and returns the expected payload sizes."""
    response = client.get("/nodes/")
    check.is_true(200 <= response.status_code < 300)
    payload = response.json()
    check.is_true(len(payload["data"]) == 5)
    check.is_true(len(payload["status"]) == 6)
@pytest.mark.parametrize(
    "node_id, want_length",
    [
        ["g1-n1", 1],
        ["xx", 1],
        ["xxyy", 0],
    ]
)
def test_query_get_node(client, node_id, want_length):
    """GET /nodes/{node_id} returns `want_length` matching nodes.

    FIX: the original ignored both parametrized arguments — it always
    queried the hard-coded id "g1-n1" and asserted a result length of 1,
    so the "xx" and "xxyy" cases never exercised anything.
    """
    resp = client.get(f"/nodes/{node_id}")
    check.is_true(200 <= resp.status_code < 300)
    res = resp.json()
    check.is_true(len(res["data"]) == want_length)
    check.is_true(len(res["status"]) == 6)
    if want_length:
        # NOTE(review): assuming partial-id queries ("xx") return nodes whose
        # id contains the query string — verify against the API's matching rules.
        check.is_true(node_id in res["data"][0]["id"])
def test_query_related_nodes(client):
    """GET /nodes/related/{id} for a known node returns its 3 neighbours."""
    known_node = "g1-n1"
    response = client.get(f"/nodes/related/{known_node}")
    check.is_true(200 <= response.status_code < 300)
    payload = response.json()
    check.is_true(len(payload["data"]) == 3)
    check.is_true(len(payload["status"]) == 6)
|
985,092 | 46b1476607bc1a4fc026874738a754bc538212dc | """Pipeline functions for channel invitations"""
from django.db import transaction
from channels.api import get_admin_api
from channels.models import ChannelInvitation
def resolve_outstanding_channel_invites(
    strategy, backend, *args, user=None, is_new=False, **kwargs
):  # pylint: disable=unused-argument
    """
    Resolves outstanding channel invitations when a user joins

    Args:
        strategy (social_django.strategy.DjangoStrategy): the strategy used to authenticate
        backend (social_core.backends.base.BaseAuth): the backend being used to authenticate
        user (User): the current user
        is_new (bool): True if the user just got created

    Returns:
        dict: an empty dict, as expected of a social-auth pipeline step
    """
    # Only newly-created users can have outstanding invites to redeem.
    if not is_new:
        return {}

    admin_api = get_admin_api()

    # resolve all channel invitations by adding the user as a contributor and a subscriber
    # Only ids are fetched here; the row itself is re-fetched under a lock below.
    for invite_id in ChannelInvitation.objects.filter(
        email=user.email, redeemed=False
    ).values_list("id", flat=True):
        # redeem the update such that any error here will rollback the redemption
        with transaction.atomic():
            # select_for_update() inside the transaction prevents concurrent
            # pipeline runs from redeeming the same invite twice.
            channel_invite = ChannelInvitation.objects.select_for_update().get(
                id=invite_id
            )
            channel_invite.user = user
            channel_invite.redeemed = True
            channel_invite.save()

            admin_api.add_contributor(user.username, channel_invite.channel.name)
            admin_api.add_subscriber(user.username, channel_invite.channel.name)

    return {}
|
985,093 | a421f790ffc4376e8bb1e2cd58c4488bbe770f53 | '''
Two kinds of function-returning functions:
1. Returning a function from a function, where the returned function is
   defined elsewhere and is not part of the current function.
2. Returning an inner function of the function that was called. This helps
   develop a concept called lazy initialization.
'''
# FIX: two defects in the original —
#  1. `@track` was applied before `track` was defined, raising NameError at
#     import time; the decorator must be defined first.
#  2. The wrapper was declared as `callf(func)`, shadowing the decorated
#     function and referencing undefined `args`/`kwargs`; it must accept
#     `*args, **kwargs` and forward them.
def track(func):
    """Decorator that prints a message before delegating to `func`."""
    def callf(*args, **kwargs):
        print('calling')
        r = func(*args, **kwargs)
        return r
    return callf

@track
def square(x):
    """Return x squared (wrapped by `track`)."""
    return x*x

if __name__ == '__main__':
    a = square(10)
985,094 | 9a24c8253a38ae57647f4b29a0870ab209349714 | import torch
import numpy as np
from models.modules.loss import GANLoss,FilterLoss
from skimage.color import rgb2hsv,hsv2rgb
from scipy.signal import convolve2d
import time
from scipy.ndimage.morphology import binary_opening
from sklearn.feature_extraction.image import extract_patches_2d
from utils.util import IndexingHelper, Return_Translated_SubImage, Return_Interpolated_SubImage
from cv2 import dilate
class Optimizable_Temperature(torch.nn.Module):
    """A learnable temperature stored in log-space, so that optimizing the
    underlying parameter always yields a strictly positive temperature."""

    def __init__(self, initial_temperature=None):
        super(Optimizable_Temperature, self).__init__()
        self.log_temperature = torch.nn.Parameter(data=torch.zeros([1]).type(torch.cuda.DoubleTensor))
        if initial_temperature is not None:
            # Seed the parameter with log(T0) so forward() starts at T0.
            seed = torch.tensor(initial_temperature).type(torch.cuda.DoubleTensor)
            self.log_temperature.data = torch.log(seed)

    def forward(self):
        # Map back from log-space; result is always > 0.
        return torch.exp(self.log_temperature)
class SoftHistogramLoss(torch.nn.Module):
    """Differentiable histogram / kernel-density / dictionary matching loss.

    Compares a soft (temperature-smoothed) histogram or KDE of the produced
    image (grayscale values or flattened patches) against the histogram of a
    reference image, via KL divergence; in `dictionary_not_histogram` mode it
    instead scores distances to a dictionary of reference patches.
    NOTE(review): CUDA-only — tensors are cast to torch.cuda.* types throughout.
    """
    def __init__(self,bins,min,max,desired_hist_image_mask=None,desired_hist_image=None,gray_scale=True,input_im_HR_mask=None,patch_size=1,automatic_temperature=False,
                 image_Z=None,temperature=0.05,dictionary_not_histogram=False,no_patch_DC=False,no_patch_STD=False):
        self.temperature = temperature#0.05**2#0.006**6
        self.exp_power = 2#6
        self.SQRT_EPSILON = 1e-7
        super(SoftHistogramLoss,self).__init__()
        # min correspond to the CENTER of the first bin, and max to the CENTER of the last bin
        self.device = torch.device('cuda')
        self.bin_width = (max-min)/(bins-1)
        self.max = max
        self.no_patch_DC = no_patch_DC
        self.no_patch_STD = no_patch_STD
        assert no_patch_DC or not no_patch_STD,'Not supporting removing of only patch STD without DC'
        self.temperature_optimizer = automatic_temperature
        if automatic_temperature:
            self.optimizable_temperature = Optimizable_Temperature(self.temperature)
            self.image_Z = image_Z
        else:
            self.temperature = torch.tensor(self.temperature).type(torch.cuda.DoubleTensor)
        self.bin_centers = torch.linspace(min,max,bins)
        self.gray_scale = gray_scale
        self.patch_size = patch_size
        self.num_dims = 3
        self.KDE = not gray_scale or patch_size>1 # Using Kernel Density Estimation rather than histogram
        if gray_scale:
            self.num_dims = self.num_dims//3
            self.bins = 1. * self.bin_centers.view([1] + list(self.bin_centers.size())).type(torch.cuda.DoubleTensor)
            if desired_hist_image is not None:
                # Collapse RGB to a single channel by averaging, then flatten.
                desired_hist_image = [hist_im.mean(1, keepdim=True).view([-1,1]) for hist_im in desired_hist_image]
        if patch_size>1:
            assert gray_scale and (desired_hist_image is not None),'Not supporting color images or patch histograms for model training loss for now'
            self.num_dims = patch_size**2
            DESIRED_HIST_PATCHES_OVERLAP = (self.num_dims-patch_size)/self.num_dims # Patches overlap should correspond to entire patch but one row/column.
            desired_im_patch_extraction_mat = [ReturnPatchExtractionMat(hist_im_mask,patch_size=patch_size,device=self.device,
                patches_overlap=DESIRED_HIST_PATCHES_OVERLAP) for hist_im_mask in desired_hist_image_mask]
            desired_hist_image = [torch.sparse.mm(desired_im_patch_extraction_mat[i],desired_hist_image[i]).view([self.num_dims,-1,1]) for i in range(len(desired_hist_image))]
            # desired_hist_image = [self.Desired_Im_2_Bins(hist_im,prune_only=True) for hist_im in desired_hist_image]
            desired_hist_image = torch.cat(desired_hist_image,1)
            if self.no_patch_DC:
                # Remove per-patch mean so only patch structure is compared.
                desired_hist_image = desired_hist_image-torch.mean(desired_hist_image,dim=0,keepdim=True)
                if self.no_patch_STD:
                    self.mean_patches_STD = torch.max(torch.std(desired_hist_image, dim=0, keepdim=True),other=torch.tensor(1/255).to(self.device))
                    desired_hist_image = (desired_hist_image/self.mean_patches_STD)
                    self.mean_patches_STD = 1*self.mean_patches_STD.mean().item()
                    desired_hist_image = desired_hist_image*self.mean_patches_STD#I do that to preserve the original (pre-STD normalization) dynamic range, to avoid changing the kernel support size.
            self.desired_hist_image_mask = None
        else:
            if len(desired_hist_image)>1:   print('Not supproting multiple hist image versions for non-patch histogram/dictionary. Removing extra image versions.')
            desired_hist_image,desired_hist_image_mask = desired_hist_image[0],desired_hist_image_mask[0]
            self.desired_hist_image_mask = torch.from_numpy(desired_hist_image_mask).to(self.device).view([-1]).type(torch.ByteTensor) if desired_hist_image_mask is not None else None
            if desired_hist_image is not None:
                desired_hist_image = 1 * desired_hist_image.view([self.num_dims, -1, 1])
        if self.KDE:
            if self.desired_hist_image_mask is not None:
                desired_hist_image = desired_hist_image[:,self.desired_hist_image_mask,:]
            # The bins are now simply the multi-dimensional pixels/patches. So now I remove redundant bins, by checking if there is duplicacy:
            # if patch_size==1:#Otherwise I already did this step before for each image version, and I avoid repeating this pruning for the entire patches collection for memory limitation reasons.
            self.bins = self.Desired_Im_2_Bins(desired_hist_image)
        if not dictionary_not_histogram:
            self.loss = torch.nn.KLDivLoss()
        if patch_size>1:
            self.patch_extraction_mat = ReturnPatchExtractionMat(input_im_HR_mask.data.cpu().numpy(),patch_size=patch_size,device=self.device,patches_overlap=0.5)#.to(self.device)
            self.image_mask = None
        else:
            self.image_mask = input_im_HR_mask.view([-1]).type(torch.ByteTensor) if input_im_HR_mask is not None else None
        self.dictionary_not_histogram = dictionary_not_histogram
        if not dictionary_not_histogram:
            if not automatic_temperature and desired_hist_image is not None:
                with torch.no_grad():
                    self.desired_hists_list = [self.ComputeSoftHistogram(desired_hist_image,image_mask=self.desired_hist_image_mask,return_log_hist=False,
                        reshape_image=False,compute_hist_normalizer=True).detach()]
        else:
            self.desired_hist_image = desired_hist_image

    def Feed_Desired_Hist_Im(self,desired_hist_image):
        """Recompute the reference histograms from new desired image(s)."""
        self.desired_hists_list = []
        for desired_im in desired_hist_image:
            if self.gray_scale:
                desired_im = desired_im.mean(0, keepdim=True).view([1,-1, 1])
            with torch.no_grad():
                self.desired_hists_list.append(self.ComputeSoftHistogram(desired_im,image_mask=self.desired_hist_image_mask,return_log_hist=False,
                    reshape_image=False,compute_hist_normalizer=True).detach())

    def Desired_Im_2_Bins(self,desired_im):
        """Prune near-duplicate pixels/patches of `desired_im` into KDE bins.

        Splits the image into progressively more sub-images on memory failure
        (the pairwise-distance matrix is quadratic in the number of samples).
        """
        image_2_big = True
        num_sub_images = 1
        while image_2_big:
            try:
                bins = []
                sub_image_sizes = [desired_im.size(1)//num_sub_images]*(num_sub_images-1)
                sub_image_sizes += ([desired_im.size(1)-sum(sub_image_sizes)] if desired_im.size(1)-sum(sub_image_sizes)>0 else [])
                sub_images = torch.split(desired_im,sub_image_sizes,dim=1)
                for im in sub_images:
                    repeated_elements_mat = (im.view([self.num_dims, -1, 1]) - im.view([im.size(0)] + [1, -1])).abs()
                    repeated_elements_mat = (repeated_elements_mat < self.bin_width / 2).all(0)
                    # Zero the diagonal: every sample trivially matches itself.
                    repeated_elements_mat = torch.mul(repeated_elements_mat,(1 - torch.diag(torch.ones([repeated_elements_mat.size(0)]))).type(
                        repeated_elements_mat.dtype).to(repeated_elements_mat.device))
                    repeated_elements_mat = torch.triu(repeated_elements_mat).any(1) ^ 1
                    bins.append(im[:, repeated_elements_mat])
                    del repeated_elements_mat
                image_2_big = False
            except:
                num_sub_images += 1
                print('Hist bin pruning failed, retrying with %d sub-images' % (num_sub_images))
        # if prune_only:
        #     return bins
        bins = [b.view([desired_im.size(0), 1, -1]).type(torch.cuda.DoubleTensor) for b in bins]
        return torch.cat(bins,-1)

    def TemperatureSearch(self,desired_image,initial_image,desired_KL_div):
        """Bisect (in log-space) for a temperature giving the desired KL div.

        between the histograms of `initial_image` and `desired_image`;
        side effect: leaves the found value in self.temperature.
        """
        log_temperature_range = [0.1,1]
        STEP_SIZE = 10
        KL_DIV_TOLERANCE = 0.1
        cur_KL_div = []
        desired_temp_within_range = False
        with torch.no_grad():
            while True:
                next_temperature = np.exp(np.mean(log_temperature_range))
                if np.isinf(next_temperature) or next_temperature==0:
                    print('KL div. is %.3e even for temperature of %.3e, aborting temperature search with that.'%(cur_KL_div[-1],self.temperature))
                    break
                self.temperature = 1*next_temperature
                desired_im_hist = self.ComputeSoftHistogram(desired_image,image_mask=self.desired_hist_image_mask,return_log_hist=False,reshape_image=False,compute_hist_normalizer=True)
                initial_image_hist = self.ComputeSoftHistogram(initial_image,image_mask=self.image_mask,return_log_hist=True,reshape_image=False,compute_hist_normalizer=False)
                cur_KL_div.append(self.loss(initial_image_hist,desired_im_hist).item())
                KL_div_too_big = cur_KL_div[-1] > desired_KL_div
                if np.abs(np.log(max([0,cur_KL_div[-1]])/desired_KL_div))<=np.log(1+KL_DIV_TOLERANCE):
                    print('Automatically set histogram temperature to %.3e'%(self.temperature))
                    break
                elif not desired_temp_within_range:
                    # Expand the search interval until it brackets the target.
                    if len(cur_KL_div)==1:
                        initial_KL_div_too_big = KL_div_too_big
                    else:
                        desired_temp_within_range = initial_KL_div_too_big^KL_div_too_big
                    if not desired_temp_within_range:
                        if KL_div_too_big:
                            log_temperature_range[1] += STEP_SIZE
                        else:
                            log_temperature_range[0] -= STEP_SIZE
                if desired_temp_within_range:
                    if KL_div_too_big:
                        log_temperature_range[0] = 1*np.log(self.temperature)
                    else:
                        log_temperature_range[1] = 1*np.log(self.temperature)

    def ComputeSoftHistogram(self,image,image_mask,return_log_hist,reshape_image,compute_hist_normalizer,temperature=None):
        """Soft histogram / KDE of `image` (optionally masked and patch-ified).

        Returns a 1 x bins (log-)histogram, or per-sample distances in
        dictionary mode.
        """
        CANONICAL_KDE_4_DICTIONARY = True
        if temperature is None:
            temperature = 1*self.temperature
        if not reshape_image:
            image = image.type(torch.cuda.DoubleTensor)
        else:
            if self.patch_size > 1:
                image = torch.sparse.mm(self.patch_extraction_mat, image.view([-1, 1])).view([self.num_dims, -1])
                if self.no_patch_DC:
                    image = image-torch.mean(image,dim=0,keepdim=True)
                    if self.no_patch_STD:
                        image = image / torch.max(torch.std(image, dim=0, keepdim=True), other=torch.tensor(1 / 255).to(self.device))*self.mean_patches_STD
            else:
                image = image.contiguous().view([self.num_dims,-1])
            if image_mask is not None:
                image = image[:, image_mask]
            image = image.unsqueeze(-1).type(torch.cuda.DoubleTensor)
        hist = (image-self.bins).abs()
        # Wrap-around distance: treat the value range as circular.
        hist = torch.min(hist,(image-self.bins-self.max).abs())
        hist = torch.min(hist,(image-self.bins+self.max).abs())
        if not self.dictionary_not_histogram or CANONICAL_KDE_4_DICTIONARY:
            hist = -((hist+self.SQRT_EPSILON)**self.exp_power)/temperature
        if self.dictionary_not_histogram and not CANONICAL_KDE_4_DICTIONARY:
            # return torch.exp(self.bin_width/(hist+self.bin_width/2))
            hist = hist.mean(0)
            return hist.min(dim=1)[0].view([1,-1])
            # return hist.min(dim=1)[0].view([1, -1])
        if self.dictionary_not_histogram and CANONICAL_KDE_4_DICTIONARY:
            return -1*torch.log(torch.exp(hist).mean(1)).view([1, -1])
        hist = torch.exp(hist).mean(0)
        if compute_hist_normalizer or not self.KDE:
            self.normalizer = hist.sum()/image.size(1)
        hist = (hist/self.normalizer/image.size(1)).type(torch.cuda.FloatTensor)
        if self.KDE: # Adding another "bin" to account for all other missing bins
            hist = torch.cat([hist,(1-torch.min(torch.tensor(1).type(hist.dtype).to(hist.device),hist.sum())).view([1])])
        if return_log_hist:
            return torch.log(hist+torch.finfo(hist.dtype).eps).view([1,-1])
        else:
            return hist.view([1,-1])

    def forward(self,cur_images):
        """Histogram/dictionary loss of `cur_images` against the stored reference.

        Returns (loss, grad-size) when optimizing temperature, per-image mean
        distances in dictionary mode, or the KL-divergence loss otherwise.
        """
        cur_images_hists,KLdiv_grad_sizes = [],[]
        for i,cur_image in enumerate(cur_images):
            if self.gray_scale:
                cur_image = cur_image.mean(0, keepdim=True)
            if self.temperature_optimizer:
                self.temperature = self.optimizable_temperature()
                self.desired_hists_list.append(self.ComputeSoftHistogram(self.desired_hist_image, image_mask=self.desired_hist_image_mask,return_log_hist=False,
                    reshape_image=False, compute_hist_normalizer=True))
            else:
                temperature = self.temperature*(1 if (len(cur_images)==1 or True) else 5**(i-1)) #I used to multiply temperature for multi-scale histogram - I'm not sure why I did that, and I cancel it now since I use multiple images for the random initializations of the z optimization.
            cur_images_hists.append(self.ComputeSoftHistogram(cur_image, self.image_mask, return_log_hist=True,reshape_image=True, compute_hist_normalizer=False,temperature=temperature))
            if self.temperature_optimizer:
                KLdiv_grad_sizes.append(-1*(torch.autograd.grad(outputs=self.loss(cur_images_hists[-1],self.desired_hists_list[-1]),inputs=self.image_Z,create_graph=True)[0]).norm(p=2))
        if self.temperature_optimizer:
            return self.loss(torch.cat(cur_images_hists,0),torch.cat(self.desired_hists_list,0)),torch.stack(KLdiv_grad_sizes).mean()
        elif self.dictionary_not_histogram:
            return torch.cat(cur_images_hists,0).mean(1).type(torch.cuda.FloatTensor)
        else:
            return self.loss(torch.cat(cur_images_hists,0),torch.cat(self.desired_hists_list,0)).type(torch.cuda.FloatTensor)
def ReturnPatchExtractionMat(mask,patch_size,device,patches_overlap=1,return_non_covered=False):
    """Build a sparse matrix extracting all fully-masked patches of an image.

    Each row of the returned matrix selects the pixels of one patch_size x
    patch_size patch that lies entirely inside `mask`, so multiplying it with
    a flattened image yields the stacked patch contents. With
    patches_overlap < 1, patches whose pixels are already covered beyond that
    fraction by previously kept patches are greedily discarded.
    Returns the matrix, or a (matrix, non-covered-pixels matrix) pair when
    return_non_covered is True.
    """
    RANDOM_PATCHES_SELECTION = False #If true, patches are dropped in a random order, satisfying the maximal overlap constraint, rather than moving columns first ,than rows. This typically discards of much more patches.
    # Erode the mask so kept patches never touch its boundary.
    mask = binary_opening(mask, np.ones([patch_size, patch_size]).astype(np.bool))
    # Encode pixel indexes as 1-based values so masked-out (zero) pixels can
    # be detected; subtract 1 afterwards to recover 0-based indexes.
    patches_indexes = extract_patches_2d(np.multiply(mask, 1 + np.arange(mask.size).reshape(mask.shape)),
                                         (patch_size, patch_size)).reshape([-1, patch_size**2])
    patches_indexes = patches_indexes[np.all(patches_indexes > 0, 1), :] - 1
    if patches_overlap<1:
        # I discard patches by discarding those containing too many pixels that are already covered by a previous patch. Patches are ordered right to left, top to bottom.
        # For exampe, if the stride corresponds to one row/column, it would be one row. There might be simpler ways to achieve this...
        unique_indexes = list(set(list(patches_indexes.reshape([-1]))))
        min_index = min(unique_indexes)
        index_taken_indicator = np.zeros([max(unique_indexes) - min(unique_indexes)]).astype(np.bool)
        valid_patches = np.ones([patches_indexes.shape[0]]).astype(np.bool)
        randomized_patches_indexes = np.random.permutation(patches_indexes.shape[0])
        oredered_patches_indexes = randomized_patches_indexes if RANDOM_PATCHES_SELECTION else np.arange(patches_indexes.shape[0])
        for patch_num in oredered_patches_indexes:
            # With overlap 0, reject on ANY covered pixel; otherwise reject
            # when the covered fraction exceeds the allowed overlap.
            if (patches_overlap==0 and np.any(index_taken_indicator[patches_indexes[patch_num,:] - min_index - 1]))\
                    or np.mean(index_taken_indicator[patches_indexes[patch_num,:] - min_index - 1])>patches_overlap:
                valid_patches[patch_num] = False
                continue
            index_taken_indicator[patches_indexes[patch_num,:] - min_index - 1] = True
        patches_indexes = patches_indexes[valid_patches]
        print('%.3f of desired pixels are covered by assigned patches'%(index_taken_indicator[unique_indexes-min_index-1].mean()))
        if return_non_covered:
            non_covered_indexes = np.array(unique_indexes)
            non_covered_indexes = non_covered_indexes[np.logical_not(index_taken_indicator[non_covered_indexes - min_index - 1])]
            non_covered_pixels_extraction_mat = Patch_Indexes_2_Sparse_Mat(non_covered_indexes,mask.size,device)
    patch_extraction_mat = Patch_Indexes_2_Sparse_Mat(patches_indexes,mask.size,device)
    if return_non_covered:
        if not patches_overlap<1:
            # All pixels are covered when no patches were discarded.
            non_covered_pixels_extraction_mat = None#torch.sparse.FloatTensor(torch.Size([0, mask.size]))
        return patch_extraction_mat,non_covered_pixels_extraction_mat
    else:
        return patch_extraction_mat
def Patch_Indexes_2_Sparse_Mat(patches_indexes,mask_size,device):
    """Build a sparse 0/1 selection matrix from flat pixel indexes.

    Multiplying the returned [patches_indexes.size, mask_size] matrix with a
    flattened image extracts the listed pixels (patch-major order, via the
    transpose of `patches_indexes`).

    Args:
        patches_indexes: integer ndarray of pixel indexes (any shape).
        mask_size: number of pixels in the flattened image (matrix width).
        device: torch device to place the sparse matrix on.
    """
    corresponding_mat_rows = np.arange(patches_indexes.size).reshape([-1])
    # FIX: torch.sparse.FloatTensor is a deprecated legacy constructor;
    # torch.sparse_coo_tensor is the supported equivalent (same layout/dtype).
    indices = torch.tensor(np.stack([corresponding_mat_rows, patches_indexes.transpose().reshape([-1])]), dtype=torch.int64)
    values = torch.ones(corresponding_mat_rows.size, dtype=torch.float32)
    return torch.sparse_coo_tensor(indices, values,
                                   torch.Size([patches_indexes.size, mask_size])).to(device)
class Optimizable_Z(torch.nn.Module):
    """A latent tensor Z wrapped as a learnable parameter (pre-tanh space).

    Optionally squashes to [-Z_range, Z_range] via tanh and freezes the
    region outside `Z_mask` at its initial value.
    NOTE(review): CUDA-only — the parameter is allocated as a cuda tensor.
    """
    def __init__(self,Z_shape,Z_range=None,initial_pre_tanh_Z=None,Z_mask=None,random_perturbations=False):
        super(Optimizable_Z, self).__init__()
        # self.device = torch.device('cuda')
        self.Z = torch.nn.Parameter(data=torch.zeros(Z_shape).type(torch.cuda.FloatTensor))
        if Z_mask is not None and not np.all(Z_mask):
            self.mask = torch.from_numpy(Z_mask).type(torch.cuda.FloatTensor).to(self.Z.data.device)
            self.initial_pre_tanh_Z = 1*initial_pre_tanh_Z.type(torch.cuda.FloatTensor).to(self.Z.data.device)
        else:
            # All-ones (or absent) mask: the whole Z is optimizable.
            self.mask = None
        if initial_pre_tanh_Z is not None:
            assert initial_pre_tanh_Z.size()[1:]==self.Z.data.size()[1:] and (initial_pre_tanh_Z.size(0) in [1,self.Z.data.size(0)]),'Initilizer size does not match desired Z size'
            if random_perturbations:
                initial_pre_tanh_Z += torch.normal(mean=torch.zeros_like(initial_pre_tanh_Z), std=0.001 * torch.ones_like(initial_pre_tanh_Z))
            self.Z.data[:initial_pre_tanh_Z.size(0),...] = initial_pre_tanh_Z.to(self.Z.data.device)
        self.Z_range = Z_range
        if Z_range is not None:
            self.tanh = torch.nn.Tanh()
    def forward(self):
        """Return Z (tanh-squashed if Z_range is set), re-imposing the mask."""
        if self.Z_range is not None:
            # Clamp to finite values so tanh never sees inf/nan from updates.
            self.Z.data = torch.min(torch.max(self.Z,torch.tensor(-torch.finfo(self.Z.dtype).max).type(self.Z.dtype).to(self.Z.device)),torch.tensor(torch.finfo(self.Z.dtype).max).type(self.Z.dtype).to(self.Z.device))
        if self.mask is not None:
            # Overwrite the frozen region with its initial value in-place.
            self.Z.data = self.mask * self.Z.data + (1 - self.mask) * self.initial_pre_tanh_Z
        if self.Z_range is not None:
            return self.Z_range*self.tanh(self.Z)
        else:
            return self.Z
    def PreTanhZ(self):
        """Return the raw (pre-tanh) Z data, with the frozen region imposed."""
        if self.mask is not None:
            return self.mask * self.Z.data + (1 - self.mask) * self.initial_pre_tanh_Z
        else:
            return self.Z.data
    def Randomize_Z(self,what_2_shuffle):
        """Re-initialize Z randomly — all batch items, or all but the first."""
        assert what_2_shuffle in ['all','allButFirst']
        if what_2_shuffle=='all':
            torch.nn.init.xavier_uniform_(self.Z.data,gain=100)
        else:
            torch.nn.init.xavier_uniform_(self.Z.data[1:], gain=100)
    def Return_Detached_Z(self):
        """Return the current (post-tanh) Z detached from the autograd graph."""
        return self.forward().detach()
def ArcTanH(input_tensor):
    """Inverse hyperbolic tangent, epsilon-guarded so inputs of exactly
    +/-1 neither divide by zero nor take log of zero."""
    eps = torch.finfo(input_tensor.dtype).eps
    return 0.5 * torch.log((1 + input_tensor + eps) / (1 - input_tensor + eps))
def TV_Loss(image):
    """Anisotropic total variation of a (batch, channel, H, W) tensor:
    mean absolute horizontal plus vertical neighbour differences, reduced
    over all but the batch dimension."""
    horizontal = (image[:, :, :, :-1] - image[:, :, :, 1:]).abs().mean(dim=(1, 2, 3))
    vertical = (image[:, :, :-1, :] - image[:, :, 1:, :]).abs().mean(dim=(1, 2, 3))
    return horizontal + vertical
class Z_optimizer():
MIN_LR = 1e-5
PATCH_SIZE_4_STD = 7
def __init__(self,objective,Z_size,model,Z_range,max_iters,data=None,loggers=None,image_mask=None,Z_mask=None,initial_Z=None,initial_LR=None,existing_optimizer=None,
batch_size=1,HR_unpadder=None,auto_set_hist_temperature=False,random_Z_inits=False,jpeg_extractor=None,non_local_Z_optimization=False):
self.jpeg_mode = jpeg_extractor is not None
self.data_keys = {'reconstructed':'SR'} if not self.jpeg_mode else {'reconstructed':'Decomp'}
if (initial_Z is not None or 'cur_Z' in model.__dict__.keys()):
if initial_Z is None:
initial_Z = 1*model.GetLatent()
initial_pre_tanh_Z = initial_Z/Z_range
initial_pre_tanh_Z = torch.clamp(initial_pre_tanh_Z,min=-1+torch.finfo(initial_pre_tanh_Z.dtype).eps,max=1.-torch.finfo(initial_pre_tanh_Z.dtype).eps)
initial_pre_tanh_Z = ArcTanH(initial_pre_tanh_Z)
else:
initial_pre_tanh_Z = None
self.non_local_Z_optimization = non_local_Z_optimization and image_mask is not None and image_mask.mean()<1
self.model_training = HR_unpadder is not None
assert not (self.non_local_Z_optimization and self.model_training),'Shouldn''t happen...'
if not self.model_training:
self.initial_output = model.Output_Batch(within_0_1=True)
if self.non_local_Z_optimization:
# Z Manipulation is allowed in all locations meeting ANY of the following:
# - The entire Z, except for a margin of NON_EDIT_MARGINS pixels on each side in the optimized region (for large images) or image (for small images)
# - The dilated version of image_mask (using a 16x16 square structural element).
new_Z_mask = np.zeros_like(Z_mask)
NON_EDIT_MARGINS = 24
if self.jpeg_mode:
new_Z_mask[NON_EDIT_MARGINS//8:-NON_EDIT_MARGINS//8,NON_EDIT_MARGINS//8:-NON_EDIT_MARGINS//8] = 1
dilated_im_mask = np.max(np.max(dilate(image_mask, np.ones([16, 16])).reshape([Z_mask.shape[0],8,Z_mask.shape[1],8]),-1),1)
else:
new_Z_mask[NON_EDIT_MARGINS:-NON_EDIT_MARGINS,NON_EDIT_MARGINS:-NON_EDIT_MARGINS] = 1
dilated_im_mask = dilate(image_mask, np.ones([16, 16]))
# Z_mask *= new_Z_mask
Z_mask = 1*new_Z_mask
Z_mask = np.minimum(1,Z_mask+dilated_im_mask)
self.Z_model = Optimizable_Z(Z_shape=[batch_size,model.num_latent_channels] + list(Z_size), Z_range=Z_range,initial_pre_tanh_Z=initial_pre_tanh_Z,Z_mask=Z_mask,
random_perturbations=(random_Z_inits and 'random' not in objective) or ('random' in objective and 'limited' in objective))
assert (initial_LR is not None) or (existing_optimizer is not None),'Should either supply optimizer from previous iterations or initial LR for new optimizer'
self.objective = objective
self.data = data
self.device = torch.device('cuda')
self.jpeg_extractor = jpeg_extractor
self.model = model
if image_mask is None:
if 'fake_H' in model.__dict__.keys():
self.image_mask = torch.ones(list(model.fake_H.size()[2:])).type(model.fake_H.dtype).to(self.device)
else:
self.image_mask = None
self.Z_mask = None#torch.ones(Z_size).type(model.fake_H.dtype).to(self.device)
else:
assert Z_mask is not None,'Should either supply both masks or niether'
self.image_mask = torch.from_numpy(image_mask).type(model.fake_H.dtype).to(self.device)
self.Z_mask = torch.from_numpy(Z_mask).type(model.fake_H.dtype).to(self.device)
self.initial_Z = 1.*model.GetLatent()
self.image_mask.requires_grad = False
self.Z_mask.requires_grad = False
if self.non_local_Z_optimization:
self.constraining_mask = (1-(self.image_mask>0)).type(self.image_mask.type())
def constraining_loss(produced_im):
return torch.nn.functional.l1_loss(input=produced_im * self.constraining_mask,target=self.initial_output * self.constraining_mask)
self.constraining_loss = constraining_loss
self.constraining_loss_weight = 0.1 # Setting a default weight, that should probably be adjusted for each different tool
if 'local' in objective:#Used in relative STD change and periodicity objective cases:
desired_overlap = 1 if 'STD' in objective else 0.5
self.patch_extraction_map,self.non_covered_indexes_extraction_mat = ReturnPatchExtractionMat(mask=image_mask,
patch_size=self.PATCH_SIZE_4_STD,device=model.fake_H.device,patches_overlap=desired_overlap,return_non_covered=True)
# self.patch_extraction_map, self.non_covered_indexes_extraction_mat =\
# self.patch_extraction_map.to(model.fake_H.device),self.non_covered_indexes_extraction_mat.to(model.fake_H.device)
if not self.model_training:
self.initial_STD = self.Masked_STD(first_image_only=True)
print('Initial STD: %.3e' % (self.initial_STD.mean().item()))
if existing_optimizer is None:
if any([phrase in objective for phrase in ['l1','scribble']]) and 'random' not in objective:
if data is not None and 'desired' in data.keys():
self.desired_im = data['desired']
if self.image_mask is None:
self.loss = torch.nn.L1Loss().to(torch.device('cuda'))
else:
loss_mask = (self.image_mask>0).type(self.image_mask.type())
SMOOTHING_MARGIN = 1
if 'scribble' in objective:
scribble_mask_tensor = torch.from_numpy(data['scribble_mask']).type(loss_mask.dtype).to(loss_mask.device)
scribble_multiplier = np.ones_like(data['scribble_mask']).astype(np.float32)
scribble_multiplier += data['brightness_factor']*(data['scribble_mask']==2)-data['brightness_factor']*(data['scribble_mask']==3)
if SMOOTHING_MARGIN>0:
scribble_multiplier = convolve2d(np.pad(scribble_multiplier,((SMOOTHING_MARGIN,SMOOTHING_MARGIN),(SMOOTHING_MARGIN,SMOOTHING_MARGIN)),mode='edge'),
np.ones([SMOOTHING_MARGIN*2+1,SMOOTHING_MARGIN*2+1])/((SMOOTHING_MARGIN*2+1)**2),mode='valid')
L1_loss_mask = loss_mask*((scribble_mask_tensor>0)*(scribble_mask_tensor<4)).float()
TV_loss_masks = [loss_mask*(scribble_mask_tensor==id).float().unsqueeze(0).unsqueeze(0) for id in torch.unique(scribble_mask_tensor*loss_mask) if id>3]
cur_HSV = rgb2hsv(np.clip(255*self.initial_output[0].data.cpu().numpy().transpose((1,2,0)).copy(),0,255))
cur_HSV[:,:,2] = cur_HSV[:,:,2]* scribble_multiplier
desired_RGB = hsv2rgb(cur_HSV)
desired_RGB = np.expand_dims(desired_RGB.transpose((2,0,1)),0)/255
desired_RGB_mask = (scribble_mask_tensor==2)+(scribble_mask_tensor==3)
self.desired_im = self.desired_im*(1-desired_RGB_mask).float()+desired_RGB_mask.float()*torch.from_numpy(desired_RGB).type(loss_mask.dtype).to(loss_mask.device)
def Scribble_Loss(produced_im,GT_im):
loss_per_im = []
for im_num in range(produced_im.size(0)):
loss_per_im.append(torch.nn.functional.l1_loss(input=produced_im[im_num].unsqueeze(0) * L1_loss_mask.to(self.device),
target=GT_im * L1_loss_mask.to(self.device)).to(torch.device('cuda')))
# if torch.any(TV_loss_mask.type(torch.uint8)):
if len(TV_loss_masks)>0:
loss_per_im[-1] = loss_per_im[-1] + Scribble_TV_Loss(produced_im[im_num].unsqueeze(0))
return torch.stack(loss_per_im,0)
def Scribble_TV_Loss(produced_im):
loss = 0
for TV_loss_mask in TV_loss_masks:
for y_shift in [-1,0,1]: # Taking differences to 8 neighbors, but calculating only 4 differences for each point (3 y shifts * 2 x shifts minus 2 discarded), to avoid duplicate differences
for x_shift in [-1,0]:
if y_shift in [0,1] and x_shift==0:
continue
point = np.array([y_shift,x_shift])
cur_mask = Return_Translated_SubImage(TV_loss_mask,point) * Return_Translated_SubImage(TV_loss_mask, -point)
loss = loss + (cur_mask * (Return_Translated_SubImage(produced_im,point) - Return_Translated_SubImage(produced_im, -point)).abs()).mean(dim=(1, 2, 3))
return loss
# if len(TV_loss_masks)==0: #No local TV minimization performed
# I need to check what the appropriate normalizer when using local TV minimization
self.constraining_loss_weight = 1#255/10*Z_loss.item()
self.loss = Scribble_Loss
# scheduler_threshold = 1e-2
elif 'Mag' in objective:
self.desired_patches = torch.sparse.mm(self.patch_extraction_map, self.initial_output.mean(dim=1).view([-1, 1])).view([self.PATCH_SIZE_4_STD ** 2, -1])
desired_STD = torch.max(torch.std(self.desired_patches,dim=0,keepdim=True),torch.tensor(1/255).to(self.device))
self.desired_patches = (self.desired_patches-torch.mean(self.desired_patches,dim=0,keepdim=True))/desired_STD*\
(desired_STD+data['STD_increment']*(1 if 'increase' in objective else -1))+torch.mean(self.desired_patches,dim=0,keepdim=True)
self.constraining_loss_weight = 255/10*data['STD_increment']**2
elif 'desired_SVD' in objective:
self.loss = FilterLoss(latent_channels='SVDinNormedOut_structure_tensor',constant_Z=data['desired_Z'],
reference_images={'min':data['reference_image_min'],'max':data['reference_image_max']},masks={'LR':self.Z_mask,'HR':self.image_mask})
elif 'STD' in objective and not any([phrase in objective for phrase in ['periodicity','TV','dict','hist']]):
assert self.objective.replace('local_','') in ['max_STD', 'min_STD','STD_increase','STD_decrease']
if any([phrase in objective for phrase in ['increase','decrease']]):
STD_CHANGE_FACTOR = 1.05
# STD_CHANGE_INCREMENT = data['STD_increment']
self.desired_STD = self.initial_STD
if data['STD_increment'] is None:#Using multiplicative desired STD factor:
self.desired_STD *= STD_CHANGE_FACTOR if 'increase' in objective else 1/STD_CHANGE_FACTOR
else:#Using an additive increment:
self.desired_STD += data['STD_increment'] if 'increase' in objective else -data['STD_increment']
self.constraining_loss_weight = 255/10*data['STD_increment']**2
elif 'periodicity' in objective:
self.STD_PRESERVING_WEIGHT = 20#0.2 if 'Plus' in objective else 20
self.PLUS_MEANS_STD_INCREASE = True
if 'nonInt' in objective:
image_size = list(self.initial_output.size()[2:])
self.periodicity_points,self.half_period_points = [],[]
if 'Plus' in objective and self.PLUS_MEANS_STD_INCREASE:
self.desired_STD = self.initial_STD + data['STD_increment']
for point in data['periodicity_points']:
point = np.array(point)
self.periodicity_points.append([])
self.half_period_points.append([])
for half_period_round in range(1+('Plus' in objective and not self.PLUS_MEANS_STD_INCREASE)):
for minus_point in range(2):
cur_point = 1*point
if half_period_round:
cur_point *= 0.5
if minus_point:
cur_point *= -1
y_range, x_range = [IndexingHelper(cur_point[0]),IndexingHelper(cur_point[0], negative=True)], [IndexingHelper(cur_point[1]),IndexingHelper(cur_point[1],negative=True)]
ranges = []
for axis,cur_range in enumerate([x_range,y_range]):
cur_range = [cur_range[0] if cur_range[0] is not None else 0,image_size[axis]+cur_range[1] if cur_range[1] is not None else image_size[axis]]
cur_range = np.linspace(start=cur_range[0],stop=cur_range[1],
num=image_size[axis]-np.ceil(np.abs(np.array([0,image_size[axis]])-cur_range)).astype(np.int16).max())/image_size[axis]*2-1
ranges.append(cur_range)
grid = np.meshgrid(*ranges)
if half_period_round:
self.half_period_points[-1].append(torch.from_numpy(np.stack(grid, -1)).view([1] + list(grid[0].shape) + [2]).type(
self.initial_output.dtype).to(self.initial_output.device))
else:
self.periodicity_points[-1].append(torch.from_numpy(np.stack(grid,-1)).view([1]+list(grid[0].shape)+[2]).type(
self.initial_output.dtype).to(self.initial_output.device))
else:
self.periodicity_points = [np.array(point) for point in data['periodicity_points']]
elif 'VGG' in objective and 'random' not in objective:
self.GT_HR_VGG = model.netF(self.desired_im).detach().to(self.device)
self.loss = torch.nn.L1Loss().to(torch.device('cuda'))
elif 'TV' in objective:
self.STD_PRESERVING_WEIGHT = 100
elif any([phrase in objective for phrase in ['hist','dict']]):
self.automatic_temperature = auto_set_hist_temperature
self.STD_PRESERVING_WEIGHT = 1e4
if self.automatic_temperature:
assert 'hist' in objective,'Unsupported for dictionary'
self.data['Z'] = self.Z_model()
pre_tanh_Z = self.Z_model.Z
pre_tanh_Z.requires_grad = True
model.feed_data(self.data, need_GT=False)
d_KLdiv_2_d_temperature = SoftHistogramLoss(bins=256,min=0,max=1,desired_hist_image=self.data['desired'].detach(),desired_hist_image_mask=data['Desired_Im_Mask'],
input_im_HR_mask=self.image_mask,gray_scale=True,patch_size=3 if 'patch' in objective else 1,automatic_temperature=True,image_Z=pre_tanh_Z)
temperature_optimizer = torch.optim.Adam(d_KLdiv_2_d_temperature.optimizable_temperature.parameters(), lr=0.5)
temperature_optimizer.zero_grad()
initial_image = model.netG(model.var_L).to(self.device)
temperatures,gradient_sizes,KL_divs = [],[],[]
NUM_ITERS = 50
for tempertaure_seeking_iter in range(NUM_ITERS):
cur_KL_div,temperature_gradients_size = d_KLdiv_2_d_temperature(initial_image)
temperature_gradients_size.backward(retain_graph=(tempertaure_seeking_iter<(NUM_ITERS-1)))
temperature_optimizer.step()
KL_divs.append(cur_KL_div.item())
temperatures.append(d_KLdiv_2_d_temperature.temperature.item())
gradient_sizes.append(temperature_gradients_size.item())
optimal_temperature = temperatures[np.argmin(gradient_sizes)]
else:
optimal_temperature = 5e-4 if 'hist' in objective else 1e-3
self.loss = SoftHistogramLoss(bins=256,min=0,max=1,desired_hist_image=self.data['desired'] if self.data is not None else None,
desired_hist_image_mask=data['Desired_Im_Mask'] if self.data is not None else None,input_im_HR_mask=self.image_mask,
gray_scale=True,patch_size=6 if 'patch' in objective else 1,temperature=optimal_temperature,dictionary_not_histogram='dict' in objective,
no_patch_DC='noDC' in objective,no_patch_STD='no_localSTD' in objective)
self.constraining_loss_weight = 10# if 'no_localSTD' in objective else 0.1 # Empirically set, based on empirically measured loss.
elif 'Adversarial' in objective:
self.netD = model.netD
self.loss = GANLoss('wgan-gp', 1.0, 0.0).to(self.device)
elif 'limited' in objective:
self.initial_image = 1*model.output_image.detach()
self.rmse_weight = data['rmse_weight']
self.optimizer = torch.optim.Adam(self.Z_model.parameters(), lr=initial_LR)
else:
self.optimizer = existing_optimizer
self.LR = initial_LR
self.scheduler = None#torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer=self.optimizer,verbose=True,threshold=1e-2,min_lr=self.MIN_LR,cooldown=10)
self.loggers = loggers
self.cur_iter = 0
self.max_iters = max_iters
self.random_Z_inits = 'all' if (random_Z_inits or self.model_training)\
else 'allButFirst' if (initial_pre_tanh_Z is not None and initial_pre_tanh_Z.size(0)<batch_size)\
else False
self.HR_unpadder = HR_unpadder
def Masked_STD(self,first_image_only=False):
    """Per-image standard deviation of the current output, within the editing mask.

    For 'local' objectives the STD is computed per extracted patch (via the
    sparse patch_extraction_map) rather than over the whole masked image.

    Args:
        first_image_only: if True, measure only the first image in the batch.

    Returns:
        Tensor of STD values with images stacked along dim 1.
        NOTE(review): indentation of this block was reconstructed from a
        whitespace-mangled source -- confirm nesting against the original file.
    """
    model_output = self.model.Output_Batch(within_0_1=True)
    if 'local' in self.objective:
        values_2_return = []
        for im_num in range(1 if first_image_only else model_output.size(0)):
            # STD over each patch of the channel-averaged (grayscale) image:
            values_2_return.append(torch.sparse.mm(self.patch_extraction_map,model_output[im_num].mean(dim=0).view([-1, 1])).view([self.PATCH_SIZE_4_STD ** 2, -1]).std(dim=0))
            if self.non_covered_indexes_extraction_mat is not None:
                # Append one extra STD value for pixels not covered by any full patch.
                values_2_return[-1] = torch.cat([values_2_return[-1],torch.sparse.mm(self.non_covered_indexes_extraction_mat,model_output[im_num].mean(dim=0).view(
                    [-1, 1])).std(dim=0)], 0)
        return torch.stack(values_2_return, 1)
    else:
        # Global objective: one STD per image over the masked output.
        return torch.std(model_output * self.image_mask, dim=(1, 2, 3)).view(1,-1)
def feed_data(self,data):
    """Attach a new data dict and restart the iteration counter.

    Depending on the objective, the 'desired' entry is forwarded either as the
    L1 target image or as the desired-histogram reference for the loss.
    """
    self.data = data
    self.cur_iter = 0
    objective = self.objective
    if 'l1' in objective:
        self.desired_im = data['desired'].to(self.device)
    elif 'hist' in objective:
        self.loss.Feed_Desired_Hist_Im(data['desired'].to(self.device))
def Manage_Model_Grad_Requirements(self,disable):
    """Freeze the generator's parameters (disable=True), remembering each
    requires_grad flag, or restore the remembered flags (disable=False)."""
    generator_params = self.model.netG.parameters()
    if disable:
        self.original_requires_grad_status = []
        for param in generator_params:
            self.original_requires_grad_status.append(param.requires_grad)
            param.requires_grad = False
    else:
        # Parameters iterate in a fixed order, so zip restores flags positionally.
        for param, saved_status in zip(generator_params, self.original_requires_grad_status):
            param.requires_grad = saved_status
def optimize(self):
    """Run the latent-Z optimization loop for the configured objective.

    Each iteration produces an output image from the current Z, computes the
    objective-specific loss, and steps the optimizer; stops on the iteration
    budget (positive self.max_iters) or on a loss plateau (negative max_iters).

    Returns:
        The optimized Z, detached from the computation graph.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source -- confirm nesting (marked below where ambiguous).
    """
    DETACH_Y_FOR_CHROMA_TRAINING = False
    if 'Adversarial' in self.objective:
        self.model.netG.train(True) # Preventing image padding in the CEM code, to have the output fit D's input size
    # Freeze generator weights; only Z is optimized here.
    self.Manage_Model_Grad_Requirements(disable=True)
    self.loss_values = []
    if self.random_Z_inits and self.cur_iter==0:
        self.Z_model.Randomize_Z(what_2_shuffle=self.random_Z_inits)
    z_iter = self.cur_iter
    if 'Uncomp' in self.data.keys():
        # This is to prevent the error "Trying to backward through the graph a second time, but the buffers have already been freed...".
        # I suspect it happens here because I change two portions of self.data['Uncomp'] at two different points in the optimization (Y and chroma channels), so at no point
        # does self.data['Uncomp'] get completely overridden, and therefore the optimizer "thinks" it need to use self.data['Uncomp'] in consecutive backward() steps. I solve it by
        # explicitly initializing self.data['Uncomp'] to its original value before each iteration.
        Uncomp_batch = 1*self.data['Uncomp']
    while True:
        if self.max_iters>0:
            # Fixed iteration budget.
            if z_iter==(self.cur_iter+self.max_iters):
                break
        elif len(self.loss_values)>=-self.max_iters:# Would stop when loss siezes to decrease, or after 5*(-)max_iters
            if z_iter==(self.cur_iter-5*self.max_iters):
                break
            if (self.loss_values[self.max_iters] - self.loss_values[-1]) / np.abs(self.loss_values[self.max_iters]) < 1e-2 * self.LR:
                break
        self.optimizer.zero_grad()
        self.data['Z'] = self.Z_model()
        if self.model_training and 'Uncomp' in self.data.keys():
            self.data['Uncomp'] = 1*Uncomp_batch
        self.model.feed_data(self.data, need_GT=False,detach_Y=DETACH_Y_FOR_CHROMA_TRAINING and self.model_training and self.data['Uncomp'].size(1)==3)
        self.model.test(prevent_grads_calc=False,chroma_input=self.data['chroma_input'] if 'chroma_input' in self.data.keys() else None)
        self.output_image = self.model.Output_Batch(within_0_1=True)
        if self.model_training:
            self.output_image = self.HR_unpadder(self.output_image)
        # --- Objective-specific loss computation ---
        if 'random' in self.objective:
            if 'l1' in self.objective:
                data_in_loss_domain = self.output_image
            elif 'VGG' in self.objective:
                data_in_loss_domain = self.model.netF(self.output_image)
            # Distance of each image to its nearest other batch member; adding the
            # eye matrix masks out the zero self-distance.
            Z_loss = torch.min((data_in_loss_domain.unsqueeze(0) - data_in_loss_domain.unsqueeze(1)).abs() + torch.eye(
                data_in_loss_domain.size(0)).unsqueeze(2).unsqueeze(3).unsqueeze(4).to(data_in_loss_domain.device), dim=0)[0]
            if 'limited' in self.objective:
                rmse = (data_in_loss_domain - self.initial_image).abs()
                if z_iter==0:
                    rmse_weight = 1*self.rmse_weight#*Z_loss.mean().item()/rmse.mean().item()
                Z_loss = Z_loss-rmse_weight*rmse
            if self.Z_mask is not None:
                Z_loss = Z_loss*self.Z_mask
            # Negated: diversity among batch members is maximized.
            Z_loss = -1*Z_loss.mean(dim=(1,2,3))
        elif any([phrase in self.objective for phrase in ['l1','scribble']]):
            Z_loss = self.loss(self.output_image.to(self.device), self.desired_im.to(self.device))
        elif 'desired_SVD' in self.objective:
            Z_loss = self.loss({'SR':self.output_image.to(self.device)}).mean()
        elif any([phrase in self.objective for phrase in ['hist','dict']]):
            Z_loss = self.loss(self.output_image.to(self.device))
            if 'localSTD' in self.objective:
                # Keep per-patch STD close to its initial value while matching the histogram.
                Z_loss = Z_loss+(self.STD_PRESERVING_WEIGHT*(self.Masked_STD(first_image_only=False)-self.initial_STD)**2).mean(0).to(self.device)
        elif 'Adversarial' in self.objective:
            Z_loss = self.loss(self.netD(self.model.DTE_net.HR_unpadder(self.output_image).to(self.device)),True)
        elif 'STD' in self.objective and not any([phrase in self.objective for phrase in ['periodicity','TV']]):
            Z_loss = self.Masked_STD(first_image_only=False)
            if any([phrase in self.objective for phrase in ['increase', 'decrease']]):
                Z_loss = (Z_loss-self.desired_STD)**2
            Z_loss = Z_loss.mean(0)
        elif 'Mag' in self.objective:
            values_2_return = []
            for im_num in range(self.output_image.size(0)):
                # Squared distance of each image's patch decomposition to the desired patches.
                values_2_return.append(((torch.sparse.mm(self.patch_extraction_map,self.output_image[im_num].mean(dim=0).view([-1, 1])).view(
                    [self.PATCH_SIZE_4_STD ** 2, -1]) - self.desired_patches) ** 2).mean())
            Z_loss = torch.stack(values_2_return,0)
        elif 'periodicity' in self.objective:
            Z_loss = self.PeriodicityLoss().to(self.device)
            if 'Plus' in self.objective and self.PLUS_MEANS_STD_INCREASE:
                Z_loss = Z_loss+self.STD_PRESERVING_WEIGHT*((self.Masked_STD(first_image_only=False)-self.desired_STD)**2).mean()
        elif 'TV' in self.objective:
            Z_loss = (self.STD_PRESERVING_WEIGHT*(self.Masked_STD(first_image_only=False)-self.initial_STD)**2).mean(0)+TV_Loss(self.output_image * self.image_mask).to(self.device)
        elif 'VGG' in self.objective:
            Z_loss = self.loss(self.model.netF(self.output_image).to(self.device),self.GT_HR_VGG)
        if 'max' in self.objective:
            # Maximization objectives are expressed as negated minimization.
            Z_loss = -1*Z_loss
        cur_LR = self.optimizer.param_groups[0]['lr']
        if self.loggers is not None:
            for logger_num,logger in enumerate(self.loggers):
                cur_value = Z_loss[logger_num].item() if Z_loss.dim()>0 else Z_loss.item()
                logger.print_format_results('val', {'epoch': 0, 'iters': z_iter, 'time': time.time(), 'model': '','lr': cur_LR, 'Z_loss': cur_value}, dont_print=True)
        if not self.model_training:
            self.latest_Z_loss_values = [val.item() for val in Z_loss]
        Z_loss = Z_loss.mean()
        if self.non_local_Z_optimization:
            # if z_iter==self.cur_iter: #First iteration:
            #     self.constraining_loss_weight = 255/10*Z_loss.item()
            Z_loss = Z_loss+self.constraining_loss_weight*self.constraining_loss(self.output_image.to(self.device))
        Z_loss.backward()
        self.loss_values.append(Z_loss.item())
        self.optimizer.step()
        if self.scheduler is not None:
            self.scheduler.step(Z_loss)
            # NOTE(review): nesting of this LR-based early stop under the scheduler
            # branch was reconstructed from mangled indentation -- confirm.
            if cur_LR<=1.2*self.MIN_LR:
                break
        z_iter += 1
    if 'Adversarial' in self.objective:
        self.model.netG.train(False) # Preventing image padding in the DTE code, to have the output fitD's input size
    if 'random' in self.objective and 'limited' in self.objective:
        self.loss_values[0] = self.loss_values[1] #Replacing the first loss values which is close to 0 in this case, to prevent discarding optimization because loss increased compared to it.
    # if 'STD' in self.objective or 'periodicity' in self.objective:
    if not self.model_training:
        print('Final STDs: ',['%.3e'%(val.item()) for val in self.Masked_STD(first_image_only=False).mean(0)])
    self.cur_iter = z_iter+1
    Z_2_return = self.Z_model.Return_Detached_Z()
    self.Manage_Model_Grad_Requirements(disable=False)
    if self.model_training:# Results of all optimization iterations were cropped, so I do another one without cropping and with Gradients computation (for model training)
        self.data['Z'] = Z_2_return
        if 'Uncomp' in self.data.keys():
            self.data['Uncomp'] = 1 * Uncomp_batch
        self.model.feed_data(self.data, need_GT=False)
        self.model.fake_H = self.model.netG(self.model.model_input)
    return Z_2_return
# def Return_Translated_SubImage(self,image, translation):
# y_range, x_range = [IndexingHelper(translation[0]), IndexingHelper(translation[0], negative=True)], [IndexingHelper(translation[1]), IndexingHelper(translation[1], negative=True)]
# return image[:, :, y_range[0]:y_range[1], x_range[0]:x_range[1]]
#
# def Return_Interpolated_SubImage(self,image, grid):
# return torch.nn.functional.grid_sample(image, grid.repeat([image.size(0),1,1,1]))
def PeriodicityLoss(self):
    """Penalize differences between the output image and itself shifted by the
    configured periodicity vectors, restricted to the overlap of the shifted masks.

    In 'Plus' mode (when Plus does not mean STD-increase), similarity at
    half-period shifts is rewarded (subtracted), pushing toward a true
    full-period pattern rather than a constant one.

    NOTE(review): indentation reconstructed from a whitespace-mangled source.
    """
    # Unless 'Plus' means STD-increase, also anchor the masked STD to its initial value.
    loss = 0 if 'Plus' in self.objective and self.PLUS_MEANS_STD_INCREASE else (self.STD_PRESERVING_WEIGHT*(self.Masked_STD(first_image_only=False)-self.initial_STD)**2).mean()
    image = self.output_image
    mask = self.image_mask.unsqueeze(0).unsqueeze(0)
    for point_num,point in enumerate(self.periodicity_points):
        if 'nonInt' in self.objective:
            # Non-integer shifts: each point holds a pair of sampling grids, compared
            # through bilinear interpolation.
            cur_mask = Return_Interpolated_SubImage(mask,point[0])*Return_Interpolated_SubImage(mask,point[1])
            loss = loss + (cur_mask * (Return_Interpolated_SubImage(image, point[0]) - Return_Interpolated_SubImage(image,point[1])).abs()).mean(dim=(1, 2, 3))
            if 'Plus' in self.objective and not self.PLUS_MEANS_STD_INCREASE:
                cur_half_cycle_mask = Return_Interpolated_SubImage(mask,self.half_period_points[point_num][0])*Return_Interpolated_SubImage(mask,self.half_period_points[point_num][1])
                loss = loss - (cur_half_cycle_mask * (Return_Interpolated_SubImage(image, self.half_period_points[point_num][0]) -\
                    Return_Interpolated_SubImage(image,self.half_period_points[point_num][1])).abs()).mean(dim=(1, 2, 3))
        else:
            # Integer shifts: compare the image with its translated self.
            cur_mask = Return_Translated_SubImage(mask,point)*Return_Translated_SubImage(mask,-point)
            loss = loss+(cur_mask*(Return_Translated_SubImage(image,point)-Return_Translated_SubImage(image,-point)).abs()).mean(dim=(1, 2, 3))
    return loss
def ReturnStatus(self):
    """Expose the optimization state: the pre-tanh Z tensor and its optimizer."""
    current_Z = self.Z_model.PreTanhZ()
    current_optimizer = self.optimizer
    return current_Z, current_optimizer
# def IndexingHelper(index,negative=False):
# if negative:
# return index if index < 0 else None
# else:
# return index if index > 0 else None
|
from django.urls import path

from . import views

# Forum URL routes.
urlpatterns = [
    path('forum/', views.lista_topicos, name='topicos'),
    path('forum/new', views.new_topico, name='new_topico'),
    path('forum/<int:pk>/', views.post, name='post'),
    path('forum/newpost', views.new_post, name='new_post'),
    path('forum/newcoment', views.new_coment, name='new_coment'),
]
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""Generate operation templates using the gentpl package."""
from gentpl import operation_tpl

operation_tpl.gen_tpl()
985,097 | 8ced086640afce35406d4dd86199241239c5230e | import logging
from datetime import datetime, timedelta, timezone
from sqlalchemy import Boolean, Column, DateTime, Integer, String, create_engine, desc
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import NullPool
from .text_utils import date_string_to_datetime, get_tweet_body
logger = logging.getLogger("haiku_logger")
Base = declarative_base()
def session_factory(database_url: str, echo: bool = None):
    """Create all mapped tables (if missing) and return a fresh Session.

    Args:
        database_url: SQLAlchemy database URL to connect to.
        echo: Engine echo flag; None is treated as False. Passed straight to
            create_engine, so SQLAlchemy's other accepted values also work.

    Returns:
        A new Session bound to an engine with connection pooling disabled.
    """
    echo = echo if echo is not None else False
    engine = create_engine(database_url, poolclass=NullPool, echo=echo)
    Base.metadata.create_all(engine)
    # Fix: the local was named `session_factory`, shadowing this function's own name.
    make_session = sessionmaker(bind=engine)
    return make_session()
class Haiku(Base):
    """A tweet identified as containing a haiku.

    To drop this table, run Haiku.metadata.drop_all(engine)
    """

    __tablename__ = "haikus"

    id = Column(Integer, primary_key=True)
    status_id_str = Column(String, nullable=False)  # source tweet's status ID
    user_screen_name = Column(String, nullable=False)
    user_id_str = Column(String, nullable=False)
    user_verified = Column(Boolean, nullable=False)
    created_at = Column(DateTime, nullable=False)
    text_original = Column(String, nullable=False)  # raw tweet body
    text_clean = Column(String, nullable=False)     # cleaned text the haiku was found in
    haiku = Column(String, nullable=False)
    date_posted = Column(DateTime, nullable=True)   # when (if) it was posted
    date_deleted = Column(DateTime, nullable=True)  # when (if) it was marked deleted

    @classmethod
    def add_haiku(cls, db_session, status, text, haiku, log_haiku: bool = None):
        """Build a Haiku row from a tweet status dict; insert it when log_haiku.

        Returns the Haiku instance whether or not it was committed.
        """
        log_haiku = log_haiku if log_haiku is not None else True
        tweet_haiku = cls(
            status_id_str=status["id_str"],
            user_screen_name=status["user"]["screen_name"],
            user_id_str=status["user"]["id_str"],
            user_verified=status["user"]["verified"],
            created_at=date_string_to_datetime(status["created_at"]),
            text_original=get_tweet_body(status),
            text_clean=text,
            haiku=haiku,
            date_posted=None,
            date_deleted=None,
        )
        if log_haiku:
            db_session.add(tweet_haiku)
            try:
                db_session.commit()
            except Exception as e:
                logger.warning(f"Exception when adding haiku: {e}")
                db_session.rollback()
        return tweet_haiku

    @classmethod
    def get_haikus_all(cls, db_session) -> list:
        """Get all records"""
        return db_session.query(cls).all()

    @classmethod
    def get_haikus_posted(cls, db_session) -> list:
        """Get all records that were posted and not deleted"""
        q = (
            db_session.query(cls)
            .filter(cls.date_posted != None)  # noqa: E711
            .filter(cls.date_deleted == None)  # noqa: E711
        )
        return q.all()

    @classmethod
    def get_haikus_unposted(cls, db_session) -> list:
        """Get all records that were neither posted nor deleted"""
        q = (
            db_session.query(cls)
            .filter(cls.date_posted == None)  # noqa: E711
            .filter(cls.date_deleted == None)  # noqa: E711
        )
        return q.all()

    @classmethod
    def get_haikus_unposted_timedelta(cls, db_session, td_seconds: int = None) -> list:
        """Get all unposted records created within the last td_seconds (default 3600)"""
        if td_seconds is None:
            td_seconds = 3600
        filter_td = datetime.now(tz=timezone.utc) - timedelta(seconds=td_seconds)
        q = (
            db_session.query(cls)
            .filter(cls.created_at > filter_td)
            .filter(cls.date_posted == None)  # noqa: E711
            .filter(cls.date_deleted == None)  # noqa: E711
        )
        return q.all()

    @classmethod
    def _set_status_field(cls, db_session, status_id_str: str, field: str, value, action: str) -> None:
        """Set one column on the row(s) matching status_id_str; rollback on failure.

        Shared implementation of the four update_haiku_* methods (previously
        four copies of the same try/update/commit/rollback block).
        """
        try:
            db_session.query(cls).filter(cls.status_id_str == status_id_str).update(
                {field: value}
            )
            db_session.commit()
        except Exception as e:
            logger.warning(f"Exception when updating haiku as {action}: {e}")
            db_session.rollback()

    @classmethod
    def update_haiku_posted(cls, db_session, status_id_str: str) -> None:
        """Mark haiku as posted"""
        cls._set_status_field(
            db_session, status_id_str, "date_posted", datetime.now(tz=timezone.utc), "posted"
        )

    @classmethod
    def update_haiku_unposted(cls, db_session, status_id_str: str) -> None:
        """Mark haiku as unposted"""
        cls._set_status_field(db_session, status_id_str, "date_posted", None, "unposted")

    @classmethod
    def update_haiku_deleted(cls, db_session, status_id_str: str) -> None:
        """Mark haiku as deleted"""
        cls._set_status_field(
            db_session, status_id_str, "date_deleted", datetime.now(tz=timezone.utc), "deleted"
        )

    @classmethod
    def update_haiku_undeleted(cls, db_session, status_id_str: str) -> None:
        """Mark haiku as undeleted"""
        cls._set_status_field(db_session, status_id_str, "date_deleted", None, "undeleted")

    @classmethod
    def delete_haikus_unposted_timedelta(cls, db_session, days: float = None) -> None:
        """Delete all unposted records older than N days (no-op when days is None).

        Fix: previously annotated `-> list` although nothing is returned.
        """
        if days is not None:
            ts_end = datetime.now(tz=timezone.utc) - timedelta(days=days)
            try:
                logger.info(f"Deleting unposted haikus older than {days} days")
                delete_q = (
                    cls.__table__.delete()
                    .where(cls.created_at < ts_end)
                    .where(cls.date_posted == None)  # noqa: E711
                )
                db_session.execute(delete_q)
                db_session.commit()
            except Exception as e:
                logger.warning(f"Exception when deleting old unposted haikus: {e}")
                db_session.rollback()

    @classmethod
    def delete_haikus_posted_timedelta(cls, db_session, days: float = None) -> None:
        """Delete all posted records older than N days (no-op when days is None).

        Fix: previously annotated `-> list` although nothing is returned.
        """
        if days is not None:
            ts_end = datetime.now(tz=timezone.utc) - timedelta(days=days)
            try:
                logger.info(f"Deleting posted haikus older than {days} days")
                delete_q = (
                    cls.__table__.delete()
                    .where(cls.created_at < ts_end)
                    .where(cls.date_posted != None)  # noqa: E711
                )
                db_session.execute(delete_q)
                db_session.commit()
            except Exception as e:
                logger.warning(f"Exception when deleting old posted haikus: {e}")
                db_session.rollback()

    @classmethod
    def keep_haikus_n_rows(cls, db_session, n: int = None) -> None:
        """Keep only the n most recently created rows (no-op when n is None)."""
        if n is not None:
            ids = db_session.query(cls.id).order_by(desc(cls.created_at)).all()
            ids_to_delete = [row[0] for row in ids[n:]]
            if ids_to_delete:
                try:
                    logger.info(f"Keeping most recent {n} rows of haikus")
                    delete_q = cls.__table__.delete().where(cls.id.in_(ids_to_delete))
                    db_session.execute(delete_q)
                    db_session.commit()
                except Exception as e:
                    logger.warning(
                        f"Exception when keeping most recent rows of haikus: {e}"
                    )
                    db_session.rollback()
985,098 | 6037ea78400fc50d81dd711c70247da0912182b1 | import requests
from bs4 import BeautifulSoup
from selenium import webdriver
def start():
    """Scrape the 2017-18 Bundesliga squad lists from footballsquads.co.uk.

    Returns:
        A list of [shirt_number, player_name, position, team_name] entries,
        one per successfully parsed player row.
    """
    browser = webdriver.Chrome('chromedriver.exe')
    try:
        url = 'http://www.footballsquads.co.uk/ger/2017-2018/'
        tag = 'gerbun.htm'
        browser.get(url+tag)
        soup = BeautifulSoup(browser.page_source, "html.parser")
        main_div = soup.find_all("div", {"id": "main"})
        team_headers = main_div[0].find_all('h5')
        p_list = []
        # Fix: the outer loop previously reused `i`, which the inner row loop shadowed.
        for header in team_headers:
            team = header.text
            browser.get(url+header.find("a").get("href"))
            soup = BeautifulSoup(browser.page_source, "html.parser")
            main_div = soup.find_all("div", {"id": "main"})
            tables = main_div[0].find_all('table')
            rows = tables[0].find('tbody').find_all('tr')
            for row_num in range(1, len(rows)):  # row 0 is the table header
                cells = rows[row_num].find_all('td')
                try:
                    if cells[1].text:
                        p_list.append([cells[1].text, cells[2].text, cells[3].text, team])
                except IndexError:
                    # Fix: was a bare `except`; a short row marks the end of the squad table.
                    break
        return p_list
    finally:
        # Fix: the WebDriver process was previously leaked.
        browser.quit()
985,099 | 13a1ff81189a5333693f88b82d5dccda99296cf3 | from __future__ import division, print_function
import numpy as np
import control.matlab as cm
import control as cp
import matplotlib.pyplot as plt
from IPython.display import Image
def control(s, Y, D):
    """Simulate and plot PD/PID closed-loop step responses for plant 1/(3s(s+1)).

    Args:
        s: transfer-function variable, cm.tf([1, 0], [1]).
        Y: reference signal expressed as a transfer function of s.
        D: disturbance signal expressed as a transfer function of s (0 for none).

    NOTE(review): the function name shadows the imported `control` package
    (bound as cp, which is unused); kept unchanged for caller compatibility.
    """
    end = 10
    # Fix: the identical np.linspace grid was previously rebuilt for every step() call.
    t_grid = np.linspace(0, end, 200)
    G = 1/(3*s*(s + 1))  # plant
    KP, KD, KI = 16, 7, 4  # controller gains
    # Fix: removed dead locals F, H_pd and H_pid, which were never used.
    PD = cm.feedback((KP + KD * s) * G, 1)  # PD controller
    PID = cm.feedback((KP + KD * s + KI / s) * G, 1)  # PID controller
    PD_FF = cm.tf([1], [1])  # ideal feed-forward reference tracking
    PID_FF = cm.tf([1], [1])
    # Disturbance-to-output transfer functions under each controller:
    D_PD = (G) / (1 + (KP + KD * s) * (G))
    D_PID = (G) / (1 + (KP + KD * s + KI / s) * (G))
    # Step responses to the reference and to the (negated) disturbance:
    out_PD, t = cm.step(Y * PD, t_grid)
    out_PID, t = cm.step(Y * PID, t_grid)
    out_PD_FF, t = cm.step(Y * PD_FF, t_grid)
    out_PID_FF, t = cm.step(Y * PID_FF, t_grid)
    out_PD_D, t = cm.step(-D * D_PD, t_grid)
    out_PID_D, t = cm.step(-D * D_PID, t_grid)
    # Total outputs: reference response plus disturbance response.
    theta_PD = out_PD + out_PD_D
    theta_PID = out_PID + out_PID_D
    theta_PD_FF = out_PD_FF + out_PD_D
    theta_PID_FF = out_PID_FF + out_PID_D
    y_out, t = cm.step(Y, t_grid)
    plt.plot(t, theta_PD, lw=2, label="PD")
    plt.plot(t, theta_PID, lw=2, label="PID")
    if D != 0:
        plt.plot(t, theta_PD_FF, lw=2, label="PD_FF")
        plt.plot(t, theta_PID_FF, lw=2, label="PID_FF")
    plt.plot(t, y_out, lw=1, label="Reference")
    plt.xlabel('Time')
    plt.ylabel('Position')
    plt.legend()
s = cm.tf([1, 0], [1])
y = 5
d = 10
# Reference (Y) / disturbance (D) scenarios, with matching plot titles.
Y = [y, y, y, y/s, y/s, (y*2)/(s**2), (y*2)/(s**2)]
D = [0, d, d/s, 0, d, 0, d]
title = ["constant reference, no disturbance",
         "constant reference, constant disturbance",
         "constant reference, ramp disturbance",
         "ramp reference, no disturbance",
         "ramp reference, constant disturbance",
         "second-order polynomial reference signal, no disturbance",
         "second-order polynomial reference signal, constant disturbance"]
# Fix: replaced the `for i in range(len(Y))` anti-idiom with lockstep zip iteration.
for ref, dist, plot_title in zip(Y, D, title):
    control(s, ref, dist)
    plt.title(plot_title)
    plt.show()
print("lata")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.